xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision f7c0e362a25f99fafa73d62a2e8c3da00cf1fc0e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoid non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allow user to force to use the chain instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and set a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Viceversa the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* Platform provided default clk_csr would be assumed valid
308 	 * for all other cases except for the below mentioned ones.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we can not estimate the proper divider as it is not known
311 	 * the frequency of clk_csr_i. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function is to verify and enter in LPI mode in case of
407  * EEE.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function is to exit and disable EEE in case of
433  * LPI state is true. This is called by the xmit.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then MAC Transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enable the LPI state and start related
468  *  timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Using PCS we cannot dial with the phy registers at this stage
475 	 * so we do not support extra feature like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function will read timestamp from the descriptor & pass it to stack.
534  * and also perform some sanity checks.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function will read received packet's timestamp from the descriptor
577  * and pass it to stack. It also perform some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure, that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing(TX)
614  *  and incoming(RX) packets time stamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate -ve integer on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* time stamp no incoming packet at all */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.AS1 any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.AS1, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.AS1, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure, that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtain the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * Will be rerun after resuming from suspend, case in which the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* calculate default added value:
858 	 * formula is :
859 	 * addend = (2^32)/freq_div_ratio;
860 	 * where, freq_div_ratio = 1e9ns/sec_inc
861 	 */
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
968 	} else {
969 		*lo_state = FPE_STATE_OFF;
970 		*lp_state = FPE_STATE_OFF;
971 	}
972 }
973 
974 static void stmmac_mac_link_down(struct phylink_config *config,
975 				 unsigned int mode, phy_interface_t interface)
976 {
977 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
978 
979 	stmmac_mac_set(priv, priv->ioaddr, false);
980 	priv->eee_active = false;
981 	priv->tx_lpi_enabled = false;
982 	priv->eee_enabled = stmmac_eee_init(priv);
983 	stmmac_set_eee_pls(priv, priv->hw, false);
984 
985 	if (priv->dma_cap.fpesel)
986 		stmmac_fpe_link_state_handle(priv, false);
987 }
988 
989 static void stmmac_mac_link_up(struct phylink_config *config,
990 			       struct phy_device *phy,
991 			       unsigned int mode, phy_interface_t interface,
992 			       int speed, int duplex,
993 			       bool tx_pause, bool rx_pause)
994 {
995 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
996 	u32 old_ctrl, ctrl;
997 
998 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
999 	    priv->plat->serdes_powerup)
1000 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1001 
1002 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1003 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1004 
1005 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1006 		switch (speed) {
1007 		case SPEED_10000:
1008 			ctrl |= priv->hw->link.xgmii.speed10000;
1009 			break;
1010 		case SPEED_5000:
1011 			ctrl |= priv->hw->link.xgmii.speed5000;
1012 			break;
1013 		case SPEED_2500:
1014 			ctrl |= priv->hw->link.xgmii.speed2500;
1015 			break;
1016 		default:
1017 			return;
1018 		}
1019 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1020 		switch (speed) {
1021 		case SPEED_100000:
1022 			ctrl |= priv->hw->link.xlgmii.speed100000;
1023 			break;
1024 		case SPEED_50000:
1025 			ctrl |= priv->hw->link.xlgmii.speed50000;
1026 			break;
1027 		case SPEED_40000:
1028 			ctrl |= priv->hw->link.xlgmii.speed40000;
1029 			break;
1030 		case SPEED_25000:
1031 			ctrl |= priv->hw->link.xlgmii.speed25000;
1032 			break;
1033 		case SPEED_10000:
1034 			ctrl |= priv->hw->link.xgmii.speed10000;
1035 			break;
1036 		case SPEED_2500:
1037 			ctrl |= priv->hw->link.speed2500;
1038 			break;
1039 		case SPEED_1000:
1040 			ctrl |= priv->hw->link.speed1000;
1041 			break;
1042 		default:
1043 			return;
1044 		}
1045 	} else {
1046 		switch (speed) {
1047 		case SPEED_2500:
1048 			ctrl |= priv->hw->link.speed2500;
1049 			break;
1050 		case SPEED_1000:
1051 			ctrl |= priv->hw->link.speed1000;
1052 			break;
1053 		case SPEED_100:
1054 			ctrl |= priv->hw->link.speed100;
1055 			break;
1056 		case SPEED_10:
1057 			ctrl |= priv->hw->link.speed10;
1058 			break;
1059 		default:
1060 			return;
1061 		}
1062 	}
1063 
1064 	priv->speed = speed;
1065 
1066 	if (priv->plat->fix_mac_speed)
1067 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1068 
1069 	if (!duplex)
1070 		ctrl &= ~priv->hw->link.duplex;
1071 	else
1072 		ctrl |= priv->hw->link.duplex;
1073 
1074 	/* Flow Control operation */
1075 	if (rx_pause && tx_pause)
1076 		priv->flow_ctrl = FLOW_AUTO;
1077 	else if (rx_pause && !tx_pause)
1078 		priv->flow_ctrl = FLOW_RX;
1079 	else if (!rx_pause && tx_pause)
1080 		priv->flow_ctrl = FLOW_TX;
1081 	else
1082 		priv->flow_ctrl = FLOW_OFF;
1083 
1084 	stmmac_mac_flow_ctrl(priv, duplex);
1085 
1086 	if (ctrl != old_ctrl)
1087 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1088 
1089 	stmmac_mac_set(priv, priv->ioaddr, true);
1090 	if (phy && priv->dma_cap.eee) {
1091 		priv->eee_active =
1092 			phy_init_eee(phy, !(priv->plat->flags &
1093 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1094 		priv->eee_enabled = stmmac_eee_init(priv);
1095 		priv->tx_lpi_enabled = priv->eee_enabled;
1096 		stmmac_set_eee_pls(priv, priv->hw, true);
1097 	}
1098 
1099 	if (priv->dma_cap.fpesel)
1100 		stmmac_fpe_link_state_handle(priv, true);
1101 
1102 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1103 		stmmac_hwtstamp_correct_latency(priv, priv);
1104 }
1105 
1106 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1107 	.mac_select_pcs = stmmac_mac_select_pcs,
1108 	.mac_config = stmmac_mac_config,
1109 	.mac_link_down = stmmac_mac_link_down,
1110 	.mac_link_up = stmmac_mac_link_up,
1111 };
1112 
1113 /**
1114  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1115  * @priv: driver private structure
1116  * Description: this is to verify if the HW supports the PCS.
1117  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1118  * configured for the TBI, RTBI, or SGMII PHY interface.
1119  */
1120 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1121 {
1122 	int interface = priv->plat->mac_interface;
1123 
1124 	if (priv->dma_cap.pcs) {
1125 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1129 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1130 			priv->hw->pcs = STMMAC_PCS_RGMII;
1131 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1132 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1133 			priv->hw->pcs = STMMAC_PCS_SGMII;
1134 		}
1135 	}
1136 }
1137 
1138 /**
1139  * stmmac_init_phy - PHY initialization
1140  * @dev: net device structure
1141  * Description: it initializes the driver's PHY state, and attaches the PHY
1142  * to the mac driver.
1143  *  Return value:
1144  *  0 on success
1145  */
1146 static int stmmac_init_phy(struct net_device *dev)
1147 {
1148 	struct stmmac_priv *priv = netdev_priv(dev);
1149 	struct fwnode_handle *phy_fwnode;
1150 	struct fwnode_handle *fwnode;
1151 	int ret;
1152 
1153 	if (!phylink_expects_phy(priv->phylink))
1154 		return 0;
1155 
1156 	fwnode = priv->plat->port_node;
1157 	if (!fwnode)
1158 		fwnode = dev_fwnode(priv->device);
1159 
1160 	if (fwnode)
1161 		phy_fwnode = fwnode_get_phy_node(fwnode);
1162 	else
1163 		phy_fwnode = NULL;
1164 
1165 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1166 	 * manually parse it
1167 	 */
1168 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1169 		int addr = priv->plat->phy_addr;
1170 		struct phy_device *phydev;
1171 
1172 		if (addr < 0) {
1173 			netdev_err(priv->dev, "no phy found\n");
1174 			return -ENODEV;
1175 		}
1176 
1177 		phydev = mdiobus_get_phy(priv->mii, addr);
1178 		if (!phydev) {
1179 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1180 			return -ENODEV;
1181 		}
1182 
1183 		ret = phylink_connect_phy(priv->phylink, phydev);
1184 	} else {
1185 		fwnode_handle_put(phy_fwnode);
1186 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187 	}
1188 
1189 	if (!priv->plat->pmt) {
1190 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191 
1192 		phylink_ethtool_get_wol(priv->phylink, &wol);
1193 		device_set_wakeup_capable(priv->device, !!wol.supported);
1194 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195 	}
1196 
1197 	return ret;
1198 }
1199 
1200 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1201 {
1202 	/* Half-Duplex can only work with single tx queue */
1203 	if (priv->plat->tx_queues_to_use > 1)
1204 		priv->phylink_config.mac_capabilities &=
1205 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
1206 	else
1207 		priv->phylink_config.mac_capabilities |=
1208 			(MAC_10HD | MAC_100HD | MAC_1000HD);
1209 }
1210 
1211 static int stmmac_phy_setup(struct stmmac_priv *priv)
1212 {
1213 	struct stmmac_mdio_bus_data *mdio_bus_data;
1214 	int mode = priv->plat->phy_interface;
1215 	struct fwnode_handle *fwnode;
1216 	struct phylink *phylink;
1217 	int max_speed;
1218 
1219 	priv->phylink_config.dev = &priv->dev->dev;
1220 	priv->phylink_config.type = PHYLINK_NETDEV;
1221 	priv->phylink_config.mac_managed_pm = true;
1222 
1223 	mdio_bus_data = priv->plat->mdio_bus_data;
1224 	if (mdio_bus_data)
1225 		priv->phylink_config.ovr_an_inband =
1226 			mdio_bus_data->xpcs_an_inband;
1227 
1228 	/* Set the platform/firmware specified interface mode. Note, phylink
1229 	 * deals with the PHY interface mode, not the MAC interface mode.
1230 	 */
1231 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1232 
1233 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1234 	if (priv->hw->xpcs)
1235 		xpcs_get_interfaces(priv->hw->xpcs,
1236 				    priv->phylink_config.supported_interfaces);
1237 
1238 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1239 						MAC_10FD | MAC_100FD |
1240 						MAC_1000FD;
1241 
1242 	stmmac_set_half_duplex(priv);
1243 
1244 	/* Get the MAC specific capabilities */
1245 	stmmac_mac_phylink_get_caps(priv);
1246 
1247 	max_speed = priv->plat->max_speed;
1248 	if (max_speed)
1249 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1250 
1251 	fwnode = priv->plat->port_node;
1252 	if (!fwnode)
1253 		fwnode = dev_fwnode(priv->device);
1254 
1255 	phylink = phylink_create(&priv->phylink_config, fwnode,
1256 				 mode, &stmmac_phylink_mac_ops);
1257 	if (IS_ERR(phylink))
1258 		return PTR_ERR(phylink);
1259 
1260 	priv->phylink = phylink;
1261 	return 0;
1262 }
1263 
1264 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1265 				    struct stmmac_dma_conf *dma_conf)
1266 {
1267 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1268 	unsigned int desc_size;
1269 	void *head_rx;
1270 	u32 queue;
1271 
1272 	/* Display RX rings */
1273 	for (queue = 0; queue < rx_cnt; queue++) {
1274 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1275 
1276 		pr_info("\tRX Queue %u rings\n", queue);
1277 
1278 		if (priv->extend_desc) {
1279 			head_rx = (void *)rx_q->dma_erx;
1280 			desc_size = sizeof(struct dma_extended_desc);
1281 		} else {
1282 			head_rx = (void *)rx_q->dma_rx;
1283 			desc_size = sizeof(struct dma_desc);
1284 		}
1285 
1286 		/* Display RX ring */
1287 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1288 				    rx_q->dma_rx_phy, desc_size);
1289 	}
1290 }
1291 
1292 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1293 				    struct stmmac_dma_conf *dma_conf)
1294 {
1295 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1296 	unsigned int desc_size;
1297 	void *head_tx;
1298 	u32 queue;
1299 
1300 	/* Display TX rings */
1301 	for (queue = 0; queue < tx_cnt; queue++) {
1302 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1303 
1304 		pr_info("\tTX Queue %d rings\n", queue);
1305 
1306 		if (priv->extend_desc) {
1307 			head_tx = (void *)tx_q->dma_etx;
1308 			desc_size = sizeof(struct dma_extended_desc);
1309 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1310 			head_tx = (void *)tx_q->dma_entx;
1311 			desc_size = sizeof(struct dma_edesc);
1312 		} else {
1313 			head_tx = (void *)tx_q->dma_tx;
1314 			desc_size = sizeof(struct dma_desc);
1315 		}
1316 
1317 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1318 				    tx_q->dma_tx_phy, desc_size);
1319 	}
1320 }
1321 
1322 static void stmmac_display_rings(struct stmmac_priv *priv,
1323 				 struct stmmac_dma_conf *dma_conf)
1324 {
1325 	/* Display RX ring */
1326 	stmmac_display_rx_rings(priv, dma_conf);
1327 
1328 	/* Display TX ring */
1329 	stmmac_display_tx_rings(priv, dma_conf);
1330 }
1331 
1332 static int stmmac_set_bfsize(int mtu, int bufsize)
1333 {
1334 	int ret = bufsize;
1335 
1336 	if (mtu >= BUF_SIZE_8KiB)
1337 		ret = BUF_SIZE_16KiB;
1338 	else if (mtu >= BUF_SIZE_4KiB)
1339 		ret = BUF_SIZE_8KiB;
1340 	else if (mtu >= BUF_SIZE_2KiB)
1341 		ret = BUF_SIZE_4KiB;
1342 	else if (mtu > DEFAULT_BUFSIZE)
1343 		ret = BUF_SIZE_2KiB;
1344 	else
1345 		ret = DEFAULT_BUFSIZE;
1346 
1347 	return ret;
1348 }
1349 
1350 /**
1351  * stmmac_clear_rx_descriptors - clear RX descriptors
1352  * @priv: driver private structure
1353  * @dma_conf: structure to take the dma data
1354  * @queue: RX queue index
1355  * Description: this function is called to clear the RX descriptors
1356  * in case of both basic and extended descriptors are used.
1357  */
1358 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1359 					struct stmmac_dma_conf *dma_conf,
1360 					u32 queue)
1361 {
1362 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1363 	int i;
1364 
1365 	/* Clear the RX descriptors */
1366 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1367 		if (priv->extend_desc)
1368 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1369 					priv->use_riwt, priv->mode,
1370 					(i == dma_conf->dma_rx_size - 1),
1371 					dma_conf->dma_buf_sz);
1372 		else
1373 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1374 					priv->use_riwt, priv->mode,
1375 					(i == dma_conf->dma_rx_size - 1),
1376 					dma_conf->dma_buf_sz);
1377 }
1378 
1379 /**
1380  * stmmac_clear_tx_descriptors - clear tx descriptors
1381  * @priv: driver private structure
1382  * @dma_conf: structure to take the dma data
1383  * @queue: TX queue index.
1384  * Description: this function is called to clear the TX descriptors
1385  * in case of both basic and extended descriptors are used.
1386  */
1387 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1388 					struct stmmac_dma_conf *dma_conf,
1389 					u32 queue)
1390 {
1391 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1392 	int i;
1393 
1394 	/* Clear the TX descriptors */
1395 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1396 		int last = (i == (dma_conf->dma_tx_size - 1));
1397 		struct dma_desc *p;
1398 
1399 		if (priv->extend_desc)
1400 			p = &tx_q->dma_etx[i].basic;
1401 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1402 			p = &tx_q->dma_entx[i].basic;
1403 		else
1404 			p = &tx_q->dma_tx[i];
1405 
1406 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1407 	}
1408 }
1409 
1410 /**
1411  * stmmac_clear_descriptors - clear descriptors
1412  * @priv: driver private structure
1413  * @dma_conf: structure to take the dma data
1414  * Description: this function is called to clear the TX and RX descriptors
1415  * in case of both basic and extended descriptors are used.
1416  */
1417 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1418 				     struct stmmac_dma_conf *dma_conf)
1419 {
1420 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1421 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1422 	u32 queue;
1423 
1424 	/* Clear the RX descriptors */
1425 	for (queue = 0; queue < rx_queue_cnt; queue++)
1426 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1427 
1428 	/* Clear the TX descriptors */
1429 	for (queue = 0; queue < tx_queue_cnt; queue++)
1430 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1431 }
1432 
1433 /**
1434  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1435  * @priv: driver private structure
1436  * @dma_conf: structure to take the dma data
1437  * @p: descriptor pointer
1438  * @i: descriptor index
1439  * @flags: gfp flag
1440  * @queue: RX queue index
1441  * Description: this function is called to allocate a receive buffer, perform
1442  * the DMA mapping and init the descriptor.
1443  */
1444 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1445 				  struct stmmac_dma_conf *dma_conf,
1446 				  struct dma_desc *p,
1447 				  int i, gfp_t flags, u32 queue)
1448 {
1449 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1450 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1451 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1452 
1453 	if (priv->dma_cap.host_dma_width <= 32)
1454 		gfp |= GFP_DMA32;
1455 
1456 	if (!buf->page) {
1457 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1458 		if (!buf->page)
1459 			return -ENOMEM;
1460 		buf->page_offset = stmmac_rx_offset(priv);
1461 	}
1462 
1463 	if (priv->sph && !buf->sec_page) {
1464 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1465 		if (!buf->sec_page)
1466 			return -ENOMEM;
1467 
1468 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1469 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1470 	} else {
1471 		buf->sec_page = NULL;
1472 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1473 	}
1474 
1475 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1476 
1477 	stmmac_set_desc_addr(priv, p, buf->addr);
1478 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1479 		stmmac_init_desc3(priv, p);
1480 
1481 	return 0;
1482 }
1483 
1484 /**
1485  * stmmac_free_rx_buffer - free RX dma buffers
1486  * @priv: private structure
1487  * @rx_q: RX queue
1488  * @i: buffer index.
1489  */
1490 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1491 				  struct stmmac_rx_queue *rx_q,
1492 				  int i)
1493 {
1494 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1495 
1496 	if (buf->page)
1497 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1498 	buf->page = NULL;
1499 
1500 	if (buf->sec_page)
1501 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1502 	buf->sec_page = NULL;
1503 }
1504 
1505 /**
1506  * stmmac_free_tx_buffer - free RX dma buffers
1507  * @priv: private structure
1508  * @dma_conf: structure to take the dma data
1509  * @queue: RX queue index
1510  * @i: buffer index.
1511  */
1512 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1513 				  struct stmmac_dma_conf *dma_conf,
1514 				  u32 queue, int i)
1515 {
1516 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1517 
1518 	if (tx_q->tx_skbuff_dma[i].buf &&
1519 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1520 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1521 			dma_unmap_page(priv->device,
1522 				       tx_q->tx_skbuff_dma[i].buf,
1523 				       tx_q->tx_skbuff_dma[i].len,
1524 				       DMA_TO_DEVICE);
1525 		else
1526 			dma_unmap_single(priv->device,
1527 					 tx_q->tx_skbuff_dma[i].buf,
1528 					 tx_q->tx_skbuff_dma[i].len,
1529 					 DMA_TO_DEVICE);
1530 	}
1531 
1532 	if (tx_q->xdpf[i] &&
1533 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1534 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1535 		xdp_return_frame(tx_q->xdpf[i]);
1536 		tx_q->xdpf[i] = NULL;
1537 	}
1538 
1539 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1540 		tx_q->xsk_frames_done++;
1541 
1542 	if (tx_q->tx_skbuff[i] &&
1543 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1544 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1545 		tx_q->tx_skbuff[i] = NULL;
1546 	}
1547 
1548 	tx_q->tx_skbuff_dma[i].buf = 0;
1549 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1550 }
1551 
1552 /**
1553  * dma_free_rx_skbufs - free RX dma buffers
1554  * @priv: private structure
1555  * @dma_conf: structure to take the dma data
1556  * @queue: RX queue index
1557  */
1558 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1559 			       struct stmmac_dma_conf *dma_conf,
1560 			       u32 queue)
1561 {
1562 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1563 	int i;
1564 
1565 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1566 		stmmac_free_rx_buffer(priv, rx_q, i);
1567 }
1568 
1569 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1570 				   struct stmmac_dma_conf *dma_conf,
1571 				   u32 queue, gfp_t flags)
1572 {
1573 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1574 	int i;
1575 
1576 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1577 		struct dma_desc *p;
1578 		int ret;
1579 
1580 		if (priv->extend_desc)
1581 			p = &((rx_q->dma_erx + i)->basic);
1582 		else
1583 			p = rx_q->dma_rx + i;
1584 
1585 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1586 					     queue);
1587 		if (ret)
1588 			return ret;
1589 
1590 		rx_q->buf_alloc_num++;
1591 	}
1592 
1593 	return 0;
1594 }
1595 
1596 /**
1597  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1598  * @priv: private structure
1599  * @dma_conf: structure to take the dma data
1600  * @queue: RX queue index
1601  */
1602 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1603 				struct stmmac_dma_conf *dma_conf,
1604 				u32 queue)
1605 {
1606 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1607 	int i;
1608 
1609 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1610 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1611 
1612 		if (!buf->xdp)
1613 			continue;
1614 
1615 		xsk_buff_free(buf->xdp);
1616 		buf->xdp = NULL;
1617 	}
1618 }
1619 
1620 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1621 				      struct stmmac_dma_conf *dma_conf,
1622 				      u32 queue)
1623 {
1624 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1625 	int i;
1626 
1627 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1628 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1629 	 * use this macro to make sure no size violations.
1630 	 */
1631 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1632 
1633 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1634 		struct stmmac_rx_buffer *buf;
1635 		dma_addr_t dma_addr;
1636 		struct dma_desc *p;
1637 
1638 		if (priv->extend_desc)
1639 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1640 		else
1641 			p = rx_q->dma_rx + i;
1642 
1643 		buf = &rx_q->buf_pool[i];
1644 
1645 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1646 		if (!buf->xdp)
1647 			return -ENOMEM;
1648 
1649 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1650 		stmmac_set_desc_addr(priv, p, dma_addr);
1651 		rx_q->buf_alloc_num++;
1652 	}
1653 
1654 	return 0;
1655 }
1656 
1657 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1658 {
1659 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1660 		return NULL;
1661 
1662 	return xsk_get_pool_from_qid(priv->dev, queue);
1663 }
1664 
1665 /**
1666  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1667  * @priv: driver private structure
1668  * @dma_conf: structure to take the dma data
1669  * @queue: RX queue index
1670  * @flags: gfp flag.
1671  * Description: this function initializes the DMA RX descriptors
1672  * and allocates the socket buffers. It supports the chained and ring
1673  * modes.
1674  */
1675 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1676 				    struct stmmac_dma_conf *dma_conf,
1677 				    u32 queue, gfp_t flags)
1678 {
1679 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1680 	int ret;
1681 
1682 	netif_dbg(priv, probe, priv->dev,
1683 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1684 		  (u32)rx_q->dma_rx_phy);
1685 
1686 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1687 
1688 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1689 
1690 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1691 
1692 	if (rx_q->xsk_pool) {
1693 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1694 						   MEM_TYPE_XSK_BUFF_POOL,
1695 						   NULL));
1696 		netdev_info(priv->dev,
1697 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1698 			    rx_q->queue_index);
1699 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1700 	} else {
1701 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1702 						   MEM_TYPE_PAGE_POOL,
1703 						   rx_q->page_pool));
1704 		netdev_info(priv->dev,
1705 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1706 			    rx_q->queue_index);
1707 	}
1708 
1709 	if (rx_q->xsk_pool) {
1710 		/* RX XDP ZC buffer pool may not be populated, e.g.
1711 		 * xdpsock TX-only.
1712 		 */
1713 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1714 	} else {
1715 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1716 		if (ret < 0)
1717 			return -ENOMEM;
1718 	}
1719 
1720 	/* Setup the chained descriptor addresses */
1721 	if (priv->mode == STMMAC_CHAIN_MODE) {
1722 		if (priv->extend_desc)
1723 			stmmac_mode_init(priv, rx_q->dma_erx,
1724 					 rx_q->dma_rx_phy,
1725 					 dma_conf->dma_rx_size, 1);
1726 		else
1727 			stmmac_mode_init(priv, rx_q->dma_rx,
1728 					 rx_q->dma_rx_phy,
1729 					 dma_conf->dma_rx_size, 0);
1730 	}
1731 
1732 	return 0;
1733 }
1734 
1735 static int init_dma_rx_desc_rings(struct net_device *dev,
1736 				  struct stmmac_dma_conf *dma_conf,
1737 				  gfp_t flags)
1738 {
1739 	struct stmmac_priv *priv = netdev_priv(dev);
1740 	u32 rx_count = priv->plat->rx_queues_to_use;
1741 	int queue;
1742 	int ret;
1743 
1744 	/* RX INITIALIZATION */
1745 	netif_dbg(priv, probe, priv->dev,
1746 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1747 
1748 	for (queue = 0; queue < rx_count; queue++) {
1749 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1750 		if (ret)
1751 			goto err_init_rx_buffers;
1752 	}
1753 
1754 	return 0;
1755 
1756 err_init_rx_buffers:
1757 	while (queue >= 0) {
1758 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1759 
1760 		if (rx_q->xsk_pool)
1761 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1762 		else
1763 			dma_free_rx_skbufs(priv, dma_conf, queue);
1764 
1765 		rx_q->buf_alloc_num = 0;
1766 		rx_q->xsk_pool = NULL;
1767 
1768 		queue--;
1769 	}
1770 
1771 	return ret;
1772 }
1773 
1774 /**
1775  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1776  * @priv: driver private structure
1777  * @dma_conf: structure to take the dma data
1778  * @queue: TX queue index
1779  * Description: this function initializes the DMA TX descriptors
1780  * and allocates the socket buffers. It supports the chained and ring
1781  * modes.
1782  */
1783 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1784 				    struct stmmac_dma_conf *dma_conf,
1785 				    u32 queue)
1786 {
1787 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1788 	int i;
1789 
1790 	netif_dbg(priv, probe, priv->dev,
1791 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1792 		  (u32)tx_q->dma_tx_phy);
1793 
1794 	/* Setup the chained descriptor addresses */
1795 	if (priv->mode == STMMAC_CHAIN_MODE) {
1796 		if (priv->extend_desc)
1797 			stmmac_mode_init(priv, tx_q->dma_etx,
1798 					 tx_q->dma_tx_phy,
1799 					 dma_conf->dma_tx_size, 1);
1800 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1801 			stmmac_mode_init(priv, tx_q->dma_tx,
1802 					 tx_q->dma_tx_phy,
1803 					 dma_conf->dma_tx_size, 0);
1804 	}
1805 
1806 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1807 
1808 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1809 		struct dma_desc *p;
1810 
1811 		if (priv->extend_desc)
1812 			p = &((tx_q->dma_etx + i)->basic);
1813 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1814 			p = &((tx_q->dma_entx + i)->basic);
1815 		else
1816 			p = tx_q->dma_tx + i;
1817 
1818 		stmmac_clear_desc(priv, p);
1819 
1820 		tx_q->tx_skbuff_dma[i].buf = 0;
1821 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1822 		tx_q->tx_skbuff_dma[i].len = 0;
1823 		tx_q->tx_skbuff_dma[i].last_segment = false;
1824 		tx_q->tx_skbuff[i] = NULL;
1825 	}
1826 
1827 	return 0;
1828 }
1829 
1830 static int init_dma_tx_desc_rings(struct net_device *dev,
1831 				  struct stmmac_dma_conf *dma_conf)
1832 {
1833 	struct stmmac_priv *priv = netdev_priv(dev);
1834 	u32 tx_queue_cnt;
1835 	u32 queue;
1836 
1837 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1838 
1839 	for (queue = 0; queue < tx_queue_cnt; queue++)
1840 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1841 
1842 	return 0;
1843 }
1844 
1845 /**
1846  * init_dma_desc_rings - init the RX/TX descriptor rings
1847  * @dev: net device structure
1848  * @dma_conf: structure to take the dma data
1849  * @flags: gfp flag.
1850  * Description: this function initializes the DMA RX/TX descriptors
1851  * and allocates the socket buffers. It supports the chained and ring
1852  * modes.
1853  */
1854 static int init_dma_desc_rings(struct net_device *dev,
1855 			       struct stmmac_dma_conf *dma_conf,
1856 			       gfp_t flags)
1857 {
1858 	struct stmmac_priv *priv = netdev_priv(dev);
1859 	int ret;
1860 
1861 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1862 	if (ret)
1863 		return ret;
1864 
1865 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1866 
1867 	stmmac_clear_descriptors(priv, dma_conf);
1868 
1869 	if (netif_msg_hw(priv))
1870 		stmmac_display_rings(priv, dma_conf);
1871 
1872 	return ret;
1873 }
1874 
1875 /**
1876  * dma_free_tx_skbufs - free TX dma buffers
1877  * @priv: private structure
1878  * @dma_conf: structure to take the dma data
1879  * @queue: TX queue index
1880  */
1881 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1882 			       struct stmmac_dma_conf *dma_conf,
1883 			       u32 queue)
1884 {
1885 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1886 	int i;
1887 
1888 	tx_q->xsk_frames_done = 0;
1889 
1890 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1891 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1892 
1893 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1894 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1895 		tx_q->xsk_frames_done = 0;
1896 		tx_q->xsk_pool = NULL;
1897 	}
1898 }
1899 
1900 /**
1901  * stmmac_free_tx_skbufs - free TX skb buffers
1902  * @priv: private structure
1903  */
1904 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1905 {
1906 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1907 	u32 queue;
1908 
1909 	for (queue = 0; queue < tx_queue_cnt; queue++)
1910 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1911 }
1912 
1913 /**
1914  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1915  * @priv: private structure
1916  * @dma_conf: structure to take the dma data
1917  * @queue: RX queue index
1918  */
1919 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1920 					 struct stmmac_dma_conf *dma_conf,
1921 					 u32 queue)
1922 {
1923 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1924 
1925 	/* Release the DMA RX socket buffers */
1926 	if (rx_q->xsk_pool)
1927 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1928 	else
1929 		dma_free_rx_skbufs(priv, dma_conf, queue);
1930 
1931 	rx_q->buf_alloc_num = 0;
1932 	rx_q->xsk_pool = NULL;
1933 
1934 	/* Free DMA regions of consistent memory previously allocated */
1935 	if (!priv->extend_desc)
1936 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1937 				  sizeof(struct dma_desc),
1938 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1939 	else
1940 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1941 				  sizeof(struct dma_extended_desc),
1942 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1943 
1944 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1945 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1946 
1947 	kfree(rx_q->buf_pool);
1948 	if (rx_q->page_pool)
1949 		page_pool_destroy(rx_q->page_pool);
1950 }
1951 
1952 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1953 				       struct stmmac_dma_conf *dma_conf)
1954 {
1955 	u32 rx_count = priv->plat->rx_queues_to_use;
1956 	u32 queue;
1957 
1958 	/* Free RX queue resources */
1959 	for (queue = 0; queue < rx_count; queue++)
1960 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1961 }
1962 
1963 /**
1964  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1965  * @priv: private structure
1966  * @dma_conf: structure to take the dma data
1967  * @queue: TX queue index
1968  */
1969 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1970 					 struct stmmac_dma_conf *dma_conf,
1971 					 u32 queue)
1972 {
1973 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1974 	size_t size;
1975 	void *addr;
1976 
1977 	/* Release the DMA TX socket buffers */
1978 	dma_free_tx_skbufs(priv, dma_conf, queue);
1979 
1980 	if (priv->extend_desc) {
1981 		size = sizeof(struct dma_extended_desc);
1982 		addr = tx_q->dma_etx;
1983 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1984 		size = sizeof(struct dma_edesc);
1985 		addr = tx_q->dma_entx;
1986 	} else {
1987 		size = sizeof(struct dma_desc);
1988 		addr = tx_q->dma_tx;
1989 	}
1990 
1991 	size *= dma_conf->dma_tx_size;
1992 
1993 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1994 
1995 	kfree(tx_q->tx_skbuff_dma);
1996 	kfree(tx_q->tx_skbuff);
1997 }
1998 
1999 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2000 				       struct stmmac_dma_conf *dma_conf)
2001 {
2002 	u32 tx_count = priv->plat->tx_queues_to_use;
2003 	u32 queue;
2004 
2005 	/* Free TX queue resources */
2006 	for (queue = 0; queue < tx_count; queue++)
2007 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2008 }
2009 
2010 /**
2011  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2012  * @priv: private structure
2013  * @dma_conf: structure to take the dma data
2014  * @queue: RX queue index
2015  * Description: according to which descriptor can be used (extend or basic)
2016  * this function allocates the resources for TX and RX paths. In case of
2017  * reception, for example, it pre-allocated the RX socket buffer in order to
2018  * allow zero-copy mechanism.
2019  */
2020 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2021 					 struct stmmac_dma_conf *dma_conf,
2022 					 u32 queue)
2023 {
2024 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2025 	struct stmmac_channel *ch = &priv->channel[queue];
2026 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2027 	struct page_pool_params pp_params = { 0 };
2028 	unsigned int num_pages;
2029 	unsigned int napi_id;
2030 	int ret;
2031 
2032 	rx_q->queue_index = queue;
2033 	rx_q->priv_data = priv;
2034 
2035 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2036 	pp_params.pool_size = dma_conf->dma_rx_size;
2037 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2038 	pp_params.order = ilog2(num_pages);
2039 	pp_params.nid = dev_to_node(priv->device);
2040 	pp_params.dev = priv->device;
2041 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2042 	pp_params.offset = stmmac_rx_offset(priv);
2043 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2044 
2045 	rx_q->page_pool = page_pool_create(&pp_params);
2046 	if (IS_ERR(rx_q->page_pool)) {
2047 		ret = PTR_ERR(rx_q->page_pool);
2048 		rx_q->page_pool = NULL;
2049 		return ret;
2050 	}
2051 
2052 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2053 				 sizeof(*rx_q->buf_pool),
2054 				 GFP_KERNEL);
2055 	if (!rx_q->buf_pool)
2056 		return -ENOMEM;
2057 
2058 	if (priv->extend_desc) {
2059 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2060 						   dma_conf->dma_rx_size *
2061 						   sizeof(struct dma_extended_desc),
2062 						   &rx_q->dma_rx_phy,
2063 						   GFP_KERNEL);
2064 		if (!rx_q->dma_erx)
2065 			return -ENOMEM;
2066 
2067 	} else {
2068 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2069 						  dma_conf->dma_rx_size *
2070 						  sizeof(struct dma_desc),
2071 						  &rx_q->dma_rx_phy,
2072 						  GFP_KERNEL);
2073 		if (!rx_q->dma_rx)
2074 			return -ENOMEM;
2075 	}
2076 
2077 	if (stmmac_xdp_is_enabled(priv) &&
2078 	    test_bit(queue, priv->af_xdp_zc_qps))
2079 		napi_id = ch->rxtx_napi.napi_id;
2080 	else
2081 		napi_id = ch->rx_napi.napi_id;
2082 
2083 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2084 			       rx_q->queue_index,
2085 			       napi_id);
2086 	if (ret) {
2087 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2088 		return -EINVAL;
2089 	}
2090 
2091 	return 0;
2092 }
2093 
2094 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2095 				       struct stmmac_dma_conf *dma_conf)
2096 {
2097 	u32 rx_count = priv->plat->rx_queues_to_use;
2098 	u32 queue;
2099 	int ret;
2100 
2101 	/* RX queues buffers and DMA */
2102 	for (queue = 0; queue < rx_count; queue++) {
2103 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2104 		if (ret)
2105 			goto err_dma;
2106 	}
2107 
2108 	return 0;
2109 
2110 err_dma:
2111 	free_dma_rx_desc_resources(priv, dma_conf);
2112 
2113 	return ret;
2114 }
2115 
2116 /**
2117  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2118  * @priv: private structure
2119  * @dma_conf: structure to take the dma data
2120  * @queue: TX queue index
2121  * Description: according to which descriptor can be used (extend or basic)
2122  * this function allocates the resources for TX and RX paths. In case of
2123  * reception, for example, it pre-allocated the RX socket buffer in order to
2124  * allow zero-copy mechanism.
2125  */
2126 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2127 					 struct stmmac_dma_conf *dma_conf,
2128 					 u32 queue)
2129 {
2130 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2131 	size_t size;
2132 	void *addr;
2133 
2134 	tx_q->queue_index = queue;
2135 	tx_q->priv_data = priv;
2136 
2137 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2138 				      sizeof(*tx_q->tx_skbuff_dma),
2139 				      GFP_KERNEL);
2140 	if (!tx_q->tx_skbuff_dma)
2141 		return -ENOMEM;
2142 
2143 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2144 				  sizeof(struct sk_buff *),
2145 				  GFP_KERNEL);
2146 	if (!tx_q->tx_skbuff)
2147 		return -ENOMEM;
2148 
2149 	if (priv->extend_desc)
2150 		size = sizeof(struct dma_extended_desc);
2151 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2152 		size = sizeof(struct dma_edesc);
2153 	else
2154 		size = sizeof(struct dma_desc);
2155 
2156 	size *= dma_conf->dma_tx_size;
2157 
2158 	addr = dma_alloc_coherent(priv->device, size,
2159 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2160 	if (!addr)
2161 		return -ENOMEM;
2162 
2163 	if (priv->extend_desc)
2164 		tx_q->dma_etx = addr;
2165 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2166 		tx_q->dma_entx = addr;
2167 	else
2168 		tx_q->dma_tx = addr;
2169 
2170 	return 0;
2171 }
2172 
2173 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2174 				       struct stmmac_dma_conf *dma_conf)
2175 {
2176 	u32 tx_count = priv->plat->tx_queues_to_use;
2177 	u32 queue;
2178 	int ret;
2179 
2180 	/* TX queues buffers and DMA */
2181 	for (queue = 0; queue < tx_count; queue++) {
2182 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2183 		if (ret)
2184 			goto err_dma;
2185 	}
2186 
2187 	return 0;
2188 
2189 err_dma:
2190 	free_dma_tx_desc_resources(priv, dma_conf);
2191 	return ret;
2192 }
2193 
2194 /**
2195  * alloc_dma_desc_resources - alloc TX/RX resources.
2196  * @priv: private structure
2197  * @dma_conf: structure to take the dma data
2198  * Description: according to which descriptor can be used (extend or basic)
2199  * this function allocates the resources for TX and RX paths. In case of
2200  * reception, for example, it pre-allocated the RX socket buffer in order to
2201  * allow zero-copy mechanism.
2202  */
2203 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2204 				    struct stmmac_dma_conf *dma_conf)
2205 {
2206 	/* RX Allocation */
2207 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2208 
2209 	if (ret)
2210 		return ret;
2211 
2212 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2213 
2214 	return ret;
2215 }
2216 
2217 /**
2218  * free_dma_desc_resources - free dma desc resources
2219  * @priv: private structure
2220  * @dma_conf: structure to take the dma data
2221  */
2222 static void free_dma_desc_resources(struct stmmac_priv *priv,
2223 				    struct stmmac_dma_conf *dma_conf)
2224 {
2225 	/* Release the DMA TX socket buffers */
2226 	free_dma_tx_desc_resources(priv, dma_conf);
2227 
2228 	/* Release the DMA RX socket buffers later
2229 	 * to ensure all pending XDP_TX buffers are returned.
2230 	 */
2231 	free_dma_rx_desc_resources(priv, dma_conf);
2232 }
2233 
2234 /**
2235  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2236  *  @priv: driver private structure
2237  *  Description: It is used for enabling the rx queues in the MAC
2238  */
2239 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2240 {
2241 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2242 	int queue;
2243 	u8 mode;
2244 
2245 	for (queue = 0; queue < rx_queues_count; queue++) {
2246 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2247 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2248 	}
2249 }
2250 
2251 /**
2252  * stmmac_start_rx_dma - start RX DMA channel
2253  * @priv: driver private structure
2254  * @chan: RX channel index
2255  * Description:
2256  * This starts a RX DMA channel
2257  */
2258 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2259 {
2260 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2261 	stmmac_start_rx(priv, priv->ioaddr, chan);
2262 }
2263 
2264 /**
2265  * stmmac_start_tx_dma - start TX DMA channel
2266  * @priv: driver private structure
2267  * @chan: TX channel index
2268  * Description:
2269  * This starts a TX DMA channel
2270  */
2271 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2272 {
2273 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2274 	stmmac_start_tx(priv, priv->ioaddr, chan);
2275 }
2276 
2277 /**
2278  * stmmac_stop_rx_dma - stop RX DMA channel
2279  * @priv: driver private structure
2280  * @chan: RX channel index
2281  * Description:
2282  * This stops a RX DMA channel
2283  */
2284 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2285 {
2286 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2287 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2288 }
2289 
2290 /**
2291  * stmmac_stop_tx_dma - stop TX DMA channel
2292  * @priv: driver private structure
2293  * @chan: TX channel index
2294  * Description:
2295  * This stops a TX DMA channel
2296  */
2297 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2298 {
2299 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2300 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2301 }
2302 
2303 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2304 {
2305 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2306 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2307 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2308 	u32 chan;
2309 
2310 	for (chan = 0; chan < dma_csr_ch; chan++) {
2311 		struct stmmac_channel *ch = &priv->channel[chan];
2312 		unsigned long flags;
2313 
2314 		spin_lock_irqsave(&ch->lock, flags);
2315 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2316 		spin_unlock_irqrestore(&ch->lock, flags);
2317 	}
2318 }
2319 
2320 /**
2321  * stmmac_start_all_dma - start all RX and TX DMA channels
2322  * @priv: driver private structure
2323  * Description:
2324  * This starts all the RX and TX DMA channels
2325  */
2326 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2327 {
2328 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2329 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2330 	u32 chan = 0;
2331 
2332 	for (chan = 0; chan < rx_channels_count; chan++)
2333 		stmmac_start_rx_dma(priv, chan);
2334 
2335 	for (chan = 0; chan < tx_channels_count; chan++)
2336 		stmmac_start_tx_dma(priv, chan);
2337 }
2338 
2339 /**
2340  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2341  * @priv: driver private structure
2342  * Description:
2343  * This stops the RX and TX DMA channels
2344  */
2345 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2346 {
2347 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2348 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2349 	u32 chan = 0;
2350 
2351 	for (chan = 0; chan < rx_channels_count; chan++)
2352 		stmmac_stop_rx_dma(priv, chan);
2353 
2354 	for (chan = 0; chan < tx_channels_count; chan++)
2355 		stmmac_stop_tx_dma(priv, chan);
2356 }
2357 
2358 /**
2359  *  stmmac_dma_operation_mode - HW DMA operation mode
2360  *  @priv: driver private structure
2361  *  Description: it is used for configuring the DMA operation mode register in
2362  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2363  */
2364 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2365 {
2366 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2367 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2368 	int rxfifosz = priv->plat->rx_fifo_size;
2369 	int txfifosz = priv->plat->tx_fifo_size;
2370 	u32 txmode = 0;
2371 	u32 rxmode = 0;
2372 	u32 chan = 0;
2373 	u8 qmode = 0;
2374 
2375 	if (rxfifosz == 0)
2376 		rxfifosz = priv->dma_cap.rx_fifo_size;
2377 	if (txfifosz == 0)
2378 		txfifosz = priv->dma_cap.tx_fifo_size;
2379 
2380 	/* Adjust for real per queue fifo size */
2381 	rxfifosz /= rx_channels_count;
2382 	txfifosz /= tx_channels_count;
2383 
2384 	if (priv->plat->force_thresh_dma_mode) {
2385 		txmode = tc;
2386 		rxmode = tc;
2387 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2388 		/*
2389 		 * In case of GMAC, SF mode can be enabled
2390 		 * to perform the TX COE in HW. This depends on:
2391 		 * 1) TX COE if actually supported
2392 		 * 2) There is no bugged Jumbo frame support
2393 		 *    that needs to not insert csum in the TDES.
2394 		 */
2395 		txmode = SF_DMA_MODE;
2396 		rxmode = SF_DMA_MODE;
2397 		priv->xstats.threshold = SF_DMA_MODE;
2398 	} else {
2399 		txmode = tc;
2400 		rxmode = SF_DMA_MODE;
2401 	}
2402 
2403 	/* configure all channels */
2404 	for (chan = 0; chan < rx_channels_count; chan++) {
2405 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2406 		u32 buf_size;
2407 
2408 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2409 
2410 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2411 				rxfifosz, qmode);
2412 
2413 		if (rx_q->xsk_pool) {
2414 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2415 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2416 					      buf_size,
2417 					      chan);
2418 		} else {
2419 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2420 					      priv->dma_conf.dma_buf_sz,
2421 					      chan);
2422 		}
2423 	}
2424 
2425 	for (chan = 0; chan < tx_channels_count; chan++) {
2426 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2427 
2428 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2429 				txfifosz, qmode);
2430 	}
2431 }
2432 
2433 static void stmmac_xsk_request_timestamp(void *_priv)
2434 {
2435 	struct stmmac_metadata_request *meta_req = _priv;
2436 
2437 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2438 	*meta_req->set_ic = true;
2439 }
2440 
2441 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2442 {
2443 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2444 	struct stmmac_priv *priv = tx_compl->priv;
2445 	struct dma_desc *desc = tx_compl->desc;
2446 	bool found = false;
2447 	u64 ns = 0;
2448 
2449 	if (!priv->hwts_tx_en)
2450 		return 0;
2451 
2452 	/* check tx tstamp status */
2453 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2454 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2455 		found = true;
2456 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2457 		found = true;
2458 	}
2459 
2460 	if (found) {
2461 		ns -= priv->plat->cdc_error_adj;
2462 		return ns_to_ktime(ns);
2463 	}
2464 
2465 	return 0;
2466 }
2467 
2468 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2469 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2470 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2471 };
2472 
2473 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2474 {
2475 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2476 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2477 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2478 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2479 	unsigned int entry = tx_q->cur_tx;
2480 	struct dma_desc *tx_desc = NULL;
2481 	struct xdp_desc xdp_desc;
2482 	bool work_done = true;
2483 	u32 tx_set_ic_bit = 0;
2484 	unsigned long flags;
2485 
2486 	/* Avoids TX time-out as we are sharing with slow path */
2487 	txq_trans_cond_update(nq);
2488 
2489 	budget = min(budget, stmmac_tx_avail(priv, queue));
2490 
2491 	while (budget-- > 0) {
2492 		struct stmmac_metadata_request meta_req;
2493 		struct xsk_tx_metadata *meta = NULL;
2494 		dma_addr_t dma_addr;
2495 		bool set_ic;
2496 
2497 		/* We are sharing with slow path and stop XSK TX desc submission when
2498 		 * available TX ring is less than threshold.
2499 		 */
2500 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2501 		    !netif_carrier_ok(priv->dev)) {
2502 			work_done = false;
2503 			break;
2504 		}
2505 
2506 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2507 			break;
2508 
2509 		if (likely(priv->extend_desc))
2510 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2511 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2512 			tx_desc = &tx_q->dma_entx[entry].basic;
2513 		else
2514 			tx_desc = tx_q->dma_tx + entry;
2515 
2516 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2517 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2518 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2519 
2520 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2521 
2522 		/* To return XDP buffer to XSK pool, we simple call
2523 		 * xsk_tx_completed(), so we don't need to fill up
2524 		 * 'buf' and 'xdpf'.
2525 		 */
2526 		tx_q->tx_skbuff_dma[entry].buf = 0;
2527 		tx_q->xdpf[entry] = NULL;
2528 
2529 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2530 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2531 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2532 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2533 
2534 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2535 
2536 		tx_q->tx_count_frames++;
2537 
2538 		if (!priv->tx_coal_frames[queue])
2539 			set_ic = false;
2540 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2541 			set_ic = true;
2542 		else
2543 			set_ic = false;
2544 
2545 		meta_req.priv = priv;
2546 		meta_req.tx_desc = tx_desc;
2547 		meta_req.set_ic = &set_ic;
2548 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2549 					&meta_req);
2550 		if (set_ic) {
2551 			tx_q->tx_count_frames = 0;
2552 			stmmac_set_tx_ic(priv, tx_desc);
2553 			tx_set_ic_bit++;
2554 		}
2555 
2556 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2557 				       true, priv->mode, true, true,
2558 				       xdp_desc.len);
2559 
2560 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2561 
2562 		xsk_tx_metadata_to_compl(meta,
2563 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2564 
2565 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2566 		entry = tx_q->cur_tx;
2567 	}
2568 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2569 	txq_stats->tx_set_ic_bit += tx_set_ic_bit;
2570 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2571 
2572 	if (tx_desc) {
2573 		stmmac_flush_tx_descriptors(priv, queue);
2574 		xsk_tx_release(pool);
2575 	}
2576 
2577 	/* Return true if all of the 3 conditions are met
2578 	 *  a) TX Budget is still available
2579 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2580 	 *     pending XSK TX for transmission)
2581 	 */
2582 	return !!budget && work_done;
2583 }
2584 
2585 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2586 {
2587 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2588 		tc += 64;
2589 
2590 		if (priv->plat->force_thresh_dma_mode)
2591 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2592 		else
2593 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2594 						      chan);
2595 
2596 		priv->xstats.threshold = tc;
2597 	}
2598 }
2599 
2600 /**
2601  * stmmac_tx_clean - to manage the transmission completion
2602  * @priv: driver private structure
2603  * @budget: napi budget limiting this functions packet handling
2604  * @queue: TX queue index
2605  * @pending_packets: signal to arm the TX coal timer
2606  * Description: it reclaims the transmit resources after transmission completes.
2607  * If some packets still needs to be handled, due to TX coalesce, set
2608  * pending_packets to true to make NAPI arm the TX coal timer.
2609  */
2610 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2611 			   bool *pending_packets)
2612 {
2613 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2614 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2615 	unsigned int bytes_compl = 0, pkts_compl = 0;
2616 	unsigned int entry, xmits = 0, count = 0;
2617 	u32 tx_packets = 0, tx_errors = 0;
2618 	unsigned long flags;
2619 
2620 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2621 
2622 	tx_q->xsk_frames_done = 0;
2623 
2624 	entry = tx_q->dirty_tx;
2625 
2626 	/* Try to clean all TX complete frame in 1 shot */
2627 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2628 		struct xdp_frame *xdpf;
2629 		struct sk_buff *skb;
2630 		struct dma_desc *p;
2631 		int status;
2632 
2633 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2634 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2635 			xdpf = tx_q->xdpf[entry];
2636 			skb = NULL;
2637 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2638 			xdpf = NULL;
2639 			skb = tx_q->tx_skbuff[entry];
2640 		} else {
2641 			xdpf = NULL;
2642 			skb = NULL;
2643 		}
2644 
2645 		if (priv->extend_desc)
2646 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2647 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2648 			p = &tx_q->dma_entx[entry].basic;
2649 		else
2650 			p = tx_q->dma_tx + entry;
2651 
2652 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2653 		/* Check if the descriptor is owned by the DMA */
2654 		if (unlikely(status & tx_dma_own))
2655 			break;
2656 
2657 		count++;
2658 
2659 		/* Make sure descriptor fields are read after reading
2660 		 * the own bit.
2661 		 */
2662 		dma_rmb();
2663 
2664 		/* Just consider the last segment and ...*/
2665 		if (likely(!(status & tx_not_ls))) {
2666 			/* ... verify the status error condition */
2667 			if (unlikely(status & tx_err)) {
2668 				tx_errors++;
2669 				if (unlikely(status & tx_err_bump_tc))
2670 					stmmac_bump_dma_threshold(priv, queue);
2671 			} else {
2672 				tx_packets++;
2673 			}
2674 			if (skb) {
2675 				stmmac_get_tx_hwtstamp(priv, p, skb);
2676 			} else {
2677 				struct stmmac_xsk_tx_complete tx_compl = {
2678 					.priv = priv,
2679 					.desc = p,
2680 				};
2681 
2682 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2683 							 &stmmac_xsk_tx_metadata_ops,
2684 							 &tx_compl);
2685 			}
2686 		}
2687 
2688 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2689 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2690 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2691 				dma_unmap_page(priv->device,
2692 					       tx_q->tx_skbuff_dma[entry].buf,
2693 					       tx_q->tx_skbuff_dma[entry].len,
2694 					       DMA_TO_DEVICE);
2695 			else
2696 				dma_unmap_single(priv->device,
2697 						 tx_q->tx_skbuff_dma[entry].buf,
2698 						 tx_q->tx_skbuff_dma[entry].len,
2699 						 DMA_TO_DEVICE);
2700 			tx_q->tx_skbuff_dma[entry].buf = 0;
2701 			tx_q->tx_skbuff_dma[entry].len = 0;
2702 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2703 		}
2704 
2705 		stmmac_clean_desc3(priv, tx_q, p);
2706 
2707 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2708 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2709 
2710 		if (xdpf &&
2711 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2712 			xdp_return_frame_rx_napi(xdpf);
2713 			tx_q->xdpf[entry] = NULL;
2714 		}
2715 
2716 		if (xdpf &&
2717 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2718 			xdp_return_frame(xdpf);
2719 			tx_q->xdpf[entry] = NULL;
2720 		}
2721 
2722 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2723 			tx_q->xsk_frames_done++;
2724 
2725 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2726 			if (likely(skb)) {
2727 				pkts_compl++;
2728 				bytes_compl += skb->len;
2729 				dev_consume_skb_any(skb);
2730 				tx_q->tx_skbuff[entry] = NULL;
2731 			}
2732 		}
2733 
2734 		stmmac_release_tx_desc(priv, p, priv->mode);
2735 
2736 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2737 	}
2738 	tx_q->dirty_tx = entry;
2739 
2740 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2741 				  pkts_compl, bytes_compl);
2742 
2743 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2744 								queue))) &&
2745 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2746 
2747 		netif_dbg(priv, tx_done, priv->dev,
2748 			  "%s: restart transmit\n", __func__);
2749 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2750 	}
2751 
2752 	if (tx_q->xsk_pool) {
2753 		bool work_done;
2754 
2755 		if (tx_q->xsk_frames_done)
2756 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2757 
2758 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2759 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2760 
2761 		/* For XSK TX, we try to send as many as possible.
2762 		 * If XSK work done (XSK TX desc empty and budget still
2763 		 * available), return "budget - 1" to reenable TX IRQ.
2764 		 * Else, return "budget" to make NAPI continue polling.
2765 		 */
2766 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2767 					       STMMAC_XSK_TX_BUDGET_MAX);
2768 		if (work_done)
2769 			xmits = budget - 1;
2770 		else
2771 			xmits = budget;
2772 	}
2773 
2774 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2775 	    priv->eee_sw_timer_en) {
2776 		if (stmmac_enable_eee_mode(priv))
2777 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2778 	}
2779 
2780 	/* We still have pending packets, let's call for a new scheduling */
2781 	if (tx_q->dirty_tx != tx_q->cur_tx)
2782 		*pending_packets = true;
2783 
2784 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2785 	txq_stats->tx_packets += tx_packets;
2786 	txq_stats->tx_pkt_n += tx_packets;
2787 	txq_stats->tx_clean++;
2788 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2789 
2790 	priv->xstats.tx_errors += tx_errors;
2791 
2792 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2793 
2794 	/* Combine decisions from TX clean and XSK TX */
2795 	return max(count, xmits);
2796 }
2797 
2798 /**
2799  * stmmac_tx_err - to manage the tx error
2800  * @priv: driver private structure
2801  * @chan: channel index
2802  * Description: it cleans the descriptors and restarts the transmission
2803  * in case of transmission errors.
2804  */
2805 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2806 {
2807 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2808 
2809 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2810 
2811 	stmmac_stop_tx_dma(priv, chan);
2812 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2813 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2814 	stmmac_reset_tx_queue(priv, chan);
2815 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2816 			    tx_q->dma_tx_phy, chan);
2817 	stmmac_start_tx_dma(priv, chan);
2818 
2819 	priv->xstats.tx_errors++;
2820 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2821 }
2822 
2823 /**
2824  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2825  *  @priv: driver private structure
2826  *  @txmode: TX operating mode
2827  *  @rxmode: RX operating mode
2828  *  @chan: channel index
2829  *  Description: it is used for configuring of the DMA operation mode in
2830  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2831  *  mode.
2832  */
2833 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2834 					  u32 rxmode, u32 chan)
2835 {
2836 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2837 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2838 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2839 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2840 	int rxfifosz = priv->plat->rx_fifo_size;
2841 	int txfifosz = priv->plat->tx_fifo_size;
2842 
2843 	if (rxfifosz == 0)
2844 		rxfifosz = priv->dma_cap.rx_fifo_size;
2845 	if (txfifosz == 0)
2846 		txfifosz = priv->dma_cap.tx_fifo_size;
2847 
2848 	/* Adjust for real per queue fifo size */
2849 	rxfifosz /= rx_channels_count;
2850 	txfifosz /= tx_channels_count;
2851 
2852 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2853 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2854 }
2855 
2856 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2857 {
2858 	int ret;
2859 
2860 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2861 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2862 	if (ret && (ret != -EINVAL)) {
2863 		stmmac_global_err(priv);
2864 		return true;
2865 	}
2866 
2867 	return false;
2868 }
2869 
2870 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2871 {
2872 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2873 						 &priv->xstats, chan, dir);
2874 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2875 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2876 	struct stmmac_channel *ch = &priv->channel[chan];
2877 	struct napi_struct *rx_napi;
2878 	struct napi_struct *tx_napi;
2879 	unsigned long flags;
2880 
2881 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2882 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2883 
2884 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2885 		if (napi_schedule_prep(rx_napi)) {
2886 			spin_lock_irqsave(&ch->lock, flags);
2887 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2888 			spin_unlock_irqrestore(&ch->lock, flags);
2889 			__napi_schedule(rx_napi);
2890 		}
2891 	}
2892 
2893 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2894 		if (napi_schedule_prep(tx_napi)) {
2895 			spin_lock_irqsave(&ch->lock, flags);
2896 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2897 			spin_unlock_irqrestore(&ch->lock, flags);
2898 			__napi_schedule(tx_napi);
2899 		}
2900 	}
2901 
2902 	return status;
2903 }
2904 
2905 /**
2906  * stmmac_dma_interrupt - DMA ISR
2907  * @priv: driver private structure
2908  * Description: this is the DMA ISR. It is called by the main ISR.
2909  * It calls the dwmac dma routine and schedule poll method in case of some
2910  * work can be done.
2911  */
2912 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2913 {
2914 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2915 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2916 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2917 				tx_channel_count : rx_channel_count;
2918 	u32 chan;
2919 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2920 
2921 	/* Make sure we never check beyond our status buffer. */
2922 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2923 		channels_to_check = ARRAY_SIZE(status);
2924 
2925 	for (chan = 0; chan < channels_to_check; chan++)
2926 		status[chan] = stmmac_napi_check(priv, chan,
2927 						 DMA_DIR_RXTX);
2928 
2929 	for (chan = 0; chan < tx_channel_count; chan++) {
2930 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2931 			/* Try to bump up the dma threshold on this failure */
2932 			stmmac_bump_dma_threshold(priv, chan);
2933 		} else if (unlikely(status[chan] == tx_hard_error)) {
2934 			stmmac_tx_err(priv, chan);
2935 		}
2936 	}
2937 }
2938 
2939 /**
2940  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2941  * @priv: driver private structure
2942  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2943  */
2944 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2945 {
2946 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2947 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2948 
2949 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2950 
2951 	if (priv->dma_cap.rmon) {
2952 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2953 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2954 	} else
2955 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2956 }
2957 
2958 /**
2959  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2960  * @priv: driver private structure
2961  * Description:
2962  *  new GMAC chip generations have a new register to indicate the
2963  *  presence of the optional feature/functions.
2964  *  This can be also used to override the value passed through the
2965  *  platform and necessary for old MAC10/100 and GMAC chips.
2966  */
2967 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2968 {
2969 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2970 }
2971 
2972 /**
2973  * stmmac_check_ether_addr - check if the MAC addr is valid
2974  * @priv: driver private structure
2975  * Description:
2976  * it is to verify if the MAC address is valid, in case of failures it
2977  * generates a random MAC address
2978  */
2979 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2980 {
2981 	u8 addr[ETH_ALEN];
2982 
2983 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2984 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2985 		if (is_valid_ether_addr(addr))
2986 			eth_hw_addr_set(priv->dev, addr);
2987 		else
2988 			eth_hw_addr_random(priv->dev);
2989 		dev_info(priv->device, "device MAC address %pM\n",
2990 			 priv->dev->dev_addr);
2991 	}
2992 }
2993 
2994 /**
2995  * stmmac_init_dma_engine - DMA init.
2996  * @priv: driver private structure
2997  * Description:
2998  * It inits the DMA invoking the specific MAC/GMAC callback.
2999  * Some DMA parameters can be passed from the platform;
3000  * in case of these are not passed a default is kept for the MAC or GMAC.
3001  */
3002 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3003 {
3004 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3005 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3006 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3007 	struct stmmac_rx_queue *rx_q;
3008 	struct stmmac_tx_queue *tx_q;
3009 	u32 chan = 0;
3010 	int atds = 0;
3011 	int ret = 0;
3012 
3013 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3014 		dev_err(priv->device, "Invalid DMA configuration\n");
3015 		return -EINVAL;
3016 	}
3017 
3018 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3019 		atds = 1;
3020 
3021 	ret = stmmac_reset(priv, priv->ioaddr);
3022 	if (ret) {
3023 		dev_err(priv->device, "Failed to reset the dma\n");
3024 		return ret;
3025 	}
3026 
3027 	/* DMA Configuration */
3028 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
3029 
3030 	if (priv->plat->axi)
3031 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3032 
3033 	/* DMA CSR Channel configuration */
3034 	for (chan = 0; chan < dma_csr_ch; chan++) {
3035 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3036 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3037 	}
3038 
3039 	/* DMA RX Channel Configuration */
3040 	for (chan = 0; chan < rx_channels_count; chan++) {
3041 		rx_q = &priv->dma_conf.rx_queue[chan];
3042 
3043 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3044 				    rx_q->dma_rx_phy, chan);
3045 
3046 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3047 				     (rx_q->buf_alloc_num *
3048 				      sizeof(struct dma_desc));
3049 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3050 				       rx_q->rx_tail_addr, chan);
3051 	}
3052 
3053 	/* DMA TX Channel Configuration */
3054 	for (chan = 0; chan < tx_channels_count; chan++) {
3055 		tx_q = &priv->dma_conf.tx_queue[chan];
3056 
3057 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3058 				    tx_q->dma_tx_phy, chan);
3059 
3060 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3061 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3062 				       tx_q->tx_tail_addr, chan);
3063 	}
3064 
3065 	return ret;
3066 }
3067 
3068 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3069 {
3070 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3071 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3072 	struct stmmac_channel *ch;
3073 	struct napi_struct *napi;
3074 
3075 	if (!tx_coal_timer)
3076 		return;
3077 
3078 	ch = &priv->channel[tx_q->queue_index];
3079 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3080 
3081 	/* Arm timer only if napi is not already scheduled.
3082 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3083 	 * again in the next scheduled napi.
3084 	 */
3085 	if (unlikely(!napi_is_scheduled(napi)))
3086 		hrtimer_start(&tx_q->txtimer,
3087 			      STMMAC_COAL_TIMER(tx_coal_timer),
3088 			      HRTIMER_MODE_REL);
3089 	else
3090 		hrtimer_try_to_cancel(&tx_q->txtimer);
3091 }
3092 
3093 /**
3094  * stmmac_tx_timer - mitigation sw timer for tx.
3095  * @t: data pointer
3096  * Description:
3097  * This is the timer handler to directly invoke the stmmac_tx_clean.
3098  */
3099 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3100 {
3101 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3102 	struct stmmac_priv *priv = tx_q->priv_data;
3103 	struct stmmac_channel *ch;
3104 	struct napi_struct *napi;
3105 
3106 	ch = &priv->channel[tx_q->queue_index];
3107 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3108 
3109 	if (likely(napi_schedule_prep(napi))) {
3110 		unsigned long flags;
3111 
3112 		spin_lock_irqsave(&ch->lock, flags);
3113 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3114 		spin_unlock_irqrestore(&ch->lock, flags);
3115 		__napi_schedule(napi);
3116 	}
3117 
3118 	return HRTIMER_NORESTART;
3119 }
3120 
3121 /**
3122  * stmmac_init_coalesce - init mitigation options.
3123  * @priv: driver private structure
3124  * Description:
3125  * This inits the coalesce parameters: i.e. timer rate,
3126  * timer handler and default threshold used for enabling the
3127  * interrupt on completion bit.
3128  */
3129 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3130 {
3131 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3132 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3133 	u32 chan;
3134 
3135 	for (chan = 0; chan < tx_channel_count; chan++) {
3136 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3137 
3138 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3139 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3140 
3141 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3142 		tx_q->txtimer.function = stmmac_tx_timer;
3143 	}
3144 
3145 	for (chan = 0; chan < rx_channel_count; chan++)
3146 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3147 }
3148 
3149 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3150 {
3151 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3152 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3153 	u32 chan;
3154 
3155 	/* set TX ring length */
3156 	for (chan = 0; chan < tx_channels_count; chan++)
3157 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3158 				       (priv->dma_conf.dma_tx_size - 1), chan);
3159 
3160 	/* set RX ring length */
3161 	for (chan = 0; chan < rx_channels_count; chan++)
3162 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3163 				       (priv->dma_conf.dma_rx_size - 1), chan);
3164 }
3165 
3166 /**
3167  *  stmmac_set_tx_queue_weight - Set TX queue weight
3168  *  @priv: driver private structure
3169  *  Description: It is used for setting TX queues weight
3170  */
3171 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3172 {
3173 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3174 	u32 weight;
3175 	u32 queue;
3176 
3177 	for (queue = 0; queue < tx_queues_count; queue++) {
3178 		weight = priv->plat->tx_queues_cfg[queue].weight;
3179 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3180 	}
3181 }
3182 
3183 /**
3184  *  stmmac_configure_cbs - Configure CBS in TX queue
3185  *  @priv: driver private structure
3186  *  Description: It is used for configuring CBS in AVB TX queues
3187  */
3188 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3189 {
3190 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3191 	u32 mode_to_use;
3192 	u32 queue;
3193 
3194 	/* queue 0 is reserved for legacy traffic */
3195 	for (queue = 1; queue < tx_queues_count; queue++) {
3196 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3197 		if (mode_to_use == MTL_QUEUE_DCB)
3198 			continue;
3199 
3200 		stmmac_config_cbs(priv, priv->hw,
3201 				priv->plat->tx_queues_cfg[queue].send_slope,
3202 				priv->plat->tx_queues_cfg[queue].idle_slope,
3203 				priv->plat->tx_queues_cfg[queue].high_credit,
3204 				priv->plat->tx_queues_cfg[queue].low_credit,
3205 				queue);
3206 	}
3207 }
3208 
3209 /**
3210  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3211  *  @priv: driver private structure
3212  *  Description: It is used for mapping RX queues to RX dma channels
3213  */
3214 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3215 {
3216 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3217 	u32 queue;
3218 	u32 chan;
3219 
3220 	for (queue = 0; queue < rx_queues_count; queue++) {
3221 		chan = priv->plat->rx_queues_cfg[queue].chan;
3222 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3223 	}
3224 }
3225 
3226 /**
3227  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3228  *  @priv: driver private structure
3229  *  Description: It is used for configuring the RX Queue Priority
3230  */
3231 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3232 {
3233 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3234 	u32 queue;
3235 	u32 prio;
3236 
3237 	for (queue = 0; queue < rx_queues_count; queue++) {
3238 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3239 			continue;
3240 
3241 		prio = priv->plat->rx_queues_cfg[queue].prio;
3242 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3243 	}
3244 }
3245 
3246 /**
3247  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3248  *  @priv: driver private structure
3249  *  Description: It is used for configuring the TX Queue Priority
3250  */
3251 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3252 {
3253 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3254 	u32 queue;
3255 	u32 prio;
3256 
3257 	for (queue = 0; queue < tx_queues_count; queue++) {
3258 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3259 			continue;
3260 
3261 		prio = priv->plat->tx_queues_cfg[queue].prio;
3262 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3263 	}
3264 }
3265 
3266 /**
3267  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3268  *  @priv: driver private structure
3269  *  Description: It is used for configuring the RX queue routing
3270  */
3271 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3272 {
3273 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3274 	u32 queue;
3275 	u8 packet;
3276 
3277 	for (queue = 0; queue < rx_queues_count; queue++) {
3278 		/* no specific packet type routing specified for the queue */
3279 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3280 			continue;
3281 
3282 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3283 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3284 	}
3285 }
3286 
3287 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3288 {
3289 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3290 		priv->rss.enable = false;
3291 		return;
3292 	}
3293 
3294 	if (priv->dev->features & NETIF_F_RXHASH)
3295 		priv->rss.enable = true;
3296 	else
3297 		priv->rss.enable = false;
3298 
3299 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3300 			     priv->plat->rx_queues_to_use);
3301 }
3302 
3303 /**
3304  *  stmmac_mtl_configuration - Configure MTL
3305  *  @priv: driver private structure
3306  *  Description: It is used for configurring MTL
3307  */
3308 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3309 {
3310 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3311 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3312 
3313 	if (tx_queues_count > 1)
3314 		stmmac_set_tx_queue_weight(priv);
3315 
3316 	/* Configure MTL RX algorithms */
3317 	if (rx_queues_count > 1)
3318 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3319 				priv->plat->rx_sched_algorithm);
3320 
3321 	/* Configure MTL TX algorithms */
3322 	if (tx_queues_count > 1)
3323 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3324 				priv->plat->tx_sched_algorithm);
3325 
3326 	/* Configure CBS in AVB TX queues */
3327 	if (tx_queues_count > 1)
3328 		stmmac_configure_cbs(priv);
3329 
3330 	/* Map RX MTL to DMA channels */
3331 	stmmac_rx_queue_dma_chan_map(priv);
3332 
3333 	/* Enable MAC RX Queues */
3334 	stmmac_mac_enable_rx_queues(priv);
3335 
3336 	/* Set RX priorities */
3337 	if (rx_queues_count > 1)
3338 		stmmac_mac_config_rx_queues_prio(priv);
3339 
3340 	/* Set TX priorities */
3341 	if (tx_queues_count > 1)
3342 		stmmac_mac_config_tx_queues_prio(priv);
3343 
3344 	/* Set RX routing */
3345 	if (rx_queues_count > 1)
3346 		stmmac_mac_config_rx_queues_routing(priv);
3347 
3348 	/* Receive Side Scaling */
3349 	if (rx_queues_count > 1)
3350 		stmmac_mac_config_rss(priv);
3351 }
3352 
3353 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3354 {
3355 	if (priv->dma_cap.asp) {
3356 		netdev_info(priv->dev, "Enabling Safety Features\n");
3357 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3358 					  priv->plat->safety_feat_cfg);
3359 	} else {
3360 		netdev_info(priv->dev, "No Safety Features support found\n");
3361 	}
3362 }
3363 
3364 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3365 {
3366 	char *name;
3367 
3368 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3369 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3370 
3371 	name = priv->wq_name;
3372 	sprintf(name, "%s-fpe", priv->dev->name);
3373 
3374 	priv->fpe_wq = create_singlethread_workqueue(name);
3375 	if (!priv->fpe_wq) {
3376 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3377 
3378 		return -ENOMEM;
3379 	}
3380 	netdev_info(priv->dev, "FPE workqueue start");
3381 
3382 	return 0;
3383 }
3384 
3385 /**
3386  * stmmac_hw_setup - setup mac in a usable state.
3387  *  @dev : pointer to the device structure.
3388  *  @ptp_register: register PTP if set
3389  *  Description:
3390  *  this is the main function to setup the HW in a usable state because the
3391  *  dma engine is reset, the core registers are configured (e.g. AXI,
3392  *  Checksum features, timers). The DMA is ready to start receiving and
3393  *  transmitting.
3394  *  Return value:
3395  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3396  *  file on failure.
3397  */
3398 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3399 {
3400 	struct stmmac_priv *priv = netdev_priv(dev);
3401 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3402 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3403 	bool sph_en;
3404 	u32 chan;
3405 	int ret;
3406 
3407 	/* DMA initialization and SW reset */
3408 	ret = stmmac_init_dma_engine(priv);
3409 	if (ret < 0) {
3410 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3411 			   __func__);
3412 		return ret;
3413 	}
3414 
3415 	/* Copy the MAC addr into the HW  */
3416 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3417 
3418 	/* PS and related bits will be programmed according to the speed */
3419 	if (priv->hw->pcs) {
3420 		int speed = priv->plat->mac_port_sel_speed;
3421 
3422 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3423 		    (speed == SPEED_1000)) {
3424 			priv->hw->ps = speed;
3425 		} else {
3426 			dev_warn(priv->device, "invalid port speed\n");
3427 			priv->hw->ps = 0;
3428 		}
3429 	}
3430 
3431 	/* Initialize the MAC Core */
3432 	stmmac_core_init(priv, priv->hw, dev);
3433 
3434 	/* Initialize MTL*/
3435 	stmmac_mtl_configuration(priv);
3436 
3437 	/* Initialize Safety Features */
3438 	stmmac_safety_feat_configuration(priv);
3439 
3440 	ret = stmmac_rx_ipc(priv, priv->hw);
3441 	if (!ret) {
3442 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3443 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3444 		priv->hw->rx_csum = 0;
3445 	}
3446 
3447 	/* Enable the MAC Rx/Tx */
3448 	stmmac_mac_set(priv, priv->ioaddr, true);
3449 
3450 	/* Set the HW DMA mode and the COE */
3451 	stmmac_dma_operation_mode(priv);
3452 
3453 	stmmac_mmc_setup(priv);
3454 
3455 	if (ptp_register) {
3456 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3457 		if (ret < 0)
3458 			netdev_warn(priv->dev,
3459 				    "failed to enable PTP reference clock: %pe\n",
3460 				    ERR_PTR(ret));
3461 	}
3462 
3463 	ret = stmmac_init_ptp(priv);
3464 	if (ret == -EOPNOTSUPP)
3465 		netdev_info(priv->dev, "PTP not supported by HW\n");
3466 	else if (ret)
3467 		netdev_warn(priv->dev, "PTP init failed\n");
3468 	else if (ptp_register)
3469 		stmmac_ptp_register(priv);
3470 
3471 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3472 
3473 	/* Convert the timer from msec to usec */
3474 	if (!priv->tx_lpi_timer)
3475 		priv->tx_lpi_timer = eee_timer * 1000;
3476 
3477 	if (priv->use_riwt) {
3478 		u32 queue;
3479 
3480 		for (queue = 0; queue < rx_cnt; queue++) {
3481 			if (!priv->rx_riwt[queue])
3482 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3483 
3484 			stmmac_rx_watchdog(priv, priv->ioaddr,
3485 					   priv->rx_riwt[queue], queue);
3486 		}
3487 	}
3488 
3489 	if (priv->hw->pcs)
3490 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3491 
3492 	/* set TX and RX rings length */
3493 	stmmac_set_rings_length(priv);
3494 
3495 	/* Enable TSO */
3496 	if (priv->tso) {
3497 		for (chan = 0; chan < tx_cnt; chan++) {
3498 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3499 
3500 			/* TSO and TBS cannot co-exist */
3501 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3502 				continue;
3503 
3504 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3505 		}
3506 	}
3507 
3508 	/* Enable Split Header */
3509 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3510 	for (chan = 0; chan < rx_cnt; chan++)
3511 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3512 
3513 
3514 	/* VLAN Tag Insertion */
3515 	if (priv->dma_cap.vlins)
3516 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3517 
3518 	/* TBS */
3519 	for (chan = 0; chan < tx_cnt; chan++) {
3520 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3521 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3522 
3523 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3524 	}
3525 
3526 	/* Configure real RX and TX queues */
3527 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3528 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3529 
3530 	/* Start the ball rolling... */
3531 	stmmac_start_all_dma(priv);
3532 
3533 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3534 
3535 	if (priv->dma_cap.fpesel) {
3536 		stmmac_fpe_start_wq(priv);
3537 
3538 		if (priv->plat->fpe_cfg->enable)
3539 			stmmac_fpe_handshake(priv, true);
3540 	}
3541 
3542 	return 0;
3543 }
3544 
3545 static void stmmac_hw_teardown(struct net_device *dev)
3546 {
3547 	struct stmmac_priv *priv = netdev_priv(dev);
3548 
3549 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3550 }
3551 
3552 static void stmmac_free_irq(struct net_device *dev,
3553 			    enum request_irq_err irq_err, int irq_idx)
3554 {
3555 	struct stmmac_priv *priv = netdev_priv(dev);
3556 	int j;
3557 
3558 	switch (irq_err) {
3559 	case REQ_IRQ_ERR_ALL:
3560 		irq_idx = priv->plat->tx_queues_to_use;
3561 		fallthrough;
3562 	case REQ_IRQ_ERR_TX:
3563 		for (j = irq_idx - 1; j >= 0; j--) {
3564 			if (priv->tx_irq[j] > 0) {
3565 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3566 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3567 			}
3568 		}
3569 		irq_idx = priv->plat->rx_queues_to_use;
3570 		fallthrough;
3571 	case REQ_IRQ_ERR_RX:
3572 		for (j = irq_idx - 1; j >= 0; j--) {
3573 			if (priv->rx_irq[j] > 0) {
3574 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3575 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3576 			}
3577 		}
3578 
3579 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3580 			free_irq(priv->sfty_ue_irq, dev);
3581 		fallthrough;
3582 	case REQ_IRQ_ERR_SFTY_UE:
3583 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3584 			free_irq(priv->sfty_ce_irq, dev);
3585 		fallthrough;
3586 	case REQ_IRQ_ERR_SFTY_CE:
3587 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3588 			free_irq(priv->lpi_irq, dev);
3589 		fallthrough;
3590 	case REQ_IRQ_ERR_LPI:
3591 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3592 			free_irq(priv->wol_irq, dev);
3593 		fallthrough;
3594 	case REQ_IRQ_ERR_WOL:
3595 		free_irq(dev->irq, dev);
3596 		fallthrough;
3597 	case REQ_IRQ_ERR_MAC:
3598 	case REQ_IRQ_ERR_NO:
3599 		/* If MAC IRQ request error, no more IRQ to free */
3600 		break;
3601 	}
3602 }
3603 
3604 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3605 {
3606 	struct stmmac_priv *priv = netdev_priv(dev);
3607 	enum request_irq_err irq_err;
3608 	cpumask_t cpu_mask;
3609 	int irq_idx = 0;
3610 	char *int_name;
3611 	int ret;
3612 	int i;
3613 
3614 	/* For common interrupt */
3615 	int_name = priv->int_name_mac;
3616 	sprintf(int_name, "%s:%s", dev->name, "mac");
3617 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3618 			  0, int_name, dev);
3619 	if (unlikely(ret < 0)) {
3620 		netdev_err(priv->dev,
3621 			   "%s: alloc mac MSI %d (error: %d)\n",
3622 			   __func__, dev->irq, ret);
3623 		irq_err = REQ_IRQ_ERR_MAC;
3624 		goto irq_error;
3625 	}
3626 
3627 	/* Request the Wake IRQ in case of another line
3628 	 * is used for WoL
3629 	 */
3630 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3631 		int_name = priv->int_name_wol;
3632 		sprintf(int_name, "%s:%s", dev->name, "wol");
3633 		ret = request_irq(priv->wol_irq,
3634 				  stmmac_mac_interrupt,
3635 				  0, int_name, dev);
3636 		if (unlikely(ret < 0)) {
3637 			netdev_err(priv->dev,
3638 				   "%s: alloc wol MSI %d (error: %d)\n",
3639 				   __func__, priv->wol_irq, ret);
3640 			irq_err = REQ_IRQ_ERR_WOL;
3641 			goto irq_error;
3642 		}
3643 	}
3644 
3645 	/* Request the LPI IRQ in case of another line
3646 	 * is used for LPI
3647 	 */
3648 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3649 		int_name = priv->int_name_lpi;
3650 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3651 		ret = request_irq(priv->lpi_irq,
3652 				  stmmac_mac_interrupt,
3653 				  0, int_name, dev);
3654 		if (unlikely(ret < 0)) {
3655 			netdev_err(priv->dev,
3656 				   "%s: alloc lpi MSI %d (error: %d)\n",
3657 				   __func__, priv->lpi_irq, ret);
3658 			irq_err = REQ_IRQ_ERR_LPI;
3659 			goto irq_error;
3660 		}
3661 	}
3662 
3663 	/* Request the Safety Feature Correctible Error line in
3664 	 * case of another line is used
3665 	 */
3666 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3667 		int_name = priv->int_name_sfty_ce;
3668 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3669 		ret = request_irq(priv->sfty_ce_irq,
3670 				  stmmac_safety_interrupt,
3671 				  0, int_name, dev);
3672 		if (unlikely(ret < 0)) {
3673 			netdev_err(priv->dev,
3674 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3675 				   __func__, priv->sfty_ce_irq, ret);
3676 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3677 			goto irq_error;
3678 		}
3679 	}
3680 
3681 	/* Request the Safety Feature Uncorrectible Error line in
3682 	 * case of another line is used
3683 	 */
3684 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3685 		int_name = priv->int_name_sfty_ue;
3686 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3687 		ret = request_irq(priv->sfty_ue_irq,
3688 				  stmmac_safety_interrupt,
3689 				  0, int_name, dev);
3690 		if (unlikely(ret < 0)) {
3691 			netdev_err(priv->dev,
3692 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3693 				   __func__, priv->sfty_ue_irq, ret);
3694 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3695 			goto irq_error;
3696 		}
3697 	}
3698 
3699 	/* Request Rx MSI irq */
3700 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3701 		if (i >= MTL_MAX_RX_QUEUES)
3702 			break;
3703 		if (priv->rx_irq[i] == 0)
3704 			continue;
3705 
3706 		int_name = priv->int_name_rx_irq[i];
3707 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3708 		ret = request_irq(priv->rx_irq[i],
3709 				  stmmac_msi_intr_rx,
3710 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3711 		if (unlikely(ret < 0)) {
3712 			netdev_err(priv->dev,
3713 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3714 				   __func__, i, priv->rx_irq[i], ret);
3715 			irq_err = REQ_IRQ_ERR_RX;
3716 			irq_idx = i;
3717 			goto irq_error;
3718 		}
3719 		cpumask_clear(&cpu_mask);
3720 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3721 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3722 	}
3723 
3724 	/* Request Tx MSI irq */
3725 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3726 		if (i >= MTL_MAX_TX_QUEUES)
3727 			break;
3728 		if (priv->tx_irq[i] == 0)
3729 			continue;
3730 
3731 		int_name = priv->int_name_tx_irq[i];
3732 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3733 		ret = request_irq(priv->tx_irq[i],
3734 				  stmmac_msi_intr_tx,
3735 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3736 		if (unlikely(ret < 0)) {
3737 			netdev_err(priv->dev,
3738 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3739 				   __func__, i, priv->tx_irq[i], ret);
3740 			irq_err = REQ_IRQ_ERR_TX;
3741 			irq_idx = i;
3742 			goto irq_error;
3743 		}
3744 		cpumask_clear(&cpu_mask);
3745 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3746 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3747 	}
3748 
3749 	return 0;
3750 
3751 irq_error:
3752 	stmmac_free_irq(dev, irq_err, irq_idx);
3753 	return ret;
3754 }
3755 
3756 static int stmmac_request_irq_single(struct net_device *dev)
3757 {
3758 	struct stmmac_priv *priv = netdev_priv(dev);
3759 	enum request_irq_err irq_err;
3760 	int ret;
3761 
3762 	ret = request_irq(dev->irq, stmmac_interrupt,
3763 			  IRQF_SHARED, dev->name, dev);
3764 	if (unlikely(ret < 0)) {
3765 		netdev_err(priv->dev,
3766 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3767 			   __func__, dev->irq, ret);
3768 		irq_err = REQ_IRQ_ERR_MAC;
3769 		goto irq_error;
3770 	}
3771 
3772 	/* Request the Wake IRQ in case of another line
3773 	 * is used for WoL
3774 	 */
3775 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3776 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3777 				  IRQF_SHARED, dev->name, dev);
3778 		if (unlikely(ret < 0)) {
3779 			netdev_err(priv->dev,
3780 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3781 				   __func__, priv->wol_irq, ret);
3782 			irq_err = REQ_IRQ_ERR_WOL;
3783 			goto irq_error;
3784 		}
3785 	}
3786 
3787 	/* Request the IRQ lines */
3788 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3789 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3790 				  IRQF_SHARED, dev->name, dev);
3791 		if (unlikely(ret < 0)) {
3792 			netdev_err(priv->dev,
3793 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3794 				   __func__, priv->lpi_irq, ret);
3795 			irq_err = REQ_IRQ_ERR_LPI;
3796 			goto irq_error;
3797 		}
3798 	}
3799 
3800 	return 0;
3801 
3802 irq_error:
3803 	stmmac_free_irq(dev, irq_err, 0);
3804 	return ret;
3805 }
3806 
3807 static int stmmac_request_irq(struct net_device *dev)
3808 {
3809 	struct stmmac_priv *priv = netdev_priv(dev);
3810 	int ret;
3811 
3812 	/* Request the IRQ lines */
3813 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3814 		ret = stmmac_request_irq_multi_msi(dev);
3815 	else
3816 		ret = stmmac_request_irq_single(dev);
3817 
3818 	return ret;
3819 }
3820 
3821 /**
3822  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3823  *  @priv: driver private structure
3824  *  @mtu: MTU to setup the dma queue and buf with
3825  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3826  *  Allocate the Tx/Rx DMA queue and init them.
3827  *  Return value:
3828  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3829  */
3830 static struct stmmac_dma_conf *
3831 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3832 {
3833 	struct stmmac_dma_conf *dma_conf;
3834 	int chan, bfsize, ret;
3835 
3836 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3837 	if (!dma_conf) {
3838 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3839 			   __func__);
3840 		return ERR_PTR(-ENOMEM);
3841 	}
3842 
3843 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3844 	if (bfsize < 0)
3845 		bfsize = 0;
3846 
3847 	if (bfsize < BUF_SIZE_16KiB)
3848 		bfsize = stmmac_set_bfsize(mtu, 0);
3849 
3850 	dma_conf->dma_buf_sz = bfsize;
3851 	/* Chose the tx/rx size from the already defined one in the
3852 	 * priv struct. (if defined)
3853 	 */
3854 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3855 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3856 
3857 	if (!dma_conf->dma_tx_size)
3858 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3859 	if (!dma_conf->dma_rx_size)
3860 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3861 
3862 	/* Earlier check for TBS */
3863 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3864 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3865 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3866 
3867 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3868 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3869 	}
3870 
3871 	ret = alloc_dma_desc_resources(priv, dma_conf);
3872 	if (ret < 0) {
3873 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3874 			   __func__);
3875 		goto alloc_error;
3876 	}
3877 
3878 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3879 	if (ret < 0) {
3880 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3881 			   __func__);
3882 		goto init_error;
3883 	}
3884 
3885 	return dma_conf;
3886 
3887 init_error:
3888 	free_dma_desc_resources(priv, dma_conf);
3889 alloc_error:
3890 	kfree(dma_conf);
3891 	return ERR_PTR(ret);
3892 }
3893 
3894 /**
3895  *  __stmmac_open - open entry point of the driver
3896  *  @dev : pointer to the device structure.
3897  *  @dma_conf :  structure to take the dma data
3898  *  Description:
3899  *  This function is the open entry point of the driver.
3900  *  Return value:
3901  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3902  *  file on failure.
3903  */
3904 static int __stmmac_open(struct net_device *dev,
3905 			 struct stmmac_dma_conf *dma_conf)
3906 {
3907 	struct stmmac_priv *priv = netdev_priv(dev);
3908 	int mode = priv->plat->phy_interface;
3909 	u32 chan;
3910 	int ret;
3911 
3912 	ret = pm_runtime_resume_and_get(priv->device);
3913 	if (ret < 0)
3914 		return ret;
3915 
3916 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3917 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3918 	    (!priv->hw->xpcs ||
3919 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3920 	    !priv->hw->lynx_pcs) {
3921 		ret = stmmac_init_phy(dev);
3922 		if (ret) {
3923 			netdev_err(priv->dev,
3924 				   "%s: Cannot attach to PHY (error: %d)\n",
3925 				   __func__, ret);
3926 			goto init_phy_error;
3927 		}
3928 	}
3929 
3930 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3931 
3932 	buf_sz = dma_conf->dma_buf_sz;
3933 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3934 
3935 	stmmac_reset_queues_param(priv);
3936 
3937 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3938 	    priv->plat->serdes_powerup) {
3939 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3940 		if (ret < 0) {
3941 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3942 				   __func__);
3943 			goto init_error;
3944 		}
3945 	}
3946 
3947 	ret = stmmac_hw_setup(dev, true);
3948 	if (ret < 0) {
3949 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3950 		goto init_error;
3951 	}
3952 
3953 	stmmac_init_coalesce(priv);
3954 
3955 	phylink_start(priv->phylink);
3956 	/* We may have called phylink_speed_down before */
3957 	phylink_speed_up(priv->phylink);
3958 
3959 	ret = stmmac_request_irq(dev);
3960 	if (ret)
3961 		goto irq_error;
3962 
3963 	stmmac_enable_all_queues(priv);
3964 	netif_tx_start_all_queues(priv->dev);
3965 	stmmac_enable_all_dma_irq(priv);
3966 
3967 	return 0;
3968 
3969 irq_error:
3970 	phylink_stop(priv->phylink);
3971 
3972 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3973 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3974 
3975 	stmmac_hw_teardown(dev);
3976 init_error:
3977 	phylink_disconnect_phy(priv->phylink);
3978 init_phy_error:
3979 	pm_runtime_put(priv->device);
3980 	return ret;
3981 }
3982 
3983 static int stmmac_open(struct net_device *dev)
3984 {
3985 	struct stmmac_priv *priv = netdev_priv(dev);
3986 	struct stmmac_dma_conf *dma_conf;
3987 	int ret;
3988 
3989 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3990 	if (IS_ERR(dma_conf))
3991 		return PTR_ERR(dma_conf);
3992 
3993 	ret = __stmmac_open(dev, dma_conf);
3994 	if (ret)
3995 		free_dma_desc_resources(priv, dma_conf);
3996 
3997 	kfree(dma_conf);
3998 	return ret;
3999 }
4000 
4001 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4002 {
4003 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4004 
4005 	if (priv->fpe_wq)
4006 		destroy_workqueue(priv->fpe_wq);
4007 
4008 	netdev_info(priv->dev, "FPE workqueue stop");
4009 }
4010 
4011 /**
4012  *  stmmac_release - close entry point of the driver
4013  *  @dev : device pointer.
4014  *  Description:
4015  *  This is the stop entry point of the driver.
4016  */
4017 static int stmmac_release(struct net_device *dev)
4018 {
4019 	struct stmmac_priv *priv = netdev_priv(dev);
4020 	u32 chan;
4021 
4022 	if (device_may_wakeup(priv->device))
4023 		phylink_speed_down(priv->phylink, false);
4024 	/* Stop and disconnect the PHY */
4025 	phylink_stop(priv->phylink);
4026 	phylink_disconnect_phy(priv->phylink);
4027 
4028 	stmmac_disable_all_queues(priv);
4029 
4030 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4031 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4032 
4033 	netif_tx_disable(dev);
4034 
4035 	/* Free the IRQ lines */
4036 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4037 
4038 	if (priv->eee_enabled) {
4039 		priv->tx_path_in_lpi_mode = false;
4040 		del_timer_sync(&priv->eee_ctrl_timer);
4041 	}
4042 
4043 	/* Stop TX/RX DMA and clear the descriptors */
4044 	stmmac_stop_all_dma(priv);
4045 
4046 	/* Release and free the Rx/Tx resources */
4047 	free_dma_desc_resources(priv, &priv->dma_conf);
4048 
4049 	/* Disable the MAC Rx/Tx */
4050 	stmmac_mac_set(priv, priv->ioaddr, false);
4051 
4052 	/* Powerdown Serdes if there is */
4053 	if (priv->plat->serdes_powerdown)
4054 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4055 
4056 	netif_carrier_off(dev);
4057 
4058 	stmmac_release_ptp(priv);
4059 
4060 	pm_runtime_put(priv->device);
4061 
4062 	if (priv->dma_cap.fpesel)
4063 		stmmac_fpe_stop_wq(priv);
4064 
4065 	return 0;
4066 }
4067 
4068 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4069 			       struct stmmac_tx_queue *tx_q)
4070 {
4071 	u16 tag = 0x0, inner_tag = 0x0;
4072 	u32 inner_type = 0x0;
4073 	struct dma_desc *p;
4074 
4075 	if (!priv->dma_cap.vlins)
4076 		return false;
4077 	if (!skb_vlan_tag_present(skb))
4078 		return false;
4079 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4080 		inner_tag = skb_vlan_tag_get(skb);
4081 		inner_type = STMMAC_VLAN_INSERT;
4082 	}
4083 
4084 	tag = skb_vlan_tag_get(skb);
4085 
4086 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4087 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4088 	else
4089 		p = &tx_q->dma_tx[tx_q->cur_tx];
4090 
4091 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4092 		return false;
4093 
4094 	stmmac_set_tx_owner(priv, p);
4095 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4096 	return true;
4097 }
4098 
4099 /**
4100  *  stmmac_tso_allocator - close entry point of the driver
4101  *  @priv: driver private structure
4102  *  @des: buffer start address
4103  *  @total_len: total length to fill in descriptors
4104  *  @last_segment: condition for the last descriptor
4105  *  @queue: TX queue index
4106  *  Description:
4107  *  This function fills descriptor and request new descriptors according to
4108  *  buffer length to fill
4109  */
4110 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4111 				 int total_len, bool last_segment, u32 queue)
4112 {
4113 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4114 	struct dma_desc *desc;
4115 	u32 buff_size;
4116 	int tmp_len;
4117 
4118 	tmp_len = total_len;
4119 
4120 	while (tmp_len > 0) {
4121 		dma_addr_t curr_addr;
4122 
4123 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4124 						priv->dma_conf.dma_tx_size);
4125 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4126 
4127 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4128 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4129 		else
4130 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4131 
4132 		curr_addr = des + (total_len - tmp_len);
4133 		if (priv->dma_cap.addr64 <= 32)
4134 			desc->des0 = cpu_to_le32(curr_addr);
4135 		else
4136 			stmmac_set_desc_addr(priv, desc, curr_addr);
4137 
4138 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4139 			    TSO_MAX_BUFF_SIZE : tmp_len;
4140 
4141 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4142 				0, 1,
4143 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4144 				0, 0);
4145 
4146 		tmp_len -= TSO_MAX_BUFF_SIZE;
4147 	}
4148 }
4149 
4150 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4151 {
4152 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4153 	int desc_size;
4154 
4155 	if (likely(priv->extend_desc))
4156 		desc_size = sizeof(struct dma_extended_desc);
4157 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4158 		desc_size = sizeof(struct dma_edesc);
4159 	else
4160 		desc_size = sizeof(struct dma_desc);
4161 
4162 	/* The own bit must be the latest setting done when prepare the
4163 	 * descriptor and then barrier is needed to make sure that
4164 	 * all is coherent before granting the DMA engine.
4165 	 */
4166 	wmb();
4167 
4168 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4169 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4170 }
4171 
4172 /**
4173  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4174  *  @skb : the socket buffer
4175  *  @dev : device pointer
4176  *  Description: this is the transmit function that is called on TSO frames
4177  *  (support available on GMAC4 and newer chips).
4178  *  Diagram below show the ring programming in case of TSO frames:
4179  *
4180  *  First Descriptor
4181  *   --------
4182  *   | DES0 |---> buffer1 = L2/L3/L4 header
4183  *   | DES1 |---> TCP Payload (can continue on next descr...)
4184  *   | DES2 |---> buffer 1 and 2 len
4185  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4186  *   --------
4187  *	|
4188  *     ...
4189  *	|
4190  *   --------
4191  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4192  *   | DES1 | --|
4193  *   | DES2 | --> buffer 1 and 2 len
4194  *   | DES3 |
4195  *   --------
4196  *
4197  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4198  */
4199 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4200 {
4201 	struct dma_desc *desc, *first, *mss_desc = NULL;
4202 	struct stmmac_priv *priv = netdev_priv(dev);
4203 	int nfrags = skb_shinfo(skb)->nr_frags;
4204 	u32 queue = skb_get_queue_mapping(skb);
4205 	unsigned int first_entry, tx_packets;
4206 	struct stmmac_txq_stats *txq_stats;
4207 	int tmp_pay_len = 0, first_tx;
4208 	struct stmmac_tx_queue *tx_q;
4209 	bool has_vlan, set_ic;
4210 	u8 proto_hdr_len, hdr;
4211 	unsigned long flags;
4212 	u32 pay_len, mss;
4213 	dma_addr_t des;
4214 	int i;
4215 
4216 	tx_q = &priv->dma_conf.tx_queue[queue];
4217 	txq_stats = &priv->xstats.txq_stats[queue];
4218 	first_tx = tx_q->cur_tx;
4219 
4220 	/* Compute header lengths */
4221 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4222 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4223 		hdr = sizeof(struct udphdr);
4224 	} else {
4225 		proto_hdr_len = skb_tcp_all_headers(skb);
4226 		hdr = tcp_hdrlen(skb);
4227 	}
4228 
4229 	/* Desc availability based on threshold should be enough safe */
4230 	if (unlikely(stmmac_tx_avail(priv, queue) <
4231 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4232 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4233 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4234 								queue));
4235 			/* This is a hard error, log it. */
4236 			netdev_err(priv->dev,
4237 				   "%s: Tx Ring full when queue awake\n",
4238 				   __func__);
4239 		}
4240 		return NETDEV_TX_BUSY;
4241 	}
4242 
4243 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4244 
4245 	mss = skb_shinfo(skb)->gso_size;
4246 
4247 	/* set new MSS value if needed */
4248 	if (mss != tx_q->mss) {
4249 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4250 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4251 		else
4252 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4253 
4254 		stmmac_set_mss(priv, mss_desc, mss);
4255 		tx_q->mss = mss;
4256 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4257 						priv->dma_conf.dma_tx_size);
4258 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4259 	}
4260 
4261 	if (netif_msg_tx_queued(priv)) {
4262 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4263 			__func__, hdr, proto_hdr_len, pay_len, mss);
4264 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4265 			skb->data_len);
4266 	}
4267 
4268 	/* Check if VLAN can be inserted by HW */
4269 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4270 
4271 	first_entry = tx_q->cur_tx;
4272 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4273 
4274 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4275 		desc = &tx_q->dma_entx[first_entry].basic;
4276 	else
4277 		desc = &tx_q->dma_tx[first_entry];
4278 	first = desc;
4279 
4280 	if (has_vlan)
4281 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4282 
4283 	/* first descriptor: fill Headers on Buf1 */
4284 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4285 			     DMA_TO_DEVICE);
4286 	if (dma_mapping_error(priv->device, des))
4287 		goto dma_map_err;
4288 
4289 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4290 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4291 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4292 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4293 
4294 	if (priv->dma_cap.addr64 <= 32) {
4295 		first->des0 = cpu_to_le32(des);
4296 
4297 		/* Fill start of payload in buff2 of first descriptor */
4298 		if (pay_len)
4299 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4300 
4301 		/* If needed take extra descriptors to fill the remaining payload */
4302 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4303 	} else {
4304 		stmmac_set_desc_addr(priv, first, des);
4305 		tmp_pay_len = pay_len;
4306 		des += proto_hdr_len;
4307 		pay_len = 0;
4308 	}
4309 
4310 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4311 
4312 	/* Prepare fragments */
4313 	for (i = 0; i < nfrags; i++) {
4314 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4315 
4316 		des = skb_frag_dma_map(priv->device, frag, 0,
4317 				       skb_frag_size(frag),
4318 				       DMA_TO_DEVICE);
4319 		if (dma_mapping_error(priv->device, des))
4320 			goto dma_map_err;
4321 
4322 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4323 				     (i == nfrags - 1), queue);
4324 
4325 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4326 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4327 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4328 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4329 	}
4330 
4331 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4332 
4333 	/* Only the last descriptor gets to point to the skb. */
4334 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4335 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4336 
4337 	/* Manage tx mitigation */
4338 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4339 	tx_q->tx_count_frames += tx_packets;
4340 
4341 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4342 		set_ic = true;
4343 	else if (!priv->tx_coal_frames[queue])
4344 		set_ic = false;
4345 	else if (tx_packets > priv->tx_coal_frames[queue])
4346 		set_ic = true;
4347 	else if ((tx_q->tx_count_frames %
4348 		  priv->tx_coal_frames[queue]) < tx_packets)
4349 		set_ic = true;
4350 	else
4351 		set_ic = false;
4352 
4353 	if (set_ic) {
4354 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4355 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4356 		else
4357 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4358 
4359 		tx_q->tx_count_frames = 0;
4360 		stmmac_set_tx_ic(priv, desc);
4361 	}
4362 
4363 	/* We've used all descriptors we need for this skb, however,
4364 	 * advance cur_tx so that it references a fresh descriptor.
4365 	 * ndo_start_xmit will fill this descriptor the next time it's
4366 	 * called and stmmac_tx_clean may clean up to this descriptor.
4367 	 */
4368 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4369 
4370 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4371 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4372 			  __func__);
4373 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4374 	}
4375 
4376 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4377 	txq_stats->tx_bytes += skb->len;
4378 	txq_stats->tx_tso_frames++;
4379 	txq_stats->tx_tso_nfrags += nfrags;
4380 	if (set_ic)
4381 		txq_stats->tx_set_ic_bit++;
4382 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4383 
4384 	if (priv->sarc_type)
4385 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4386 
4387 	skb_tx_timestamp(skb);
4388 
4389 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4390 		     priv->hwts_tx_en)) {
4391 		/* declare that device is doing timestamping */
4392 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4393 		stmmac_enable_tx_timestamp(priv, first);
4394 	}
4395 
4396 	/* Complete the first descriptor before granting the DMA */
4397 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4398 			proto_hdr_len,
4399 			pay_len,
4400 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4401 			hdr / 4, (skb->len - proto_hdr_len));
4402 
4403 	/* If context desc is used to change MSS */
4404 	if (mss_desc) {
4405 		/* Make sure that first descriptor has been completely
4406 		 * written, including its own bit. This is because MSS is
4407 		 * actually before first descriptor, so we need to make
4408 		 * sure that MSS's own bit is the last thing written.
4409 		 */
4410 		dma_wmb();
4411 		stmmac_set_tx_owner(priv, mss_desc);
4412 	}
4413 
4414 	if (netif_msg_pktdata(priv)) {
4415 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4416 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4417 			tx_q->cur_tx, first, nfrags);
4418 		pr_info(">>> frame to be transmitted: ");
4419 		print_pkt(skb->data, skb_headlen(skb));
4420 	}
4421 
4422 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4423 
4424 	stmmac_flush_tx_descriptors(priv, queue);
4425 	stmmac_tx_timer_arm(priv, queue);
4426 
4427 	return NETDEV_TX_OK;
4428 
4429 dma_map_err:
4430 	dev_err(priv->device, "Tx dma map failed\n");
4431 	dev_kfree_skb(skb);
4432 	priv->xstats.tx_dropped++;
4433 	return NETDEV_TX_OK;
4434 }
4435 
4436 /**
4437  *  stmmac_xmit - Tx entry point of the driver
4438  *  @skb : the socket buffer
4439  *  @dev : device pointer
4440  *  Description : this is the tx entry point of the driver.
4441  *  It programs the chain or the ring and supports oversized frames
4442  *  and SG feature.
4443  */
4444 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4445 {
4446 	unsigned int first_entry, tx_packets, enh_desc;
4447 	struct stmmac_priv *priv = netdev_priv(dev);
4448 	unsigned int nopaged_len = skb_headlen(skb);
4449 	int i, csum_insertion = 0, is_jumbo = 0;
4450 	u32 queue = skb_get_queue_mapping(skb);
4451 	int nfrags = skb_shinfo(skb)->nr_frags;
4452 	int gso = skb_shinfo(skb)->gso_type;
4453 	struct stmmac_txq_stats *txq_stats;
4454 	struct dma_edesc *tbs_desc = NULL;
4455 	struct dma_desc *desc, *first;
4456 	struct stmmac_tx_queue *tx_q;
4457 	bool has_vlan, set_ic;
4458 	int entry, first_tx;
4459 	unsigned long flags;
4460 	dma_addr_t des;
4461 
4462 	tx_q = &priv->dma_conf.tx_queue[queue];
4463 	txq_stats = &priv->xstats.txq_stats[queue];
4464 	first_tx = tx_q->cur_tx;
4465 
4466 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4467 		stmmac_disable_eee_mode(priv);
4468 
4469 	/* Manage oversized TCP frames for GMAC4 device */
4470 	if (skb_is_gso(skb) && priv->tso) {
4471 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4472 			return stmmac_tso_xmit(skb, dev);
4473 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4474 			return stmmac_tso_xmit(skb, dev);
4475 	}
4476 
4477 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4478 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4479 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4480 								queue));
4481 			/* This is a hard error, log it. */
4482 			netdev_err(priv->dev,
4483 				   "%s: Tx Ring full when queue awake\n",
4484 				   __func__);
4485 		}
4486 		return NETDEV_TX_BUSY;
4487 	}
4488 
4489 	/* Check if VLAN can be inserted by HW */
4490 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4491 
4492 	entry = tx_q->cur_tx;
4493 	first_entry = entry;
4494 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4495 
4496 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4497 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4498 	 * queues. In that case, checksum offloading for those queues that don't
4499 	 * support tx coe needs to fallback to software checksum calculation.
4500 	 */
4501 	if (csum_insertion &&
4502 	    priv->plat->tx_queues_cfg[queue].coe_unsupported) {
4503 		if (unlikely(skb_checksum_help(skb)))
4504 			goto dma_map_err;
4505 		csum_insertion = !csum_insertion;
4506 	}
4507 
4508 	if (likely(priv->extend_desc))
4509 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4510 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4511 		desc = &tx_q->dma_entx[entry].basic;
4512 	else
4513 		desc = tx_q->dma_tx + entry;
4514 
4515 	first = desc;
4516 
4517 	if (has_vlan)
4518 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4519 
4520 	enh_desc = priv->plat->enh_desc;
4521 	/* To program the descriptors according to the size of the frame */
4522 	if (enh_desc)
4523 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4524 
4525 	if (unlikely(is_jumbo)) {
4526 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4527 		if (unlikely(entry < 0) && (entry != -EINVAL))
4528 			goto dma_map_err;
4529 	}
4530 
4531 	for (i = 0; i < nfrags; i++) {
4532 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4533 		int len = skb_frag_size(frag);
4534 		bool last_segment = (i == (nfrags - 1));
4535 
4536 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4537 		WARN_ON(tx_q->tx_skbuff[entry]);
4538 
4539 		if (likely(priv->extend_desc))
4540 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4541 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4542 			desc = &tx_q->dma_entx[entry].basic;
4543 		else
4544 			desc = tx_q->dma_tx + entry;
4545 
4546 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4547 				       DMA_TO_DEVICE);
4548 		if (dma_mapping_error(priv->device, des))
4549 			goto dma_map_err; /* should reuse desc w/o issues */
4550 
4551 		tx_q->tx_skbuff_dma[entry].buf = des;
4552 
4553 		stmmac_set_desc_addr(priv, desc, des);
4554 
4555 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4556 		tx_q->tx_skbuff_dma[entry].len = len;
4557 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4558 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4559 
4560 		/* Prepare the descriptor and set the own bit too */
4561 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4562 				priv->mode, 1, last_segment, skb->len);
4563 	}
4564 
4565 	/* Only the last descriptor gets to point to the skb. */
4566 	tx_q->tx_skbuff[entry] = skb;
4567 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4568 
4569 	/* According to the coalesce parameter the IC bit for the latest
4570 	 * segment is reset and the timer re-started to clean the tx status.
4571 	 * This approach takes care about the fragments: desc is the first
4572 	 * element in case of no SG.
4573 	 */
4574 	tx_packets = (entry + 1) - first_tx;
4575 	tx_q->tx_count_frames += tx_packets;
4576 
4577 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4578 		set_ic = true;
4579 	else if (!priv->tx_coal_frames[queue])
4580 		set_ic = false;
4581 	else if (tx_packets > priv->tx_coal_frames[queue])
4582 		set_ic = true;
4583 	else if ((tx_q->tx_count_frames %
4584 		  priv->tx_coal_frames[queue]) < tx_packets)
4585 		set_ic = true;
4586 	else
4587 		set_ic = false;
4588 
4589 	if (set_ic) {
4590 		if (likely(priv->extend_desc))
4591 			desc = &tx_q->dma_etx[entry].basic;
4592 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4593 			desc = &tx_q->dma_entx[entry].basic;
4594 		else
4595 			desc = &tx_q->dma_tx[entry];
4596 
4597 		tx_q->tx_count_frames = 0;
4598 		stmmac_set_tx_ic(priv, desc);
4599 	}
4600 
4601 	/* We've used all descriptors we need for this skb, however,
4602 	 * advance cur_tx so that it references a fresh descriptor.
4603 	 * ndo_start_xmit will fill this descriptor the next time it's
4604 	 * called and stmmac_tx_clean may clean up to this descriptor.
4605 	 */
4606 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4607 	tx_q->cur_tx = entry;
4608 
4609 	if (netif_msg_pktdata(priv)) {
4610 		netdev_dbg(priv->dev,
4611 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4612 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4613 			   entry, first, nfrags);
4614 
4615 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4616 		print_pkt(skb->data, skb->len);
4617 	}
4618 
4619 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4620 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4621 			  __func__);
4622 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4623 	}
4624 
4625 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4626 	txq_stats->tx_bytes += skb->len;
4627 	if (set_ic)
4628 		txq_stats->tx_set_ic_bit++;
4629 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4630 
4631 	if (priv->sarc_type)
4632 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4633 
4634 	skb_tx_timestamp(skb);
4635 
4636 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4637 	 * problems because all the descriptors are actually ready to be
4638 	 * passed to the DMA engine.
4639 	 */
4640 	if (likely(!is_jumbo)) {
4641 		bool last_segment = (nfrags == 0);
4642 
4643 		des = dma_map_single(priv->device, skb->data,
4644 				     nopaged_len, DMA_TO_DEVICE);
4645 		if (dma_mapping_error(priv->device, des))
4646 			goto dma_map_err;
4647 
4648 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4649 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4650 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4651 
4652 		stmmac_set_desc_addr(priv, first, des);
4653 
4654 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4655 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4656 
4657 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4658 			     priv->hwts_tx_en)) {
4659 			/* declare that device is doing timestamping */
4660 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4661 			stmmac_enable_tx_timestamp(priv, first);
4662 		}
4663 
4664 		/* Prepare the first descriptor setting the OWN bit too */
4665 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4666 				csum_insertion, priv->mode, 0, last_segment,
4667 				skb->len);
4668 	}
4669 
4670 	if (tx_q->tbs & STMMAC_TBS_EN) {
4671 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4672 
4673 		tbs_desc = &tx_q->dma_entx[first_entry];
4674 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4675 	}
4676 
4677 	stmmac_set_tx_owner(priv, first);
4678 
4679 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4680 
4681 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4682 
4683 	stmmac_flush_tx_descriptors(priv, queue);
4684 	stmmac_tx_timer_arm(priv, queue);
4685 
4686 	return NETDEV_TX_OK;
4687 
4688 dma_map_err:
4689 	netdev_err(priv->dev, "Tx DMA map failed\n");
4690 	dev_kfree_skb(skb);
4691 	priv->xstats.tx_dropped++;
4692 	return NETDEV_TX_OK;
4693 }
4694 
4695 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4696 {
4697 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4698 	__be16 vlan_proto = veth->h_vlan_proto;
4699 	u16 vlanid;
4700 
4701 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4702 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4703 	    (vlan_proto == htons(ETH_P_8021AD) &&
4704 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4705 		/* pop the vlan tag */
4706 		vlanid = ntohs(veth->h_vlan_TCI);
4707 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4708 		skb_pull(skb, VLAN_HLEN);
4709 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4710 	}
4711 }
4712 
4713 /**
4714  * stmmac_rx_refill - refill used skb preallocated buffers
4715  * @priv: driver private structure
4716  * @queue: RX queue index
4717  * Description : this is to reallocate the skb for the reception process
4718  * that is based on zero-copy.
4719  */
4720 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4721 {
4722 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4723 	int dirty = stmmac_rx_dirty(priv, queue);
4724 	unsigned int entry = rx_q->dirty_rx;
4725 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4726 
4727 	if (priv->dma_cap.host_dma_width <= 32)
4728 		gfp |= GFP_DMA32;
4729 
4730 	while (dirty-- > 0) {
4731 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4732 		struct dma_desc *p;
4733 		bool use_rx_wd;
4734 
4735 		if (priv->extend_desc)
4736 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4737 		else
4738 			p = rx_q->dma_rx + entry;
4739 
4740 		if (!buf->page) {
4741 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4742 			if (!buf->page)
4743 				break;
4744 		}
4745 
4746 		if (priv->sph && !buf->sec_page) {
4747 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4748 			if (!buf->sec_page)
4749 				break;
4750 
4751 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4752 		}
4753 
4754 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4755 
4756 		stmmac_set_desc_addr(priv, p, buf->addr);
4757 		if (priv->sph)
4758 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4759 		else
4760 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4761 		stmmac_refill_desc3(priv, rx_q, p);
4762 
4763 		rx_q->rx_count_frames++;
4764 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4765 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4766 			rx_q->rx_count_frames = 0;
4767 
4768 		use_rx_wd = !priv->rx_coal_frames[queue];
4769 		use_rx_wd |= rx_q->rx_count_frames > 0;
4770 		if (!priv->use_riwt)
4771 			use_rx_wd = false;
4772 
4773 		dma_wmb();
4774 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4775 
4776 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4777 	}
4778 	rx_q->dirty_rx = entry;
4779 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4780 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4781 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4782 }
4783 
4784 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4785 				       struct dma_desc *p,
4786 				       int status, unsigned int len)
4787 {
4788 	unsigned int plen = 0, hlen = 0;
4789 	int coe = priv->hw->rx_csum;
4790 
4791 	/* Not first descriptor, buffer is always zero */
4792 	if (priv->sph && len)
4793 		return 0;
4794 
4795 	/* First descriptor, get split header length */
4796 	stmmac_get_rx_header_len(priv, p, &hlen);
4797 	if (priv->sph && hlen) {
4798 		priv->xstats.rx_split_hdr_pkt_n++;
4799 		return hlen;
4800 	}
4801 
4802 	/* First descriptor, not last descriptor and not split header */
4803 	if (status & rx_not_ls)
4804 		return priv->dma_conf.dma_buf_sz;
4805 
4806 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4807 
4808 	/* First descriptor and last descriptor and not split header */
4809 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4810 }
4811 
4812 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4813 				       struct dma_desc *p,
4814 				       int status, unsigned int len)
4815 {
4816 	int coe = priv->hw->rx_csum;
4817 	unsigned int plen = 0;
4818 
4819 	/* Not split header, buffer is not available */
4820 	if (!priv->sph)
4821 		return 0;
4822 
4823 	/* Not last descriptor */
4824 	if (status & rx_not_ls)
4825 		return priv->dma_conf.dma_buf_sz;
4826 
4827 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4828 
4829 	/* Last descriptor */
4830 	return plen - len;
4831 }
4832 
4833 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4834 				struct xdp_frame *xdpf, bool dma_map)
4835 {
4836 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4837 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4838 	unsigned int entry = tx_q->cur_tx;
4839 	struct dma_desc *tx_desc;
4840 	dma_addr_t dma_addr;
4841 	bool set_ic;
4842 
4843 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4844 		return STMMAC_XDP_CONSUMED;
4845 
4846 	if (likely(priv->extend_desc))
4847 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4848 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4849 		tx_desc = &tx_q->dma_entx[entry].basic;
4850 	else
4851 		tx_desc = tx_q->dma_tx + entry;
4852 
4853 	if (dma_map) {
4854 		dma_addr = dma_map_single(priv->device, xdpf->data,
4855 					  xdpf->len, DMA_TO_DEVICE);
4856 		if (dma_mapping_error(priv->device, dma_addr))
4857 			return STMMAC_XDP_CONSUMED;
4858 
4859 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4860 	} else {
4861 		struct page *page = virt_to_page(xdpf->data);
4862 
4863 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4864 			   xdpf->headroom;
4865 		dma_sync_single_for_device(priv->device, dma_addr,
4866 					   xdpf->len, DMA_BIDIRECTIONAL);
4867 
4868 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4869 	}
4870 
4871 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4872 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4873 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4874 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4875 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4876 
4877 	tx_q->xdpf[entry] = xdpf;
4878 
4879 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4880 
4881 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4882 			       true, priv->mode, true, true,
4883 			       xdpf->len);
4884 
4885 	tx_q->tx_count_frames++;
4886 
4887 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4888 		set_ic = true;
4889 	else
4890 		set_ic = false;
4891 
4892 	if (set_ic) {
4893 		unsigned long flags;
4894 		tx_q->tx_count_frames = 0;
4895 		stmmac_set_tx_ic(priv, tx_desc);
4896 		flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
4897 		txq_stats->tx_set_ic_bit++;
4898 		u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
4899 	}
4900 
4901 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4902 
4903 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4904 	tx_q->cur_tx = entry;
4905 
4906 	return STMMAC_XDP_TX;
4907 }
4908 
4909 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4910 				   int cpu)
4911 {
4912 	int index = cpu;
4913 
4914 	if (unlikely(index < 0))
4915 		index = 0;
4916 
4917 	while (index >= priv->plat->tx_queues_to_use)
4918 		index -= priv->plat->tx_queues_to_use;
4919 
4920 	return index;
4921 }
4922 
4923 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4924 				struct xdp_buff *xdp)
4925 {
4926 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4927 	int cpu = smp_processor_id();
4928 	struct netdev_queue *nq;
4929 	int queue;
4930 	int res;
4931 
4932 	if (unlikely(!xdpf))
4933 		return STMMAC_XDP_CONSUMED;
4934 
4935 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
4936 	nq = netdev_get_tx_queue(priv->dev, queue);
4937 
4938 	__netif_tx_lock(nq, cpu);
4939 	/* Avoids TX time-out as we are sharing with slow path */
4940 	txq_trans_cond_update(nq);
4941 
4942 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4943 	if (res == STMMAC_XDP_TX)
4944 		stmmac_flush_tx_descriptors(priv, queue);
4945 
4946 	__netif_tx_unlock(nq);
4947 
4948 	return res;
4949 }
4950 
4951 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4952 				 struct bpf_prog *prog,
4953 				 struct xdp_buff *xdp)
4954 {
4955 	u32 act;
4956 	int res;
4957 
4958 	act = bpf_prog_run_xdp(prog, xdp);
4959 	switch (act) {
4960 	case XDP_PASS:
4961 		res = STMMAC_XDP_PASS;
4962 		break;
4963 	case XDP_TX:
4964 		res = stmmac_xdp_xmit_back(priv, xdp);
4965 		break;
4966 	case XDP_REDIRECT:
4967 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4968 			res = STMMAC_XDP_CONSUMED;
4969 		else
4970 			res = STMMAC_XDP_REDIRECT;
4971 		break;
4972 	default:
4973 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
4974 		fallthrough;
4975 	case XDP_ABORTED:
4976 		trace_xdp_exception(priv->dev, prog, act);
4977 		fallthrough;
4978 	case XDP_DROP:
4979 		res = STMMAC_XDP_CONSUMED;
4980 		break;
4981 	}
4982 
4983 	return res;
4984 }
4985 
4986 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4987 					   struct xdp_buff *xdp)
4988 {
4989 	struct bpf_prog *prog;
4990 	int res;
4991 
4992 	prog = READ_ONCE(priv->xdp_prog);
4993 	if (!prog) {
4994 		res = STMMAC_XDP_PASS;
4995 		goto out;
4996 	}
4997 
4998 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
4999 out:
5000 	return ERR_PTR(-res);
5001 }
5002 
5003 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5004 				   int xdp_status)
5005 {
5006 	int cpu = smp_processor_id();
5007 	int queue;
5008 
5009 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5010 
5011 	if (xdp_status & STMMAC_XDP_TX)
5012 		stmmac_tx_timer_arm(priv, queue);
5013 
5014 	if (xdp_status & STMMAC_XDP_REDIRECT)
5015 		xdp_do_flush();
5016 }
5017 
5018 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5019 					       struct xdp_buff *xdp)
5020 {
5021 	unsigned int metasize = xdp->data - xdp->data_meta;
5022 	unsigned int datasize = xdp->data_end - xdp->data;
5023 	struct sk_buff *skb;
5024 
5025 	skb = __napi_alloc_skb(&ch->rxtx_napi,
5026 			       xdp->data_end - xdp->data_hard_start,
5027 			       GFP_ATOMIC | __GFP_NOWARN);
5028 	if (unlikely(!skb))
5029 		return NULL;
5030 
5031 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5032 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5033 	if (metasize)
5034 		skb_metadata_set(skb, metasize);
5035 
5036 	return skb;
5037 }
5038 
5039 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5040 				   struct dma_desc *p, struct dma_desc *np,
5041 				   struct xdp_buff *xdp)
5042 {
5043 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5044 	struct stmmac_channel *ch = &priv->channel[queue];
5045 	unsigned int len = xdp->data_end - xdp->data;
5046 	enum pkt_hash_types hash_type;
5047 	int coe = priv->hw->rx_csum;
5048 	unsigned long flags;
5049 	struct sk_buff *skb;
5050 	u32 hash;
5051 
5052 	skb = stmmac_construct_skb_zc(ch, xdp);
5053 	if (!skb) {
5054 		priv->xstats.rx_dropped++;
5055 		return;
5056 	}
5057 
5058 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5059 	if (priv->hw->hw_vlan_en)
5060 		/* MAC level stripping. */
5061 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5062 	else
5063 		/* Driver level stripping. */
5064 		stmmac_rx_vlan(priv->dev, skb);
5065 	skb->protocol = eth_type_trans(skb, priv->dev);
5066 
5067 	if (unlikely(!coe))
5068 		skb_checksum_none_assert(skb);
5069 	else
5070 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5071 
5072 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5073 		skb_set_hash(skb, hash, hash_type);
5074 
5075 	skb_record_rx_queue(skb, queue);
5076 	napi_gro_receive(&ch->rxtx_napi, skb);
5077 
5078 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5079 	rxq_stats->rx_pkt_n++;
5080 	rxq_stats->rx_bytes += len;
5081 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5082 }
5083 
5084 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5085 {
5086 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5087 	unsigned int entry = rx_q->dirty_rx;
5088 	struct dma_desc *rx_desc = NULL;
5089 	bool ret = true;
5090 
5091 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5092 
5093 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5094 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5095 		dma_addr_t dma_addr;
5096 		bool use_rx_wd;
5097 
5098 		if (!buf->xdp) {
5099 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5100 			if (!buf->xdp) {
5101 				ret = false;
5102 				break;
5103 			}
5104 		}
5105 
5106 		if (priv->extend_desc)
5107 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5108 		else
5109 			rx_desc = rx_q->dma_rx + entry;
5110 
5111 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5112 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5113 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5114 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5115 
5116 		rx_q->rx_count_frames++;
5117 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5118 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5119 			rx_q->rx_count_frames = 0;
5120 
5121 		use_rx_wd = !priv->rx_coal_frames[queue];
5122 		use_rx_wd |= rx_q->rx_count_frames > 0;
5123 		if (!priv->use_riwt)
5124 			use_rx_wd = false;
5125 
5126 		dma_wmb();
5127 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5128 
5129 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5130 	}
5131 
5132 	if (rx_desc) {
5133 		rx_q->dirty_rx = entry;
5134 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5135 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5136 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5137 	}
5138 
5139 	return ret;
5140 }
5141 
5142 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5143 {
5144 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5145 	 * to represent incoming packet, whereas cb field in the same structure
5146 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5147 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5148 	 */
5149 	return (struct stmmac_xdp_buff *)xdp;
5150 }
5151 
5152 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5153 {
5154 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5155 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5156 	unsigned int count = 0, error = 0, len = 0;
5157 	int dirty = stmmac_rx_dirty(priv, queue);
5158 	unsigned int next_entry = rx_q->cur_rx;
5159 	u32 rx_errors = 0, rx_dropped = 0;
5160 	unsigned int desc_size;
5161 	struct bpf_prog *prog;
5162 	bool failure = false;
5163 	unsigned long flags;
5164 	int xdp_status = 0;
5165 	int status = 0;
5166 
5167 	if (netif_msg_rx_status(priv)) {
5168 		void *rx_head;
5169 
5170 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5171 		if (priv->extend_desc) {
5172 			rx_head = (void *)rx_q->dma_erx;
5173 			desc_size = sizeof(struct dma_extended_desc);
5174 		} else {
5175 			rx_head = (void *)rx_q->dma_rx;
5176 			desc_size = sizeof(struct dma_desc);
5177 		}
5178 
5179 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5180 				    rx_q->dma_rx_phy, desc_size);
5181 	}
5182 	while (count < limit) {
5183 		struct stmmac_rx_buffer *buf;
5184 		struct stmmac_xdp_buff *ctx;
5185 		unsigned int buf1_len = 0;
5186 		struct dma_desc *np, *p;
5187 		int entry;
5188 		int res;
5189 
5190 		if (!count && rx_q->state_saved) {
5191 			error = rx_q->state.error;
5192 			len = rx_q->state.len;
5193 		} else {
5194 			rx_q->state_saved = false;
5195 			error = 0;
5196 			len = 0;
5197 		}
5198 
5199 		if (count >= limit)
5200 			break;
5201 
5202 read_again:
5203 		buf1_len = 0;
5204 		entry = next_entry;
5205 		buf = &rx_q->buf_pool[entry];
5206 
5207 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5208 			failure = failure ||
5209 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5210 			dirty = 0;
5211 		}
5212 
5213 		if (priv->extend_desc)
5214 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5215 		else
5216 			p = rx_q->dma_rx + entry;
5217 
5218 		/* read the status of the incoming frame */
5219 		status = stmmac_rx_status(priv, &priv->xstats, p);
5220 		/* check if managed by the DMA otherwise go ahead */
5221 		if (unlikely(status & dma_own))
5222 			break;
5223 
5224 		/* Prefetch the next RX descriptor */
5225 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5226 						priv->dma_conf.dma_rx_size);
5227 		next_entry = rx_q->cur_rx;
5228 
5229 		if (priv->extend_desc)
5230 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5231 		else
5232 			np = rx_q->dma_rx + next_entry;
5233 
5234 		prefetch(np);
5235 
5236 		/* Ensure a valid XSK buffer before proceed */
5237 		if (!buf->xdp)
5238 			break;
5239 
5240 		if (priv->extend_desc)
5241 			stmmac_rx_extended_status(priv, &priv->xstats,
5242 						  rx_q->dma_erx + entry);
5243 		if (unlikely(status == discard_frame)) {
5244 			xsk_buff_free(buf->xdp);
5245 			buf->xdp = NULL;
5246 			dirty++;
5247 			error = 1;
5248 			if (!priv->hwts_rx_en)
5249 				rx_errors++;
5250 		}
5251 
5252 		if (unlikely(error && (status & rx_not_ls)))
5253 			goto read_again;
5254 		if (unlikely(error)) {
5255 			count++;
5256 			continue;
5257 		}
5258 
5259 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5260 		if (likely(status & rx_not_ls)) {
5261 			xsk_buff_free(buf->xdp);
5262 			buf->xdp = NULL;
5263 			dirty++;
5264 			count++;
5265 			goto read_again;
5266 		}
5267 
5268 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5269 		ctx->priv = priv;
5270 		ctx->desc = p;
5271 		ctx->ndesc = np;
5272 
5273 		/* XDP ZC Frame only support primary buffers for now */
5274 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5275 		len += buf1_len;
5276 
5277 		/* ACS is disabled; strip manually. */
5278 		if (likely(!(status & rx_not_ls))) {
5279 			buf1_len -= ETH_FCS_LEN;
5280 			len -= ETH_FCS_LEN;
5281 		}
5282 
5283 		/* RX buffer is good and fit into a XSK pool buffer */
5284 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5285 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5286 
5287 		prog = READ_ONCE(priv->xdp_prog);
5288 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5289 
5290 		switch (res) {
5291 		case STMMAC_XDP_PASS:
5292 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5293 			xsk_buff_free(buf->xdp);
5294 			break;
5295 		case STMMAC_XDP_CONSUMED:
5296 			xsk_buff_free(buf->xdp);
5297 			rx_dropped++;
5298 			break;
5299 		case STMMAC_XDP_TX:
5300 		case STMMAC_XDP_REDIRECT:
5301 			xdp_status |= res;
5302 			break;
5303 		}
5304 
5305 		buf->xdp = NULL;
5306 		dirty++;
5307 		count++;
5308 	}
5309 
5310 	if (status & rx_not_ls) {
5311 		rx_q->state_saved = true;
5312 		rx_q->state.error = error;
5313 		rx_q->state.len = len;
5314 	}
5315 
5316 	stmmac_finalize_xdp_rx(priv, xdp_status);
5317 
5318 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5319 	rxq_stats->rx_pkt_n += count;
5320 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5321 
5322 	priv->xstats.rx_dropped += rx_dropped;
5323 	priv->xstats.rx_errors += rx_errors;
5324 
5325 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5326 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5327 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5328 		else
5329 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5330 
5331 		return (int)count;
5332 	}
5333 
5334 	return failure ? limit : (int)count;
5335 }
5336 
5337 /**
5338  * stmmac_rx - manage the receive process
5339  * @priv: driver private structure
5340  * @limit: napi bugget
5341  * @queue: RX queue index.
5342  * Description :  this the function called by the napi poll method.
5343  * It gets all the frames inside the ring.
5344  */
5345 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5346 {
5347 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5348 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5349 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5350 	struct stmmac_channel *ch = &priv->channel[queue];
5351 	unsigned int count = 0, error = 0, len = 0;
5352 	int status = 0, coe = priv->hw->rx_csum;
5353 	unsigned int next_entry = rx_q->cur_rx;
5354 	enum dma_data_direction dma_dir;
5355 	unsigned int desc_size;
5356 	struct sk_buff *skb = NULL;
5357 	struct stmmac_xdp_buff ctx;
5358 	unsigned long flags;
5359 	int xdp_status = 0;
5360 	int buf_sz;
5361 
5362 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5363 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5364 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5365 
5366 	if (netif_msg_rx_status(priv)) {
5367 		void *rx_head;
5368 
5369 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5370 		if (priv->extend_desc) {
5371 			rx_head = (void *)rx_q->dma_erx;
5372 			desc_size = sizeof(struct dma_extended_desc);
5373 		} else {
5374 			rx_head = (void *)rx_q->dma_rx;
5375 			desc_size = sizeof(struct dma_desc);
5376 		}
5377 
5378 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5379 				    rx_q->dma_rx_phy, desc_size);
5380 	}
5381 	while (count < limit) {
5382 		unsigned int buf1_len = 0, buf2_len = 0;
5383 		enum pkt_hash_types hash_type;
5384 		struct stmmac_rx_buffer *buf;
5385 		struct dma_desc *np, *p;
5386 		int entry;
5387 		u32 hash;
5388 
5389 		if (!count && rx_q->state_saved) {
5390 			skb = rx_q->state.skb;
5391 			error = rx_q->state.error;
5392 			len = rx_q->state.len;
5393 		} else {
5394 			rx_q->state_saved = false;
5395 			skb = NULL;
5396 			error = 0;
5397 			len = 0;
5398 		}
5399 
5400 read_again:
5401 		if (count >= limit)
5402 			break;
5403 
5404 		buf1_len = 0;
5405 		buf2_len = 0;
5406 		entry = next_entry;
5407 		buf = &rx_q->buf_pool[entry];
5408 
5409 		if (priv->extend_desc)
5410 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5411 		else
5412 			p = rx_q->dma_rx + entry;
5413 
5414 		/* read the status of the incoming frame */
5415 		status = stmmac_rx_status(priv, &priv->xstats, p);
5416 		/* check if managed by the DMA otherwise go ahead */
5417 		if (unlikely(status & dma_own))
5418 			break;
5419 
5420 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5421 						priv->dma_conf.dma_rx_size);
5422 		next_entry = rx_q->cur_rx;
5423 
5424 		if (priv->extend_desc)
5425 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5426 		else
5427 			np = rx_q->dma_rx + next_entry;
5428 
5429 		prefetch(np);
5430 
5431 		if (priv->extend_desc)
5432 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5433 		if (unlikely(status == discard_frame)) {
5434 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5435 			buf->page = NULL;
5436 			error = 1;
5437 			if (!priv->hwts_rx_en)
5438 				rx_errors++;
5439 		}
5440 
5441 		if (unlikely(error && (status & rx_not_ls)))
5442 			goto read_again;
5443 		if (unlikely(error)) {
5444 			dev_kfree_skb(skb);
5445 			skb = NULL;
5446 			count++;
5447 			continue;
5448 		}
5449 
5450 		/* Buffer is good. Go on. */
5451 
5452 		prefetch(page_address(buf->page) + buf->page_offset);
5453 		if (buf->sec_page)
5454 			prefetch(page_address(buf->sec_page));
5455 
5456 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5457 		len += buf1_len;
5458 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5459 		len += buf2_len;
5460 
5461 		/* ACS is disabled; strip manually. */
5462 		if (likely(!(status & rx_not_ls))) {
5463 			if (buf2_len) {
5464 				buf2_len -= ETH_FCS_LEN;
5465 				len -= ETH_FCS_LEN;
5466 			} else if (buf1_len) {
5467 				buf1_len -= ETH_FCS_LEN;
5468 				len -= ETH_FCS_LEN;
5469 			}
5470 		}
5471 
5472 		if (!skb) {
5473 			unsigned int pre_len, sync_len;
5474 
5475 			dma_sync_single_for_cpu(priv->device, buf->addr,
5476 						buf1_len, dma_dir);
5477 
5478 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5479 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5480 					 buf->page_offset, buf1_len, true);
5481 
5482 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5483 				  buf->page_offset;
5484 
5485 			ctx.priv = priv;
5486 			ctx.desc = p;
5487 			ctx.ndesc = np;
5488 
5489 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5490 			/* Due xdp_adjust_tail: DMA sync for_device
5491 			 * cover max len CPU touch
5492 			 */
5493 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5494 				   buf->page_offset;
5495 			sync_len = max(sync_len, pre_len);
5496 
5497 			/* For Not XDP_PASS verdict */
5498 			if (IS_ERR(skb)) {
5499 				unsigned int xdp_res = -PTR_ERR(skb);
5500 
5501 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5502 					page_pool_put_page(rx_q->page_pool,
5503 							   virt_to_head_page(ctx.xdp.data),
5504 							   sync_len, true);
5505 					buf->page = NULL;
5506 					rx_dropped++;
5507 
5508 					/* Clear skb as it was set as
5509 					 * status by XDP program.
5510 					 */
5511 					skb = NULL;
5512 
5513 					if (unlikely((status & rx_not_ls)))
5514 						goto read_again;
5515 
5516 					count++;
5517 					continue;
5518 				} else if (xdp_res & (STMMAC_XDP_TX |
5519 						      STMMAC_XDP_REDIRECT)) {
5520 					xdp_status |= xdp_res;
5521 					buf->page = NULL;
5522 					skb = NULL;
5523 					count++;
5524 					continue;
5525 				}
5526 			}
5527 		}
5528 
5529 		if (!skb) {
5530 			/* XDP program may expand or reduce tail */
5531 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5532 
5533 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5534 			if (!skb) {
5535 				rx_dropped++;
5536 				count++;
5537 				goto drain_data;
5538 			}
5539 
5540 			/* XDP program may adjust header */
5541 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5542 			skb_put(skb, buf1_len);
5543 
5544 			/* Data payload copied into SKB, page ready for recycle */
5545 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5546 			buf->page = NULL;
5547 		} else if (buf1_len) {
5548 			dma_sync_single_for_cpu(priv->device, buf->addr,
5549 						buf1_len, dma_dir);
5550 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5551 					buf->page, buf->page_offset, buf1_len,
5552 					priv->dma_conf.dma_buf_sz);
5553 
5554 			/* Data payload appended into SKB */
5555 			skb_mark_for_recycle(skb);
5556 			buf->page = NULL;
5557 		}
5558 
5559 		if (buf2_len) {
5560 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5561 						buf2_len, dma_dir);
5562 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5563 					buf->sec_page, 0, buf2_len,
5564 					priv->dma_conf.dma_buf_sz);
5565 
5566 			/* Data payload appended into SKB */
5567 			skb_mark_for_recycle(skb);
5568 			buf->sec_page = NULL;
5569 		}
5570 
5571 drain_data:
5572 		if (likely(status & rx_not_ls))
5573 			goto read_again;
5574 		if (!skb)
5575 			continue;
5576 
5577 		/* Got entire packet into SKB. Finish it. */
5578 
5579 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5580 
5581 		if (priv->hw->hw_vlan_en)
5582 			/* MAC level stripping. */
5583 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5584 		else
5585 			/* Driver level stripping. */
5586 			stmmac_rx_vlan(priv->dev, skb);
5587 
5588 		skb->protocol = eth_type_trans(skb, priv->dev);
5589 
5590 		if (unlikely(!coe))
5591 			skb_checksum_none_assert(skb);
5592 		else
5593 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5594 
5595 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5596 			skb_set_hash(skb, hash, hash_type);
5597 
5598 		skb_record_rx_queue(skb, queue);
5599 		napi_gro_receive(&ch->rx_napi, skb);
5600 		skb = NULL;
5601 
5602 		rx_packets++;
5603 		rx_bytes += len;
5604 		count++;
5605 	}
5606 
5607 	if (status & rx_not_ls || skb) {
5608 		rx_q->state_saved = true;
5609 		rx_q->state.skb = skb;
5610 		rx_q->state.error = error;
5611 		rx_q->state.len = len;
5612 	}
5613 
5614 	stmmac_finalize_xdp_rx(priv, xdp_status);
5615 
5616 	stmmac_rx_refill(priv, queue);
5617 
5618 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5619 	rxq_stats->rx_packets += rx_packets;
5620 	rxq_stats->rx_bytes += rx_bytes;
5621 	rxq_stats->rx_pkt_n += count;
5622 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5623 
5624 	priv->xstats.rx_dropped += rx_dropped;
5625 	priv->xstats.rx_errors += rx_errors;
5626 
5627 	return count;
5628 }
5629 
5630 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5631 {
5632 	struct stmmac_channel *ch =
5633 		container_of(napi, struct stmmac_channel, rx_napi);
5634 	struct stmmac_priv *priv = ch->priv_data;
5635 	struct stmmac_rxq_stats *rxq_stats;
5636 	u32 chan = ch->index;
5637 	unsigned long flags;
5638 	int work_done;
5639 
5640 	rxq_stats = &priv->xstats.rxq_stats[chan];
5641 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5642 	rxq_stats->napi_poll++;
5643 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5644 
5645 	work_done = stmmac_rx(priv, budget, chan);
5646 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5647 		unsigned long flags;
5648 
5649 		spin_lock_irqsave(&ch->lock, flags);
5650 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5651 		spin_unlock_irqrestore(&ch->lock, flags);
5652 	}
5653 
5654 	return work_done;
5655 }
5656 
5657 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5658 {
5659 	struct stmmac_channel *ch =
5660 		container_of(napi, struct stmmac_channel, tx_napi);
5661 	struct stmmac_priv *priv = ch->priv_data;
5662 	struct stmmac_txq_stats *txq_stats;
5663 	bool pending_packets = false;
5664 	u32 chan = ch->index;
5665 	unsigned long flags;
5666 	int work_done;
5667 
5668 	txq_stats = &priv->xstats.txq_stats[chan];
5669 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5670 	txq_stats->napi_poll++;
5671 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5672 
5673 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5674 	work_done = min(work_done, budget);
5675 
5676 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5677 		unsigned long flags;
5678 
5679 		spin_lock_irqsave(&ch->lock, flags);
5680 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5681 		spin_unlock_irqrestore(&ch->lock, flags);
5682 	}
5683 
5684 	/* TX still have packet to handle, check if we need to arm tx timer */
5685 	if (pending_packets)
5686 		stmmac_tx_timer_arm(priv, chan);
5687 
5688 	return work_done;
5689 }
5690 
5691 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5692 {
5693 	struct stmmac_channel *ch =
5694 		container_of(napi, struct stmmac_channel, rxtx_napi);
5695 	struct stmmac_priv *priv = ch->priv_data;
5696 	bool tx_pending_packets = false;
5697 	int rx_done, tx_done, rxtx_done;
5698 	struct stmmac_rxq_stats *rxq_stats;
5699 	struct stmmac_txq_stats *txq_stats;
5700 	u32 chan = ch->index;
5701 	unsigned long flags;
5702 
5703 	rxq_stats = &priv->xstats.rxq_stats[chan];
5704 	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
5705 	rxq_stats->napi_poll++;
5706 	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5707 
5708 	txq_stats = &priv->xstats.txq_stats[chan];
5709 	flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5710 	txq_stats->napi_poll++;
5711 	u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5712 
5713 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5714 	tx_done = min(tx_done, budget);
5715 
5716 	rx_done = stmmac_rx_zc(priv, budget, chan);
5717 
5718 	rxtx_done = max(tx_done, rx_done);
5719 
5720 	/* If either TX or RX work is not complete, return budget
5721 	 * and keep pooling
5722 	 */
5723 	if (rxtx_done >= budget)
5724 		return budget;
5725 
5726 	/* all work done, exit the polling mode */
5727 	if (napi_complete_done(napi, rxtx_done)) {
5728 		unsigned long flags;
5729 
5730 		spin_lock_irqsave(&ch->lock, flags);
5731 		/* Both RX and TX work done are compelte,
5732 		 * so enable both RX & TX IRQs.
5733 		 */
5734 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5735 		spin_unlock_irqrestore(&ch->lock, flags);
5736 	}
5737 
5738 	/* TX still have packet to handle, check if we need to arm tx timer */
5739 	if (tx_pending_packets)
5740 		stmmac_tx_timer_arm(priv, chan);
5741 
5742 	return min(rxtx_done, budget - 1);
5743 }
5744 
5745 /**
5746  *  stmmac_tx_timeout
5747  *  @dev : Pointer to net device structure
5748  *  @txqueue: the index of the hanging transmit queue
5749  *  Description: this function is called when a packet transmission fails to
5750  *   complete within a reasonable time. The driver will mark the error in the
5751  *   netdev structure and arrange for the device to be reset to a sane state
5752  *   in order to transmit a new packet.
5753  */
5754 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5755 {
5756 	struct stmmac_priv *priv = netdev_priv(dev);
5757 
5758 	stmmac_global_err(priv);
5759 }
5760 
5761 /**
5762  *  stmmac_set_rx_mode - entry point for multicast addressing
5763  *  @dev : pointer to the device structure
5764  *  Description:
5765  *  This function is a driver entry point which gets called by the kernel
5766  *  whenever multicast addresses must be enabled/disabled.
5767  *  Return value:
5768  *  void.
5769  */
5770 static void stmmac_set_rx_mode(struct net_device *dev)
5771 {
5772 	struct stmmac_priv *priv = netdev_priv(dev);
5773 
5774 	stmmac_set_filter(priv, priv->hw, dev);
5775 }
5776 
5777 /**
5778  *  stmmac_change_mtu - entry point to change MTU size for the device.
5779  *  @dev : device pointer.
5780  *  @new_mtu : the new MTU size for the device.
5781  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5782  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5783  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5784  *  Return value:
5785  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5786  *  file on failure.
5787  */
5788 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5789 {
5790 	struct stmmac_priv *priv = netdev_priv(dev);
5791 	int txfifosz = priv->plat->tx_fifo_size;
5792 	struct stmmac_dma_conf *dma_conf;
5793 	const int mtu = new_mtu;
5794 	int ret;
5795 
5796 	if (txfifosz == 0)
5797 		txfifosz = priv->dma_cap.tx_fifo_size;
5798 
5799 	txfifosz /= priv->plat->tx_queues_to_use;
5800 
5801 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5802 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5803 		return -EINVAL;
5804 	}
5805 
5806 	new_mtu = STMMAC_ALIGN(new_mtu);
5807 
5808 	/* If condition true, FIFO is too small or MTU too large */
5809 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5810 		return -EINVAL;
5811 
5812 	if (netif_running(dev)) {
5813 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5814 		/* Try to allocate the new DMA conf with the new mtu */
5815 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5816 		if (IS_ERR(dma_conf)) {
5817 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5818 				   mtu);
5819 			return PTR_ERR(dma_conf);
5820 		}
5821 
5822 		stmmac_release(dev);
5823 
5824 		ret = __stmmac_open(dev, dma_conf);
5825 		if (ret) {
5826 			free_dma_desc_resources(priv, dma_conf);
5827 			kfree(dma_conf);
5828 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5829 			return ret;
5830 		}
5831 
5832 		kfree(dma_conf);
5833 
5834 		stmmac_set_rx_mode(dev);
5835 	}
5836 
5837 	dev->mtu = mtu;
5838 	netdev_update_features(dev);
5839 
5840 	return 0;
5841 }
5842 
5843 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5844 					     netdev_features_t features)
5845 {
5846 	struct stmmac_priv *priv = netdev_priv(dev);
5847 
5848 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5849 		features &= ~NETIF_F_RXCSUM;
5850 
5851 	if (!priv->plat->tx_coe)
5852 		features &= ~NETIF_F_CSUM_MASK;
5853 
5854 	/* Some GMAC devices have a bugged Jumbo frame support that
5855 	 * needs to have the Tx COE disabled for oversized frames
5856 	 * (due to limited buffer sizes). In this case we disable
5857 	 * the TX csum insertion in the TDES and not use SF.
5858 	 */
5859 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5860 		features &= ~NETIF_F_CSUM_MASK;
5861 
5862 	/* Disable tso if asked by ethtool */
5863 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5864 		if (features & NETIF_F_TSO)
5865 			priv->tso = true;
5866 		else
5867 			priv->tso = false;
5868 	}
5869 
5870 	return features;
5871 }
5872 
5873 static int stmmac_set_features(struct net_device *netdev,
5874 			       netdev_features_t features)
5875 {
5876 	struct stmmac_priv *priv = netdev_priv(netdev);
5877 
5878 	/* Keep the COE Type in case of csum is supporting */
5879 	if (features & NETIF_F_RXCSUM)
5880 		priv->hw->rx_csum = priv->plat->rx_coe;
5881 	else
5882 		priv->hw->rx_csum = 0;
5883 	/* No check needed because rx_coe has been set before and it will be
5884 	 * fixed in case of issue.
5885 	 */
5886 	stmmac_rx_ipc(priv, priv->hw);
5887 
5888 	if (priv->sph_cap) {
5889 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5890 		u32 chan;
5891 
5892 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5893 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5894 	}
5895 
5896 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5897 		priv->hw->hw_vlan_en = true;
5898 	else
5899 		priv->hw->hw_vlan_en = false;
5900 
5901 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5902 
5903 	return 0;
5904 }
5905 
5906 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5907 {
5908 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5909 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5910 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5911 	bool *hs_enable = &fpe_cfg->hs_enable;
5912 
5913 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5914 		return;
5915 
5916 	/* If LP has sent verify mPacket, LP is FPE capable */
5917 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5918 		if (*lp_state < FPE_STATE_CAPABLE)
5919 			*lp_state = FPE_STATE_CAPABLE;
5920 
5921 		/* If user has requested FPE enable, quickly response */
5922 		if (*hs_enable)
5923 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5924 						MPACKET_RESPONSE);
5925 	}
5926 
5927 	/* If Local has sent verify mPacket, Local is FPE capable */
5928 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5929 		if (*lo_state < FPE_STATE_CAPABLE)
5930 			*lo_state = FPE_STATE_CAPABLE;
5931 	}
5932 
5933 	/* If LP has sent response mPacket, LP is entering FPE ON */
5934 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5935 		*lp_state = FPE_STATE_ENTERING_ON;
5936 
5937 	/* If Local has sent response mPacket, Local is entering FPE ON */
5938 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5939 		*lo_state = FPE_STATE_ENTERING_ON;
5940 
5941 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5942 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5943 	    priv->fpe_wq) {
5944 		queue_work(priv->fpe_wq, &priv->fpe_task);
5945 	}
5946 }
5947 
5948 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5949 {
5950 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5951 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5952 	u32 queues_count;
5953 	u32 queue;
5954 	bool xmac;
5955 
5956 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5957 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5958 
5959 	if (priv->irq_wake)
5960 		pm_wakeup_event(priv->device, 0);
5961 
5962 	if (priv->dma_cap.estsel)
5963 		stmmac_est_irq_status(priv, priv, priv->dev,
5964 				      &priv->xstats, tx_cnt);
5965 
5966 	if (priv->dma_cap.fpesel) {
5967 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5968 						   priv->dev);
5969 
5970 		stmmac_fpe_event_status(priv, status);
5971 	}
5972 
5973 	/* To handle GMAC own interrupts */
5974 	if ((priv->plat->has_gmac) || xmac) {
5975 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5976 
5977 		if (unlikely(status)) {
5978 			/* For LPI we need to save the tx status */
5979 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5980 				priv->tx_path_in_lpi_mode = true;
5981 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5982 				priv->tx_path_in_lpi_mode = false;
5983 		}
5984 
5985 		for (queue = 0; queue < queues_count; queue++) {
5986 			status = stmmac_host_mtl_irq_status(priv, priv->hw,
5987 							    queue);
5988 		}
5989 
5990 		/* PCS link status */
5991 		if (priv->hw->pcs &&
5992 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5993 			if (priv->xstats.pcs_link)
5994 				netif_carrier_on(priv->dev);
5995 			else
5996 				netif_carrier_off(priv->dev);
5997 		}
5998 
5999 		stmmac_timestamp_interrupt(priv, priv);
6000 	}
6001 }
6002 
6003 /**
6004  *  stmmac_interrupt - main ISR
6005  *  @irq: interrupt number.
6006  *  @dev_id: to pass the net device pointer.
6007  *  Description: this is the main driver interrupt service routine.
6008  *  It can call:
6009  *  o DMA service routine (to manage incoming frame reception and transmission
6010  *    status)
6011  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6012  *    interrupts.
6013  */
6014 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6015 {
6016 	struct net_device *dev = (struct net_device *)dev_id;
6017 	struct stmmac_priv *priv = netdev_priv(dev);
6018 
6019 	/* Check if adapter is up */
6020 	if (test_bit(STMMAC_DOWN, &priv->state))
6021 		return IRQ_HANDLED;
6022 
6023 	/* Check if a fatal error happened */
6024 	if (stmmac_safety_feat_interrupt(priv))
6025 		return IRQ_HANDLED;
6026 
6027 	/* To handle Common interrupts */
6028 	stmmac_common_interrupt(priv);
6029 
6030 	/* To handle DMA interrupts */
6031 	stmmac_dma_interrupt(priv);
6032 
6033 	return IRQ_HANDLED;
6034 }
6035 
6036 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6037 {
6038 	struct net_device *dev = (struct net_device *)dev_id;
6039 	struct stmmac_priv *priv = netdev_priv(dev);
6040 
6041 	if (unlikely(!dev)) {
6042 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6043 		return IRQ_NONE;
6044 	}
6045 
6046 	/* Check if adapter is up */
6047 	if (test_bit(STMMAC_DOWN, &priv->state))
6048 		return IRQ_HANDLED;
6049 
6050 	/* To handle Common interrupts */
6051 	stmmac_common_interrupt(priv);
6052 
6053 	return IRQ_HANDLED;
6054 }
6055 
6056 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6057 {
6058 	struct net_device *dev = (struct net_device *)dev_id;
6059 	struct stmmac_priv *priv = netdev_priv(dev);
6060 
6061 	if (unlikely(!dev)) {
6062 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6063 		return IRQ_NONE;
6064 	}
6065 
6066 	/* Check if adapter is up */
6067 	if (test_bit(STMMAC_DOWN, &priv->state))
6068 		return IRQ_HANDLED;
6069 
6070 	/* Check if a fatal error happened */
6071 	stmmac_safety_feat_interrupt(priv);
6072 
6073 	return IRQ_HANDLED;
6074 }
6075 
6076 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6077 {
6078 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6079 	struct stmmac_dma_conf *dma_conf;
6080 	int chan = tx_q->queue_index;
6081 	struct stmmac_priv *priv;
6082 	int status;
6083 
6084 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6085 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6086 
6087 	if (unlikely(!data)) {
6088 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6089 		return IRQ_NONE;
6090 	}
6091 
6092 	/* Check if adapter is up */
6093 	if (test_bit(STMMAC_DOWN, &priv->state))
6094 		return IRQ_HANDLED;
6095 
6096 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6097 
6098 	if (unlikely(status & tx_hard_error_bump_tc)) {
6099 		/* Try to bump up the dma threshold on this failure */
6100 		stmmac_bump_dma_threshold(priv, chan);
6101 	} else if (unlikely(status == tx_hard_error)) {
6102 		stmmac_tx_err(priv, chan);
6103 	}
6104 
6105 	return IRQ_HANDLED;
6106 }
6107 
6108 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6109 {
6110 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6111 	struct stmmac_dma_conf *dma_conf;
6112 	int chan = rx_q->queue_index;
6113 	struct stmmac_priv *priv;
6114 
6115 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6116 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6117 
6118 	if (unlikely(!data)) {
6119 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6120 		return IRQ_NONE;
6121 	}
6122 
6123 	/* Check if adapter is up */
6124 	if (test_bit(STMMAC_DOWN, &priv->state))
6125 		return IRQ_HANDLED;
6126 
6127 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6128 
6129 	return IRQ_HANDLED;
6130 }
6131 
6132 /**
6133  *  stmmac_ioctl - Entry point for the Ioctl
6134  *  @dev: Device pointer.
6135  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6136  *  a proprietary structure used to pass information to the driver.
6137  *  @cmd: IOCTL command
6138  *  Description:
6139  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6140  */
6141 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6142 {
6143 	struct stmmac_priv *priv = netdev_priv (dev);
6144 	int ret = -EOPNOTSUPP;
6145 
6146 	if (!netif_running(dev))
6147 		return -EINVAL;
6148 
6149 	switch (cmd) {
6150 	case SIOCGMIIPHY:
6151 	case SIOCGMIIREG:
6152 	case SIOCSMIIREG:
6153 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6154 		break;
6155 	case SIOCSHWTSTAMP:
6156 		ret = stmmac_hwtstamp_set(dev, rq);
6157 		break;
6158 	case SIOCGHWTSTAMP:
6159 		ret = stmmac_hwtstamp_get(dev, rq);
6160 		break;
6161 	default:
6162 		break;
6163 	}
6164 
6165 	return ret;
6166 }
6167 
6168 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6169 				    void *cb_priv)
6170 {
6171 	struct stmmac_priv *priv = cb_priv;
6172 	int ret = -EOPNOTSUPP;
6173 
6174 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6175 		return ret;
6176 
6177 	__stmmac_disable_all_queues(priv);
6178 
6179 	switch (type) {
6180 	case TC_SETUP_CLSU32:
6181 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6182 		break;
6183 	case TC_SETUP_CLSFLOWER:
6184 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6185 		break;
6186 	default:
6187 		break;
6188 	}
6189 
6190 	stmmac_enable_all_queues(priv);
6191 	return ret;
6192 }
6193 
6194 static LIST_HEAD(stmmac_block_cb_list);
6195 
6196 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6197 			   void *type_data)
6198 {
6199 	struct stmmac_priv *priv = netdev_priv(ndev);
6200 
6201 	switch (type) {
6202 	case TC_QUERY_CAPS:
6203 		return stmmac_tc_query_caps(priv, priv, type_data);
6204 	case TC_SETUP_BLOCK:
6205 		return flow_block_cb_setup_simple(type_data,
6206 						  &stmmac_block_cb_list,
6207 						  stmmac_setup_tc_block_cb,
6208 						  priv, priv, true);
6209 	case TC_SETUP_QDISC_CBS:
6210 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6211 	case TC_SETUP_QDISC_TAPRIO:
6212 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6213 	case TC_SETUP_QDISC_ETF:
6214 		return stmmac_tc_setup_etf(priv, priv, type_data);
6215 	default:
6216 		return -EOPNOTSUPP;
6217 	}
6218 }
6219 
6220 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6221 			       struct net_device *sb_dev)
6222 {
6223 	int gso = skb_shinfo(skb)->gso_type;
6224 
6225 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6226 		/*
6227 		 * There is no way to determine the number of TSO/USO
6228 		 * capable Queues. Let's use always the Queue 0
6229 		 * because if TSO/USO is supported then at least this
6230 		 * one will be capable.
6231 		 */
6232 		return 0;
6233 	}
6234 
6235 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6236 }
6237 
6238 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6239 {
6240 	struct stmmac_priv *priv = netdev_priv(ndev);
6241 	int ret = 0;
6242 
6243 	ret = pm_runtime_resume_and_get(priv->device);
6244 	if (ret < 0)
6245 		return ret;
6246 
6247 	ret = eth_mac_addr(ndev, addr);
6248 	if (ret)
6249 		goto set_mac_error;
6250 
6251 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6252 
6253 set_mac_error:
6254 	pm_runtime_put(priv->device);
6255 
6256 	return ret;
6257 }
6258 
6259 #ifdef CONFIG_DEBUG_FS
6260 static struct dentry *stmmac_fs_dir;
6261 
6262 static void sysfs_display_ring(void *head, int size, int extend_desc,
6263 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6264 {
6265 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6266 	struct dma_desc *p = (struct dma_desc *)head;
6267 	unsigned int desc_size;
6268 	dma_addr_t dma_addr;
6269 	int i;
6270 
6271 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6272 	for (i = 0; i < size; i++) {
6273 		dma_addr = dma_phy_addr + i * desc_size;
6274 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6275 				i, &dma_addr,
6276 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6277 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6278 		if (extend_desc)
6279 			p = &(++ep)->basic;
6280 		else
6281 			p++;
6282 	}
6283 }
6284 
6285 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6286 {
6287 	struct net_device *dev = seq->private;
6288 	struct stmmac_priv *priv = netdev_priv(dev);
6289 	u32 rx_count = priv->plat->rx_queues_to_use;
6290 	u32 tx_count = priv->plat->tx_queues_to_use;
6291 	u32 queue;
6292 
6293 	if ((dev->flags & IFF_UP) == 0)
6294 		return 0;
6295 
6296 	for (queue = 0; queue < rx_count; queue++) {
6297 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6298 
6299 		seq_printf(seq, "RX Queue %d:\n", queue);
6300 
6301 		if (priv->extend_desc) {
6302 			seq_printf(seq, "Extended descriptor ring:\n");
6303 			sysfs_display_ring((void *)rx_q->dma_erx,
6304 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6305 		} else {
6306 			seq_printf(seq, "Descriptor ring:\n");
6307 			sysfs_display_ring((void *)rx_q->dma_rx,
6308 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6309 		}
6310 	}
6311 
6312 	for (queue = 0; queue < tx_count; queue++) {
6313 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6314 
6315 		seq_printf(seq, "TX Queue %d:\n", queue);
6316 
6317 		if (priv->extend_desc) {
6318 			seq_printf(seq, "Extended descriptor ring:\n");
6319 			sysfs_display_ring((void *)tx_q->dma_etx,
6320 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6321 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6322 			seq_printf(seq, "Descriptor ring:\n");
6323 			sysfs_display_ring((void *)tx_q->dma_tx,
6324 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6325 		}
6326 	}
6327 
6328 	return 0;
6329 }
6330 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6331 
6332 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6333 {
6334 	static const char * const dwxgmac_timestamp_source[] = {
6335 		"None",
6336 		"Internal",
6337 		"External",
6338 		"Both",
6339 	};
6340 	static const char * const dwxgmac_safety_feature_desc[] = {
6341 		"No",
6342 		"All Safety Features with ECC and Parity",
6343 		"All Safety Features without ECC or Parity",
6344 		"All Safety Features with Parity Only",
6345 		"ECC Only",
6346 		"UNDEFINED",
6347 		"UNDEFINED",
6348 		"UNDEFINED",
6349 	};
6350 	struct net_device *dev = seq->private;
6351 	struct stmmac_priv *priv = netdev_priv(dev);
6352 
6353 	if (!priv->hw_cap_support) {
6354 		seq_printf(seq, "DMA HW features not supported\n");
6355 		return 0;
6356 	}
6357 
6358 	seq_printf(seq, "==============================\n");
6359 	seq_printf(seq, "\tDMA HW features\n");
6360 	seq_printf(seq, "==============================\n");
6361 
6362 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6363 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6364 	seq_printf(seq, "\t1000 Mbps: %s\n",
6365 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6366 	seq_printf(seq, "\tHalf duplex: %s\n",
6367 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6368 	if (priv->plat->has_xgmac) {
6369 		seq_printf(seq,
6370 			   "\tNumber of Additional MAC address registers: %d\n",
6371 			   priv->dma_cap.multi_addr);
6372 	} else {
6373 		seq_printf(seq, "\tHash Filter: %s\n",
6374 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6375 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6376 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6377 	}
6378 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6379 		   (priv->dma_cap.pcs) ? "Y" : "N");
6380 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6381 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6382 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6383 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6384 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6385 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6386 	seq_printf(seq, "\tRMON module: %s\n",
6387 		   (priv->dma_cap.rmon) ? "Y" : "N");
6388 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6389 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6390 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6391 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6392 	if (priv->plat->has_xgmac)
6393 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6394 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6395 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6396 		   (priv->dma_cap.eee) ? "Y" : "N");
6397 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6398 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6399 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6400 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6401 	    priv->plat->has_xgmac) {
6402 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6403 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6404 	} else {
6405 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6406 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6407 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6408 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6409 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6410 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6411 	}
6412 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6413 		   priv->dma_cap.number_rx_channel);
6414 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6415 		   priv->dma_cap.number_tx_channel);
6416 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6417 		   priv->dma_cap.number_rx_queues);
6418 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6419 		   priv->dma_cap.number_tx_queues);
6420 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6421 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6422 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6423 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6424 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6425 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6426 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6427 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6428 		   priv->dma_cap.pps_out_num);
6429 	seq_printf(seq, "\tSafety Features: %s\n",
6430 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6431 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6432 		   priv->dma_cap.frpsel ? "Y" : "N");
6433 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6434 		   priv->dma_cap.host_dma_width);
6435 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6436 		   priv->dma_cap.rssen ? "Y" : "N");
6437 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6438 		   priv->dma_cap.vlhash ? "Y" : "N");
6439 	seq_printf(seq, "\tSplit Header: %s\n",
6440 		   priv->dma_cap.sphen ? "Y" : "N");
6441 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6442 		   priv->dma_cap.vlins ? "Y" : "N");
6443 	seq_printf(seq, "\tDouble VLAN: %s\n",
6444 		   priv->dma_cap.dvlan ? "Y" : "N");
6445 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6446 		   priv->dma_cap.l3l4fnum);
6447 	seq_printf(seq, "\tARP Offloading: %s\n",
6448 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6449 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6450 		   priv->dma_cap.estsel ? "Y" : "N");
6451 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6452 		   priv->dma_cap.fpesel ? "Y" : "N");
6453 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6454 		   priv->dma_cap.tbssel ? "Y" : "N");
6455 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6456 		   priv->dma_cap.tbs_ch_num);
6457 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6458 		   priv->dma_cap.sgfsel ? "Y" : "N");
6459 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6460 		   BIT(priv->dma_cap.ttsfd) >> 1);
6461 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6462 		   priv->dma_cap.numtc);
6463 	seq_printf(seq, "\tDCB Feature: %s\n",
6464 		   priv->dma_cap.dcben ? "Y" : "N");
6465 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6466 		   priv->dma_cap.advthword ? "Y" : "N");
6467 	seq_printf(seq, "\tPTP Offload: %s\n",
6468 		   priv->dma_cap.ptoen ? "Y" : "N");
6469 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6470 		   priv->dma_cap.osten ? "Y" : "N");
6471 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6472 		   priv->dma_cap.pfcen ? "Y" : "N");
6473 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6474 		   BIT(priv->dma_cap.frpes) << 6);
6475 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6476 		   BIT(priv->dma_cap.frpbs) << 6);
6477 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6478 		   priv->dma_cap.frppipe_num);
6479 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6480 		   priv->dma_cap.nrvf_num ?
6481 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6482 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6483 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6484 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6485 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6486 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6487 		   priv->dma_cap.cbtisel ? "Y" : "N");
6488 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6489 		   priv->dma_cap.aux_snapshot_n);
6490 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6491 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6492 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6493 		   priv->dma_cap.edma ? "Y" : "N");
6494 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6495 		   priv->dma_cap.ediffc ? "Y" : "N");
6496 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6497 		   priv->dma_cap.vxn ? "Y" : "N");
6498 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6499 		   priv->dma_cap.dbgmem ? "Y" : "N");
6500 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6501 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6502 	return 0;
6503 }
6504 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6505 
6506 /* Use network device events to rename debugfs file entries.
6507  */
6508 static int stmmac_device_event(struct notifier_block *unused,
6509 			       unsigned long event, void *ptr)
6510 {
6511 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6512 	struct stmmac_priv *priv = netdev_priv(dev);
6513 
6514 	if (dev->netdev_ops != &stmmac_netdev_ops)
6515 		goto done;
6516 
6517 	switch (event) {
6518 	case NETDEV_CHANGENAME:
6519 		if (priv->dbgfs_dir)
6520 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6521 							 priv->dbgfs_dir,
6522 							 stmmac_fs_dir,
6523 							 dev->name);
6524 		break;
6525 	}
6526 done:
6527 	return NOTIFY_DONE;
6528 }
6529 
6530 static struct notifier_block stmmac_notifier = {
6531 	.notifier_call = stmmac_device_event,
6532 };
6533 
6534 static void stmmac_init_fs(struct net_device *dev)
6535 {
6536 	struct stmmac_priv *priv = netdev_priv(dev);
6537 
6538 	rtnl_lock();
6539 
6540 	/* Create per netdev entries */
6541 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6542 
6543 	/* Entry to report DMA RX/TX rings */
6544 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6545 			    &stmmac_rings_status_fops);
6546 
6547 	/* Entry to report the DMA HW features */
6548 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6549 			    &stmmac_dma_cap_fops);
6550 
6551 	rtnl_unlock();
6552 }
6553 
6554 static void stmmac_exit_fs(struct net_device *dev)
6555 {
6556 	struct stmmac_priv *priv = netdev_priv(dev);
6557 
6558 	debugfs_remove_recursive(priv->dbgfs_dir);
6559 }
6560 #endif /* CONFIG_DEBUG_FS */
6561 
6562 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6563 {
6564 	unsigned char *data = (unsigned char *)&vid_le;
6565 	unsigned char data_byte = 0;
6566 	u32 crc = ~0x0;
6567 	u32 temp = 0;
6568 	int i, bits;
6569 
6570 	bits = get_bitmask_order(VLAN_VID_MASK);
6571 	for (i = 0; i < bits; i++) {
6572 		if ((i % 8) == 0)
6573 			data_byte = data[i / 8];
6574 
6575 		temp = ((crc & 1) ^ data_byte) & 1;
6576 		crc >>= 1;
6577 		data_byte >>= 1;
6578 
6579 		if (temp)
6580 			crc ^= 0xedb88320;
6581 	}
6582 
6583 	return crc;
6584 }
6585 
6586 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6587 {
6588 	u32 crc, hash = 0;
6589 	__le16 pmatch = 0;
6590 	int count = 0;
6591 	u16 vid = 0;
6592 
6593 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6594 		__le16 vid_le = cpu_to_le16(vid);
6595 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6596 		hash |= (1 << crc);
6597 		count++;
6598 	}
6599 
6600 	if (!priv->dma_cap.vlhash) {
6601 		if (count > 2) /* VID = 0 always passes filter */
6602 			return -EOPNOTSUPP;
6603 
6604 		pmatch = cpu_to_le16(vid);
6605 		hash = 0;
6606 	}
6607 
6608 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6609 }
6610 
6611 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6612 {
6613 	struct stmmac_priv *priv = netdev_priv(ndev);
6614 	bool is_double = false;
6615 	int ret;
6616 
6617 	ret = pm_runtime_resume_and_get(priv->device);
6618 	if (ret < 0)
6619 		return ret;
6620 
6621 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6622 		is_double = true;
6623 
6624 	set_bit(vid, priv->active_vlans);
6625 	ret = stmmac_vlan_update(priv, is_double);
6626 	if (ret) {
6627 		clear_bit(vid, priv->active_vlans);
6628 		goto err_pm_put;
6629 	}
6630 
6631 	if (priv->hw->num_vlan) {
6632 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6633 		if (ret)
6634 			goto err_pm_put;
6635 	}
6636 err_pm_put:
6637 	pm_runtime_put(priv->device);
6638 
6639 	return ret;
6640 }
6641 
6642 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6643 {
6644 	struct stmmac_priv *priv = netdev_priv(ndev);
6645 	bool is_double = false;
6646 	int ret;
6647 
6648 	ret = pm_runtime_resume_and_get(priv->device);
6649 	if (ret < 0)
6650 		return ret;
6651 
6652 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6653 		is_double = true;
6654 
6655 	clear_bit(vid, priv->active_vlans);
6656 
6657 	if (priv->hw->num_vlan) {
6658 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6659 		if (ret)
6660 			goto del_vlan_error;
6661 	}
6662 
6663 	ret = stmmac_vlan_update(priv, is_double);
6664 
6665 del_vlan_error:
6666 	pm_runtime_put(priv->device);
6667 
6668 	return ret;
6669 }
6670 
6671 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6672 {
6673 	struct stmmac_priv *priv = netdev_priv(dev);
6674 
6675 	switch (bpf->command) {
6676 	case XDP_SETUP_PROG:
6677 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6678 	case XDP_SETUP_XSK_POOL:
6679 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6680 					     bpf->xsk.queue_id);
6681 	default:
6682 		return -EOPNOTSUPP;
6683 	}
6684 }
6685 
6686 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6687 			   struct xdp_frame **frames, u32 flags)
6688 {
6689 	struct stmmac_priv *priv = netdev_priv(dev);
6690 	int cpu = smp_processor_id();
6691 	struct netdev_queue *nq;
6692 	int i, nxmit = 0;
6693 	int queue;
6694 
6695 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6696 		return -ENETDOWN;
6697 
6698 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6699 		return -EINVAL;
6700 
6701 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6702 	nq = netdev_get_tx_queue(priv->dev, queue);
6703 
6704 	__netif_tx_lock(nq, cpu);
6705 	/* Avoids TX time-out as we are sharing with slow path */
6706 	txq_trans_cond_update(nq);
6707 
6708 	for (i = 0; i < num_frames; i++) {
6709 		int res;
6710 
6711 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6712 		if (res == STMMAC_XDP_CONSUMED)
6713 			break;
6714 
6715 		nxmit++;
6716 	}
6717 
6718 	if (flags & XDP_XMIT_FLUSH) {
6719 		stmmac_flush_tx_descriptors(priv, queue);
6720 		stmmac_tx_timer_arm(priv, queue);
6721 	}
6722 
6723 	__netif_tx_unlock(nq);
6724 
6725 	return nxmit;
6726 }
6727 
6728 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6729 {
6730 	struct stmmac_channel *ch = &priv->channel[queue];
6731 	unsigned long flags;
6732 
6733 	spin_lock_irqsave(&ch->lock, flags);
6734 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6735 	spin_unlock_irqrestore(&ch->lock, flags);
6736 
6737 	stmmac_stop_rx_dma(priv, queue);
6738 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6739 }
6740 
6741 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6742 {
6743 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6744 	struct stmmac_channel *ch = &priv->channel[queue];
6745 	unsigned long flags;
6746 	u32 buf_size;
6747 	int ret;
6748 
6749 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6750 	if (ret) {
6751 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6752 		return;
6753 	}
6754 
6755 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6756 	if (ret) {
6757 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6758 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6759 		return;
6760 	}
6761 
6762 	stmmac_reset_rx_queue(priv, queue);
6763 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6764 
6765 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6766 			    rx_q->dma_rx_phy, rx_q->queue_index);
6767 
6768 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6769 			     sizeof(struct dma_desc));
6770 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6771 			       rx_q->rx_tail_addr, rx_q->queue_index);
6772 
6773 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6774 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6775 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6776 				      buf_size,
6777 				      rx_q->queue_index);
6778 	} else {
6779 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6780 				      priv->dma_conf.dma_buf_sz,
6781 				      rx_q->queue_index);
6782 	}
6783 
6784 	stmmac_start_rx_dma(priv, queue);
6785 
6786 	spin_lock_irqsave(&ch->lock, flags);
6787 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6788 	spin_unlock_irqrestore(&ch->lock, flags);
6789 }
6790 
6791 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6792 {
6793 	struct stmmac_channel *ch = &priv->channel[queue];
6794 	unsigned long flags;
6795 
6796 	spin_lock_irqsave(&ch->lock, flags);
6797 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6798 	spin_unlock_irqrestore(&ch->lock, flags);
6799 
6800 	stmmac_stop_tx_dma(priv, queue);
6801 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6802 }
6803 
6804 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6805 {
6806 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6807 	struct stmmac_channel *ch = &priv->channel[queue];
6808 	unsigned long flags;
6809 	int ret;
6810 
6811 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6812 	if (ret) {
6813 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6814 		return;
6815 	}
6816 
6817 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6818 	if (ret) {
6819 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6820 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6821 		return;
6822 	}
6823 
6824 	stmmac_reset_tx_queue(priv, queue);
6825 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6826 
6827 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6828 			    tx_q->dma_tx_phy, tx_q->queue_index);
6829 
6830 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6831 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6832 
6833 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6834 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6835 			       tx_q->tx_tail_addr, tx_q->queue_index);
6836 
6837 	stmmac_start_tx_dma(priv, queue);
6838 
6839 	spin_lock_irqsave(&ch->lock, flags);
6840 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6841 	spin_unlock_irqrestore(&ch->lock, flags);
6842 }
6843 
6844 void stmmac_xdp_release(struct net_device *dev)
6845 {
6846 	struct stmmac_priv *priv = netdev_priv(dev);
6847 	u32 chan;
6848 
6849 	/* Ensure tx function is not running */
6850 	netif_tx_disable(dev);
6851 
6852 	/* Disable NAPI process */
6853 	stmmac_disable_all_queues(priv);
6854 
6855 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6856 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6857 
6858 	/* Free the IRQ lines */
6859 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6860 
6861 	/* Stop TX/RX DMA channels */
6862 	stmmac_stop_all_dma(priv);
6863 
6864 	/* Release and free the Rx/Tx resources */
6865 	free_dma_desc_resources(priv, &priv->dma_conf);
6866 
6867 	/* Disable the MAC Rx/Tx */
6868 	stmmac_mac_set(priv, priv->ioaddr, false);
6869 
6870 	/* set trans_start so we don't get spurious
6871 	 * watchdogs during reset
6872 	 */
6873 	netif_trans_update(dev);
6874 	netif_carrier_off(dev);
6875 }
6876 
6877 int stmmac_xdp_open(struct net_device *dev)
6878 {
6879 	struct stmmac_priv *priv = netdev_priv(dev);
6880 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6881 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6882 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6883 	struct stmmac_rx_queue *rx_q;
6884 	struct stmmac_tx_queue *tx_q;
6885 	u32 buf_size;
6886 	bool sph_en;
6887 	u32 chan;
6888 	int ret;
6889 
6890 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6891 	if (ret < 0) {
6892 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6893 			   __func__);
6894 		goto dma_desc_error;
6895 	}
6896 
6897 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6898 	if (ret < 0) {
6899 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6900 			   __func__);
6901 		goto init_error;
6902 	}
6903 
6904 	stmmac_reset_queues_param(priv);
6905 
6906 	/* DMA CSR Channel configuration */
6907 	for (chan = 0; chan < dma_csr_ch; chan++) {
6908 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6909 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6910 	}
6911 
6912 	/* Adjust Split header */
6913 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6914 
6915 	/* DMA RX Channel Configuration */
6916 	for (chan = 0; chan < rx_cnt; chan++) {
6917 		rx_q = &priv->dma_conf.rx_queue[chan];
6918 
6919 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6920 				    rx_q->dma_rx_phy, chan);
6921 
6922 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6923 				     (rx_q->buf_alloc_num *
6924 				      sizeof(struct dma_desc));
6925 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6926 				       rx_q->rx_tail_addr, chan);
6927 
6928 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6929 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6930 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6931 					      buf_size,
6932 					      rx_q->queue_index);
6933 		} else {
6934 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6935 					      priv->dma_conf.dma_buf_sz,
6936 					      rx_q->queue_index);
6937 		}
6938 
6939 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6940 	}
6941 
6942 	/* DMA TX Channel Configuration */
6943 	for (chan = 0; chan < tx_cnt; chan++) {
6944 		tx_q = &priv->dma_conf.tx_queue[chan];
6945 
6946 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6947 				    tx_q->dma_tx_phy, chan);
6948 
6949 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6950 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6951 				       tx_q->tx_tail_addr, chan);
6952 
6953 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6954 		tx_q->txtimer.function = stmmac_tx_timer;
6955 	}
6956 
6957 	/* Enable the MAC Rx/Tx */
6958 	stmmac_mac_set(priv, priv->ioaddr, true);
6959 
6960 	/* Start Rx & Tx DMA Channels */
6961 	stmmac_start_all_dma(priv);
6962 
6963 	ret = stmmac_request_irq(dev);
6964 	if (ret)
6965 		goto irq_error;
6966 
6967 	/* Enable NAPI process*/
6968 	stmmac_enable_all_queues(priv);
6969 	netif_carrier_on(dev);
6970 	netif_tx_start_all_queues(dev);
6971 	stmmac_enable_all_dma_irq(priv);
6972 
6973 	return 0;
6974 
6975 irq_error:
6976 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6977 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6978 
6979 	stmmac_hw_teardown(dev);
6980 init_error:
6981 	free_dma_desc_resources(priv, &priv->dma_conf);
6982 dma_desc_error:
6983 	return ret;
6984 }
6985 
6986 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6987 {
6988 	struct stmmac_priv *priv = netdev_priv(dev);
6989 	struct stmmac_rx_queue *rx_q;
6990 	struct stmmac_tx_queue *tx_q;
6991 	struct stmmac_channel *ch;
6992 
6993 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6994 	    !netif_carrier_ok(priv->dev))
6995 		return -ENETDOWN;
6996 
6997 	if (!stmmac_xdp_is_enabled(priv))
6998 		return -EINVAL;
6999 
7000 	if (queue >= priv->plat->rx_queues_to_use ||
7001 	    queue >= priv->plat->tx_queues_to_use)
7002 		return -EINVAL;
7003 
7004 	rx_q = &priv->dma_conf.rx_queue[queue];
7005 	tx_q = &priv->dma_conf.tx_queue[queue];
7006 	ch = &priv->channel[queue];
7007 
7008 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7009 		return -EINVAL;
7010 
7011 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7012 		/* EQoS does not have per-DMA channel SW interrupt,
7013 		 * so we schedule RX Napi straight-away.
7014 		 */
7015 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7016 			__napi_schedule(&ch->rxtx_napi);
7017 	}
7018 
7019 	return 0;
7020 }
7021 
7022 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7023 {
7024 	struct stmmac_priv *priv = netdev_priv(dev);
7025 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7026 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7027 	unsigned int start;
7028 	int q;
7029 
7030 	for (q = 0; q < tx_cnt; q++) {
7031 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7032 		u64 tx_packets;
7033 		u64 tx_bytes;
7034 
7035 		do {
7036 			start = u64_stats_fetch_begin(&txq_stats->syncp);
7037 			tx_packets = txq_stats->tx_packets;
7038 			tx_bytes   = txq_stats->tx_bytes;
7039 		} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
7040 
7041 		stats->tx_packets += tx_packets;
7042 		stats->tx_bytes += tx_bytes;
7043 	}
7044 
7045 	for (q = 0; q < rx_cnt; q++) {
7046 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7047 		u64 rx_packets;
7048 		u64 rx_bytes;
7049 
7050 		do {
7051 			start = u64_stats_fetch_begin(&rxq_stats->syncp);
7052 			rx_packets = rxq_stats->rx_packets;
7053 			rx_bytes   = rxq_stats->rx_bytes;
7054 		} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
7055 
7056 		stats->rx_packets += rx_packets;
7057 		stats->rx_bytes += rx_bytes;
7058 	}
7059 
7060 	stats->rx_dropped = priv->xstats.rx_dropped;
7061 	stats->rx_errors = priv->xstats.rx_errors;
7062 	stats->tx_dropped = priv->xstats.tx_dropped;
7063 	stats->tx_errors = priv->xstats.tx_errors;
7064 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7065 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7066 	stats->rx_length_errors = priv->xstats.rx_length;
7067 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7068 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7069 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7070 }
7071 
7072 static const struct net_device_ops stmmac_netdev_ops = {
7073 	.ndo_open = stmmac_open,
7074 	.ndo_start_xmit = stmmac_xmit,
7075 	.ndo_stop = stmmac_release,
7076 	.ndo_change_mtu = stmmac_change_mtu,
7077 	.ndo_fix_features = stmmac_fix_features,
7078 	.ndo_set_features = stmmac_set_features,
7079 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7080 	.ndo_tx_timeout = stmmac_tx_timeout,
7081 	.ndo_eth_ioctl = stmmac_ioctl,
7082 	.ndo_get_stats64 = stmmac_get_stats64,
7083 	.ndo_setup_tc = stmmac_setup_tc,
7084 	.ndo_select_queue = stmmac_select_queue,
7085 	.ndo_set_mac_address = stmmac_set_mac_address,
7086 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7087 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7088 	.ndo_bpf = stmmac_bpf,
7089 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7090 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7091 };
7092 
7093 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7094 {
7095 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7096 		return;
7097 	if (test_bit(STMMAC_DOWN, &priv->state))
7098 		return;
7099 
7100 	netdev_err(priv->dev, "Reset adapter.\n");
7101 
7102 	rtnl_lock();
7103 	netif_trans_update(priv->dev);
7104 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7105 		usleep_range(1000, 2000);
7106 
7107 	set_bit(STMMAC_DOWN, &priv->state);
7108 	dev_close(priv->dev);
7109 	dev_open(priv->dev, NULL);
7110 	clear_bit(STMMAC_DOWN, &priv->state);
7111 	clear_bit(STMMAC_RESETING, &priv->state);
7112 	rtnl_unlock();
7113 }
7114 
7115 static void stmmac_service_task(struct work_struct *work)
7116 {
7117 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7118 			service_task);
7119 
7120 	stmmac_reset_subtask(priv);
7121 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7122 }
7123 
7124 /**
7125  *  stmmac_hw_init - Init the MAC device
7126  *  @priv: driver private structure
7127  *  Description: this function is to configure the MAC device according to
7128  *  some platform parameters or the HW capability register. It prepares the
7129  *  driver to use either ring or chain modes and to setup either enhanced or
7130  *  normal descriptors.
7131  */
7132 static int stmmac_hw_init(struct stmmac_priv *priv)
7133 {
7134 	int ret;
7135 
7136 	/* dwmac-sun8i only work in chain mode */
7137 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7138 		chain_mode = 1;
7139 	priv->chain_mode = chain_mode;
7140 
7141 	/* Initialize HW Interface */
7142 	ret = stmmac_hwif_init(priv);
7143 	if (ret)
7144 		return ret;
7145 
7146 	/* Get the HW capability (new GMAC newer than 3.50a) */
7147 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7148 	if (priv->hw_cap_support) {
7149 		dev_info(priv->device, "DMA HW capability register supported\n");
7150 
7151 		/* We can override some gmac/dma configuration fields: e.g.
7152 		 * enh_desc, tx_coe (e.g. that are passed through the
7153 		 * platform) with the values from the HW capability
7154 		 * register (if supported).
7155 		 */
7156 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7157 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7158 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7159 		priv->hw->pmt = priv->plat->pmt;
7160 		if (priv->dma_cap.hash_tb_sz) {
7161 			priv->hw->multicast_filter_bins =
7162 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7163 			priv->hw->mcast_bits_log2 =
7164 					ilog2(priv->hw->multicast_filter_bins);
7165 		}
7166 
7167 		/* TXCOE doesn't work in thresh DMA mode */
7168 		if (priv->plat->force_thresh_dma_mode)
7169 			priv->plat->tx_coe = 0;
7170 		else
7171 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7172 
7173 		/* In case of GMAC4 rx_coe is from HW cap register. */
7174 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7175 
7176 		if (priv->dma_cap.rx_coe_type2)
7177 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7178 		else if (priv->dma_cap.rx_coe_type1)
7179 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7180 
7181 	} else {
7182 		dev_info(priv->device, "No HW DMA feature register supported\n");
7183 	}
7184 
7185 	if (priv->plat->rx_coe) {
7186 		priv->hw->rx_csum = priv->plat->rx_coe;
7187 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7188 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7189 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7190 	}
7191 	if (priv->plat->tx_coe)
7192 		dev_info(priv->device, "TX Checksum insertion supported\n");
7193 
7194 	if (priv->plat->pmt) {
7195 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7196 		device_set_wakeup_capable(priv->device, 1);
7197 	}
7198 
7199 	if (priv->dma_cap.tsoen)
7200 		dev_info(priv->device, "TSO supported\n");
7201 
7202 	priv->hw->vlan_fail_q_en =
7203 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7204 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7205 
7206 	/* Run HW quirks, if any */
7207 	if (priv->hwif_quirks) {
7208 		ret = priv->hwif_quirks(priv);
7209 		if (ret)
7210 			return ret;
7211 	}
7212 
7213 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7214 	 * In some case, for example on bugged HW this feature
7215 	 * has to be disable and this can be done by passing the
7216 	 * riwt_off field from the platform.
7217 	 */
7218 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7219 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7220 		priv->use_riwt = 1;
7221 		dev_info(priv->device,
7222 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7223 	}
7224 
7225 	return 0;
7226 }
7227 
7228 static void stmmac_napi_add(struct net_device *dev)
7229 {
7230 	struct stmmac_priv *priv = netdev_priv(dev);
7231 	u32 queue, maxq;
7232 
7233 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7234 
7235 	for (queue = 0; queue < maxq; queue++) {
7236 		struct stmmac_channel *ch = &priv->channel[queue];
7237 
7238 		ch->priv_data = priv;
7239 		ch->index = queue;
7240 		spin_lock_init(&ch->lock);
7241 
7242 		if (queue < priv->plat->rx_queues_to_use) {
7243 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7244 		}
7245 		if (queue < priv->plat->tx_queues_to_use) {
7246 			netif_napi_add_tx(dev, &ch->tx_napi,
7247 					  stmmac_napi_poll_tx);
7248 		}
7249 		if (queue < priv->plat->rx_queues_to_use &&
7250 		    queue < priv->plat->tx_queues_to_use) {
7251 			netif_napi_add(dev, &ch->rxtx_napi,
7252 				       stmmac_napi_poll_rxtx);
7253 		}
7254 	}
7255 }
7256 
7257 static void stmmac_napi_del(struct net_device *dev)
7258 {
7259 	struct stmmac_priv *priv = netdev_priv(dev);
7260 	u32 queue, maxq;
7261 
7262 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7263 
7264 	for (queue = 0; queue < maxq; queue++) {
7265 		struct stmmac_channel *ch = &priv->channel[queue];
7266 
7267 		if (queue < priv->plat->rx_queues_to_use)
7268 			netif_napi_del(&ch->rx_napi);
7269 		if (queue < priv->plat->tx_queues_to_use)
7270 			netif_napi_del(&ch->tx_napi);
7271 		if (queue < priv->plat->rx_queues_to_use &&
7272 		    queue < priv->plat->tx_queues_to_use) {
7273 			netif_napi_del(&ch->rxtx_napi);
7274 		}
7275 	}
7276 }
7277 
7278 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7279 {
7280 	struct stmmac_priv *priv = netdev_priv(dev);
7281 	int ret = 0, i;
7282 
7283 	if (netif_running(dev))
7284 		stmmac_release(dev);
7285 
7286 	stmmac_napi_del(dev);
7287 
7288 	priv->plat->rx_queues_to_use = rx_cnt;
7289 	priv->plat->tx_queues_to_use = tx_cnt;
7290 	if (!netif_is_rxfh_configured(dev))
7291 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7292 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7293 									rx_cnt);
7294 
7295 	stmmac_set_half_duplex(priv);
7296 	stmmac_napi_add(dev);
7297 
7298 	if (netif_running(dev))
7299 		ret = stmmac_open(dev);
7300 
7301 	return ret;
7302 }
7303 
7304 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7305 {
7306 	struct stmmac_priv *priv = netdev_priv(dev);
7307 	int ret = 0;
7308 
7309 	if (netif_running(dev))
7310 		stmmac_release(dev);
7311 
7312 	priv->dma_conf.dma_rx_size = rx_size;
7313 	priv->dma_conf.dma_tx_size = tx_size;
7314 
7315 	if (netif_running(dev))
7316 		ret = stmmac_open(dev);
7317 
7318 	return ret;
7319 }
7320 
7321 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7322 static void stmmac_fpe_lp_task(struct work_struct *work)
7323 {
7324 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7325 						fpe_task);
7326 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7327 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7328 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7329 	bool *hs_enable = &fpe_cfg->hs_enable;
7330 	bool *enable = &fpe_cfg->enable;
7331 	int retries = 20;
7332 
7333 	while (retries-- > 0) {
7334 		/* Bail out immediately if FPE handshake is OFF */
7335 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7336 			break;
7337 
7338 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7339 		    *lp_state == FPE_STATE_ENTERING_ON) {
7340 			stmmac_fpe_configure(priv, priv->ioaddr,
7341 					     priv->plat->tx_queues_to_use,
7342 					     priv->plat->rx_queues_to_use,
7343 					     *enable);
7344 
7345 			netdev_info(priv->dev, "configured FPE\n");
7346 
7347 			*lo_state = FPE_STATE_ON;
7348 			*lp_state = FPE_STATE_ON;
7349 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7350 			break;
7351 		}
7352 
7353 		if ((*lo_state == FPE_STATE_CAPABLE ||
7354 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7355 		     *lp_state != FPE_STATE_ON) {
7356 			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
7357 				    *lo_state, *lp_state);
7358 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7359 						MPACKET_VERIFY);
7360 		}
7361 		/* Sleep then retry */
7362 		msleep(500);
7363 	}
7364 
7365 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7366 }
7367 
7368 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7369 {
7370 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7371 		if (enable) {
7372 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7373 						MPACKET_VERIFY);
7374 		} else {
7375 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7376 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7377 		}
7378 
7379 		priv->plat->fpe_cfg->hs_enable = enable;
7380 	}
7381 }
7382 
7383 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7384 {
7385 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7386 	struct dma_desc *desc_contains_ts = ctx->desc;
7387 	struct stmmac_priv *priv = ctx->priv;
7388 	struct dma_desc *ndesc = ctx->ndesc;
7389 	struct dma_desc *desc = ctx->desc;
7390 	u64 ns = 0;
7391 
7392 	if (!priv->hwts_rx_en)
7393 		return -ENODATA;
7394 
7395 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7396 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7397 		desc_contains_ts = ndesc;
7398 
7399 	/* Check if timestamp is available */
7400 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7401 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7402 		ns -= priv->plat->cdc_error_adj;
7403 		*timestamp = ns_to_ktime(ns);
7404 		return 0;
7405 	}
7406 
7407 	return -ENODATA;
7408 }
7409 
7410 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7411 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7412 };
7413 
7414 /**
7415  * stmmac_dvr_probe
7416  * @device: device pointer
7417  * @plat_dat: platform data pointer
7418  * @res: stmmac resource pointer
7419  * Description: this is the main probe function used to
7420  * call the alloc_etherdev, allocate the priv structure.
7421  * Return:
7422  * returns 0 on success, otherwise errno.
7423  */
7424 int stmmac_dvr_probe(struct device *device,
7425 		     struct plat_stmmacenet_data *plat_dat,
7426 		     struct stmmac_resources *res)
7427 {
7428 	struct net_device *ndev = NULL;
7429 	struct stmmac_priv *priv;
7430 	u32 rxq;
7431 	int i, ret = 0;
7432 
7433 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7434 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7435 	if (!ndev)
7436 		return -ENOMEM;
7437 
7438 	SET_NETDEV_DEV(ndev, device);
7439 
7440 	priv = netdev_priv(ndev);
7441 	priv->device = device;
7442 	priv->dev = ndev;
7443 
7444 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7445 		u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
7446 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7447 		u64_stats_init(&priv->xstats.txq_stats[i].syncp);
7448 
7449 	stmmac_set_ethtool_ops(ndev);
7450 	priv->pause = pause;
7451 	priv->plat = plat_dat;
7452 	priv->ioaddr = res->addr;
7453 	priv->dev->base_addr = (unsigned long)res->addr;
7454 	priv->plat->dma_cfg->multi_msi_en =
7455 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7456 
7457 	priv->dev->irq = res->irq;
7458 	priv->wol_irq = res->wol_irq;
7459 	priv->lpi_irq = res->lpi_irq;
7460 	priv->sfty_ce_irq = res->sfty_ce_irq;
7461 	priv->sfty_ue_irq = res->sfty_ue_irq;
7462 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7463 		priv->rx_irq[i] = res->rx_irq[i];
7464 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7465 		priv->tx_irq[i] = res->tx_irq[i];
7466 
7467 	if (!is_zero_ether_addr(res->mac))
7468 		eth_hw_addr_set(priv->dev, res->mac);
7469 
7470 	dev_set_drvdata(device, priv->dev);
7471 
7472 	/* Verify driver arguments */
7473 	stmmac_verify_args();
7474 
7475 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7476 	if (!priv->af_xdp_zc_qps)
7477 		return -ENOMEM;
7478 
7479 	/* Allocate workqueue */
7480 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7481 	if (!priv->wq) {
7482 		dev_err(priv->device, "failed to create workqueue\n");
7483 		ret = -ENOMEM;
7484 		goto error_wq_init;
7485 	}
7486 
7487 	INIT_WORK(&priv->service_task, stmmac_service_task);
7488 
7489 	/* Initialize Link Partner FPE workqueue */
7490 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7491 
7492 	/* Override with kernel parameters if supplied XXX CRS XXX
7493 	 * this needs to have multiple instances
7494 	 */
7495 	if ((phyaddr >= 0) && (phyaddr <= 31))
7496 		priv->plat->phy_addr = phyaddr;
7497 
7498 	if (priv->plat->stmmac_rst) {
7499 		ret = reset_control_assert(priv->plat->stmmac_rst);
7500 		reset_control_deassert(priv->plat->stmmac_rst);
7501 		/* Some reset controllers have only reset callback instead of
7502 		 * assert + deassert callbacks pair.
7503 		 */
7504 		if (ret == -ENOTSUPP)
7505 			reset_control_reset(priv->plat->stmmac_rst);
7506 	}
7507 
7508 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7509 	if (ret == -ENOTSUPP)
7510 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7511 			ERR_PTR(ret));
7512 
7513 	/* Init MAC and get the capabilities */
7514 	ret = stmmac_hw_init(priv);
7515 	if (ret)
7516 		goto error_hw_init;
7517 
7518 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7519 	 */
7520 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7521 		priv->plat->dma_cfg->dche = false;
7522 
7523 	stmmac_check_ether_addr(priv);
7524 
7525 	ndev->netdev_ops = &stmmac_netdev_ops;
7526 
7527 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7528 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7529 
7530 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7531 			    NETIF_F_RXCSUM;
7532 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7533 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7534 
7535 	ret = stmmac_tc_init(priv, priv);
7536 	if (!ret) {
7537 		ndev->hw_features |= NETIF_F_HW_TC;
7538 	}
7539 
7540 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7541 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7542 		if (priv->plat->has_gmac4)
7543 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7544 		priv->tso = true;
7545 		dev_info(priv->device, "TSO feature enabled\n");
7546 	}
7547 
7548 	if (priv->dma_cap.sphen &&
7549 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7550 		ndev->hw_features |= NETIF_F_GRO;
7551 		priv->sph_cap = true;
7552 		priv->sph = priv->sph_cap;
7553 		dev_info(priv->device, "SPH feature enabled\n");
7554 	}
7555 
7556 	/* Ideally our host DMA address width is the same as for the
7557 	 * device. However, it may differ and then we have to use our
7558 	 * host DMA width for allocation and the device DMA width for
7559 	 * register handling.
7560 	 */
7561 	if (priv->plat->host_dma_width)
7562 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7563 	else
7564 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7565 
7566 	if (priv->dma_cap.host_dma_width) {
7567 		ret = dma_set_mask_and_coherent(device,
7568 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7569 		if (!ret) {
7570 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7571 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7572 
7573 			/*
7574 			 * If more than 32 bits can be addressed, make sure to
7575 			 * enable enhanced addressing mode.
7576 			 */
7577 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7578 				priv->plat->dma_cfg->eame = true;
7579 		} else {
7580 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7581 			if (ret) {
7582 				dev_err(priv->device, "Failed to set DMA Mask\n");
7583 				goto error_hw_init;
7584 			}
7585 
7586 			priv->dma_cap.host_dma_width = 32;
7587 		}
7588 	}
7589 
7590 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7591 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7592 #ifdef STMMAC_VLAN_TAG_USED
7593 	/* Both mac100 and gmac support receive VLAN tag detection */
7594 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7595 	ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7596 	priv->hw->hw_vlan_en = true;
7597 
7598 	if (priv->dma_cap.vlhash) {
7599 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7600 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7601 	}
7602 	if (priv->dma_cap.vlins) {
7603 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7604 		if (priv->dma_cap.dvlan)
7605 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7606 	}
7607 #endif
7608 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7609 
7610 	priv->xstats.threshold = tc;
7611 
7612 	/* Initialize RSS */
7613 	rxq = priv->plat->rx_queues_to_use;
7614 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7615 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7616 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7617 
7618 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7619 		ndev->features |= NETIF_F_RXHASH;
7620 
7621 	ndev->vlan_features |= ndev->features;
7622 	/* TSO doesn't work on VLANs yet */
7623 	ndev->vlan_features &= ~NETIF_F_TSO;
7624 
7625 	/* MTU range: 46 - hw-specific max */
7626 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7627 	if (priv->plat->has_xgmac)
7628 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7629 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7630 		ndev->max_mtu = JUMBO_LEN;
7631 	else
7632 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7633 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7634 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7635 	 */
7636 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7637 	    (priv->plat->maxmtu >= ndev->min_mtu))
7638 		ndev->max_mtu = priv->plat->maxmtu;
7639 	else if (priv->plat->maxmtu < ndev->min_mtu)
7640 		dev_warn(priv->device,
7641 			 "%s: warning: maxmtu having invalid value (%d)\n",
7642 			 __func__, priv->plat->maxmtu);
7643 
7644 	if (flow_ctrl)
7645 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7646 
7647 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7648 
7649 	/* Setup channels NAPI */
7650 	stmmac_napi_add(ndev);
7651 
7652 	mutex_init(&priv->lock);
7653 
7654 	/* If a specific clk_csr value is passed from the platform
7655 	 * this means that the CSR Clock Range selection cannot be
7656 	 * changed at run-time and it is fixed. Viceversa the driver'll try to
7657 	 * set the MDC clock dynamically according to the csr actual
7658 	 * clock input.
7659 	 */
7660 	if (priv->plat->clk_csr >= 0)
7661 		priv->clk_csr = priv->plat->clk_csr;
7662 	else
7663 		stmmac_clk_csr_set(priv);
7664 
7665 	stmmac_check_pcs_mode(priv);
7666 
7667 	pm_runtime_get_noresume(device);
7668 	pm_runtime_set_active(device);
7669 	if (!pm_runtime_enabled(device))
7670 		pm_runtime_enable(device);
7671 
7672 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7673 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7674 		/* MDIO bus Registration */
7675 		ret = stmmac_mdio_register(ndev);
7676 		if (ret < 0) {
7677 			dev_err_probe(priv->device, ret,
7678 				      "%s: MDIO bus (id: %d) registration failed\n",
7679 				      __func__, priv->plat->bus_id);
7680 			goto error_mdio_register;
7681 		}
7682 	}
7683 
7684 	if (priv->plat->speed_mode_2500)
7685 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7686 
7687 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7688 		ret = stmmac_xpcs_setup(priv->mii);
7689 		if (ret)
7690 			goto error_xpcs_setup;
7691 	}
7692 
7693 	ret = stmmac_phy_setup(priv);
7694 	if (ret) {
7695 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7696 		goto error_phy_setup;
7697 	}
7698 
7699 	ret = register_netdev(ndev);
7700 	if (ret) {
7701 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7702 			__func__, ret);
7703 		goto error_netdev_register;
7704 	}
7705 
7706 #ifdef CONFIG_DEBUG_FS
7707 	stmmac_init_fs(ndev);
7708 #endif
7709 
7710 	if (priv->plat->dump_debug_regs)
7711 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7712 
7713 	/* Let pm_runtime_put() disable the clocks.
7714 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7715 	 */
7716 	pm_runtime_put(device);
7717 
7718 	return ret;
7719 
7720 error_netdev_register:
7721 	phylink_destroy(priv->phylink);
7722 error_xpcs_setup:
7723 error_phy_setup:
7724 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7725 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7726 		stmmac_mdio_unregister(ndev);
7727 error_mdio_register:
7728 	stmmac_napi_del(ndev);
7729 error_hw_init:
7730 	destroy_workqueue(priv->wq);
7731 error_wq_init:
7732 	bitmap_free(priv->af_xdp_zc_qps);
7733 
7734 	return ret;
7735 }
7736 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7737 
7738 /**
7739  * stmmac_dvr_remove
7740  * @dev: device pointer
7741  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7742  * changes the link status, releases the DMA descriptor rings.
7743  */
7744 void stmmac_dvr_remove(struct device *dev)
7745 {
7746 	struct net_device *ndev = dev_get_drvdata(dev);
7747 	struct stmmac_priv *priv = netdev_priv(ndev);
7748 
7749 	netdev_info(priv->dev, "%s: removing driver", __func__);
7750 
7751 	pm_runtime_get_sync(dev);
7752 
7753 	stmmac_stop_all_dma(priv);
7754 	stmmac_mac_set(priv, priv->ioaddr, false);
7755 	netif_carrier_off(ndev);
7756 	unregister_netdev(ndev);
7757 
7758 #ifdef CONFIG_DEBUG_FS
7759 	stmmac_exit_fs(ndev);
7760 #endif
7761 	phylink_destroy(priv->phylink);
7762 	if (priv->plat->stmmac_rst)
7763 		reset_control_assert(priv->plat->stmmac_rst);
7764 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7765 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7766 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7767 		stmmac_mdio_unregister(ndev);
7768 	destroy_workqueue(priv->wq);
7769 	mutex_destroy(&priv->lock);
7770 	bitmap_free(priv->af_xdp_zc_qps);
7771 
7772 	pm_runtime_disable(dev);
7773 	pm_runtime_put_noidle(dev);
7774 }
7775 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7776 
7777 /**
7778  * stmmac_suspend - suspend callback
7779  * @dev: device pointer
7780  * Description: this is the function to suspend the device and it is called
7781  * by the platform driver to stop the network queue, release the resources,
7782  * program the PMT register (for WoL), clean and release driver resources.
7783  */
7784 int stmmac_suspend(struct device *dev)
7785 {
7786 	struct net_device *ndev = dev_get_drvdata(dev);
7787 	struct stmmac_priv *priv = netdev_priv(ndev);
7788 	u32 chan;
7789 
7790 	if (!ndev || !netif_running(ndev))
7791 		return 0;
7792 
7793 	mutex_lock(&priv->lock);
7794 
7795 	netif_device_detach(ndev);
7796 
7797 	stmmac_disable_all_queues(priv);
7798 
7799 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7800 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7801 
7802 	if (priv->eee_enabled) {
7803 		priv->tx_path_in_lpi_mode = false;
7804 		del_timer_sync(&priv->eee_ctrl_timer);
7805 	}
7806 
7807 	/* Stop TX/RX DMA */
7808 	stmmac_stop_all_dma(priv);
7809 
7810 	if (priv->plat->serdes_powerdown)
7811 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7812 
7813 	/* Enable Power down mode by programming the PMT regs */
7814 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7815 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7816 		priv->irq_wake = 1;
7817 	} else {
7818 		stmmac_mac_set(priv, priv->ioaddr, false);
7819 		pinctrl_pm_select_sleep_state(priv->device);
7820 	}
7821 
7822 	mutex_unlock(&priv->lock);
7823 
7824 	rtnl_lock();
7825 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7826 		phylink_suspend(priv->phylink, true);
7827 	} else {
7828 		if (device_may_wakeup(priv->device))
7829 			phylink_speed_down(priv->phylink, false);
7830 		phylink_suspend(priv->phylink, false);
7831 	}
7832 	rtnl_unlock();
7833 
7834 	if (priv->dma_cap.fpesel) {
7835 		/* Disable FPE */
7836 		stmmac_fpe_configure(priv, priv->ioaddr,
7837 				     priv->plat->tx_queues_to_use,
7838 				     priv->plat->rx_queues_to_use, false);
7839 
7840 		stmmac_fpe_handshake(priv, false);
7841 		stmmac_fpe_stop_wq(priv);
7842 	}
7843 
7844 	priv->speed = SPEED_UNKNOWN;
7845 	return 0;
7846 }
7847 EXPORT_SYMBOL_GPL(stmmac_suspend);
7848 
7849 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7850 {
7851 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7852 
7853 	rx_q->cur_rx = 0;
7854 	rx_q->dirty_rx = 0;
7855 }
7856 
7857 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7858 {
7859 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7860 
7861 	tx_q->cur_tx = 0;
7862 	tx_q->dirty_tx = 0;
7863 	tx_q->mss = 0;
7864 
7865 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7866 }
7867 
7868 /**
7869  * stmmac_reset_queues_param - reset queue parameters
7870  * @priv: device pointer
7871  */
7872 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7873 {
7874 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7875 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7876 	u32 queue;
7877 
7878 	for (queue = 0; queue < rx_cnt; queue++)
7879 		stmmac_reset_rx_queue(priv, queue);
7880 
7881 	for (queue = 0; queue < tx_cnt; queue++)
7882 		stmmac_reset_tx_queue(priv, queue);
7883 }
7884 
7885 /**
7886  * stmmac_resume - resume callback
7887  * @dev: device pointer
7888  * Description: when resume this function is invoked to setup the DMA and CORE
7889  * in a usable state.
7890  */
7891 int stmmac_resume(struct device *dev)
7892 {
7893 	struct net_device *ndev = dev_get_drvdata(dev);
7894 	struct stmmac_priv *priv = netdev_priv(ndev);
7895 	int ret;
7896 
7897 	if (!netif_running(ndev))
7898 		return 0;
7899 
7900 	/* Power Down bit, into the PM register, is cleared
7901 	 * automatically as soon as a magic packet or a Wake-up frame
7902 	 * is received. Anyway, it's better to manually clear
7903 	 * this bit because it can generate problems while resuming
7904 	 * from another devices (e.g. serial console).
7905 	 */
7906 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7907 		mutex_lock(&priv->lock);
7908 		stmmac_pmt(priv, priv->hw, 0);
7909 		mutex_unlock(&priv->lock);
7910 		priv->irq_wake = 0;
7911 	} else {
7912 		pinctrl_pm_select_default_state(priv->device);
7913 		/* reset the phy so that it's ready */
7914 		if (priv->mii)
7915 			stmmac_mdio_reset(priv->mii);
7916 	}
7917 
7918 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7919 	    priv->plat->serdes_powerup) {
7920 		ret = priv->plat->serdes_powerup(ndev,
7921 						 priv->plat->bsp_priv);
7922 
7923 		if (ret < 0)
7924 			return ret;
7925 	}
7926 
7927 	rtnl_lock();
7928 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7929 		phylink_resume(priv->phylink);
7930 	} else {
7931 		phylink_resume(priv->phylink);
7932 		if (device_may_wakeup(priv->device))
7933 			phylink_speed_up(priv->phylink);
7934 	}
7935 	rtnl_unlock();
7936 
7937 	rtnl_lock();
7938 	mutex_lock(&priv->lock);
7939 
7940 	stmmac_reset_queues_param(priv);
7941 
7942 	stmmac_free_tx_skbufs(priv);
7943 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7944 
7945 	stmmac_hw_setup(ndev, false);
7946 	stmmac_init_coalesce(priv);
7947 	stmmac_set_rx_mode(ndev);
7948 
7949 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7950 
7951 	stmmac_enable_all_queues(priv);
7952 	stmmac_enable_all_dma_irq(priv);
7953 
7954 	mutex_unlock(&priv->lock);
7955 	rtnl_unlock();
7956 
7957 	netif_device_attach(ndev);
7958 
7959 	return 0;
7960 }
7961 EXPORT_SYMBOL_GPL(stmmac_resume);
7962 
7963 #ifndef MODULE
7964 static int __init stmmac_cmdline_opt(char *str)
7965 {
7966 	char *opt;
7967 
7968 	if (!str || !*str)
7969 		return 1;
7970 	while ((opt = strsep(&str, ",")) != NULL) {
7971 		if (!strncmp(opt, "debug:", 6)) {
7972 			if (kstrtoint(opt + 6, 0, &debug))
7973 				goto err;
7974 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7975 			if (kstrtoint(opt + 8, 0, &phyaddr))
7976 				goto err;
7977 		} else if (!strncmp(opt, "buf_sz:", 7)) {
7978 			if (kstrtoint(opt + 7, 0, &buf_sz))
7979 				goto err;
7980 		} else if (!strncmp(opt, "tc:", 3)) {
7981 			if (kstrtoint(opt + 3, 0, &tc))
7982 				goto err;
7983 		} else if (!strncmp(opt, "watchdog:", 9)) {
7984 			if (kstrtoint(opt + 9, 0, &watchdog))
7985 				goto err;
7986 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7987 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7988 				goto err;
7989 		} else if (!strncmp(opt, "pause:", 6)) {
7990 			if (kstrtoint(opt + 6, 0, &pause))
7991 				goto err;
7992 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7993 			if (kstrtoint(opt + 10, 0, &eee_timer))
7994 				goto err;
7995 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7996 			if (kstrtoint(opt + 11, 0, &chain_mode))
7997 				goto err;
7998 		}
7999 	}
8000 	return 1;
8001 
8002 err:
8003 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8004 	return 1;
8005 }
8006 
8007 __setup("stmmaceth=", stmmac_cmdline_opt);
8008 #endif /* MODULE */
8009 
8010 static int __init stmmac_init(void)
8011 {
8012 #ifdef CONFIG_DEBUG_FS
8013 	/* Create debugfs main directory if it doesn't exist yet */
8014 	if (!stmmac_fs_dir)
8015 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8016 	register_netdevice_notifier(&stmmac_notifier);
8017 #endif
8018 
8019 	return 0;
8020 }
8021 
8022 static void __exit stmmac_exit(void)
8023 {
8024 #ifdef CONFIG_DEBUG_FS
8025 	unregister_netdevice_notifier(&stmmac_notifier);
8026 	debugfs_remove_recursive(stmmac_fs_dir);
8027 #endif
8028 }
8029 
8030 module_init(stmmac_init)
8031 module_exit(stmmac_exit)
8032 
8033 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8034 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8035 MODULE_LICENSE("GPL");
8036