xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 860a9bed265146b10311bcadbbcef59c3af4454d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
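/* For reference (assuming a 64-byte cache line, purely as an illustration):
 * STMMAC_ALIGN(1000) -> ALIGN(ALIGN(1000, 64), 16) -> 1024, and
 * TSO_MAX_BUFF_SIZE limits each TSO buffer to 16383 bytes.
 */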
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver uses ring mode to manage tx and rx descriptors,
122  * but the user can force chain mode instead of ring mode.
123  */
124 static int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
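/* The module parameters above can be overridden at load time, e.g. (assuming
 * the core is built as the "stmmac" module; the values below are examples
 * only):
 *
 *   modprobe stmmac eee_timer=2000 chain_mode=1
 *
 * or, when built in, via the kernel command line as stmmac.eee_timer=2000.
 */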
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Otherwise, the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* The platform-provided default clk_csr is assumed valid for all
308 	 * cases except the ones handled below.
309 	 * For values higher than the IEEE 802.3 specified frequency range
310 	 * we cannot estimate the proper divider because the frequency of
311 	 * clk_csr_i is not known, so the default divider is left
312 	 * unchanged.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
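/* Worked example: with a 125 MHz csr clock and no platform-fixed clk_csr,
 * the 100-150 MHz range above selects STMMAC_CSR_100_150M as the MDC divider.
 */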
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
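/* Ring accounting example (hypothetical values): with dma_tx_size == 512,
 * cur_tx == 510 and dirty_tx == 5 the ring has wrapped, so
 * avail = 512 - 510 + 5 - 1 = 6 free descriptors; the "- 1" keeps one
 * descriptor in reserve so that a completely full ring is never confused
 * with an empty one (cur_tx == dirty_tx).
 */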
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function verifies that all TX queues are idle and, if
407  * so, enters LPI mode when EEE is enabled.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function disables EEE and exits LPI mode if it is
433  * currently active. It is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then the MAC transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts the
468  *  related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* When using the PCS we cannot deal with the PHY registers at this
475 	 * stage, so we do not support extra features like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
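/* From userspace the EEE/LPI behaviour set up here is typically driven via
 * ethtool, e.g. (the interface name is only an example):
 *
 *   ethtool --set-eee eth0 eee on tx-timer 1000
 *   ethtool --show-eee eth0
 */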
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function reads the timestamp from the descriptor, performs some
534  * sanity checks and passes it to the stack.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function reads the received packet's timestamp from the descriptor
577  * and passes it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL specific structure that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing (TX)
614  *  and incoming (RX) packet timestamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate negative error code on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* do not time stamp any incoming packet */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping.
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.1AS any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.1AS, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.1AS, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
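/* A minimal userspace sketch of exercising this path (error handling
 * omitted, "eth0" and the socket fd are examples only):
 *
 *   struct hwtstamp_config cfg = {
 *           .tx_type   = HWTSTAMP_TX_ON,
 *           .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr = { 0 };
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (char *)&cfg;
 *   ioctl(fd, SIOCSHWTSTAMP, &ifr);  // fd: any AF_INET/SOCK_DGRAM socket
 */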
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL specific structure that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * It will be rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* calculate the default addend value:
858 	 * addend = 2^32 / (clk_ptp_rate / freq_inc)
859 	 * where freq_inc = 10^9 / sec_inc is the rate (in Hz) at which
860 	 * the sub-second counter must advance.
861 	 */
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
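/* Worked example for the addend computation above (hypothetical rates):
 * with sec_inc == 20 ns and clk_ptp_rate == 100 MHz,
 *   temp   = 10^9 / 20           = 50,000,000
 *   addend = (temp << 32) / 10^8 = 0x80000000
 * i.e. the 32-bit accumulator overflows every second ptp clock cycle, and
 * the sub-second register advances by sec_inc on each overflow.
 */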
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	return priv->hw->phylink_pcs;
948 }
949 
950 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
951 			      const struct phylink_link_state *state)
952 {
953 	/* Nothing to do, xpcs_config() handles everything */
954 }
955 
956 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
957 {
958 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
959 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
960 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
961 	bool *hs_enable = &fpe_cfg->hs_enable;
962 
963 	if (is_up && *hs_enable) {
964 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
965 					MPACKET_VERIFY);
966 	} else {
967 		*lo_state = FPE_STATE_OFF;
968 		*lp_state = FPE_STATE_OFF;
969 	}
970 }
971 
972 static void stmmac_mac_link_down(struct phylink_config *config,
973 				 unsigned int mode, phy_interface_t interface)
974 {
975 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
976 
977 	stmmac_mac_set(priv, priv->ioaddr, false);
978 	priv->eee_active = false;
979 	priv->tx_lpi_enabled = false;
980 	priv->eee_enabled = stmmac_eee_init(priv);
981 	stmmac_set_eee_pls(priv, priv->hw, false);
982 
983 	if (priv->dma_cap.fpesel)
984 		stmmac_fpe_link_state_handle(priv, false);
985 }
986 
987 static void stmmac_mac_link_up(struct phylink_config *config,
988 			       struct phy_device *phy,
989 			       unsigned int mode, phy_interface_t interface,
990 			       int speed, int duplex,
991 			       bool tx_pause, bool rx_pause)
992 {
993 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
994 	u32 old_ctrl, ctrl;
995 
996 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
997 	    priv->plat->serdes_powerup)
998 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
999 
1000 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1001 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1002 
1003 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1004 		switch (speed) {
1005 		case SPEED_10000:
1006 			ctrl |= priv->hw->link.xgmii.speed10000;
1007 			break;
1008 		case SPEED_5000:
1009 			ctrl |= priv->hw->link.xgmii.speed5000;
1010 			break;
1011 		case SPEED_2500:
1012 			ctrl |= priv->hw->link.xgmii.speed2500;
1013 			break;
1014 		default:
1015 			return;
1016 		}
1017 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1018 		switch (speed) {
1019 		case SPEED_100000:
1020 			ctrl |= priv->hw->link.xlgmii.speed100000;
1021 			break;
1022 		case SPEED_50000:
1023 			ctrl |= priv->hw->link.xlgmii.speed50000;
1024 			break;
1025 		case SPEED_40000:
1026 			ctrl |= priv->hw->link.xlgmii.speed40000;
1027 			break;
1028 		case SPEED_25000:
1029 			ctrl |= priv->hw->link.xlgmii.speed25000;
1030 			break;
1031 		case SPEED_10000:
1032 			ctrl |= priv->hw->link.xgmii.speed10000;
1033 			break;
1034 		case SPEED_2500:
1035 			ctrl |= priv->hw->link.speed2500;
1036 			break;
1037 		case SPEED_1000:
1038 			ctrl |= priv->hw->link.speed1000;
1039 			break;
1040 		default:
1041 			return;
1042 		}
1043 	} else {
1044 		switch (speed) {
1045 		case SPEED_2500:
1046 			ctrl |= priv->hw->link.speed2500;
1047 			break;
1048 		case SPEED_1000:
1049 			ctrl |= priv->hw->link.speed1000;
1050 			break;
1051 		case SPEED_100:
1052 			ctrl |= priv->hw->link.speed100;
1053 			break;
1054 		case SPEED_10:
1055 			ctrl |= priv->hw->link.speed10;
1056 			break;
1057 		default:
1058 			return;
1059 		}
1060 	}
1061 
1062 	priv->speed = speed;
1063 
1064 	if (priv->plat->fix_mac_speed)
1065 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1066 
1067 	if (!duplex)
1068 		ctrl &= ~priv->hw->link.duplex;
1069 	else
1070 		ctrl |= priv->hw->link.duplex;
1071 
1072 	/* Flow Control operation */
1073 	if (rx_pause && tx_pause)
1074 		priv->flow_ctrl = FLOW_AUTO;
1075 	else if (rx_pause && !tx_pause)
1076 		priv->flow_ctrl = FLOW_RX;
1077 	else if (!rx_pause && tx_pause)
1078 		priv->flow_ctrl = FLOW_TX;
1079 	else
1080 		priv->flow_ctrl = FLOW_OFF;
1081 
1082 	stmmac_mac_flow_ctrl(priv, duplex);
1083 
1084 	if (ctrl != old_ctrl)
1085 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1086 
1087 	stmmac_mac_set(priv, priv->ioaddr, true);
1088 	if (phy && priv->dma_cap.eee) {
1089 		priv->eee_active =
1090 			phy_init_eee(phy, !(priv->plat->flags &
1091 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1092 		priv->eee_enabled = stmmac_eee_init(priv);
1093 		priv->tx_lpi_enabled = priv->eee_enabled;
1094 		stmmac_set_eee_pls(priv, priv->hw, true);
1095 	}
1096 
1097 	if (priv->dma_cap.fpesel)
1098 		stmmac_fpe_link_state_handle(priv, true);
1099 
1100 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1101 		stmmac_hwtstamp_correct_latency(priv, priv);
1102 }
1103 
1104 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1105 	.mac_select_pcs = stmmac_mac_select_pcs,
1106 	.mac_config = stmmac_mac_config,
1107 	.mac_link_down = stmmac_mac_link_down,
1108 	.mac_link_up = stmmac_mac_link_up,
1109 };
1110 
1111 /**
1112  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1113  * @priv: driver private structure
1114  * Description: this is to verify if the HW supports the PCS.
1115  * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1116  * configured for the TBI, RTBI, or SGMII PHY interface.
1117  */
1118 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1119 {
1120 	int interface = priv->plat->mac_interface;
1121 
1122 	if (priv->dma_cap.pcs) {
1123 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1124 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1125 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1127 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1128 			priv->hw->pcs = STMMAC_PCS_RGMII;
1129 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1130 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_SGMII;
1132 		}
1133 	}
1134 }
1135 
1136 /**
1137  * stmmac_init_phy - PHY initialization
1138  * @dev: net device structure
1139  * Description: it initializes the driver's PHY state, and attaches the PHY
1140  * to the mac driver.
1141  *  Return value:
1142  *  0 on success
1143  */
1144 static int stmmac_init_phy(struct net_device *dev)
1145 {
1146 	struct stmmac_priv *priv = netdev_priv(dev);
1147 	struct fwnode_handle *phy_fwnode;
1148 	struct fwnode_handle *fwnode;
1149 	int ret;
1150 
1151 	if (!phylink_expects_phy(priv->phylink))
1152 		return 0;
1153 
1154 	fwnode = priv->plat->port_node;
1155 	if (!fwnode)
1156 		fwnode = dev_fwnode(priv->device);
1157 
1158 	if (fwnode)
1159 		phy_fwnode = fwnode_get_phy_node(fwnode);
1160 	else
1161 		phy_fwnode = NULL;
1162 
1163 	/* Some DT bindings do not set up the PHY handle. Let's try to
1164 	 * parse it manually.
1165 	 */
1166 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1167 		int addr = priv->plat->phy_addr;
1168 		struct phy_device *phydev;
1169 
1170 		if (addr < 0) {
1171 			netdev_err(priv->dev, "no phy found\n");
1172 			return -ENODEV;
1173 		}
1174 
1175 		phydev = mdiobus_get_phy(priv->mii, addr);
1176 		if (!phydev) {
1177 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1178 			return -ENODEV;
1179 		}
1180 
1181 		ret = phylink_connect_phy(priv->phylink, phydev);
1182 	} else {
1183 		fwnode_handle_put(phy_fwnode);
1184 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1185 	}
1186 
1187 	if (!priv->plat->pmt) {
1188 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1189 
1190 		phylink_ethtool_get_wol(priv->phylink, &wol);
1191 		device_set_wakeup_capable(priv->device, !!wol.supported);
1192 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1193 	}
1194 
1195 	return ret;
1196 }
1197 
1198 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1199 {
1200 	/* Half-duplex can only work with a single TX queue */
1201 	if (priv->plat->tx_queues_to_use > 1)
1202 		priv->phylink_config.mac_capabilities &=
1203 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
1204 	else
1205 		priv->phylink_config.mac_capabilities |=
1206 			(MAC_10HD | MAC_100HD | MAC_1000HD);
1207 }
1208 
1209 static int stmmac_phy_setup(struct stmmac_priv *priv)
1210 {
1211 	struct stmmac_mdio_bus_data *mdio_bus_data;
1212 	int mode = priv->plat->phy_interface;
1213 	struct fwnode_handle *fwnode;
1214 	struct phylink *phylink;
1215 	int max_speed;
1216 
1217 	priv->phylink_config.dev = &priv->dev->dev;
1218 	priv->phylink_config.type = PHYLINK_NETDEV;
1219 	priv->phylink_config.mac_managed_pm = true;
1220 
1221 	/* Stmmac always requires an RX clock for hardware initialization */
1222 	priv->phylink_config.mac_requires_rxc = true;
1223 
1224 	mdio_bus_data = priv->plat->mdio_bus_data;
1225 	if (mdio_bus_data)
1226 		priv->phylink_config.ovr_an_inband =
1227 			mdio_bus_data->xpcs_an_inband;
1228 
1229 	/* Set the platform/firmware specified interface mode. Note, phylink
1230 	 * deals with the PHY interface mode, not the MAC interface mode.
1231 	 */
1232 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1233 
1234 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1235 	if (priv->hw->xpcs)
1236 		xpcs_get_interfaces(priv->hw->xpcs,
1237 				    priv->phylink_config.supported_interfaces);
1238 
1239 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1240 						MAC_10FD | MAC_100FD |
1241 						MAC_1000FD;
1242 
1243 	stmmac_set_half_duplex(priv);
1244 
1245 	/* Get the MAC specific capabilities */
1246 	stmmac_mac_phylink_get_caps(priv);
1247 
1248 	max_speed = priv->plat->max_speed;
1249 	if (max_speed)
1250 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1251 
1252 	fwnode = priv->plat->port_node;
1253 	if (!fwnode)
1254 		fwnode = dev_fwnode(priv->device);
1255 
1256 	phylink = phylink_create(&priv->phylink_config, fwnode,
1257 				 mode, &stmmac_phylink_mac_ops);
1258 	if (IS_ERR(phylink))
1259 		return PTR_ERR(phylink);
1260 
1261 	priv->phylink = phylink;
1262 	return 0;
1263 }
1264 
1265 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1266 				    struct stmmac_dma_conf *dma_conf)
1267 {
1268 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1269 	unsigned int desc_size;
1270 	void *head_rx;
1271 	u32 queue;
1272 
1273 	/* Display RX rings */
1274 	for (queue = 0; queue < rx_cnt; queue++) {
1275 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1276 
1277 		pr_info("\tRX Queue %u rings\n", queue);
1278 
1279 		if (priv->extend_desc) {
1280 			head_rx = (void *)rx_q->dma_erx;
1281 			desc_size = sizeof(struct dma_extended_desc);
1282 		} else {
1283 			head_rx = (void *)rx_q->dma_rx;
1284 			desc_size = sizeof(struct dma_desc);
1285 		}
1286 
1287 		/* Display RX ring */
1288 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1289 				    rx_q->dma_rx_phy, desc_size);
1290 	}
1291 }
1292 
1293 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1294 				    struct stmmac_dma_conf *dma_conf)
1295 {
1296 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1297 	unsigned int desc_size;
1298 	void *head_tx;
1299 	u32 queue;
1300 
1301 	/* Display TX rings */
1302 	for (queue = 0; queue < tx_cnt; queue++) {
1303 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1304 
1305 		pr_info("\tTX Queue %u rings\n", queue);
1306 
1307 		if (priv->extend_desc) {
1308 			head_tx = (void *)tx_q->dma_etx;
1309 			desc_size = sizeof(struct dma_extended_desc);
1310 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1311 			head_tx = (void *)tx_q->dma_entx;
1312 			desc_size = sizeof(struct dma_edesc);
1313 		} else {
1314 			head_tx = (void *)tx_q->dma_tx;
1315 			desc_size = sizeof(struct dma_desc);
1316 		}
1317 
1318 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1319 				    tx_q->dma_tx_phy, desc_size);
1320 	}
1321 }
1322 
1323 static void stmmac_display_rings(struct stmmac_priv *priv,
1324 				 struct stmmac_dma_conf *dma_conf)
1325 {
1326 	/* Display RX ring */
1327 	stmmac_display_rx_rings(priv, dma_conf);
1328 
1329 	/* Display TX ring */
1330 	stmmac_display_tx_rings(priv, dma_conf);
1331 }
1332 
1333 static int stmmac_set_bfsize(int mtu, int bufsize)
1334 {
1335 	int ret = bufsize;
1336 
1337 	if (mtu >= BUF_SIZE_8KiB)
1338 		ret = BUF_SIZE_16KiB;
1339 	else if (mtu >= BUF_SIZE_4KiB)
1340 		ret = BUF_SIZE_8KiB;
1341 	else if (mtu >= BUF_SIZE_2KiB)
1342 		ret = BUF_SIZE_4KiB;
1343 	else if (mtu > DEFAULT_BUFSIZE)
1344 		ret = BUF_SIZE_2KiB;
1345 	else
1346 		ret = DEFAULT_BUFSIZE;
1347 
1348 	return ret;
1349 }
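/* Example mappings produced by the chain above: an MTU of 1500 keeps
 * DEFAULT_BUFSIZE (1536 bytes), 3000 selects BUF_SIZE_4KiB and 9000
 * selects BUF_SIZE_16KiB.
 */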
1350 
1351 /**
1352  * stmmac_clear_rx_descriptors - clear RX descriptors
1353  * @priv: driver private structure
1354  * @dma_conf: structure to take the dma data
1355  * @queue: RX queue index
1356  * Description: this function is called to clear the RX descriptors
1357  * whether basic or extended descriptors are used.
1358  */
1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1360 					struct stmmac_dma_conf *dma_conf,
1361 					u32 queue)
1362 {
1363 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1364 	int i;
1365 
1366 	/* Clear the RX descriptors */
1367 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1368 		if (priv->extend_desc)
1369 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1370 					priv->use_riwt, priv->mode,
1371 					(i == dma_conf->dma_rx_size - 1),
1372 					dma_conf->dma_buf_sz);
1373 		else
1374 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1375 					priv->use_riwt, priv->mode,
1376 					(i == dma_conf->dma_rx_size - 1),
1377 					dma_conf->dma_buf_sz);
1378 }
1379 
1380 /**
1381  * stmmac_clear_tx_descriptors - clear tx descriptors
1382  * @priv: driver private structure
1383  * @dma_conf: structure to take the dma data
1384  * @queue: TX queue index.
1385  * Description: this function is called to clear the TX descriptors
1386  * whether basic or extended descriptors are used.
1387  */
1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1389 					struct stmmac_dma_conf *dma_conf,
1390 					u32 queue)
1391 {
1392 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1393 	int i;
1394 
1395 	/* Clear the TX descriptors */
1396 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1397 		int last = (i == (dma_conf->dma_tx_size - 1));
1398 		struct dma_desc *p;
1399 
1400 		if (priv->extend_desc)
1401 			p = &tx_q->dma_etx[i].basic;
1402 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1403 			p = &tx_q->dma_entx[i].basic;
1404 		else
1405 			p = &tx_q->dma_tx[i];
1406 
1407 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1408 	}
1409 }
1410 
1411 /**
1412  * stmmac_clear_descriptors - clear descriptors
1413  * @priv: driver private structure
1414  * @dma_conf: structure to take the dma data
1415  * Description: this function is called to clear the TX and RX descriptors
1416  * whether basic or extended descriptors are used.
1417  */
1418 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1419 				     struct stmmac_dma_conf *dma_conf)
1420 {
1421 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1422 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1423 	u32 queue;
1424 
1425 	/* Clear the RX descriptors */
1426 	for (queue = 0; queue < rx_queue_cnt; queue++)
1427 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1428 
1429 	/* Clear the TX descriptors */
1430 	for (queue = 0; queue < tx_queue_cnt; queue++)
1431 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1432 }
1433 
1434 /**
1435  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1436  * @priv: driver private structure
1437  * @dma_conf: structure to take the dma data
1438  * @p: descriptor pointer
1439  * @i: descriptor index
1440  * @flags: gfp flag
1441  * @queue: RX queue index
1442  * Description: this function is called to allocate a receive buffer, perform
1443  * the DMA mapping and init the descriptor.
1444  */
1445 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1446 				  struct stmmac_dma_conf *dma_conf,
1447 				  struct dma_desc *p,
1448 				  int i, gfp_t flags, u32 queue)
1449 {
1450 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1451 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1452 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1453 
1454 	if (priv->dma_cap.host_dma_width <= 32)
1455 		gfp |= GFP_DMA32;
1456 
1457 	if (!buf->page) {
1458 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1459 		if (!buf->page)
1460 			return -ENOMEM;
1461 		buf->page_offset = stmmac_rx_offset(priv);
1462 	}
1463 
1464 	if (priv->sph && !buf->sec_page) {
1465 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1466 		if (!buf->sec_page)
1467 			return -ENOMEM;
1468 
1469 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1470 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1471 	} else {
1472 		buf->sec_page = NULL;
1473 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1474 	}
1475 
1476 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1477 
1478 	stmmac_set_desc_addr(priv, p, buf->addr);
1479 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1480 		stmmac_init_desc3(priv, p);
1481 
1482 	return 0;
1483 }
1484 
1485 /**
1486  * stmmac_free_rx_buffer - free an RX dma buffer
1487  * @priv: private structure
1488  * @rx_q: RX queue
1489  * @i: buffer index.
1490  */
1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1492 				  struct stmmac_rx_queue *rx_q,
1493 				  int i)
1494 {
1495 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1496 
1497 	if (buf->page)
1498 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1499 	buf->page = NULL;
1500 
1501 	if (buf->sec_page)
1502 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1503 	buf->sec_page = NULL;
1504 }
1505 
1506 /**
1507  * stmmac_free_tx_buffer - free a TX dma buffer
1508  * @priv: private structure
1509  * @dma_conf: structure to take the dma data
1510  * @queue: TX queue index
1511  * @i: buffer index.
1512  */
1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1514 				  struct stmmac_dma_conf *dma_conf,
1515 				  u32 queue, int i)
1516 {
1517 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1518 
1519 	if (tx_q->tx_skbuff_dma[i].buf &&
1520 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1521 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1522 			dma_unmap_page(priv->device,
1523 				       tx_q->tx_skbuff_dma[i].buf,
1524 				       tx_q->tx_skbuff_dma[i].len,
1525 				       DMA_TO_DEVICE);
1526 		else
1527 			dma_unmap_single(priv->device,
1528 					 tx_q->tx_skbuff_dma[i].buf,
1529 					 tx_q->tx_skbuff_dma[i].len,
1530 					 DMA_TO_DEVICE);
1531 	}
1532 
1533 	if (tx_q->xdpf[i] &&
1534 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1535 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1536 		xdp_return_frame(tx_q->xdpf[i]);
1537 		tx_q->xdpf[i] = NULL;
1538 	}
1539 
1540 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1541 		tx_q->xsk_frames_done++;
1542 
1543 	if (tx_q->tx_skbuff[i] &&
1544 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1545 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1546 		tx_q->tx_skbuff[i] = NULL;
1547 	}
1548 
1549 	tx_q->tx_skbuff_dma[i].buf = 0;
1550 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1551 }
1552 
1553 /**
1554  * dma_free_rx_skbufs - free RX dma buffers
1555  * @priv: private structure
1556  * @dma_conf: structure to take the dma data
1557  * @queue: RX queue index
1558  */
1559 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1560 			       struct stmmac_dma_conf *dma_conf,
1561 			       u32 queue)
1562 {
1563 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 	int i;
1565 
1566 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1567 		stmmac_free_rx_buffer(priv, rx_q, i);
1568 }
1569 
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1571 				   struct stmmac_dma_conf *dma_conf,
1572 				   u32 queue, gfp_t flags)
1573 {
1574 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1575 	int i;
1576 
1577 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1578 		struct dma_desc *p;
1579 		int ret;
1580 
1581 		if (priv->extend_desc)
1582 			p = &((rx_q->dma_erx + i)->basic);
1583 		else
1584 			p = rx_q->dma_rx + i;
1585 
1586 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1587 					     queue);
1588 		if (ret)
1589 			return ret;
1590 
1591 		rx_q->buf_alloc_num++;
1592 	}
1593 
1594 	return 0;
1595 }
1596 
1597 /**
1598  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1599  * @priv: private structure
1600  * @dma_conf: structure to take the dma data
1601  * @queue: RX queue index
1602  */
1603 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1604 				struct stmmac_dma_conf *dma_conf,
1605 				u32 queue)
1606 {
1607 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1608 	int i;
1609 
1610 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1611 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1612 
1613 		if (!buf->xdp)
1614 			continue;
1615 
1616 		xsk_buff_free(buf->xdp);
1617 		buf->xdp = NULL;
1618 	}
1619 }
1620 
1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1622 				      struct stmmac_dma_conf *dma_conf,
1623 				      u32 queue)
1624 {
1625 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1626 	int i;
1627 
1628 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1629 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1630 	 * use this macro to make sure there are no size violations.
1631 	 */
1632 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1633 
1634 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1635 		struct stmmac_rx_buffer *buf;
1636 		dma_addr_t dma_addr;
1637 		struct dma_desc *p;
1638 
1639 		if (priv->extend_desc)
1640 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1641 		else
1642 			p = rx_q->dma_rx + i;
1643 
1644 		buf = &rx_q->buf_pool[i];
1645 
1646 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1647 		if (!buf->xdp)
1648 			return -ENOMEM;
1649 
1650 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1651 		stmmac_set_desc_addr(priv, p, dma_addr);
1652 		rx_q->buf_alloc_num++;
1653 	}
1654 
1655 	return 0;
1656 }
1657 
1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1659 {
1660 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1661 		return NULL;
1662 
1663 	return xsk_get_pool_from_qid(priv->dev, queue);
1664 }
1665 
1666 /**
1667  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1668  * @priv: driver private structure
1669  * @dma_conf: structure to take the dma data
1670  * @queue: RX queue index
1671  * @flags: gfp flag.
1672  * Description: this function initializes the DMA RX descriptors
1673  * and allocates the socket buffers. It supports the chained and ring
1674  * modes.
1675  */
1676 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1677 				    struct stmmac_dma_conf *dma_conf,
1678 				    u32 queue, gfp_t flags)
1679 {
1680 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1681 	int ret;
1682 
1683 	netif_dbg(priv, probe, priv->dev,
1684 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1685 		  (u32)rx_q->dma_rx_phy);
1686 
1687 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1688 
1689 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1690 
1691 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1692 
1693 	if (rx_q->xsk_pool) {
1694 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695 						   MEM_TYPE_XSK_BUFF_POOL,
1696 						   NULL));
1697 		netdev_info(priv->dev,
1698 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1699 			    rx_q->queue_index);
1700 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1701 	} else {
1702 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1703 						   MEM_TYPE_PAGE_POOL,
1704 						   rx_q->page_pool));
1705 		netdev_info(priv->dev,
1706 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1707 			    rx_q->queue_index);
1708 	}
1709 
1710 	if (rx_q->xsk_pool) {
1711 		/* RX XDP ZC buffer pool may not be populated, e.g.
1712 		 * xdpsock TX-only.
1713 		 */
1714 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1715 	} else {
1716 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1717 		if (ret < 0)
1718 			return -ENOMEM;
1719 	}
1720 
1721 	/* Setup the chained descriptor addresses */
1722 	if (priv->mode == STMMAC_CHAIN_MODE) {
1723 		if (priv->extend_desc)
1724 			stmmac_mode_init(priv, rx_q->dma_erx,
1725 					 rx_q->dma_rx_phy,
1726 					 dma_conf->dma_rx_size, 1);
1727 		else
1728 			stmmac_mode_init(priv, rx_q->dma_rx,
1729 					 rx_q->dma_rx_phy,
1730 					 dma_conf->dma_rx_size, 0);
1731 	}
1732 
1733 	return 0;
1734 }
1735 
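/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: this initializes the DMA RX descriptor rings of all the RX
 * queues in use. On failure, the buffers of the queues already initialized
 * are freed again.
 */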
1736 static int init_dma_rx_desc_rings(struct net_device *dev,
1737 				  struct stmmac_dma_conf *dma_conf,
1738 				  gfp_t flags)
1739 {
1740 	struct stmmac_priv *priv = netdev_priv(dev);
1741 	u32 rx_count = priv->plat->rx_queues_to_use;
1742 	int queue;
1743 	int ret;
1744 
1745 	/* RX INITIALIZATION */
1746 	netif_dbg(priv, probe, priv->dev,
1747 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1748 
1749 	for (queue = 0; queue < rx_count; queue++) {
1750 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1751 		if (ret)
1752 			goto err_init_rx_buffers;
1753 	}
1754 
1755 	return 0;
1756 
1757 err_init_rx_buffers:
1758 	while (queue >= 0) {
1759 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1760 
1761 		if (rx_q->xsk_pool)
1762 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1763 		else
1764 			dma_free_rx_skbufs(priv, dma_conf, queue);
1765 
1766 		rx_q->buf_alloc_num = 0;
1767 		rx_q->xsk_pool = NULL;
1768 
1769 		queue--;
1770 	}
1771 
1772 	return ret;
1773 }
1774 
1775 /**
1776  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1777  * @priv: driver private structure
1778  * @dma_conf: structure to take the dma data
1779  * @queue: TX queue index
1780  * Description: this function initializes the DMA TX descriptors
1781  * and allocates the socket buffers. It supports the chained and ring
1782  * modes.
1783  */
1784 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1785 				    struct stmmac_dma_conf *dma_conf,
1786 				    u32 queue)
1787 {
1788 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1789 	int i;
1790 
1791 	netif_dbg(priv, probe, priv->dev,
1792 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1793 		  (u32)tx_q->dma_tx_phy);
1794 
1795 	/* Setup the chained descriptor addresses */
1796 	if (priv->mode == STMMAC_CHAIN_MODE) {
1797 		if (priv->extend_desc)
1798 			stmmac_mode_init(priv, tx_q->dma_etx,
1799 					 tx_q->dma_tx_phy,
1800 					 dma_conf->dma_tx_size, 1);
1801 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1802 			stmmac_mode_init(priv, tx_q->dma_tx,
1803 					 tx_q->dma_tx_phy,
1804 					 dma_conf->dma_tx_size, 0);
1805 	}
1806 
1807 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1808 
1809 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1810 		struct dma_desc *p;
1811 
1812 		if (priv->extend_desc)
1813 			p = &((tx_q->dma_etx + i)->basic);
1814 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1815 			p = &((tx_q->dma_entx + i)->basic);
1816 		else
1817 			p = tx_q->dma_tx + i;
1818 
1819 		stmmac_clear_desc(priv, p);
1820 
1821 		tx_q->tx_skbuff_dma[i].buf = 0;
1822 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1823 		tx_q->tx_skbuff_dma[i].len = 0;
1824 		tx_q->tx_skbuff_dma[i].last_segment = false;
1825 		tx_q->tx_skbuff[i] = NULL;
1826 	}
1827 
1828 	return 0;
1829 }
1830 
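/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: this initializes the DMA TX descriptor rings of all the TX
 * queues in use.
 */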
1831 static int init_dma_tx_desc_rings(struct net_device *dev,
1832 				  struct stmmac_dma_conf *dma_conf)
1833 {
1834 	struct stmmac_priv *priv = netdev_priv(dev);
1835 	u32 tx_queue_cnt;
1836 	u32 queue;
1837 
1838 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1839 
1840 	for (queue = 0; queue < tx_queue_cnt; queue++)
1841 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1842 
1843 	return 0;
1844 }
1845 
1846 /**
1847  * init_dma_desc_rings - init the RX/TX descriptor rings
1848  * @dev: net device structure
1849  * @dma_conf: structure to take the dma data
1850  * @flags: gfp flag.
1851  * Description: this function initializes the DMA RX/TX descriptors
1852  * and allocates the socket buffers. It supports the chained and ring
1853  * modes.
1854  */
1855 static int init_dma_desc_rings(struct net_device *dev,
1856 			       struct stmmac_dma_conf *dma_conf,
1857 			       gfp_t flags)
1858 {
1859 	struct stmmac_priv *priv = netdev_priv(dev);
1860 	int ret;
1861 
1862 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1863 	if (ret)
1864 		return ret;
1865 
1866 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1867 
1868 	stmmac_clear_descriptors(priv, dma_conf);
1869 
1870 	if (netif_msg_hw(priv))
1871 		stmmac_display_rings(priv, dma_conf);
1872 
1873 	return ret;
1874 }
1875 
1876 /**
1877  * dma_free_tx_skbufs - free TX dma buffers
1878  * @priv: private structure
1879  * @dma_conf: structure to take the dma data
1880  * @queue: TX queue index
1881  */
1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1883 			       struct stmmac_dma_conf *dma_conf,
1884 			       u32 queue)
1885 {
1886 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1887 	int i;
1888 
1889 	tx_q->xsk_frames_done = 0;
1890 
1891 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1892 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1893 
1894 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1895 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1896 		tx_q->xsk_frames_done = 0;
1897 		tx_q->xsk_pool = NULL;
1898 	}
1899 }
1900 
1901 /**
1902  * stmmac_free_tx_skbufs - free TX skb buffers
1903  * @priv: private structure
1904  */
1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1906 {
1907 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1908 	u32 queue;
1909 
1910 	for (queue = 0; queue < tx_queue_cnt; queue++)
1911 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1912 }
1913 
1914 /**
1915  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1916  * @priv: private structure
1917  * @dma_conf: structure to take the dma data
1918  * @queue: RX queue index
1919  */
1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1921 					 struct stmmac_dma_conf *dma_conf,
1922 					 u32 queue)
1923 {
1924 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1925 
1926 	/* Release the DMA RX socket buffers */
1927 	if (rx_q->xsk_pool)
1928 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1929 	else
1930 		dma_free_rx_skbufs(priv, dma_conf, queue);
1931 
1932 	rx_q->buf_alloc_num = 0;
1933 	rx_q->xsk_pool = NULL;
1934 
1935 	/* Free DMA regions of consistent memory previously allocated */
1936 	if (!priv->extend_desc)
1937 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1938 				  sizeof(struct dma_desc),
1939 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1940 	else
1941 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1942 				  sizeof(struct dma_extended_desc),
1943 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1944 
1945 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1946 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1947 
1948 	kfree(rx_q->buf_pool);
1949 	if (rx_q->page_pool)
1950 		page_pool_destroy(rx_q->page_pool);
1951 }
1952 
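/**
 * free_dma_rx_desc_resources - free RX dma desc resources for all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */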
1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1954 				       struct stmmac_dma_conf *dma_conf)
1955 {
1956 	u32 rx_count = priv->plat->rx_queues_to_use;
1957 	u32 queue;
1958 
1959 	/* Free RX queue resources */
1960 	for (queue = 0; queue < rx_count; queue++)
1961 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1962 }
1963 
1964 /**
1965  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1966  * @priv: private structure
1967  * @dma_conf: structure to take the dma data
1968  * @queue: TX queue index
1969  */
1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1971 					 struct stmmac_dma_conf *dma_conf,
1972 					 u32 queue)
1973 {
1974 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975 	size_t size;
1976 	void *addr;
1977 
1978 	/* Release the DMA TX socket buffers */
1979 	dma_free_tx_skbufs(priv, dma_conf, queue);
1980 
1981 	if (priv->extend_desc) {
1982 		size = sizeof(struct dma_extended_desc);
1983 		addr = tx_q->dma_etx;
1984 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1985 		size = sizeof(struct dma_edesc);
1986 		addr = tx_q->dma_entx;
1987 	} else {
1988 		size = sizeof(struct dma_desc);
1989 		addr = tx_q->dma_tx;
1990 	}
1991 
1992 	size *= dma_conf->dma_tx_size;
1993 
1994 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1995 
1996 	kfree(tx_q->tx_skbuff_dma);
1997 	kfree(tx_q->tx_skbuff);
1998 }
1999 
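/**
 * free_dma_tx_desc_resources - free TX dma desc resources for all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */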
2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2001 				       struct stmmac_dma_conf *dma_conf)
2002 {
2003 	u32 tx_count = priv->plat->tx_queues_to_use;
2004 	u32 queue;
2005 
2006 	/* Free TX queue resources */
2007 	for (queue = 0; queue < tx_count; queue++)
2008 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2009 }
2010 
2011 /**
2012  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2013  * @priv: private structure
2014  * @dma_conf: structure to take the dma data
2015  * @queue: RX queue index
2016  * Description: according to which descriptor can be used (extended or basic)
2017  * this function allocates the resources for the RX path of the given queue:
2018  * the page pool, the buffer pool, the DMA descriptors and the XDP RX queue
2019  * info.
2020  */
2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2022 					 struct stmmac_dma_conf *dma_conf,
2023 					 u32 queue)
2024 {
2025 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2026 	struct stmmac_channel *ch = &priv->channel[queue];
2027 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2028 	struct page_pool_params pp_params = { 0 };
2029 	unsigned int num_pages;
2030 	unsigned int napi_id;
2031 	int ret;
2032 
2033 	rx_q->queue_index = queue;
2034 	rx_q->priv_data = priv;
2035 
2036 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2037 	pp_params.pool_size = dma_conf->dma_rx_size;
2038 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2039 	pp_params.order = ilog2(num_pages);
2040 	pp_params.nid = dev_to_node(priv->device);
2041 	pp_params.dev = priv->device;
2042 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2043 	pp_params.offset = stmmac_rx_offset(priv);
2044 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2045 
2046 	rx_q->page_pool = page_pool_create(&pp_params);
2047 	if (IS_ERR(rx_q->page_pool)) {
2048 		ret = PTR_ERR(rx_q->page_pool);
2049 		rx_q->page_pool = NULL;
2050 		return ret;
2051 	}
2052 
2053 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2054 				 sizeof(*rx_q->buf_pool),
2055 				 GFP_KERNEL);
2056 	if (!rx_q->buf_pool)
2057 		return -ENOMEM;
2058 
2059 	if (priv->extend_desc) {
2060 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2061 						   dma_conf->dma_rx_size *
2062 						   sizeof(struct dma_extended_desc),
2063 						   &rx_q->dma_rx_phy,
2064 						   GFP_KERNEL);
2065 		if (!rx_q->dma_erx)
2066 			return -ENOMEM;
2067 
2068 	} else {
2069 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2070 						  dma_conf->dma_rx_size *
2071 						  sizeof(struct dma_desc),
2072 						  &rx_q->dma_rx_phy,
2073 						  GFP_KERNEL);
2074 		if (!rx_q->dma_rx)
2075 			return -ENOMEM;
2076 	}
2077 
2078 	if (stmmac_xdp_is_enabled(priv) &&
2079 	    test_bit(queue, priv->af_xdp_zc_qps))
2080 		napi_id = ch->rxtx_napi.napi_id;
2081 	else
2082 		napi_id = ch->rx_napi.napi_id;
2083 
2084 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2085 			       rx_q->queue_index,
2086 			       napi_id);
2087 	if (ret) {
2088 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2089 		return -EINVAL;
2090 	}
2091 
2092 	return 0;
2093 }
2094 
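/**
 * alloc_dma_rx_desc_resources - alloc RX resources for all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocates the RX resources of every RX queue in use and
 * releases everything already allocated if any of them fails.
 */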
2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2096 				       struct stmmac_dma_conf *dma_conf)
2097 {
2098 	u32 rx_count = priv->plat->rx_queues_to_use;
2099 	u32 queue;
2100 	int ret;
2101 
2102 	/* RX queues buffers and DMA */
2103 	for (queue = 0; queue < rx_count; queue++) {
2104 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2105 		if (ret)
2106 			goto err_dma;
2107 	}
2108 
2109 	return 0;
2110 
2111 err_dma:
2112 	free_dma_rx_desc_resources(priv, dma_conf);
2113 
2114 	return ret;
2115 }
2116 
2117 /**
2118  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2119  * @priv: private structure
2120  * @dma_conf: structure to take the dma data
2121  * @queue: TX queue index
2122  * Description: according to which descriptor can be used (extended, TBS or
2123  * basic) this function allocates the resources for the TX path of the given
2124  * queue: the tx_skbuff and tx_skbuff_dma arrays and the DMA descriptor
2125  * ring.
2126  */
2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2128 					 struct stmmac_dma_conf *dma_conf,
2129 					 u32 queue)
2130 {
2131 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2132 	size_t size;
2133 	void *addr;
2134 
2135 	tx_q->queue_index = queue;
2136 	tx_q->priv_data = priv;
2137 
2138 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2139 				      sizeof(*tx_q->tx_skbuff_dma),
2140 				      GFP_KERNEL);
2141 	if (!tx_q->tx_skbuff_dma)
2142 		return -ENOMEM;
2143 
2144 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2145 				  sizeof(struct sk_buff *),
2146 				  GFP_KERNEL);
2147 	if (!tx_q->tx_skbuff)
2148 		return -ENOMEM;
2149 
2150 	if (priv->extend_desc)
2151 		size = sizeof(struct dma_extended_desc);
2152 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2153 		size = sizeof(struct dma_edesc);
2154 	else
2155 		size = sizeof(struct dma_desc);
2156 
2157 	size *= dma_conf->dma_tx_size;
2158 
2159 	addr = dma_alloc_coherent(priv->device, size,
2160 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2161 	if (!addr)
2162 		return -ENOMEM;
2163 
2164 	if (priv->extend_desc)
2165 		tx_q->dma_etx = addr;
2166 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2167 		tx_q->dma_entx = addr;
2168 	else
2169 		tx_q->dma_tx = addr;
2170 
2171 	return 0;
2172 }
2173 
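/**
 * alloc_dma_tx_desc_resources - alloc TX resources for all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocates the TX resources of every TX queue in use and
 * releases everything already allocated if any of them fails.
 */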
2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2175 				       struct stmmac_dma_conf *dma_conf)
2176 {
2177 	u32 tx_count = priv->plat->tx_queues_to_use;
2178 	u32 queue;
2179 	int ret;
2180 
2181 	/* TX queues buffers and DMA */
2182 	for (queue = 0; queue < tx_count; queue++) {
2183 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2184 		if (ret)
2185 			goto err_dma;
2186 	}
2187 
2188 	return 0;
2189 
2190 err_dma:
2191 	free_dma_tx_desc_resources(priv, dma_conf);
2192 	return ret;
2193 }
2194 
2195 /**
2196  * alloc_dma_desc_resources - alloc TX/RX resources.
2197  * @priv: private structure
2198  * @dma_conf: structure to take the dma data
2199  * Description: according to which descriptor can be used (extended or basic)
2200  * this function allocates the resources for the TX and RX paths. In case of
2201  * reception, for example, it pre-allocates the RX buffers in order to
2202  * allow the zero-copy mechanism.
2203  */
2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2205 				    struct stmmac_dma_conf *dma_conf)
2206 {
2207 	/* RX Allocation */
2208 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2209 
2210 	if (ret)
2211 		return ret;
2212 
2213 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2214 
2215 	return ret;
2216 }
2217 
2218 /**
2219  * free_dma_desc_resources - free dma desc resources
2220  * @priv: private structure
2221  * @dma_conf: structure to take the dma data
2222  */
2223 static void free_dma_desc_resources(struct stmmac_priv *priv,
2224 				    struct stmmac_dma_conf *dma_conf)
2225 {
2226 	/* Release the DMA TX socket buffers */
2227 	free_dma_tx_desc_resources(priv, dma_conf);
2228 
2229 	/* Release the DMA RX socket buffers later
2230 	 * to ensure all pending XDP_TX buffers are returned.
2231 	 */
2232 	free_dma_rx_desc_resources(priv, dma_conf);
2233 }
2234 
2235 /**
2236  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2237  *  @priv: driver private structure
2238  *  Description: It is used for enabling the rx queues in the MAC
2239  */
2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2241 {
2242 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2243 	int queue;
2244 	u8 mode;
2245 
2246 	for (queue = 0; queue < rx_queues_count; queue++) {
2247 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2248 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2249 	}
2250 }
2251 
2252 /**
2253  * stmmac_start_rx_dma - start RX DMA channel
2254  * @priv: driver private structure
2255  * @chan: RX channel index
2256  * Description:
2257  * This starts a RX DMA channel
2258  */
2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2260 {
2261 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2262 	stmmac_start_rx(priv, priv->ioaddr, chan);
2263 }
2264 
2265 /**
2266  * stmmac_start_tx_dma - start TX DMA channel
2267  * @priv: driver private structure
2268  * @chan: TX channel index
2269  * Description:
2270  * This starts a TX DMA channel
2271  */
2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2273 {
2274 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2275 	stmmac_start_tx(priv, priv->ioaddr, chan);
2276 }
2277 
2278 /**
2279  * stmmac_stop_rx_dma - stop RX DMA channel
2280  * @priv: driver private structure
2281  * @chan: RX channel index
2282  * Description:
2283  * This stops a RX DMA channel
2284  */
2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2286 {
2287 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2288 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2289 }
2290 
2291 /**
2292  * stmmac_stop_tx_dma - stop TX DMA channel
2293  * @priv: driver private structure
2294  * @chan: TX channel index
2295  * Description:
2296  * This stops a TX DMA channel
2297  */
2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2299 {
2300 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2301 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2302 }
2303 
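/**
 * stmmac_enable_all_dma_irq - enable RX and TX DMA interrupts on all channels
 * @priv: driver private structure
 */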
2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2305 {
2306 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2309 	u32 chan;
2310 
2311 	for (chan = 0; chan < dma_csr_ch; chan++) {
2312 		struct stmmac_channel *ch = &priv->channel[chan];
2313 		unsigned long flags;
2314 
2315 		spin_lock_irqsave(&ch->lock, flags);
2316 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2317 		spin_unlock_irqrestore(&ch->lock, flags);
2318 	}
2319 }
2320 
2321 /**
2322  * stmmac_start_all_dma - start all RX and TX DMA channels
2323  * @priv: driver private structure
2324  * Description:
2325  * This starts all the RX and TX DMA channels
2326  */
2327 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2328 {
2329 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2330 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2331 	u32 chan = 0;
2332 
2333 	for (chan = 0; chan < rx_channels_count; chan++)
2334 		stmmac_start_rx_dma(priv, chan);
2335 
2336 	for (chan = 0; chan < tx_channels_count; chan++)
2337 		stmmac_start_tx_dma(priv, chan);
2338 }
2339 
2340 /**
2341  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2342  * @priv: driver private structure
2343  * Description:
2344  * This stops the RX and TX DMA channels
2345  */
2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2347 {
2348 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2349 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2350 	u32 chan = 0;
2351 
2352 	for (chan = 0; chan < rx_channels_count; chan++)
2353 		stmmac_stop_rx_dma(priv, chan);
2354 
2355 	for (chan = 0; chan < tx_channels_count; chan++)
2356 		stmmac_stop_tx_dma(priv, chan);
2357 }
2358 
2359 /**
2360  *  stmmac_dma_operation_mode - HW DMA operation mode
2361  *  @priv: driver private structure
2362  *  Description: it is used for configuring the DMA operation mode register in
2363  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2364  */
2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2366 {
2367 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2368 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2369 	int rxfifosz = priv->plat->rx_fifo_size;
2370 	int txfifosz = priv->plat->tx_fifo_size;
2371 	u32 txmode = 0;
2372 	u32 rxmode = 0;
2373 	u32 chan = 0;
2374 	u8 qmode = 0;
2375 
2376 	if (rxfifosz == 0)
2377 		rxfifosz = priv->dma_cap.rx_fifo_size;
2378 	if (txfifosz == 0)
2379 		txfifosz = priv->dma_cap.tx_fifo_size;
2380 
2381 	/* Adjust for real per queue fifo size */
2382 	rxfifosz /= rx_channels_count;
2383 	txfifosz /= tx_channels_count;
2384 
2385 	if (priv->plat->force_thresh_dma_mode) {
2386 		txmode = tc;
2387 		rxmode = tc;
2388 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2389 		/*
2390 		 * In case of GMAC, SF mode can be enabled
2391 		 * to perform the TX COE in HW. This depends on:
2392 		 * 1) TX COE being actually supported
2393 		 * 2) there being no buggy Jumbo frame support
2394 		 *    that requires not inserting the csum in the TDES.
2395 		 */
2396 		txmode = SF_DMA_MODE;
2397 		rxmode = SF_DMA_MODE;
2398 		priv->xstats.threshold = SF_DMA_MODE;
2399 	} else {
2400 		txmode = tc;
2401 		rxmode = SF_DMA_MODE;
2402 	}
2403 
2404 	/* configure all channels */
2405 	for (chan = 0; chan < rx_channels_count; chan++) {
2406 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2407 		u32 buf_size;
2408 
2409 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2410 
2411 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2412 				rxfifosz, qmode);
2413 
2414 		if (rx_q->xsk_pool) {
2415 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2416 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2417 					      buf_size,
2418 					      chan);
2419 		} else {
2420 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 					      priv->dma_conf.dma_buf_sz,
2422 					      chan);
2423 		}
2424 	}
2425 
2426 	for (chan = 0; chan < tx_channels_count; chan++) {
2427 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2428 
2429 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2430 				txfifosz, qmode);
2431 	}
2432 }
2433 
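/* XSK TX metadata callbacks: stmmac_xsk_request_timestamp() enables the
 * hardware TX timestamp on a descriptor and asks for an interrupt on
 * completion; stmmac_xsk_fill_timestamp() reads the timestamp back on
 * completion, compensating for the CDC error.
 */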
2434 static void stmmac_xsk_request_timestamp(void *_priv)
2435 {
2436 	struct stmmac_metadata_request *meta_req = _priv;
2437 
2438 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2439 	*meta_req->set_ic = true;
2440 }
2441 
2442 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2443 {
2444 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2445 	struct stmmac_priv *priv = tx_compl->priv;
2446 	struct dma_desc *desc = tx_compl->desc;
2447 	bool found = false;
2448 	u64 ns = 0;
2449 
2450 	if (!priv->hwts_tx_en)
2451 		return 0;
2452 
2453 	/* check tx tstamp status */
2454 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2455 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2456 		found = true;
2457 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2458 		found = true;
2459 	}
2460 
2461 	if (found) {
2462 		ns -= priv->plat->cdc_error_adj;
2463 		return ns_to_ktime(ns);
2464 	}
2465 
2466 	return 0;
2467 }
2468 
2469 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2470 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2471 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2472 };
2473 
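/**
 * stmmac_xdp_xmit_zc - transmit frames from the XSK TX ring (zero-copy)
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: peeks descriptors from the XSK pool of the given queue and
 * submits them to the DMA, sharing the TX ring with the slow path.
 * Returns true when the budget was not exhausted and no more XSK descriptors
 * are pending, i.e. the TX IRQ can be re-enabled.
 */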
2474 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2475 {
2476 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2477 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2478 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2479 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2480 	unsigned int entry = tx_q->cur_tx;
2481 	struct dma_desc *tx_desc = NULL;
2482 	struct xdp_desc xdp_desc;
2483 	bool work_done = true;
2484 	u32 tx_set_ic_bit = 0;
2485 
2486 	/* Avoids TX time-out as we are sharing with slow path */
2487 	txq_trans_cond_update(nq);
2488 
2489 	budget = min(budget, stmmac_tx_avail(priv, queue));
2490 
2491 	while (budget-- > 0) {
2492 		struct stmmac_metadata_request meta_req;
2493 		struct xsk_tx_metadata *meta = NULL;
2494 		dma_addr_t dma_addr;
2495 		bool set_ic;
2496 
2497 		/* We are sharing with the slow path and stop XSK TX desc submission when
2498 		 * the available TX ring space drops below the threshold.
2499 		 */
2500 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2501 		    !netif_carrier_ok(priv->dev)) {
2502 			work_done = false;
2503 			break;
2504 		}
2505 
2506 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2507 			break;
2508 
2509 		if (priv->plat->est && priv->plat->est->enable &&
2510 		    priv->plat->est->max_sdu[queue] &&
2511 		    xdp_desc.len > priv->plat->est->max_sdu[queue]) {
2512 			priv->xstats.max_sdu_txq_drop[queue]++;
2513 			continue;
2514 		}
2515 
2516 		if (likely(priv->extend_desc))
2517 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2518 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2519 			tx_desc = &tx_q->dma_entx[entry].basic;
2520 		else
2521 			tx_desc = tx_q->dma_tx + entry;
2522 
2523 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2524 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2525 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2526 
2527 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2528 
2529 		/* To return the XDP buffer to the XSK pool, we simply call
2530 		 * xsk_tx_completed(), so we don't need to fill up
2531 		 * 'buf' and 'xdpf'.
2532 		 */
2533 		tx_q->tx_skbuff_dma[entry].buf = 0;
2534 		tx_q->xdpf[entry] = NULL;
2535 
2536 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2537 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2538 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2539 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2540 
2541 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2542 
2543 		tx_q->tx_count_frames++;
2544 
2545 		if (!priv->tx_coal_frames[queue])
2546 			set_ic = false;
2547 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2548 			set_ic = true;
2549 		else
2550 			set_ic = false;
2551 
2552 		meta_req.priv = priv;
2553 		meta_req.tx_desc = tx_desc;
2554 		meta_req.set_ic = &set_ic;
2555 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2556 					&meta_req);
2557 		if (set_ic) {
2558 			tx_q->tx_count_frames = 0;
2559 			stmmac_set_tx_ic(priv, tx_desc);
2560 			tx_set_ic_bit++;
2561 		}
2562 
2563 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2564 				       true, priv->mode, true, true,
2565 				       xdp_desc.len);
2566 
2567 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2568 
2569 		xsk_tx_metadata_to_compl(meta,
2570 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2571 
2572 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2573 		entry = tx_q->cur_tx;
2574 	}
2575 	u64_stats_update_begin(&txq_stats->napi_syncp);
2576 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2577 	u64_stats_update_end(&txq_stats->napi_syncp);
2578 
2579 	if (tx_desc) {
2580 		stmmac_flush_tx_descriptors(priv, queue);
2581 		xsk_tx_release(pool);
2582 	}
2583 
2584 	/* Return true only if both of the following conditions are met:
2585 	 *  a) TX budget is still available
2586 	 *  b) work_done = true when the XSK TX desc peek is empty (no more
2587 	 *     pending XSK TX for transmission)
2588 	 */
2589 	return !!budget && work_done;
2590 }
2591 
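/* Bump the TX DMA threshold (in steps of 64) when a threshold-related TX
 * error is reported, unless the channel is already operating in
 * Store-And-Forward mode.
 */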
2592 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2593 {
2594 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2595 		tc += 64;
2596 
2597 		if (priv->plat->force_thresh_dma_mode)
2598 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2599 		else
2600 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2601 						      chan);
2602 
2603 		priv->xstats.threshold = tc;
2604 	}
2605 }
2606 
2607 /**
2608  * stmmac_tx_clean - to manage the transmission completion
2609  * @priv: driver private structure
2610  * @budget: napi budget limiting this function's packet handling
2611  * @queue: TX queue index
2612  * @pending_packets: signal to arm the TX coal timer
2613  * Description: it reclaims the transmit resources after transmission completes.
2614  * If some packets still need to be handled, due to TX coalescing, set
2615  * pending_packets to true to make NAPI arm the TX coal timer.
2616  */
2617 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2618 			   bool *pending_packets)
2619 {
2620 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2621 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2622 	unsigned int bytes_compl = 0, pkts_compl = 0;
2623 	unsigned int entry, xmits = 0, count = 0;
2624 	u32 tx_packets = 0, tx_errors = 0;
2625 
2626 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2627 
2628 	tx_q->xsk_frames_done = 0;
2629 
2630 	entry = tx_q->dirty_tx;
2631 
2632 	/* Try to clean all completed TX frames in one shot */
2633 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2634 		struct xdp_frame *xdpf;
2635 		struct sk_buff *skb;
2636 		struct dma_desc *p;
2637 		int status;
2638 
2639 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2640 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2641 			xdpf = tx_q->xdpf[entry];
2642 			skb = NULL;
2643 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2644 			xdpf = NULL;
2645 			skb = tx_q->tx_skbuff[entry];
2646 		} else {
2647 			xdpf = NULL;
2648 			skb = NULL;
2649 		}
2650 
2651 		if (priv->extend_desc)
2652 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2653 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2654 			p = &tx_q->dma_entx[entry].basic;
2655 		else
2656 			p = tx_q->dma_tx + entry;
2657 
2658 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2659 		/* Check if the descriptor is owned by the DMA */
2660 		if (unlikely(status & tx_dma_own))
2661 			break;
2662 
2663 		count++;
2664 
2665 		/* Make sure descriptor fields are read after reading
2666 		 * the own bit.
2667 		 */
2668 		dma_rmb();
2669 
2670 		/* Just consider the last segment and ...*/
2671 		if (likely(!(status & tx_not_ls))) {
2672 			/* ... verify the status error condition */
2673 			if (unlikely(status & tx_err)) {
2674 				tx_errors++;
2675 				if (unlikely(status & tx_err_bump_tc))
2676 					stmmac_bump_dma_threshold(priv, queue);
2677 			} else {
2678 				tx_packets++;
2679 			}
2680 			if (skb) {
2681 				stmmac_get_tx_hwtstamp(priv, p, skb);
2682 			} else if (tx_q->xsk_pool &&
2683 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2684 				struct stmmac_xsk_tx_complete tx_compl = {
2685 					.priv = priv,
2686 					.desc = p,
2687 				};
2688 
2689 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2690 							 &stmmac_xsk_tx_metadata_ops,
2691 							 &tx_compl);
2692 			}
2693 		}
2694 
2695 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2696 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2697 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2698 				dma_unmap_page(priv->device,
2699 					       tx_q->tx_skbuff_dma[entry].buf,
2700 					       tx_q->tx_skbuff_dma[entry].len,
2701 					       DMA_TO_DEVICE);
2702 			else
2703 				dma_unmap_single(priv->device,
2704 						 tx_q->tx_skbuff_dma[entry].buf,
2705 						 tx_q->tx_skbuff_dma[entry].len,
2706 						 DMA_TO_DEVICE);
2707 			tx_q->tx_skbuff_dma[entry].buf = 0;
2708 			tx_q->tx_skbuff_dma[entry].len = 0;
2709 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2710 		}
2711 
2712 		stmmac_clean_desc3(priv, tx_q, p);
2713 
2714 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2715 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2716 
2717 		if (xdpf &&
2718 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2719 			xdp_return_frame_rx_napi(xdpf);
2720 			tx_q->xdpf[entry] = NULL;
2721 		}
2722 
2723 		if (xdpf &&
2724 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2725 			xdp_return_frame(xdpf);
2726 			tx_q->xdpf[entry] = NULL;
2727 		}
2728 
2729 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2730 			tx_q->xsk_frames_done++;
2731 
2732 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2733 			if (likely(skb)) {
2734 				pkts_compl++;
2735 				bytes_compl += skb->len;
2736 				dev_consume_skb_any(skb);
2737 				tx_q->tx_skbuff[entry] = NULL;
2738 			}
2739 		}
2740 
2741 		stmmac_release_tx_desc(priv, p, priv->mode);
2742 
2743 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2744 	}
2745 	tx_q->dirty_tx = entry;
2746 
2747 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2748 				  pkts_compl, bytes_compl);
2749 
2750 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2751 								queue))) &&
2752 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2753 
2754 		netif_dbg(priv, tx_done, priv->dev,
2755 			  "%s: restart transmit\n", __func__);
2756 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2757 	}
2758 
2759 	if (tx_q->xsk_pool) {
2760 		bool work_done;
2761 
2762 		if (tx_q->xsk_frames_done)
2763 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2764 
2765 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2766 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2767 
2768 		/* For XSK TX, we try to send as many frames as possible.
2769 		 * If the XSK work is done (XSK TX desc ring is empty and budget is
2770 		 * still available), return "budget - 1" to re-enable the TX IRQ.
2771 		 * Else, return "budget" to make NAPI continue polling.
2772 		 */
2773 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2774 					       STMMAC_XSK_TX_BUDGET_MAX);
2775 		if (work_done)
2776 			xmits = budget - 1;
2777 		else
2778 			xmits = budget;
2779 	}
2780 
2781 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2782 	    priv->eee_sw_timer_en) {
2783 		if (stmmac_enable_eee_mode(priv))
2784 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2785 	}
2786 
2787 	/* We still have pending packets, let's call for a new scheduling */
2788 	if (tx_q->dirty_tx != tx_q->cur_tx)
2789 		*pending_packets = true;
2790 
2791 	u64_stats_update_begin(&txq_stats->napi_syncp);
2792 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2793 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2794 	u64_stats_inc(&txq_stats->napi.tx_clean);
2795 	u64_stats_update_end(&txq_stats->napi_syncp);
2796 
2797 	priv->xstats.tx_errors += tx_errors;
2798 
2799 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2800 
2801 	/* Combine decisions from TX clean and XSK TX */
2802 	return max(count, xmits);
2803 }
2804 
2805 /**
2806  * stmmac_tx_err - to manage the tx error
2807  * @priv: driver private structure
2808  * @chan: channel index
2809  * Description: it cleans the descriptors and restarts the transmission
2810  * in case of transmission errors.
2811  */
2812 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2813 {
2814 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2815 
2816 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2817 
2818 	stmmac_stop_tx_dma(priv, chan);
2819 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2820 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2821 	stmmac_reset_tx_queue(priv, chan);
2822 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2823 			    tx_q->dma_tx_phy, chan);
2824 	stmmac_start_tx_dma(priv, chan);
2825 
2826 	priv->xstats.tx_errors++;
2827 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2828 }
2829 
2830 /**
2831  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2832  *  @priv: driver private structure
2833  *  @txmode: TX operating mode
2834  *  @rxmode: RX operating mode
2835  *  @chan: channel index
2836  *  Description: it is used for configuring the DMA operation mode at
2837  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2838  *  mode.
2839  */
2840 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2841 					  u32 rxmode, u32 chan)
2842 {
2843 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2844 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2845 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2846 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2847 	int rxfifosz = priv->plat->rx_fifo_size;
2848 	int txfifosz = priv->plat->tx_fifo_size;
2849 
2850 	if (rxfifosz == 0)
2851 		rxfifosz = priv->dma_cap.rx_fifo_size;
2852 	if (txfifosz == 0)
2853 		txfifosz = priv->dma_cap.tx_fifo_size;
2854 
2855 	/* Adjust for real per queue fifo size */
2856 	rxfifosz /= rx_channels_count;
2857 	txfifosz /= tx_channels_count;
2858 
2859 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2860 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2861 }
2862 
2863 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2864 {
2865 	int ret;
2866 
2867 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2868 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2869 	if (ret && (ret != -EINVAL)) {
2870 		stmmac_global_err(priv);
2871 		return true;
2872 	}
2873 
2874 	return false;
2875 }
2876 
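/**
 * stmmac_napi_check - check the DMA channel status and schedule NAPI
 * @priv: driver private structure
 * @chan: DMA channel index
 * @dir: interrupt direction to check (RX, TX or both)
 * Description: reads the DMA interrupt status of the channel and, if RX or
 * TX work is pending, disables the corresponding DMA interrupt and schedules
 * the matching NAPI instance (the combined rxtx NAPI is used in XSK
 * zero-copy mode).
 */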
2877 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2878 {
2879 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2880 						 &priv->xstats, chan, dir);
2881 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2882 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2883 	struct stmmac_channel *ch = &priv->channel[chan];
2884 	struct napi_struct *rx_napi;
2885 	struct napi_struct *tx_napi;
2886 	unsigned long flags;
2887 
2888 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2889 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2890 
2891 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2892 		if (napi_schedule_prep(rx_napi)) {
2893 			spin_lock_irqsave(&ch->lock, flags);
2894 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2895 			spin_unlock_irqrestore(&ch->lock, flags);
2896 			__napi_schedule(rx_napi);
2897 		}
2898 	}
2899 
2900 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2901 		if (napi_schedule_prep(tx_napi)) {
2902 			spin_lock_irqsave(&ch->lock, flags);
2903 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2904 			spin_unlock_irqrestore(&ch->lock, flags);
2905 			__napi_schedule(tx_napi);
2906 		}
2907 	}
2908 
2909 	return status;
2910 }
2911 
2912 /**
2913  * stmmac_dma_interrupt - DMA ISR
2914  * @priv: driver private structure
2915  * Description: this is the DMA ISR. It is called by the main ISR.
2916  * It calls the dwmac dma routine and schedules the poll method in case
2917  * some work can be done.
2918  */
2919 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2920 {
2921 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2922 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2923 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2924 				tx_channel_count : rx_channel_count;
2925 	u32 chan;
2926 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2927 
2928 	/* Make sure we never check beyond our status buffer. */
2929 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2930 		channels_to_check = ARRAY_SIZE(status);
2931 
2932 	for (chan = 0; chan < channels_to_check; chan++)
2933 		status[chan] = stmmac_napi_check(priv, chan,
2934 						 DMA_DIR_RXTX);
2935 
2936 	for (chan = 0; chan < tx_channel_count; chan++) {
2937 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2938 			/* Try to bump up the dma threshold on this failure */
2939 			stmmac_bump_dma_threshold(priv, chan);
2940 		} else if (unlikely(status[chan] == tx_hard_error)) {
2941 			stmmac_tx_err(priv, chan);
2942 		}
2943 	}
2944 }
2945 
2946 /**
2947  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2948  * @priv: driver private structure
2949  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2950  */
2951 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2952 {
2953 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2954 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2955 
2956 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2957 
2958 	if (priv->dma_cap.rmon) {
2959 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2960 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2961 	} else
2962 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2963 }
2964 
2965 /**
2966  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2967  * @priv: driver private structure
2968  * Description:
2969  *  new GMAC chip generations have a new register to indicate the
2970  *  presence of the optional feature/functions.
2971  *  This can be also used to override the value passed through the
2972  *  platform and necessary for old MAC10/100 and GMAC chips.
2973  */
2974 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2975 {
2976 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2977 }
2978 
2979 /**
2980  * stmmac_check_ether_addr - check if the MAC addr is valid
2981  * @priv: driver private structure
2982  * Description:
2983  * it verifies that the MAC address is valid; if it is not, a random
2984  * MAC address is generated
2985  */
2986 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2987 {
2988 	u8 addr[ETH_ALEN];
2989 
2990 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2991 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2992 		if (is_valid_ether_addr(addr))
2993 			eth_hw_addr_set(priv->dev, addr);
2994 		else
2995 			eth_hw_addr_random(priv->dev);
2996 		dev_info(priv->device, "device MAC address %pM\n",
2997 			 priv->dev->dev_addr);
2998 	}
2999 }
3000 
3001 /**
3002  * stmmac_init_dma_engine - DMA init.
3003  * @priv: driver private structure
3004  * Description:
3005  * It initializes the DMA by invoking the specific MAC/GMAC callback.
3006  * Some DMA parameters can be passed from the platform;
3007  * if they are not passed, a default is kept for the MAC or GMAC.
3008  */
3009 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3010 {
3011 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3012 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3013 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3014 	struct stmmac_rx_queue *rx_q;
3015 	struct stmmac_tx_queue *tx_q;
3016 	u32 chan = 0;
3017 	int atds = 0;
3018 	int ret = 0;
3019 
3020 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3021 		dev_err(priv->device, "Invalid DMA configuration\n");
3022 		return -EINVAL;
3023 	}
3024 
3025 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3026 		atds = 1;
3027 
3028 	ret = stmmac_reset(priv, priv->ioaddr);
3029 	if (ret) {
3030 		dev_err(priv->device, "Failed to reset the dma\n");
3031 		return ret;
3032 	}
3033 
3034 	/* DMA Configuration */
3035 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
3036 
3037 	if (priv->plat->axi)
3038 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3039 
3040 	/* DMA CSR Channel configuration */
3041 	for (chan = 0; chan < dma_csr_ch; chan++) {
3042 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3043 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3044 	}
3045 
3046 	/* DMA RX Channel Configuration */
3047 	for (chan = 0; chan < rx_channels_count; chan++) {
3048 		rx_q = &priv->dma_conf.rx_queue[chan];
3049 
3050 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3051 				    rx_q->dma_rx_phy, chan);
3052 
3053 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3054 				     (rx_q->buf_alloc_num *
3055 				      sizeof(struct dma_desc));
3056 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3057 				       rx_q->rx_tail_addr, chan);
3058 	}
3059 
3060 	/* DMA TX Channel Configuration */
3061 	for (chan = 0; chan < tx_channels_count; chan++) {
3062 		tx_q = &priv->dma_conf.tx_queue[chan];
3063 
3064 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3065 				    tx_q->dma_tx_phy, chan);
3066 
3067 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3068 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3069 				       tx_q->tx_tail_addr, chan);
3070 	}
3071 
3072 	return ret;
3073 }
3074 
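/* Arm the TX coalescing timer of the given queue, unless coalescing is
 * disabled (zero timeout) or the corresponding NAPI is already scheduled.
 */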
3075 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3076 {
3077 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3078 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3079 	struct stmmac_channel *ch;
3080 	struct napi_struct *napi;
3081 
3082 	if (!tx_coal_timer)
3083 		return;
3084 
3085 	ch = &priv->channel[tx_q->queue_index];
3086 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3087 
3088 	/* Arm the timer only if napi is not already scheduled.
3089 	 * If napi is scheduled, try to cancel any pending timer; it will be
3090 	 * armed again by the next scheduled napi.
3091 	 */
3092 	if (unlikely(!napi_is_scheduled(napi)))
3093 		hrtimer_start(&tx_q->txtimer,
3094 			      STMMAC_COAL_TIMER(tx_coal_timer),
3095 			      HRTIMER_MODE_REL);
3096 	else
3097 		hrtimer_try_to_cancel(&tx_q->txtimer);
3098 }
3099 
3100 /**
3101  * stmmac_tx_timer - mitigation sw timer for tx.
3102  * @t: pointer to the hrtimer embedded in the TX queue
3103  * Description:
3104  * This is the timer handler that schedules the TX NAPI to run stmmac_tx_clean.
3105  */
3106 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3107 {
3108 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3109 	struct stmmac_priv *priv = tx_q->priv_data;
3110 	struct stmmac_channel *ch;
3111 	struct napi_struct *napi;
3112 
3113 	ch = &priv->channel[tx_q->queue_index];
3114 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3115 
3116 	if (likely(napi_schedule_prep(napi))) {
3117 		unsigned long flags;
3118 
3119 		spin_lock_irqsave(&ch->lock, flags);
3120 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3121 		spin_unlock_irqrestore(&ch->lock, flags);
3122 		__napi_schedule(napi);
3123 	}
3124 
3125 	return HRTIMER_NORESTART;
3126 }
3127 
3128 /**
3129  * stmmac_init_coalesce - init mitigation options.
3130  * @priv: driver private structure
3131  * Description:
3132  * This initializes the coalescing parameters: i.e. timer rate,
3133  * timer handler and default threshold used for enabling the
3134  * interrupt-on-completion bit.
3135  */
3136 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3137 {
3138 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3139 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3140 	u32 chan;
3141 
3142 	for (chan = 0; chan < tx_channel_count; chan++) {
3143 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3144 
3145 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3146 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3147 
3148 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3149 		tx_q->txtimer.function = stmmac_tx_timer;
3150 	}
3151 
3152 	for (chan = 0; chan < rx_channel_count; chan++)
3153 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3154 }
3155 
3156 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3157 {
3158 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3159 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3160 	u32 chan;
3161 
3162 	/* set TX ring length */
3163 	for (chan = 0; chan < tx_channels_count; chan++)
3164 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3165 				       (priv->dma_conf.dma_tx_size - 1), chan);
3166 
3167 	/* set RX ring length */
3168 	for (chan = 0; chan < rx_channels_count; chan++)
3169 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3170 				       (priv->dma_conf.dma_rx_size - 1), chan);
3171 }
3172 
3173 /**
3174  *  stmmac_set_tx_queue_weight - Set TX queue weight
3175  *  @priv: driver private structure
3176  *  Description: It is used for setting the TX queue weights
3177  */
3178 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3179 {
3180 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3181 	u32 weight;
3182 	u32 queue;
3183 
3184 	for (queue = 0; queue < tx_queues_count; queue++) {
3185 		weight = priv->plat->tx_queues_cfg[queue].weight;
3186 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3187 	}
3188 }
3189 
3190 /**
3191  *  stmmac_configure_cbs - Configure CBS in TX queue
3192  *  @priv: driver private structure
3193  *  Description: It is used for configuring CBS in AVB TX queues
3194  */
3195 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3196 {
3197 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3198 	u32 mode_to_use;
3199 	u32 queue;
3200 
3201 	/* queue 0 is reserved for legacy traffic */
3202 	for (queue = 1; queue < tx_queues_count; queue++) {
3203 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3204 		if (mode_to_use == MTL_QUEUE_DCB)
3205 			continue;
3206 
3207 		stmmac_config_cbs(priv, priv->hw,
3208 				priv->plat->tx_queues_cfg[queue].send_slope,
3209 				priv->plat->tx_queues_cfg[queue].idle_slope,
3210 				priv->plat->tx_queues_cfg[queue].high_credit,
3211 				priv->plat->tx_queues_cfg[queue].low_credit,
3212 				queue);
3213 	}
3214 }
3215 
3216 /**
3217  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3218  *  @priv: driver private structure
3219  *  Description: It is used for mapping RX queues to RX dma channels
3220  */
3221 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3222 {
3223 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3224 	u32 queue;
3225 	u32 chan;
3226 
3227 	for (queue = 0; queue < rx_queues_count; queue++) {
3228 		chan = priv->plat->rx_queues_cfg[queue].chan;
3229 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3230 	}
3231 }
3232 
3233 /**
3234  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3235  *  @priv: driver private structure
3236  *  Description: It is used for configuring the RX Queue Priority
3237  */
3238 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3239 {
3240 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3241 	u32 queue;
3242 	u32 prio;
3243 
3244 	for (queue = 0; queue < rx_queues_count; queue++) {
3245 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3246 			continue;
3247 
3248 		prio = priv->plat->rx_queues_cfg[queue].prio;
3249 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3250 	}
3251 }
3252 
3253 /**
3254  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3255  *  @priv: driver private structure
3256  *  Description: It is used for configuring the TX Queue Priority
3257  */
3258 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3259 {
3260 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3261 	u32 queue;
3262 	u32 prio;
3263 
3264 	for (queue = 0; queue < tx_queues_count; queue++) {
3265 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3266 			continue;
3267 
3268 		prio = priv->plat->tx_queues_cfg[queue].prio;
3269 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3270 	}
3271 }
3272 
3273 /**
3274  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3275  *  @priv: driver private structure
3276  *  Description: It is used for configuring the RX queue routing
3277  */
3278 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3279 {
3280 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3281 	u32 queue;
3282 	u8 packet;
3283 
3284 	for (queue = 0; queue < rx_queues_count; queue++) {
3285 		/* no specific packet type routing specified for the queue */
3286 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3287 			continue;
3288 
3289 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3290 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3291 	}
3292 }
3293 
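/* Enable RSS when the hardware supports it, the platform allows it and
 * NETIF_F_RXHASH is set, then program the RSS configuration accordingly.
 */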
3294 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3295 {
3296 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3297 		priv->rss.enable = false;
3298 		return;
3299 	}
3300 
3301 	if (priv->dev->features & NETIF_F_RXHASH)
3302 		priv->rss.enable = true;
3303 	else
3304 		priv->rss.enable = false;
3305 
3306 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3307 			     priv->plat->rx_queues_to_use);
3308 }
3309 
3310 /**
3311  *  stmmac_mtl_configuration - Configure MTL
3312  *  @priv: driver private structure
3313  *  Description: It is used for configuring the MTL
3314  */
3315 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3316 {
3317 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3318 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3319 
3320 	if (tx_queues_count > 1)
3321 		stmmac_set_tx_queue_weight(priv);
3322 
3323 	/* Configure MTL RX algorithms */
3324 	if (rx_queues_count > 1)
3325 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3326 				priv->plat->rx_sched_algorithm);
3327 
3328 	/* Configure MTL TX algorithms */
3329 	if (tx_queues_count > 1)
3330 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3331 				priv->plat->tx_sched_algorithm);
3332 
3333 	/* Configure CBS in AVB TX queues */
3334 	if (tx_queues_count > 1)
3335 		stmmac_configure_cbs(priv);
3336 
3337 	/* Map RX MTL to DMA channels */
3338 	stmmac_rx_queue_dma_chan_map(priv);
3339 
3340 	/* Enable MAC RX Queues */
3341 	stmmac_mac_enable_rx_queues(priv);
3342 
3343 	/* Set RX priorities */
3344 	if (rx_queues_count > 1)
3345 		stmmac_mac_config_rx_queues_prio(priv);
3346 
3347 	/* Set TX priorities */
3348 	if (tx_queues_count > 1)
3349 		stmmac_mac_config_tx_queues_prio(priv);
3350 
3351 	/* Set RX routing */
3352 	if (rx_queues_count > 1)
3353 		stmmac_mac_config_rx_queues_routing(priv);
3354 
3355 	/* Receive Side Scaling */
3356 	if (rx_queues_count > 1)
3357 		stmmac_mac_config_rss(priv);
3358 }
3359 
3360 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3361 {
3362 	if (priv->dma_cap.asp) {
3363 		netdev_info(priv->dev, "Enabling Safety Features\n");
3364 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3365 					  priv->plat->safety_feat_cfg);
3366 	} else {
3367 		netdev_info(priv->dev, "No Safety Features support found\n");
3368 	}
3369 }
3370 
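/* Create the single-threaded workqueue used to run the Frame Preemption (FPE)
 * handshake task for this interface.
 */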
3371 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3372 {
3373 	char *name;
3374 
3375 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3376 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3377 
3378 	name = priv->wq_name;
3379 	sprintf(name, "%s-fpe", priv->dev->name);
3380 
3381 	priv->fpe_wq = create_singlethread_workqueue(name);
3382 	if (!priv->fpe_wq) {
3383 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3384 
3385 		return -ENOMEM;
3386 	}
3387 	netdev_info(priv->dev, "FPE workqueue start");
3388 
3389 	return 0;
3390 }
3391 
3392 /**
3393  * stmmac_hw_setup - setup mac in a usable state.
3394  *  @dev : pointer to the device structure.
3395  *  @ptp_register: register PTP if set
3396  *  Description:
3397  *  this is the main function to set up the HW in a usable state: the
3398  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3399  *  checksum features, timers) and the DMA is ready to start receiving
3400  *  and transmitting.
3401  *  Return value:
3402  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3403  *  file on failure.
3404  */
3405 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3406 {
3407 	struct stmmac_priv *priv = netdev_priv(dev);
3408 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3409 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3410 	bool sph_en;
3411 	u32 chan;
3412 	int ret;
3413 
3414 	/* Make sure RX clock is enabled */
3415 	if (priv->hw->phylink_pcs)
3416 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3417 
3418 	/* DMA initialization and SW reset */
3419 	ret = stmmac_init_dma_engine(priv);
3420 	if (ret < 0) {
3421 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3422 			   __func__);
3423 		return ret;
3424 	}
3425 
3426 	/* Copy the MAC addr into the HW  */
3427 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3428 
3429 	/* PS and related bits will be programmed according to the speed */
3430 	if (priv->hw->pcs) {
3431 		int speed = priv->plat->mac_port_sel_speed;
3432 
3433 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3434 		    (speed == SPEED_1000)) {
3435 			priv->hw->ps = speed;
3436 		} else {
3437 			dev_warn(priv->device, "invalid port speed\n");
3438 			priv->hw->ps = 0;
3439 		}
3440 	}
3441 
3442 	/* Initialize the MAC Core */
3443 	stmmac_core_init(priv, priv->hw, dev);
3444 
3445 	/* Initialize MTL*/
3446 	stmmac_mtl_configuration(priv);
3447 
3448 	/* Initialize Safety Features */
3449 	stmmac_safety_feat_configuration(priv);
3450 
3451 	ret = stmmac_rx_ipc(priv, priv->hw);
3452 	if (!ret) {
3453 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3454 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3455 		priv->hw->rx_csum = 0;
3456 	}
3457 
3458 	/* Enable the MAC Rx/Tx */
3459 	stmmac_mac_set(priv, priv->ioaddr, true);
3460 
3461 	/* Set the HW DMA mode and the COE */
3462 	stmmac_dma_operation_mode(priv);
3463 
3464 	stmmac_mmc_setup(priv);
3465 
3466 	if (ptp_register) {
3467 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3468 		if (ret < 0)
3469 			netdev_warn(priv->dev,
3470 				    "failed to enable PTP reference clock: %pe\n",
3471 				    ERR_PTR(ret));
3472 	}
3473 
3474 	ret = stmmac_init_ptp(priv);
3475 	if (ret == -EOPNOTSUPP)
3476 		netdev_info(priv->dev, "PTP not supported by HW\n");
3477 	else if (ret)
3478 		netdev_warn(priv->dev, "PTP init failed\n");
3479 	else if (ptp_register)
3480 		stmmac_ptp_register(priv);
3481 
3482 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3483 
3484 	/* Convert the timer from msec to usec */
3485 	if (!priv->tx_lpi_timer)
3486 		priv->tx_lpi_timer = eee_timer * 1000;
3487 
3488 	if (priv->use_riwt) {
3489 		u32 queue;
3490 
3491 		for (queue = 0; queue < rx_cnt; queue++) {
3492 			if (!priv->rx_riwt[queue])
3493 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3494 
3495 			stmmac_rx_watchdog(priv, priv->ioaddr,
3496 					   priv->rx_riwt[queue], queue);
3497 		}
3498 	}
3499 
3500 	if (priv->hw->pcs)
3501 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3502 
3503 	/* set TX and RX rings length */
3504 	stmmac_set_rings_length(priv);
3505 
3506 	/* Enable TSO */
3507 	if (priv->tso) {
3508 		for (chan = 0; chan < tx_cnt; chan++) {
3509 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3510 
3511 			/* TSO and TBS cannot co-exist */
3512 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3513 				continue;
3514 
3515 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3516 		}
3517 	}
3518 
3519 	/* Enable Split Header */
3520 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3521 	for (chan = 0; chan < rx_cnt; chan++)
3522 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3523 
3524 
3525 	/* VLAN Tag Insertion */
3526 	if (priv->dma_cap.vlins)
3527 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3528 
3529 	/* TBS */
3530 	for (chan = 0; chan < tx_cnt; chan++) {
3531 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3532 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3533 
3534 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3535 	}
3536 
3537 	/* Configure real RX and TX queues */
3538 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3539 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3540 
3541 	/* Start the ball rolling... */
3542 	stmmac_start_all_dma(priv);
3543 
3544 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3545 
3546 	if (priv->dma_cap.fpesel) {
3547 		stmmac_fpe_start_wq(priv);
3548 
3549 		if (priv->plat->fpe_cfg->enable)
3550 			stmmac_fpe_handshake(priv, true);
3551 	}
3552 
3553 	return 0;
3554 }
3555 
3556 static void stmmac_hw_teardown(struct net_device *dev)
3557 {
3558 	struct stmmac_priv *priv = netdev_priv(dev);
3559 
3560 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3561 }
3562 
3563 static void stmmac_free_irq(struct net_device *dev,
3564 			    enum request_irq_err irq_err, int irq_idx)
3565 {
3566 	struct stmmac_priv *priv = netdev_priv(dev);
3567 	int j;
3568 
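	/* Tear down in the reverse order of the request path: each case falls
	 * through so that every IRQ requested before the failing step (or all
	 * of them, for REQ_IRQ_ERR_ALL) is released.
	 */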
3569 	switch (irq_err) {
3570 	case REQ_IRQ_ERR_ALL:
3571 		irq_idx = priv->plat->tx_queues_to_use;
3572 		fallthrough;
3573 	case REQ_IRQ_ERR_TX:
3574 		for (j = irq_idx - 1; j >= 0; j--) {
3575 			if (priv->tx_irq[j] > 0) {
3576 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3577 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3578 			}
3579 		}
3580 		irq_idx = priv->plat->rx_queues_to_use;
3581 		fallthrough;
3582 	case REQ_IRQ_ERR_RX:
3583 		for (j = irq_idx - 1; j >= 0; j--) {
3584 			if (priv->rx_irq[j] > 0) {
3585 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3586 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3587 			}
3588 		}
3589 
3590 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3591 			free_irq(priv->sfty_ue_irq, dev);
3592 		fallthrough;
3593 	case REQ_IRQ_ERR_SFTY_UE:
3594 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3595 			free_irq(priv->sfty_ce_irq, dev);
3596 		fallthrough;
3597 	case REQ_IRQ_ERR_SFTY_CE:
3598 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3599 			free_irq(priv->lpi_irq, dev);
3600 		fallthrough;
3601 	case REQ_IRQ_ERR_LPI:
3602 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3603 			free_irq(priv->wol_irq, dev);
3604 		fallthrough;
3605 	case REQ_IRQ_ERR_SFTY:
3606 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3607 			free_irq(priv->sfty_irq, dev);
3608 		fallthrough;
3609 	case REQ_IRQ_ERR_WOL:
3610 		free_irq(dev->irq, dev);
3611 		fallthrough;
3612 	case REQ_IRQ_ERR_MAC:
3613 	case REQ_IRQ_ERR_NO:
3614 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3615 		break;
3616 	}
3617 }
3618 
3619 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3620 {
3621 	struct stmmac_priv *priv = netdev_priv(dev);
3622 	enum request_irq_err irq_err;
3623 	cpumask_t cpu_mask;
3624 	int irq_idx = 0;
3625 	char *int_name;
3626 	int ret;
3627 	int i;
3628 
3629 	/* For common interrupt */
3630 	int_name = priv->int_name_mac;
3631 	sprintf(int_name, "%s:%s", dev->name, "mac");
3632 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3633 			  0, int_name, dev);
3634 	if (unlikely(ret < 0)) {
3635 		netdev_err(priv->dev,
3636 			   "%s: alloc mac MSI %d (error: %d)\n",
3637 			   __func__, dev->irq, ret);
3638 		irq_err = REQ_IRQ_ERR_MAC;
3639 		goto irq_error;
3640 	}
3641 
3642 	/* Request the Wake IRQ in case another line
3643 	 * is used for WoL
3644 	 */
3645 	priv->wol_irq_disabled = true;
3646 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3647 		int_name = priv->int_name_wol;
3648 		sprintf(int_name, "%s:%s", dev->name, "wol");
3649 		ret = request_irq(priv->wol_irq,
3650 				  stmmac_mac_interrupt,
3651 				  0, int_name, dev);
3652 		if (unlikely(ret < 0)) {
3653 			netdev_err(priv->dev,
3654 				   "%s: alloc wol MSI %d (error: %d)\n",
3655 				   __func__, priv->wol_irq, ret);
3656 			irq_err = REQ_IRQ_ERR_WOL;
3657 			goto irq_error;
3658 		}
3659 	}
3660 
3661 	/* Request the LPI IRQ in case another line
3662 	 * is used for LPI
3663 	 */
3664 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3665 		int_name = priv->int_name_lpi;
3666 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3667 		ret = request_irq(priv->lpi_irq,
3668 				  stmmac_mac_interrupt,
3669 				  0, int_name, dev);
3670 		if (unlikely(ret < 0)) {
3671 			netdev_err(priv->dev,
3672 				   "%s: alloc lpi MSI %d (error: %d)\n",
3673 				   __func__, priv->lpi_irq, ret);
3674 			irq_err = REQ_IRQ_ERR_LPI;
3675 			goto irq_error;
3676 		}
3677 	}
3678 
3679 	/* Request the common Safety Feature Correctable/Uncorrectable
3680 	 * Error line in case another line is used
3681 	 */
3682 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3683 		int_name = priv->int_name_sfty;
3684 		sprintf(int_name, "%s:%s", dev->name, "safety");
3685 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3686 				  0, int_name, dev);
3687 		if (unlikely(ret < 0)) {
3688 			netdev_err(priv->dev,
3689 				   "%s: alloc sfty MSI %d (error: %d)\n",
3690 				   __func__, priv->sfty_irq, ret);
3691 			irq_err = REQ_IRQ_ERR_SFTY;
3692 			goto irq_error;
3693 		}
3694 	}
3695 
3696 	/* Request the Safety Feature Correctable Error line in
3697 	 * case another line is used
3698 	 */
3699 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3700 		int_name = priv->int_name_sfty_ce;
3701 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3702 		ret = request_irq(priv->sfty_ce_irq,
3703 				  stmmac_safety_interrupt,
3704 				  0, int_name, dev);
3705 		if (unlikely(ret < 0)) {
3706 			netdev_err(priv->dev,
3707 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3708 				   __func__, priv->sfty_ce_irq, ret);
3709 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3710 			goto irq_error;
3711 		}
3712 	}
3713 
3714 	/* Request the Safety Feature Uncorrectable Error line in
3715 	 * case another line is used
3716 	 */
3717 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3718 		int_name = priv->int_name_sfty_ue;
3719 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3720 		ret = request_irq(priv->sfty_ue_irq,
3721 				  stmmac_safety_interrupt,
3722 				  0, int_name, dev);
3723 		if (unlikely(ret < 0)) {
3724 			netdev_err(priv->dev,
3725 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3726 				   __func__, priv->sfty_ue_irq, ret);
3727 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3728 			goto irq_error;
3729 		}
3730 	}
3731 
3732 	/* Request Rx MSI irq */
3733 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3734 		if (i >= MTL_MAX_RX_QUEUES)
3735 			break;
3736 		if (priv->rx_irq[i] == 0)
3737 			continue;
3738 
3739 		int_name = priv->int_name_rx_irq[i];
3740 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3741 		ret = request_irq(priv->rx_irq[i],
3742 				  stmmac_msi_intr_rx,
3743 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3744 		if (unlikely(ret < 0)) {
3745 			netdev_err(priv->dev,
3746 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3747 				   __func__, i, priv->rx_irq[i], ret);
3748 			irq_err = REQ_IRQ_ERR_RX;
3749 			irq_idx = i;
3750 			goto irq_error;
3751 		}
3752 		cpumask_clear(&cpu_mask);
3753 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3754 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3755 	}
3756 
3757 	/* Request Tx MSI irq */
3758 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3759 		if (i >= MTL_MAX_TX_QUEUES)
3760 			break;
3761 		if (priv->tx_irq[i] == 0)
3762 			continue;
3763 
3764 		int_name = priv->int_name_tx_irq[i];
3765 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3766 		ret = request_irq(priv->tx_irq[i],
3767 				  stmmac_msi_intr_tx,
3768 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3769 		if (unlikely(ret < 0)) {
3770 			netdev_err(priv->dev,
3771 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3772 				   __func__, i, priv->tx_irq[i], ret);
3773 			irq_err = REQ_IRQ_ERR_TX;
3774 			irq_idx = i;
3775 			goto irq_error;
3776 		}
3777 		cpumask_clear(&cpu_mask);
3778 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3779 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3780 	}
3781 
3782 	return 0;
3783 
3784 irq_error:
3785 	stmmac_free_irq(dev, irq_err, irq_idx);
3786 	return ret;
3787 }
3788 
3789 static int stmmac_request_irq_single(struct net_device *dev)
3790 {
3791 	struct stmmac_priv *priv = netdev_priv(dev);
3792 	enum request_irq_err irq_err;
3793 	int ret;
3794 
3795 	ret = request_irq(dev->irq, stmmac_interrupt,
3796 			  IRQF_SHARED, dev->name, dev);
3797 	if (unlikely(ret < 0)) {
3798 		netdev_err(priv->dev,
3799 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3800 			   __func__, dev->irq, ret);
3801 		irq_err = REQ_IRQ_ERR_MAC;
3802 		goto irq_error;
3803 	}
3804 
3805 	/* Request the Wake IRQ in case another line
3806 	 * is used for WoL
3807 	 */
3808 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3809 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3810 				  IRQF_SHARED, dev->name, dev);
3811 		if (unlikely(ret < 0)) {
3812 			netdev_err(priv->dev,
3813 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3814 				   __func__, priv->wol_irq, ret);
3815 			irq_err = REQ_IRQ_ERR_WOL;
3816 			goto irq_error;
3817 		}
3818 	}
3819 
3820 	/* Request the LPI IRQ in case another line is used for LPI */
3821 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3822 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3823 				  IRQF_SHARED, dev->name, dev);
3824 		if (unlikely(ret < 0)) {
3825 			netdev_err(priv->dev,
3826 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3827 				   __func__, priv->lpi_irq, ret);
3828 			irq_err = REQ_IRQ_ERR_LPI;
3829 			goto irq_error;
3830 		}
3831 	}
3832 
3833 	/* Request the common Safety Feature Correctable/Uncorrectable
3834 	 * Error line in case another line is used
3835 	 */
3836 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3837 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3838 				  IRQF_SHARED, dev->name, dev);
3839 		if (unlikely(ret < 0)) {
3840 			netdev_err(priv->dev,
3841 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3842 				   __func__, priv->sfty_irq, ret);
3843 			irq_err = REQ_IRQ_ERR_SFTY;
3844 			goto irq_error;
3845 		}
3846 	}
3847 
3848 	return 0;
3849 
3850 irq_error:
3851 	stmmac_free_irq(dev, irq_err, 0);
3852 	return ret;
3853 }
3854 
3855 static int stmmac_request_irq(struct net_device *dev)
3856 {
3857 	struct stmmac_priv *priv = netdev_priv(dev);
3858 	int ret;
3859 
3860 	/* Request the IRQ lines */
3861 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3862 		ret = stmmac_request_irq_multi_msi(dev);
3863 	else
3864 		ret = stmmac_request_irq_single(dev);
3865 
3866 	return ret;
3867 }
3868 
3869 /**
3870  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3871  *  @priv: driver private structure
3872  *  @mtu: MTU to setup the dma queue and buf with
3873  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3874  *  Allocate the Tx/Rx DMA queues and initialize them.
3875  *  Return value:
3876  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3877  */
3878 static struct stmmac_dma_conf *
3879 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3880 {
3881 	struct stmmac_dma_conf *dma_conf;
3882 	int chan, bfsize, ret;
3883 
3884 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3885 	if (!dma_conf) {
3886 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3887 			   __func__);
3888 		return ERR_PTR(-ENOMEM);
3889 	}
3890 
3891 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3892 	if (bfsize < 0)
3893 		bfsize = 0;
3894 
3895 	if (bfsize < BUF_SIZE_16KiB)
3896 		bfsize = stmmac_set_bfsize(mtu, 0);
3897 
3898 	dma_conf->dma_buf_sz = bfsize;
3899 	/* Choose the TX/RX ring sizes from the ones already defined in the
3900 	 * priv struct, if any.
3901 	 */
3902 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3903 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3904 
3905 	if (!dma_conf->dma_tx_size)
3906 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3907 	if (!dma_conf->dma_rx_size)
3908 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3909 
3910 	/* Early check for TBS availability */
3911 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3912 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3913 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3914 
3915 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3916 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3917 	}
3918 
3919 	ret = alloc_dma_desc_resources(priv, dma_conf);
3920 	if (ret < 0) {
3921 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3922 			   __func__);
3923 		goto alloc_error;
3924 	}
3925 
3926 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3927 	if (ret < 0) {
3928 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3929 			   __func__);
3930 		goto init_error;
3931 	}
3932 
3933 	return dma_conf;
3934 
3935 init_error:
3936 	free_dma_desc_resources(priv, dma_conf);
3937 alloc_error:
3938 	kfree(dma_conf);
3939 	return ERR_PTR(ret);
3940 }
3941 
3942 /**
3943  *  __stmmac_open - open entry point of the driver
3944  *  @dev : pointer to the device structure.
3945  *  @dma_conf :  structure to take the dma data
3946  *  Description:
3947  *  This function is the open entry point of the driver.
3948  *  Return value:
3949  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3950  *  file on failure.
3951  */
3952 static int __stmmac_open(struct net_device *dev,
3953 			 struct stmmac_dma_conf *dma_conf)
3954 {
3955 	struct stmmac_priv *priv = netdev_priv(dev);
3956 	int mode = priv->plat->phy_interface;
3957 	u32 chan;
3958 	int ret;
3959 
3960 	ret = pm_runtime_resume_and_get(priv->device);
3961 	if (ret < 0)
3962 		return ret;
3963 
3964 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3965 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3966 	    (!priv->hw->xpcs ||
3967 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3968 		ret = stmmac_init_phy(dev);
3969 		if (ret) {
3970 			netdev_err(priv->dev,
3971 				   "%s: Cannot attach to PHY (error: %d)\n",
3972 				   __func__, ret);
3973 			goto init_phy_error;
3974 		}
3975 	}
3976 
3977 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3978 
3979 	buf_sz = dma_conf->dma_buf_sz;
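	/* Carry the per-queue TBS enable state over into the new dma_conf so
	 * that it survives the memcpy() into priv->dma_conf below.
	 */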
3980 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3981 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3982 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3983 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3984 
3985 	stmmac_reset_queues_param(priv);
3986 
3987 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3988 	    priv->plat->serdes_powerup) {
3989 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3990 		if (ret < 0) {
3991 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3992 				   __func__);
3993 			goto init_error;
3994 		}
3995 	}
3996 
3997 	ret = stmmac_hw_setup(dev, true);
3998 	if (ret < 0) {
3999 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4000 		goto init_error;
4001 	}
4002 
4003 	stmmac_init_coalesce(priv);
4004 
4005 	phylink_start(priv->phylink);
4006 	/* We may have called phylink_speed_down before */
4007 	phylink_speed_up(priv->phylink);
4008 
4009 	ret = stmmac_request_irq(dev);
4010 	if (ret)
4011 		goto irq_error;
4012 
4013 	stmmac_enable_all_queues(priv);
4014 	netif_tx_start_all_queues(priv->dev);
4015 	stmmac_enable_all_dma_irq(priv);
4016 
4017 	return 0;
4018 
4019 irq_error:
4020 	phylink_stop(priv->phylink);
4021 
4022 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4023 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4024 
4025 	stmmac_hw_teardown(dev);
4026 init_error:
4027 	phylink_disconnect_phy(priv->phylink);
4028 init_phy_error:
4029 	pm_runtime_put(priv->device);
4030 	return ret;
4031 }
4032 
4033 static int stmmac_open(struct net_device *dev)
4034 {
4035 	struct stmmac_priv *priv = netdev_priv(dev);
4036 	struct stmmac_dma_conf *dma_conf;
4037 	int ret;
4038 
4039 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4040 	if (IS_ERR(dma_conf))
4041 		return PTR_ERR(dma_conf);
4042 
4043 	ret = __stmmac_open(dev, dma_conf);
4044 	if (ret)
4045 		free_dma_desc_resources(priv, dma_conf);
4046 
4047 	kfree(dma_conf);
4048 	return ret;
4049 }
4050 
4051 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4052 {
4053 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4054 
4055 	if (priv->fpe_wq) {
4056 		destroy_workqueue(priv->fpe_wq);
4057 		priv->fpe_wq = NULL;
4058 	}
4059 
4060 	netdev_info(priv->dev, "FPE workqueue stop");
4061 }
4062 
4063 /**
4064  *  stmmac_release - close entry point of the driver
4065  *  @dev : device pointer.
4066  *  Description:
4067  *  This is the stop entry point of the driver.
4068  */
4069 static int stmmac_release(struct net_device *dev)
4070 {
4071 	struct stmmac_priv *priv = netdev_priv(dev);
4072 	u32 chan;
4073 
4074 	if (device_may_wakeup(priv->device))
4075 		phylink_speed_down(priv->phylink, false);
4076 	/* Stop and disconnect the PHY */
4077 	phylink_stop(priv->phylink);
4078 	phylink_disconnect_phy(priv->phylink);
4079 
4080 	stmmac_disable_all_queues(priv);
4081 
4082 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4083 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4084 
4085 	netif_tx_disable(dev);
4086 
4087 	/* Free the IRQ lines */
4088 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4089 
4090 	if (priv->eee_enabled) {
4091 		priv->tx_path_in_lpi_mode = false;
4092 		del_timer_sync(&priv->eee_ctrl_timer);
4093 	}
4094 
4095 	/* Stop TX/RX DMA and clear the descriptors */
4096 	stmmac_stop_all_dma(priv);
4097 
4098 	/* Release and free the Rx/Tx resources */
4099 	free_dma_desc_resources(priv, &priv->dma_conf);
4100 
4101 	/* Disable the MAC Rx/Tx */
4102 	stmmac_mac_set(priv, priv->ioaddr, false);
4103 
4104 	/* Powerdown Serdes if there is */
4105 	if (priv->plat->serdes_powerdown)
4106 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4107 
4108 	netif_carrier_off(dev);
4109 
4110 	stmmac_release_ptp(priv);
4111 
4112 	pm_runtime_put(priv->device);
4113 
4114 	if (priv->dma_cap.fpesel)
4115 		stmmac_fpe_stop_wq(priv);
4116 
4117 	return 0;
4118 }
4119 
4120 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4121 			       struct stmmac_tx_queue *tx_q)
4122 {
4123 	u16 tag = 0x0, inner_tag = 0x0;
4124 	u32 inner_type = 0x0;
4125 	struct dma_desc *p;
4126 
4127 	if (!priv->dma_cap.vlins)
4128 		return false;
4129 	if (!skb_vlan_tag_present(skb))
4130 		return false;
4131 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4132 		inner_tag = skb_vlan_tag_get(skb);
4133 		inner_type = STMMAC_VLAN_INSERT;
4134 	}
4135 
4136 	tag = skb_vlan_tag_get(skb);
4137 
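	/* With TBS available the ring holds enhanced descriptors; the regular
	 * descriptor is embedded in them as the "basic" member.
	 */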
4138 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4139 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4140 	else
4141 		p = &tx_q->dma_tx[tx_q->cur_tx];
4142 
4143 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4144 		return false;
4145 
4146 	stmmac_set_tx_owner(priv, p);
4147 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4148 	return true;
4149 }
4150 
4151 /**
4152  *  stmmac_tso_allocator - Allocate TSO payload descriptors
4153  *  @priv: driver private structure
4154  *  @des: buffer start address
4155  *  @total_len: total length to fill in descriptors
4156  *  @last_segment: condition for the last descriptor
4157  *  @queue: TX queue index
4158  *  Description:
4159  *  This function fills the descriptors for the given buffer, requesting new
4160  *  descriptors as needed and splitting the payload into TSO_MAX_BUFF_SIZE chunks
4161  */
4162 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4163 				 int total_len, bool last_segment, u32 queue)
4164 {
4165 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4166 	struct dma_desc *desc;
4167 	u32 buff_size;
4168 	int tmp_len;
4169 
4170 	tmp_len = total_len;
4171 
4172 	while (tmp_len > 0) {
4173 		dma_addr_t curr_addr;
4174 
4175 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4176 						priv->dma_conf.dma_tx_size);
4177 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4178 
4179 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4180 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4181 		else
4182 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4183 
4184 		curr_addr = des + (total_len - tmp_len);
4185 		if (priv->dma_cap.addr64 <= 32)
4186 			desc->des0 = cpu_to_le32(curr_addr);
4187 		else
4188 			stmmac_set_desc_addr(priv, desc, curr_addr);
4189 
4190 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4191 			    TSO_MAX_BUFF_SIZE : tmp_len;
4192 
4193 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4194 				0, 1,
4195 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4196 				0, 0);
4197 
4198 		tmp_len -= TSO_MAX_BUFF_SIZE;
4199 	}
4200 }
4201 
4202 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4203 {
4204 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4205 	int desc_size;
4206 
4207 	if (likely(priv->extend_desc))
4208 		desc_size = sizeof(struct dma_extended_desc);
4209 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4210 		desc_size = sizeof(struct dma_edesc);
4211 	else
4212 		desc_size = sizeof(struct dma_desc);
4213 
4214 	/* The own bit must be the last thing written when preparing the
4215 	 * descriptor; a barrier is then needed to make sure that everything
4216 	 * is coherent before handing the descriptor over to the DMA engine.
4217 	 */
4218 	wmb();
4219 
4220 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4221 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4222 }
4223 
4224 /**
4225  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4226  *  @skb : the socket buffer
4227  *  @dev : device pointer
4228  *  Description: this is the transmit function that is called on TSO frames
4229  *  (support available on GMAC4 and newer chips).
4230  *  The diagram below shows the ring programming in case of TSO frames:
4231  *
4232  *  First Descriptor
4233  *   --------
4234  *   | DES0 |---> buffer1 = L2/L3/L4 header
4235  *   | DES1 |---> TCP Payload (can continue on next descr...)
4236  *   | DES2 |---> buffer 1 and 2 len
4237  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4238  *   --------
4239  *	|
4240  *     ...
4241  *	|
4242  *   --------
4243  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4244  *   | DES1 | --|
4245  *   | DES2 | --> buffer 1 and 2 len
4246  *   | DES3 |
4247  *   --------
4248  *
4249  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs to be programmed when the MSS changes.
4250  */
4251 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4252 {
4253 	struct dma_desc *desc, *first, *mss_desc = NULL;
4254 	struct stmmac_priv *priv = netdev_priv(dev);
4255 	int nfrags = skb_shinfo(skb)->nr_frags;
4256 	u32 queue = skb_get_queue_mapping(skb);
4257 	unsigned int first_entry, tx_packets;
4258 	struct stmmac_txq_stats *txq_stats;
4259 	int tmp_pay_len = 0, first_tx;
4260 	struct stmmac_tx_queue *tx_q;
4261 	bool has_vlan, set_ic;
4262 	u8 proto_hdr_len, hdr;
4263 	u32 pay_len, mss;
4264 	dma_addr_t des;
4265 	int i;
4266 
4267 	tx_q = &priv->dma_conf.tx_queue[queue];
4268 	txq_stats = &priv->xstats.txq_stats[queue];
4269 	first_tx = tx_q->cur_tx;
4270 
4271 	/* Compute header lengths */
4272 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4273 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4274 		hdr = sizeof(struct udphdr);
4275 	} else {
4276 		proto_hdr_len = skb_tcp_all_headers(skb);
4277 		hdr = tcp_hdrlen(skb);
4278 	}
4279 
4280 	/* Desc availability based on threshold should be safe enough */
4281 	if (unlikely(stmmac_tx_avail(priv, queue) <
4282 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4283 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4284 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4285 								queue));
4286 			/* This is a hard error, log it. */
4287 			netdev_err(priv->dev,
4288 				   "%s: Tx Ring full when queue awake\n",
4289 				   __func__);
4290 		}
4291 		return NETDEV_TX_BUSY;
4292 	}
4293 
4294 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4295 
4296 	mss = skb_shinfo(skb)->gso_size;
4297 
4298 	/* set new MSS value if needed */
4299 	if (mss != tx_q->mss) {
4300 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4301 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4302 		else
4303 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4304 
4305 		stmmac_set_mss(priv, mss_desc, mss);
4306 		tx_q->mss = mss;
4307 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4308 						priv->dma_conf.dma_tx_size);
4309 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4310 	}
4311 
4312 	if (netif_msg_tx_queued(priv)) {
4313 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4314 			__func__, hdr, proto_hdr_len, pay_len, mss);
4315 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4316 			skb->data_len);
4317 	}
4318 
4319 	/* Check if VLAN can be inserted by HW */
4320 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4321 
4322 	first_entry = tx_q->cur_tx;
4323 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4324 
4325 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4326 		desc = &tx_q->dma_entx[first_entry].basic;
4327 	else
4328 		desc = &tx_q->dma_tx[first_entry];
4329 	first = desc;
4330 
4331 	if (has_vlan)
4332 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4333 
4334 	/* first descriptor: fill Headers on Buf1 */
4335 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4336 			     DMA_TO_DEVICE);
4337 	if (dma_mapping_error(priv->device, des))
4338 		goto dma_map_err;
4339 
4340 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4341 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4342 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4343 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4344 
4345 	if (priv->dma_cap.addr64 <= 32) {
4346 		first->des0 = cpu_to_le32(des);
4347 
4348 		/* Fill start of payload in buff2 of first descriptor */
4349 		if (pay_len)
4350 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4351 
4352 		/* If needed take extra descriptors to fill the remaining payload */
4353 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4354 	} else {
4355 		stmmac_set_desc_addr(priv, first, des);
4356 		tmp_pay_len = pay_len;
4357 		des += proto_hdr_len;
4358 		pay_len = 0;
4359 	}
4360 
4361 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
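	/* Fill the remaining linear payload, spreading it over as many
	 * descriptors as needed (TSO_MAX_BUFF_SIZE per descriptor).
	 */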
4362 
4363 	/* Prepare fragments */
4364 	for (i = 0; i < nfrags; i++) {
4365 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4366 
4367 		des = skb_frag_dma_map(priv->device, frag, 0,
4368 				       skb_frag_size(frag),
4369 				       DMA_TO_DEVICE);
4370 		if (dma_mapping_error(priv->device, des))
4371 			goto dma_map_err;
4372 
4373 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4374 				     (i == nfrags - 1), queue);
4375 
4376 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4377 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4378 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4379 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4380 	}
4381 
4382 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4383 
4384 	/* Only the last descriptor gets to point to the skb. */
4385 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4386 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4387 
4388 	/* Manage tx mitigation */
4389 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4390 	tx_q->tx_count_frames += tx_packets;
4391 
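	/* Decide whether to set the Interrupt on Completion bit: always when a
	 * HW timestamp is requested, otherwise roughly once every
	 * tx_coal_frames transmitted packets.
	 */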
4392 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4393 		set_ic = true;
4394 	else if (!priv->tx_coal_frames[queue])
4395 		set_ic = false;
4396 	else if (tx_packets > priv->tx_coal_frames[queue])
4397 		set_ic = true;
4398 	else if ((tx_q->tx_count_frames %
4399 		  priv->tx_coal_frames[queue]) < tx_packets)
4400 		set_ic = true;
4401 	else
4402 		set_ic = false;
4403 
4404 	if (set_ic) {
4405 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4406 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4407 		else
4408 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4409 
4410 		tx_q->tx_count_frames = 0;
4411 		stmmac_set_tx_ic(priv, desc);
4412 	}
4413 
4414 	/* We've used all descriptors we need for this skb, however,
4415 	 * advance cur_tx so that it references a fresh descriptor.
4416 	 * ndo_start_xmit will fill this descriptor the next time it's
4417 	 * called and stmmac_tx_clean may clean up to this descriptor.
4418 	 */
4419 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4420 
4421 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4422 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
4423 			  __func__);
4424 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4425 	}
4426 
4427 	u64_stats_update_begin(&txq_stats->q_syncp);
4428 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4429 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4430 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4431 	if (set_ic)
4432 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4433 	u64_stats_update_end(&txq_stats->q_syncp);
4434 
4435 	if (priv->sarc_type)
4436 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4437 
4438 	skb_tx_timestamp(skb);
4439 
4440 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4441 		     priv->hwts_tx_en)) {
4442 		/* declare that device is doing timestamping */
4443 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4444 		stmmac_enable_tx_timestamp(priv, first);
4445 	}
4446 
4447 	/* Complete the first descriptor before granting the DMA */
4448 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4449 			proto_hdr_len,
4450 			pay_len,
4451 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4452 			hdr / 4, (skb->len - proto_hdr_len));
4453 
4454 	/* If context desc is used to change MSS */
4455 	if (mss_desc) {
4456 		/* Make sure that first descriptor has been completely
4457 		 * written, including its own bit. This is because MSS is
4458 		 * actually before first descriptor, so we need to make
4459 		 * sure that MSS's own bit is the last thing written.
4460 		 */
4461 		dma_wmb();
4462 		stmmac_set_tx_owner(priv, mss_desc);
4463 	}
4464 
4465 	if (netif_msg_pktdata(priv)) {
4466 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4467 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4468 			tx_q->cur_tx, first, nfrags);
4469 		pr_info(">>> frame to be transmitted: ");
4470 		print_pkt(skb->data, skb_headlen(skb));
4471 	}
4472 
4473 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4474 
4475 	stmmac_flush_tx_descriptors(priv, queue);
4476 	stmmac_tx_timer_arm(priv, queue);
4477 
4478 	return NETDEV_TX_OK;
4479 
4480 dma_map_err:
4481 	dev_err(priv->device, "Tx dma map failed\n");
4482 	dev_kfree_skb(skb);
4483 	priv->xstats.tx_dropped++;
4484 	return NETDEV_TX_OK;
4485 }
4486 
4487 /**
4488  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4489  * @skb: socket buffer to check
4490  *
4491  * Check if a packet has an ethertype that will trigger the IP header checks
4492  * and IP/TCP checksum engine of the stmmac core.
4493  *
4494  * Return: true if the ethertype can trigger the checksum engine, false
4495  * otherwise
4496  */
4497 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4498 {
4499 	int depth = 0;
4500 	__be16 proto;
4501 
4502 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4503 				    &depth);
4504 
4505 	return (depth <= ETH_HLEN) &&
4506 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4507 }
4508 
4509 /**
4510  *  stmmac_xmit - Tx entry point of the driver
4511  *  @skb : the socket buffer
4512  *  @dev : device pointer
4513  *  Description : this is the tx entry point of the driver.
4514  *  It programs the chain or the ring and supports oversized frames
4515  *  and SG feature.
4516  */
4517 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4518 {
4519 	unsigned int first_entry, tx_packets, enh_desc;
4520 	struct stmmac_priv *priv = netdev_priv(dev);
4521 	unsigned int nopaged_len = skb_headlen(skb);
4522 	int i, csum_insertion = 0, is_jumbo = 0;
4523 	u32 queue = skb_get_queue_mapping(skb);
4524 	int nfrags = skb_shinfo(skb)->nr_frags;
4525 	int gso = skb_shinfo(skb)->gso_type;
4526 	struct stmmac_txq_stats *txq_stats;
4527 	struct dma_edesc *tbs_desc = NULL;
4528 	struct dma_desc *desc, *first;
4529 	struct stmmac_tx_queue *tx_q;
4530 	bool has_vlan, set_ic;
4531 	int entry, first_tx;
4532 	dma_addr_t des;
4533 
4534 	tx_q = &priv->dma_conf.tx_queue[queue];
4535 	txq_stats = &priv->xstats.txq_stats[queue];
4536 	first_tx = tx_q->cur_tx;
4537 
4538 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4539 		stmmac_disable_eee_mode(priv);
4540 
4541 	/* Manage oversized TCP frames for GMAC4 device */
4542 	if (skb_is_gso(skb) && priv->tso) {
4543 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4544 			return stmmac_tso_xmit(skb, dev);
4545 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4546 			return stmmac_tso_xmit(skb, dev);
4547 	}
4548 
4549 	if (priv->plat->est && priv->plat->est->enable &&
4550 	    priv->plat->est->max_sdu[queue] &&
4551 	    skb->len > priv->plat->est->max_sdu[queue]) {
4552 		priv->xstats.max_sdu_txq_drop[queue]++;
4553 		goto max_sdu_err;
4554 	}
4555 
4556 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4557 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4558 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4559 								queue));
4560 			/* This is a hard error, log it. */
4561 			netdev_err(priv->dev,
4562 				   "%s: Tx Ring full when queue awake\n",
4563 				   __func__);
4564 		}
4565 		return NETDEV_TX_BUSY;
4566 	}
4567 
4568 	/* Check if VLAN can be inserted by HW */
4569 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4570 
4571 	entry = tx_q->cur_tx;
4572 	first_entry = entry;
4573 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4574 
4575 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4576 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4577 	 * queues. In that case, checksum offloading for those queues that don't
4578 	 * support tx coe needs to fall back to software checksum calculation.
4579 	 *
4580 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4581 	 * also have to be checksummed in software.
4582 	 */
4583 	if (csum_insertion &&
4584 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4585 	     !stmmac_has_ip_ethertype(skb))) {
4586 		if (unlikely(skb_checksum_help(skb)))
4587 			goto dma_map_err;
4588 		csum_insertion = !csum_insertion;
4589 	}
4590 
4591 	if (likely(priv->extend_desc))
4592 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4593 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4594 		desc = &tx_q->dma_entx[entry].basic;
4595 	else
4596 		desc = tx_q->dma_tx + entry;
4597 
4598 	first = desc;
4599 
4600 	if (has_vlan)
4601 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4602 
4603 	enh_desc = priv->plat->enh_desc;
4604 	/* To program the descriptors according to the size of the frame */
4605 	if (enh_desc)
4606 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4607 
4608 	if (unlikely(is_jumbo)) {
4609 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4610 		if (unlikely(entry < 0) && (entry != -EINVAL))
4611 			goto dma_map_err;
4612 	}
4613 
4614 	for (i = 0; i < nfrags; i++) {
4615 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4616 		int len = skb_frag_size(frag);
4617 		bool last_segment = (i == (nfrags - 1));
4618 
4619 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4620 		WARN_ON(tx_q->tx_skbuff[entry]);
4621 
4622 		if (likely(priv->extend_desc))
4623 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4624 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4625 			desc = &tx_q->dma_entx[entry].basic;
4626 		else
4627 			desc = tx_q->dma_tx + entry;
4628 
4629 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4630 				       DMA_TO_DEVICE);
4631 		if (dma_mapping_error(priv->device, des))
4632 			goto dma_map_err; /* should reuse desc w/o issues */
4633 
4634 		tx_q->tx_skbuff_dma[entry].buf = des;
4635 
4636 		stmmac_set_desc_addr(priv, desc, des);
4637 
4638 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4639 		tx_q->tx_skbuff_dma[entry].len = len;
4640 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4641 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4642 
4643 		/* Prepare the descriptor and set the own bit too */
4644 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4645 				priv->mode, 1, last_segment, skb->len);
4646 	}
4647 
4648 	/* Only the last descriptor gets to point to the skb. */
4649 	tx_q->tx_skbuff[entry] = skb;
4650 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4651 
4652 	/* According to the coalesce parameter the IC bit for the latest
4653 	 * segment is reset and the timer re-started to clean the tx status.
4654 	 * This approach takes care of the fragments: desc is the first
4655 	 * element in case of no SG.
4656 	 */
4657 	tx_packets = (entry + 1) - first_tx;
4658 	tx_q->tx_count_frames += tx_packets;
4659 
4660 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4661 		set_ic = true;
4662 	else if (!priv->tx_coal_frames[queue])
4663 		set_ic = false;
4664 	else if (tx_packets > priv->tx_coal_frames[queue])
4665 		set_ic = true;
4666 	else if ((tx_q->tx_count_frames %
4667 		  priv->tx_coal_frames[queue]) < tx_packets)
4668 		set_ic = true;
4669 	else
4670 		set_ic = false;
4671 
4672 	if (set_ic) {
4673 		if (likely(priv->extend_desc))
4674 			desc = &tx_q->dma_etx[entry].basic;
4675 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4676 			desc = &tx_q->dma_entx[entry].basic;
4677 		else
4678 			desc = &tx_q->dma_tx[entry];
4679 
4680 		tx_q->tx_count_frames = 0;
4681 		stmmac_set_tx_ic(priv, desc);
4682 	}
4683 
4684 	/* We've used all descriptors we need for this skb, however,
4685 	 * advance cur_tx so that it references a fresh descriptor.
4686 	 * ndo_start_xmit will fill this descriptor the next time it's
4687 	 * called and stmmac_tx_clean may clean up to this descriptor.
4688 	 */
4689 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4690 	tx_q->cur_tx = entry;
4691 
4692 	if (netif_msg_pktdata(priv)) {
4693 		netdev_dbg(priv->dev,
4694 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4695 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4696 			   entry, first, nfrags);
4697 
4698 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4699 		print_pkt(skb->data, skb->len);
4700 	}
4701 
4702 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4703 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitting packets\n",
4704 			  __func__);
4705 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4706 	}
4707 
4708 	u64_stats_update_begin(&txq_stats->q_syncp);
4709 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4710 	if (set_ic)
4711 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4712 	u64_stats_update_end(&txq_stats->q_syncp);
4713 
4714 	if (priv->sarc_type)
4715 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4716 
4717 	skb_tx_timestamp(skb);
4718 
4719 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4720 	 * problems because all the descriptors are actually ready to be
4721 	 * passed to the DMA engine.
4722 	 */
4723 	if (likely(!is_jumbo)) {
4724 		bool last_segment = (nfrags == 0);
4725 
4726 		des = dma_map_single(priv->device, skb->data,
4727 				     nopaged_len, DMA_TO_DEVICE);
4728 		if (dma_mapping_error(priv->device, des))
4729 			goto dma_map_err;
4730 
4731 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4732 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4733 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4734 
4735 		stmmac_set_desc_addr(priv, first, des);
4736 
4737 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4738 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4739 
4740 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4741 			     priv->hwts_tx_en)) {
4742 			/* declare that device is doing timestamping */
4743 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4744 			stmmac_enable_tx_timestamp(priv, first);
4745 		}
4746 
4747 		/* Prepare the first descriptor setting the OWN bit too */
4748 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4749 				csum_insertion, priv->mode, 0, last_segment,
4750 				skb->len);
4751 	}
4752 
4753 	if (tx_q->tbs & STMMAC_TBS_EN) {
4754 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4755 
4756 		tbs_desc = &tx_q->dma_entx[first_entry];
4757 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4758 	}
4759 
4760 	stmmac_set_tx_owner(priv, first);
4761 
4762 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4763 
4764 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4765 
4766 	stmmac_flush_tx_descriptors(priv, queue);
4767 	stmmac_tx_timer_arm(priv, queue);
4768 
4769 	return NETDEV_TX_OK;
4770 
4771 dma_map_err:
4772 	netdev_err(priv->dev, "Tx DMA map failed\n");
4773 max_sdu_err:
4774 	dev_kfree_skb(skb);
4775 	priv->xstats.tx_dropped++;
4776 	return NETDEV_TX_OK;
4777 }
4778 
4779 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4780 {
4781 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4782 	__be16 vlan_proto = veth->h_vlan_proto;
4783 	u16 vlanid;
4784 
4785 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4786 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4787 	    (vlan_proto == htons(ETH_P_8021AD) &&
4788 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4789 		/* pop the vlan tag */
4790 		vlanid = ntohs(veth->h_vlan_TCI);
4791 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4792 		skb_pull(skb, VLAN_HLEN);
4793 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4794 	}
4795 }
4796 
4797 /**
4798  * stmmac_rx_refill - refill the used RX buffers
4799  * @priv: driver private structure
4800  * @queue: RX queue index
4801  * Description : reallocate the RX buffers used by the reception process,
4802  * which is based on zero-copy.
4803  */
4804 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4805 {
4806 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4807 	int dirty = stmmac_rx_dirty(priv, queue);
4808 	unsigned int entry = rx_q->dirty_rx;
4809 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4810 
4811 	if (priv->dma_cap.host_dma_width <= 32)
4812 		gfp |= GFP_DMA32;
4813 
4814 	while (dirty-- > 0) {
4815 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4816 		struct dma_desc *p;
4817 		bool use_rx_wd;
4818 
4819 		if (priv->extend_desc)
4820 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4821 		else
4822 			p = rx_q->dma_rx + entry;
4823 
4824 		if (!buf->page) {
4825 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4826 			if (!buf->page)
4827 				break;
4828 		}
4829 
4830 		if (priv->sph && !buf->sec_page) {
4831 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4832 			if (!buf->sec_page)
4833 				break;
4834 
4835 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4836 		}
4837 
4838 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4839 
4840 		stmmac_set_desc_addr(priv, p, buf->addr);
4841 		if (priv->sph)
4842 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4843 		else
4844 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4845 		stmmac_refill_desc3(priv, rx_q, p);
4846 
4847 		rx_q->rx_count_frames++;
4848 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4849 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4850 			rx_q->rx_count_frames = 0;
4851 
4852 		use_rx_wd = !priv->rx_coal_frames[queue];
4853 		use_rx_wd |= rx_q->rx_count_frames > 0;
4854 		if (!priv->use_riwt)
4855 			use_rx_wd = false;
4856 
4857 		dma_wmb();
4858 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4859 
4860 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4861 	}
4862 	rx_q->dirty_rx = entry;
4863 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4864 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4865 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4866 }
4867 
4868 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4869 				       struct dma_desc *p,
4870 				       int status, unsigned int len)
4871 {
4872 	unsigned int plen = 0, hlen = 0;
4873 	int coe = priv->hw->rx_csum;
4874 
4875 	/* Not first descriptor, buffer is always zero */
4876 	if (priv->sph && len)
4877 		return 0;
4878 
4879 	/* First descriptor, get split header length */
4880 	stmmac_get_rx_header_len(priv, p, &hlen);
4881 	if (priv->sph && hlen) {
4882 		priv->xstats.rx_split_hdr_pkt_n++;
4883 		return hlen;
4884 	}
4885 
4886 	/* First descriptor, not last descriptor and not split header */
4887 	if (status & rx_not_ls)
4888 		return priv->dma_conf.dma_buf_sz;
4889 
4890 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4891 
4892 	/* First descriptor and last descriptor and not split header */
4893 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4894 }
4895 
4896 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4897 				       struct dma_desc *p,
4898 				       int status, unsigned int len)
4899 {
4900 	int coe = priv->hw->rx_csum;
4901 	unsigned int plen = 0;
4902 
4903 	/* Not split header, buffer is not available */
4904 	if (!priv->sph)
4905 		return 0;
4906 
4907 	/* Not last descriptor */
4908 	if (status & rx_not_ls)
4909 		return priv->dma_conf.dma_buf_sz;
4910 
4911 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4912 
4913 	/* Last descriptor */
4914 	return plen - len;
4915 }
4916 
4917 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4918 				struct xdp_frame *xdpf, bool dma_map)
4919 {
4920 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4921 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4922 	unsigned int entry = tx_q->cur_tx;
4923 	struct dma_desc *tx_desc;
4924 	dma_addr_t dma_addr;
4925 	bool set_ic;
4926 
4927 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4928 		return STMMAC_XDP_CONSUMED;
4929 
4930 	if (priv->plat->est && priv->plat->est->enable &&
4931 	    priv->plat->est->max_sdu[queue] &&
4932 	    xdpf->len > priv->plat->est->max_sdu[queue]) {
4933 		priv->xstats.max_sdu_txq_drop[queue]++;
4934 		return STMMAC_XDP_CONSUMED;
4935 	}
4936 
4937 	if (likely(priv->extend_desc))
4938 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4939 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4940 		tx_desc = &tx_q->dma_entx[entry].basic;
4941 	else
4942 		tx_desc = tx_q->dma_tx + entry;
4943 
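	/* ndo_xdp_xmit path (dma_map): map the frame for DMA. XDP_TX path:
	 * the buffer already comes from the RX page pool, so only a DMA sync
	 * is needed.
	 */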
4944 	if (dma_map) {
4945 		dma_addr = dma_map_single(priv->device, xdpf->data,
4946 					  xdpf->len, DMA_TO_DEVICE);
4947 		if (dma_mapping_error(priv->device, dma_addr))
4948 			return STMMAC_XDP_CONSUMED;
4949 
4950 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4951 	} else {
4952 		struct page *page = virt_to_page(xdpf->data);
4953 
4954 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4955 			   xdpf->headroom;
4956 		dma_sync_single_for_device(priv->device, dma_addr,
4957 					   xdpf->len, DMA_BIDIRECTIONAL);
4958 
4959 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4960 	}
4961 
4962 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4963 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4964 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4965 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4966 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4967 
4968 	tx_q->xdpf[entry] = xdpf;
4969 
4970 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4971 
4972 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4973 			       true, priv->mode, true, true,
4974 			       xdpf->len);
4975 
4976 	tx_q->tx_count_frames++;
4977 
4978 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4979 		set_ic = true;
4980 	else
4981 		set_ic = false;
4982 
4983 	if (set_ic) {
4984 		tx_q->tx_count_frames = 0;
4985 		stmmac_set_tx_ic(priv, tx_desc);
4986 		u64_stats_update_begin(&txq_stats->q_syncp);
4987 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4988 		u64_stats_update_end(&txq_stats->q_syncp);
4989 	}
4990 
4991 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4992 
4993 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4994 	tx_q->cur_tx = entry;
4995 
4996 	return STMMAC_XDP_TX;
4997 }
4998 
4999 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5000 				   int cpu)
5001 {
5002 	int index = cpu;
5003 
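	/* Map the current CPU onto a valid TX queue index, i.e. effectively
	 * cpu % tx_queues_to_use.
	 */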
5004 	if (unlikely(index < 0))
5005 		index = 0;
5006 
5007 	while (index >= priv->plat->tx_queues_to_use)
5008 		index -= priv->plat->tx_queues_to_use;
5009 
5010 	return index;
5011 }
5012 
5013 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5014 				struct xdp_buff *xdp)
5015 {
5016 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5017 	int cpu = smp_processor_id();
5018 	struct netdev_queue *nq;
5019 	int queue;
5020 	int res;
5021 
5022 	if (unlikely(!xdpf))
5023 		return STMMAC_XDP_CONSUMED;
5024 
5025 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5026 	nq = netdev_get_tx_queue(priv->dev, queue);
5027 
5028 	__netif_tx_lock(nq, cpu);
5029 	/* Avoids TX time-out as we are sharing with slow path */
5030 	txq_trans_cond_update(nq);
5031 
5032 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5033 	if (res == STMMAC_XDP_TX)
5034 		stmmac_flush_tx_descriptors(priv, queue);
5035 
5036 	__netif_tx_unlock(nq);
5037 
5038 	return res;
5039 }
5040 
5041 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5042 				 struct bpf_prog *prog,
5043 				 struct xdp_buff *xdp)
5044 {
5045 	u32 act;
5046 	int res;
5047 
5048 	act = bpf_prog_run_xdp(prog, xdp);
5049 	switch (act) {
5050 	case XDP_PASS:
5051 		res = STMMAC_XDP_PASS;
5052 		break;
5053 	case XDP_TX:
5054 		res = stmmac_xdp_xmit_back(priv, xdp);
5055 		break;
5056 	case XDP_REDIRECT:
5057 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5058 			res = STMMAC_XDP_CONSUMED;
5059 		else
5060 			res = STMMAC_XDP_REDIRECT;
5061 		break;
5062 	default:
5063 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5064 		fallthrough;
5065 	case XDP_ABORTED:
5066 		trace_xdp_exception(priv->dev, prog, act);
5067 		fallthrough;
5068 	case XDP_DROP:
5069 		res = STMMAC_XDP_CONSUMED;
5070 		break;
5071 	}
5072 
5073 	return res;
5074 }
5075 
5076 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5077 					   struct xdp_buff *xdp)
5078 {
5079 	struct bpf_prog *prog;
5080 	int res;
5081 
5082 	prog = READ_ONCE(priv->xdp_prog);
5083 	if (!prog) {
5084 		res = STMMAC_XDP_PASS;
5085 		goto out;
5086 	}
5087 
5088 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5089 out:
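	/* Encode the verdict in the return value: XDP_PASS maps to NULL,
	 * every other verdict is returned as an ERR_PTR carrying the negated
	 * STMMAC_XDP_* flag.
	 */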
5090 	return ERR_PTR(-res);
5091 }
5092 
5093 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5094 				   int xdp_status)
5095 {
5096 	int cpu = smp_processor_id();
5097 	int queue;
5098 
5099 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5100 
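	/* After the RX batch: arm the TX timer if any frame was sent back via
	 * XDP_TX, and flush any pending xdp_do_redirect() work.
	 */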
5101 	if (xdp_status & STMMAC_XDP_TX)
5102 		stmmac_tx_timer_arm(priv, queue);
5103 
5104 	if (xdp_status & STMMAC_XDP_REDIRECT)
5105 		xdp_do_flush();
5106 }
5107 
5108 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5109 					       struct xdp_buff *xdp)
5110 {
5111 	unsigned int metasize = xdp->data - xdp->data_meta;
5112 	unsigned int datasize = xdp->data_end - xdp->data;
5113 	struct sk_buff *skb;
5114 
5115 	skb = napi_alloc_skb(&ch->rxtx_napi,
5116 			     xdp->data_end - xdp->data_hard_start);
5117 	if (unlikely(!skb))
5118 		return NULL;
5119 
5120 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5121 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5122 	if (metasize)
5123 		skb_metadata_set(skb, metasize);
5124 
5125 	return skb;
5126 }
5127 
5128 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5129 				   struct dma_desc *p, struct dma_desc *np,
5130 				   struct xdp_buff *xdp)
5131 {
5132 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5133 	struct stmmac_channel *ch = &priv->channel[queue];
5134 	unsigned int len = xdp->data_end - xdp->data;
5135 	enum pkt_hash_types hash_type;
5136 	int coe = priv->hw->rx_csum;
5137 	struct sk_buff *skb;
5138 	u32 hash;
5139 
5140 	skb = stmmac_construct_skb_zc(ch, xdp);
5141 	if (!skb) {
5142 		priv->xstats.rx_dropped++;
5143 		return;
5144 	}
5145 
5146 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5147 	if (priv->hw->hw_vlan_en)
5148 		/* MAC level stripping. */
5149 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5150 	else
5151 		/* Driver level stripping. */
5152 		stmmac_rx_vlan(priv->dev, skb);
5153 	skb->protocol = eth_type_trans(skb, priv->dev);
5154 
5155 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5156 		skb_checksum_none_assert(skb);
5157 	else
5158 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5159 
5160 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5161 		skb_set_hash(skb, hash, hash_type);
5162 
5163 	skb_record_rx_queue(skb, queue);
5164 	napi_gro_receive(&ch->rxtx_napi, skb);
5165 
5166 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5167 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5168 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5169 	u64_stats_update_end(&rxq_stats->napi_syncp);
5170 }
5171 
5172 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5173 {
5174 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5175 	unsigned int entry = rx_q->dirty_rx;
5176 	struct dma_desc *rx_desc = NULL;
5177 	bool ret = true;
5178 
5179 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5180 
5181 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5182 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5183 		dma_addr_t dma_addr;
5184 		bool use_rx_wd;
5185 
5186 		if (!buf->xdp) {
5187 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5188 			if (!buf->xdp) {
5189 				ret = false;
5190 				break;
5191 			}
5192 		}
5193 
5194 		if (priv->extend_desc)
5195 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5196 		else
5197 			rx_desc = rx_q->dma_rx + entry;
5198 
5199 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5200 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5201 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5202 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5203 
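		/* RX interrupt-coalescing bookkeeping, mirroring stmmac_rx_refill():
		 * use_rx_wd decides whether this descriptor relies on the RIWT
		 * watchdog instead of raising an interrupt on completion, and it
		 * is only honoured when priv->use_riwt is set.
		 */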
5204 		rx_q->rx_count_frames++;
5205 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5206 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5207 			rx_q->rx_count_frames = 0;
5208 
5209 		use_rx_wd = !priv->rx_coal_frames[queue];
5210 		use_rx_wd |= rx_q->rx_count_frames > 0;
5211 		if (!priv->use_riwt)
5212 			use_rx_wd = false;
5213 
5214 		dma_wmb();
5215 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5216 
5217 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5218 	}
5219 
5220 	if (rx_desc) {
5221 		rx_q->dirty_rx = entry;
5222 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5223 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5224 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5225 	}
5226 
5227 	return ret;
5228 }
5229 
5230 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5231 {
5232 	/* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
5233 	 * represents the incoming packet, while the cb field in the same
5234 	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
5235 	 * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5236 	 */
5237 	return (struct stmmac_xdp_buff *)xdp;
5238 }
5239 
5240 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5241 {
5242 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5243 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5244 	unsigned int count = 0, error = 0, len = 0;
5245 	int dirty = stmmac_rx_dirty(priv, queue);
5246 	unsigned int next_entry = rx_q->cur_rx;
5247 	u32 rx_errors = 0, rx_dropped = 0;
5248 	unsigned int desc_size;
5249 	struct bpf_prog *prog;
5250 	bool failure = false;
5251 	int xdp_status = 0;
5252 	int status = 0;
5253 
5254 	if (netif_msg_rx_status(priv)) {
5255 		void *rx_head;
5256 
5257 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5258 		if (priv->extend_desc) {
5259 			rx_head = (void *)rx_q->dma_erx;
5260 			desc_size = sizeof(struct dma_extended_desc);
5261 		} else {
5262 			rx_head = (void *)rx_q->dma_rx;
5263 			desc_size = sizeof(struct dma_desc);
5264 		}
5265 
5266 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5267 				    rx_q->dma_rx_phy, desc_size);
5268 	}
5269 	while (count < limit) {
5270 		struct stmmac_rx_buffer *buf;
5271 		struct stmmac_xdp_buff *ctx;
5272 		unsigned int buf1_len = 0;
5273 		struct dma_desc *np, *p;
5274 		int entry;
5275 		int res;
5276 
5277 		if (!count && rx_q->state_saved) {
5278 			error = rx_q->state.error;
5279 			len = rx_q->state.len;
5280 		} else {
5281 			rx_q->state_saved = false;
5282 			error = 0;
5283 			len = 0;
5284 		}
5285 
5286 		if (count >= limit)
5287 			break;
5288 
5289 read_again:
5290 		buf1_len = 0;
5291 		entry = next_entry;
5292 		buf = &rx_q->buf_pool[entry];
5293 
5294 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5295 			failure = failure ||
5296 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5297 			dirty = 0;
5298 		}
5299 
5300 		if (priv->extend_desc)
5301 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5302 		else
5303 			p = rx_q->dma_rx + entry;
5304 
5305 		/* read the status of the incoming frame */
5306 		status = stmmac_rx_status(priv, &priv->xstats, p);
5307 		/* check if it is still owned by the DMA, otherwise go ahead */
5308 		if (unlikely(status & dma_own))
5309 			break;
5310 
5311 		/* Prefetch the next RX descriptor */
5312 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5313 						priv->dma_conf.dma_rx_size);
5314 		next_entry = rx_q->cur_rx;
5315 
5316 		if (priv->extend_desc)
5317 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5318 		else
5319 			np = rx_q->dma_rx + next_entry;
5320 
5321 		prefetch(np);
5322 
5323 		/* Ensure a valid XSK buffer before proceeding */
5324 		if (!buf->xdp)
5325 			break;
5326 
5327 		if (priv->extend_desc)
5328 			stmmac_rx_extended_status(priv, &priv->xstats,
5329 						  rx_q->dma_erx + entry);
5330 		if (unlikely(status == discard_frame)) {
5331 			xsk_buff_free(buf->xdp);
5332 			buf->xdp = NULL;
5333 			dirty++;
5334 			error = 1;
5335 			if (!priv->hwts_rx_en)
5336 				rx_errors++;
5337 		}
5338 
5339 		if (unlikely(error && (status & rx_not_ls)))
5340 			goto read_again;
5341 		if (unlikely(error)) {
5342 			count++;
5343 			continue;
5344 		}
5345 
5346 		/* The XSK pool expects each RX frame to map 1:1 to an XSK buffer */
5347 		if (likely(status & rx_not_ls)) {
5348 			xsk_buff_free(buf->xdp);
5349 			buf->xdp = NULL;
5350 			dirty++;
5351 			count++;
5352 			goto read_again;
5353 		}
5354 
5355 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5356 		ctx->priv = priv;
5357 		ctx->desc = p;
5358 		ctx->ndesc = np;
5359 
5360 		/* XDP ZC frames only support primary buffers for now */
5361 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5362 		len += buf1_len;
5363 
5364 		/* ACS is disabled; strip manually. */
5365 		if (likely(!(status & rx_not_ls))) {
5366 			buf1_len -= ETH_FCS_LEN;
5367 			len -= ETH_FCS_LEN;
5368 		}
5369 
5370 		/* RX buffer is good and fits into an XSK pool buffer */
5371 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5372 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5373 
5374 		prog = READ_ONCE(priv->xdp_prog);
5375 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5376 
5377 		switch (res) {
5378 		case STMMAC_XDP_PASS:
5379 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5380 			xsk_buff_free(buf->xdp);
5381 			break;
5382 		case STMMAC_XDP_CONSUMED:
5383 			xsk_buff_free(buf->xdp);
5384 			rx_dropped++;
5385 			break;
5386 		case STMMAC_XDP_TX:
5387 		case STMMAC_XDP_REDIRECT:
5388 			xdp_status |= res;
5389 			break;
5390 		}
5391 
5392 		buf->xdp = NULL;
5393 		dirty++;
5394 		count++;
5395 	}
5396 
5397 	if (status & rx_not_ls) {
5398 		rx_q->state_saved = true;
5399 		rx_q->state.error = error;
5400 		rx_q->state.len = len;
5401 	}
5402 
5403 	stmmac_finalize_xdp_rx(priv, xdp_status);
5404 
5405 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5406 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5407 	u64_stats_update_end(&rxq_stats->napi_syncp);
5408 
5409 	priv->xstats.rx_dropped += rx_dropped;
5410 	priv->xstats.rx_errors += rx_errors;
5411 
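	/* With need_wakeup, tell user space whether it has to kick the kernel
	 * again: set the flag when the refill failed or dirty descriptors
	 * remain, clear it otherwise. Without need_wakeup, returning the full
	 * limit on failure keeps NAPI polling this queue.
	 */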
5412 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5413 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5414 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5415 		else
5416 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5417 
5418 		return (int)count;
5419 	}
5420 
5421 	return failure ? limit : (int)count;
5422 }
5423 
5424 /**
5425  * stmmac_rx - manage the receive process
5426  * @priv: driver private structure
5427  * @limit: napi budget
5428  * @queue: RX queue index.
5429  * Description: this is the function called by the NAPI poll method.
5430  * It gets all the frames inside the ring.
5431  */
5432 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5433 {
5434 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5435 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5436 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5437 	struct stmmac_channel *ch = &priv->channel[queue];
5438 	unsigned int count = 0, error = 0, len = 0;
5439 	int status = 0, coe = priv->hw->rx_csum;
5440 	unsigned int next_entry = rx_q->cur_rx;
5441 	enum dma_data_direction dma_dir;
5442 	unsigned int desc_size;
5443 	struct sk_buff *skb = NULL;
5444 	struct stmmac_xdp_buff ctx;
5445 	int xdp_status = 0;
5446 	int buf_sz;
5447 
5448 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
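	/* The frame size handed to xdp_init_buff() is rounded up to whole
	 * pages, since page_pool backs each RX buffer with full pages and the
	 * XDP program may grow the tail within that room.
	 */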
5449 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5450 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5451 
5452 	if (netif_msg_rx_status(priv)) {
5453 		void *rx_head;
5454 
5455 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5456 		if (priv->extend_desc) {
5457 			rx_head = (void *)rx_q->dma_erx;
5458 			desc_size = sizeof(struct dma_extended_desc);
5459 		} else {
5460 			rx_head = (void *)rx_q->dma_rx;
5461 			desc_size = sizeof(struct dma_desc);
5462 		}
5463 
5464 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5465 				    rx_q->dma_rx_phy, desc_size);
5466 	}
5467 	while (count < limit) {
5468 		unsigned int buf1_len = 0, buf2_len = 0;
5469 		enum pkt_hash_types hash_type;
5470 		struct stmmac_rx_buffer *buf;
5471 		struct dma_desc *np, *p;
5472 		int entry;
5473 		u32 hash;
5474 
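		/* A frame can span NAPI polls: on the first iteration resume
		 * from the partially assembled skb state saved by the
		 * previous run, if any.
		 */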
5475 		if (!count && rx_q->state_saved) {
5476 			skb = rx_q->state.skb;
5477 			error = rx_q->state.error;
5478 			len = rx_q->state.len;
5479 		} else {
5480 			rx_q->state_saved = false;
5481 			skb = NULL;
5482 			error = 0;
5483 			len = 0;
5484 		}
5485 
5486 read_again:
5487 		if (count >= limit)
5488 			break;
5489 
5490 		buf1_len = 0;
5491 		buf2_len = 0;
5492 		entry = next_entry;
5493 		buf = &rx_q->buf_pool[entry];
5494 
5495 		if (priv->extend_desc)
5496 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5497 		else
5498 			p = rx_q->dma_rx + entry;
5499 
5500 		/* read the status of the incoming frame */
5501 		status = stmmac_rx_status(priv, &priv->xstats, p);
5502 		/* check if it is still owned by the DMA, otherwise go ahead */
5503 		if (unlikely(status & dma_own))
5504 			break;
5505 
5506 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5507 						priv->dma_conf.dma_rx_size);
5508 		next_entry = rx_q->cur_rx;
5509 
5510 		if (priv->extend_desc)
5511 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5512 		else
5513 			np = rx_q->dma_rx + next_entry;
5514 
5515 		prefetch(np);
5516 
5517 		if (priv->extend_desc)
5518 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5519 		if (unlikely(status == discard_frame)) {
5520 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5521 			buf->page = NULL;
5522 			error = 1;
5523 			if (!priv->hwts_rx_en)
5524 				rx_errors++;
5525 		}
5526 
5527 		if (unlikely(error && (status & rx_not_ls)))
5528 			goto read_again;
5529 		if (unlikely(error)) {
5530 			dev_kfree_skb(skb);
5531 			skb = NULL;
5532 			count++;
5533 			continue;
5534 		}
5535 
5536 		/* Buffer is good. Go on. */
5537 
5538 		prefetch(page_address(buf->page) + buf->page_offset);
5539 		if (buf->sec_page)
5540 			prefetch(page_address(buf->sec_page));
5541 
5542 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5543 		len += buf1_len;
5544 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5545 		len += buf2_len;
5546 
5547 		/* ACS is disabled; strip manually. */
5548 		if (likely(!(status & rx_not_ls))) {
5549 			if (buf2_len) {
5550 				buf2_len -= ETH_FCS_LEN;
5551 				len -= ETH_FCS_LEN;
5552 			} else if (buf1_len) {
5553 				buf1_len -= ETH_FCS_LEN;
5554 				len -= ETH_FCS_LEN;
5555 			}
5556 		}
5557 
5558 		if (!skb) {
5559 			unsigned int pre_len, sync_len;
5560 
5561 			dma_sync_single_for_cpu(priv->device, buf->addr,
5562 						buf1_len, dma_dir);
5563 
5564 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5565 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5566 					 buf->page_offset, buf1_len, true);
5567 
5568 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5569 				  buf->page_offset;
5570 
5571 			ctx.priv = priv;
5572 			ctx.desc = p;
5573 			ctx.ndesc = np;
5574 
5575 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5576 			/* Due to xdp_adjust_tail: the DMA sync for_device must
5577 			 * cover the max length the CPU touched
5578 			 */
5579 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5580 				   buf->page_offset;
5581 			sync_len = max(sync_len, pre_len);
5582 
5583 			/* For any verdict other than XDP_PASS */
5584 			if (IS_ERR(skb)) {
5585 				unsigned int xdp_res = -PTR_ERR(skb);
5586 
5587 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5588 					page_pool_put_page(rx_q->page_pool,
5589 							   virt_to_head_page(ctx.xdp.data),
5590 							   sync_len, true);
5591 					buf->page = NULL;
5592 					rx_dropped++;
5593 
5594 					/* Clear skb, as it only carries the
5595 					 * XDP verdict status here.
5596 					 */
5597 					skb = NULL;
5598 
5599 					if (unlikely((status & rx_not_ls)))
5600 						goto read_again;
5601 
5602 					count++;
5603 					continue;
5604 				} else if (xdp_res & (STMMAC_XDP_TX |
5605 						      STMMAC_XDP_REDIRECT)) {
5606 					xdp_status |= xdp_res;
5607 					buf->page = NULL;
5608 					skb = NULL;
5609 					count++;
5610 					continue;
5611 				}
5612 			}
5613 		}
5614 
5615 		if (!skb) {
5616 			/* XDP program may expand or reduce tail */
5617 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5618 
5619 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5620 			if (!skb) {
5621 				rx_dropped++;
5622 				count++;
5623 				goto drain_data;
5624 			}
5625 
5626 			/* XDP program may adjust header */
5627 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5628 			skb_put(skb, buf1_len);
5629 
5630 			/* Data payload copied into SKB, page ready for recycle */
5631 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5632 			buf->page = NULL;
5633 		} else if (buf1_len) {
5634 			dma_sync_single_for_cpu(priv->device, buf->addr,
5635 						buf1_len, dma_dir);
5636 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5637 					buf->page, buf->page_offset, buf1_len,
5638 					priv->dma_conf.dma_buf_sz);
5639 
5640 			/* Data payload appended into SKB */
5641 			skb_mark_for_recycle(skb);
5642 			buf->page = NULL;
5643 		}
5644 
5645 		if (buf2_len) {
5646 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5647 						buf2_len, dma_dir);
5648 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5649 					buf->sec_page, 0, buf2_len,
5650 					priv->dma_conf.dma_buf_sz);
5651 
5652 			/* Data payload appended into SKB */
5653 			skb_mark_for_recycle(skb);
5654 			buf->sec_page = NULL;
5655 		}
5656 
5657 drain_data:
5658 		if (likely(status & rx_not_ls))
5659 			goto read_again;
5660 		if (!skb)
5661 			continue;
5662 
5663 		/* Got entire packet into SKB. Finish it. */
5664 
5665 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5666 
5667 		if (priv->hw->hw_vlan_en)
5668 			/* MAC level stripping. */
5669 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5670 		else
5671 			/* Driver level stripping. */
5672 			stmmac_rx_vlan(priv->dev, skb);
5673 
5674 		skb->protocol = eth_type_trans(skb, priv->dev);
5675 
5676 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5677 			skb_checksum_none_assert(skb);
5678 		else
5679 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5680 
5681 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5682 			skb_set_hash(skb, hash, hash_type);
5683 
5684 		skb_record_rx_queue(skb, queue);
5685 		napi_gro_receive(&ch->rx_napi, skb);
5686 		skb = NULL;
5687 
5688 		rx_packets++;
5689 		rx_bytes += len;
5690 		count++;
5691 	}
5692 
5693 	if (status & rx_not_ls || skb) {
5694 		rx_q->state_saved = true;
5695 		rx_q->state.skb = skb;
5696 		rx_q->state.error = error;
5697 		rx_q->state.len = len;
5698 	}
5699 
5700 	stmmac_finalize_xdp_rx(priv, xdp_status);
5701 
5702 	stmmac_rx_refill(priv, queue);
5703 
5704 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5705 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5706 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5707 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5708 	u64_stats_update_end(&rxq_stats->napi_syncp);
5709 
5710 	priv->xstats.rx_dropped += rx_dropped;
5711 	priv->xstats.rx_errors += rx_errors;
5712 
5713 	return count;
5714 }
5715 
5716 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5717 {
5718 	struct stmmac_channel *ch =
5719 		container_of(napi, struct stmmac_channel, rx_napi);
5720 	struct stmmac_priv *priv = ch->priv_data;
5721 	struct stmmac_rxq_stats *rxq_stats;
5722 	u32 chan = ch->index;
5723 	int work_done;
5724 
5725 	rxq_stats = &priv->xstats.rxq_stats[chan];
5726 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5727 	u64_stats_inc(&rxq_stats->napi.poll);
5728 	u64_stats_update_end(&rxq_stats->napi_syncp);
5729 
5730 	work_done = stmmac_rx(priv, budget, chan);
5731 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5732 		unsigned long flags;
5733 
5734 		spin_lock_irqsave(&ch->lock, flags);
5735 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5736 		spin_unlock_irqrestore(&ch->lock, flags);
5737 	}
5738 
5739 	return work_done;
5740 }
5741 
5742 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5743 {
5744 	struct stmmac_channel *ch =
5745 		container_of(napi, struct stmmac_channel, tx_napi);
5746 	struct stmmac_priv *priv = ch->priv_data;
5747 	struct stmmac_txq_stats *txq_stats;
5748 	bool pending_packets = false;
5749 	u32 chan = ch->index;
5750 	int work_done;
5751 
5752 	txq_stats = &priv->xstats.txq_stats[chan];
5753 	u64_stats_update_begin(&txq_stats->napi_syncp);
5754 	u64_stats_inc(&txq_stats->napi.poll);
5755 	u64_stats_update_end(&txq_stats->napi_syncp);
5756 
5757 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5758 	work_done = min(work_done, budget);
5759 
5760 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5761 		unsigned long flags;
5762 
5763 		spin_lock_irqsave(&ch->lock, flags);
5764 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5765 		spin_unlock_irqrestore(&ch->lock, flags);
5766 	}
5767 
5768 	/* TX still has packets to handle; check if we need to arm the TX timer */
5769 	if (pending_packets)
5770 		stmmac_tx_timer_arm(priv, chan);
5771 
5772 	return work_done;
5773 }
5774 
5775 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5776 {
5777 	struct stmmac_channel *ch =
5778 		container_of(napi, struct stmmac_channel, rxtx_napi);
5779 	struct stmmac_priv *priv = ch->priv_data;
5780 	bool tx_pending_packets = false;
5781 	int rx_done, tx_done, rxtx_done;
5782 	struct stmmac_rxq_stats *rxq_stats;
5783 	struct stmmac_txq_stats *txq_stats;
5784 	u32 chan = ch->index;
5785 
5786 	rxq_stats = &priv->xstats.rxq_stats[chan];
5787 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5788 	u64_stats_inc(&rxq_stats->napi.poll);
5789 	u64_stats_update_end(&rxq_stats->napi_syncp);
5790 
5791 	txq_stats = &priv->xstats.txq_stats[chan];
5792 	u64_stats_update_begin(&txq_stats->napi_syncp);
5793 	u64_stats_inc(&txq_stats->napi.poll);
5794 	u64_stats_update_end(&txq_stats->napi_syncp);
5795 
5796 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5797 	tx_done = min(tx_done, budget);
5798 
5799 	rx_done = stmmac_rx_zc(priv, budget, chan);
5800 
5801 	rxtx_done = max(tx_done, rx_done);
5802 
5803 	/* If either TX or RX work is not complete, return budget
5804 	 * and keep polling
5805 	 */
5806 	if (rxtx_done >= budget)
5807 		return budget;
5808 
5809 	/* all work done, exit the polling mode */
5810 	if (napi_complete_done(napi, rxtx_done)) {
5811 		unsigned long flags;
5812 
5813 		spin_lock_irqsave(&ch->lock, flags);
5814 		/* Both RX and TX work are complete,
5815 		 * so enable both RX & TX IRQs.
5816 		 */
5817 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5818 		spin_unlock_irqrestore(&ch->lock, flags);
5819 	}
5820 
5821 	/* TX still has packets to handle; check if we need to arm the TX timer */
5822 	if (tx_pending_packets)
5823 		stmmac_tx_timer_arm(priv, chan);
5824 
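	/* A poll that has (tried to) complete must report strictly less work
	 * than its budget, hence the clamp to budget - 1.
	 */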
5825 	return min(rxtx_done, budget - 1);
5826 }
5827 
5828 /**
5829  *  stmmac_tx_timeout
5830  *  @dev : Pointer to net device structure
5831  *  @txqueue: the index of the hanging transmit queue
5832  *  Description: this function is called when a packet transmission fails to
5833  *   complete within a reasonable time. The driver will mark the error in the
5834  *   netdev structure and arrange for the device to be reset to a sane state
5835  *   in order to transmit a new packet.
5836  */
5837 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5838 {
5839 	struct stmmac_priv *priv = netdev_priv(dev);
5840 
5841 	stmmac_global_err(priv);
5842 }
5843 
5844 /**
5845  *  stmmac_set_rx_mode - entry point for multicast addressing
5846  *  @dev : pointer to the device structure
5847  *  Description:
5848  *  This function is a driver entry point which gets called by the kernel
5849  *  whenever multicast addresses must be enabled/disabled.
5850  *  Return value:
5851  *  void.
5852  */
5853 static void stmmac_set_rx_mode(struct net_device *dev)
5854 {
5855 	struct stmmac_priv *priv = netdev_priv(dev);
5856 
5857 	stmmac_set_filter(priv, priv->hw, dev);
5858 }
5859 
5860 /**
5861  *  stmmac_change_mtu - entry point to change MTU size for the device.
5862  *  @dev : device pointer.
5863  *  @new_mtu : the new MTU size for the device.
5864  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5865  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5866  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5867  *  Return value:
5868  *  0 on success or an appropriate negative errno value on
5869  *  failure.
5870  */
5871 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5872 {
5873 	struct stmmac_priv *priv = netdev_priv(dev);
5874 	int txfifosz = priv->plat->tx_fifo_size;
5875 	struct stmmac_dma_conf *dma_conf;
5876 	const int mtu = new_mtu;
5877 	int ret;
5878 
5879 	if (txfifosz == 0)
5880 		txfifosz = priv->dma_cap.tx_fifo_size;
5881 
5882 	txfifosz /= priv->plat->tx_queues_to_use;
5883 
5884 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5885 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5886 		return -EINVAL;
5887 	}
5888 
5889 	new_mtu = STMMAC_ALIGN(new_mtu);
5890 
5891 	/* If condition true, FIFO is too small or MTU too large */
5892 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5893 		return -EINVAL;
5894 
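	/* For a running interface, allocate the DMA configuration for the new
	 * MTU first, so that an allocation failure leaves the current setup
	 * untouched; only then tear the interface down and reopen it with the
	 * new rings.
	 */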
5895 	if (netif_running(dev)) {
5896 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5897 		/* Try to allocate the new DMA conf with the new mtu */
5898 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5899 		if (IS_ERR(dma_conf)) {
5900 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5901 				   mtu);
5902 			return PTR_ERR(dma_conf);
5903 		}
5904 
5905 		stmmac_release(dev);
5906 
5907 		ret = __stmmac_open(dev, dma_conf);
5908 		if (ret) {
5909 			free_dma_desc_resources(priv, dma_conf);
5910 			kfree(dma_conf);
5911 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5912 			return ret;
5913 		}
5914 
5915 		kfree(dma_conf);
5916 
5917 		stmmac_set_rx_mode(dev);
5918 	}
5919 
5920 	dev->mtu = mtu;
5921 	netdev_update_features(dev);
5922 
5923 	return 0;
5924 }
5925 
5926 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5927 					     netdev_features_t features)
5928 {
5929 	struct stmmac_priv *priv = netdev_priv(dev);
5930 
5931 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5932 		features &= ~NETIF_F_RXCSUM;
5933 
5934 	if (!priv->plat->tx_coe)
5935 		features &= ~NETIF_F_CSUM_MASK;
5936 
5937 	/* Some GMAC devices have buggy Jumbo frame support and need
5938 	 * TX COE disabled for oversized frames (due to limited buffer
5939 	 * sizes). In this case we disable TX csum insertion in the
5940 	 * TDES and do not use SF.
5941 	 */
5942 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5943 		features &= ~NETIF_F_CSUM_MASK;
5944 
5945 	/* Disable TSO if asked by ethtool */
5946 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5947 		if (features & NETIF_F_TSO)
5948 			priv->tso = true;
5949 		else
5950 			priv->tso = false;
5951 	}
5952 
5953 	return features;
5954 }
5955 
5956 static int stmmac_set_features(struct net_device *netdev,
5957 			       netdev_features_t features)
5958 {
5959 	struct stmmac_priv *priv = netdev_priv(netdev);
5960 
5961 	/* Keep the COE type if RX checksum offload is enabled */
5962 	if (features & NETIF_F_RXCSUM)
5963 		priv->hw->rx_csum = priv->plat->rx_coe;
5964 	else
5965 		priv->hw->rx_csum = 0;
5966 	/* No check needed because rx_coe has already been set and it will
5967 	 * be fixed up if there is an issue.
5968 	 */
5969 	stmmac_rx_ipc(priv, priv->hw);
5970 
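	/* Split Header depends on RX checksum offload being enabled, so
	 * re-evaluate and reprogram it on every RX channel.
	 */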
5971 	if (priv->sph_cap) {
5972 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5973 		u32 chan;
5974 
5975 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5976 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5977 	}
5978 
5979 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5980 		priv->hw->hw_vlan_en = true;
5981 	else
5982 		priv->hw->hw_vlan_en = false;
5983 
5984 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5985 
5986 	return 0;
5987 }
5988 
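/* Track the Frame Preemption verify/response mPacket handshake: MAC events
 * advance the local and link-partner state machines towards FPE ON, and the
 * FPE workqueue task is kicked to complete the handshake outside of
 * interrupt context.
 */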
5989 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5990 {
5991 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5992 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5993 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5994 	bool *hs_enable = &fpe_cfg->hs_enable;
5995 
5996 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5997 		return;
5998 
5999 	/* If LP has sent verify mPacket, LP is FPE capable */
6000 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
6001 		if (*lp_state < FPE_STATE_CAPABLE)
6002 			*lp_state = FPE_STATE_CAPABLE;
6003 
6004 		/* If the user has requested FPE enable, respond quickly */
6005 		if (*hs_enable)
6006 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6007 						fpe_cfg,
6008 						MPACKET_RESPONSE);
6009 	}
6010 
6011 	/* If Local has sent verify mPacket, Local is FPE capable */
6012 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
6013 		if (*lo_state < FPE_STATE_CAPABLE)
6014 			*lo_state = FPE_STATE_CAPABLE;
6015 	}
6016 
6017 	/* If LP has sent response mPacket, LP is entering FPE ON */
6018 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
6019 		*lp_state = FPE_STATE_ENTERING_ON;
6020 
6021 	/* If Local has sent response mPacket, Local is entering FPE ON */
6022 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
6023 		*lo_state = FPE_STATE_ENTERING_ON;
6024 
6025 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
6026 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
6027 	    priv->fpe_wq) {
6028 		queue_work(priv->fpe_wq, &priv->fpe_task);
6029 	}
6030 }
6031 
6032 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6033 {
6034 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6035 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6036 	u32 queues_count;
6037 	u32 queue;
6038 	bool xmac;
6039 
6040 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6041 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6042 
6043 	if (priv->irq_wake)
6044 		pm_wakeup_event(priv->device, 0);
6045 
6046 	if (priv->dma_cap.estsel)
6047 		stmmac_est_irq_status(priv, priv, priv->dev,
6048 				      &priv->xstats, tx_cnt);
6049 
6050 	if (priv->dma_cap.fpesel) {
6051 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6052 						   priv->dev);
6053 
6054 		stmmac_fpe_event_status(priv, status);
6055 	}
6056 
6057 	/* To handle the GMAC's own interrupts */
6058 	if ((priv->plat->has_gmac) || xmac) {
6059 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6060 
6061 		if (unlikely(status)) {
6062 			/* For LPI we need to save the tx status */
6063 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6064 				priv->tx_path_in_lpi_mode = true;
6065 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6066 				priv->tx_path_in_lpi_mode = false;
6067 		}
6068 
6069 		for (queue = 0; queue < queues_count; queue++)
6070 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6071 
6072 		/* PCS link status */
6073 		if (priv->hw->pcs &&
6074 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6075 			if (priv->xstats.pcs_link)
6076 				netif_carrier_on(priv->dev);
6077 			else
6078 				netif_carrier_off(priv->dev);
6079 		}
6080 
6081 		stmmac_timestamp_interrupt(priv, priv);
6082 	}
6083 }
6084 
6085 /**
6086  *  stmmac_interrupt - main ISR
6087  *  @irq: interrupt number.
6088  *  @dev_id: to pass the net device pointer.
6089  *  Description: this is the main driver interrupt service routine.
6090  *  It can call:
6091  *  o DMA service routine (to manage incoming frame reception and transmission
6092  *    status)
6093  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6094  *    interrupts.
6095  */
6096 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6097 {
6098 	struct net_device *dev = (struct net_device *)dev_id;
6099 	struct stmmac_priv *priv = netdev_priv(dev);
6100 
6101 	/* Check if adapter is up */
6102 	if (test_bit(STMMAC_DOWN, &priv->state))
6103 		return IRQ_HANDLED;
6104 
6105 	/* Check ASP error if it isn't delivered via an individual IRQ */
6106 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6107 		return IRQ_HANDLED;
6108 
6109 	/* To handle Common interrupts */
6110 	stmmac_common_interrupt(priv);
6111 
6112 	/* To handle DMA interrupts */
6113 	stmmac_dma_interrupt(priv);
6114 
6115 	return IRQ_HANDLED;
6116 }
6117 
6118 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6119 {
6120 	struct net_device *dev = (struct net_device *)dev_id;
6121 	struct stmmac_priv *priv = netdev_priv(dev);
6122 
6123 	/* Check if adapter is up */
6124 	if (test_bit(STMMAC_DOWN, &priv->state))
6125 		return IRQ_HANDLED;
6126 
6127 	/* To handle Common interrupts */
6128 	stmmac_common_interrupt(priv);
6129 
6130 	return IRQ_HANDLED;
6131 }
6132 
6133 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6134 {
6135 	struct net_device *dev = (struct net_device *)dev_id;
6136 	struct stmmac_priv *priv = netdev_priv(dev);
6137 
6138 	/* Check if adapter is up */
6139 	if (test_bit(STMMAC_DOWN, &priv->state))
6140 		return IRQ_HANDLED;
6141 
6142 	/* Check if a fatal error happened */
6143 	stmmac_safety_feat_interrupt(priv);
6144 
6145 	return IRQ_HANDLED;
6146 }
6147 
6148 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6149 {
6150 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6151 	struct stmmac_dma_conf *dma_conf;
6152 	int chan = tx_q->queue_index;
6153 	struct stmmac_priv *priv;
6154 	int status;
6155 
6156 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6157 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6158 
6159 	/* Check if adapter is up */
6160 	if (test_bit(STMMAC_DOWN, &priv->state))
6161 		return IRQ_HANDLED;
6162 
6163 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6164 
6165 	if (unlikely(status & tx_hard_error_bump_tc)) {
6166 		/* Try to bump up the dma threshold on this failure */
6167 		stmmac_bump_dma_threshold(priv, chan);
6168 	} else if (unlikely(status == tx_hard_error)) {
6169 		stmmac_tx_err(priv, chan);
6170 	}
6171 
6172 	return IRQ_HANDLED;
6173 }
6174 
6175 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6176 {
6177 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6178 	struct stmmac_dma_conf *dma_conf;
6179 	int chan = rx_q->queue_index;
6180 	struct stmmac_priv *priv;
6181 
6182 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6183 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6184 
6185 	/* Check if adapter is up */
6186 	if (test_bit(STMMAC_DOWN, &priv->state))
6187 		return IRQ_HANDLED;
6188 
6189 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6190 
6191 	return IRQ_HANDLED;
6192 }
6193 
6194 /**
6195  *  stmmac_ioctl - Entry point for the Ioctl
6196  *  @dev: Device pointer.
6197  *  @rq: An IOCTL-specific structure that can contain a pointer to
6198  *  a proprietary structure used to pass information to the driver.
6199  *  @cmd: IOCTL command
6200  *  Description:
6201  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6202  */
6203 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6204 {
6205 	struct stmmac_priv *priv = netdev_priv(dev);
6206 	int ret = -EOPNOTSUPP;
6207 
6208 	if (!netif_running(dev))
6209 		return -EINVAL;
6210 
6211 	switch (cmd) {
6212 	case SIOCGMIIPHY:
6213 	case SIOCGMIIREG:
6214 	case SIOCSMIIREG:
6215 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6216 		break;
6217 	case SIOCSHWTSTAMP:
6218 		ret = stmmac_hwtstamp_set(dev, rq);
6219 		break;
6220 	case SIOCGHWTSTAMP:
6221 		ret = stmmac_hwtstamp_get(dev, rq);
6222 		break;
6223 	default:
6224 		break;
6225 	}
6226 
6227 	return ret;
6228 }
6229 
6230 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6231 				    void *cb_priv)
6232 {
6233 	struct stmmac_priv *priv = cb_priv;
6234 	int ret = -EOPNOTSUPP;
6235 
6236 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6237 		return ret;
6238 
6239 	__stmmac_disable_all_queues(priv);
6240 
6241 	switch (type) {
6242 	case TC_SETUP_CLSU32:
6243 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6244 		break;
6245 	case TC_SETUP_CLSFLOWER:
6246 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6247 		break;
6248 	default:
6249 		break;
6250 	}
6251 
6252 	stmmac_enable_all_queues(priv);
6253 	return ret;
6254 }
6255 
6256 static LIST_HEAD(stmmac_block_cb_list);
6257 
6258 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6259 			   void *type_data)
6260 {
6261 	struct stmmac_priv *priv = netdev_priv(ndev);
6262 
6263 	switch (type) {
6264 	case TC_QUERY_CAPS:
6265 		return stmmac_tc_query_caps(priv, priv, type_data);
6266 	case TC_SETUP_BLOCK:
6267 		return flow_block_cb_setup_simple(type_data,
6268 						  &stmmac_block_cb_list,
6269 						  stmmac_setup_tc_block_cb,
6270 						  priv, priv, true);
6271 	case TC_SETUP_QDISC_CBS:
6272 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6273 	case TC_SETUP_QDISC_TAPRIO:
6274 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6275 	case TC_SETUP_QDISC_ETF:
6276 		return stmmac_tc_setup_etf(priv, priv, type_data);
6277 	default:
6278 		return -EOPNOTSUPP;
6279 	}
6280 }
6281 
6282 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6283 			       struct net_device *sb_dev)
6284 {
6285 	int gso = skb_shinfo(skb)->gso_type;
6286 
6287 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6288 		/*
6289 		 * There is no way to determine the number of TSO/USO
6290 		 * capable queues. Always use queue 0, because if
6291 		 * TSO/USO is supported then at least this one will
6292 		 * be capable.
6293 		 */
6294 		return 0;
6295 	}
6296 
6297 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6298 }
6299 
6300 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6301 {
6302 	struct stmmac_priv *priv = netdev_priv(ndev);
6303 	int ret = 0;
6304 
6305 	ret = pm_runtime_resume_and_get(priv->device);
6306 	if (ret < 0)
6307 		return ret;
6308 
6309 	ret = eth_mac_addr(ndev, addr);
6310 	if (ret)
6311 		goto set_mac_error;
6312 
6313 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6314 
6315 set_mac_error:
6316 	pm_runtime_put(priv->device);
6317 
6318 	return ret;
6319 }
6320 
6321 #ifdef CONFIG_DEBUG_FS
6322 static struct dentry *stmmac_fs_dir;
6323 
6324 static void sysfs_display_ring(void *head, int size, int extend_desc,
6325 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6326 {
6327 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6328 	struct dma_desc *p = (struct dma_desc *)head;
6329 	unsigned int desc_size;
6330 	dma_addr_t dma_addr;
6331 	int i;
6332 
6333 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6334 	for (i = 0; i < size; i++) {
6335 		dma_addr = dma_phy_addr + i * desc_size;
6336 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6337 				i, &dma_addr,
6338 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6339 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6340 		if (extend_desc)
6341 			p = &(++ep)->basic;
6342 		else
6343 			p++;
6344 	}
6345 }
6346 
6347 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6348 {
6349 	struct net_device *dev = seq->private;
6350 	struct stmmac_priv *priv = netdev_priv(dev);
6351 	u32 rx_count = priv->plat->rx_queues_to_use;
6352 	u32 tx_count = priv->plat->tx_queues_to_use;
6353 	u32 queue;
6354 
6355 	if ((dev->flags & IFF_UP) == 0)
6356 		return 0;
6357 
6358 	for (queue = 0; queue < rx_count; queue++) {
6359 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6360 
6361 		seq_printf(seq, "RX Queue %d:\n", queue);
6362 
6363 		if (priv->extend_desc) {
6364 			seq_printf(seq, "Extended descriptor ring:\n");
6365 			sysfs_display_ring((void *)rx_q->dma_erx,
6366 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6367 		} else {
6368 			seq_printf(seq, "Descriptor ring:\n");
6369 			sysfs_display_ring((void *)rx_q->dma_rx,
6370 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6371 		}
6372 	}
6373 
6374 	for (queue = 0; queue < tx_count; queue++) {
6375 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6376 
6377 		seq_printf(seq, "TX Queue %d:\n", queue);
6378 
6379 		if (priv->extend_desc) {
6380 			seq_printf(seq, "Extended descriptor ring:\n");
6381 			sysfs_display_ring((void *)tx_q->dma_etx,
6382 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6383 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6384 			seq_printf(seq, "Descriptor ring:\n");
6385 			sysfs_display_ring((void *)tx_q->dma_tx,
6386 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6387 		}
6388 	}
6389 
6390 	return 0;
6391 }
6392 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6393 
6394 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6395 {
6396 	static const char * const dwxgmac_timestamp_source[] = {
6397 		"None",
6398 		"Internal",
6399 		"External",
6400 		"Both",
6401 	};
6402 	static const char * const dwxgmac_safety_feature_desc[] = {
6403 		"No",
6404 		"All Safety Features with ECC and Parity",
6405 		"All Safety Features without ECC or Parity",
6406 		"All Safety Features with Parity Only",
6407 		"ECC Only",
6408 		"UNDEFINED",
6409 		"UNDEFINED",
6410 		"UNDEFINED",
6411 	};
6412 	struct net_device *dev = seq->private;
6413 	struct stmmac_priv *priv = netdev_priv(dev);
6414 
6415 	if (!priv->hw_cap_support) {
6416 		seq_printf(seq, "DMA HW features not supported\n");
6417 		return 0;
6418 	}
6419 
6420 	seq_printf(seq, "==============================\n");
6421 	seq_printf(seq, "\tDMA HW features\n");
6422 	seq_printf(seq, "==============================\n");
6423 
6424 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6425 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6426 	seq_printf(seq, "\t1000 Mbps: %s\n",
6427 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6428 	seq_printf(seq, "\tHalf duplex: %s\n",
6429 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6430 	if (priv->plat->has_xgmac) {
6431 		seq_printf(seq,
6432 			   "\tNumber of Additional MAC address registers: %d\n",
6433 			   priv->dma_cap.multi_addr);
6434 	} else {
6435 		seq_printf(seq, "\tHash Filter: %s\n",
6436 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6437 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6438 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6439 	}
6440 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6441 		   (priv->dma_cap.pcs) ? "Y" : "N");
6442 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6443 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6444 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6445 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6446 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6447 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6448 	seq_printf(seq, "\tRMON module: %s\n",
6449 		   (priv->dma_cap.rmon) ? "Y" : "N");
6450 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6451 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6452 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6453 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6454 	if (priv->plat->has_xgmac)
6455 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6456 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6457 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6458 		   (priv->dma_cap.eee) ? "Y" : "N");
6459 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6460 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6461 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6462 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6463 	    priv->plat->has_xgmac) {
6464 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6465 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6466 	} else {
6467 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6468 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6469 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6470 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6471 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6472 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6473 	}
6474 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6475 		   priv->dma_cap.number_rx_channel);
6476 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6477 		   priv->dma_cap.number_tx_channel);
6478 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6479 		   priv->dma_cap.number_rx_queues);
6480 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6481 		   priv->dma_cap.number_tx_queues);
6482 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6483 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6484 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6485 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6486 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6487 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6488 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6489 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6490 		   priv->dma_cap.pps_out_num);
6491 	seq_printf(seq, "\tSafety Features: %s\n",
6492 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6493 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6494 		   priv->dma_cap.frpsel ? "Y" : "N");
6495 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6496 		   priv->dma_cap.host_dma_width);
6497 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6498 		   priv->dma_cap.rssen ? "Y" : "N");
6499 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6500 		   priv->dma_cap.vlhash ? "Y" : "N");
6501 	seq_printf(seq, "\tSplit Header: %s\n",
6502 		   priv->dma_cap.sphen ? "Y" : "N");
6503 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6504 		   priv->dma_cap.vlins ? "Y" : "N");
6505 	seq_printf(seq, "\tDouble VLAN: %s\n",
6506 		   priv->dma_cap.dvlan ? "Y" : "N");
6507 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6508 		   priv->dma_cap.l3l4fnum);
6509 	seq_printf(seq, "\tARP Offloading: %s\n",
6510 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6511 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6512 		   priv->dma_cap.estsel ? "Y" : "N");
6513 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6514 		   priv->dma_cap.fpesel ? "Y" : "N");
6515 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6516 		   priv->dma_cap.tbssel ? "Y" : "N");
6517 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6518 		   priv->dma_cap.tbs_ch_num);
6519 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6520 		   priv->dma_cap.sgfsel ? "Y" : "N");
6521 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6522 		   BIT(priv->dma_cap.ttsfd) >> 1);
6523 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6524 		   priv->dma_cap.numtc);
6525 	seq_printf(seq, "\tDCB Feature: %s\n",
6526 		   priv->dma_cap.dcben ? "Y" : "N");
6527 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6528 		   priv->dma_cap.advthword ? "Y" : "N");
6529 	seq_printf(seq, "\tPTP Offload: %s\n",
6530 		   priv->dma_cap.ptoen ? "Y" : "N");
6531 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6532 		   priv->dma_cap.osten ? "Y" : "N");
6533 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6534 		   priv->dma_cap.pfcen ? "Y" : "N");
6535 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6536 		   BIT(priv->dma_cap.frpes) << 6);
6537 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6538 		   BIT(priv->dma_cap.frpbs) << 6);
6539 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6540 		   priv->dma_cap.frppipe_num);
6541 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6542 		   priv->dma_cap.nrvf_num ?
6543 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6544 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6545 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6546 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6547 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6548 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6549 		   priv->dma_cap.cbtisel ? "Y" : "N");
6550 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6551 		   priv->dma_cap.aux_snapshot_n);
6552 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6553 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6554 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6555 		   priv->dma_cap.edma ? "Y" : "N");
6556 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6557 		   priv->dma_cap.ediffc ? "Y" : "N");
6558 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6559 		   priv->dma_cap.vxn ? "Y" : "N");
6560 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6561 		   priv->dma_cap.dbgmem ? "Y" : "N");
6562 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6563 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6564 	return 0;
6565 }
6566 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6567 
6568 /* Use network device events to rename debugfs file entries.
6569  */
6570 static int stmmac_device_event(struct notifier_block *unused,
6571 			       unsigned long event, void *ptr)
6572 {
6573 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6574 	struct stmmac_priv *priv = netdev_priv(dev);
6575 
6576 	if (dev->netdev_ops != &stmmac_netdev_ops)
6577 		goto done;
6578 
6579 	switch (event) {
6580 	case NETDEV_CHANGENAME:
6581 		if (priv->dbgfs_dir)
6582 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6583 							 priv->dbgfs_dir,
6584 							 stmmac_fs_dir,
6585 							 dev->name);
6586 		break;
6587 	}
6588 done:
6589 	return NOTIFY_DONE;
6590 }
6591 
6592 static struct notifier_block stmmac_notifier = {
6593 	.notifier_call = stmmac_device_event,
6594 };
6595 
6596 static void stmmac_init_fs(struct net_device *dev)
6597 {
6598 	struct stmmac_priv *priv = netdev_priv(dev);
6599 
6600 	rtnl_lock();
6601 
6602 	/* Create per netdev entries */
6603 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6604 
6605 	/* Entry to report DMA RX/TX rings */
6606 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6607 			    &stmmac_rings_status_fops);
6608 
6609 	/* Entry to report the DMA HW features */
6610 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6611 			    &stmmac_dma_cap_fops);
6612 
6613 	rtnl_unlock();
6614 }
6615 
6616 static void stmmac_exit_fs(struct net_device *dev)
6617 {
6618 	struct stmmac_priv *priv = netdev_priv(dev);
6619 
6620 	debugfs_remove_recursive(priv->dbgfs_dir);
6621 }
6622 #endif /* CONFIG_DEBUG_FS */
6623 
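/* Bit-by-bit little-endian (reflected) CRC-32 over the 12 VLAN ID bits,
 * using the 0xEDB88320 polynomial; the caller inverts, bit-reverses and
 * keeps the top four bits to index the 16-bin hardware VLAN hash filter.
 */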
6624 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6625 {
6626 	unsigned char *data = (unsigned char *)&vid_le;
6627 	unsigned char data_byte = 0;
6628 	u32 crc = ~0x0;
6629 	u32 temp = 0;
6630 	int i, bits;
6631 
6632 	bits = get_bitmask_order(VLAN_VID_MASK);
6633 	for (i = 0; i < bits; i++) {
6634 		if ((i % 8) == 0)
6635 			data_byte = data[i / 8];
6636 
6637 		temp = ((crc & 1) ^ data_byte) & 1;
6638 		crc >>= 1;
6639 		data_byte >>= 1;
6640 
6641 		if (temp)
6642 			crc ^= 0xedb88320;
6643 	}
6644 
6645 	return crc;
6646 }
6647 
6648 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6649 {
6650 	u32 crc, hash = 0;
6651 	__le16 pmatch = 0;
6652 	int count = 0;
6653 	u16 vid = 0;
6654 
6655 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6656 		__le16 vid_le = cpu_to_le16(vid);
6657 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6658 		hash |= (1 << crc);
6659 		count++;
6660 	}
6661 
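	/* Without VLAN hash filtering fall back to the single perfect-match
	 * register: only one VID (besides VID 0, which always passes) can be
	 * programmed, so more than two active VIDs cannot be supported.
	 */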
6662 	if (!priv->dma_cap.vlhash) {
6663 		if (count > 2) /* VID = 0 always passes filter */
6664 			return -EOPNOTSUPP;
6665 
6666 		pmatch = cpu_to_le16(vid);
6667 		hash = 0;
6668 	}
6669 
6670 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6671 }
6672 
6673 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6674 {
6675 	struct stmmac_priv *priv = netdev_priv(ndev);
6676 	bool is_double = false;
6677 	int ret;
6678 
6679 	ret = pm_runtime_resume_and_get(priv->device);
6680 	if (ret < 0)
6681 		return ret;
6682 
6683 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6684 		is_double = true;
6685 
6686 	set_bit(vid, priv->active_vlans);
6687 	ret = stmmac_vlan_update(priv, is_double);
6688 	if (ret) {
6689 		clear_bit(vid, priv->active_vlans);
6690 		goto err_pm_put;
6691 	}
6692 
6693 	if (priv->hw->num_vlan) {
6694 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6695 		if (ret)
6696 			goto err_pm_put;
6697 	}
6698 err_pm_put:
6699 	pm_runtime_put(priv->device);
6700 
6701 	return ret;
6702 }
6703 
6704 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6705 {
6706 	struct stmmac_priv *priv = netdev_priv(ndev);
6707 	bool is_double = false;
6708 	int ret;
6709 
6710 	ret = pm_runtime_resume_and_get(priv->device);
6711 	if (ret < 0)
6712 		return ret;
6713 
6714 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6715 		is_double = true;
6716 
6717 	clear_bit(vid, priv->active_vlans);
6718 
6719 	if (priv->hw->num_vlan) {
6720 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6721 		if (ret)
6722 			goto del_vlan_error;
6723 	}
6724 
6725 	ret = stmmac_vlan_update(priv, is_double);
6726 
6727 del_vlan_error:
6728 	pm_runtime_put(priv->device);
6729 
6730 	return ret;
6731 }
6732 
6733 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6734 {
6735 	struct stmmac_priv *priv = netdev_priv(dev);
6736 
6737 	switch (bpf->command) {
6738 	case XDP_SETUP_PROG:
6739 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6740 	case XDP_SETUP_XSK_POOL:
6741 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6742 					     bpf->xsk.queue_id);
6743 	default:
6744 		return -EOPNOTSUPP;
6745 	}
6746 }
6747 
6748 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6749 			   struct xdp_frame **frames, u32 flags)
6750 {
6751 	struct stmmac_priv *priv = netdev_priv(dev);
6752 	int cpu = smp_processor_id();
6753 	struct netdev_queue *nq;
6754 	int i, nxmit = 0;
6755 	int queue;
6756 
6757 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6758 		return -ENETDOWN;
6759 
6760 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6761 		return -EINVAL;
6762 
6763 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6764 	nq = netdev_get_tx_queue(priv->dev, queue);
6765 
6766 	__netif_tx_lock(nq, cpu);
6767 	/* Avoid a TX time-out as we are sharing the queue with the slow path */
6768 	txq_trans_cond_update(nq);
6769 
6770 	for (i = 0; i < num_frames; i++) {
6771 		int res;
6772 
6773 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6774 		if (res == STMMAC_XDP_CONSUMED)
6775 			break;
6776 
6777 		nxmit++;
6778 	}
6779 
6780 	if (flags & XDP_XMIT_FLUSH) {
6781 		stmmac_flush_tx_descriptors(priv, queue);
6782 		stmmac_tx_timer_arm(priv, queue);
6783 	}
6784 
6785 	__netif_tx_unlock(nq);
6786 
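	/* Per the ndo_xdp_xmit contract, report how many frames were queued;
	 * the XDP core frees the frames that were not accepted.
	 */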
6787 	return nxmit;
6788 }
6789 
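/* Per-queue teardown/bring-up helpers, used when an XSK buffer pool is
 * attached to or detached from a single RX/TX queue pair without restarting
 * the whole interface.
 */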
6790 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6791 {
6792 	struct stmmac_channel *ch = &priv->channel[queue];
6793 	unsigned long flags;
6794 
6795 	spin_lock_irqsave(&ch->lock, flags);
6796 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6797 	spin_unlock_irqrestore(&ch->lock, flags);
6798 
6799 	stmmac_stop_rx_dma(priv, queue);
6800 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6801 }
6802 
6803 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6804 {
6805 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6806 	struct stmmac_channel *ch = &priv->channel[queue];
6807 	unsigned long flags;
6808 	u32 buf_size;
6809 	int ret;
6810 
6811 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6812 	if (ret) {
6813 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6814 		return;
6815 	}
6816 
6817 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6818 	if (ret) {
6819 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6820 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6821 		return;
6822 	}
6823 
6824 	stmmac_reset_rx_queue(priv, queue);
6825 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6826 
6827 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6828 			    rx_q->dma_rx_phy, rx_q->queue_index);
6829 
6830 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6831 			     sizeof(struct dma_desc));
6832 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6833 			       rx_q->rx_tail_addr, rx_q->queue_index);
6834 
6835 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6836 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6837 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6838 				      buf_size,
6839 				      rx_q->queue_index);
6840 	} else {
6841 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6842 				      priv->dma_conf.dma_buf_sz,
6843 				      rx_q->queue_index);
6844 	}
6845 
6846 	stmmac_start_rx_dma(priv, queue);
6847 
6848 	spin_lock_irqsave(&ch->lock, flags);
6849 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6850 	spin_unlock_irqrestore(&ch->lock, flags);
6851 }
6852 
6853 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6854 {
6855 	struct stmmac_channel *ch = &priv->channel[queue];
6856 	unsigned long flags;
6857 
6858 	spin_lock_irqsave(&ch->lock, flags);
6859 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6860 	spin_unlock_irqrestore(&ch->lock, flags);
6861 
6862 	stmmac_stop_tx_dma(priv, queue);
6863 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6864 }
6865 
6866 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6867 {
6868 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6869 	struct stmmac_channel *ch = &priv->channel[queue];
6870 	unsigned long flags;
6871 	int ret;
6872 
6873 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6874 	if (ret) {
6875 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6876 		return;
6877 	}
6878 
6879 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6880 	if (ret) {
6881 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6882 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6883 		return;
6884 	}
6885 
6886 	stmmac_reset_tx_queue(priv, queue);
6887 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6888 
6889 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6890 			    tx_q->dma_tx_phy, tx_q->queue_index);
6891 
6892 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6893 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6894 
6895 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6896 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6897 			       tx_q->tx_tail_addr, tx_q->queue_index);
6898 
6899 	stmmac_start_tx_dma(priv, queue);
6900 
6901 	spin_lock_irqsave(&ch->lock, flags);
6902 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6903 	spin_unlock_irqrestore(&ch->lock, flags);
6904 }
6905 
6906 void stmmac_xdp_release(struct net_device *dev)
6907 {
6908 	struct stmmac_priv *priv = netdev_priv(dev);
6909 	u32 chan;
6910 
6911 	/* Ensure tx function is not running */
6912 	netif_tx_disable(dev);
6913 
6914 	/* Disable NAPI process */
6915 	stmmac_disable_all_queues(priv);
6916 
6917 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6918 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6919 
6920 	/* Free the IRQ lines */
6921 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6922 
6923 	/* Stop TX/RX DMA channels */
6924 	stmmac_stop_all_dma(priv);
6925 
6926 	/* Release and free the Rx/Tx resources */
6927 	free_dma_desc_resources(priv, &priv->dma_conf);
6928 
6929 	/* Disable the MAC Rx/Tx */
6930 	stmmac_mac_set(priv, priv->ioaddr, false);
6931 
6932 	/* set trans_start so we don't get spurious
6933 	 * watchdogs during reset
6934 	 */
6935 	netif_trans_update(dev);
6936 	netif_carrier_off(dev);
6937 }
6938 
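/* Reduced open path used when the interface is reconfigured for XDP while it
 * is already up: it reallocates and reinitializes the descriptor rings,
 * reprograms every RX/TX DMA channel (honouring an attached XSK pool's frame
 * size), re-requests the IRQs and restarts NAPI. stmmac_xdp_release() above
 * is its teardown counterpart.
 */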
6939 int stmmac_xdp_open(struct net_device *dev)
6940 {
6941 	struct stmmac_priv *priv = netdev_priv(dev);
6942 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6943 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6944 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6945 	struct stmmac_rx_queue *rx_q;
6946 	struct stmmac_tx_queue *tx_q;
6947 	u32 buf_size;
6948 	bool sph_en;
6949 	u32 chan;
6950 	int ret;
6951 
6952 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6953 	if (ret < 0) {
6954 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6955 			   __func__);
6956 		goto dma_desc_error;
6957 	}
6958 
6959 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6960 	if (ret < 0) {
6961 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6962 			   __func__);
6963 		goto init_error;
6964 	}
6965 
6966 	stmmac_reset_queues_param(priv);
6967 
6968 	/* DMA CSR Channel configuration */
6969 	for (chan = 0; chan < dma_csr_ch; chan++) {
6970 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6971 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6972 	}
6973 
6974 	/* Adjust Split header */
6975 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6976 
6977 	/* DMA RX Channel Configuration */
6978 	for (chan = 0; chan < rx_cnt; chan++) {
6979 		rx_q = &priv->dma_conf.rx_queue[chan];
6980 
6981 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6982 				    rx_q->dma_rx_phy, chan);
6983 
6984 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6985 				     (rx_q->buf_alloc_num *
6986 				      sizeof(struct dma_desc));
6987 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6988 				       rx_q->rx_tail_addr, chan);
6989 
6990 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6991 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6992 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6993 					      buf_size,
6994 					      rx_q->queue_index);
6995 		} else {
6996 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6997 					      priv->dma_conf.dma_buf_sz,
6998 					      rx_q->queue_index);
6999 		}
7000 
7001 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
7002 	}
7003 
7004 	/* DMA TX Channel Configuration */
7005 	for (chan = 0; chan < tx_cnt; chan++) {
7006 		tx_q = &priv->dma_conf.tx_queue[chan];
7007 
7008 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7009 				    tx_q->dma_tx_phy, chan);
7010 
7011 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7012 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7013 				       tx_q->tx_tail_addr, chan);
7014 
7015 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7016 		tx_q->txtimer.function = stmmac_tx_timer;
7017 	}
7018 
7019 	/* Enable the MAC Rx/Tx */
7020 	stmmac_mac_set(priv, priv->ioaddr, true);
7021 
7022 	/* Start Rx & Tx DMA Channels */
7023 	stmmac_start_all_dma(priv);
7024 
7025 	ret = stmmac_request_irq(dev);
7026 	if (ret)
7027 		goto irq_error;
7028 
7029 	/* Enable NAPI process */
7030 	stmmac_enable_all_queues(priv);
7031 	netif_carrier_on(dev);
7032 	netif_tx_start_all_queues(dev);
7033 	stmmac_enable_all_dma_irq(priv);
7034 
7035 	return 0;
7036 
7037 irq_error:
7038 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7039 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7040 
7041 	stmmac_hw_teardown(dev);
7042 init_error:
7043 	free_dma_desc_resources(priv, &priv->dma_conf);
7044 dma_desc_error:
7045 	return ret;
7046 }
7047 
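/* .ndo_xsk_wakeup implementation. Roughly speaking, user space reaches this
 * through the AF_XDP need_wakeup protocol (e.g. sendto()/poll() on an XSK
 * socket) to make sure the zero-copy RX/TX NAPI of the given queue runs at
 * least once more.
 */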
7048 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7049 {
7050 	struct stmmac_priv *priv = netdev_priv(dev);
7051 	struct stmmac_rx_queue *rx_q;
7052 	struct stmmac_tx_queue *tx_q;
7053 	struct stmmac_channel *ch;
7054 
7055 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7056 	    !netif_carrier_ok(priv->dev))
7057 		return -ENETDOWN;
7058 
7059 	if (!stmmac_xdp_is_enabled(priv))
7060 		return -EINVAL;
7061 
7062 	if (queue >= priv->plat->rx_queues_to_use ||
7063 	    queue >= priv->plat->tx_queues_to_use)
7064 		return -EINVAL;
7065 
7066 	rx_q = &priv->dma_conf.rx_queue[queue];
7067 	tx_q = &priv->dma_conf.tx_queue[queue];
7068 	ch = &priv->channel[queue];
7069 
7070 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7071 		return -EINVAL;
7072 
7073 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7074 		/* EQoS does not have a per-DMA channel SW interrupt,
7075 		 * so schedule the RX/TX NAPI straight away.
7076 		 */
7077 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7078 			__napi_schedule(&ch->rxtx_napi);
7079 	}
7080 
7081 	return 0;
7082 }
7083 
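/* .ndo_get_stats64 implementation. The per-queue counters are read with the
 * u64_stats_fetch_begin()/retry() pattern so that 64-bit values are sampled
 * consistently even on 32-bit systems; TX bytes and TX packets live in
 * separate sync domains because they are updated from the xmit path and the
 * NAPI completion path respectively.
 */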
7084 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7085 {
7086 	struct stmmac_priv *priv = netdev_priv(dev);
7087 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7088 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7089 	unsigned int start;
7090 	int q;
7091 
7092 	for (q = 0; q < tx_cnt; q++) {
7093 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7094 		u64 tx_packets;
7095 		u64 tx_bytes;
7096 
7097 		do {
7098 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7099 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7100 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7101 		do {
7102 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7103 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7104 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7105 
7106 		stats->tx_packets += tx_packets;
7107 		stats->tx_bytes += tx_bytes;
7108 	}
7109 
7110 	for (q = 0; q < rx_cnt; q++) {
7111 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7112 		u64 rx_packets;
7113 		u64 rx_bytes;
7114 
7115 		do {
7116 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7117 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7118 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7119 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7120 
7121 		stats->rx_packets += rx_packets;
7122 		stats->rx_bytes += rx_bytes;
7123 	}
7124 
7125 	stats->rx_dropped = priv->xstats.rx_dropped;
7126 	stats->rx_errors = priv->xstats.rx_errors;
7127 	stats->tx_dropped = priv->xstats.tx_dropped;
7128 	stats->tx_errors = priv->xstats.tx_errors;
7129 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7130 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7131 	stats->rx_length_errors = priv->xstats.rx_length;
7132 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7133 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7134 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7135 }
7136 
7137 static const struct net_device_ops stmmac_netdev_ops = {
7138 	.ndo_open = stmmac_open,
7139 	.ndo_start_xmit = stmmac_xmit,
7140 	.ndo_stop = stmmac_release,
7141 	.ndo_change_mtu = stmmac_change_mtu,
7142 	.ndo_fix_features = stmmac_fix_features,
7143 	.ndo_set_features = stmmac_set_features,
7144 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7145 	.ndo_tx_timeout = stmmac_tx_timeout,
7146 	.ndo_eth_ioctl = stmmac_ioctl,
7147 	.ndo_get_stats64 = stmmac_get_stats64,
7148 	.ndo_setup_tc = stmmac_setup_tc,
7149 	.ndo_select_queue = stmmac_select_queue,
7150 	.ndo_set_mac_address = stmmac_set_mac_address,
7151 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7152 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7153 	.ndo_bpf = stmmac_bpf,
7154 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7155 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7156 };
7157 
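/* Deferred reset handling. When a reset has been requested (typically from
 * the TX watchdog via the service task), close and reopen the device under
 * RTNL to bring the hardware back to a clean state.
 */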
7158 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7159 {
7160 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7161 		return;
7162 	if (test_bit(STMMAC_DOWN, &priv->state))
7163 		return;
7164 
7165 	netdev_err(priv->dev, "Reset adapter.\n");
7166 
7167 	rtnl_lock();
7168 	netif_trans_update(priv->dev);
7169 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7170 		usleep_range(1000, 2000);
7171 
7172 	set_bit(STMMAC_DOWN, &priv->state);
7173 	dev_close(priv->dev);
7174 	dev_open(priv->dev, NULL);
7175 	clear_bit(STMMAC_DOWN, &priv->state);
7176 	clear_bit(STMMAC_RESETING, &priv->state);
7177 	rtnl_unlock();
7178 }
7179 
7180 static void stmmac_service_task(struct work_struct *work)
7181 {
7182 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7183 			service_task);
7184 
7185 	stmmac_reset_subtask(priv);
7186 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7187 }
7188 
7189 /**
7190  *  stmmac_hw_init - Init the MAC device
7191  *  @priv: driver private structure
7192  *  Description: this function is to configure the MAC device according to
7193  *  some platform parameters or the HW capability register. It prepares the
7194  *  driver to use either ring or chain modes and to setup either enhanced or
7195  *  normal descriptors.
7196  */
7197 static int stmmac_hw_init(struct stmmac_priv *priv)
7198 {
7199 	int ret;
7200 
7201 	/* dwmac-sun8i only works in chain mode */
7202 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7203 		chain_mode = 1;
7204 	priv->chain_mode = chain_mode;
7205 
7206 	/* Initialize HW Interface */
7207 	ret = stmmac_hwif_init(priv);
7208 	if (ret)
7209 		return ret;
7210 
7211 	/* Get the HW capabilities (available on GMAC cores newer than 3.50a) */
7212 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7213 	if (priv->hw_cap_support) {
7214 		dev_info(priv->device, "DMA HW capability register supported\n");
7215 
7216 		/* We can override some gmac/dma configuration fields
7217 		 * (e.g. enh_desc, tx_coe) that are passed through the
7218 		 * platform with the values from the HW capability
7219 		 * register (if supported).
7220 		 */
7221 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7222 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7223 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7224 		priv->hw->pmt = priv->plat->pmt;
7225 		if (priv->dma_cap.hash_tb_sz) {
7226 			priv->hw->multicast_filter_bins =
7227 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7228 			priv->hw->mcast_bits_log2 =
7229 					ilog2(priv->hw->multicast_filter_bins);
7230 		}
7231 
7232 		/* TXCOE doesn't work in thresh DMA mode */
7233 		if (priv->plat->force_thresh_dma_mode)
7234 			priv->plat->tx_coe = 0;
7235 		else
7236 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7237 
7238 		/* In case of GMAC4 rx_coe is from HW cap register. */
7239 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7240 
7241 		if (priv->dma_cap.rx_coe_type2)
7242 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7243 		else if (priv->dma_cap.rx_coe_type1)
7244 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7245 
7246 	} else {
7247 		dev_info(priv->device, "No HW DMA feature register supported\n");
7248 	}
7249 
7250 	if (priv->plat->rx_coe) {
7251 		priv->hw->rx_csum = priv->plat->rx_coe;
7252 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7253 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7254 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7255 	}
7256 	if (priv->plat->tx_coe)
7257 		dev_info(priv->device, "TX Checksum insertion supported\n");
7258 
7259 	if (priv->plat->pmt) {
7260 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7261 		device_set_wakeup_capable(priv->device, 1);
7262 	}
7263 
7264 	if (priv->dma_cap.tsoen)
7265 		dev_info(priv->device, "TSO supported\n");
7266 
7267 	priv->hw->vlan_fail_q_en =
7268 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7269 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7270 
7271 	/* Run HW quirks, if any */
7272 	if (priv->hwif_quirks) {
7273 		ret = priv->hwif_quirks(priv);
7274 		if (ret)
7275 			return ret;
7276 	}
7277 
7278 	/* Rx Watchdog is available in cores newer than 3.40.
7279 	 * In some cases, for example on buggy HW, this feature
7280 	 * has to be disabled and this can be done by passing the
7281 	 * riwt_off field from the platform.
7282 	 */
7283 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7284 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7285 		priv->use_riwt = 1;
7286 		dev_info(priv->device,
7287 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7288 	}
7289 
7290 	return 0;
7291 }
7292 
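/* Register the NAPI contexts for every DMA channel: an RX NAPI, a TX NAPI
 * and, for channels that have both an RX and a TX queue, a combined RX/TX
 * NAPI which is used by the XDP zero-copy (XSK) path (see
 * stmmac_xsk_wakeup() above).
 */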
7293 static void stmmac_napi_add(struct net_device *dev)
7294 {
7295 	struct stmmac_priv *priv = netdev_priv(dev);
7296 	u32 queue, maxq;
7297 
7298 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7299 
7300 	for (queue = 0; queue < maxq; queue++) {
7301 		struct stmmac_channel *ch = &priv->channel[queue];
7302 
7303 		ch->priv_data = priv;
7304 		ch->index = queue;
7305 		spin_lock_init(&ch->lock);
7306 
7307 		if (queue < priv->plat->rx_queues_to_use) {
7308 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7309 		}
7310 		if (queue < priv->plat->tx_queues_to_use) {
7311 			netif_napi_add_tx(dev, &ch->tx_napi,
7312 					  stmmac_napi_poll_tx);
7313 		}
7314 		if (queue < priv->plat->rx_queues_to_use &&
7315 		    queue < priv->plat->tx_queues_to_use) {
7316 			netif_napi_add(dev, &ch->rxtx_napi,
7317 				       stmmac_napi_poll_rxtx);
7318 		}
7319 	}
7320 }
7321 
7322 static void stmmac_napi_del(struct net_device *dev)
7323 {
7324 	struct stmmac_priv *priv = netdev_priv(dev);
7325 	u32 queue, maxq;
7326 
7327 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7328 
7329 	for (queue = 0; queue < maxq; queue++) {
7330 		struct stmmac_channel *ch = &priv->channel[queue];
7331 
7332 		if (queue < priv->plat->rx_queues_to_use)
7333 			netif_napi_del(&ch->rx_napi);
7334 		if (queue < priv->plat->tx_queues_to_use)
7335 			netif_napi_del(&ch->tx_napi);
7336 		if (queue < priv->plat->rx_queues_to_use &&
7337 		    queue < priv->plat->tx_queues_to_use) {
7338 			netif_napi_del(&ch->rxtx_napi);
7339 		}
7340 	}
7341 }
7342 
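/* Change the number of RX/TX queues in use. This is typically reached from
 * the ethtool set-channels handler; the interface is closed if it is
 * running, the RSS indirection table is regenerated unless user-configured,
 * and the device is reopened with the new queue counts.
 */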
7343 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7344 {
7345 	struct stmmac_priv *priv = netdev_priv(dev);
7346 	int ret = 0, i;
7347 
7348 	if (netif_running(dev))
7349 		stmmac_release(dev);
7350 
7351 	stmmac_napi_del(dev);
7352 
7353 	priv->plat->rx_queues_to_use = rx_cnt;
7354 	priv->plat->tx_queues_to_use = tx_cnt;
7355 	if (!netif_is_rxfh_configured(dev))
7356 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7357 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7358 									rx_cnt);
7359 
7360 	stmmac_set_half_duplex(priv);
7361 	stmmac_napi_add(dev);
7362 
7363 	if (netif_running(dev))
7364 		ret = stmmac_open(dev);
7365 
7366 	return ret;
7367 }
7368 
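/* Change the RX/TX descriptor ring sizes. This is typically reached from the
 * ethtool set-ringparam handler (e.g. "ethtool -G eth0 rx 1024 tx 1024",
 * illustrative values only): the interface is closed if running, the new
 * sizes are recorded and the rings are reallocated on reopen.
 */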
7369 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7370 {
7371 	struct stmmac_priv *priv = netdev_priv(dev);
7372 	int ret = 0;
7373 
7374 	if (netif_running(dev))
7375 		stmmac_release(dev);
7376 
7377 	priv->dma_conf.dma_rx_size = rx_size;
7378 	priv->dma_conf.dma_tx_size = tx_size;
7379 
7380 	if (netif_running(dev))
7381 		ret = stmmac_open(dev);
7382 
7383 	return ret;
7384 }
7385 
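/* Frame Preemption (FPE) link-partner handshake task: while the handshake is
 * enabled it keeps sending verify mPackets until the link partner responds,
 * and configures FPE once both the local and the link-partner state machines
 * report ENTERING_ON. The loop below gives up after 20 attempts spaced
 * roughly 500 ms apart.
 */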
7386 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7387 static void stmmac_fpe_lp_task(struct work_struct *work)
7388 {
7389 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7390 						fpe_task);
7391 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7392 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7393 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7394 	bool *hs_enable = &fpe_cfg->hs_enable;
7395 	bool *enable = &fpe_cfg->enable;
7396 	int retries = 20;
7397 
7398 	while (retries-- > 0) {
7399 		/* Bail out immediately if FPE handshake is OFF */
7400 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7401 			break;
7402 
7403 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7404 		    *lp_state == FPE_STATE_ENTERING_ON) {
7405 			stmmac_fpe_configure(priv, priv->ioaddr,
7406 					     fpe_cfg,
7407 					     priv->plat->tx_queues_to_use,
7408 					     priv->plat->rx_queues_to_use,
7409 					     *enable);
7410 
7411 			netdev_info(priv->dev, "configured FPE\n");
7412 
7413 			*lo_state = FPE_STATE_ON;
7414 			*lp_state = FPE_STATE_ON;
7415 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7416 			break;
7417 		}
7418 
7419 		if ((*lo_state == FPE_STATE_CAPABLE ||
7420 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7421 		     *lp_state != FPE_STATE_ON) {
7422 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7423 				    *lo_state, *lp_state);
7424 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7425 						fpe_cfg,
7426 						MPACKET_VERIFY);
7427 		}
7428 		/* Sleep then retry */
7429 		msleep(500);
7430 	}
7431 
7432 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7433 }
7434 
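/* Start or stop the FPE verification handshake: when enabling, kick it off
 * by sending a verify mPacket; when disabling, reset both the local and the
 * link-partner state machines to OFF.
 */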
7435 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7436 {
7437 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7438 		if (enable) {
7439 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7440 						priv->plat->fpe_cfg,
7441 						MPACKET_VERIFY);
7442 		} else {
7443 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7444 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7445 		}
7446 
7447 		priv->plat->fpe_cfg->hs_enable = enable;
7448 	}
7449 }
7450 
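/* XDP RX metadata hook: lets XDP/AF_XDP programs query the hardware RX
 * timestamp of the current frame (presumably via the kfunc-based
 * xdp_metadata interface). Returns -ENODATA when RX timestamping is disabled
 * or no timestamp was latched for this descriptor.
 */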
7451 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7452 {
7453 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7454 	struct dma_desc *desc_contains_ts = ctx->desc;
7455 	struct stmmac_priv *priv = ctx->priv;
7456 	struct dma_desc *ndesc = ctx->ndesc;
7457 	struct dma_desc *desc = ctx->desc;
7458 	u64 ns = 0;
7459 
7460 	if (!priv->hwts_rx_en)
7461 		return -ENODATA;
7462 
7463 	/* For GMAC4, the valid timestamp is held in the context (next) descriptor. */
7464 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7465 		desc_contains_ts = ndesc;
7466 
7467 	/* Check if timestamp is available */
7468 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7469 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7470 		ns -= priv->plat->cdc_error_adj;
7471 		*timestamp = ns_to_ktime(ns);
7472 		return 0;
7473 	}
7474 
7475 	return -ENODATA;
7476 }
7477 
7478 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7479 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7480 };
7481 
7482 /**
7483  * stmmac_dvr_probe
7484  * @device: device pointer
7485  * @plat_dat: platform data pointer
7486  * @res: stmmac resource pointer
7487  * Description: this is the main probe function used to allocate the
7488  * network device (via alloc_etherdev) and set up the priv structure.
7489  * Return:
7490  * 0 on success, otherwise a negative errno.
7491  */
7492 int stmmac_dvr_probe(struct device *device,
7493 		     struct plat_stmmacenet_data *plat_dat,
7494 		     struct stmmac_resources *res)
7495 {
7496 	struct net_device *ndev = NULL;
7497 	struct stmmac_priv *priv;
7498 	u32 rxq;
7499 	int i, ret = 0;
7500 
7501 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7502 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7503 	if (!ndev)
7504 		return -ENOMEM;
7505 
7506 	SET_NETDEV_DEV(ndev, device);
7507 
7508 	priv = netdev_priv(ndev);
7509 	priv->device = device;
7510 	priv->dev = ndev;
7511 
7512 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7513 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7514 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7515 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7516 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7517 	}
7518 
7519 	priv->xstats.pcpu_stats =
7520 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7521 	if (!priv->xstats.pcpu_stats)
7522 		return -ENOMEM;
7523 
7524 	stmmac_set_ethtool_ops(ndev);
7525 	priv->pause = pause;
7526 	priv->plat = plat_dat;
7527 	priv->ioaddr = res->addr;
7528 	priv->dev->base_addr = (unsigned long)res->addr;
7529 	priv->plat->dma_cfg->multi_msi_en =
7530 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7531 
7532 	priv->dev->irq = res->irq;
7533 	priv->wol_irq = res->wol_irq;
7534 	priv->lpi_irq = res->lpi_irq;
7535 	priv->sfty_irq = res->sfty_irq;
7536 	priv->sfty_ce_irq = res->sfty_ce_irq;
7537 	priv->sfty_ue_irq = res->sfty_ue_irq;
7538 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7539 		priv->rx_irq[i] = res->rx_irq[i];
7540 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7541 		priv->tx_irq[i] = res->tx_irq[i];
7542 
7543 	if (!is_zero_ether_addr(res->mac))
7544 		eth_hw_addr_set(priv->dev, res->mac);
7545 
7546 	dev_set_drvdata(device, priv->dev);
7547 
7548 	/* Verify driver arguments */
7549 	stmmac_verify_args();
7550 
7551 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7552 	if (!priv->af_xdp_zc_qps)
7553 		return -ENOMEM;
7554 
7555 	/* Allocate workqueue */
7556 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7557 	if (!priv->wq) {
7558 		dev_err(priv->device, "failed to create workqueue\n");
7559 		ret = -ENOMEM;
7560 		goto error_wq_init;
7561 	}
7562 
7563 	INIT_WORK(&priv->service_task, stmmac_service_task);
7564 
7565 	/* Initialize Link Partner FPE workqueue */
7566 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7567 
7568 	/* Override with kernel parameters if supplied XXX CRS XXX
7569 	 * this needs to have multiple instances
7570 	 */
7571 	if ((phyaddr >= 0) && (phyaddr <= 31))
7572 		priv->plat->phy_addr = phyaddr;
7573 
7574 	if (priv->plat->stmmac_rst) {
7575 		ret = reset_control_assert(priv->plat->stmmac_rst);
7576 		reset_control_deassert(priv->plat->stmmac_rst);
7577 		/* Some reset controllers provide only a reset callback
7578 		 * instead of the assert + deassert callback pair.
7579 		 */
7580 		if (ret == -ENOTSUPP)
7581 			reset_control_reset(priv->plat->stmmac_rst);
7582 	}
7583 
7584 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7585 	if (ret == -ENOTSUPP)
7586 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7587 			ERR_PTR(ret));
7588 
7589 	/* Wait a bit for the reset to take effect */
7590 	udelay(10);
7591 
7592 	/* Init MAC and get the capabilities */
7593 	ret = stmmac_hw_init(priv);
7594 	if (ret)
7595 		goto error_hw_init;
7596 
7597 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7598 	 */
7599 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7600 		priv->plat->dma_cfg->dche = false;
7601 
7602 	stmmac_check_ether_addr(priv);
7603 
7604 	ndev->netdev_ops = &stmmac_netdev_ops;
7605 
7606 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7607 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7608 
7609 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7610 			    NETIF_F_RXCSUM;
7611 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7612 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7613 
7614 	ret = stmmac_tc_init(priv, priv);
7615 	if (!ret) {
7616 		ndev->hw_features |= NETIF_F_HW_TC;
7617 	}
7618 
7619 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7620 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7621 		if (priv->plat->has_gmac4)
7622 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7623 		priv->tso = true;
7624 		dev_info(priv->device, "TSO feature enabled\n");
7625 	}
7626 
7627 	if (priv->dma_cap.sphen &&
7628 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7629 		ndev->hw_features |= NETIF_F_GRO;
7630 		priv->sph_cap = true;
7631 		priv->sph = priv->sph_cap;
7632 		dev_info(priv->device, "SPH feature enabled\n");
7633 	}
7634 
7635 	/* Ideally our host DMA address width is the same as for the
7636 	 * device. However, it may differ and then we have to use our
7637 	 * host DMA width for allocation and the device DMA width for
7638 	 * register handling.
7639 	 */
7640 	if (priv->plat->host_dma_width)
7641 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7642 	else
7643 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7644 
7645 	if (priv->dma_cap.host_dma_width) {
7646 		ret = dma_set_mask_and_coherent(device,
7647 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7648 		if (!ret) {
7649 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7650 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7651 
7652 			/*
7653 			 * If more than 32 bits can be addressed, make sure to
7654 			 * enable enhanced addressing mode.
7655 			 */
7656 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7657 				priv->plat->dma_cfg->eame = true;
7658 		} else {
7659 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7660 			if (ret) {
7661 				dev_err(priv->device, "Failed to set DMA Mask\n");
7662 				goto error_hw_init;
7663 			}
7664 
7665 			priv->dma_cap.host_dma_width = 32;
7666 		}
7667 	}
7668 
7669 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7670 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7671 #ifdef STMMAC_VLAN_TAG_USED
7672 	/* Both mac100 and gmac support receive VLAN tag detection */
7673 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7674 	ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7675 	priv->hw->hw_vlan_en = true;
7676 
7677 	if (priv->dma_cap.vlhash) {
7678 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7679 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7680 	}
7681 	if (priv->dma_cap.vlins) {
7682 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7683 		if (priv->dma_cap.dvlan)
7684 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7685 	}
7686 #endif
7687 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7688 
7689 	priv->xstats.threshold = tc;
7690 
7691 	/* Initialize RSS */
7692 	rxq = priv->plat->rx_queues_to_use;
7693 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7694 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7695 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7696 
7697 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7698 		ndev->features |= NETIF_F_RXHASH;
7699 
7700 	ndev->vlan_features |= ndev->features;
7701 	/* TSO doesn't work on VLANs yet */
7702 	ndev->vlan_features &= ~NETIF_F_TSO;
7703 
7704 	/* MTU range: 46 - hw-specific max */
7705 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7706 	if (priv->plat->has_xgmac)
7707 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7708 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7709 		ndev->max_mtu = JUMBO_LEN;
7710 	else
7711 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7712 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7713 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7714 	 */
7715 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7716 	    (priv->plat->maxmtu >= ndev->min_mtu))
7717 		ndev->max_mtu = priv->plat->maxmtu;
7718 	else if (priv->plat->maxmtu < ndev->min_mtu)
7719 		dev_warn(priv->device,
7720 			 "%s: warning: maxmtu having invalid value (%d)\n",
7721 			 __func__, priv->plat->maxmtu);
7722 
7723 	if (flow_ctrl)
7724 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7725 
7726 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7727 
7728 	/* Setup channels NAPI */
7729 	stmmac_napi_add(ndev);
7730 
7731 	mutex_init(&priv->lock);
7732 
7733 	/* If a specific clk_csr value is passed from the platform
7734 	 * this means that the CSR Clock Range selection cannot be
7735 	 * changed at run-time and it is fixed. Otherwise the driver will
7736 	 * try to set the MDC clock dynamically according to the actual
7737 	 * CSR clock input.
7738 	 */
7739 	if (priv->plat->clk_csr >= 0)
7740 		priv->clk_csr = priv->plat->clk_csr;
7741 	else
7742 		stmmac_clk_csr_set(priv);
7743 
7744 	stmmac_check_pcs_mode(priv);
7745 
7746 	pm_runtime_get_noresume(device);
7747 	pm_runtime_set_active(device);
7748 	if (!pm_runtime_enabled(device))
7749 		pm_runtime_enable(device);
7750 
7751 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7752 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7753 		/* MDIO bus Registration */
7754 		ret = stmmac_mdio_register(ndev);
7755 		if (ret < 0) {
7756 			dev_err_probe(priv->device, ret,
7757 				      "%s: MDIO bus (id: %d) registration failed\n",
7758 				      __func__, priv->plat->bus_id);
7759 			goto error_mdio_register;
7760 		}
7761 	}
7762 
7763 	if (priv->plat->speed_mode_2500)
7764 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7765 
7766 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7767 		ret = stmmac_xpcs_setup(priv->mii);
7768 		if (ret)
7769 			goto error_xpcs_setup;
7770 	}
7771 
7772 	ret = stmmac_phy_setup(priv);
7773 	if (ret) {
7774 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7775 		goto error_phy_setup;
7776 	}
7777 
7778 	ret = register_netdev(ndev);
7779 	if (ret) {
7780 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7781 			__func__, ret);
7782 		goto error_netdev_register;
7783 	}
7784 
7785 #ifdef CONFIG_DEBUG_FS
7786 	stmmac_init_fs(ndev);
7787 #endif
7788 
7789 	if (priv->plat->dump_debug_regs)
7790 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7791 
7792 	/* Let pm_runtime_put() disable the clocks.
7793 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7794 	 */
7795 	pm_runtime_put(device);
7796 
7797 	return ret;
7798 
7799 error_netdev_register:
7800 	phylink_destroy(priv->phylink);
7801 error_xpcs_setup:
7802 error_phy_setup:
7803 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7804 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7805 		stmmac_mdio_unregister(ndev);
7806 error_mdio_register:
7807 	stmmac_napi_del(ndev);
7808 error_hw_init:
7809 	destroy_workqueue(priv->wq);
7810 error_wq_init:
7811 	bitmap_free(priv->af_xdp_zc_qps);
7812 
7813 	return ret;
7814 }
7815 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7816 
7817 /**
7818  * stmmac_dvr_remove
7819  * @dev: device pointer
7820  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7821  * changes the link status and releases the DMA descriptor rings.
7822  */
7823 void stmmac_dvr_remove(struct device *dev)
7824 {
7825 	struct net_device *ndev = dev_get_drvdata(dev);
7826 	struct stmmac_priv *priv = netdev_priv(ndev);
7827 
7828 	netdev_info(priv->dev, "%s: removing driver", __func__);
7829 
7830 	pm_runtime_get_sync(dev);
7831 
7832 	stmmac_stop_all_dma(priv);
7833 	stmmac_mac_set(priv, priv->ioaddr, false);
7834 	netif_carrier_off(ndev);
7835 	unregister_netdev(ndev);
7836 
7837 #ifdef CONFIG_DEBUG_FS
7838 	stmmac_exit_fs(ndev);
7839 #endif
7840 	phylink_destroy(priv->phylink);
7841 	if (priv->plat->stmmac_rst)
7842 		reset_control_assert(priv->plat->stmmac_rst);
7843 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7844 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7845 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7846 		stmmac_mdio_unregister(ndev);
7847 	destroy_workqueue(priv->wq);
7848 	mutex_destroy(&priv->lock);
7849 	bitmap_free(priv->af_xdp_zc_qps);
7850 
7851 	pm_runtime_disable(dev);
7852 	pm_runtime_put_noidle(dev);
7853 }
7854 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7855 
7856 /**
7857  * stmmac_suspend - suspend callback
7858  * @dev: device pointer
7859  * Description: this is the function to suspend the device and it is called
7860  * by the platform driver to stop the network queue, release the resources,
7861  * program the PMT register (for WoL), clean and release driver resources.
7862  */
7863 int stmmac_suspend(struct device *dev)
7864 {
7865 	struct net_device *ndev = dev_get_drvdata(dev);
7866 	struct stmmac_priv *priv = netdev_priv(ndev);
7867 	u32 chan;
7868 
7869 	if (!ndev || !netif_running(ndev))
7870 		return 0;
7871 
7872 	mutex_lock(&priv->lock);
7873 
7874 	netif_device_detach(ndev);
7875 
7876 	stmmac_disable_all_queues(priv);
7877 
7878 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7879 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7880 
7881 	if (priv->eee_enabled) {
7882 		priv->tx_path_in_lpi_mode = false;
7883 		del_timer_sync(&priv->eee_ctrl_timer);
7884 	}
7885 
7886 	/* Stop TX/RX DMA */
7887 	stmmac_stop_all_dma(priv);
7888 
7889 	if (priv->plat->serdes_powerdown)
7890 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7891 
7892 	/* Enable Power down mode by programming the PMT regs */
7893 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7894 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7895 		priv->irq_wake = 1;
7896 	} else {
7897 		stmmac_mac_set(priv, priv->ioaddr, false);
7898 		pinctrl_pm_select_sleep_state(priv->device);
7899 	}
7900 
7901 	mutex_unlock(&priv->lock);
7902 
7903 	rtnl_lock();
7904 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7905 		phylink_suspend(priv->phylink, true);
7906 	} else {
7907 		if (device_may_wakeup(priv->device))
7908 			phylink_speed_down(priv->phylink, false);
7909 		phylink_suspend(priv->phylink, false);
7910 	}
7911 	rtnl_unlock();
7912 
7913 	if (priv->dma_cap.fpesel) {
7914 		/* Disable FPE */
7915 		stmmac_fpe_configure(priv, priv->ioaddr,
7916 				     priv->plat->fpe_cfg,
7917 				     priv->plat->tx_queues_to_use,
7918 				     priv->plat->rx_queues_to_use, false);
7919 
7920 		stmmac_fpe_handshake(priv, false);
7921 		stmmac_fpe_stop_wq(priv);
7922 	}
7923 
7924 	priv->speed = SPEED_UNKNOWN;
7925 	return 0;
7926 }
7927 EXPORT_SYMBOL_GPL(stmmac_suspend);
7928 
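/* The two helpers below reset the software state of a single RX or TX queue
 * (ring indices, TX MSS and the BQL/netdev TX queue accounting) so the rings
 * can be reused after a suspend/resume cycle or a queue reconfiguration.
 */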
7929 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7930 {
7931 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7932 
7933 	rx_q->cur_rx = 0;
7934 	rx_q->dirty_rx = 0;
7935 }
7936 
7937 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7938 {
7939 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7940 
7941 	tx_q->cur_tx = 0;
7942 	tx_q->dirty_tx = 0;
7943 	tx_q->mss = 0;
7944 
7945 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7946 }
7947 
7948 /**
7949  * stmmac_reset_queues_param - reset queue parameters
7950  * @priv: device pointer
7951  * @priv: driver private structure
7952 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7953 {
7954 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7955 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7956 	u32 queue;
7957 
7958 	for (queue = 0; queue < rx_cnt; queue++)
7959 		stmmac_reset_rx_queue(priv, queue);
7960 
7961 	for (queue = 0; queue < tx_cnt; queue++)
7962 		stmmac_reset_tx_queue(priv, queue);
7963 }
7964 
7965 /**
7966  * stmmac_resume - resume callback
7967  * @dev: device pointer
7968  * Description: on resume this function is invoked to set up the DMA and CORE
7969  * in a usable state.
7970  */
7971 int stmmac_resume(struct device *dev)
7972 {
7973 	struct net_device *ndev = dev_get_drvdata(dev);
7974 	struct stmmac_priv *priv = netdev_priv(ndev);
7975 	int ret;
7976 
7977 	if (!netif_running(ndev))
7978 		return 0;
7979 
7980 	/* The Power Down bit in the PM register is cleared
7981 	 * automatically as soon as a magic packet or a Wake-up frame
7982 	 * is received. Anyway, it's better to manually clear
7983 	 * this bit because it can generate problems while resuming
7984 	 * from other devices (e.g. serial console).
7985 	 */
7986 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7987 		mutex_lock(&priv->lock);
7988 		stmmac_pmt(priv, priv->hw, 0);
7989 		mutex_unlock(&priv->lock);
7990 		priv->irq_wake = 0;
7991 	} else {
7992 		pinctrl_pm_select_default_state(priv->device);
7993 		/* reset the phy so that it's ready */
7994 		if (priv->mii)
7995 			stmmac_mdio_reset(priv->mii);
7996 	}
7997 
7998 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7999 	    priv->plat->serdes_powerup) {
8000 		ret = priv->plat->serdes_powerup(ndev,
8001 						 priv->plat->bsp_priv);
8002 
8003 		if (ret < 0)
8004 			return ret;
8005 	}
8006 
8007 	rtnl_lock();
8008 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
8009 		phylink_resume(priv->phylink);
8010 	} else {
8011 		phylink_resume(priv->phylink);
8012 		if (device_may_wakeup(priv->device))
8013 			phylink_speed_up(priv->phylink);
8014 	}
8015 	rtnl_unlock();
8016 
8017 	rtnl_lock();
8018 	mutex_lock(&priv->lock);
8019 
8020 	stmmac_reset_queues_param(priv);
8021 
8022 	stmmac_free_tx_skbufs(priv);
8023 	stmmac_clear_descriptors(priv, &priv->dma_conf);
8024 
8025 	stmmac_hw_setup(ndev, false);
8026 	stmmac_init_coalesce(priv);
8027 	stmmac_set_rx_mode(ndev);
8028 
8029 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8030 
8031 	stmmac_enable_all_queues(priv);
8032 	stmmac_enable_all_dma_irq(priv);
8033 
8034 	mutex_unlock(&priv->lock);
8035 	rtnl_unlock();
8036 
8037 	netif_device_attach(ndev);
8038 
8039 	return 0;
8040 }
8041 EXPORT_SYMBOL_GPL(stmmac_resume);
8042 
8043 #ifndef MODULE
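/* Parse the legacy "stmmaceth=" kernel command line option when the driver
 * is built in. Options are comma-separated key:value pairs matching the
 * module parameters handled below, e.g. (illustrative values only):
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 */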
8044 static int __init stmmac_cmdline_opt(char *str)
8045 {
8046 	char *opt;
8047 
8048 	if (!str || !*str)
8049 		return 1;
8050 	while ((opt = strsep(&str, ",")) != NULL) {
8051 		if (!strncmp(opt, "debug:", 6)) {
8052 			if (kstrtoint(opt + 6, 0, &debug))
8053 				goto err;
8054 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8055 			if (kstrtoint(opt + 8, 0, &phyaddr))
8056 				goto err;
8057 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8058 			if (kstrtoint(opt + 7, 0, &buf_sz))
8059 				goto err;
8060 		} else if (!strncmp(opt, "tc:", 3)) {
8061 			if (kstrtoint(opt + 3, 0, &tc))
8062 				goto err;
8063 		} else if (!strncmp(opt, "watchdog:", 9)) {
8064 			if (kstrtoint(opt + 9, 0, &watchdog))
8065 				goto err;
8066 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8067 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8068 				goto err;
8069 		} else if (!strncmp(opt, "pause:", 6)) {
8070 			if (kstrtoint(opt + 6, 0, &pause))
8071 				goto err;
8072 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8073 			if (kstrtoint(opt + 10, 0, &eee_timer))
8074 				goto err;
8075 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8076 			if (kstrtoint(opt + 11, 0, &chain_mode))
8077 				goto err;
8078 		}
8079 	}
8080 	return 1;
8081 
8082 err:
8083 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8084 	return 1;
8085 }
8086 
8087 __setup("stmmaceth=", stmmac_cmdline_opt);
8088 #endif /* MODULE */
8089 
8090 static int __init stmmac_init(void)
8091 {
8092 #ifdef CONFIG_DEBUG_FS
8093 	/* Create debugfs main directory if it doesn't exist yet */
8094 	if (!stmmac_fs_dir)
8095 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8096 	register_netdevice_notifier(&stmmac_notifier);
8097 #endif
8098 
8099 	return 0;
8100 }
8101 
8102 static void __exit stmmac_exit(void)
8103 {
8104 #ifdef CONFIG_DEBUG_FS
8105 	unregister_netdevice_notifier(&stmmac_notifier);
8106 	debugfs_remove_recursive(stmmac_fs_dir);
8107 #endif
8108 }
8109 
8110 module_init(stmmac_init)
8111 module_exit(stmmac_exit)
8112 
8113 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8114 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8115 MODULE_LICENSE("GPL");
8116