xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision a1f4cf5791e7914f3e42f5462669353104fef8a9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver uses ring mode to manage tx and rx descriptors,
122  * but allows the user to force chain mode instead.
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
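
/* Usage sketch (illustrative only, not part of this driver): platform glue
 * drivers typically pair the two directions in their runtime PM hooks, e.g.
 *
 *	ret = stmmac_bus_clks_config(priv, true);	// runtime resume: ungate
 *	...
 *	stmmac_bus_clks_config(priv, false);		// runtime suspend: gate
 *
 * so that stmmac_clk, pclk and any extra clocks handled by
 * plat->clks_config() are enabled and disabled together.
 */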
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  * documentation). Otherwise, the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* The platform-provided default clk_csr is assumed valid for all
308 	 * cases except the ones handled below.
309 	 * For values higher than the IEEE 802.3 specified frequency we
310 	 * cannot estimate the proper divider, since the frequency of
311 	 * clk_csr_i is not known, so we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
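
/* Example with an assumed clock: a 125 MHz clk_csr input falls in the
 * CSR_F_100M..CSR_F_150M range above, so clk_csr becomes STMMAC_CSR_100_150M.
 * With the usual CSR divider table that selects MDC = clk_csr_i / 62, i.e.
 * roughly 2.0 MHz, which stays below the 2.5 MHz limit of IEEE 802.3.
 */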
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
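
/* Worked example (numbers picked for illustration): with dma_tx_size = 512,
 * cur_tx = 10 and dirty_tx = 4, the else branch above gives
 * avail = 512 - 10 + 4 - 1 = 505. The "- 1" keeps one slot unused so that
 * cur_tx == dirty_tx always means "ring empty" rather than "ring full".
 */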
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function is to verify and enter in LPI mode in case of
407  * EEE.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits and disables EEE when the LPI state
433  * is active. It is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and we are not already in the LPI state,
452  *  then the MAC transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts
468  *  the related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Using the PCS we cannot deal with the phy registers at this stage,
475 	 * so we do not support extra features like EEE.
476 	 */
477 	if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 	    priv->hw->pcs == STMMAC_PCS_RTBI)
479 		return false;
480 
481 	/* Check if MAC core supports the EEE feature. */
482 	if (!priv->dma_cap.eee)
483 		return false;
484 
485 	mutex_lock(&priv->lock);
486 
487 	/* Check if it needs to be deactivated */
488 	if (!priv->eee_active) {
489 		if (priv->eee_enabled) {
490 			netdev_dbg(priv->dev, "disable EEE\n");
491 			stmmac_lpi_entry_timer_config(priv, 0);
492 			del_timer_sync(&priv->eee_ctrl_timer);
493 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 			if (priv->hw->xpcs)
495 				xpcs_config_eee(priv->hw->xpcs,
496 						priv->plat->mult_fact_100ns,
497 						false);
498 		}
499 		mutex_unlock(&priv->lock);
500 		return false;
501 	}
502 
503 	if (priv->eee_active && !priv->eee_enabled) {
504 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 				     eee_tw_timer);
507 		if (priv->hw->xpcs)
508 			xpcs_config_eee(priv->hw->xpcs,
509 					priv->plat->mult_fact_100ns,
510 					true);
511 	}
512 
513 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 		del_timer_sync(&priv->eee_ctrl_timer);
515 		priv->tx_path_in_lpi_mode = false;
516 		stmmac_lpi_entry_timer_config(priv, 1);
517 	} else {
518 		stmmac_lpi_entry_timer_config(priv, 0);
519 		mod_timer(&priv->eee_ctrl_timer,
520 			  STMMAC_LPI_T(priv->tx_lpi_timer));
521 	}
522 
523 	mutex_unlock(&priv->lock);
524 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 	return true;
526 }
527 
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529  * @priv: driver private structure
530  * @p : descriptor pointer
531  * @skb : the socket buffer
532  * Description :
533  * This function will read timestamp from the descriptor & pass it to stack.
534  * and also perform some sanity checks.
535  */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 				   struct dma_desc *p, struct sk_buff *skb)
538 {
539 	struct skb_shared_hwtstamps shhwtstamp;
540 	bool found = false;
541 	u64 ns = 0;
542 
543 	if (!priv->hwts_tx_en)
544 		return;
545 
546 	/* exit if skb doesn't support hw tstamp */
547 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 		return;
549 
550 	/* check tx tstamp status */
551 	if (stmmac_get_tx_timestamp_status(priv, p)) {
552 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 		found = true;
554 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 		found = true;
556 	}
557 
558 	if (found) {
559 		ns -= priv->plat->cdc_error_adj;
560 
561 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
563 
564 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 		/* pass tstamp to stack */
566 		skb_tstamp_tx(skb, &shhwtstamp);
567 	}
568 }
569 
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571  * @priv: driver private structure
572  * @p : descriptor pointer
573  * @np : next descriptor pointer
574  * @skb : the socket buffer
575  * Description :
576  * This function will read the received packet's timestamp from the
577  * descriptor and pass it to the stack. It also performs some sanity checks.
578  */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 				   struct dma_desc *np, struct sk_buff *skb)
581 {
582 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 	struct dma_desc *desc = p;
584 	u64 ns = 0;
585 
586 	if (!priv->hwts_rx_en)
587 		return;
588 	/* For GMAC4, the valid timestamp is from CTX next desc. */
589 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 		desc = np;
591 
592 	/* Check if timestamp is available */
593 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595 
596 		ns -= priv->plat->cdc_error_adj;
597 
598 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 		shhwtstamp = skb_hwtstamps(skb);
600 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 	} else  {
603 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 	}
605 }
606 
607 /**
608  *  stmmac_hwtstamp_set - control hardware timestamping.
609  *  @dev: device pointer.
610  *  @ifr: An IOCTL-specific structure that can contain a pointer to
611  *  a proprietary structure used to pass information to the driver.
612  *  Description:
613  *  This function configures the MAC to enable/disable both outgoing (TX)
614  *  and incoming (RX) packet timestamping based on user input.
615  *  Return Value:
616  *  0 on success and an appropriate negative error code on failure.
617  */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 	struct stmmac_priv *priv = netdev_priv(dev);
621 	struct hwtstamp_config config;
622 	u32 ptp_v2 = 0;
623 	u32 tstamp_all = 0;
624 	u32 ptp_over_ipv4_udp = 0;
625 	u32 ptp_over_ipv6_udp = 0;
626 	u32 ptp_over_ethernet = 0;
627 	u32 snap_type_sel = 0;
628 	u32 ts_master_en = 0;
629 	u32 ts_event_en = 0;
630 
631 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 		netdev_alert(priv->dev, "No support for HW time stamping\n");
633 		priv->hwts_tx_en = 0;
634 		priv->hwts_rx_en = 0;
635 
636 		return -EOPNOTSUPP;
637 	}
638 
639 	if (copy_from_user(&config, ifr->ifr_data,
640 			   sizeof(config)))
641 		return -EFAULT;
642 
643 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 		   __func__, config.flags, config.tx_type, config.rx_filter);
645 
646 	if (config.tx_type != HWTSTAMP_TX_OFF &&
647 	    config.tx_type != HWTSTAMP_TX_ON)
648 		return -ERANGE;
649 
650 	if (priv->adv_ts) {
651 		switch (config.rx_filter) {
652 		case HWTSTAMP_FILTER_NONE:
653 			/* do not timestamp any incoming packet */
654 			config.rx_filter = HWTSTAMP_FILTER_NONE;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 			/* PTP v1, UDP, any kind of event packet */
659 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 			/* 'xmac' hardware can support Sync, Pdelay_Req and
661 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
662 			 * This leaves Delay_Req timestamps out.
663 			 * Enable all events *and* general purpose message
664 			 * timestamping
665 			 */
666 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 			break;
670 
671 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 			/* PTP v1, UDP, Sync packet */
673 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 			/* take time stamp for SYNC messages only */
675 			ts_event_en = PTP_TCR_TSEVNTENA;
676 
677 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 			break;
680 
681 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 			/* PTP v1, UDP, Delay_req packet */
683 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 			/* take time stamp for Delay_Req messages only */
685 			ts_master_en = PTP_TCR_TSMSTRENA;
686 			ts_event_en = PTP_TCR_TSEVNTENA;
687 
688 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 			/* PTP v2, UDP, any kind of event packet */
694 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for all event messages */
697 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			break;
702 
703 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 			/* PTP v2, UDP, Sync packet */
705 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 			ptp_v2 = PTP_TCR_TSVER2ENA;
707 			/* take time stamp for SYNC messages only */
708 			ts_event_en = PTP_TCR_TSEVNTENA;
709 
710 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 			break;
713 
714 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 			/* PTP v2, UDP, Delay_req packet */
716 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 			ptp_v2 = PTP_TCR_TSVER2ENA;
718 			/* take time stamp for Delay_Req messages only */
719 			ts_master_en = PTP_TCR_TSMSTRENA;
720 			ts_event_en = PTP_TCR_TSEVNTENA;
721 
722 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 			break;
725 
726 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 			/* PTP v2/802.1AS, any layer, any kind of event packet */
728 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 			ptp_v2 = PTP_TCR_TSVER2ENA;
730 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 			if (priv->synopsys_id < DWMAC_CORE_4_10)
732 				ts_event_en = PTP_TCR_TSEVNTENA;
733 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 			ptp_over_ethernet = PTP_TCR_TSIPENA;
736 			break;
737 
738 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 			/* PTP v2/802.1AS, any layer, Sync packet */
740 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 			ptp_v2 = PTP_TCR_TSVER2ENA;
742 			/* take time stamp for SYNC messages only */
743 			ts_event_en = PTP_TCR_TSEVNTENA;
744 
745 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 			ptp_over_ethernet = PTP_TCR_TSIPENA;
748 			break;
749 
750 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 			/* PTP v2/802.1AS, any layer, Delay_req packet */
752 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 			ptp_v2 = PTP_TCR_TSVER2ENA;
754 			/* take time stamp for Delay_Req messages only */
755 			ts_master_en = PTP_TCR_TSMSTRENA;
756 			ts_event_en = PTP_TCR_TSEVNTENA;
757 
758 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 			ptp_over_ethernet = PTP_TCR_TSIPENA;
761 			break;
762 
763 		case HWTSTAMP_FILTER_NTP_ALL:
764 		case HWTSTAMP_FILTER_ALL:
765 			/* time stamp any incoming packet */
766 			config.rx_filter = HWTSTAMP_FILTER_ALL;
767 			tstamp_all = PTP_TCR_TSENALL;
768 			break;
769 
770 		default:
771 			return -ERANGE;
772 		}
773 	} else {
774 		switch (config.rx_filter) {
775 		case HWTSTAMP_FILTER_NONE:
776 			config.rx_filter = HWTSTAMP_FILTER_NONE;
777 			break;
778 		default:
779 			/* PTP v1, UDP, any kind of event packet */
780 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 			break;
782 		}
783 	}
784 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786 
787 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
788 
789 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 		priv->systime_flags |= tstamp_all | ptp_v2 |
791 				       ptp_over_ethernet | ptp_over_ipv6_udp |
792 				       ptp_over_ipv4_udp | ts_event_en |
793 				       ts_master_en | snap_type_sel;
794 	}
795 
796 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797 
798 	memcpy(&priv->tstamp_config, &config, sizeof(config));
799 
800 	return copy_to_user(ifr->ifr_data, &config,
801 			    sizeof(config)) ? -EFAULT : 0;
802 }
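
/* Userspace sketch (illustrative only) of the request this handler services,
 * using the standard SIOCSHWTSTAMP interface from <linux/net_tstamp.h>; the
 * interface name below is a placeholder:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * The driver may widen cfg.rx_filter before copying the effective
 * configuration back to userspace.
 */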
803 
804 /**
805  *  stmmac_hwtstamp_get - read hardware timestamping.
806  *  @dev: device pointer.
807  *  @ifr: An IOCTL-specific structure that can contain a pointer to
808  *  a proprietary structure used to pass information to the driver.
809  *  Description:
810  *  This function obtains the current hardware timestamping settings
811  *  as requested.
812  */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 	struct stmmac_priv *priv = netdev_priv(dev);
816 	struct hwtstamp_config *config = &priv->tstamp_config;
817 
818 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 		return -EOPNOTSUPP;
820 
821 	return copy_to_user(ifr->ifr_data, config,
822 			    sizeof(*config)) ? -EFAULT : 0;
823 }
824 
825 /**
826  * stmmac_init_tstamp_counter - init hardware timestamping counter
827  * @priv: driver private structure
828  * @systime_flags: timestamping flags
829  * Description:
830  * Initialize hardware counter for packet timestamping.
831  * This is valid as long as the interface is open and not suspended.
832  * It is rerun after resuming from suspend, in which case the timestamping
833  * flags updated by stmmac_hwtstamp_set() also need to be restored.
834  */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 	struct timespec64 now;
839 	u32 sec_inc = 0;
840 	u64 temp = 0;
841 
842 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 		return -EOPNOTSUPP;
844 
845 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 	priv->systime_flags = systime_flags;
847 
848 	/* program Sub Second Increment reg */
849 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 					   priv->plat->clk_ptp_rate,
851 					   xmac, &sec_inc);
852 	temp = div_u64(1000000000ULL, sec_inc);
853 
854 	/* Store sub second increment for later use */
855 	priv->sub_second_inc = sec_inc;
856 
857 	/* Calculate the default addend value:
858 	 * addend = 2^32 / freq_div_ratio
859 	 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc),
860 	 * i.e. addend = ((1e9 / sec_inc) << 32) / clk_ptp_rate
861 	 */
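	/* Illustrative numbers (assumed, not taken from this code): with
	 * clk_ptp_rate = 50 MHz and sec_inc = 40 ns (fine update typically
	 * programs twice the clock period), 1e9 / sec_inc = 25 MHz and
	 * freq_div_ratio = 2, so default_addend = 2^32 / 2 = 0x80000000 and
	 * the accumulator overflows once every two PTP clock cycles.
	 */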
862 	temp = (u64)(temp << 32);
863 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865 
866 	/* initialize system time */
867 	ktime_get_real_ts64(&now);
868 
869 	/* lower 32 bits of tv_sec are safe until y2106 */
870 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871 
872 	return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875 
876 /**
877  * stmmac_init_ptp - init PTP
878  * @priv: driver private structure
879  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
880  * This is done by looking at the HW cap. register.
881  * This function also registers the ptp driver.
882  */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 	int ret;
887 
888 	if (priv->plat->ptp_clk_freq_config)
889 		priv->plat->ptp_clk_freq_config(priv);
890 
891 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 	if (ret)
893 		return ret;
894 
895 	priv->adv_ts = 0;
896 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 	if (xmac && priv->dma_cap.atime_stamp)
898 		priv->adv_ts = 1;
899 	/* Dwmac 3.x core with extend_desc can support adv_ts */
900 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 		priv->adv_ts = 1;
902 
903 	if (priv->dma_cap.time_stamp)
904 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905 
906 	if (priv->adv_ts)
907 		netdev_info(priv->dev,
908 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
909 
910 	priv->hwts_tx_en = 0;
911 	priv->hwts_rx_en = 0;
912 
913 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 		stmmac_hwtstamp_correct_latency(priv, priv);
915 
916 	return 0;
917 }
918 
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 	stmmac_ptp_unregister(priv);
923 }
924 
925 /**
926  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
927  *  @priv: driver private structure
928  *  @duplex: duplex passed to the next function
929  *  Description: It is used for configuring the flow control in all queues
930  */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 	u32 tx_cnt = priv->plat->tx_queues_to_use;
934 
935 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 			priv->pause, tx_cnt);
937 }
938 
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 						 phy_interface_t interface)
941 {
942 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943 
944 	if (priv->hw->xpcs)
945 		return &priv->hw->xpcs->pcs;
946 
947 	if (priv->hw->lynx_pcs)
948 		return priv->hw->lynx_pcs;
949 
950 	return NULL;
951 }
952 
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 			      const struct phylink_link_state *state)
955 {
956 	/* Nothing to do, xpcs_config() handles everything */
957 }
958 
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 	bool *hs_enable = &fpe_cfg->hs_enable;
965 
966 	if (is_up && *hs_enable) {
967 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968 					MPACKET_VERIFY);
969 	} else {
970 		*lo_state = FPE_STATE_OFF;
971 		*lp_state = FPE_STATE_OFF;
972 	}
973 }
974 
975 static void stmmac_mac_link_down(struct phylink_config *config,
976 				 unsigned int mode, phy_interface_t interface)
977 {
978 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979 
980 	stmmac_mac_set(priv, priv->ioaddr, false);
981 	priv->eee_active = false;
982 	priv->tx_lpi_enabled = false;
983 	priv->eee_enabled = stmmac_eee_init(priv);
984 	stmmac_set_eee_pls(priv, priv->hw, false);
985 
986 	if (priv->dma_cap.fpesel)
987 		stmmac_fpe_link_state_handle(priv, false);
988 }
989 
990 static void stmmac_mac_link_up(struct phylink_config *config,
991 			       struct phy_device *phy,
992 			       unsigned int mode, phy_interface_t interface,
993 			       int speed, int duplex,
994 			       bool tx_pause, bool rx_pause)
995 {
996 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997 	u32 old_ctrl, ctrl;
998 
999 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000 	    priv->plat->serdes_powerup)
1001 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002 
1003 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005 
1006 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007 		switch (speed) {
1008 		case SPEED_10000:
1009 			ctrl |= priv->hw->link.xgmii.speed10000;
1010 			break;
1011 		case SPEED_5000:
1012 			ctrl |= priv->hw->link.xgmii.speed5000;
1013 			break;
1014 		case SPEED_2500:
1015 			ctrl |= priv->hw->link.xgmii.speed2500;
1016 			break;
1017 		default:
1018 			return;
1019 		}
1020 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021 		switch (speed) {
1022 		case SPEED_100000:
1023 			ctrl |= priv->hw->link.xlgmii.speed100000;
1024 			break;
1025 		case SPEED_50000:
1026 			ctrl |= priv->hw->link.xlgmii.speed50000;
1027 			break;
1028 		case SPEED_40000:
1029 			ctrl |= priv->hw->link.xlgmii.speed40000;
1030 			break;
1031 		case SPEED_25000:
1032 			ctrl |= priv->hw->link.xlgmii.speed25000;
1033 			break;
1034 		case SPEED_10000:
1035 			ctrl |= priv->hw->link.xgmii.speed10000;
1036 			break;
1037 		case SPEED_2500:
1038 			ctrl |= priv->hw->link.speed2500;
1039 			break;
1040 		case SPEED_1000:
1041 			ctrl |= priv->hw->link.speed1000;
1042 			break;
1043 		default:
1044 			return;
1045 		}
1046 	} else {
1047 		switch (speed) {
1048 		case SPEED_2500:
1049 			ctrl |= priv->hw->link.speed2500;
1050 			break;
1051 		case SPEED_1000:
1052 			ctrl |= priv->hw->link.speed1000;
1053 			break;
1054 		case SPEED_100:
1055 			ctrl |= priv->hw->link.speed100;
1056 			break;
1057 		case SPEED_10:
1058 			ctrl |= priv->hw->link.speed10;
1059 			break;
1060 		default:
1061 			return;
1062 		}
1063 	}
1064 
1065 	priv->speed = speed;
1066 
1067 	if (priv->plat->fix_mac_speed)
1068 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069 
1070 	if (!duplex)
1071 		ctrl &= ~priv->hw->link.duplex;
1072 	else
1073 		ctrl |= priv->hw->link.duplex;
1074 
1075 	/* Flow Control operation */
1076 	if (rx_pause && tx_pause)
1077 		priv->flow_ctrl = FLOW_AUTO;
1078 	else if (rx_pause && !tx_pause)
1079 		priv->flow_ctrl = FLOW_RX;
1080 	else if (!rx_pause && tx_pause)
1081 		priv->flow_ctrl = FLOW_TX;
1082 	else
1083 		priv->flow_ctrl = FLOW_OFF;
1084 
1085 	stmmac_mac_flow_ctrl(priv, duplex);
1086 
1087 	if (ctrl != old_ctrl)
1088 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089 
1090 	stmmac_mac_set(priv, priv->ioaddr, true);
1091 	if (phy && priv->dma_cap.eee) {
1092 		priv->eee_active =
1093 			phy_init_eee(phy, !(priv->plat->flags &
1094 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095 		priv->eee_enabled = stmmac_eee_init(priv);
1096 		priv->tx_lpi_enabled = priv->eee_enabled;
1097 		stmmac_set_eee_pls(priv, priv->hw, true);
1098 	}
1099 
1100 	if (priv->dma_cap.fpesel)
1101 		stmmac_fpe_link_state_handle(priv, true);
1102 
1103 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104 		stmmac_hwtstamp_correct_latency(priv, priv);
1105 }
1106 
1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108 	.mac_select_pcs = stmmac_mac_select_pcs,
1109 	.mac_config = stmmac_mac_config,
1110 	.mac_link_down = stmmac_mac_link_down,
1111 	.mac_link_up = stmmac_mac_link_up,
1112 };
1113 
1114 /**
1115  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116  * @priv: driver private structure
1117  * Description: this verifies if the HW supports the Physical Coding
1118  * Sublayer (PCS) interface, which can be used when the MAC is
1119  * configured for the TBI, RTBI, or SGMII PHY interface.
1120  */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123 	int interface = priv->plat->mac_interface;
1124 
1125 	if (priv->dma_cap.pcs) {
1126 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 			priv->hw->pcs = STMMAC_PCS_RGMII;
1132 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 			priv->hw->pcs = STMMAC_PCS_SGMII;
1135 		}
1136 	}
1137 }
1138 
1139 /**
1140  * stmmac_init_phy - PHY initialization
1141  * @dev: net device structure
1142  * Description: it initializes the driver's PHY state, and attaches the PHY
1143  * to the mac driver.
1144  *  Return value:
1145  *  0 on success
1146  */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149 	struct stmmac_priv *priv = netdev_priv(dev);
1150 	struct fwnode_handle *phy_fwnode;
1151 	struct fwnode_handle *fwnode;
1152 	int ret;
1153 
1154 	if (!phylink_expects_phy(priv->phylink))
1155 		return 0;
1156 
1157 	fwnode = priv->plat->port_node;
1158 	if (!fwnode)
1159 		fwnode = dev_fwnode(priv->device);
1160 
1161 	if (fwnode)
1162 		phy_fwnode = fwnode_get_phy_node(fwnode);
1163 	else
1164 		phy_fwnode = NULL;
1165 
1166 	/* Some DT bindings do not set up the PHY handle. Let's try to
1167 	 * parse it manually.
1168 	 */
1169 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 		int addr = priv->plat->phy_addr;
1171 		struct phy_device *phydev;
1172 
1173 		if (addr < 0) {
1174 			netdev_err(priv->dev, "no phy found\n");
1175 			return -ENODEV;
1176 		}
1177 
1178 		phydev = mdiobus_get_phy(priv->mii, addr);
1179 		if (!phydev) {
1180 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 			return -ENODEV;
1182 		}
1183 
1184 		ret = phylink_connect_phy(priv->phylink, phydev);
1185 	} else {
1186 		fwnode_handle_put(phy_fwnode);
1187 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 	}
1189 
1190 	if (!priv->plat->pmt) {
1191 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192 
1193 		phylink_ethtool_get_wol(priv->phylink, &wol);
1194 		device_set_wakeup_capable(priv->device, !!wol.supported);
1195 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 	}
1197 
1198 	return ret;
1199 }
1200 
1201 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1202 {
1203 	/* Half-duplex can only work with a single TX queue */
1204 	if (priv->plat->tx_queues_to_use > 1)
1205 		priv->phylink_config.mac_capabilities &=
1206 			~(MAC_10HD | MAC_100HD | MAC_1000HD);
1207 	else
1208 		priv->phylink_config.mac_capabilities |=
1209 			(MAC_10HD | MAC_100HD | MAC_1000HD);
1210 }
1211 
1212 static int stmmac_phy_setup(struct stmmac_priv *priv)
1213 {
1214 	struct stmmac_mdio_bus_data *mdio_bus_data;
1215 	int mode = priv->plat->phy_interface;
1216 	struct fwnode_handle *fwnode;
1217 	struct phylink *phylink;
1218 	int max_speed;
1219 
1220 	priv->phylink_config.dev = &priv->dev->dev;
1221 	priv->phylink_config.type = PHYLINK_NETDEV;
1222 	priv->phylink_config.mac_managed_pm = true;
1223 
1224 	mdio_bus_data = priv->plat->mdio_bus_data;
1225 	if (mdio_bus_data)
1226 		priv->phylink_config.ovr_an_inband =
1227 			mdio_bus_data->xpcs_an_inband;
1228 
1229 	/* Set the platform/firmware specified interface mode. Note, phylink
1230 	 * deals with the PHY interface mode, not the MAC interface mode.
1231 	 */
1232 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1233 
1234 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1235 	if (priv->hw->xpcs)
1236 		xpcs_get_interfaces(priv->hw->xpcs,
1237 				    priv->phylink_config.supported_interfaces);
1238 
1239 	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1240 						MAC_10FD | MAC_100FD |
1241 						MAC_1000FD;
1242 
1243 	stmmac_set_half_duplex(priv);
1244 
1245 	/* Get the MAC specific capabilities */
1246 	stmmac_mac_phylink_get_caps(priv);
1247 
1248 	max_speed = priv->plat->max_speed;
1249 	if (max_speed)
1250 		phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1251 
1252 	fwnode = priv->plat->port_node;
1253 	if (!fwnode)
1254 		fwnode = dev_fwnode(priv->device);
1255 
1256 	phylink = phylink_create(&priv->phylink_config, fwnode,
1257 				 mode, &stmmac_phylink_mac_ops);
1258 	if (IS_ERR(phylink))
1259 		return PTR_ERR(phylink);
1260 
1261 	priv->phylink = phylink;
1262 	return 0;
1263 }
1264 
1265 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1266 				    struct stmmac_dma_conf *dma_conf)
1267 {
1268 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1269 	unsigned int desc_size;
1270 	void *head_rx;
1271 	u32 queue;
1272 
1273 	/* Display RX rings */
1274 	for (queue = 0; queue < rx_cnt; queue++) {
1275 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1276 
1277 		pr_info("\tRX Queue %u rings\n", queue);
1278 
1279 		if (priv->extend_desc) {
1280 			head_rx = (void *)rx_q->dma_erx;
1281 			desc_size = sizeof(struct dma_extended_desc);
1282 		} else {
1283 			head_rx = (void *)rx_q->dma_rx;
1284 			desc_size = sizeof(struct dma_desc);
1285 		}
1286 
1287 		/* Display RX ring */
1288 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1289 				    rx_q->dma_rx_phy, desc_size);
1290 	}
1291 }
1292 
1293 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1294 				    struct stmmac_dma_conf *dma_conf)
1295 {
1296 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1297 	unsigned int desc_size;
1298 	void *head_tx;
1299 	u32 queue;
1300 
1301 	/* Display TX rings */
1302 	for (queue = 0; queue < tx_cnt; queue++) {
1303 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1304 
1305 		pr_info("\tTX Queue %d rings\n", queue);
1306 
1307 		if (priv->extend_desc) {
1308 			head_tx = (void *)tx_q->dma_etx;
1309 			desc_size = sizeof(struct dma_extended_desc);
1310 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1311 			head_tx = (void *)tx_q->dma_entx;
1312 			desc_size = sizeof(struct dma_edesc);
1313 		} else {
1314 			head_tx = (void *)tx_q->dma_tx;
1315 			desc_size = sizeof(struct dma_desc);
1316 		}
1317 
1318 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1319 				    tx_q->dma_tx_phy, desc_size);
1320 	}
1321 }
1322 
1323 static void stmmac_display_rings(struct stmmac_priv *priv,
1324 				 struct stmmac_dma_conf *dma_conf)
1325 {
1326 	/* Display RX ring */
1327 	stmmac_display_rx_rings(priv, dma_conf);
1328 
1329 	/* Display TX ring */
1330 	stmmac_display_tx_rings(priv, dma_conf);
1331 }
1332 
1333 static int stmmac_set_bfsize(int mtu, int bufsize)
1334 {
1335 	int ret = bufsize;
1336 
1337 	if (mtu >= BUF_SIZE_8KiB)
1338 		ret = BUF_SIZE_16KiB;
1339 	else if (mtu >= BUF_SIZE_4KiB)
1340 		ret = BUF_SIZE_8KiB;
1341 	else if (mtu >= BUF_SIZE_2KiB)
1342 		ret = BUF_SIZE_4KiB;
1343 	else if (mtu > DEFAULT_BUFSIZE)
1344 		ret = BUF_SIZE_2KiB;
1345 	else
1346 		ret = DEFAULT_BUFSIZE;
1347 
1348 	return ret;
1349 }
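
/* Illustrative mapping (MTU values chosen as examples): an MTU of 1500 stays
 * at DEFAULT_BUFSIZE (1536), an MTU of 3000 selects BUF_SIZE_4KiB, and a
 * 9000-byte jumbo MTU selects BUF_SIZE_16KiB.
 */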
1350 
1351 /**
1352  * stmmac_clear_rx_descriptors - clear RX descriptors
1353  * @priv: driver private structure
1354  * @dma_conf: structure to take the dma data
1355  * @queue: RX queue index
1356  * Description: this function is called to clear the RX descriptors
1357  * in case of both basic and extended descriptors are used.
1358  */
1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1360 					struct stmmac_dma_conf *dma_conf,
1361 					u32 queue)
1362 {
1363 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1364 	int i;
1365 
1366 	/* Clear the RX descriptors */
1367 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1368 		if (priv->extend_desc)
1369 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1370 					priv->use_riwt, priv->mode,
1371 					(i == dma_conf->dma_rx_size - 1),
1372 					dma_conf->dma_buf_sz);
1373 		else
1374 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1375 					priv->use_riwt, priv->mode,
1376 					(i == dma_conf->dma_rx_size - 1),
1377 					dma_conf->dma_buf_sz);
1378 }
1379 
1380 /**
1381  * stmmac_clear_tx_descriptors - clear tx descriptors
1382  * @priv: driver private structure
1383  * @dma_conf: structure to take the dma data
1384  * @queue: TX queue index.
1385  * Description: this function is called to clear the TX descriptors
1386  * in case of both basic and extended descriptors are used.
1387  */
1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1389 					struct stmmac_dma_conf *dma_conf,
1390 					u32 queue)
1391 {
1392 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1393 	int i;
1394 
1395 	/* Clear the TX descriptors */
1396 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1397 		int last = (i == (dma_conf->dma_tx_size - 1));
1398 		struct dma_desc *p;
1399 
1400 		if (priv->extend_desc)
1401 			p = &tx_q->dma_etx[i].basic;
1402 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1403 			p = &tx_q->dma_entx[i].basic;
1404 		else
1405 			p = &tx_q->dma_tx[i];
1406 
1407 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1408 	}
1409 }
1410 
1411 /**
1412  * stmmac_clear_descriptors - clear descriptors
1413  * @priv: driver private structure
1414  * @dma_conf: structure to take the dma data
1415  * Description: this function is called to clear the TX and RX descriptors
1416  * whether basic or extended descriptors are used.
1417  */
1418 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1419 				     struct stmmac_dma_conf *dma_conf)
1420 {
1421 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1422 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1423 	u32 queue;
1424 
1425 	/* Clear the RX descriptors */
1426 	for (queue = 0; queue < rx_queue_cnt; queue++)
1427 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1428 
1429 	/* Clear the TX descriptors */
1430 	for (queue = 0; queue < tx_queue_cnt; queue++)
1431 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1432 }
1433 
1434 /**
1435  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1436  * @priv: driver private structure
1437  * @dma_conf: structure to take the dma data
1438  * @p: descriptor pointer
1439  * @i: descriptor index
1440  * @flags: gfp flag
1441  * @queue: RX queue index
1442  * Description: this function is called to allocate a receive buffer, perform
1443  * the DMA mapping and init the descriptor.
1444  */
1445 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1446 				  struct stmmac_dma_conf *dma_conf,
1447 				  struct dma_desc *p,
1448 				  int i, gfp_t flags, u32 queue)
1449 {
1450 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1451 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1452 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1453 
1454 	if (priv->dma_cap.host_dma_width <= 32)
1455 		gfp |= GFP_DMA32;
1456 
1457 	if (!buf->page) {
1458 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1459 		if (!buf->page)
1460 			return -ENOMEM;
1461 		buf->page_offset = stmmac_rx_offset(priv);
1462 	}
1463 
1464 	if (priv->sph && !buf->sec_page) {
1465 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1466 		if (!buf->sec_page)
1467 			return -ENOMEM;
1468 
1469 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1470 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1471 	} else {
1472 		buf->sec_page = NULL;
1473 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1474 	}
1475 
1476 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1477 
1478 	stmmac_set_desc_addr(priv, p, buf->addr);
1479 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1480 		stmmac_init_desc3(priv, p);
1481 
1482 	return 0;
1483 }
1484 
1485 /**
1486  * stmmac_free_rx_buffer - free RX dma buffers
1487  * @priv: private structure
1488  * @rx_q: RX queue
1489  * @i: buffer index.
1490  */
1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1492 				  struct stmmac_rx_queue *rx_q,
1493 				  int i)
1494 {
1495 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1496 
1497 	if (buf->page)
1498 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1499 	buf->page = NULL;
1500 
1501 	if (buf->sec_page)
1502 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1503 	buf->sec_page = NULL;
1504 }
1505 
1506 /**
1507  * stmmac_free_tx_buffer - free TX dma buffers
1508  * @priv: private structure
1509  * @dma_conf: structure to take the dma data
1510  * @queue: TX queue index
1511  * @i: buffer index.
1512  */
1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1514 				  struct stmmac_dma_conf *dma_conf,
1515 				  u32 queue, int i)
1516 {
1517 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1518 
1519 	if (tx_q->tx_skbuff_dma[i].buf &&
1520 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1521 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1522 			dma_unmap_page(priv->device,
1523 				       tx_q->tx_skbuff_dma[i].buf,
1524 				       tx_q->tx_skbuff_dma[i].len,
1525 				       DMA_TO_DEVICE);
1526 		else
1527 			dma_unmap_single(priv->device,
1528 					 tx_q->tx_skbuff_dma[i].buf,
1529 					 tx_q->tx_skbuff_dma[i].len,
1530 					 DMA_TO_DEVICE);
1531 	}
1532 
1533 	if (tx_q->xdpf[i] &&
1534 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1535 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1536 		xdp_return_frame(tx_q->xdpf[i]);
1537 		tx_q->xdpf[i] = NULL;
1538 	}
1539 
1540 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1541 		tx_q->xsk_frames_done++;
1542 
1543 	if (tx_q->tx_skbuff[i] &&
1544 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1545 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1546 		tx_q->tx_skbuff[i] = NULL;
1547 	}
1548 
1549 	tx_q->tx_skbuff_dma[i].buf = 0;
1550 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1551 }
1552 
1553 /**
1554  * dma_free_rx_skbufs - free RX dma buffers
1555  * @priv: private structure
1556  * @dma_conf: structure to take the dma data
1557  * @queue: RX queue index
1558  */
1559 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1560 			       struct stmmac_dma_conf *dma_conf,
1561 			       u32 queue)
1562 {
1563 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 	int i;
1565 
1566 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1567 		stmmac_free_rx_buffer(priv, rx_q, i);
1568 }
1569 
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1571 				   struct stmmac_dma_conf *dma_conf,
1572 				   u32 queue, gfp_t flags)
1573 {
1574 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1575 	int i;
1576 
1577 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1578 		struct dma_desc *p;
1579 		int ret;
1580 
1581 		if (priv->extend_desc)
1582 			p = &((rx_q->dma_erx + i)->basic);
1583 		else
1584 			p = rx_q->dma_rx + i;
1585 
1586 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1587 					     queue);
1588 		if (ret)
1589 			return ret;
1590 
1591 		rx_q->buf_alloc_num++;
1592 	}
1593 
1594 	return 0;
1595 }
1596 
1597 /**
1598  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1599  * @priv: private structure
1600  * @dma_conf: structure to take the dma data
1601  * @queue: RX queue index
1602  */
1603 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1604 				struct stmmac_dma_conf *dma_conf,
1605 				u32 queue)
1606 {
1607 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1608 	int i;
1609 
1610 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1611 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1612 
1613 		if (!buf->xdp)
1614 			continue;
1615 
1616 		xsk_buff_free(buf->xdp);
1617 		buf->xdp = NULL;
1618 	}
1619 }
1620 
1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1622 				      struct stmmac_dma_conf *dma_conf,
1623 				      u32 queue)
1624 {
1625 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1626 	int i;
1627 
1628 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1629 	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1630 	 * use this macro to make sure there are no size violations.
1631 	 */
1632 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1633 
1634 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1635 		struct stmmac_rx_buffer *buf;
1636 		dma_addr_t dma_addr;
1637 		struct dma_desc *p;
1638 
1639 		if (priv->extend_desc)
1640 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1641 		else
1642 			p = rx_q->dma_rx + i;
1643 
1644 		buf = &rx_q->buf_pool[i];
1645 
1646 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1647 		if (!buf->xdp)
1648 			return -ENOMEM;
1649 
1650 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1651 		stmmac_set_desc_addr(priv, p, dma_addr);
1652 		rx_q->buf_alloc_num++;
1653 	}
1654 
1655 	return 0;
1656 }
1657 
1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1659 {
1660 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1661 		return NULL;
1662 
1663 	return xsk_get_pool_from_qid(priv->dev, queue);
1664 }
1665 
1666 /**
1667  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1668  * @priv: driver private structure
1669  * @dma_conf: structure to take the dma data
1670  * @queue: RX queue index
1671  * @flags: gfp flag.
1672  * Description: this function initializes the DMA RX descriptors
1673  * and allocates the socket buffers. It supports the chained and ring
1674  * modes.
1675  */
1676 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1677 				    struct stmmac_dma_conf *dma_conf,
1678 				    u32 queue, gfp_t flags)
1679 {
1680 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1681 	int ret;
1682 
1683 	netif_dbg(priv, probe, priv->dev,
1684 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1685 		  (u32)rx_q->dma_rx_phy);
1686 
1687 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1688 
1689 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1690 
1691 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1692 
1693 	if (rx_q->xsk_pool) {
1694 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695 						   MEM_TYPE_XSK_BUFF_POOL,
1696 						   NULL));
1697 		netdev_info(priv->dev,
1698 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1699 			    rx_q->queue_index);
1700 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1701 	} else {
1702 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1703 						   MEM_TYPE_PAGE_POOL,
1704 						   rx_q->page_pool));
1705 		netdev_info(priv->dev,
1706 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1707 			    rx_q->queue_index);
1708 	}
1709 
1710 	if (rx_q->xsk_pool) {
1711 		/* RX XDP ZC buffer pool may not be populated, e.g.
1712 		 * xdpsock TX-only.
1713 		 */
1714 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1715 	} else {
1716 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1717 		if (ret < 0)
1718 			return -ENOMEM;
1719 	}
1720 
1721 	/* Setup the chained descriptor addresses */
1722 	if (priv->mode == STMMAC_CHAIN_MODE) {
1723 		if (priv->extend_desc)
1724 			stmmac_mode_init(priv, rx_q->dma_erx,
1725 					 rx_q->dma_rx_phy,
1726 					 dma_conf->dma_rx_size, 1);
1727 		else
1728 			stmmac_mode_init(priv, rx_q->dma_rx,
1729 					 rx_q->dma_rx_phy,
1730 					 dma_conf->dma_rx_size, 0);
1731 	}
1732 
1733 	return 0;
1734 }
1735 
1736 static int init_dma_rx_desc_rings(struct net_device *dev,
1737 				  struct stmmac_dma_conf *dma_conf,
1738 				  gfp_t flags)
1739 {
1740 	struct stmmac_priv *priv = netdev_priv(dev);
1741 	u32 rx_count = priv->plat->rx_queues_to_use;
1742 	int queue;
1743 	int ret;
1744 
1745 	/* RX INITIALIZATION */
1746 	netif_dbg(priv, probe, priv->dev,
1747 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1748 
1749 	for (queue = 0; queue < rx_count; queue++) {
1750 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1751 		if (ret)
1752 			goto err_init_rx_buffers;
1753 	}
1754 
1755 	return 0;
1756 
1757 err_init_rx_buffers:
1758 	while (queue >= 0) {
1759 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1760 
1761 		if (rx_q->xsk_pool)
1762 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1763 		else
1764 			dma_free_rx_skbufs(priv, dma_conf, queue);
1765 
1766 		rx_q->buf_alloc_num = 0;
1767 		rx_q->xsk_pool = NULL;
1768 
1769 		queue--;
1770 	}
1771 
1772 	return ret;
1773 }
1774 
1775 /**
1776  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1777  * @priv: driver private structure
1778  * @dma_conf: structure to take the dma data
1779  * @queue: TX queue index
1780  * Description: this function initializes the DMA TX descriptors
1781  * and allocates the socket buffers. It supports the chained and ring
1782  * modes.
1783  */
1784 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1785 				    struct stmmac_dma_conf *dma_conf,
1786 				    u32 queue)
1787 {
1788 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1789 	int i;
1790 
1791 	netif_dbg(priv, probe, priv->dev,
1792 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1793 		  (u32)tx_q->dma_tx_phy);
1794 
1795 	/* Setup the chained descriptor addresses */
1796 	if (priv->mode == STMMAC_CHAIN_MODE) {
1797 		if (priv->extend_desc)
1798 			stmmac_mode_init(priv, tx_q->dma_etx,
1799 					 tx_q->dma_tx_phy,
1800 					 dma_conf->dma_tx_size, 1);
1801 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1802 			stmmac_mode_init(priv, tx_q->dma_tx,
1803 					 tx_q->dma_tx_phy,
1804 					 dma_conf->dma_tx_size, 0);
1805 	}
1806 
1807 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1808 
1809 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1810 		struct dma_desc *p;
1811 
1812 		if (priv->extend_desc)
1813 			p = &((tx_q->dma_etx + i)->basic);
1814 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1815 			p = &((tx_q->dma_entx + i)->basic);
1816 		else
1817 			p = tx_q->dma_tx + i;
1818 
1819 		stmmac_clear_desc(priv, p);
1820 
1821 		tx_q->tx_skbuff_dma[i].buf = 0;
1822 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1823 		tx_q->tx_skbuff_dma[i].len = 0;
1824 		tx_q->tx_skbuff_dma[i].last_segment = false;
1825 		tx_q->tx_skbuff[i] = NULL;
1826 	}
1827 
1828 	return 0;
1829 }
1830 
1831 static int init_dma_tx_desc_rings(struct net_device *dev,
1832 				  struct stmmac_dma_conf *dma_conf)
1833 {
1834 	struct stmmac_priv *priv = netdev_priv(dev);
1835 	u32 tx_queue_cnt;
1836 	u32 queue;
1837 
1838 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1839 
1840 	for (queue = 0; queue < tx_queue_cnt; queue++)
1841 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1842 
1843 	return 0;
1844 }
1845 
1846 /**
1847  * init_dma_desc_rings - init the RX/TX descriptor rings
1848  * @dev: net device structure
1849  * @dma_conf: structure to take the dma data
1850  * @flags: gfp flag.
1851  * Description: this function initializes the DMA RX/TX descriptors
1852  * and allocates the socket buffers. It supports the chained and ring
1853  * modes.
1854  */
1855 static int init_dma_desc_rings(struct net_device *dev,
1856 			       struct stmmac_dma_conf *dma_conf,
1857 			       gfp_t flags)
1858 {
1859 	struct stmmac_priv *priv = netdev_priv(dev);
1860 	int ret;
1861 
1862 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1863 	if (ret)
1864 		return ret;
1865 
1866 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1867 
1868 	stmmac_clear_descriptors(priv, dma_conf);
1869 
1870 	if (netif_msg_hw(priv))
1871 		stmmac_display_rings(priv, dma_conf);
1872 
1873 	return ret;
1874 }
1875 
1876 /**
1877  * dma_free_tx_skbufs - free TX dma buffers
1878  * @priv: private structure
1879  * @dma_conf: structure to take the dma data
1880  * @queue: TX queue index
1881  */
1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1883 			       struct stmmac_dma_conf *dma_conf,
1884 			       u32 queue)
1885 {
1886 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1887 	int i;
1888 
1889 	tx_q->xsk_frames_done = 0;
1890 
1891 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1892 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1893 
1894 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1895 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1896 		tx_q->xsk_frames_done = 0;
1897 		tx_q->xsk_pool = NULL;
1898 	}
1899 }
1900 
1901 /**
1902  * stmmac_free_tx_skbufs - free TX skb buffers
1903  * @priv: private structure
1904  */
1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1906 {
1907 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1908 	u32 queue;
1909 
1910 	for (queue = 0; queue < tx_queue_cnt; queue++)
1911 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1912 }
1913 
1914 /**
1915  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1916  * @priv: private structure
1917  * @dma_conf: structure to take the dma data
1918  * @queue: RX queue index
1919  */
1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1921 					 struct stmmac_dma_conf *dma_conf,
1922 					 u32 queue)
1923 {
1924 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1925 
1926 	/* Release the DMA RX socket buffers */
1927 	if (rx_q->xsk_pool)
1928 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1929 	else
1930 		dma_free_rx_skbufs(priv, dma_conf, queue);
1931 
1932 	rx_q->buf_alloc_num = 0;
1933 	rx_q->xsk_pool = NULL;
1934 
1935 	/* Free DMA regions of consistent memory previously allocated */
1936 	if (!priv->extend_desc)
1937 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1938 				  sizeof(struct dma_desc),
1939 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1940 	else
1941 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1942 				  sizeof(struct dma_extended_desc),
1943 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1944 
1945 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1946 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1947 
1948 	kfree(rx_q->buf_pool);
1949 	if (rx_q->page_pool)
1950 		page_pool_destroy(rx_q->page_pool);
1951 }
1952 
1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1954 				       struct stmmac_dma_conf *dma_conf)
1955 {
1956 	u32 rx_count = priv->plat->rx_queues_to_use;
1957 	u32 queue;
1958 
1959 	/* Free RX queue resources */
1960 	for (queue = 0; queue < rx_count; queue++)
1961 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1962 }
1963 
1964 /**
1965  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1966  * @priv: private structure
1967  * @dma_conf: structure to take the dma data
1968  * @queue: TX queue index
1969  */
1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1971 					 struct stmmac_dma_conf *dma_conf,
1972 					 u32 queue)
1973 {
1974 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975 	size_t size;
1976 	void *addr;
1977 
1978 	/* Release the DMA TX socket buffers */
1979 	dma_free_tx_skbufs(priv, dma_conf, queue);
1980 
1981 	if (priv->extend_desc) {
1982 		size = sizeof(struct dma_extended_desc);
1983 		addr = tx_q->dma_etx;
1984 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1985 		size = sizeof(struct dma_edesc);
1986 		addr = tx_q->dma_entx;
1987 	} else {
1988 		size = sizeof(struct dma_desc);
1989 		addr = tx_q->dma_tx;
1990 	}
1991 
1992 	size *= dma_conf->dma_tx_size;
1993 
1994 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1995 
1996 	kfree(tx_q->tx_skbuff_dma);
1997 	kfree(tx_q->tx_skbuff);
1998 }
1999 
2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2001 				       struct stmmac_dma_conf *dma_conf)
2002 {
2003 	u32 tx_count = priv->plat->tx_queues_to_use;
2004 	u32 queue;
2005 
2006 	/* Free TX queue resources */
2007 	for (queue = 0; queue < tx_count; queue++)
2008 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2009 }
2010 
2011 /**
2012  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2013  * @priv: private structure
2014  * @dma_conf: structure to take the dma data
2015  * @queue: RX queue index
2016  * Description: according to which descriptor can be used (extended or basic)
2017  * this function allocates the resources for the RX path of the given queue.
2018  * For example, it creates the page pool in advance in order to allow the
2019  * zero-copy mechanism.
2020  */
2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2022 					 struct stmmac_dma_conf *dma_conf,
2023 					 u32 queue)
2024 {
2025 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2026 	struct stmmac_channel *ch = &priv->channel[queue];
2027 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2028 	struct page_pool_params pp_params = { 0 };
2029 	unsigned int num_pages;
2030 	unsigned int napi_id;
2031 	int ret;
2032 
2033 	rx_q->queue_index = queue;
2034 	rx_q->priv_data = priv;
2035 
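	/* Create a page_pool sized to back the whole RX ring. The pool takes
	 * care of DMA mapping; buffers are mapped bidirectionally when an XDP
	 * program is attached so that XDP_TX can transmit out of the same pages.
	 */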
2036 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2037 	pp_params.pool_size = dma_conf->dma_rx_size;
2038 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2039 	pp_params.order = ilog2(num_pages);
2040 	pp_params.nid = dev_to_node(priv->device);
2041 	pp_params.dev = priv->device;
2042 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2043 	pp_params.offset = stmmac_rx_offset(priv);
2044 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2045 
2046 	rx_q->page_pool = page_pool_create(&pp_params);
2047 	if (IS_ERR(rx_q->page_pool)) {
2048 		ret = PTR_ERR(rx_q->page_pool);
2049 		rx_q->page_pool = NULL;
2050 		return ret;
2051 	}
2052 
2053 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2054 				 sizeof(*rx_q->buf_pool),
2055 				 GFP_KERNEL);
2056 	if (!rx_q->buf_pool)
2057 		return -ENOMEM;
2058 
2059 	if (priv->extend_desc) {
2060 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2061 						   dma_conf->dma_rx_size *
2062 						   sizeof(struct dma_extended_desc),
2063 						   &rx_q->dma_rx_phy,
2064 						   GFP_KERNEL);
2065 		if (!rx_q->dma_erx)
2066 			return -ENOMEM;
2067 
2068 	} else {
2069 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2070 						  dma_conf->dma_rx_size *
2071 						  sizeof(struct dma_desc),
2072 						  &rx_q->dma_rx_phy,
2073 						  GFP_KERNEL);
2074 		if (!rx_q->dma_rx)
2075 			return -ENOMEM;
2076 	}
2077 
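	/* XDP zero-copy queues are serviced by the combined rx/tx NAPI
	 * instance, so register the xdp_rxq against that NAPI id.
	 */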
2078 	if (stmmac_xdp_is_enabled(priv) &&
2079 	    test_bit(queue, priv->af_xdp_zc_qps))
2080 		napi_id = ch->rxtx_napi.napi_id;
2081 	else
2082 		napi_id = ch->rx_napi.napi_id;
2083 
2084 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2085 			       rx_q->queue_index,
2086 			       napi_id);
2087 	if (ret) {
2088 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2089 		return -EINVAL;
2090 	}
2091 
2092 	return 0;
2093 }
2094 
2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2096 				       struct stmmac_dma_conf *dma_conf)
2097 {
2098 	u32 rx_count = priv->plat->rx_queues_to_use;
2099 	u32 queue;
2100 	int ret;
2101 
2102 	/* RX queues buffers and DMA */
2103 	for (queue = 0; queue < rx_count; queue++) {
2104 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2105 		if (ret)
2106 			goto err_dma;
2107 	}
2108 
2109 	return 0;
2110 
2111 err_dma:
2112 	free_dma_rx_desc_resources(priv, dma_conf);
2113 
2114 	return ret;
2115 }
2116 
2117 /**
2118  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2119  * @priv: private structure
2120  * @dma_conf: structure to take the dma data
2121  * @queue: TX queue index
2122  * Description: according to which descriptor can be used (extended or basic)
2123  * this function allocates the resources for the TX path of the given queue:
2124  * the descriptor ring and the arrays used to track the socket buffers
2125  * during transmission.
2126  */
2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2128 					 struct stmmac_dma_conf *dma_conf,
2129 					 u32 queue)
2130 {
2131 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2132 	size_t size;
2133 	void *addr;
2134 
2135 	tx_q->queue_index = queue;
2136 	tx_q->priv_data = priv;
2137 
2138 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2139 				      sizeof(*tx_q->tx_skbuff_dma),
2140 				      GFP_KERNEL);
2141 	if (!tx_q->tx_skbuff_dma)
2142 		return -ENOMEM;
2143 
2144 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2145 				  sizeof(struct sk_buff *),
2146 				  GFP_KERNEL);
2147 	if (!tx_q->tx_skbuff)
2148 		return -ENOMEM;
2149 
2150 	if (priv->extend_desc)
2151 		size = sizeof(struct dma_extended_desc);
2152 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2153 		size = sizeof(struct dma_edesc);
2154 	else
2155 		size = sizeof(struct dma_desc);
2156 
2157 	size *= dma_conf->dma_tx_size;
2158 
2159 	addr = dma_alloc_coherent(priv->device, size,
2160 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2161 	if (!addr)
2162 		return -ENOMEM;
2163 
2164 	if (priv->extend_desc)
2165 		tx_q->dma_etx = addr;
2166 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2167 		tx_q->dma_entx = addr;
2168 	else
2169 		tx_q->dma_tx = addr;
2170 
2171 	return 0;
2172 }
2173 
2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2175 				       struct stmmac_dma_conf *dma_conf)
2176 {
2177 	u32 tx_count = priv->plat->tx_queues_to_use;
2178 	u32 queue;
2179 	int ret;
2180 
2181 	/* TX queues buffers and DMA */
2182 	for (queue = 0; queue < tx_count; queue++) {
2183 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2184 		if (ret)
2185 			goto err_dma;
2186 	}
2187 
2188 	return 0;
2189 
2190 err_dma:
2191 	free_dma_tx_desc_resources(priv, dma_conf);
2192 	return ret;
2193 }
2194 
2195 /**
2196  * alloc_dma_desc_resources - alloc TX/RX resources.
2197  * @priv: private structure
2198  * @dma_conf: structure to take the dma data
2199  * Description: according to which descriptor can be used (extended or basic)
2200  * this function allocates the resources for the TX and RX paths. In case of
2201  * reception, for example, it pre-allocates the RX socket buffers in order to
2202  * allow the zero-copy mechanism.
2203  */
2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2205 				    struct stmmac_dma_conf *dma_conf)
2206 {
2207 	/* RX Allocation */
2208 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2209 
2210 	if (ret)
2211 		return ret;
2212 
2213 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2214 
2215 	return ret;
2216 }
2217 
2218 /**
2219  * free_dma_desc_resources - free dma desc resources
2220  * @priv: private structure
2221  * @dma_conf: structure to take the dma data
2222  */
2223 static void free_dma_desc_resources(struct stmmac_priv *priv,
2224 				    struct stmmac_dma_conf *dma_conf)
2225 {
2226 	/* Release the DMA TX socket buffers */
2227 	free_dma_tx_desc_resources(priv, dma_conf);
2228 
2229 	/* Release the DMA RX socket buffers later
2230 	 * to ensure all pending XDP_TX buffers are returned.
2231 	 */
2232 	free_dma_rx_desc_resources(priv, dma_conf);
2233 }
2234 
2235 /**
2236  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2237  *  @priv: driver private structure
2238  *  Description: It is used for enabling the rx queues in the MAC
2239  */
2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2241 {
2242 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2243 	int queue;
2244 	u8 mode;
2245 
2246 	for (queue = 0; queue < rx_queues_count; queue++) {
2247 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2248 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2249 	}
2250 }
2251 
2252 /**
2253  * stmmac_start_rx_dma - start RX DMA channel
2254  * @priv: driver private structure
2255  * @chan: RX channel index
2256  * Description:
2257  * This starts an RX DMA channel
2258  */
2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2260 {
2261 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2262 	stmmac_start_rx(priv, priv->ioaddr, chan);
2263 }
2264 
2265 /**
2266  * stmmac_start_tx_dma - start TX DMA channel
2267  * @priv: driver private structure
2268  * @chan: TX channel index
2269  * Description:
2270  * This starts a TX DMA channel
2271  */
2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2273 {
2274 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2275 	stmmac_start_tx(priv, priv->ioaddr, chan);
2276 }
2277 
2278 /**
2279  * stmmac_stop_rx_dma - stop RX DMA channel
2280  * @priv: driver private structure
2281  * @chan: RX channel index
2282  * Description:
2283  * This stops an RX DMA channel
2284  */
2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2286 {
2287 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2288 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2289 }
2290 
2291 /**
2292  * stmmac_stop_tx_dma - stop TX DMA channel
2293  * @priv: driver private structure
2294  * @chan: TX channel index
2295  * Description:
2296  * This stops a TX DMA channel
2297  */
2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2299 {
2300 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2301 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2302 }
2303 
2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2305 {
2306 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2309 	u32 chan;
2310 
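	/* Re-enable RX and TX DMA interrupts on every channel, taking the
	 * per-channel lock to serialise against the NAPI handlers.
	 */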
2311 	for (chan = 0; chan < dma_csr_ch; chan++) {
2312 		struct stmmac_channel *ch = &priv->channel[chan];
2313 		unsigned long flags;
2314 
2315 		spin_lock_irqsave(&ch->lock, flags);
2316 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2317 		spin_unlock_irqrestore(&ch->lock, flags);
2318 	}
2319 }
2320 
2321 /**
2322  * stmmac_start_all_dma - start all RX and TX DMA channels
2323  * @priv: driver private structure
2324  * Description:
2325  * This starts all the RX and TX DMA channels
2326  */
2327 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2328 {
2329 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2330 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2331 	u32 chan = 0;
2332 
2333 	for (chan = 0; chan < rx_channels_count; chan++)
2334 		stmmac_start_rx_dma(priv, chan);
2335 
2336 	for (chan = 0; chan < tx_channels_count; chan++)
2337 		stmmac_start_tx_dma(priv, chan);
2338 }
2339 
2340 /**
2341  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2342  * @priv: driver private structure
2343  * Description:
2344  * This stops the RX and TX DMA channels
2345  */
2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2347 {
2348 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2349 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2350 	u32 chan = 0;
2351 
2352 	for (chan = 0; chan < rx_channels_count; chan++)
2353 		stmmac_stop_rx_dma(priv, chan);
2354 
2355 	for (chan = 0; chan < tx_channels_count; chan++)
2356 		stmmac_stop_tx_dma(priv, chan);
2357 }
2358 
2359 /**
2360  *  stmmac_dma_operation_mode - HW DMA operation mode
2361  *  @priv: driver private structure
2362  *  Description: it is used for configuring the DMA operation mode register in
2363  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2364  */
2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2366 {
2367 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2368 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2369 	int rxfifosz = priv->plat->rx_fifo_size;
2370 	int txfifosz = priv->plat->tx_fifo_size;
2371 	u32 txmode = 0;
2372 	u32 rxmode = 0;
2373 	u32 chan = 0;
2374 	u8 qmode = 0;
2375 
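	/* Fall back to the FIFO sizes reported by the HW capability register
	 * when the platform does not provide them.
	 */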
2376 	if (rxfifosz == 0)
2377 		rxfifosz = priv->dma_cap.rx_fifo_size;
2378 	if (txfifosz == 0)
2379 		txfifosz = priv->dma_cap.tx_fifo_size;
2380 
2381 	/* Adjust for real per queue fifo size */
2382 	rxfifosz /= rx_channels_count;
2383 	txfifosz /= tx_channels_count;
2384 
2385 	if (priv->plat->force_thresh_dma_mode) {
2386 		txmode = tc;
2387 		rxmode = tc;
2388 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2389 		/*
2390 		 * In the case of GMAC, SF mode can be enabled
2391 		 * to perform the TX COE in HW. This depends on:
2392 		 * 1) TX COE being actually supported
2393 		 * 2) there being no buggy Jumbo frame support
2394 		 *    that requires not inserting the csum in the TDES.
2395 		 */
2396 		txmode = SF_DMA_MODE;
2397 		rxmode = SF_DMA_MODE;
2398 		priv->xstats.threshold = SF_DMA_MODE;
2399 	} else {
2400 		txmode = tc;
2401 		rxmode = SF_DMA_MODE;
2402 	}
2403 
2404 	/* configure all channels */
2405 	for (chan = 0; chan < rx_channels_count; chan++) {
2406 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2407 		u32 buf_size;
2408 
2409 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2410 
2411 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2412 				rxfifosz, qmode);
2413 
2414 		if (rx_q->xsk_pool) {
2415 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2416 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2417 					      buf_size,
2418 					      chan);
2419 		} else {
2420 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 					      priv->dma_conf.dma_buf_sz,
2422 					      chan);
2423 		}
2424 	}
2425 
2426 	for (chan = 0; chan < tx_channels_count; chan++) {
2427 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2428 
2429 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2430 				txfifosz, qmode);
2431 	}
2432 }
2433 
2434 static void stmmac_xsk_request_timestamp(void *_priv)
2435 {
2436 	struct stmmac_metadata_request *meta_req = _priv;
2437 
2438 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2439 	*meta_req->set_ic = true;
2440 }
2441 
2442 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2443 {
2444 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2445 	struct stmmac_priv *priv = tx_compl->priv;
2446 	struct dma_desc *desc = tx_compl->desc;
2447 	bool found = false;
2448 	u64 ns = 0;
2449 
2450 	if (!priv->hwts_tx_en)
2451 		return 0;
2452 
2453 	/* check tx tstamp status */
2454 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2455 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2456 		found = true;
2457 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2458 		found = true;
2459 	}
2460 
2461 	if (found) {
2462 		ns -= priv->plat->cdc_error_adj;
2463 		return ns_to_ktime(ns);
2464 	}
2465 
2466 	return 0;
2467 }
2468 
2469 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2470 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2471 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2472 };
2473 
2474 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2475 {
2476 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2477 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2478 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2479 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2480 	unsigned int entry = tx_q->cur_tx;
2481 	struct dma_desc *tx_desc = NULL;
2482 	struct xdp_desc xdp_desc;
2483 	bool work_done = true;
2484 	u32 tx_set_ic_bit = 0;
2485 
2486 	/* Avoids TX time-out as we are sharing with slow path */
2487 	txq_trans_cond_update(nq);
2488 
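	/* Never try to queue more frames than there are free TX descriptors */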
2489 	budget = min(budget, stmmac_tx_avail(priv, queue));
2490 
2491 	while (budget-- > 0) {
2492 		struct stmmac_metadata_request meta_req;
2493 		struct xsk_tx_metadata *meta = NULL;
2494 		dma_addr_t dma_addr;
2495 		bool set_ic;
2496 
2497 		/* We share the ring with the slow path, so stop XSK TX descriptor
2498 		 * submission when the available TX ring space drops below the threshold.
2499 		 */
2500 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2501 		    !netif_carrier_ok(priv->dev)) {
2502 			work_done = false;
2503 			break;
2504 		}
2505 
2506 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2507 			break;
2508 
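		/* Honour the per-queue max SDU limit configured through EST:
		 * oversized frames are dropped and accounted for, not sent.
		 */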
2509 		if (priv->plat->est && priv->plat->est->enable &&
2510 		    priv->plat->est->max_sdu[queue] &&
2511 		    xdp_desc.len > priv->plat->est->max_sdu[queue]) {
2512 			priv->xstats.max_sdu_txq_drop[queue]++;
2513 			continue;
2514 		}
2515 
2516 		if (likely(priv->extend_desc))
2517 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2518 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2519 			tx_desc = &tx_q->dma_entx[entry].basic;
2520 		else
2521 			tx_desc = tx_q->dma_tx + entry;
2522 
2523 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2524 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2525 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2526 
2527 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2528 
2529 		/* To return the XDP buffer to the XSK pool, we simply call
2530 		 * xsk_tx_completed(), so we don't need to fill up
2531 		 * 'buf' and 'xdpf'.
2532 		 */
2533 		tx_q->tx_skbuff_dma[entry].buf = 0;
2534 		tx_q->xdpf[entry] = NULL;
2535 
2536 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2537 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2538 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2539 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2540 
2541 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2542 
2543 		tx_q->tx_count_frames++;
2544 
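		/* Request a completion interrupt (IC bit) only once every
		 * tx_coal_frames descriptors to limit the TX IRQ rate;
		 * a value of 0 disables frame-count based coalescing here.
		 */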
2545 		if (!priv->tx_coal_frames[queue])
2546 			set_ic = false;
2547 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2548 			set_ic = true;
2549 		else
2550 			set_ic = false;
2551 
2552 		meta_req.priv = priv;
2553 		meta_req.tx_desc = tx_desc;
2554 		meta_req.set_ic = &set_ic;
2555 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2556 					&meta_req);
2557 		if (set_ic) {
2558 			tx_q->tx_count_frames = 0;
2559 			stmmac_set_tx_ic(priv, tx_desc);
2560 			tx_set_ic_bit++;
2561 		}
2562 
2563 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2564 				       true, priv->mode, true, true,
2565 				       xdp_desc.len);
2566 
2567 		stmmac_enable_dma_transmission(priv, priv->ioaddr);
2568 
2569 		xsk_tx_metadata_to_compl(meta,
2570 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2571 
2572 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2573 		entry = tx_q->cur_tx;
2574 	}
2575 	u64_stats_update_begin(&txq_stats->napi_syncp);
2576 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2577 	u64_stats_update_end(&txq_stats->napi_syncp);
2578 
2579 	if (tx_desc) {
2580 		stmmac_flush_tx_descriptors(priv, queue);
2581 		xsk_tx_release(pool);
2582 	}
2583 
2584 	/* Return true only if both conditions are met:
2585 	 *  a) TX budget is still available
2586 	 *  b) work_done == true, i.e. the XSK TX desc peek came back empty
2587 	 *     (no more pending XSK TX frames to transmit)
2588 	 */
2589 	return !!budget && work_done;
2590 }
2591 
2592 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2593 {
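	/* Raise the TX threshold in steps of 64 (capped at 256) unless
	 * Store-and-Forward mode is already in use.
	 */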
2594 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2595 		tc += 64;
2596 
2597 		if (priv->plat->force_thresh_dma_mode)
2598 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2599 		else
2600 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2601 						      chan);
2602 
2603 		priv->xstats.threshold = tc;
2604 	}
2605 }
2606 
2607 /**
2608  * stmmac_tx_clean - to manage the transmission completion
2609  * @priv: driver private structure
2610  * @budget: napi budget limiting this function's packet handling
2611  * @queue: TX queue index
2612  * @pending_packets: signal to arm the TX coal timer
2613  * Description: it reclaims the transmit resources after transmission completes.
2614  * If some packets still need to be handled due to TX coalescing, set
2615  * pending_packets to true to make NAPI arm the TX coal timer.
2616  */
2617 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2618 			   bool *pending_packets)
2619 {
2620 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2621 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2622 	unsigned int bytes_compl = 0, pkts_compl = 0;
2623 	unsigned int entry, xmits = 0, count = 0;
2624 	u32 tx_packets = 0, tx_errors = 0;
2625 
2626 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2627 
2628 	tx_q->xsk_frames_done = 0;
2629 
2630 	entry = tx_q->dirty_tx;
2631 
2632 	/* Try to clean all the completed TX frames in one shot */
2633 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2634 		struct xdp_frame *xdpf;
2635 		struct sk_buff *skb;
2636 		struct dma_desc *p;
2637 		int status;
2638 
2639 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2640 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2641 			xdpf = tx_q->xdpf[entry];
2642 			skb = NULL;
2643 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2644 			xdpf = NULL;
2645 			skb = tx_q->tx_skbuff[entry];
2646 		} else {
2647 			xdpf = NULL;
2648 			skb = NULL;
2649 		}
2650 
2651 		if (priv->extend_desc)
2652 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2653 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2654 			p = &tx_q->dma_entx[entry].basic;
2655 		else
2656 			p = tx_q->dma_tx + entry;
2657 
2658 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2659 		/* Check if the descriptor is owned by the DMA */
2660 		if (unlikely(status & tx_dma_own))
2661 			break;
2662 
2663 		count++;
2664 
2665 		/* Make sure descriptor fields are read after reading
2666 		 * the own bit.
2667 		 */
2668 		dma_rmb();
2669 
2670 		/* Just consider the last segment and ...*/
2671 		if (likely(!(status & tx_not_ls))) {
2672 			/* ... verify the status error condition */
2673 			if (unlikely(status & tx_err)) {
2674 				tx_errors++;
2675 				if (unlikely(status & tx_err_bump_tc))
2676 					stmmac_bump_dma_threshold(priv, queue);
2677 			} else {
2678 				tx_packets++;
2679 			}
2680 			if (skb) {
2681 				stmmac_get_tx_hwtstamp(priv, p, skb);
2682 			} else {
2683 				struct stmmac_xsk_tx_complete tx_compl = {
2684 					.priv = priv,
2685 					.desc = p,
2686 				};
2687 
2688 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2689 							 &stmmac_xsk_tx_metadata_ops,
2690 							 &tx_compl);
2691 			}
2692 		}
2693 
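		/* Unmap buffers that the driver mapped itself; XDP_TX buffers
		 * come from the page_pool and are recycled there instead of
		 * being unmapped here.
		 */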
2694 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2695 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2696 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2697 				dma_unmap_page(priv->device,
2698 					       tx_q->tx_skbuff_dma[entry].buf,
2699 					       tx_q->tx_skbuff_dma[entry].len,
2700 					       DMA_TO_DEVICE);
2701 			else
2702 				dma_unmap_single(priv->device,
2703 						 tx_q->tx_skbuff_dma[entry].buf,
2704 						 tx_q->tx_skbuff_dma[entry].len,
2705 						 DMA_TO_DEVICE);
2706 			tx_q->tx_skbuff_dma[entry].buf = 0;
2707 			tx_q->tx_skbuff_dma[entry].len = 0;
2708 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2709 		}
2710 
2711 		stmmac_clean_desc3(priv, tx_q, p);
2712 
2713 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2714 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2715 
2716 		if (xdpf &&
2717 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2718 			xdp_return_frame_rx_napi(xdpf);
2719 			tx_q->xdpf[entry] = NULL;
2720 		}
2721 
2722 		if (xdpf &&
2723 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2724 			xdp_return_frame(xdpf);
2725 			tx_q->xdpf[entry] = NULL;
2726 		}
2727 
2728 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2729 			tx_q->xsk_frames_done++;
2730 
2731 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2732 			if (likely(skb)) {
2733 				pkts_compl++;
2734 				bytes_compl += skb->len;
2735 				dev_consume_skb_any(skb);
2736 				tx_q->tx_skbuff[entry] = NULL;
2737 			}
2738 		}
2739 
2740 		stmmac_release_tx_desc(priv, p, priv->mode);
2741 
2742 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2743 	}
2744 	tx_q->dirty_tx = entry;
2745 
2746 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2747 				  pkts_compl, bytes_compl);
2748 
2749 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2750 								queue))) &&
2751 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2752 
2753 		netif_dbg(priv, tx_done, priv->dev,
2754 			  "%s: restart transmit\n", __func__);
2755 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2756 	}
2757 
2758 	if (tx_q->xsk_pool) {
2759 		bool work_done;
2760 
2761 		if (tx_q->xsk_frames_done)
2762 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2763 
2764 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2765 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2766 
2767 		/* For XSK TX, we try to send as many frames as possible.
2768 		 * If the XSK work is done (XSK TX desc queue empty and budget
2769 		 * still available), return "budget - 1" to re-enable the TX IRQ.
2770 		 * Else, return "budget" to make NAPI continue polling.
2771 		 */
2772 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2773 					       STMMAC_XSK_TX_BUDGET_MAX);
2774 		if (work_done)
2775 			xmits = budget - 1;
2776 		else
2777 			xmits = budget;
2778 	}
2779 
2780 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2781 	    priv->eee_sw_timer_en) {
2782 		if (stmmac_enable_eee_mode(priv))
2783 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2784 	}
2785 
2786 	/* We still have pending packets, let's call for a new scheduling */
2787 	if (tx_q->dirty_tx != tx_q->cur_tx)
2788 		*pending_packets = true;
2789 
2790 	u64_stats_update_begin(&txq_stats->napi_syncp);
2791 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2792 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2793 	u64_stats_inc(&txq_stats->napi.tx_clean);
2794 	u64_stats_update_end(&txq_stats->napi_syncp);
2795 
2796 	priv->xstats.tx_errors += tx_errors;
2797 
2798 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2799 
2800 	/* Combine decisions from TX clean and XSK TX */
2801 	return max(count, xmits);
2802 }
2803 
2804 /**
2805  * stmmac_tx_err - to manage the tx error
2806  * @priv: driver private structure
2807  * @chan: channel index
2808  * Description: it cleans the descriptors and restarts the transmission
2809  * in case of transmission errors.
2810  */
2811 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2812 {
2813 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2814 
2815 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2816 
2817 	stmmac_stop_tx_dma(priv, chan);
2818 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2819 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2820 	stmmac_reset_tx_queue(priv, chan);
2821 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2822 			    tx_q->dma_tx_phy, chan);
2823 	stmmac_start_tx_dma(priv, chan);
2824 
2825 	priv->xstats.tx_errors++;
2826 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2827 }
2828 
2829 /**
2830  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2831  *  @priv: driver private structure
2832  *  @txmode: TX operating mode
2833  *  @rxmode: RX operating mode
2834  *  @chan: channel index
2835  *  Description: it is used for configuring the DMA operation mode at
2836  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2837  *  mode.
2838  */
2839 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2840 					  u32 rxmode, u32 chan)
2841 {
2842 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2843 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2844 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2845 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2846 	int rxfifosz = priv->plat->rx_fifo_size;
2847 	int txfifosz = priv->plat->tx_fifo_size;
2848 
2849 	if (rxfifosz == 0)
2850 		rxfifosz = priv->dma_cap.rx_fifo_size;
2851 	if (txfifosz == 0)
2852 		txfifosz = priv->dma_cap.tx_fifo_size;
2853 
2854 	/* Adjust for real per queue fifo size */
2855 	rxfifosz /= rx_channels_count;
2856 	txfifosz /= tx_channels_count;
2857 
2858 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2859 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2860 }
2861 
2862 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2863 {
2864 	int ret;
2865 
2866 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2867 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2868 	if (ret && (ret != -EINVAL)) {
2869 		stmmac_global_err(priv);
2870 		return true;
2871 	}
2872 
2873 	return false;
2874 }
2875 
2876 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2877 {
2878 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2879 						 &priv->xstats, chan, dir);
2880 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2881 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2882 	struct stmmac_channel *ch = &priv->channel[chan];
2883 	struct napi_struct *rx_napi;
2884 	struct napi_struct *tx_napi;
2885 	unsigned long flags;
2886 
2887 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2888 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2889 
2890 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2891 		if (napi_schedule_prep(rx_napi)) {
2892 			spin_lock_irqsave(&ch->lock, flags);
2893 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2894 			spin_unlock_irqrestore(&ch->lock, flags);
2895 			__napi_schedule(rx_napi);
2896 		}
2897 	}
2898 
2899 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2900 		if (napi_schedule_prep(tx_napi)) {
2901 			spin_lock_irqsave(&ch->lock, flags);
2902 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2903 			spin_unlock_irqrestore(&ch->lock, flags);
2904 			__napi_schedule(tx_napi);
2905 		}
2906 	}
2907 
2908 	return status;
2909 }
2910 
2911 /**
2912  * stmmac_dma_interrupt - DMA ISR
2913  * @priv: driver private structure
2914  * Description: this is the DMA ISR. It is called by the main ISR.
2915  * It calls the dwmac dma routine and schedules the poll method in case
2916  * some work can be done.
2917  */
2918 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2919 {
2920 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2921 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2922 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2923 				tx_channel_count : rx_channel_count;
2924 	u32 chan;
2925 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2926 
2927 	/* Make sure we never check beyond our status buffer. */
2928 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2929 		channels_to_check = ARRAY_SIZE(status);
2930 
2931 	for (chan = 0; chan < channels_to_check; chan++)
2932 		status[chan] = stmmac_napi_check(priv, chan,
2933 						 DMA_DIR_RXTX);
2934 
2935 	for (chan = 0; chan < tx_channel_count; chan++) {
2936 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2937 			/* Try to bump up the dma threshold on this failure */
2938 			stmmac_bump_dma_threshold(priv, chan);
2939 		} else if (unlikely(status[chan] == tx_hard_error)) {
2940 			stmmac_tx_err(priv, chan);
2941 		}
2942 	}
2943 }
2944 
2945 /**
2946  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2947  * @priv: driver private structure
2948  * Description: this masks the MMC irq since the counters are managed in SW.
2949  */
2950 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2951 {
2952 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2953 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2954 
2955 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2956 
2957 	if (priv->dma_cap.rmon) {
2958 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2959 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2960 	} else
2961 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2962 }
2963 
2964 /**
2965  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2966  * @priv: driver private structure
2967  * Description:
2968  *  new GMAC chip generations have a new register to indicate the
2969  *  presence of the optional features/functions.
2970  *  This can also be used to override the values passed through the
2971  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2972  */
2973 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2974 {
2975 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2976 }
2977 
2978 /**
2979  * stmmac_check_ether_addr - check if the MAC addr is valid
2980  * @priv: driver private structure
2981  * Description:
2982  * it verifies that the MAC address is valid; if it is not, it
2983  * generates a random MAC address
2984  */
2985 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2986 {
2987 	u8 addr[ETH_ALEN];
2988 
2989 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2990 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2991 		if (is_valid_ether_addr(addr))
2992 			eth_hw_addr_set(priv->dev, addr);
2993 		else
2994 			eth_hw_addr_random(priv->dev);
2995 		dev_info(priv->device, "device MAC address %pM\n",
2996 			 priv->dev->dev_addr);
2997 	}
2998 }
2999 
3000 /**
3001  * stmmac_init_dma_engine - DMA init.
3002  * @priv: driver private structure
3003  * Description:
3004  * It inits the DMA by invoking the specific MAC/GMAC callback.
3005  * Some DMA parameters can be passed from the platform;
3006  * if they are not passed, a default is kept for the MAC or GMAC.
3007  */
3008 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3009 {
3010 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3011 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3012 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3013 	struct stmmac_rx_queue *rx_q;
3014 	struct stmmac_tx_queue *tx_q;
3015 	u32 chan = 0;
3016 	int atds = 0;
3017 	int ret = 0;
3018 
3019 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3020 		dev_err(priv->device, "Invalid DMA configuration\n");
3021 		return -EINVAL;
3022 	}
3023 
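	/* The alternate descriptor size (ATDS) bit is only needed when
	 * extended descriptors are used in ring mode.
	 */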
3024 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3025 		atds = 1;
3026 
3027 	ret = stmmac_reset(priv, priv->ioaddr);
3028 	if (ret) {
3029 		dev_err(priv->device, "Failed to reset the dma\n");
3030 		return ret;
3031 	}
3032 
3033 	/* DMA Configuration */
3034 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
3035 
3036 	if (priv->plat->axi)
3037 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3038 
3039 	/* DMA CSR Channel configuration */
3040 	for (chan = 0; chan < dma_csr_ch; chan++) {
3041 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3042 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3043 	}
3044 
3045 	/* DMA RX Channel Configuration */
3046 	for (chan = 0; chan < rx_channels_count; chan++) {
3047 		rx_q = &priv->dma_conf.rx_queue[chan];
3048 
3049 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3050 				    rx_q->dma_rx_phy, chan);
3051 
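		/* Point the RX tail pointer just past the descriptors that
		 * already own a buffer, so the DMA only uses filled entries.
		 */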
3052 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3053 				     (rx_q->buf_alloc_num *
3054 				      sizeof(struct dma_desc));
3055 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3056 				       rx_q->rx_tail_addr, chan);
3057 	}
3058 
3059 	/* DMA TX Channel Configuration */
3060 	for (chan = 0; chan < tx_channels_count; chan++) {
3061 		tx_q = &priv->dma_conf.tx_queue[chan];
3062 
3063 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3064 				    tx_q->dma_tx_phy, chan);
3065 
3066 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3067 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3068 				       tx_q->tx_tail_addr, chan);
3069 	}
3070 
3071 	return ret;
3072 }
3073 
3074 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3075 {
3076 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3077 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3078 	struct stmmac_channel *ch;
3079 	struct napi_struct *napi;
3080 
3081 	if (!tx_coal_timer)
3082 		return;
3083 
3084 	ch = &priv->channel[tx_q->queue_index];
3085 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3086 
3087 	/* Arm the timer only if napi is not already scheduled.
3088 	 * If napi is scheduled, try to cancel any pending timer; it will be
3089 	 * armed again in the next scheduled napi.
3090 	 */
3091 	if (unlikely(!napi_is_scheduled(napi)))
3092 		hrtimer_start(&tx_q->txtimer,
3093 			      STMMAC_COAL_TIMER(tx_coal_timer),
3094 			      HRTIMER_MODE_REL);
3095 	else
3096 		hrtimer_try_to_cancel(&tx_q->txtimer);
3097 }
3098 
3099 /**
3100  * stmmac_tx_timer - mitigation sw timer for tx.
3101  * @t: data pointer
3102  * Description:
3103  * This is the timer handler used to schedule the TX NAPI poll, which runs stmmac_tx_clean.
3104  */
3105 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3106 {
3107 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3108 	struct stmmac_priv *priv = tx_q->priv_data;
3109 	struct stmmac_channel *ch;
3110 	struct napi_struct *napi;
3111 
3112 	ch = &priv->channel[tx_q->queue_index];
3113 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3114 
3115 	if (likely(napi_schedule_prep(napi))) {
3116 		unsigned long flags;
3117 
3118 		spin_lock_irqsave(&ch->lock, flags);
3119 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3120 		spin_unlock_irqrestore(&ch->lock, flags);
3121 		__napi_schedule(napi);
3122 	}
3123 
3124 	return HRTIMER_NORESTART;
3125 }
3126 
3127 /**
3128  * stmmac_init_coalesce - init mitigation options.
3129  * @priv: driver private structure
3130  * Description:
3131  * This inits the coalesce parameters: i.e. timer rate,
3132  * timer handler and default threshold used for enabling the
3133  * interrupt on completion bit.
3134  */
3135 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3136 {
3137 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3138 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3139 	u32 chan;
3140 
3141 	for (chan = 0; chan < tx_channel_count; chan++) {
3142 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3143 
3144 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3145 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3146 
3147 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3148 		tx_q->txtimer.function = stmmac_tx_timer;
3149 	}
3150 
3151 	for (chan = 0; chan < rx_channel_count; chan++)
3152 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3153 }
3154 
3155 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3156 {
3157 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3158 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3159 	u32 chan;
3160 
3161 	/* set TX ring length */
3162 	for (chan = 0; chan < tx_channels_count; chan++)
3163 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3164 				       (priv->dma_conf.dma_tx_size - 1), chan);
3165 
3166 	/* set RX ring length */
3167 	for (chan = 0; chan < rx_channels_count; chan++)
3168 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3169 				       (priv->dma_conf.dma_rx_size - 1), chan);
3170 }
3171 
3172 /**
3173  *  stmmac_set_tx_queue_weight - Set TX queue weight
3174  *  @priv: driver private structure
3175  *  Description: It is used for setting the TX queue weights
3176  */
3177 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3178 {
3179 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3180 	u32 weight;
3181 	u32 queue;
3182 
3183 	for (queue = 0; queue < tx_queues_count; queue++) {
3184 		weight = priv->plat->tx_queues_cfg[queue].weight;
3185 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3186 	}
3187 }
3188 
3189 /**
3190  *  stmmac_configure_cbs - Configure CBS in TX queue
3191  *  @priv: driver private structure
3192  *  Description: It is used for configuring CBS in AVB TX queues
3193  */
3194 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3195 {
3196 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3197 	u32 mode_to_use;
3198 	u32 queue;
3199 
3200 	/* queue 0 is reserved for legacy traffic */
3201 	for (queue = 1; queue < tx_queues_count; queue++) {
3202 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3203 		if (mode_to_use == MTL_QUEUE_DCB)
3204 			continue;
3205 
3206 		stmmac_config_cbs(priv, priv->hw,
3207 				priv->plat->tx_queues_cfg[queue].send_slope,
3208 				priv->plat->tx_queues_cfg[queue].idle_slope,
3209 				priv->plat->tx_queues_cfg[queue].high_credit,
3210 				priv->plat->tx_queues_cfg[queue].low_credit,
3211 				queue);
3212 	}
3213 }
3214 
3215 /**
3216  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3217  *  @priv: driver private structure
3218  *  Description: It is used for mapping RX queues to RX dma channels
3219  */
3220 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3221 {
3222 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3223 	u32 queue;
3224 	u32 chan;
3225 
3226 	for (queue = 0; queue < rx_queues_count; queue++) {
3227 		chan = priv->plat->rx_queues_cfg[queue].chan;
3228 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3229 	}
3230 }
3231 
3232 /**
3233  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3234  *  @priv: driver private structure
3235  *  Description: It is used for configuring the RX Queue Priority
3236  */
3237 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3238 {
3239 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3240 	u32 queue;
3241 	u32 prio;
3242 
3243 	for (queue = 0; queue < rx_queues_count; queue++) {
3244 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3245 			continue;
3246 
3247 		prio = priv->plat->rx_queues_cfg[queue].prio;
3248 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3249 	}
3250 }
3251 
3252 /**
3253  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3254  *  @priv: driver private structure
3255  *  Description: It is used for configuring the TX Queue Priority
3256  */
3257 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3258 {
3259 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3260 	u32 queue;
3261 	u32 prio;
3262 
3263 	for (queue = 0; queue < tx_queues_count; queue++) {
3264 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3265 			continue;
3266 
3267 		prio = priv->plat->tx_queues_cfg[queue].prio;
3268 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3269 	}
3270 }
3271 
3272 /**
3273  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3274  *  @priv: driver private structure
3275  *  Description: It is used for configuring the RX queue routing
3276  */
3277 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3278 {
3279 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3280 	u32 queue;
3281 	u8 packet;
3282 
3283 	for (queue = 0; queue < rx_queues_count; queue++) {
3284 		/* no specific packet type routing specified for the queue */
3285 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3286 			continue;
3287 
3288 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3289 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3290 	}
3291 }
3292 
3293 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3294 {
3295 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3296 		priv->rss.enable = false;
3297 		return;
3298 	}
3299 
3300 	if (priv->dev->features & NETIF_F_RXHASH)
3301 		priv->rss.enable = true;
3302 	else
3303 		priv->rss.enable = false;
3304 
3305 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3306 			     priv->plat->rx_queues_to_use);
3307 }
3308 
3309 /**
3310  *  stmmac_mtl_configuration - Configure MTL
3311  *  @priv: driver private structure
3312  *  Description: It is used for configuring the MTL
3313  */
3314 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3315 {
3316 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3317 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3318 
3319 	if (tx_queues_count > 1)
3320 		stmmac_set_tx_queue_weight(priv);
3321 
3322 	/* Configure MTL RX algorithms */
3323 	if (rx_queues_count > 1)
3324 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3325 				priv->plat->rx_sched_algorithm);
3326 
3327 	/* Configure MTL TX algorithms */
3328 	if (tx_queues_count > 1)
3329 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3330 				priv->plat->tx_sched_algorithm);
3331 
3332 	/* Configure CBS in AVB TX queues */
3333 	if (tx_queues_count > 1)
3334 		stmmac_configure_cbs(priv);
3335 
3336 	/* Map RX MTL to DMA channels */
3337 	stmmac_rx_queue_dma_chan_map(priv);
3338 
3339 	/* Enable MAC RX Queues */
3340 	stmmac_mac_enable_rx_queues(priv);
3341 
3342 	/* Set RX priorities */
3343 	if (rx_queues_count > 1)
3344 		stmmac_mac_config_rx_queues_prio(priv);
3345 
3346 	/* Set TX priorities */
3347 	if (tx_queues_count > 1)
3348 		stmmac_mac_config_tx_queues_prio(priv);
3349 
3350 	/* Set RX routing */
3351 	if (rx_queues_count > 1)
3352 		stmmac_mac_config_rx_queues_routing(priv);
3353 
3354 	/* Receive Side Scaling */
3355 	if (rx_queues_count > 1)
3356 		stmmac_mac_config_rss(priv);
3357 }
3358 
3359 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3360 {
3361 	if (priv->dma_cap.asp) {
3362 		netdev_info(priv->dev, "Enabling Safety Features\n");
3363 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3364 					  priv->plat->safety_feat_cfg);
3365 	} else {
3366 		netdev_info(priv->dev, "No Safety Features support found\n");
3367 	}
3368 }
3369 
3370 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3371 {
3372 	char *name;
3373 
3374 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3375 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3376 
3377 	name = priv->wq_name;
3378 	sprintf(name, "%s-fpe", priv->dev->name);
3379 
3380 	priv->fpe_wq = create_singlethread_workqueue(name);
3381 	if (!priv->fpe_wq) {
3382 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3383 
3384 		return -ENOMEM;
3385 	}
3386 	netdev_info(priv->dev, "FPE workqueue start");
3387 
3388 	return 0;
3389 }
3390 
3391 /**
3392  * stmmac_hw_setup - setup mac in a usable state.
3393  *  @dev : pointer to the device structure.
3394  *  @ptp_register: register PTP if set
3395  *  Description:
3396  *  this is the main function to set up the HW in a usable state: the
3397  *  dma engine is reset, the core registers are configured (e.g. AXI,
3398  *  checksum features, timers) and the DMA is ready to start receiving
3399  *  and transmitting.
3400  *  Return value:
3401  *  0 on success and an appropriate negative integer as defined in
3402  *  errno.h on failure.
3403  */
3404 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3405 {
3406 	struct stmmac_priv *priv = netdev_priv(dev);
3407 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3408 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3409 	bool sph_en;
3410 	u32 chan;
3411 	int ret;
3412 
3413 	/* DMA initialization and SW reset */
3414 	ret = stmmac_init_dma_engine(priv);
3415 	if (ret < 0) {
3416 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3417 			   __func__);
3418 		return ret;
3419 	}
3420 
3421 	/* Copy the MAC addr into the HW  */
3422 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3423 
3424 	/* PS and related bits will be programmed according to the speed */
3425 	if (priv->hw->pcs) {
3426 		int speed = priv->plat->mac_port_sel_speed;
3427 
3428 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3429 		    (speed == SPEED_1000)) {
3430 			priv->hw->ps = speed;
3431 		} else {
3432 			dev_warn(priv->device, "invalid port speed\n");
3433 			priv->hw->ps = 0;
3434 		}
3435 	}
3436 
3437 	/* Initialize the MAC Core */
3438 	stmmac_core_init(priv, priv->hw, dev);
3439 
3440 	/* Initialize MTL*/
3441 	stmmac_mtl_configuration(priv);
3442 
3443 	/* Initialize Safety Features */
3444 	stmmac_safety_feat_configuration(priv);
3445 
3446 	ret = stmmac_rx_ipc(priv, priv->hw);
3447 	if (!ret) {
3448 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3449 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3450 		priv->hw->rx_csum = 0;
3451 	}
3452 
3453 	/* Enable the MAC Rx/Tx */
3454 	stmmac_mac_set(priv, priv->ioaddr, true);
3455 
3456 	/* Set the HW DMA mode and the COE */
3457 	stmmac_dma_operation_mode(priv);
3458 
3459 	stmmac_mmc_setup(priv);
3460 
3461 	if (ptp_register) {
3462 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3463 		if (ret < 0)
3464 			netdev_warn(priv->dev,
3465 				    "failed to enable PTP reference clock: %pe\n",
3466 				    ERR_PTR(ret));
3467 	}
3468 
3469 	ret = stmmac_init_ptp(priv);
3470 	if (ret == -EOPNOTSUPP)
3471 		netdev_info(priv->dev, "PTP not supported by HW\n");
3472 	else if (ret)
3473 		netdev_warn(priv->dev, "PTP init failed\n");
3474 	else if (ptp_register)
3475 		stmmac_ptp_register(priv);
3476 
3477 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3478 
3479 	/* Convert the timer from msec to usec */
3480 	if (!priv->tx_lpi_timer)
3481 		priv->tx_lpi_timer = eee_timer * 1000;
3482 
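	/* Program the per-queue RX interrupt watchdog (RIWT), falling back to
	 * the default value where none has been configured.
	 */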
3483 	if (priv->use_riwt) {
3484 		u32 queue;
3485 
3486 		for (queue = 0; queue < rx_cnt; queue++) {
3487 			if (!priv->rx_riwt[queue])
3488 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3489 
3490 			stmmac_rx_watchdog(priv, priv->ioaddr,
3491 					   priv->rx_riwt[queue], queue);
3492 		}
3493 	}
3494 
3495 	if (priv->hw->pcs)
3496 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3497 
3498 	/* set TX and RX rings length */
3499 	stmmac_set_rings_length(priv);
3500 
3501 	/* Enable TSO */
3502 	if (priv->tso) {
3503 		for (chan = 0; chan < tx_cnt; chan++) {
3504 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3505 
3506 			/* TSO and TBS cannot co-exist */
3507 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3508 				continue;
3509 
3510 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3511 		}
3512 	}
3513 
3514 	/* Enable Split Header */
3515 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3516 	for (chan = 0; chan < rx_cnt; chan++)
3517 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3518 
3520 	/* VLAN Tag Insertion */
3521 	if (priv->dma_cap.vlins)
3522 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3523 
3524 	/* TBS */
3525 	for (chan = 0; chan < tx_cnt; chan++) {
3526 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3527 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3528 
3529 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3530 	}
3531 
3532 	/* Configure real RX and TX queues */
3533 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3534 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3535 
3536 	/* Start the ball rolling... */
3537 	stmmac_start_all_dma(priv);
3538 
3539 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3540 
3541 	if (priv->dma_cap.fpesel) {
3542 		stmmac_fpe_start_wq(priv);
3543 
3544 		if (priv->plat->fpe_cfg->enable)
3545 			stmmac_fpe_handshake(priv, true);
3546 	}
3547 
3548 	return 0;
3549 }
3550 
3551 static void stmmac_hw_teardown(struct net_device *dev)
3552 {
3553 	struct stmmac_priv *priv = netdev_priv(dev);
3554 
3555 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3556 }
3557 
3558 static void stmmac_free_irq(struct net_device *dev,
3559 			    enum request_irq_err irq_err, int irq_idx)
3560 {
3561 	struct stmmac_priv *priv = netdev_priv(dev);
3562 	int j;
3563 
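	/* Free in the reverse order of stmmac_request_irq_multi_msi(): each
	 * case releases the IRQs requested before the failing stage and then
	 * falls through to the next one.
	 */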
3564 	switch (irq_err) {
3565 	case REQ_IRQ_ERR_ALL:
3566 		irq_idx = priv->plat->tx_queues_to_use;
3567 		fallthrough;
3568 	case REQ_IRQ_ERR_TX:
3569 		for (j = irq_idx - 1; j >= 0; j--) {
3570 			if (priv->tx_irq[j] > 0) {
3571 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3572 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3573 			}
3574 		}
3575 		irq_idx = priv->plat->rx_queues_to_use;
3576 		fallthrough;
3577 	case REQ_IRQ_ERR_RX:
3578 		for (j = irq_idx - 1; j >= 0; j--) {
3579 			if (priv->rx_irq[j] > 0) {
3580 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3581 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3582 			}
3583 		}
3584 
3585 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3586 			free_irq(priv->sfty_ue_irq, dev);
3587 		fallthrough;
3588 	case REQ_IRQ_ERR_SFTY_UE:
3589 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3590 			free_irq(priv->sfty_ce_irq, dev);
3591 		fallthrough;
3592 	case REQ_IRQ_ERR_SFTY_CE:
3593 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3594 			free_irq(priv->lpi_irq, dev);
3595 		fallthrough;
3596 	case REQ_IRQ_ERR_LPI:
3597 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3598 			free_irq(priv->wol_irq, dev);
3599 		fallthrough;
3600 	case REQ_IRQ_ERR_SFTY:
3601 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3602 			free_irq(priv->sfty_irq, dev);
3603 		fallthrough;
3604 	case REQ_IRQ_ERR_WOL:
3605 		free_irq(dev->irq, dev);
3606 		fallthrough;
3607 	case REQ_IRQ_ERR_MAC:
3608 	case REQ_IRQ_ERR_NO:
3609 		/* If MAC IRQ request error, no more IRQ to free */
3610 		break;
3611 	}
3612 }
3613 
3614 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3615 {
3616 	struct stmmac_priv *priv = netdev_priv(dev);
3617 	enum request_irq_err irq_err;
3618 	cpumask_t cpu_mask;
3619 	int irq_idx = 0;
3620 	char *int_name;
3621 	int ret;
3622 	int i;
3623 
3624 	/* For common interrupt */
3625 	int_name = priv->int_name_mac;
3626 	sprintf(int_name, "%s:%s", dev->name, "mac");
3627 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3628 			  0, int_name, dev);
3629 	if (unlikely(ret < 0)) {
3630 		netdev_err(priv->dev,
3631 			   "%s: alloc mac MSI %d (error: %d)\n",
3632 			   __func__, dev->irq, ret);
3633 		irq_err = REQ_IRQ_ERR_MAC;
3634 		goto irq_error;
3635 	}
3636 
	/* Request the Wake IRQ in case a separate line
	 * is used for WoL
	 */
3640 	priv->wol_irq_disabled = true;
3641 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3642 		int_name = priv->int_name_wol;
3643 		sprintf(int_name, "%s:%s", dev->name, "wol");
3644 		ret = request_irq(priv->wol_irq,
3645 				  stmmac_mac_interrupt,
3646 				  0, int_name, dev);
3647 		if (unlikely(ret < 0)) {
3648 			netdev_err(priv->dev,
3649 				   "%s: alloc wol MSI %d (error: %d)\n",
3650 				   __func__, priv->wol_irq, ret);
3651 			irq_err = REQ_IRQ_ERR_WOL;
3652 			goto irq_error;
3653 		}
3654 	}
3655 
	/* Request the LPI IRQ in case a separate line
	 * is used for LPI
	 */
3659 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3660 		int_name = priv->int_name_lpi;
3661 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3662 		ret = request_irq(priv->lpi_irq,
3663 				  stmmac_mac_interrupt,
3664 				  0, int_name, dev);
3665 		if (unlikely(ret < 0)) {
3666 			netdev_err(priv->dev,
3667 				   "%s: alloc lpi MSI %d (error: %d)\n",
3668 				   __func__, priv->lpi_irq, ret);
3669 			irq_err = REQ_IRQ_ERR_LPI;
3670 			goto irq_error;
3671 		}
3672 	}
3673 
	/* Request the common Safety Feature Correctable/Uncorrectable
	 * Error line in case a separate line is used
	 */
3677 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3678 		int_name = priv->int_name_sfty;
3679 		sprintf(int_name, "%s:%s", dev->name, "safety");
3680 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3681 				  0, int_name, dev);
3682 		if (unlikely(ret < 0)) {
3683 			netdev_err(priv->dev,
3684 				   "%s: alloc sfty MSI %d (error: %d)\n",
3685 				   __func__, priv->sfty_irq, ret);
3686 			irq_err = REQ_IRQ_ERR_SFTY;
3687 			goto irq_error;
3688 		}
3689 	}
3690 
	/* Request the Safety Feature Correctable Error line in
	 * case a separate line is used
	 */
3694 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3695 		int_name = priv->int_name_sfty_ce;
3696 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3697 		ret = request_irq(priv->sfty_ce_irq,
3698 				  stmmac_safety_interrupt,
3699 				  0, int_name, dev);
3700 		if (unlikely(ret < 0)) {
3701 			netdev_err(priv->dev,
3702 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3703 				   __func__, priv->sfty_ce_irq, ret);
3704 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3705 			goto irq_error;
3706 		}
3707 	}
3708 
	/* Request the Safety Feature Uncorrectable Error line in
	 * case a separate line is used
	 */
3712 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3713 		int_name = priv->int_name_sfty_ue;
3714 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3715 		ret = request_irq(priv->sfty_ue_irq,
3716 				  stmmac_safety_interrupt,
3717 				  0, int_name, dev);
3718 		if (unlikely(ret < 0)) {
3719 			netdev_err(priv->dev,
3720 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3721 				   __func__, priv->sfty_ue_irq, ret);
3722 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3723 			goto irq_error;
3724 		}
3725 	}
3726 
3727 	/* Request Rx MSI irq */
3728 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3729 		if (i >= MTL_MAX_RX_QUEUES)
3730 			break;
3731 		if (priv->rx_irq[i] == 0)
3732 			continue;
3733 
3734 		int_name = priv->int_name_rx_irq[i];
3735 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3736 		ret = request_irq(priv->rx_irq[i],
3737 				  stmmac_msi_intr_rx,
3738 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3739 		if (unlikely(ret < 0)) {
3740 			netdev_err(priv->dev,
3741 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3742 				   __func__, i, priv->rx_irq[i], ret);
3743 			irq_err = REQ_IRQ_ERR_RX;
3744 			irq_idx = i;
3745 			goto irq_error;
3746 		}
3747 		cpumask_clear(&cpu_mask);
3748 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3749 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3750 	}
3751 
3752 	/* Request Tx MSI irq */
3753 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3754 		if (i >= MTL_MAX_TX_QUEUES)
3755 			break;
3756 		if (priv->tx_irq[i] == 0)
3757 			continue;
3758 
3759 		int_name = priv->int_name_tx_irq[i];
3760 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3761 		ret = request_irq(priv->tx_irq[i],
3762 				  stmmac_msi_intr_tx,
3763 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3764 		if (unlikely(ret < 0)) {
3765 			netdev_err(priv->dev,
3766 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3767 				   __func__, i, priv->tx_irq[i], ret);
3768 			irq_err = REQ_IRQ_ERR_TX;
3769 			irq_idx = i;
3770 			goto irq_error;
3771 		}
3772 		cpumask_clear(&cpu_mask);
3773 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3774 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3775 	}
3776 
3777 	return 0;
3778 
3779 irq_error:
3780 	stmmac_free_irq(dev, irq_err, irq_idx);
3781 	return ret;
3782 }
3783 
3784 static int stmmac_request_irq_single(struct net_device *dev)
3785 {
3786 	struct stmmac_priv *priv = netdev_priv(dev);
3787 	enum request_irq_err irq_err;
3788 	int ret;
3789 
3790 	ret = request_irq(dev->irq, stmmac_interrupt,
3791 			  IRQF_SHARED, dev->name, dev);
3792 	if (unlikely(ret < 0)) {
3793 		netdev_err(priv->dev,
3794 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3795 			   __func__, dev->irq, ret);
3796 		irq_err = REQ_IRQ_ERR_MAC;
3797 		goto irq_error;
3798 	}
3799 
	/* Request the Wake IRQ in case a separate line
	 * is used for WoL
	 */
3803 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3804 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3805 				  IRQF_SHARED, dev->name, dev);
3806 		if (unlikely(ret < 0)) {
3807 			netdev_err(priv->dev,
3808 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3809 				   __func__, priv->wol_irq, ret);
3810 			irq_err = REQ_IRQ_ERR_WOL;
3811 			goto irq_error;
3812 		}
3813 	}
3814 
	/* Request the LPI IRQ in case a separate line
	 * is used for LPI
	 */
3816 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3817 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3818 				  IRQF_SHARED, dev->name, dev);
3819 		if (unlikely(ret < 0)) {
3820 			netdev_err(priv->dev,
3821 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3822 				   __func__, priv->lpi_irq, ret);
3823 			irq_err = REQ_IRQ_ERR_LPI;
3824 			goto irq_error;
3825 		}
3826 	}
3827 
	/* Request the common Safety Feature Correctable/Uncorrectable
	 * Error line in case a separate line is used
	 */
3831 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3832 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3833 				  IRQF_SHARED, dev->name, dev);
3834 		if (unlikely(ret < 0)) {
3835 			netdev_err(priv->dev,
3836 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3837 				   __func__, priv->sfty_irq, ret);
3838 			irq_err = REQ_IRQ_ERR_SFTY;
3839 			goto irq_error;
3840 		}
3841 	}
3842 
3843 	return 0;
3844 
3845 irq_error:
3846 	stmmac_free_irq(dev, irq_err, 0);
3847 	return ret;
3848 }
3849 
3850 static int stmmac_request_irq(struct net_device *dev)
3851 {
3852 	struct stmmac_priv *priv = netdev_priv(dev);
3853 	int ret;
3854 
3855 	/* Request the IRQ lines */
3856 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3857 		ret = stmmac_request_irq_multi_msi(dev);
3858 	else
3859 		ret = stmmac_request_irq_single(dev);
3860 
3861 	return ret;
3862 }
3863 
3864 /**
3865  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3866  *  @priv: driver private structure
3867  *  @mtu: MTU to setup the dma queue and buf with
3868  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3869  *  Allocate the Tx/Rx DMA queue and init them.
3870  *  Return value:
3871  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3872  */
3873 static struct stmmac_dma_conf *
3874 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3875 {
3876 	struct stmmac_dma_conf *dma_conf;
3877 	int chan, bfsize, ret;
3878 
3879 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3880 	if (!dma_conf) {
3881 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3882 			   __func__);
3883 		return ERR_PTR(-ENOMEM);
3884 	}
3885 
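	/* Pick the RX buffer size: use 16KiB buffers when the HW/ring mode
	 * supports them for this MTU, otherwise derive the size from the MTU.
	 */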
3886 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3887 	if (bfsize < 0)
3888 		bfsize = 0;
3889 
3890 	if (bfsize < BUF_SIZE_16KiB)
3891 		bfsize = stmmac_set_bfsize(mtu, 0);
3892 
3893 	dma_conf->dma_buf_sz = bfsize;
	/* Choose the tx/rx size from the one already defined in the
	 * priv struct (if defined).
	 */
3897 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3898 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3899 
3900 	if (!dma_conf->dma_tx_size)
3901 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3902 	if (!dma_conf->dma_rx_size)
3903 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3904 
	/* Check for TBS early, before the TX descriptors are allocated */
3906 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3907 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3908 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3909 
3910 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3911 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3912 	}
3913 
3914 	ret = alloc_dma_desc_resources(priv, dma_conf);
3915 	if (ret < 0) {
3916 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3917 			   __func__);
3918 		goto alloc_error;
3919 	}
3920 
3921 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3922 	if (ret < 0) {
3923 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3924 			   __func__);
3925 		goto init_error;
3926 	}
3927 
3928 	return dma_conf;
3929 
3930 init_error:
3931 	free_dma_desc_resources(priv, dma_conf);
3932 alloc_error:
3933 	kfree(dma_conf);
3934 	return ERR_PTR(ret);
3935 }
3936 
3937 /**
3938  *  __stmmac_open - open entry point of the driver
3939  *  @dev : pointer to the device structure.
 *  @dma_conf :  the DMA configuration to install for this open
3941  *  Description:
3942  *  This function is the open entry point of the driver.
3943  *  Return value:
3944  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3945  *  file on failure.
3946  */
3947 static int __stmmac_open(struct net_device *dev,
3948 			 struct stmmac_dma_conf *dma_conf)
3949 {
3950 	struct stmmac_priv *priv = netdev_priv(dev);
3951 	int mode = priv->plat->phy_interface;
3952 	u32 chan;
3953 	int ret;
3954 
3955 	ret = pm_runtime_resume_and_get(priv->device);
3956 	if (ret < 0)
3957 		return ret;
3958 
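	/* Attach a PHY only when link management is not fully delegated to a
	 * PCS (TBI/RTBI, XPCS running C73 autoneg, or a Lynx PCS).
	 */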
3959 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
3960 	    priv->hw->pcs != STMMAC_PCS_RTBI &&
3961 	    (!priv->hw->xpcs ||
3962 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3963 	    !priv->hw->lynx_pcs) {
3964 		ret = stmmac_init_phy(dev);
3965 		if (ret) {
3966 			netdev_err(priv->dev,
3967 				   "%s: Cannot attach to PHY (error: %d)\n",
3968 				   __func__, ret);
3969 			goto init_phy_error;
3970 		}
3971 	}
3972 
3973 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3974 
3975 	buf_sz = dma_conf->dma_buf_sz;
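	/* Preserve any TBS enable state already set on the live TX queues
	 * before the new dma_conf replaces priv->dma_conf below.
	 */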
3976 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3977 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3978 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3979 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3980 
3981 	stmmac_reset_queues_param(priv);
3982 
3983 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3984 	    priv->plat->serdes_powerup) {
3985 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3986 		if (ret < 0) {
3987 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3988 				   __func__);
3989 			goto init_error;
3990 		}
3991 	}
3992 
3993 	ret = stmmac_hw_setup(dev, true);
3994 	if (ret < 0) {
3995 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3996 		goto init_error;
3997 	}
3998 
3999 	stmmac_init_coalesce(priv);
4000 
4001 	phylink_start(priv->phylink);
4002 	/* We may have called phylink_speed_down before */
4003 	phylink_speed_up(priv->phylink);
4004 
4005 	ret = stmmac_request_irq(dev);
4006 	if (ret)
4007 		goto irq_error;
4008 
4009 	stmmac_enable_all_queues(priv);
4010 	netif_tx_start_all_queues(priv->dev);
4011 	stmmac_enable_all_dma_irq(priv);
4012 
4013 	return 0;
4014 
4015 irq_error:
4016 	phylink_stop(priv->phylink);
4017 
4018 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4019 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4020 
4021 	stmmac_hw_teardown(dev);
4022 init_error:
4023 	phylink_disconnect_phy(priv->phylink);
4024 init_phy_error:
4025 	pm_runtime_put(priv->device);
4026 	return ret;
4027 }
4028 
4029 static int stmmac_open(struct net_device *dev)
4030 {
4031 	struct stmmac_priv *priv = netdev_priv(dev);
4032 	struct stmmac_dma_conf *dma_conf;
4033 	int ret;
4034 
4035 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4036 	if (IS_ERR(dma_conf))
4037 		return PTR_ERR(dma_conf);
4038 
4039 	ret = __stmmac_open(dev, dma_conf);
4040 	if (ret)
4041 		free_dma_desc_resources(priv, dma_conf);
4042 
4043 	kfree(dma_conf);
4044 	return ret;
4045 }
4046 
4047 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4048 {
4049 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4050 
4051 	if (priv->fpe_wq)
4052 		destroy_workqueue(priv->fpe_wq);
4053 
	netdev_info(priv->dev, "FPE workqueue stop\n");
4055 }
4056 
4057 /**
4058  *  stmmac_release - close entry point of the driver
4059  *  @dev : device pointer.
4060  *  Description:
4061  *  This is the stop entry point of the driver.
4062  */
4063 static int stmmac_release(struct net_device *dev)
4064 {
4065 	struct stmmac_priv *priv = netdev_priv(dev);
4066 	u32 chan;
4067 
4068 	if (device_may_wakeup(priv->device))
4069 		phylink_speed_down(priv->phylink, false);
4070 	/* Stop and disconnect the PHY */
4071 	phylink_stop(priv->phylink);
4072 	phylink_disconnect_phy(priv->phylink);
4073 
4074 	stmmac_disable_all_queues(priv);
4075 
4076 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4077 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4078 
4079 	netif_tx_disable(dev);
4080 
4081 	/* Free the IRQ lines */
4082 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4083 
4084 	if (priv->eee_enabled) {
4085 		priv->tx_path_in_lpi_mode = false;
4086 		del_timer_sync(&priv->eee_ctrl_timer);
4087 	}
4088 
4089 	/* Stop TX/RX DMA and clear the descriptors */
4090 	stmmac_stop_all_dma(priv);
4091 
4092 	/* Release and free the Rx/Tx resources */
4093 	free_dma_desc_resources(priv, &priv->dma_conf);
4094 
4095 	/* Disable the MAC Rx/Tx */
4096 	stmmac_mac_set(priv, priv->ioaddr, false);
4097 
	/* Power down the SerDes if present */
4099 	if (priv->plat->serdes_powerdown)
4100 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4101 
4102 	netif_carrier_off(dev);
4103 
4104 	stmmac_release_ptp(priv);
4105 
4106 	pm_runtime_put(priv->device);
4107 
4108 	if (priv->dma_cap.fpesel)
4109 		stmmac_fpe_stop_wq(priv);
4110 
4111 	return 0;
4112 }
4113 
4114 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4115 			       struct stmmac_tx_queue *tx_q)
4116 {
4117 	u16 tag = 0x0, inner_tag = 0x0;
4118 	u32 inner_type = 0x0;
4119 	struct dma_desc *p;
4120 
4121 	if (!priv->dma_cap.vlins)
4122 		return false;
4123 	if (!skb_vlan_tag_present(skb))
4124 		return false;
4125 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4126 		inner_tag = skb_vlan_tag_get(skb);
4127 		inner_type = STMMAC_VLAN_INSERT;
4128 	}
4129 
4130 	tag = skb_vlan_tag_get(skb);
4131 
4132 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4133 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4134 	else
4135 		p = &tx_q->dma_tx[tx_q->cur_tx];
4136 
4137 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4138 		return false;
4139 
4140 	stmmac_set_tx_owner(priv, p);
4141 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4142 	return true;
4143 }
4144 
4145 /**
 *  stmmac_tso_allocator - Allocate TSO descriptors for the payload
4147  *  @priv: driver private structure
4148  *  @des: buffer start address
4149  *  @total_len: total length to fill in descriptors
4150  *  @last_segment: condition for the last descriptor
4151  *  @queue: TX queue index
4152  *  Description:
 *  This function fills descriptors and requests new ones according to the
 *  buffer length to fill.
4155  */
4156 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4157 				 int total_len, bool last_segment, u32 queue)
4158 {
4159 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4160 	struct dma_desc *desc;
4161 	u32 buff_size;
4162 	int tmp_len;
4163 
4164 	tmp_len = total_len;
4165 
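	/* Fill one descriptor per chunk of at most TSO_MAX_BUFF_SIZE bytes
	 * until the whole payload has been described.
	 */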
4166 	while (tmp_len > 0) {
4167 		dma_addr_t curr_addr;
4168 
4169 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4170 						priv->dma_conf.dma_tx_size);
4171 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4172 
4173 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4174 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4175 		else
4176 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4177 
4178 		curr_addr = des + (total_len - tmp_len);
4179 		if (priv->dma_cap.addr64 <= 32)
4180 			desc->des0 = cpu_to_le32(curr_addr);
4181 		else
4182 			stmmac_set_desc_addr(priv, desc, curr_addr);
4183 
4184 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4185 			    TSO_MAX_BUFF_SIZE : tmp_len;
4186 
4187 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4188 				0, 1,
4189 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4190 				0, 0);
4191 
4192 		tmp_len -= TSO_MAX_BUFF_SIZE;
4193 	}
4194 }
4195 
4196 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4197 {
4198 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4199 	int desc_size;
4200 
4201 	if (likely(priv->extend_desc))
4202 		desc_size = sizeof(struct dma_extended_desc);
4203 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4204 		desc_size = sizeof(struct dma_edesc);
4205 	else
4206 		desc_size = sizeof(struct dma_desc);
4207 
	/* The own bit must be the latest setting done when preparing the
	 * descriptor, and a barrier is then needed to make sure that
	 * everything is coherent before the descriptor is granted to the
	 * DMA engine.
	 */
4212 	wmb();
4213 
4214 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4215 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4216 }
4217 
4218 /**
4219  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4220  *  @skb : the socket buffer
4221  *  @dev : device pointer
4222  *  Description: this is the transmit function that is called on TSO frames
4223  *  (support available on GMAC4 and newer chips).
4224  *  Diagram below show the ring programming in case of TSO frames:
4225  *
4226  *  First Descriptor
4227  *   --------
4228  *   | DES0 |---> buffer1 = L2/L3/L4 header
4229  *   | DES1 |---> TCP Payload (can continue on next descr...)
4230  *   | DES2 |---> buffer 1 and 2 len
4231  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4232  *   --------
4233  *	|
4234  *     ...
4235  *	|
4236  *   --------
4237  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4238  *   | DES1 | --|
4239  *   | DES2 | --> buffer 1 and 2 len
4240  *   | DES3 |
4241  *   --------
4242  *
 * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs
 * to be programmed when the MSS changes.
4244  */
4245 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4246 {
4247 	struct dma_desc *desc, *first, *mss_desc = NULL;
4248 	struct stmmac_priv *priv = netdev_priv(dev);
4249 	int nfrags = skb_shinfo(skb)->nr_frags;
4250 	u32 queue = skb_get_queue_mapping(skb);
4251 	unsigned int first_entry, tx_packets;
4252 	struct stmmac_txq_stats *txq_stats;
4253 	int tmp_pay_len = 0, first_tx;
4254 	struct stmmac_tx_queue *tx_q;
4255 	bool has_vlan, set_ic;
4256 	u8 proto_hdr_len, hdr;
4257 	u32 pay_len, mss;
4258 	dma_addr_t des;
4259 	int i;
4260 
4261 	tx_q = &priv->dma_conf.tx_queue[queue];
4262 	txq_stats = &priv->xstats.txq_stats[queue];
4263 	first_tx = tx_q->cur_tx;
4264 
4265 	/* Compute header lengths */
4266 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4267 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4268 		hdr = sizeof(struct udphdr);
4269 	} else {
4270 		proto_hdr_len = skb_tcp_all_headers(skb);
4271 		hdr = tcp_hdrlen(skb);
4272 	}
4273 
	/* Desc availability based on the threshold should be safe enough */
4275 	if (unlikely(stmmac_tx_avail(priv, queue) <
4276 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4277 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4278 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4279 								queue));
4280 			/* This is a hard error, log it. */
4281 			netdev_err(priv->dev,
4282 				   "%s: Tx Ring full when queue awake\n",
4283 				   __func__);
4284 		}
4285 		return NETDEV_TX_BUSY;
4286 	}
4287 
4288 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4289 
4290 	mss = skb_shinfo(skb)->gso_size;
4291 
4292 	/* set new MSS value if needed */
4293 	if (mss != tx_q->mss) {
4294 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4295 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4296 		else
4297 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4298 
4299 		stmmac_set_mss(priv, mss_desc, mss);
4300 		tx_q->mss = mss;
4301 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4302 						priv->dma_conf.dma_tx_size);
4303 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4304 	}
4305 
4306 	if (netif_msg_tx_queued(priv)) {
4307 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4308 			__func__, hdr, proto_hdr_len, pay_len, mss);
4309 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4310 			skb->data_len);
4311 	}
4312 
4313 	/* Check if VLAN can be inserted by HW */
4314 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4315 
4316 	first_entry = tx_q->cur_tx;
4317 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4318 
4319 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4320 		desc = &tx_q->dma_entx[first_entry].basic;
4321 	else
4322 		desc = &tx_q->dma_tx[first_entry];
4323 	first = desc;
4324 
4325 	if (has_vlan)
4326 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4327 
4328 	/* first descriptor: fill Headers on Buf1 */
4329 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4330 			     DMA_TO_DEVICE);
4331 	if (dma_mapping_error(priv->device, des))
4332 		goto dma_map_err;
4333 
4334 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4335 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4336 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4337 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4338 
4339 	if (priv->dma_cap.addr64 <= 32) {
4340 		first->des0 = cpu_to_le32(des);
4341 
4342 		/* Fill start of payload in buff2 of first descriptor */
4343 		if (pay_len)
4344 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4345 
4346 		/* If needed take extra descriptors to fill the remaining payload */
4347 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4348 	} else {
4349 		stmmac_set_desc_addr(priv, first, des);
4350 		tmp_pay_len = pay_len;
4351 		des += proto_hdr_len;
4352 		pay_len = 0;
4353 	}
4354 
4355 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4356 
4357 	/* Prepare fragments */
4358 	for (i = 0; i < nfrags; i++) {
4359 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4360 
4361 		des = skb_frag_dma_map(priv->device, frag, 0,
4362 				       skb_frag_size(frag),
4363 				       DMA_TO_DEVICE);
4364 		if (dma_mapping_error(priv->device, des))
4365 			goto dma_map_err;
4366 
4367 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4368 				     (i == nfrags - 1), queue);
4369 
4370 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4371 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4372 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4373 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4374 	}
4375 
4376 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4377 
4378 	/* Only the last descriptor gets to point to the skb. */
4379 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4380 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4381 
4382 	/* Manage tx mitigation */
4383 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4384 	tx_q->tx_count_frames += tx_packets;
4385 
4386 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4387 		set_ic = true;
4388 	else if (!priv->tx_coal_frames[queue])
4389 		set_ic = false;
4390 	else if (tx_packets > priv->tx_coal_frames[queue])
4391 		set_ic = true;
4392 	else if ((tx_q->tx_count_frames %
4393 		  priv->tx_coal_frames[queue]) < tx_packets)
4394 		set_ic = true;
4395 	else
4396 		set_ic = false;
4397 
4398 	if (set_ic) {
4399 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4400 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4401 		else
4402 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4403 
4404 		tx_q->tx_count_frames = 0;
4405 		stmmac_set_tx_ic(priv, desc);
4406 	}
4407 
4408 	/* We've used all descriptors we need for this skb, however,
4409 	 * advance cur_tx so that it references a fresh descriptor.
4410 	 * ndo_start_xmit will fill this descriptor the next time it's
4411 	 * called and stmmac_tx_clean may clean up to this descriptor.
4412 	 */
4413 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4414 
4415 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4416 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4417 			  __func__);
4418 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4419 	}
4420 
4421 	u64_stats_update_begin(&txq_stats->q_syncp);
4422 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4423 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4424 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4425 	if (set_ic)
4426 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4427 	u64_stats_update_end(&txq_stats->q_syncp);
4428 
4429 	if (priv->sarc_type)
4430 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4431 
4432 	skb_tx_timestamp(skb);
4433 
4434 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4435 		     priv->hwts_tx_en)) {
4436 		/* declare that device is doing timestamping */
4437 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4438 		stmmac_enable_tx_timestamp(priv, first);
4439 	}
4440 
4441 	/* Complete the first descriptor before granting the DMA */
4442 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4443 			proto_hdr_len,
4444 			pay_len,
4445 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4446 			hdr / 4, (skb->len - proto_hdr_len));
4447 
4448 	/* If context desc is used to change MSS */
4449 	if (mss_desc) {
4450 		/* Make sure that first descriptor has been completely
4451 		 * written, including its own bit. This is because MSS is
4452 		 * actually before first descriptor, so we need to make
4453 		 * sure that MSS's own bit is the last thing written.
4454 		 */
4455 		dma_wmb();
4456 		stmmac_set_tx_owner(priv, mss_desc);
4457 	}
4458 
4459 	if (netif_msg_pktdata(priv)) {
4460 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4461 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4462 			tx_q->cur_tx, first, nfrags);
4463 		pr_info(">>> frame to be transmitted: ");
4464 		print_pkt(skb->data, skb_headlen(skb));
4465 	}
4466 
4467 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4468 
4469 	stmmac_flush_tx_descriptors(priv, queue);
4470 	stmmac_tx_timer_arm(priv, queue);
4471 
4472 	return NETDEV_TX_OK;
4473 
4474 dma_map_err:
4475 	dev_err(priv->device, "Tx dma map failed\n");
4476 	dev_kfree_skb(skb);
4477 	priv->xstats.tx_dropped++;
4478 	return NETDEV_TX_OK;
4479 }
4480 
4481 /**
4482  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4483  * @skb: socket buffer to check
4484  *
4485  * Check if a packet has an ethertype that will trigger the IP header checks
4486  * and IP/TCP checksum engine of the stmmac core.
4487  *
4488  * Return: true if the ethertype can trigger the checksum engine, false
4489  * otherwise
4490  */
4491 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4492 {
4493 	int depth = 0;
4494 	__be16 proto;
4495 
4496 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4497 				    &depth);
4498 
4499 	return (depth <= ETH_HLEN) &&
4500 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4501 }
4502 
4503 /**
4504  *  stmmac_xmit - Tx entry point of the driver
4505  *  @skb : the socket buffer
4506  *  @dev : device pointer
4507  *  Description : this is the tx entry point of the driver.
4508  *  It programs the chain or the ring and supports oversized frames
4509  *  and SG feature.
4510  */
4511 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4512 {
4513 	unsigned int first_entry, tx_packets, enh_desc;
4514 	struct stmmac_priv *priv = netdev_priv(dev);
4515 	unsigned int nopaged_len = skb_headlen(skb);
4516 	int i, csum_insertion = 0, is_jumbo = 0;
4517 	u32 queue = skb_get_queue_mapping(skb);
4518 	int nfrags = skb_shinfo(skb)->nr_frags;
4519 	int gso = skb_shinfo(skb)->gso_type;
4520 	struct stmmac_txq_stats *txq_stats;
4521 	struct dma_edesc *tbs_desc = NULL;
4522 	struct dma_desc *desc, *first;
4523 	struct stmmac_tx_queue *tx_q;
4524 	bool has_vlan, set_ic;
4525 	int entry, first_tx;
4526 	dma_addr_t des;
4527 
4528 	tx_q = &priv->dma_conf.tx_queue[queue];
4529 	txq_stats = &priv->xstats.txq_stats[queue];
4530 	first_tx = tx_q->cur_tx;
4531 
4532 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4533 		stmmac_disable_eee_mode(priv);
4534 
4535 	/* Manage oversized TCP frames for GMAC4 device */
4536 	if (skb_is_gso(skb) && priv->tso) {
4537 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4538 			return stmmac_tso_xmit(skb, dev);
4539 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4540 			return stmmac_tso_xmit(skb, dev);
4541 	}
4542 
4543 	if (priv->plat->est && priv->plat->est->enable &&
4544 	    priv->plat->est->max_sdu[queue] &&
4545 	    skb->len > priv->plat->est->max_sdu[queue]){
4546 		priv->xstats.max_sdu_txq_drop[queue]++;
4547 		goto max_sdu_err;
4548 	}
4549 
4550 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4551 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4552 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4553 								queue));
4554 			/* This is a hard error, log it. */
4555 			netdev_err(priv->dev,
4556 				   "%s: Tx Ring full when queue awake\n",
4557 				   __func__);
4558 		}
4559 		return NETDEV_TX_BUSY;
4560 	}
4561 
4562 	/* Check if VLAN can be inserted by HW */
4563 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4564 
4565 	entry = tx_q->cur_tx;
4566 	first_entry = entry;
4567 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4568 
4569 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4570 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4571 	 * queues. In that case, checksum offloading for those queues that don't
4572 	 * support tx coe needs to fallback to software checksum calculation.
4573 	 *
4574 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4575 	 * also have to be checksummed in software.
4576 	 */
4577 	if (csum_insertion &&
4578 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4579 	     !stmmac_has_ip_ethertype(skb))) {
4580 		if (unlikely(skb_checksum_help(skb)))
4581 			goto dma_map_err;
4582 		csum_insertion = !csum_insertion;
4583 	}
4584 
4585 	if (likely(priv->extend_desc))
4586 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4587 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4588 		desc = &tx_q->dma_entx[entry].basic;
4589 	else
4590 		desc = tx_q->dma_tx + entry;
4591 
4592 	first = desc;
4593 
4594 	if (has_vlan)
4595 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4596 
4597 	enh_desc = priv->plat->enh_desc;
4598 	/* To program the descriptors according to the size of the frame */
4599 	if (enh_desc)
4600 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4601 
4602 	if (unlikely(is_jumbo)) {
4603 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4604 		if (unlikely(entry < 0) && (entry != -EINVAL))
4605 			goto dma_map_err;
4606 	}
4607 
4608 	for (i = 0; i < nfrags; i++) {
4609 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4610 		int len = skb_frag_size(frag);
4611 		bool last_segment = (i == (nfrags - 1));
4612 
4613 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4614 		WARN_ON(tx_q->tx_skbuff[entry]);
4615 
4616 		if (likely(priv->extend_desc))
4617 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4618 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4619 			desc = &tx_q->dma_entx[entry].basic;
4620 		else
4621 			desc = tx_q->dma_tx + entry;
4622 
4623 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4624 				       DMA_TO_DEVICE);
4625 		if (dma_mapping_error(priv->device, des))
4626 			goto dma_map_err; /* should reuse desc w/o issues */
4627 
4628 		tx_q->tx_skbuff_dma[entry].buf = des;
4629 
4630 		stmmac_set_desc_addr(priv, desc, des);
4631 
4632 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4633 		tx_q->tx_skbuff_dma[entry].len = len;
4634 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4635 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4636 
4637 		/* Prepare the descriptor and set the own bit too */
4638 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4639 				priv->mode, 1, last_segment, skb->len);
4640 	}
4641 
4642 	/* Only the last descriptor gets to point to the skb. */
4643 	tx_q->tx_skbuff[entry] = skb;
4644 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4645 
4646 	/* According to the coalesce parameter the IC bit for the latest
4647 	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
4649 	 * element in case of no SG.
4650 	 */
4651 	tx_packets = (entry + 1) - first_tx;
4652 	tx_q->tx_count_frames += tx_packets;
4653 
4654 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4655 		set_ic = true;
4656 	else if (!priv->tx_coal_frames[queue])
4657 		set_ic = false;
4658 	else if (tx_packets > priv->tx_coal_frames[queue])
4659 		set_ic = true;
4660 	else if ((tx_q->tx_count_frames %
4661 		  priv->tx_coal_frames[queue]) < tx_packets)
4662 		set_ic = true;
4663 	else
4664 		set_ic = false;
4665 
4666 	if (set_ic) {
4667 		if (likely(priv->extend_desc))
4668 			desc = &tx_q->dma_etx[entry].basic;
4669 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4670 			desc = &tx_q->dma_entx[entry].basic;
4671 		else
4672 			desc = &tx_q->dma_tx[entry];
4673 
4674 		tx_q->tx_count_frames = 0;
4675 		stmmac_set_tx_ic(priv, desc);
4676 	}
4677 
4678 	/* We've used all descriptors we need for this skb, however,
4679 	 * advance cur_tx so that it references a fresh descriptor.
4680 	 * ndo_start_xmit will fill this descriptor the next time it's
4681 	 * called and stmmac_tx_clean may clean up to this descriptor.
4682 	 */
4683 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4684 	tx_q->cur_tx = entry;
4685 
4686 	if (netif_msg_pktdata(priv)) {
4687 		netdev_dbg(priv->dev,
4688 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4689 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4690 			   entry, first, nfrags);
4691 
4692 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4693 		print_pkt(skb->data, skb->len);
4694 	}
4695 
4696 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4697 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4698 			  __func__);
4699 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4700 	}
4701 
4702 	u64_stats_update_begin(&txq_stats->q_syncp);
4703 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4704 	if (set_ic)
4705 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4706 	u64_stats_update_end(&txq_stats->q_syncp);
4707 
4708 	if (priv->sarc_type)
4709 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4710 
4711 	skb_tx_timestamp(skb);
4712 
4713 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4714 	 * problems because all the descriptors are actually ready to be
4715 	 * passed to the DMA engine.
4716 	 */
4717 	if (likely(!is_jumbo)) {
4718 		bool last_segment = (nfrags == 0);
4719 
4720 		des = dma_map_single(priv->device, skb->data,
4721 				     nopaged_len, DMA_TO_DEVICE);
4722 		if (dma_mapping_error(priv->device, des))
4723 			goto dma_map_err;
4724 
4725 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4726 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4727 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4728 
4729 		stmmac_set_desc_addr(priv, first, des);
4730 
4731 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4732 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4733 
4734 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4735 			     priv->hwts_tx_en)) {
4736 			/* declare that device is doing timestamping */
4737 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4738 			stmmac_enable_tx_timestamp(priv, first);
4739 		}
4740 
4741 		/* Prepare the first descriptor setting the OWN bit too */
4742 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4743 				csum_insertion, priv->mode, 0, last_segment,
4744 				skb->len);
4745 	}
4746 
4747 	if (tx_q->tbs & STMMAC_TBS_EN) {
4748 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4749 
4750 		tbs_desc = &tx_q->dma_entx[first_entry];
4751 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4752 	}
4753 
4754 	stmmac_set_tx_owner(priv, first);
4755 
4756 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4757 
4758 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4759 
4760 	stmmac_flush_tx_descriptors(priv, queue);
4761 	stmmac_tx_timer_arm(priv, queue);
4762 
4763 	return NETDEV_TX_OK;
4764 
4765 dma_map_err:
4766 	netdev_err(priv->dev, "Tx DMA map failed\n");
4767 max_sdu_err:
4768 	dev_kfree_skb(skb);
4769 	priv->xstats.tx_dropped++;
4770 	return NETDEV_TX_OK;
4771 }
4772 
4773 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4774 {
4775 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4776 	__be16 vlan_proto = veth->h_vlan_proto;
4777 	u16 vlanid;
4778 
4779 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4780 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4781 	    (vlan_proto == htons(ETH_P_8021AD) &&
4782 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4783 		/* pop the vlan tag */
4784 		vlanid = ntohs(veth->h_vlan_TCI);
4785 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4786 		skb_pull(skb, VLAN_HLEN);
4787 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4788 	}
4789 }
4790 
4791 /**
4792  * stmmac_rx_refill - refill used skb preallocated buffers
4793  * @priv: driver private structure
4794  * @queue: RX queue index
 * Description : this is to reallocate the RX buffers for the reception
 * process that is based on zero-copy.
4797  */
4798 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4799 {
4800 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4801 	int dirty = stmmac_rx_dirty(priv, queue);
4802 	unsigned int entry = rx_q->dirty_rx;
4803 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4804 
4805 	if (priv->dma_cap.host_dma_width <= 32)
4806 		gfp |= GFP_DMA32;
4807 
4808 	while (dirty-- > 0) {
4809 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4810 		struct dma_desc *p;
4811 		bool use_rx_wd;
4812 
4813 		if (priv->extend_desc)
4814 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4815 		else
4816 			p = rx_q->dma_rx + entry;
4817 
4818 		if (!buf->page) {
4819 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4820 			if (!buf->page)
4821 				break;
4822 		}
4823 
4824 		if (priv->sph && !buf->sec_page) {
4825 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4826 			if (!buf->sec_page)
4827 				break;
4828 
4829 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4830 		}
4831 
4832 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4833 
4834 		stmmac_set_desc_addr(priv, p, buf->addr);
4835 		if (priv->sph)
4836 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4837 		else
4838 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4839 		stmmac_refill_desc3(priv, rx_q, p);
4840 
4841 		rx_q->rx_count_frames++;
4842 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4843 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4844 			rx_q->rx_count_frames = 0;
4845 
4846 		use_rx_wd = !priv->rx_coal_frames[queue];
4847 		use_rx_wd |= rx_q->rx_count_frames > 0;
4848 		if (!priv->use_riwt)
4849 			use_rx_wd = false;
4850 
4851 		dma_wmb();
4852 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4853 
4854 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4855 	}
4856 	rx_q->dirty_rx = entry;
4857 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4858 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4859 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4860 }
4861 
4862 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4863 				       struct dma_desc *p,
4864 				       int status, unsigned int len)
4865 {
4866 	unsigned int plen = 0, hlen = 0;
4867 	int coe = priv->hw->rx_csum;
4868 
4869 	/* Not first descriptor, buffer is always zero */
4870 	if (priv->sph && len)
4871 		return 0;
4872 
4873 	/* First descriptor, get split header length */
4874 	stmmac_get_rx_header_len(priv, p, &hlen);
4875 	if (priv->sph && hlen) {
4876 		priv->xstats.rx_split_hdr_pkt_n++;
4877 		return hlen;
4878 	}
4879 
4880 	/* First descriptor, not last descriptor and not split header */
4881 	if (status & rx_not_ls)
4882 		return priv->dma_conf.dma_buf_sz;
4883 
4884 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4885 
4886 	/* First descriptor and last descriptor and not split header */
4887 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4888 }
4889 
4890 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4891 				       struct dma_desc *p,
4892 				       int status, unsigned int len)
4893 {
4894 	int coe = priv->hw->rx_csum;
4895 	unsigned int plen = 0;
4896 
4897 	/* Not split header, buffer is not available */
4898 	if (!priv->sph)
4899 		return 0;
4900 
4901 	/* Not last descriptor */
4902 	if (status & rx_not_ls)
4903 		return priv->dma_conf.dma_buf_sz;
4904 
4905 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4906 
4907 	/* Last descriptor */
4908 	return plen - len;
4909 }
4910 
4911 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4912 				struct xdp_frame *xdpf, bool dma_map)
4913 {
4914 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4915 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4916 	unsigned int entry = tx_q->cur_tx;
4917 	struct dma_desc *tx_desc;
4918 	dma_addr_t dma_addr;
4919 	bool set_ic;
4920 
4921 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4922 		return STMMAC_XDP_CONSUMED;
4923 
4924 	if (priv->plat->est && priv->plat->est->enable &&
4925 	    priv->plat->est->max_sdu[queue] &&
4926 	    xdpf->len > priv->plat->est->max_sdu[queue]) {
4927 		priv->xstats.max_sdu_txq_drop[queue]++;
4928 		return STMMAC_XDP_CONSUMED;
4929 	}
4930 
4931 	if (likely(priv->extend_desc))
4932 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4933 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4934 		tx_desc = &tx_q->dma_entx[entry].basic;
4935 	else
4936 		tx_desc = tx_q->dma_tx + entry;
4937 
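	/* Frames from ndo_xdp_xmit() (dma_map == true) live in memory the
	 * driver does not own and must be DMA mapped here.  XDP_TX frames
	 * come from our own page_pool, whose pages are already mapped, so
	 * only a DMA sync is needed.
	 */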
4938 	if (dma_map) {
4939 		dma_addr = dma_map_single(priv->device, xdpf->data,
4940 					  xdpf->len, DMA_TO_DEVICE);
4941 		if (dma_mapping_error(priv->device, dma_addr))
4942 			return STMMAC_XDP_CONSUMED;
4943 
4944 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4945 	} else {
4946 		struct page *page = virt_to_page(xdpf->data);
4947 
4948 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4949 			   xdpf->headroom;
4950 		dma_sync_single_for_device(priv->device, dma_addr,
4951 					   xdpf->len, DMA_BIDIRECTIONAL);
4952 
4953 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4954 	}
4955 
4956 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4957 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4958 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4959 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4960 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4961 
4962 	tx_q->xdpf[entry] = xdpf;
4963 
4964 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4965 
4966 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4967 			       true, priv->mode, true, true,
4968 			       xdpf->len);
4969 
4970 	tx_q->tx_count_frames++;
4971 
4972 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4973 		set_ic = true;
4974 	else
4975 		set_ic = false;
4976 
4977 	if (set_ic) {
4978 		tx_q->tx_count_frames = 0;
4979 		stmmac_set_tx_ic(priv, tx_desc);
4980 		u64_stats_update_begin(&txq_stats->q_syncp);
4981 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4982 		u64_stats_update_end(&txq_stats->q_syncp);
4983 	}
4984 
4985 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
4986 
4987 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4988 	tx_q->cur_tx = entry;
4989 
4990 	return STMMAC_XDP_TX;
4991 }
4992 
4993 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4994 				   int cpu)
4995 {
4996 	int index = cpu;
4997 
4998 	if (unlikely(index < 0))
4999 		index = 0;
5000 
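	/* Fold the CPU number onto the range of available TX queues */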
5001 	while (index >= priv->plat->tx_queues_to_use)
5002 		index -= priv->plat->tx_queues_to_use;
5003 
5004 	return index;
5005 }
5006 
5007 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5008 				struct xdp_buff *xdp)
5009 {
5010 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5011 	int cpu = smp_processor_id();
5012 	struct netdev_queue *nq;
5013 	int queue;
5014 	int res;
5015 
5016 	if (unlikely(!xdpf))
5017 		return STMMAC_XDP_CONSUMED;
5018 
5019 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5020 	nq = netdev_get_tx_queue(priv->dev, queue);
5021 
5022 	__netif_tx_lock(nq, cpu);
5023 	/* Avoids TX time-out as we are sharing with slow path */
5024 	txq_trans_cond_update(nq);
5025 
5026 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5027 	if (res == STMMAC_XDP_TX)
5028 		stmmac_flush_tx_descriptors(priv, queue);
5029 
5030 	__netif_tx_unlock(nq);
5031 
5032 	return res;
5033 }
5034 
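/* Run the XDP program on the received buffer and translate its verdict into
 * the driver's internal STMMAC_XDP_* result codes.
 */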
5035 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5036 				 struct bpf_prog *prog,
5037 				 struct xdp_buff *xdp)
5038 {
5039 	u32 act;
5040 	int res;
5041 
5042 	act = bpf_prog_run_xdp(prog, xdp);
5043 	switch (act) {
5044 	case XDP_PASS:
5045 		res = STMMAC_XDP_PASS;
5046 		break;
5047 	case XDP_TX:
5048 		res = stmmac_xdp_xmit_back(priv, xdp);
5049 		break;
5050 	case XDP_REDIRECT:
5051 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5052 			res = STMMAC_XDP_CONSUMED;
5053 		else
5054 			res = STMMAC_XDP_REDIRECT;
5055 		break;
5056 	default:
5057 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5058 		fallthrough;
5059 	case XDP_ABORTED:
5060 		trace_xdp_exception(priv->dev, prog, act);
5061 		fallthrough;
5062 	case XDP_DROP:
5063 		res = STMMAC_XDP_CONSUMED;
5064 		break;
5065 	}
5066 
5067 	return res;
5068 }
5069 
5070 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5071 					   struct xdp_buff *xdp)
5072 {
5073 	struct bpf_prog *prog;
5074 	int res;
5075 
5076 	prog = READ_ONCE(priv->xdp_prog);
5077 	if (!prog) {
5078 		res = STMMAC_XDP_PASS;
5079 		goto out;
5080 	}
5081 
5082 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5083 out:
5084 	return ERR_PTR(-res);
5085 }
5086 
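/* Finish deferred XDP work after an RX cycle: arm the TX timer if frames were
 * transmitted via XDP_TX and flush any pending XDP redirects.
 */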
5087 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5088 				   int xdp_status)
5089 {
5090 	int cpu = smp_processor_id();
5091 	int queue;
5092 
5093 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5094 
5095 	if (xdp_status & STMMAC_XDP_TX)
5096 		stmmac_tx_timer_arm(priv, queue);
5097 
5098 	if (xdp_status & STMMAC_XDP_REDIRECT)
5099 		xdp_do_flush();
5100 }
5101 
5102 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5103 					       struct xdp_buff *xdp)
5104 {
5105 	unsigned int metasize = xdp->data - xdp->data_meta;
5106 	unsigned int datasize = xdp->data_end - xdp->data;
5107 	struct sk_buff *skb;
5108 
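	/* The XSK buffer itself is not handed to the stack; copy the frame
	 * (and its metadata) into a freshly allocated skb instead.
	 */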
5109 	skb = __napi_alloc_skb(&ch->rxtx_napi,
5110 			       xdp->data_end - xdp->data_hard_start,
5111 			       GFP_ATOMIC | __GFP_NOWARN);
5112 	if (unlikely(!skb))
5113 		return NULL;
5114 
5115 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5116 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5117 	if (metasize)
5118 		skb_metadata_set(skb, metasize);
5119 
5120 	return skb;
5121 }
5122 
5123 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5124 				   struct dma_desc *p, struct dma_desc *np,
5125 				   struct xdp_buff *xdp)
5126 {
5127 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5128 	struct stmmac_channel *ch = &priv->channel[queue];
5129 	unsigned int len = xdp->data_end - xdp->data;
5130 	enum pkt_hash_types hash_type;
5131 	int coe = priv->hw->rx_csum;
5132 	struct sk_buff *skb;
5133 	u32 hash;
5134 
5135 	skb = stmmac_construct_skb_zc(ch, xdp);
5136 	if (!skb) {
5137 		priv->xstats.rx_dropped++;
5138 		return;
5139 	}
5140 
5141 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5142 	if (priv->hw->hw_vlan_en)
5143 		/* MAC level stripping. */
5144 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5145 	else
5146 		/* Driver level stripping. */
5147 		stmmac_rx_vlan(priv->dev, skb);
5148 	skb->protocol = eth_type_trans(skb, priv->dev);
5149 
5150 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5151 		skb_checksum_none_assert(skb);
5152 	else
5153 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5154 
5155 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5156 		skb_set_hash(skb, hash, hash_type);
5157 
5158 	skb_record_rx_queue(skb, queue);
5159 	napi_gro_receive(&ch->rxtx_napi, skb);
5160 
5161 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5162 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5163 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5164 	u64_stats_update_end(&rxq_stats->napi_syncp);
5165 }
5166 
5167 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5168 {
5169 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5170 	unsigned int entry = rx_q->dirty_rx;
5171 	struct dma_desc *rx_desc = NULL;
5172 	bool ret = true;
5173 
5174 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5175 
5176 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5177 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5178 		dma_addr_t dma_addr;
5179 		bool use_rx_wd;
5180 
5181 		if (!buf->xdp) {
5182 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5183 			if (!buf->xdp) {
5184 				ret = false;
5185 				break;
5186 			}
5187 		}
5188 
5189 		if (priv->extend_desc)
5190 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5191 		else
5192 			rx_desc = rx_q->dma_rx + entry;
5193 
5194 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5195 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5196 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5197 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5198 
5199 		rx_q->rx_count_frames++;
5200 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5201 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5202 			rx_q->rx_count_frames = 0;
5203 
5204 		use_rx_wd = !priv->rx_coal_frames[queue];
5205 		use_rx_wd |= rx_q->rx_count_frames > 0;
5206 		if (!priv->use_riwt)
5207 			use_rx_wd = false;
5208 
5209 		dma_wmb();
5210 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5211 
5212 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5213 	}
5214 
5215 	if (rx_desc) {
5216 		rx_q->dirty_rx = entry;
5217 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5218 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5219 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5220 	}
5221 
5222 	return ret;
5223 }
5224 
5225 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5226 {
5227 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5228 	 * to represent incoming packet, whereas cb field in the same structure
5229 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5230 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5231 	 */
5232 	return (struct stmmac_xdp_buff *)xdp;
5233 }
5234 
5235 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5236 {
5237 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5238 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5239 	unsigned int count = 0, error = 0, len = 0;
5240 	int dirty = stmmac_rx_dirty(priv, queue);
5241 	unsigned int next_entry = rx_q->cur_rx;
5242 	u32 rx_errors = 0, rx_dropped = 0;
5243 	unsigned int desc_size;
5244 	struct bpf_prog *prog;
5245 	bool failure = false;
5246 	int xdp_status = 0;
5247 	int status = 0;
5248 
5249 	if (netif_msg_rx_status(priv)) {
5250 		void *rx_head;
5251 
5252 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5253 		if (priv->extend_desc) {
5254 			rx_head = (void *)rx_q->dma_erx;
5255 			desc_size = sizeof(struct dma_extended_desc);
5256 		} else {
5257 			rx_head = (void *)rx_q->dma_rx;
5258 			desc_size = sizeof(struct dma_desc);
5259 		}
5260 
5261 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5262 				    rx_q->dma_rx_phy, desc_size);
5263 	}
5264 	while (count < limit) {
5265 		struct stmmac_rx_buffer *buf;
5266 		struct stmmac_xdp_buff *ctx;
5267 		unsigned int buf1_len = 0;
5268 		struct dma_desc *np, *p;
5269 		int entry;
5270 		int res;
5271 
5272 		if (!count && rx_q->state_saved) {
5273 			error = rx_q->state.error;
5274 			len = rx_q->state.len;
5275 		} else {
5276 			rx_q->state_saved = false;
5277 			error = 0;
5278 			len = 0;
5279 		}
5280 
5281 		if (count >= limit)
5282 			break;
5283 
5284 read_again:
5285 		buf1_len = 0;
5286 		entry = next_entry;
5287 		buf = &rx_q->buf_pool[entry];
5288 
5289 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5290 			failure = failure ||
5291 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5292 			dirty = 0;
5293 		}
5294 
5295 		if (priv->extend_desc)
5296 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5297 		else
5298 			p = rx_q->dma_rx + entry;
5299 
5300 		/* read the status of the incoming frame */
5301 		status = stmmac_rx_status(priv, &priv->xstats, p);
5302 		/* check if it is still owned by the DMA; otherwise go ahead */
5303 		if (unlikely(status & dma_own))
5304 			break;
5305 
5306 		/* Prefetch the next RX descriptor */
5307 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5308 						priv->dma_conf.dma_rx_size);
5309 		next_entry = rx_q->cur_rx;
5310 
5311 		if (priv->extend_desc)
5312 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5313 		else
5314 			np = rx_q->dma_rx + next_entry;
5315 
5316 		prefetch(np);
5317 
5318 		/* Ensure a valid XSK buffer before proceeding */
5319 		if (!buf->xdp)
5320 			break;
5321 
5322 		if (priv->extend_desc)
5323 			stmmac_rx_extended_status(priv, &priv->xstats,
5324 						  rx_q->dma_erx + entry);
5325 		if (unlikely(status == discard_frame)) {
5326 			xsk_buff_free(buf->xdp);
5327 			buf->xdp = NULL;
5328 			dirty++;
5329 			error = 1;
5330 			if (!priv->hwts_rx_en)
5331 				rx_errors++;
5332 		}
5333 
5334 		if (unlikely(error && (status & rx_not_ls)))
5335 			goto read_again;
5336 		if (unlikely(error)) {
5337 			count++;
5338 			continue;
5339 		}
5340 
5341 		/* The XSK pool expects each RX frame to be 1:1 mapped to an XSK buffer */
5342 		if (likely(status & rx_not_ls)) {
5343 			xsk_buff_free(buf->xdp);
5344 			buf->xdp = NULL;
5345 			dirty++;
5346 			count++;
5347 			goto read_again;
5348 		}
5349 
5350 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5351 		ctx->priv = priv;
5352 		ctx->desc = p;
5353 		ctx->ndesc = np;
5354 
5355 		/* XDP ZC frames only support primary buffers for now */
5356 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5357 		len += buf1_len;
5358 
5359 		/* ACS is disabled; strip manually. */
5360 		if (likely(!(status & rx_not_ls))) {
5361 			buf1_len -= ETH_FCS_LEN;
5362 			len -= ETH_FCS_LEN;
5363 		}
5364 
5365 		/* RX buffer is good and fits into an XSK pool buffer */
5366 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5367 		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5368 
5369 		prog = READ_ONCE(priv->xdp_prog);
5370 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5371 
5372 		switch (res) {
5373 		case STMMAC_XDP_PASS:
5374 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5375 			xsk_buff_free(buf->xdp);
5376 			break;
5377 		case STMMAC_XDP_CONSUMED:
5378 			xsk_buff_free(buf->xdp);
5379 			rx_dropped++;
5380 			break;
5381 		case STMMAC_XDP_TX:
5382 		case STMMAC_XDP_REDIRECT:
5383 			xdp_status |= res;
5384 			break;
5385 		}
5386 
5387 		buf->xdp = NULL;
5388 		dirty++;
5389 		count++;
5390 	}
5391 
5392 	if (status & rx_not_ls) {
5393 		rx_q->state_saved = true;
5394 		rx_q->state.error = error;
5395 		rx_q->state.len = len;
5396 	}
5397 
5398 	stmmac_finalize_xdp_rx(priv, xdp_status);
5399 
5400 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5401 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5402 	u64_stats_update_end(&rxq_stats->napi_syncp);
5403 
5404 	priv->xstats.rx_dropped += rx_dropped;
5405 	priv->xstats.rx_errors += rx_errors;
5406 
5407 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5408 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5409 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5410 		else
5411 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5412 
5413 		return (int)count;
5414 	}
5415 
5416 	return failure ? limit : (int)count;
5417 }
5418 
5419 /**
5420  * stmmac_rx - manage the receive process
5421  * @priv: driver private structure
5422  * @limit: NAPI budget
5423  * @queue: RX queue index.
5424  * Description: this is the function called by the NAPI poll method.
5425  * It gets all the frames inside the ring.
5426  */
5427 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5428 {
5429 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5430 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5431 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5432 	struct stmmac_channel *ch = &priv->channel[queue];
5433 	unsigned int count = 0, error = 0, len = 0;
5434 	int status = 0, coe = priv->hw->rx_csum;
5435 	unsigned int next_entry = rx_q->cur_rx;
5436 	enum dma_data_direction dma_dir;
5437 	unsigned int desc_size;
5438 	struct sk_buff *skb = NULL;
5439 	struct stmmac_xdp_buff ctx;
5440 	int xdp_status = 0;
5441 	int buf_sz;
5442 
5443 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5444 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5445 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5446 
5447 	if (netif_msg_rx_status(priv)) {
5448 		void *rx_head;
5449 
5450 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5451 		if (priv->extend_desc) {
5452 			rx_head = (void *)rx_q->dma_erx;
5453 			desc_size = sizeof(struct dma_extended_desc);
5454 		} else {
5455 			rx_head = (void *)rx_q->dma_rx;
5456 			desc_size = sizeof(struct dma_desc);
5457 		}
5458 
5459 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5460 				    rx_q->dma_rx_phy, desc_size);
5461 	}
5462 	while (count < limit) {
5463 		unsigned int buf1_len = 0, buf2_len = 0;
5464 		enum pkt_hash_types hash_type;
5465 		struct stmmac_rx_buffer *buf;
5466 		struct dma_desc *np, *p;
5467 		int entry;
5468 		u32 hash;
5469 
5470 		if (!count && rx_q->state_saved) {
5471 			skb = rx_q->state.skb;
5472 			error = rx_q->state.error;
5473 			len = rx_q->state.len;
5474 		} else {
5475 			rx_q->state_saved = false;
5476 			skb = NULL;
5477 			error = 0;
5478 			len = 0;
5479 		}
5480 
5481 read_again:
5482 		if (count >= limit)
5483 			break;
5484 
5485 		buf1_len = 0;
5486 		buf2_len = 0;
5487 		entry = next_entry;
5488 		buf = &rx_q->buf_pool[entry];
5489 
5490 		if (priv->extend_desc)
5491 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5492 		else
5493 			p = rx_q->dma_rx + entry;
5494 
5495 		/* read the status of the incoming frame */
5496 		status = stmmac_rx_status(priv, &priv->xstats, p);
5497 		/* check if it is still owned by the DMA; otherwise go ahead */
5498 		if (unlikely(status & dma_own))
5499 			break;
5500 
5501 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5502 						priv->dma_conf.dma_rx_size);
5503 		next_entry = rx_q->cur_rx;
5504 
5505 		if (priv->extend_desc)
5506 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5507 		else
5508 			np = rx_q->dma_rx + next_entry;
5509 
5510 		prefetch(np);
5511 
5512 		if (priv->extend_desc)
5513 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5514 		if (unlikely(status == discard_frame)) {
5515 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5516 			buf->page = NULL;
5517 			error = 1;
5518 			if (!priv->hwts_rx_en)
5519 				rx_errors++;
5520 		}
5521 
5522 		if (unlikely(error && (status & rx_not_ls)))
5523 			goto read_again;
5524 		if (unlikely(error)) {
5525 			dev_kfree_skb(skb);
5526 			skb = NULL;
5527 			count++;
5528 			continue;
5529 		}
5530 
5531 		/* Buffer is good. Go on. */
5532 
5533 		prefetch(page_address(buf->page) + buf->page_offset);
5534 		if (buf->sec_page)
5535 			prefetch(page_address(buf->sec_page));
5536 
5537 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5538 		len += buf1_len;
5539 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5540 		len += buf2_len;
5541 
5542 		/* ACS is disabled; strip manually. */
5543 		if (likely(!(status & rx_not_ls))) {
5544 			if (buf2_len) {
5545 				buf2_len -= ETH_FCS_LEN;
5546 				len -= ETH_FCS_LEN;
5547 			} else if (buf1_len) {
5548 				buf1_len -= ETH_FCS_LEN;
5549 				len -= ETH_FCS_LEN;
5550 			}
5551 		}
5552 
5553 		if (!skb) {
5554 			unsigned int pre_len, sync_len;
5555 
5556 			dma_sync_single_for_cpu(priv->device, buf->addr,
5557 						buf1_len, dma_dir);
5558 
5559 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5560 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5561 					 buf->page_offset, buf1_len, true);
5562 
5563 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5564 				  buf->page_offset;
5565 
5566 			ctx.priv = priv;
5567 			ctx.desc = p;
5568 			ctx.ndesc = np;
5569 
5570 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5571 			/* Due to xdp_adjust_tail: the DMA sync for_device
5572 			 * must cover the maximum length the CPU touched.
5573 			 */
5574 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5575 				   buf->page_offset;
5576 			sync_len = max(sync_len, pre_len);
5577 
5578 			/* For non-XDP_PASS verdicts */
5579 			if (IS_ERR(skb)) {
5580 				unsigned int xdp_res = -PTR_ERR(skb);
5581 
5582 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5583 					page_pool_put_page(rx_q->page_pool,
5584 							   virt_to_head_page(ctx.xdp.data),
5585 							   sync_len, true);
5586 					buf->page = NULL;
5587 					rx_dropped++;
5588 
5589 					/* Clear skb as it was set as a
5590 					 * status code by the XDP program.
5591 					 */
5592 					skb = NULL;
5593 
5594 					if (unlikely((status & rx_not_ls)))
5595 						goto read_again;
5596 
5597 					count++;
5598 					continue;
5599 				} else if (xdp_res & (STMMAC_XDP_TX |
5600 						      STMMAC_XDP_REDIRECT)) {
5601 					xdp_status |= xdp_res;
5602 					buf->page = NULL;
5603 					skb = NULL;
5604 					count++;
5605 					continue;
5606 				}
5607 			}
5608 		}
5609 
5610 		if (!skb) {
5611 			/* XDP program may expand or reduce tail */
5612 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5613 
5614 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5615 			if (!skb) {
5616 				rx_dropped++;
5617 				count++;
5618 				goto drain_data;
5619 			}
5620 
5621 			/* XDP program may adjust header */
5622 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5623 			skb_put(skb, buf1_len);
5624 
5625 			/* Data payload copied into SKB, page ready for recycle */
5626 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5627 			buf->page = NULL;
5628 		} else if (buf1_len) {
5629 			dma_sync_single_for_cpu(priv->device, buf->addr,
5630 						buf1_len, dma_dir);
5631 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5632 					buf->page, buf->page_offset, buf1_len,
5633 					priv->dma_conf.dma_buf_sz);
5634 
5635 			/* Data payload appended into SKB */
5636 			skb_mark_for_recycle(skb);
5637 			buf->page = NULL;
5638 		}
5639 
5640 		if (buf2_len) {
5641 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5642 						buf2_len, dma_dir);
5643 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5644 					buf->sec_page, 0, buf2_len,
5645 					priv->dma_conf.dma_buf_sz);
5646 
5647 			/* Data payload appended into SKB */
5648 			skb_mark_for_recycle(skb);
5649 			buf->sec_page = NULL;
5650 		}
5651 
5652 drain_data:
5653 		if (likely(status & rx_not_ls))
5654 			goto read_again;
5655 		if (!skb)
5656 			continue;
5657 
5658 		/* Got entire packet into SKB. Finish it. */
5659 
5660 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5661 
5662 		if (priv->hw->hw_vlan_en)
5663 			/* MAC level stripping. */
5664 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5665 		else
5666 			/* Driver level stripping. */
5667 			stmmac_rx_vlan(priv->dev, skb);
5668 
5669 		skb->protocol = eth_type_trans(skb, priv->dev);
5670 
5671 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5672 			skb_checksum_none_assert(skb);
5673 		else
5674 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5675 
5676 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5677 			skb_set_hash(skb, hash, hash_type);
5678 
5679 		skb_record_rx_queue(skb, queue);
5680 		napi_gro_receive(&ch->rx_napi, skb);
5681 		skb = NULL;
5682 
5683 		rx_packets++;
5684 		rx_bytes += len;
5685 		count++;
5686 	}
5687 
5688 	if (status & rx_not_ls || skb) {
5689 		rx_q->state_saved = true;
5690 		rx_q->state.skb = skb;
5691 		rx_q->state.error = error;
5692 		rx_q->state.len = len;
5693 	}
5694 
5695 	stmmac_finalize_xdp_rx(priv, xdp_status);
5696 
5697 	stmmac_rx_refill(priv, queue);
5698 
5699 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5700 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5701 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5702 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5703 	u64_stats_update_end(&rxq_stats->napi_syncp);
5704 
5705 	priv->xstats.rx_dropped += rx_dropped;
5706 	priv->xstats.rx_errors += rx_errors;
5707 
5708 	return count;
5709 }
5710 
5711 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5712 {
5713 	struct stmmac_channel *ch =
5714 		container_of(napi, struct stmmac_channel, rx_napi);
5715 	struct stmmac_priv *priv = ch->priv_data;
5716 	struct stmmac_rxq_stats *rxq_stats;
5717 	u32 chan = ch->index;
5718 	int work_done;
5719 
5720 	rxq_stats = &priv->xstats.rxq_stats[chan];
5721 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5722 	u64_stats_inc(&rxq_stats->napi.poll);
5723 	u64_stats_update_end(&rxq_stats->napi_syncp);
5724 
5725 	work_done = stmmac_rx(priv, budget, chan);
5726 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5727 		unsigned long flags;
5728 
5729 		spin_lock_irqsave(&ch->lock, flags);
5730 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5731 		spin_unlock_irqrestore(&ch->lock, flags);
5732 	}
5733 
5734 	return work_done;
5735 }
5736 
5737 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5738 {
5739 	struct stmmac_channel *ch =
5740 		container_of(napi, struct stmmac_channel, tx_napi);
5741 	struct stmmac_priv *priv = ch->priv_data;
5742 	struct stmmac_txq_stats *txq_stats;
5743 	bool pending_packets = false;
5744 	u32 chan = ch->index;
5745 	int work_done;
5746 
5747 	txq_stats = &priv->xstats.txq_stats[chan];
5748 	u64_stats_update_begin(&txq_stats->napi_syncp);
5749 	u64_stats_inc(&txq_stats->napi.poll);
5750 	u64_stats_update_end(&txq_stats->napi_syncp);
5751 
5752 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5753 	work_done = min(work_done, budget);
5754 
5755 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5756 		unsigned long flags;
5757 
5758 		spin_lock_irqsave(&ch->lock, flags);
5759 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5760 		spin_unlock_irqrestore(&ch->lock, flags);
5761 	}
5762 
5763 	/* TX still has packets to handle; check if we need to arm the TX timer */
5764 	if (pending_packets)
5765 		stmmac_tx_timer_arm(priv, chan);
5766 
5767 	return work_done;
5768 }
5769 
5770 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5771 {
5772 	struct stmmac_channel *ch =
5773 		container_of(napi, struct stmmac_channel, rxtx_napi);
5774 	struct stmmac_priv *priv = ch->priv_data;
5775 	bool tx_pending_packets = false;
5776 	int rx_done, tx_done, rxtx_done;
5777 	struct stmmac_rxq_stats *rxq_stats;
5778 	struct stmmac_txq_stats *txq_stats;
5779 	u32 chan = ch->index;
5780 
5781 	rxq_stats = &priv->xstats.rxq_stats[chan];
5782 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5783 	u64_stats_inc(&rxq_stats->napi.poll);
5784 	u64_stats_update_end(&rxq_stats->napi_syncp);
5785 
5786 	txq_stats = &priv->xstats.txq_stats[chan];
5787 	u64_stats_update_begin(&txq_stats->napi_syncp);
5788 	u64_stats_inc(&txq_stats->napi.poll);
5789 	u64_stats_update_end(&txq_stats->napi_syncp);
5790 
5791 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5792 	tx_done = min(tx_done, budget);
5793 
5794 	rx_done = stmmac_rx_zc(priv, budget, chan);
5795 
5796 	rxtx_done = max(tx_done, rx_done);
5797 
5798 	/* If either TX or RX work is not complete, return budget
5799 	 * and keep polling
5800 	 */
5801 	if (rxtx_done >= budget)
5802 		return budget;
5803 
5804 	/* all work done, exit the polling mode */
5805 	if (napi_complete_done(napi, rxtx_done)) {
5806 		unsigned long flags;
5807 
5808 		spin_lock_irqsave(&ch->lock, flags);
5809 		/* Both RX and TX work are complete,
5810 		 * so enable both RX & TX IRQs.
5811 		 */
5812 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5813 		spin_unlock_irqrestore(&ch->lock, flags);
5814 	}
5815 
5816 	/* TX still has packets to handle; check if we need to arm the TX timer */
5817 	if (tx_pending_packets)
5818 		stmmac_tx_timer_arm(priv, chan);
5819 
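	/* A poll that has called napi_complete_done() is expected to report
	 * strictly less than the full budget, so clamp the combined RX/TX
	 * work accordingly.
	 */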
5820 	return min(rxtx_done, budget - 1);
5821 }
5822 
5823 /**
5824  *  stmmac_tx_timeout
5825  *  @dev : Pointer to net device structure
5826  *  @txqueue: the index of the hanging transmit queue
5827  *  Description: this function is called when a packet transmission fails to
5828  *   complete within a reasonable time. The driver will mark the error in the
5829  *   netdev structure and arrange for the device to be reset to a sane state
5830  *   in order to transmit a new packet.
5831  */
5832 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5833 {
5834 	struct stmmac_priv *priv = netdev_priv(dev);
5835 
5836 	stmmac_global_err(priv);
5837 }
5838 
5839 /**
5840  *  stmmac_set_rx_mode - entry point for multicast addressing
5841  *  @dev : pointer to the device structure
5842  *  Description:
5843  *  This function is a driver entry point which gets called by the kernel
5844  *  whenever multicast addresses must be enabled/disabled.
5845  *  Return value:
5846  *  void.
5847  */
5848 static void stmmac_set_rx_mode(struct net_device *dev)
5849 {
5850 	struct stmmac_priv *priv = netdev_priv(dev);
5851 
5852 	stmmac_set_filter(priv, priv->hw, dev);
5853 }
5854 
5855 /**
5856  *  stmmac_change_mtu - entry point to change MTU size for the device.
5857  *  @dev : device pointer.
5858  *  @new_mtu : the new MTU size for the device.
5859  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5860  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5861  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5862  *  Return value:
5863  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5864  *  file on failure.
5865  */
5866 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5867 {
5868 	struct stmmac_priv *priv = netdev_priv(dev);
5869 	int txfifosz = priv->plat->tx_fifo_size;
5870 	struct stmmac_dma_conf *dma_conf;
5871 	const int mtu = new_mtu;
5872 	int ret;
5873 
5874 	if (txfifosz == 0)
5875 		txfifosz = priv->dma_cap.tx_fifo_size;
5876 
5877 	txfifosz /= priv->plat->tx_queues_to_use;
5878 
5879 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5880 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5881 		return -EINVAL;
5882 	}
5883 
5884 	new_mtu = STMMAC_ALIGN(new_mtu);
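	/* For example, assuming SMP_CACHE_BYTES == 64:
	 *   STMMAC_ALIGN(1500) = ALIGN(ALIGN(1500, 64), 16) = 1536
	 * so the FIFO and size checks below are done on the aligned value.
	 */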
5885 
5886 	/* If this condition is true, the FIFO is too small or the MTU is too large */
5887 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5888 		return -EINVAL;
5889 
5890 	if (netif_running(dev)) {
5891 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5892 		/* Try to allocate the new DMA conf with the new mtu */
5893 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5894 		if (IS_ERR(dma_conf)) {
5895 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5896 				   mtu);
5897 			return PTR_ERR(dma_conf);
5898 		}
5899 
5900 		stmmac_release(dev);
5901 
5902 		ret = __stmmac_open(dev, dma_conf);
5903 		if (ret) {
5904 			free_dma_desc_resources(priv, dma_conf);
5905 			kfree(dma_conf);
5906 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5907 			return ret;
5908 		}
5909 
5910 		kfree(dma_conf);
5911 
5912 		stmmac_set_rx_mode(dev);
5913 	}
5914 
5915 	dev->mtu = mtu;
5916 	netdev_update_features(dev);
5917 
5918 	return 0;
5919 }
5920 
5921 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5922 					     netdev_features_t features)
5923 {
5924 	struct stmmac_priv *priv = netdev_priv(dev);
5925 
5926 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5927 		features &= ~NETIF_F_RXCSUM;
5928 
5929 	if (!priv->plat->tx_coe)
5930 		features &= ~NETIF_F_CSUM_MASK;
5931 
5932 	/* Some GMAC devices have buggy Jumbo frame support that
5933 	 * needs the TX COE disabled for oversized frames
5934 	 * (due to limited buffer sizes). In this case we disable
5935 	 * the TX csum insertion in the TDES and do not use SF.
5936 	 */
5937 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5938 		features &= ~NETIF_F_CSUM_MASK;
5939 
5940 	/* Disable TSO if asked by ethtool */
5941 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5942 		if (features & NETIF_F_TSO)
5943 			priv->tso = true;
5944 		else
5945 			priv->tso = false;
5946 	}
5947 
5948 	return features;
5949 }
5950 
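/* ndo_set_features handler: typically reached via "ethtool -K <iface> ..."
 * when offload features such as NETIF_F_RXCSUM or NETIF_F_HW_VLAN_CTAG_RX
 * are toggled. It re-programs RX checksum offload, split header and the
 * MAC-level VLAN stripping mode accordingly.
 */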
5951 static int stmmac_set_features(struct net_device *netdev,
5952 			       netdev_features_t features)
5953 {
5954 	struct stmmac_priv *priv = netdev_priv(netdev);
5955 
5956 	/* Keep the COE type if checksum offload is supported */
5957 	if (features & NETIF_F_RXCSUM)
5958 		priv->hw->rx_csum = priv->plat->rx_coe;
5959 	else
5960 		priv->hw->rx_csum = 0;
5961 	/* No check needed because rx_coe has been set before and it will be
5962 	 * fixed in case of an issue.
5963 	 */
5964 	stmmac_rx_ipc(priv, priv->hw);
5965 
5966 	if (priv->sph_cap) {
5967 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5968 		u32 chan;
5969 
5970 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5971 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5972 	}
5973 
5974 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5975 		priv->hw->hw_vlan_en = true;
5976 	else
5977 		priv->hw->hw_vlan_en = false;
5978 
5979 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5980 
5981 	return 0;
5982 }
5983 
5984 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5985 {
5986 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5987 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5988 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5989 	bool *hs_enable = &fpe_cfg->hs_enable;
5990 
5991 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5992 		return;
5993 
5994 	/* If LP has sent verify mPacket, LP is FPE capable */
5995 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5996 		if (*lp_state < FPE_STATE_CAPABLE)
5997 			*lp_state = FPE_STATE_CAPABLE;
5998 
5999 		/* If the user has requested FPE enable, respond quickly */
6000 		if (*hs_enable)
6001 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6002 						fpe_cfg,
6003 						MPACKET_RESPONSE);
6004 	}
6005 
6006 	/* If Local has sent verify mPacket, Local is FPE capable */
6007 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
6008 		if (*lo_state < FPE_STATE_CAPABLE)
6009 			*lo_state = FPE_STATE_CAPABLE;
6010 	}
6011 
6012 	/* If LP has sent response mPacket, LP is entering FPE ON */
6013 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
6014 		*lp_state = FPE_STATE_ENTERING_ON;
6015 
6016 	/* If Local has sent response mPacket, Local is entering FPE ON */
6017 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
6018 		*lo_state = FPE_STATE_ENTERING_ON;
6019 
6020 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
6021 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
6022 	    priv->fpe_wq) {
6023 		queue_work(priv->fpe_wq, &priv->fpe_task);
6024 	}
6025 }
6026 
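/* Handle the MAC-level (non-DMA) interrupt sources shared by the main ISR
 * and the dedicated MAC IRQ: wake-up events, EST and FPE status, LPI
 * entry/exit tracking, per-queue MTL status and PCS link changes.
 */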
6027 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6028 {
6029 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6030 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6031 	u32 queues_count;
6032 	u32 queue;
6033 	bool xmac;
6034 
6035 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6036 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6037 
6038 	if (priv->irq_wake)
6039 		pm_wakeup_event(priv->device, 0);
6040 
6041 	if (priv->dma_cap.estsel)
6042 		stmmac_est_irq_status(priv, priv, priv->dev,
6043 				      &priv->xstats, tx_cnt);
6044 
6045 	if (priv->dma_cap.fpesel) {
6046 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6047 						   priv->dev);
6048 
6049 		stmmac_fpe_event_status(priv, status);
6050 	}
6051 
6052 	/* To handle the GMAC's own interrupts */
6053 	if ((priv->plat->has_gmac) || xmac) {
6054 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6055 
6056 		if (unlikely(status)) {
6057 			/* For LPI we need to save the tx status */
6058 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6059 				priv->tx_path_in_lpi_mode = true;
6060 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6061 				priv->tx_path_in_lpi_mode = false;
6062 		}
6063 
6064 		for (queue = 0; queue < queues_count; queue++)
6065 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6066 
6067 		/* PCS link status */
6068 		if (priv->hw->pcs &&
6069 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6070 			if (priv->xstats.pcs_link)
6071 				netif_carrier_on(priv->dev);
6072 			else
6073 				netif_carrier_off(priv->dev);
6074 		}
6075 
6076 		stmmac_timestamp_interrupt(priv, priv);
6077 	}
6078 }
6079 
6080 /**
6081  *  stmmac_interrupt - main ISR
6082  *  @irq: interrupt number.
6083  *  @dev_id: to pass the net device pointer.
6084  *  Description: this is the main driver interrupt service routine.
6085  *  It can call:
6086  *  o DMA service routine (to manage incoming frame reception and transmission
6087  *    status)
6088  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6089  *    interrupts.
6090  */
6091 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6092 {
6093 	struct net_device *dev = (struct net_device *)dev_id;
6094 	struct stmmac_priv *priv = netdev_priv(dev);
6095 
6096 	/* Check if adapter is up */
6097 	if (test_bit(STMMAC_DOWN, &priv->state))
6098 		return IRQ_HANDLED;
6099 
6100 	/* Check ASP error if it isn't delivered via an individual IRQ */
6101 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6102 		return IRQ_HANDLED;
6103 
6104 	/* To handle Common interrupts */
6105 	stmmac_common_interrupt(priv);
6106 
6107 	/* To handle DMA interrupts */
6108 	stmmac_dma_interrupt(priv);
6109 
6110 	return IRQ_HANDLED;
6111 }
6112 
6113 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6114 {
6115 	struct net_device *dev = (struct net_device *)dev_id;
6116 	struct stmmac_priv *priv = netdev_priv(dev);
6117 
6118 	if (unlikely(!dev)) {
6119 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6120 		return IRQ_NONE;
6121 	}
6122 
6123 	/* Check if adapter is up */
6124 	if (test_bit(STMMAC_DOWN, &priv->state))
6125 		return IRQ_HANDLED;
6126 
6127 	/* To handle Common interrupts */
6128 	stmmac_common_interrupt(priv);
6129 
6130 	return IRQ_HANDLED;
6131 }
6132 
6133 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6134 {
6135 	struct net_device *dev = (struct net_device *)dev_id;
6136 	struct stmmac_priv *priv = netdev_priv(dev);
6137 
6138 	if (unlikely(!dev)) {
6139 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6140 		return IRQ_NONE;
6141 	}
6142 
6143 	/* Check if adapter is up */
6144 	if (test_bit(STMMAC_DOWN, &priv->state))
6145 		return IRQ_HANDLED;
6146 
6147 	/* Check if a fatal error happened */
6148 	stmmac_safety_feat_interrupt(priv);
6149 
6150 	return IRQ_HANDLED;
6151 }
6152 
6153 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6154 {
6155 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6156 	struct stmmac_dma_conf *dma_conf;
6157 	int chan = tx_q->queue_index;
6158 	struct stmmac_priv *priv;
6159 	int status;
6160 
6161 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6162 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6163 
6164 	if (unlikely(!data)) {
6165 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6166 		return IRQ_NONE;
6167 	}
6168 
6169 	/* Check if adapter is up */
6170 	if (test_bit(STMMAC_DOWN, &priv->state))
6171 		return IRQ_HANDLED;
6172 
6173 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6174 
6175 	if (unlikely(status & tx_hard_error_bump_tc)) {
6176 		/* Try to bump up the dma threshold on this failure */
6177 		stmmac_bump_dma_threshold(priv, chan);
6178 	} else if (unlikely(status == tx_hard_error)) {
6179 		stmmac_tx_err(priv, chan);
6180 	}
6181 
6182 	return IRQ_HANDLED;
6183 }
6184 
6185 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6186 {
6187 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6188 	struct stmmac_dma_conf *dma_conf;
6189 	int chan = rx_q->queue_index;
6190 	struct stmmac_priv *priv;
6191 
6192 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6193 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6194 
6195 	if (unlikely(!data)) {
6196 		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
6197 		return IRQ_NONE;
6198 	}
6199 
6200 	/* Check if adapter is up */
6201 	if (test_bit(STMMAC_DOWN, &priv->state))
6202 		return IRQ_HANDLED;
6203 
6204 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6205 
6206 	return IRQ_HANDLED;
6207 }
6208 
6209 /**
6210  *  stmmac_ioctl - Entry point for the Ioctl
6211  *  @dev: Device pointer.
6212  *  @rq: An IOCTL-specific structure that can contain a pointer to
6213  *  a proprietary structure used to pass information to the driver.
6214  *  @cmd: IOCTL command
6215  *  Description:
6216  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6217  */
6218 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6219 {
6220 	struct stmmac_priv *priv = netdev_priv(dev);
6221 	int ret = -EOPNOTSUPP;
6222 
6223 	if (!netif_running(dev))
6224 		return -EINVAL;
6225 
6226 	switch (cmd) {
6227 	case SIOCGMIIPHY:
6228 	case SIOCGMIIREG:
6229 	case SIOCSMIIREG:
6230 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6231 		break;
6232 	case SIOCSHWTSTAMP:
6233 		ret = stmmac_hwtstamp_set(dev, rq);
6234 		break;
6235 	case SIOCGHWTSTAMP:
6236 		ret = stmmac_hwtstamp_get(dev, rq);
6237 		break;
6238 	default:
6239 		break;
6240 	}
6241 
6242 	return ret;
6243 }
6244 
6245 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6246 				    void *cb_priv)
6247 {
6248 	struct stmmac_priv *priv = cb_priv;
6249 	int ret = -EOPNOTSUPP;
6250 
6251 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6252 		return ret;
6253 
6254 	__stmmac_disable_all_queues(priv);
6255 
6256 	switch (type) {
6257 	case TC_SETUP_CLSU32:
6258 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6259 		break;
6260 	case TC_SETUP_CLSFLOWER:
6261 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6262 		break;
6263 	default:
6264 		break;
6265 	}
6266 
6267 	stmmac_enable_all_queues(priv);
6268 	return ret;
6269 }
6270 
6271 static LIST_HEAD(stmmac_block_cb_list);
6272 
6273 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6274 			   void *type_data)
6275 {
6276 	struct stmmac_priv *priv = netdev_priv(ndev);
6277 
6278 	switch (type) {
6279 	case TC_QUERY_CAPS:
6280 		return stmmac_tc_query_caps(priv, priv, type_data);
6281 	case TC_SETUP_BLOCK:
6282 		return flow_block_cb_setup_simple(type_data,
6283 						  &stmmac_block_cb_list,
6284 						  stmmac_setup_tc_block_cb,
6285 						  priv, priv, true);
6286 	case TC_SETUP_QDISC_CBS:
6287 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6288 	case TC_SETUP_QDISC_TAPRIO:
6289 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6290 	case TC_SETUP_QDISC_ETF:
6291 		return stmmac_tc_setup_etf(priv, priv, type_data);
6292 	default:
6293 		return -EOPNOTSUPP;
6294 	}
6295 }
6296 
6297 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6298 			       struct net_device *sb_dev)
6299 {
6300 	int gso = skb_shinfo(skb)->gso_type;
6301 
6302 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6303 		/*
6304 		 * There is no way to determine the number of TSO/USO
6305 		 * capable Queues. Let's always use Queue 0
6306 		 * because if TSO/USO is supported then at least this
6307 		 * one will be capable.
6308 		 */
6309 		return 0;
6310 	}
6311 
6312 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6313 }
6314 
6315 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6316 {
6317 	struct stmmac_priv *priv = netdev_priv(ndev);
6318 	int ret = 0;
6319 
6320 	ret = pm_runtime_resume_and_get(priv->device);
6321 	if (ret < 0)
6322 		return ret;
6323 
6324 	ret = eth_mac_addr(ndev, addr);
6325 	if (ret)
6326 		goto set_mac_error;
6327 
6328 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6329 
6330 set_mac_error:
6331 	pm_runtime_put(priv->device);
6332 
6333 	return ret;
6334 }
6335 
6336 #ifdef CONFIG_DEBUG_FS
6337 static struct dentry *stmmac_fs_dir;
6338 
6339 static void sysfs_display_ring(void *head, int size, int extend_desc,
6340 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6341 {
6342 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6343 	struct dma_desc *p = (struct dma_desc *)head;
6344 	unsigned int desc_size;
6345 	dma_addr_t dma_addr;
6346 	int i;
6347 
6348 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6349 	for (i = 0; i < size; i++) {
6350 		dma_addr = dma_phy_addr + i * desc_size;
6351 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6352 				i, &dma_addr,
6353 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6354 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6355 		if (extend_desc)
6356 			p = &(++ep)->basic;
6357 		else
6358 			p++;
6359 	}
6360 }
6361 
6362 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6363 {
6364 	struct net_device *dev = seq->private;
6365 	struct stmmac_priv *priv = netdev_priv(dev);
6366 	u32 rx_count = priv->plat->rx_queues_to_use;
6367 	u32 tx_count = priv->plat->tx_queues_to_use;
6368 	u32 queue;
6369 
6370 	if ((dev->flags & IFF_UP) == 0)
6371 		return 0;
6372 
6373 	for (queue = 0; queue < rx_count; queue++) {
6374 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6375 
6376 		seq_printf(seq, "RX Queue %d:\n", queue);
6377 
6378 		if (priv->extend_desc) {
6379 			seq_printf(seq, "Extended descriptor ring:\n");
6380 			sysfs_display_ring((void *)rx_q->dma_erx,
6381 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6382 		} else {
6383 			seq_printf(seq, "Descriptor ring:\n");
6384 			sysfs_display_ring((void *)rx_q->dma_rx,
6385 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6386 		}
6387 	}
6388 
6389 	for (queue = 0; queue < tx_count; queue++) {
6390 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6391 
6392 		seq_printf(seq, "TX Queue %d:\n", queue);
6393 
6394 		if (priv->extend_desc) {
6395 			seq_printf(seq, "Extended descriptor ring:\n");
6396 			sysfs_display_ring((void *)tx_q->dma_etx,
6397 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6398 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6399 			seq_printf(seq, "Descriptor ring:\n");
6400 			sysfs_display_ring((void *)tx_q->dma_tx,
6401 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6402 		}
6403 	}
6404 
6405 	return 0;
6406 }
6407 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6408 
6409 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6410 {
6411 	static const char * const dwxgmac_timestamp_source[] = {
6412 		"None",
6413 		"Internal",
6414 		"External",
6415 		"Both",
6416 	};
6417 	static const char * const dwxgmac_safety_feature_desc[] = {
6418 		"No",
6419 		"All Safety Features with ECC and Parity",
6420 		"All Safety Features without ECC or Parity",
6421 		"All Safety Features with Parity Only",
6422 		"ECC Only",
6423 		"UNDEFINED",
6424 		"UNDEFINED",
6425 		"UNDEFINED",
6426 	};
6427 	struct net_device *dev = seq->private;
6428 	struct stmmac_priv *priv = netdev_priv(dev);
6429 
6430 	if (!priv->hw_cap_support) {
6431 		seq_printf(seq, "DMA HW features not supported\n");
6432 		return 0;
6433 	}
6434 
6435 	seq_printf(seq, "==============================\n");
6436 	seq_printf(seq, "\tDMA HW features\n");
6437 	seq_printf(seq, "==============================\n");
6438 
6439 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6440 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6441 	seq_printf(seq, "\t1000 Mbps: %s\n",
6442 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6443 	seq_printf(seq, "\tHalf duplex: %s\n",
6444 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6445 	if (priv->plat->has_xgmac) {
6446 		seq_printf(seq,
6447 			   "\tNumber of Additional MAC address registers: %d\n",
6448 			   priv->dma_cap.multi_addr);
6449 	} else {
6450 		seq_printf(seq, "\tHash Filter: %s\n",
6451 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6452 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6453 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6454 	}
6455 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6456 		   (priv->dma_cap.pcs) ? "Y" : "N");
6457 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6458 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6459 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6460 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6461 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6462 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6463 	seq_printf(seq, "\tRMON module: %s\n",
6464 		   (priv->dma_cap.rmon) ? "Y" : "N");
6465 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6466 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6467 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6468 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6469 	if (priv->plat->has_xgmac)
6470 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6471 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6472 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6473 		   (priv->dma_cap.eee) ? "Y" : "N");
6474 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6475 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6476 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6477 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6478 	    priv->plat->has_xgmac) {
6479 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6480 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6481 	} else {
6482 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6483 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6484 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6485 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6486 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6487 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6488 	}
6489 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6490 		   priv->dma_cap.number_rx_channel);
6491 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6492 		   priv->dma_cap.number_tx_channel);
6493 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6494 		   priv->dma_cap.number_rx_queues);
6495 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6496 		   priv->dma_cap.number_tx_queues);
6497 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6498 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6499 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6500 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6501 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6502 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6503 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6504 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6505 		   priv->dma_cap.pps_out_num);
6506 	seq_printf(seq, "\tSafety Features: %s\n",
6507 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6508 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6509 		   priv->dma_cap.frpsel ? "Y" : "N");
6510 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6511 		   priv->dma_cap.host_dma_width);
6512 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6513 		   priv->dma_cap.rssen ? "Y" : "N");
6514 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6515 		   priv->dma_cap.vlhash ? "Y" : "N");
6516 	seq_printf(seq, "\tSplit Header: %s\n",
6517 		   priv->dma_cap.sphen ? "Y" : "N");
6518 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6519 		   priv->dma_cap.vlins ? "Y" : "N");
6520 	seq_printf(seq, "\tDouble VLAN: %s\n",
6521 		   priv->dma_cap.dvlan ? "Y" : "N");
6522 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6523 		   priv->dma_cap.l3l4fnum);
6524 	seq_printf(seq, "\tARP Offloading: %s\n",
6525 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6526 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6527 		   priv->dma_cap.estsel ? "Y" : "N");
6528 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6529 		   priv->dma_cap.fpesel ? "Y" : "N");
6530 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6531 		   priv->dma_cap.tbssel ? "Y" : "N");
6532 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6533 		   priv->dma_cap.tbs_ch_num);
6534 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6535 		   priv->dma_cap.sgfsel ? "Y" : "N");
6536 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6537 		   BIT(priv->dma_cap.ttsfd) >> 1);
6538 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6539 		   priv->dma_cap.numtc);
6540 	seq_printf(seq, "\tDCB Feature: %s\n",
6541 		   priv->dma_cap.dcben ? "Y" : "N");
6542 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6543 		   priv->dma_cap.advthword ? "Y" : "N");
6544 	seq_printf(seq, "\tPTP Offload: %s\n",
6545 		   priv->dma_cap.ptoen ? "Y" : "N");
6546 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6547 		   priv->dma_cap.osten ? "Y" : "N");
6548 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6549 		   priv->dma_cap.pfcen ? "Y" : "N");
6550 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6551 		   BIT(priv->dma_cap.frpes) << 6);
6552 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6553 		   BIT(priv->dma_cap.frpbs) << 6);
6554 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6555 		   priv->dma_cap.frppipe_num);
6556 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6557 		   priv->dma_cap.nrvf_num ?
6558 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6559 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6560 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6561 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6562 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6563 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6564 		   priv->dma_cap.cbtisel ? "Y" : "N");
6565 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6566 		   priv->dma_cap.aux_snapshot_n);
6567 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6568 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6569 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6570 		   priv->dma_cap.edma ? "Y" : "N");
6571 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6572 		   priv->dma_cap.ediffc ? "Y" : "N");
6573 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6574 		   priv->dma_cap.vxn ? "Y" : "N");
6575 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6576 		   priv->dma_cap.dbgmem ? "Y" : "N");
6577 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6578 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6579 	return 0;
6580 }
6581 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6582 
6583 /* Use network device events to rename debugfs file entries.
6584  */
6585 static int stmmac_device_event(struct notifier_block *unused,
6586 			       unsigned long event, void *ptr)
6587 {
6588 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6589 	struct stmmac_priv *priv = netdev_priv(dev);
6590 
6591 	if (dev->netdev_ops != &stmmac_netdev_ops)
6592 		goto done;
6593 
6594 	switch (event) {
6595 	case NETDEV_CHANGENAME:
6596 		if (priv->dbgfs_dir)
6597 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6598 							 priv->dbgfs_dir,
6599 							 stmmac_fs_dir,
6600 							 dev->name);
6601 		break;
6602 	}
6603 done:
6604 	return NOTIFY_DONE;
6605 }
6606 
6607 static struct notifier_block stmmac_notifier = {
6608 	.notifier_call = stmmac_device_event,
6609 };
6610 
6611 static void stmmac_init_fs(struct net_device *dev)
6612 {
6613 	struct stmmac_priv *priv = netdev_priv(dev);
6614 
6615 	rtnl_lock();
6616 
6617 	/* Create per netdev entries */
6618 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6619 
6620 	/* Entry to report DMA RX/TX rings */
6621 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6622 			    &stmmac_rings_status_fops);
6623 
6624 	/* Entry to report the DMA HW features */
6625 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6626 			    &stmmac_dma_cap_fops);
6627 
6628 	rtnl_unlock();
6629 }
6630 
6631 static void stmmac_exit_fs(struct net_device *dev)
6632 {
6633 	struct stmmac_priv *priv = netdev_priv(dev);
6634 
6635 	debugfs_remove_recursive(priv->dbgfs_dir);
6636 }
6637 #endif /* CONFIG_DEBUG_FS */
6638 
6639 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6640 {
6641 	unsigned char *data = (unsigned char *)&vid_le;
6642 	unsigned char data_byte = 0;
6643 	u32 crc = ~0x0;
6644 	u32 temp = 0;
6645 	int i, bits;
6646 
6647 	bits = get_bitmask_order(VLAN_VID_MASK);
6648 	for (i = 0; i < bits; i++) {
6649 		if ((i % 8) == 0)
6650 			data_byte = data[i / 8];
6651 
6652 		temp = ((crc & 1) ^ data_byte) & 1;
6653 		crc >>= 1;
6654 		data_byte >>= 1;
6655 
6656 		if (temp)
6657 			crc ^= 0xedb88320;
6658 	}
6659 
6660 	return crc;
6661 }
6662 
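/* Build the VLAN receive filter: each active VID is hashed by taking the
 * top four bits of the bit-reversed, inverted CRC-32 of the little-endian
 * VID, which selects one of 16 hash-table bits. If the hardware has no
 * VLAN hash support (dma_cap.vlhash == 0), fall back to a single perfect
 * match entry; more than one non-zero VID cannot be supported then.
 */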
6663 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6664 {
6665 	u32 crc, hash = 0;
6666 	__le16 pmatch = 0;
6667 	int count = 0;
6668 	u16 vid = 0;
6669 
6670 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6671 		__le16 vid_le = cpu_to_le16(vid);
6672 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6673 		hash |= (1 << crc);
6674 		count++;
6675 	}
6676 
6677 	if (!priv->dma_cap.vlhash) {
6678 		if (count > 2) /* VID = 0 always passes filter */
6679 			return -EOPNOTSUPP;
6680 
6681 		pmatch = cpu_to_le16(vid);
6682 		hash = 0;
6683 	}
6684 
6685 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6686 }
6687 
6688 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6689 {
6690 	struct stmmac_priv *priv = netdev_priv(ndev);
6691 	bool is_double = false;
6692 	int ret;
6693 
6694 	ret = pm_runtime_resume_and_get(priv->device);
6695 	if (ret < 0)
6696 		return ret;
6697 
6698 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6699 		is_double = true;
6700 
6701 	set_bit(vid, priv->active_vlans);
6702 	ret = stmmac_vlan_update(priv, is_double);
6703 	if (ret) {
6704 		clear_bit(vid, priv->active_vlans);
6705 		goto err_pm_put;
6706 	}
6707 
6708 	if (priv->hw->num_vlan) {
6709 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6710 		if (ret)
6711 			goto err_pm_put;
6712 	}
6713 err_pm_put:
6714 	pm_runtime_put(priv->device);
6715 
6716 	return ret;
6717 }
6718 
6719 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6720 {
6721 	struct stmmac_priv *priv = netdev_priv(ndev);
6722 	bool is_double = false;
6723 	int ret;
6724 
6725 	ret = pm_runtime_resume_and_get(priv->device);
6726 	if (ret < 0)
6727 		return ret;
6728 
6729 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6730 		is_double = true;
6731 
6732 	clear_bit(vid, priv->active_vlans);
6733 
6734 	if (priv->hw->num_vlan) {
6735 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6736 		if (ret)
6737 			goto del_vlan_error;
6738 	}
6739 
6740 	ret = stmmac_vlan_update(priv, is_double);
6741 
6742 del_vlan_error:
6743 	pm_runtime_put(priv->device);
6744 
6745 	return ret;
6746 }
6747 
6748 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6749 {
6750 	struct stmmac_priv *priv = netdev_priv(dev);
6751 
6752 	switch (bpf->command) {
6753 	case XDP_SETUP_PROG:
6754 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6755 	case XDP_SETUP_XSK_POOL:
6756 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6757 					     bpf->xsk.queue_id);
6758 	default:
6759 		return -EOPNOTSUPP;
6760 	}
6761 }
6762 
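/* ndo_xdp_xmit implementation: transmits frames handed over by the
 * XDP_REDIRECT path. The TX queue is selected per-CPU and is shared with
 * the regular stack, hence the __netif_tx_lock() and the trans_start
 * refresh below.
 */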
6763 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6764 			   struct xdp_frame **frames, u32 flags)
6765 {
6766 	struct stmmac_priv *priv = netdev_priv(dev);
6767 	int cpu = smp_processor_id();
6768 	struct netdev_queue *nq;
6769 	int i, nxmit = 0;
6770 	int queue;
6771 
6772 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6773 		return -ENETDOWN;
6774 
6775 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6776 		return -EINVAL;
6777 
6778 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6779 	nq = netdev_get_tx_queue(priv->dev, queue);
6780 
6781 	__netif_tx_lock(nq, cpu);
6782 	/* Avoid TX time-outs as we are sharing the queue with the slow path */
6783 	txq_trans_cond_update(nq);
6784 
6785 	for (i = 0; i < num_frames; i++) {
6786 		int res;
6787 
6788 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6789 		if (res == STMMAC_XDP_CONSUMED)
6790 			break;
6791 
6792 		nxmit++;
6793 	}
6794 
6795 	if (flags & XDP_XMIT_FLUSH) {
6796 		stmmac_flush_tx_descriptors(priv, queue);
6797 		stmmac_tx_timer_arm(priv, queue);
6798 	}
6799 
6800 	__netif_tx_unlock(nq);
6801 
6802 	return nxmit;
6803 }
6804 
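/* The non-static queue enable/disable helpers below tear down and rebuild
 * a single queue's DMA resources at runtime. They are used by the AF_XDP
 * pool setup path (see stmmac_xdp_setup_pool() above) so that attaching or
 * detaching an XSK buffer pool only disturbs the affected queue.
 */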
6805 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6806 {
6807 	struct stmmac_channel *ch = &priv->channel[queue];
6808 	unsigned long flags;
6809 
6810 	spin_lock_irqsave(&ch->lock, flags);
6811 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6812 	spin_unlock_irqrestore(&ch->lock, flags);
6813 
6814 	stmmac_stop_rx_dma(priv, queue);
6815 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6816 }
6817 
6818 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6819 {
6820 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6821 	struct stmmac_channel *ch = &priv->channel[queue];
6822 	unsigned long flags;
6823 	u32 buf_size;
6824 	int ret;
6825 
6826 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6827 	if (ret) {
6828 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6829 		return;
6830 	}
6831 
6832 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6833 	if (ret) {
6834 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6835 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6836 		return;
6837 	}
6838 
6839 	stmmac_reset_rx_queue(priv, queue);
6840 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6841 
6842 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6843 			    rx_q->dma_rx_phy, rx_q->queue_index);
6844 
6845 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6846 			     sizeof(struct dma_desc));
6847 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6848 			       rx_q->rx_tail_addr, rx_q->queue_index);
6849 
6850 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6851 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6852 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6853 				      buf_size,
6854 				      rx_q->queue_index);
6855 	} else {
6856 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6857 				      priv->dma_conf.dma_buf_sz,
6858 				      rx_q->queue_index);
6859 	}
6860 
6861 	stmmac_start_rx_dma(priv, queue);
6862 
6863 	spin_lock_irqsave(&ch->lock, flags);
6864 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6865 	spin_unlock_irqrestore(&ch->lock, flags);
6866 }
6867 
6868 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6869 {
6870 	struct stmmac_channel *ch = &priv->channel[queue];
6871 	unsigned long flags;
6872 
6873 	spin_lock_irqsave(&ch->lock, flags);
6874 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6875 	spin_unlock_irqrestore(&ch->lock, flags);
6876 
6877 	stmmac_stop_tx_dma(priv, queue);
6878 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6879 }
6880 
6881 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6882 {
6883 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6884 	struct stmmac_channel *ch = &priv->channel[queue];
6885 	unsigned long flags;
6886 	int ret;
6887 
6888 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6889 	if (ret) {
6890 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6891 		return;
6892 	}
6893 
6894 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6895 	if (ret) {
6896 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6897 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6898 		return;
6899 	}
6900 
6901 	stmmac_reset_tx_queue(priv, queue);
6902 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6903 
6904 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6905 			    tx_q->dma_tx_phy, tx_q->queue_index);
6906 
6907 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6908 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6909 
6910 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6911 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6912 			       tx_q->tx_tail_addr, tx_q->queue_index);
6913 
6914 	stmmac_start_tx_dma(priv, queue);
6915 
6916 	spin_lock_irqsave(&ch->lock, flags);
6917 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6918 	spin_unlock_irqrestore(&ch->lock, flags);
6919 }
6920 
6921 void stmmac_xdp_release(struct net_device *dev)
6922 {
6923 	struct stmmac_priv *priv = netdev_priv(dev);
6924 	u32 chan;
6925 
6926 	/* Ensure tx function is not running */
6927 	netif_tx_disable(dev);
6928 
6929 	/* Disable NAPI process */
6930 	stmmac_disable_all_queues(priv);
6931 
6932 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6933 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6934 
6935 	/* Free the IRQ lines */
6936 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6937 
6938 	/* Stop TX/RX DMA channels */
6939 	stmmac_stop_all_dma(priv);
6940 
6941 	/* Release and free the Rx/Tx resources */
6942 	free_dma_desc_resources(priv, &priv->dma_conf);
6943 
6944 	/* Disable the MAC Rx/Tx */
6945 	stmmac_mac_set(priv, priv->ioaddr, false);
6946 
6947 	/* set trans_start so we don't get spurious
6948 	 * watchdogs during reset
6949 	 */
6950 	netif_trans_update(dev);
6951 	netif_carrier_off(dev);
6952 }
6953 
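/**
 * stmmac_xdp_open - re-open the data path after an XDP reconfiguration
 * @dev: network device pointer
 * Description: allocate and initialize the DMA descriptor rings, reprogram
 * the RX/TX DMA channels, re-enable the MAC, request the IRQs and restart
 * NAPI and the TX queues. Counterpart of stmmac_xdp_release().
 * Return: 0 on success, a negative errno otherwise.
 */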
6954 int stmmac_xdp_open(struct net_device *dev)
6955 {
6956 	struct stmmac_priv *priv = netdev_priv(dev);
6957 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6958 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6959 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6960 	struct stmmac_rx_queue *rx_q;
6961 	struct stmmac_tx_queue *tx_q;
6962 	u32 buf_size;
6963 	bool sph_en;
6964 	u32 chan;
6965 	int ret;
6966 
6967 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6968 	if (ret < 0) {
6969 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6970 			   __func__);
6971 		goto dma_desc_error;
6972 	}
6973 
6974 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6975 	if (ret < 0) {
6976 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6977 			   __func__);
6978 		goto init_error;
6979 	}
6980 
6981 	stmmac_reset_queues_param(priv);
6982 
6983 	/* DMA CSR Channel configuration */
6984 	for (chan = 0; chan < dma_csr_ch; chan++) {
6985 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6986 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6987 	}
6988 
6989 	/* Adjust Split header */
6990 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6991 
6992 	/* DMA RX Channel Configuration */
6993 	for (chan = 0; chan < rx_cnt; chan++) {
6994 		rx_q = &priv->dma_conf.rx_queue[chan];
6995 
6996 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6997 				    rx_q->dma_rx_phy, chan);
6998 
6999 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
7000 				     (rx_q->buf_alloc_num *
7001 				      sizeof(struct dma_desc));
7002 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
7003 				       rx_q->rx_tail_addr, chan);
7004 
7005 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
7006 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
7007 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7008 					      buf_size,
7009 					      rx_q->queue_index);
7010 		} else {
7011 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7012 					      priv->dma_conf.dma_buf_sz,
7013 					      rx_q->queue_index);
7014 		}
7015 
7016 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
7017 	}
7018 
7019 	/* DMA TX Channel Configuration */
7020 	for (chan = 0; chan < tx_cnt; chan++) {
7021 		tx_q = &priv->dma_conf.tx_queue[chan];
7022 
7023 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7024 				    tx_q->dma_tx_phy, chan);
7025 
7026 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7027 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7028 				       tx_q->tx_tail_addr, chan);
7029 
7030 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7031 		tx_q->txtimer.function = stmmac_tx_timer;
7032 	}
7033 
7034 	/* Enable the MAC Rx/Tx */
7035 	stmmac_mac_set(priv, priv->ioaddr, true);
7036 
7037 	/* Start Rx & Tx DMA Channels */
7038 	stmmac_start_all_dma(priv);
7039 
7040 	ret = stmmac_request_irq(dev);
7041 	if (ret)
7042 		goto irq_error;
7043 
7044 	/* Enable NAPI process */
7045 	stmmac_enable_all_queues(priv);
7046 	netif_carrier_on(dev);
7047 	netif_tx_start_all_queues(dev);
7048 	stmmac_enable_all_dma_irq(priv);
7049 
7050 	return 0;
7051 
7052 irq_error:
7053 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7054 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7055 
7056 	stmmac_hw_teardown(dev);
7057 init_error:
7058 	free_dma_desc_resources(priv, &priv->dma_conf);
7059 dma_desc_error:
7060 	return ret;
7061 }
7062 
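/**
 * stmmac_xsk_wakeup - ndo_xsk_wakeup handler for AF_XDP zero-copy
 * @dev: network device pointer
 * @queue: queue index to wake up
 * @flags: XDP_WAKEUP_* flags (unused here)
 * Description: kick the RX/TX NAPI of the channel backing an XSK pool so
 * that pending descriptors are processed.
 * Return: 0 on success, -ENETDOWN if the interface is down, -EINVAL if XDP
 * is not enabled, the queue is out of range or has no XSK pool attached.
 */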
7063 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7064 {
7065 	struct stmmac_priv *priv = netdev_priv(dev);
7066 	struct stmmac_rx_queue *rx_q;
7067 	struct stmmac_tx_queue *tx_q;
7068 	struct stmmac_channel *ch;
7069 
7070 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7071 	    !netif_carrier_ok(priv->dev))
7072 		return -ENETDOWN;
7073 
7074 	if (!stmmac_xdp_is_enabled(priv))
7075 		return -EINVAL;
7076 
7077 	if (queue >= priv->plat->rx_queues_to_use ||
7078 	    queue >= priv->plat->tx_queues_to_use)
7079 		return -EINVAL;
7080 
7081 	rx_q = &priv->dma_conf.rx_queue[queue];
7082 	tx_q = &priv->dma_conf.tx_queue[queue];
7083 	ch = &priv->channel[queue];
7084 
7085 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7086 		return -EINVAL;
7087 
7088 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7089 		/* EQoS does not have a per-DMA channel SW interrupt,
7090 		 * so schedule the RX/TX NAPI straight away.
7091 		 */
7092 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7093 			__napi_schedule(&ch->rxtx_napi);
7094 	}
7095 
7096 	return 0;
7097 }
7098 
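/* .ndo_get_stats64 handler: aggregate the per-queue software counters and
 * the error counters kept in priv->xstats into rtnl_link_stats64.
 */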
7099 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7100 {
7101 	struct stmmac_priv *priv = netdev_priv(dev);
7102 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7103 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7104 	unsigned int start;
7105 	int q;
7106 
7107 	for (q = 0; q < tx_cnt; q++) {
7108 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7109 		u64 tx_packets;
7110 		u64 tx_bytes;
7111 
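		/* The u64_stats fetch/retry loops re-read the counters if a
		 * writer updated them concurrently, so the snapshot is
		 * consistent even on 32-bit hosts.
		 */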
7112 		do {
7113 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7114 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7115 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7116 		do {
7117 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7118 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7119 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7120 
7121 		stats->tx_packets += tx_packets;
7122 		stats->tx_bytes += tx_bytes;
7123 	}
7124 
7125 	for (q = 0; q < rx_cnt; q++) {
7126 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7127 		u64 rx_packets;
7128 		u64 rx_bytes;
7129 
7130 		do {
7131 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7132 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7133 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7134 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7135 
7136 		stats->rx_packets += rx_packets;
7137 		stats->rx_bytes += rx_bytes;
7138 	}
7139 
7140 	stats->rx_dropped = priv->xstats.rx_dropped;
7141 	stats->rx_errors = priv->xstats.rx_errors;
7142 	stats->tx_dropped = priv->xstats.tx_dropped;
7143 	stats->tx_errors = priv->xstats.tx_errors;
7144 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7145 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7146 	stats->rx_length_errors = priv->xstats.rx_length;
7147 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7148 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7149 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7150 }
7151 
7152 static const struct net_device_ops stmmac_netdev_ops = {
7153 	.ndo_open = stmmac_open,
7154 	.ndo_start_xmit = stmmac_xmit,
7155 	.ndo_stop = stmmac_release,
7156 	.ndo_change_mtu = stmmac_change_mtu,
7157 	.ndo_fix_features = stmmac_fix_features,
7158 	.ndo_set_features = stmmac_set_features,
7159 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7160 	.ndo_tx_timeout = stmmac_tx_timeout,
7161 	.ndo_eth_ioctl = stmmac_ioctl,
7162 	.ndo_get_stats64 = stmmac_get_stats64,
7163 	.ndo_setup_tc = stmmac_setup_tc,
7164 	.ndo_select_queue = stmmac_select_queue,
7165 	.ndo_set_mac_address = stmmac_set_mac_address,
7166 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7167 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7168 	.ndo_bpf = stmmac_bpf,
7169 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7170 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7171 };
7172 
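/* Service-task subtask: if a reset was requested (STMMAC_RESET_REQUESTED)
 * and the interface is not already down, restart it by closing and
 * re-opening the device under the RTNL lock.
 */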
7173 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7174 {
7175 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7176 		return;
7177 	if (test_bit(STMMAC_DOWN, &priv->state))
7178 		return;
7179 
7180 	netdev_err(priv->dev, "Reset adapter.\n");
7181 
7182 	rtnl_lock();
7183 	netif_trans_update(priv->dev);
7184 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7185 		usleep_range(1000, 2000);
7186 
7187 	set_bit(STMMAC_DOWN, &priv->state);
7188 	dev_close(priv->dev);
7189 	dev_open(priv->dev, NULL);
7190 	clear_bit(STMMAC_DOWN, &priv->state);
7191 	clear_bit(STMMAC_RESETING, &priv->state);
7192 	rtnl_unlock();
7193 }
7194 
7195 static void stmmac_service_task(struct work_struct *work)
7196 {
7197 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7198 			service_task);
7199 
7200 	stmmac_reset_subtask(priv);
7201 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7202 }
7203 
7204 /**
7205  *  stmmac_hw_init - Init the MAC device
7206  *  @priv: driver private structure
7207  *  Description: this function is to configure the MAC device according to
7208  *  some platform parameters or the HW capability register. It prepares the
7209  *  driver to use either ring or chain modes and to setup either enhanced or
7210  *  normal descriptors.
7211  */
7212 static int stmmac_hw_init(struct stmmac_priv *priv)
7213 {
7214 	int ret;
7215 
7216 	/* dwmac-sun8i only works in chain mode */
7217 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7218 		chain_mode = 1;
7219 	priv->chain_mode = chain_mode;
7220 
7221 	/* Initialize HW Interface */
7222 	ret = stmmac_hwif_init(priv);
7223 	if (ret)
7224 		return ret;
7225 
7226 	/* Get the HW capabilities (on GMAC cores newer than 3.50a) */
7227 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7228 	if (priv->hw_cap_support) {
7229 		dev_info(priv->device, "DMA HW capability register supported\n");
7230 
7231 		/* Some GMAC/DMA configuration fields passed in from the
7232 		 * platform (e.g. enh_desc, tx_coe) can be overridden
7233 		 * with the values from the HW capability register,
7234 		 * if supported.
7235 		 */
7236 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7237 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7238 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7239 		priv->hw->pmt = priv->plat->pmt;
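		/* The hash table size is encoded: the number of multicast
		 * filter bins is BIT(hash_tb_sz) << 5, i.e. 2^hash_tb_sz * 32,
		 * and mcast_bits_log2 is its base-2 logarithm.
		 */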
7240 		if (priv->dma_cap.hash_tb_sz) {
7241 			priv->hw->multicast_filter_bins =
7242 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7243 			priv->hw->mcast_bits_log2 =
7244 					ilog2(priv->hw->multicast_filter_bins);
7245 		}
7246 
7247 		/* TXCOE doesn't work in thresh DMA mode */
7248 		if (priv->plat->force_thresh_dma_mode)
7249 			priv->plat->tx_coe = 0;
7250 		else
7251 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7252 
7253 		/* For GMAC4, rx_coe comes from the HW capability register. */
7254 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7255 
7256 		if (priv->dma_cap.rx_coe_type2)
7257 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7258 		else if (priv->dma_cap.rx_coe_type1)
7259 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7260 
7261 	} else {
7262 		dev_info(priv->device, "No HW DMA feature register supported\n");
7263 	}
7264 
7265 	if (priv->plat->rx_coe) {
7266 		priv->hw->rx_csum = priv->plat->rx_coe;
7267 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7268 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7269 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7270 	}
7271 	if (priv->plat->tx_coe)
7272 		dev_info(priv->device, "TX Checksum insertion supported\n");
7273 
7274 	if (priv->plat->pmt) {
7275 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7276 		device_set_wakeup_capable(priv->device, 1);
7277 	}
7278 
7279 	if (priv->dma_cap.tsoen)
7280 		dev_info(priv->device, "TSO supported\n");
7281 
7282 	priv->hw->vlan_fail_q_en =
7283 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7284 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7285 
7286 	/* Run HW quirks, if any */
7287 	if (priv->hwif_quirks) {
7288 		ret = priv->hwif_quirks(priv);
7289 		if (ret)
7290 			return ret;
7291 	}
7292 
7293 	/* The Rx Watchdog is available on cores newer than 3.40.
7294 	 * In some cases, for example on buggy HW, this feature
7295 	 * has to be disabled; this can be done by setting the
7296 	 * riwt_off field in the platform data.
7297 	 */
7298 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7299 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7300 		priv->use_riwt = 1;
7301 		dev_info(priv->device,
7302 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7303 	}
7304 
7305 	return 0;
7306 }
7307 
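/* Register the NAPI instances: one RX and/or one TX NAPI per channel, plus
 * a combined RX/TX NAPI for channels that have both an RX and a TX queue.
 */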
7308 static void stmmac_napi_add(struct net_device *dev)
7309 {
7310 	struct stmmac_priv *priv = netdev_priv(dev);
7311 	u32 queue, maxq;
7312 
7313 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7314 
7315 	for (queue = 0; queue < maxq; queue++) {
7316 		struct stmmac_channel *ch = &priv->channel[queue];
7317 
7318 		ch->priv_data = priv;
7319 		ch->index = queue;
7320 		spin_lock_init(&ch->lock);
7321 
7322 		if (queue < priv->plat->rx_queues_to_use) {
7323 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7324 		}
7325 		if (queue < priv->plat->tx_queues_to_use) {
7326 			netif_napi_add_tx(dev, &ch->tx_napi,
7327 					  stmmac_napi_poll_tx);
7328 		}
7329 		if (queue < priv->plat->rx_queues_to_use &&
7330 		    queue < priv->plat->tx_queues_to_use) {
7331 			netif_napi_add(dev, &ch->rxtx_napi,
7332 				       stmmac_napi_poll_rxtx);
7333 		}
7334 	}
7335 }
7336 
7337 static void stmmac_napi_del(struct net_device *dev)
7338 {
7339 	struct stmmac_priv *priv = netdev_priv(dev);
7340 	u32 queue, maxq;
7341 
7342 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7343 
7344 	for (queue = 0; queue < maxq; queue++) {
7345 		struct stmmac_channel *ch = &priv->channel[queue];
7346 
7347 		if (queue < priv->plat->rx_queues_to_use)
7348 			netif_napi_del(&ch->rx_napi);
7349 		if (queue < priv->plat->tx_queues_to_use)
7350 			netif_napi_del(&ch->tx_napi);
7351 		if (queue < priv->plat->rx_queues_to_use &&
7352 		    queue < priv->plat->tx_queues_to_use) {
7353 			netif_napi_del(&ch->rxtx_napi);
7354 		}
7355 	}
7356 }
7357 
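/* Change the number of RX/TX queues in use (ethtool channel
 * reconfiguration): close the interface if it is running, re-register the
 * NAPI instances, refresh the default RSS table unless it was
 * user-configured, and re-open the interface.
 */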
7358 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7359 {
7360 	struct stmmac_priv *priv = netdev_priv(dev);
7361 	int ret = 0, i;
7362 
7363 	if (netif_running(dev))
7364 		stmmac_release(dev);
7365 
7366 	stmmac_napi_del(dev);
7367 
7368 	priv->plat->rx_queues_to_use = rx_cnt;
7369 	priv->plat->tx_queues_to_use = tx_cnt;
7370 	if (!netif_is_rxfh_configured(dev))
7371 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7372 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7373 									rx_cnt);
7374 
7375 	stmmac_set_half_duplex(priv);
7376 	stmmac_napi_add(dev);
7377 
7378 	if (netif_running(dev))
7379 		ret = stmmac_open(dev);
7380 
7381 	return ret;
7382 }
7383 
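/* Change the RX/TX descriptor ring sizes (ethtool ringparam): close the
 * interface if it is running, record the new sizes and re-open it so the
 * rings are re-allocated with the new sizes.
 */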
7384 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7385 {
7386 	struct stmmac_priv *priv = netdev_priv(dev);
7387 	int ret = 0;
7388 
7389 	if (netif_running(dev))
7390 		stmmac_release(dev);
7391 
7392 	priv->dma_conf.dma_rx_size = rx_size;
7393 	priv->dma_conf.dma_tx_size = tx_size;
7394 
7395 	if (netif_running(dev))
7396 		ret = stmmac_open(dev);
7397 
7398 	return ret;
7399 }
7400 
7401 #define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
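/* Frame Preemption (FPE) link-partner handshake worker: poll the local and
 * link-partner FPE states for up to 20 * 500ms, sending Verify mPackets
 * until both sides are entering the ON state, then enable FPE in HW.
 */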
7402 static void stmmac_fpe_lp_task(struct work_struct *work)
7403 {
7404 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7405 						fpe_task);
7406 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7407 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7408 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7409 	bool *hs_enable = &fpe_cfg->hs_enable;
7410 	bool *enable = &fpe_cfg->enable;
7411 	int retries = 20;
7412 
7413 	while (retries-- > 0) {
7414 		/* Bail out immediately if FPE handshake is OFF */
7415 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7416 			break;
7417 
7418 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7419 		    *lp_state == FPE_STATE_ENTERING_ON) {
7420 			stmmac_fpe_configure(priv, priv->ioaddr,
7421 					     fpe_cfg,
7422 					     priv->plat->tx_queues_to_use,
7423 					     priv->plat->rx_queues_to_use,
7424 					     *enable);
7425 
7426 			netdev_info(priv->dev, "configured FPE\n");
7427 
7428 			*lo_state = FPE_STATE_ON;
7429 			*lp_state = FPE_STATE_ON;
7430 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7431 			break;
7432 		}
7433 
7434 		if ((*lo_state == FPE_STATE_CAPABLE ||
7435 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7436 		     *lp_state != FPE_STATE_ON) {
7437 			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
7438 				    *lo_state, *lp_state);
7439 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7440 						fpe_cfg,
7441 						MPACKET_VERIFY);
7442 		}
7443 		/* Sleep then retry */
7444 		msleep(500);
7445 	}
7446 
7447 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7448 }
7449 
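/* Start or stop the FPE verification handshake: on enable, send an initial
 * Verify mPacket; on disable, force both the local and the link-partner FPE
 * states back to OFF.
 */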
7450 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7451 {
7452 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7453 		if (enable) {
7454 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7455 						priv->plat->fpe_cfg,
7456 						MPACKET_VERIFY);
7457 		} else {
7458 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7459 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7460 		}
7461 
7462 		priv->plat->fpe_cfg->hs_enable = enable;
7463 	}
7464 }
7465 
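/* XDP RX metadata hook (xmo_rx_timestamp): return the HW RX timestamp of
 * the current frame, corrected by the CDC error adjustment, or -ENODATA if
 * RX timestamping is disabled or no timestamp is available.
 */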
7466 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7467 {
7468 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7469 	struct dma_desc *desc_contains_ts = ctx->desc;
7470 	struct stmmac_priv *priv = ctx->priv;
7471 	struct dma_desc *ndesc = ctx->ndesc;
7472 	struct dma_desc *desc = ctx->desc;
7473 	u64 ns = 0;
7474 
7475 	if (!priv->hwts_rx_en)
7476 		return -ENODATA;
7477 
7478 	/* For GMAC4/XGMAC, the valid timestamp is in the context (next) desc. */
7479 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7480 		desc_contains_ts = ndesc;
7481 
7482 	/* Check if timestamp is available */
7483 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7484 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7485 		ns -= priv->plat->cdc_error_adj;
7486 		*timestamp = ns_to_ktime(ns);
7487 		return 0;
7488 	}
7489 
7490 	return -ENODATA;
7491 }
7492 
7493 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7494 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7495 };
7496 
7497 /**
7498  * stmmac_dvr_probe
7499  * @device: device pointer
7500  * @plat_dat: platform data pointer
7501  * @res: stmmac resource pointer
7502  * Description: this is the main probe function, used to
7503  * call alloc_etherdev and allocate the private structure.
7504  * Return:
7505  * 0 on success, a negative errno otherwise.
7506  */
7507 int stmmac_dvr_probe(struct device *device,
7508 		     struct plat_stmmacenet_data *plat_dat,
7509 		     struct stmmac_resources *res)
7510 {
7511 	struct net_device *ndev = NULL;
7512 	struct stmmac_priv *priv;
7513 	u32 rxq;
7514 	int i, ret = 0;
7515 
7516 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7517 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7518 	if (!ndev)
7519 		return -ENOMEM;
7520 
7521 	SET_NETDEV_DEV(ndev, device);
7522 
7523 	priv = netdev_priv(ndev);
7524 	priv->device = device;
7525 	priv->dev = ndev;
7526 
7527 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7528 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7529 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7530 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7531 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7532 	}
7533 
7534 	priv->xstats.pcpu_stats =
7535 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7536 	if (!priv->xstats.pcpu_stats)
7537 		return -ENOMEM;
7538 
7539 	stmmac_set_ethtool_ops(ndev);
7540 	priv->pause = pause;
7541 	priv->plat = plat_dat;
7542 	priv->ioaddr = res->addr;
7543 	priv->dev->base_addr = (unsigned long)res->addr;
7544 	priv->plat->dma_cfg->multi_msi_en =
7545 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7546 
7547 	priv->dev->irq = res->irq;
7548 	priv->wol_irq = res->wol_irq;
7549 	priv->lpi_irq = res->lpi_irq;
7550 	priv->sfty_irq = res->sfty_irq;
7551 	priv->sfty_ce_irq = res->sfty_ce_irq;
7552 	priv->sfty_ue_irq = res->sfty_ue_irq;
7553 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7554 		priv->rx_irq[i] = res->rx_irq[i];
7555 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7556 		priv->tx_irq[i] = res->tx_irq[i];
7557 
7558 	if (!is_zero_ether_addr(res->mac))
7559 		eth_hw_addr_set(priv->dev, res->mac);
7560 
7561 	dev_set_drvdata(device, priv->dev);
7562 
7563 	/* Verify driver arguments */
7564 	stmmac_verify_args();
7565 
7566 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7567 	if (!priv->af_xdp_zc_qps)
7568 		return -ENOMEM;
7569 
7570 	/* Allocate workqueue */
7571 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7572 	if (!priv->wq) {
7573 		dev_err(priv->device, "failed to create workqueue\n");
7574 		ret = -ENOMEM;
7575 		goto error_wq_init;
7576 	}
7577 
7578 	INIT_WORK(&priv->service_task, stmmac_service_task);
7579 
7580 	/* Initialize Link Partner FPE workqueue */
7581 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7582 
7583 	/* Override with kernel parameters if supplied. XXX CRS XXX:
7584 	 * this needs to have multiple instances.
7585 	 */
7586 	if ((phyaddr >= 0) && (phyaddr <= 31))
7587 		priv->plat->phy_addr = phyaddr;
7588 
7589 	if (priv->plat->stmmac_rst) {
7590 		ret = reset_control_assert(priv->plat->stmmac_rst);
7591 		reset_control_deassert(priv->plat->stmmac_rst);
7592 		/* Some reset controllers have only a reset callback instead
7593 		 * of an assert + deassert callback pair.
7594 		 */
7595 		if (ret == -ENOTSUPP)
7596 			reset_control_reset(priv->plat->stmmac_rst);
7597 	}
7598 
7599 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7600 	if (ret == -ENOTSUPP)
7601 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7602 			ERR_PTR(ret));
7603 
7604 	/* Wait a bit for the reset to take effect */
7605 	udelay(10);
7606 
7607 	/* Init MAC and get the capabilities */
7608 	ret = stmmac_hw_init(priv);
7609 	if (ret)
7610 		goto error_hw_init;
7611 
7612 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7613 	 */
7614 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7615 		priv->plat->dma_cfg->dche = false;
7616 
7617 	stmmac_check_ether_addr(priv);
7618 
7619 	ndev->netdev_ops = &stmmac_netdev_ops;
7620 
7621 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7622 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7623 
7624 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7625 			    NETIF_F_RXCSUM;
7626 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7627 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7628 
7629 	ret = stmmac_tc_init(priv, priv);
7630 	if (!ret) {
7631 		ndev->hw_features |= NETIF_F_HW_TC;
7632 	}
7633 
7634 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7635 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7636 		if (priv->plat->has_gmac4)
7637 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7638 		priv->tso = true;
7639 		dev_info(priv->device, "TSO feature enabled\n");
7640 	}
7641 
7642 	if (priv->dma_cap.sphen &&
7643 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7644 		ndev->hw_features |= NETIF_F_GRO;
7645 		priv->sph_cap = true;
7646 		priv->sph = priv->sph_cap;
7647 		dev_info(priv->device, "SPH feature enabled\n");
7648 	}
7649 
7650 	/* Ideally our host DMA address width is the same as for the
7651 	 * device. However, it may differ and then we have to use our
7652 	 * host DMA width for allocation and the device DMA width for
7653 	 * register handling.
7654 	 */
7655 	if (priv->plat->host_dma_width)
7656 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7657 	else
7658 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7659 
7660 	if (priv->dma_cap.host_dma_width) {
7661 		ret = dma_set_mask_and_coherent(device,
7662 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7663 		if (!ret) {
7664 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7665 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7666 
7667 			/*
7668 			 * If more than 32 bits can be addressed, make sure to
7669 			 * enable enhanced addressing mode.
7670 			 */
7671 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7672 				priv->plat->dma_cfg->eame = true;
7673 		} else {
7674 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7675 			if (ret) {
7676 				dev_err(priv->device, "Failed to set DMA Mask\n");
7677 				goto error_hw_init;
7678 			}
7679 
7680 			priv->dma_cap.host_dma_width = 32;
7681 		}
7682 	}
7683 
7684 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7685 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7686 #ifdef STMMAC_VLAN_TAG_USED
7687 	/* Both mac100 and gmac support receive VLAN tag detection */
7688 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7689 	ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7690 	priv->hw->hw_vlan_en = true;
7691 
7692 	if (priv->dma_cap.vlhash) {
7693 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7694 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7695 	}
7696 	if (priv->dma_cap.vlins) {
7697 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7698 		if (priv->dma_cap.dvlan)
7699 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7700 	}
7701 #endif
7702 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7703 
7704 	priv->xstats.threshold = tc;
7705 
7706 	/* Initialize RSS */
7707 	rxq = priv->plat->rx_queues_to_use;
7708 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7709 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7710 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7711 
7712 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7713 		ndev->features |= NETIF_F_RXHASH;
7714 
7715 	ndev->vlan_features |= ndev->features;
7716 	/* TSO doesn't work on VLANs yet */
7717 	ndev->vlan_features &= ~NETIF_F_TSO;
7718 
7719 	/* MTU range: 46 - hw-specific max */
7720 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7721 	if (priv->plat->has_xgmac)
7722 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7723 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7724 		ndev->max_mtu = JUMBO_LEN;
7725 	else
7726 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7727 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7728 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7729 	 */
7730 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7731 	    (priv->plat->maxmtu >= ndev->min_mtu))
7732 		ndev->max_mtu = priv->plat->maxmtu;
7733 	else if (priv->plat->maxmtu < ndev->min_mtu)
7734 		dev_warn(priv->device,
7735 			 "%s: warning: maxmtu having invalid value (%d)\n",
7736 			 __func__, priv->plat->maxmtu);
7737 
7738 	if (flow_ctrl)
7739 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7740 
7741 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7742 
7743 	/* Setup channels NAPI */
7744 	stmmac_napi_add(ndev);
7745 
7746 	mutex_init(&priv->lock);
7747 
7748 	/* If a specific clk_csr value is passed from the platform,
7749 	 * the CSR Clock Range selection cannot be changed at
7750 	 * run-time and is fixed. Otherwise, the driver tries to
7751 	 * set the MDC clock dynamically according to the actual
7752 	 * CSR input clock.
7753 	 */
7754 	if (priv->plat->clk_csr >= 0)
7755 		priv->clk_csr = priv->plat->clk_csr;
7756 	else
7757 		stmmac_clk_csr_set(priv);
7758 
7759 	stmmac_check_pcs_mode(priv);
7760 
7761 	pm_runtime_get_noresume(device);
7762 	pm_runtime_set_active(device);
7763 	if (!pm_runtime_enabled(device))
7764 		pm_runtime_enable(device);
7765 
7766 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7767 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
7768 		/* MDIO bus Registration */
7769 		ret = stmmac_mdio_register(ndev);
7770 		if (ret < 0) {
7771 			dev_err_probe(priv->device, ret,
7772 				      "%s: MDIO bus (id: %d) registration failed\n",
7773 				      __func__, priv->plat->bus_id);
7774 			goto error_mdio_register;
7775 		}
7776 	}
7777 
7778 	if (priv->plat->speed_mode_2500)
7779 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7780 
7781 	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7782 		ret = stmmac_xpcs_setup(priv->mii);
7783 		if (ret)
7784 			goto error_xpcs_setup;
7785 	}
7786 
7787 	ret = stmmac_phy_setup(priv);
7788 	if (ret) {
7789 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7790 		goto error_phy_setup;
7791 	}
7792 
7793 	ret = register_netdev(ndev);
7794 	if (ret) {
7795 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7796 			__func__, ret);
7797 		goto error_netdev_register;
7798 	}
7799 
7800 #ifdef CONFIG_DEBUG_FS
7801 	stmmac_init_fs(ndev);
7802 #endif
7803 
7804 	if (priv->plat->dump_debug_regs)
7805 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7806 
7807 	/* Let pm_runtime_put() disable the clocks.
7808 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7809 	 */
7810 	pm_runtime_put(device);
7811 
7812 	return ret;
7813 
7814 error_netdev_register:
7815 	phylink_destroy(priv->phylink);
7816 error_xpcs_setup:
7817 error_phy_setup:
7818 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7819 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7820 		stmmac_mdio_unregister(ndev);
7821 error_mdio_register:
7822 	stmmac_napi_del(ndev);
7823 error_hw_init:
7824 	destroy_workqueue(priv->wq);
7825 error_wq_init:
7826 	bitmap_free(priv->af_xdp_zc_qps);
7827 
7828 	return ret;
7829 }
7830 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7831 
7832 /**
7833  * stmmac_dvr_remove
7834  * @dev: device pointer
7835  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7836  * changes the link status and releases the DMA descriptor rings.
7837  */
7838 void stmmac_dvr_remove(struct device *dev)
7839 {
7840 	struct net_device *ndev = dev_get_drvdata(dev);
7841 	struct stmmac_priv *priv = netdev_priv(ndev);
7842 
7843 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7844 
7845 	pm_runtime_get_sync(dev);
7846 
7847 	stmmac_stop_all_dma(priv);
7848 	stmmac_mac_set(priv, priv->ioaddr, false);
7849 	netif_carrier_off(ndev);
7850 	unregister_netdev(ndev);
7851 
7852 #ifdef CONFIG_DEBUG_FS
7853 	stmmac_exit_fs(ndev);
7854 #endif
7855 	phylink_destroy(priv->phylink);
7856 	if (priv->plat->stmmac_rst)
7857 		reset_control_assert(priv->plat->stmmac_rst);
7858 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7859 	if (priv->hw->pcs != STMMAC_PCS_TBI &&
7860 	    priv->hw->pcs != STMMAC_PCS_RTBI)
7861 		stmmac_mdio_unregister(ndev);
7862 	destroy_workqueue(priv->wq);
7863 	mutex_destroy(&priv->lock);
7864 	bitmap_free(priv->af_xdp_zc_qps);
7865 
7866 	pm_runtime_disable(dev);
7867 	pm_runtime_put_noidle(dev);
7868 }
7869 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7870 
7871 /**
7872  * stmmac_suspend - suspend callback
7873  * @dev: device pointer
7874  * Description: this function suspends the device. It is called by the
7875  * platform driver to stop the network queue, release the resources,
7876  * program the PMT register (for WoL) and clean up driver resources.
7877  */
7878 int stmmac_suspend(struct device *dev)
7879 {
7880 	struct net_device *ndev = dev_get_drvdata(dev);
7881 	struct stmmac_priv *priv = netdev_priv(ndev);
7882 	u32 chan;
7883 
7884 	if (!ndev || !netif_running(ndev))
7885 		return 0;
7886 
7887 	mutex_lock(&priv->lock);
7888 
7889 	netif_device_detach(ndev);
7890 
7891 	stmmac_disable_all_queues(priv);
7892 
7893 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7894 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7895 
7896 	if (priv->eee_enabled) {
7897 		priv->tx_path_in_lpi_mode = false;
7898 		del_timer_sync(&priv->eee_ctrl_timer);
7899 	}
7900 
7901 	/* Stop TX/RX DMA */
7902 	stmmac_stop_all_dma(priv);
7903 
7904 	if (priv->plat->serdes_powerdown)
7905 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7906 
7907 	/* Enable Power down mode by programming the PMT regs */
7908 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7909 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7910 		priv->irq_wake = 1;
7911 	} else {
7912 		stmmac_mac_set(priv, priv->ioaddr, false);
7913 		pinctrl_pm_select_sleep_state(priv->device);
7914 	}
7915 
7916 	mutex_unlock(&priv->lock);
7917 
7918 	rtnl_lock();
7919 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7920 		phylink_suspend(priv->phylink, true);
7921 	} else {
7922 		if (device_may_wakeup(priv->device))
7923 			phylink_speed_down(priv->phylink, false);
7924 		phylink_suspend(priv->phylink, false);
7925 	}
7926 	rtnl_unlock();
7927 
7928 	if (priv->dma_cap.fpesel) {
7929 		/* Disable FPE */
7930 		stmmac_fpe_configure(priv, priv->ioaddr,
7931 				     priv->plat->fpe_cfg,
7932 				     priv->plat->tx_queues_to_use,
7933 				     priv->plat->rx_queues_to_use, false);
7934 
7935 		stmmac_fpe_handshake(priv, false);
7936 		stmmac_fpe_stop_wq(priv);
7937 	}
7938 
7939 	priv->speed = SPEED_UNKNOWN;
7940 	return 0;
7941 }
7942 EXPORT_SYMBOL_GPL(stmmac_suspend);
7943 
7944 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7945 {
7946 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7947 
7948 	rx_q->cur_rx = 0;
7949 	rx_q->dirty_rx = 0;
7950 }
7951 
7952 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7953 {
7954 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7955 
7956 	tx_q->cur_tx = 0;
7957 	tx_q->dirty_tx = 0;
7958 	tx_q->mss = 0;
7959 
7960 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7961 }
7962 
7963 /**
7964  * stmmac_reset_queues_param - reset queue parameters
7965  * @priv: driver private structure
7966  */
7967 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7968 {
7969 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7970 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7971 	u32 queue;
7972 
7973 	for (queue = 0; queue < rx_cnt; queue++)
7974 		stmmac_reset_rx_queue(priv, queue);
7975 
7976 	for (queue = 0; queue < tx_cnt; queue++)
7977 		stmmac_reset_tx_queue(priv, queue);
7978 }
7979 
7980 /**
7981  * stmmac_resume - resume callback
7982  * @dev: device pointer
7983  * Description: when resume this function is invoked to setup the DMA and CORE
7984  * Description: on resume, this function is invoked to set up the DMA and CORE
7985  * in a usable state.
7986 int stmmac_resume(struct device *dev)
7987 {
7988 	struct net_device *ndev = dev_get_drvdata(dev);
7989 	struct stmmac_priv *priv = netdev_priv(ndev);
7990 	int ret;
7991 
7992 	if (!netif_running(ndev))
7993 		return 0;
7994 
7995 	/* The Power Down bit in the PMT register is cleared
7996 	 * automatically as soon as a magic packet or a Wake-up frame
7997 	 * is received. Even so, it's better to clear this bit
7998 	 * manually because it can cause problems when resuming
7999 	 * from other devices (e.g. a serial console).
8000 	 */
8001 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
8002 		mutex_lock(&priv->lock);
8003 		stmmac_pmt(priv, priv->hw, 0);
8004 		mutex_unlock(&priv->lock);
8005 		priv->irq_wake = 0;
8006 	} else {
8007 		pinctrl_pm_select_default_state(priv->device);
8008 		/* reset the phy so that it's ready */
8009 		if (priv->mii)
8010 			stmmac_mdio_reset(priv->mii);
8011 	}
8012 
8013 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
8014 	    priv->plat->serdes_powerup) {
8015 		ret = priv->plat->serdes_powerup(ndev,
8016 						 priv->plat->bsp_priv);
8017 
8018 		if (ret < 0)
8019 			return ret;
8020 	}
8021 
8022 	rtnl_lock();
8023 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
8024 		phylink_resume(priv->phylink);
8025 	} else {
8026 		phylink_resume(priv->phylink);
8027 		if (device_may_wakeup(priv->device))
8028 			phylink_speed_up(priv->phylink);
8029 	}
8030 	rtnl_unlock();
8031 
8032 	rtnl_lock();
8033 	mutex_lock(&priv->lock);
8034 
8035 	stmmac_reset_queues_param(priv);
8036 
8037 	stmmac_free_tx_skbufs(priv);
8038 	stmmac_clear_descriptors(priv, &priv->dma_conf);
8039 
8040 	stmmac_hw_setup(ndev, false);
8041 	stmmac_init_coalesce(priv);
8042 	stmmac_set_rx_mode(ndev);
8043 
8044 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8045 
8046 	stmmac_enable_all_queues(priv);
8047 	stmmac_enable_all_dma_irq(priv);
8048 
8049 	mutex_unlock(&priv->lock);
8050 	rtnl_unlock();
8051 
8052 	netif_device_attach(ndev);
8053 
8054 	return 0;
8055 }
8056 EXPORT_SYMBOL_GPL(stmmac_resume);
8057 
8058 #ifndef MODULE
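/* Parse the built-in "stmmaceth=" command line options, a comma-separated
 * list of "name:value" pairs. An illustrative (not exhaustive) example:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:10000
 */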
8059 static int __init stmmac_cmdline_opt(char *str)
8060 {
8061 	char *opt;
8062 
8063 	if (!str || !*str)
8064 		return 1;
8065 	while ((opt = strsep(&str, ",")) != NULL) {
8066 		if (!strncmp(opt, "debug:", 6)) {
8067 			if (kstrtoint(opt + 6, 0, &debug))
8068 				goto err;
8069 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8070 			if (kstrtoint(opt + 8, 0, &phyaddr))
8071 				goto err;
8072 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8073 			if (kstrtoint(opt + 7, 0, &buf_sz))
8074 				goto err;
8075 		} else if (!strncmp(opt, "tc:", 3)) {
8076 			if (kstrtoint(opt + 3, 0, &tc))
8077 				goto err;
8078 		} else if (!strncmp(opt, "watchdog:", 9)) {
8079 			if (kstrtoint(opt + 9, 0, &watchdog))
8080 				goto err;
8081 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8082 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8083 				goto err;
8084 		} else if (!strncmp(opt, "pause:", 6)) {
8085 			if (kstrtoint(opt + 6, 0, &pause))
8086 				goto err;
8087 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8088 			if (kstrtoint(opt + 10, 0, &eee_timer))
8089 				goto err;
8090 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8091 			if (kstrtoint(opt + 11, 0, &chain_mode))
8092 				goto err;
8093 		}
8094 	}
8095 	return 1;
8096 
8097 err:
8098 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
8099 	return 1;
8100 }
8101 
8102 __setup("stmmaceth=", stmmac_cmdline_opt);
8103 #endif /* MODULE */
8104 
8105 static int __init stmmac_init(void)
8106 {
8107 #ifdef CONFIG_DEBUG_FS
8108 	/* Create debugfs main directory if it doesn't exist yet */
8109 	if (!stmmac_fs_dir)
8110 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8111 	register_netdevice_notifier(&stmmac_notifier);
8112 #endif
8113 
8114 	return 0;
8115 }
8116 
8117 static void __exit stmmac_exit(void)
8118 {
8119 #ifdef CONFIG_DEBUG_FS
8120 	unregister_netdevice_notifier(&stmmac_notifier);
8121 	debugfs_remove_recursive(stmmac_fs_dir);
8122 #endif
8123 }
8124 
8125 module_init(stmmac_init)
8126 module_exit(stmmac_exit)
8127 
8128 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8129 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8130 MODULE_LICENSE("GPL");
8131