xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 550ee90ac61c1f0cd987c68a9ac6c4c9833925d7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
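/* Worked example (assuming SMP_CACHE_BYTES == 64):
 * STMMAC_ALIGN(1500) == ALIGN(ALIGN(1500, 64), 16) == ALIGN(1536, 16) == 1536,
 * i.e. the value is rounded up to a cache line and then to a 16-byte boundary.
 */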
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allows the user to force the use of chain mode instead of ring mode
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
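/* Note: the module parameters above can be overridden at load time, e.g.
 * (assuming the driver is built as a module named stmmac):
 *   modprobe stmmac chain_mode=1 buf_sz=4096
 * Parameters declared with mode 0644 can also be changed at runtime through
 * /sys/module/stmmac/parameters/.
 */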
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
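/**
 * stmmac_bus_clks_config - enable or disable the bus clocks
 * @priv: driver private structure
 * @enabled: true to enable stmmac_clk and pclk (plus any platform specific
 *	     clocks via plat->clks_config), false to disable them
 * Description: gates the bus clocks in the right order, unwinding the
 * already-enabled clocks on failure.
 */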
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
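/* Disable NAPI for every RX/TX channel; queues running in AF_XDP zero-copy
 * mode use the combined rxtx NAPI instance instead of the separate RX/TX ones.
 */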
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
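/* Take the carrier down, flag that a reset is required and kick the
 * service task to handle it.
 */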
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  * documentation). Otherwise, the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* Platform provided default clk_csr would be assumed valid
308 	 * for all other cases except for the below mentioned ones.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we cannot estimate the proper divider as the frequency of
311 	 * clk_csr_i is not known. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
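/**
 * stmmac_tx_avail - get the number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: returns how many descriptors are still available in the ring,
 * keeping one slot free so that cur_tx never catches up with dirty_tx.
 */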
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
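/**
 * stmmac_lpi_entry_timer_config - enable/disable the HW LPI entry timer
 * @priv: driver private structure
 * @en: enable the hardware LPI entry timer (and disable the SW EEE timer)
 * Description: when @en is true the hardware handles LPI entry on its own
 * using priv->tx_lpi_timer; otherwise the software eee_ctrl_timer is used.
 */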
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter LPI mode
405  * @priv: driver private structure
406  * Description: verify that all TX queues have finished their work and,
407  * if so, enter LPI mode when EEE is enabled; returns -EBUSY otherwise.
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits and disables EEE when the LPI state
433  * is active. It is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then the MAC transmitter can be moved to the LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts
468  *  the related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Check if MAC core supports the EEE feature. */
475 	if (!priv->dma_cap.eee)
476 		return false;
477 
478 	mutex_lock(&priv->lock);
479 
480 	/* Check if it needs to be deactivated */
481 	if (!priv->eee_active) {
482 		if (priv->eee_enabled) {
483 			netdev_dbg(priv->dev, "disable EEE\n");
484 			stmmac_lpi_entry_timer_config(priv, 0);
485 			del_timer_sync(&priv->eee_ctrl_timer);
486 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
487 			if (priv->hw->xpcs)
488 				xpcs_config_eee(priv->hw->xpcs,
489 						priv->plat->mult_fact_100ns,
490 						false);
491 		}
492 		mutex_unlock(&priv->lock);
493 		return false;
494 	}
495 
496 	if (priv->eee_active && !priv->eee_enabled) {
497 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
498 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
499 				     eee_tw_timer);
500 		if (priv->hw->xpcs)
501 			xpcs_config_eee(priv->hw->xpcs,
502 					priv->plat->mult_fact_100ns,
503 					true);
504 	}
505 
506 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
507 		del_timer_sync(&priv->eee_ctrl_timer);
508 		priv->tx_path_in_lpi_mode = false;
509 		stmmac_lpi_entry_timer_config(priv, 1);
510 	} else {
511 		stmmac_lpi_entry_timer_config(priv, 0);
512 		mod_timer(&priv->eee_ctrl_timer,
513 			  STMMAC_LPI_T(priv->tx_lpi_timer));
514 	}
515 
516 	mutex_unlock(&priv->lock);
517 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
518 	return true;
519 }
520 
521 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
522  * @priv: driver private structure
523  * @p : descriptor pointer
524  * @skb : the socket buffer
525  * Description :
526  * This function reads the timestamp from the descriptor, performs some
527  * sanity checks and then passes it to the stack.
528  */
529 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
530 				   struct dma_desc *p, struct sk_buff *skb)
531 {
532 	struct skb_shared_hwtstamps shhwtstamp;
533 	bool found = false;
534 	u64 ns = 0;
535 
536 	if (!priv->hwts_tx_en)
537 		return;
538 
539 	/* exit if skb doesn't support hw tstamp */
540 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
541 		return;
542 
543 	/* check tx tstamp status */
544 	if (stmmac_get_tx_timestamp_status(priv, p)) {
545 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
546 		found = true;
547 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
548 		found = true;
549 	}
550 
551 	if (found) {
552 		ns -= priv->plat->cdc_error_adj;
553 
554 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
555 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
556 
557 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
558 		/* pass tstamp to stack */
559 		skb_tstamp_tx(skb, &shhwtstamp);
560 	}
561 }
562 
563 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
564  * @priv: driver private structure
565  * @p : descriptor pointer
566  * @np : next descriptor pointer
567  * @skb : the socket buffer
568  * Description :
569  * This function will read received packet's timestamp from the descriptor
570  * and pass it to the stack. It also performs some sanity checks.
571  */
572 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
573 				   struct dma_desc *np, struct sk_buff *skb)
574 {
575 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
576 	struct dma_desc *desc = p;
577 	u64 ns = 0;
578 
579 	if (!priv->hwts_rx_en)
580 		return;
581 	/* For GMAC4, the valid timestamp is from CTX next desc. */
582 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
583 		desc = np;
584 
585 	/* Check if timestamp is available */
586 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
587 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
588 
589 		ns -= priv->plat->cdc_error_adj;
590 
591 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
592 		shhwtstamp = skb_hwtstamps(skb);
593 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
594 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
595 	} else  {
596 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
597 	}
598 }
599 
600 /**
601  *  stmmac_hwtstamp_set - control hardware timestamping.
602  *  @dev: device pointer.
603  *  @ifr: An IOCTL specific structure, that can contain a pointer to
604  *  a proprietary structure used to pass information to the driver.
605  *  Description:
606  *  This function configures the MAC to enable/disable both outgoing (TX)
607  *  and incoming (RX) packet timestamping based on user input.
608  *  Return Value:
609  *  0 on success and an appropriate negative error code on failure.
610  */
611 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
612 {
613 	struct stmmac_priv *priv = netdev_priv(dev);
614 	struct hwtstamp_config config;
615 	u32 ptp_v2 = 0;
616 	u32 tstamp_all = 0;
617 	u32 ptp_over_ipv4_udp = 0;
618 	u32 ptp_over_ipv6_udp = 0;
619 	u32 ptp_over_ethernet = 0;
620 	u32 snap_type_sel = 0;
621 	u32 ts_master_en = 0;
622 	u32 ts_event_en = 0;
623 
624 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
625 		netdev_alert(priv->dev, "No support for HW time stamping\n");
626 		priv->hwts_tx_en = 0;
627 		priv->hwts_rx_en = 0;
628 
629 		return -EOPNOTSUPP;
630 	}
631 
632 	if (copy_from_user(&config, ifr->ifr_data,
633 			   sizeof(config)))
634 		return -EFAULT;
635 
636 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
637 		   __func__, config.flags, config.tx_type, config.rx_filter);
638 
639 	if (config.tx_type != HWTSTAMP_TX_OFF &&
640 	    config.tx_type != HWTSTAMP_TX_ON)
641 		return -ERANGE;
642 
643 	if (priv->adv_ts) {
644 		switch (config.rx_filter) {
645 		case HWTSTAMP_FILTER_NONE:
646 			/* time stamp no incoming packet at all */
647 			config.rx_filter = HWTSTAMP_FILTER_NONE;
648 			break;
649 
650 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
651 			/* PTP v1, UDP, any kind of event packet */
652 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
653 			/* 'xmac' hardware can support Sync, Pdelay_Req and
654 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
655 			 * This leaves Delay_Req timestamps out.
656 			 * Enable all events *and* general purpose message
657 			 * timestamping
658 			 */
659 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
660 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 			break;
663 
664 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
665 			/* PTP v1, UDP, Sync packet */
666 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
667 			/* take time stamp for SYNC messages only */
668 			ts_event_en = PTP_TCR_TSEVNTENA;
669 
670 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
671 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
672 			break;
673 
674 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
675 			/* PTP v1, UDP, Delay_req packet */
676 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
677 			/* take time stamp for Delay_Req messages only */
678 			ts_master_en = PTP_TCR_TSMSTRENA;
679 			ts_event_en = PTP_TCR_TSEVNTENA;
680 
681 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
682 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
683 			break;
684 
685 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
686 			/* PTP v2, UDP, any kind of event packet */
687 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
688 			ptp_v2 = PTP_TCR_TSVER2ENA;
689 			/* take time stamp for all event messages */
690 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
691 
692 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
693 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
694 			break;
695 
696 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
697 			/* PTP v2, UDP, Sync packet */
698 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
699 			ptp_v2 = PTP_TCR_TSVER2ENA;
700 			/* take time stamp for SYNC messages only */
701 			ts_event_en = PTP_TCR_TSEVNTENA;
702 
703 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
704 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
705 			break;
706 
707 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
708 			/* PTP v2, UDP, Delay_req packet */
709 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
710 			ptp_v2 = PTP_TCR_TSVER2ENA;
711 			/* take time stamp for Delay_Req messages only */
712 			ts_master_en = PTP_TCR_TSMSTRENA;
713 			ts_event_en = PTP_TCR_TSEVNTENA;
714 
715 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
716 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
717 			break;
718 
719 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
720 			/* PTP v2/802.AS1 any layer, any kind of event packet */
721 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
722 			ptp_v2 = PTP_TCR_TSVER2ENA;
723 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
724 			if (priv->synopsys_id < DWMAC_CORE_4_10)
725 				ts_event_en = PTP_TCR_TSEVNTENA;
726 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
727 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
728 			ptp_over_ethernet = PTP_TCR_TSIPENA;
729 			break;
730 
731 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
732 			/* PTP v2/802.AS1, any layer, Sync packet */
733 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
734 			ptp_v2 = PTP_TCR_TSVER2ENA;
735 			/* take time stamp for SYNC messages only */
736 			ts_event_en = PTP_TCR_TSEVNTENA;
737 
738 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
739 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
740 			ptp_over_ethernet = PTP_TCR_TSIPENA;
741 			break;
742 
743 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
744 			/* PTP v2/802.AS1, any layer, Delay_req packet */
745 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
746 			ptp_v2 = PTP_TCR_TSVER2ENA;
747 			/* take time stamp for Delay_Req messages only */
748 			ts_master_en = PTP_TCR_TSMSTRENA;
749 			ts_event_en = PTP_TCR_TSEVNTENA;
750 
751 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
752 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
753 			ptp_over_ethernet = PTP_TCR_TSIPENA;
754 			break;
755 
756 		case HWTSTAMP_FILTER_NTP_ALL:
757 		case HWTSTAMP_FILTER_ALL:
758 			/* time stamp any incoming packet */
759 			config.rx_filter = HWTSTAMP_FILTER_ALL;
760 			tstamp_all = PTP_TCR_TSENALL;
761 			break;
762 
763 		default:
764 			return -ERANGE;
765 		}
766 	} else {
767 		switch (config.rx_filter) {
768 		case HWTSTAMP_FILTER_NONE:
769 			config.rx_filter = HWTSTAMP_FILTER_NONE;
770 			break;
771 		default:
772 			/* PTP v1, UDP, any kind of event packet */
773 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
774 			break;
775 		}
776 	}
777 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
778 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
779 
780 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
781 
782 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
783 		priv->systime_flags |= tstamp_all | ptp_v2 |
784 				       ptp_over_ethernet | ptp_over_ipv6_udp |
785 				       ptp_over_ipv4_udp | ts_event_en |
786 				       ts_master_en | snap_type_sel;
787 	}
788 
789 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
790 
791 	memcpy(&priv->tstamp_config, &config, sizeof(config));
792 
793 	return copy_to_user(ifr->ifr_data, &config,
794 			    sizeof(config)) ? -EFAULT : 0;
795 }
796 
797 /**
798  *  stmmac_hwtstamp_get - read hardware timestamping.
799  *  @dev: device pointer.
800  *  @ifr: An IOCTL specific structure, that can contain a pointer to
801  *  a proprietary structure used to pass information to the driver.
802  *  Description:
803  *  This function obtains the current hardware timestamping settings
804  *  as requested.
805  */
806 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
807 {
808 	struct stmmac_priv *priv = netdev_priv(dev);
809 	struct hwtstamp_config *config = &priv->tstamp_config;
810 
811 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
812 		return -EOPNOTSUPP;
813 
814 	return copy_to_user(ifr->ifr_data, config,
815 			    sizeof(*config)) ? -EFAULT : 0;
816 }
817 
818 /**
819  * stmmac_init_tstamp_counter - init hardware timestamping counter
820  * @priv: driver private structure
821  * @systime_flags: timestamping flags
822  * Description:
823  * Initialize hardware counter for packet timestamping.
824  * This is valid as long as the interface is open and not suspended.
825  * It is rerun after resuming from suspend, in which case the timestamping
826  * flags updated by stmmac_hwtstamp_set() also need to be restored.
827  */
828 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
829 {
830 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
831 	struct timespec64 now;
832 	u32 sec_inc = 0;
833 	u64 temp = 0;
834 
835 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
836 		return -EOPNOTSUPP;
837 
838 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
839 	priv->systime_flags = systime_flags;
840 
841 	/* program Sub Second Increment reg */
842 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
843 					   priv->plat->clk_ptp_rate,
844 					   xmac, &sec_inc);
845 	temp = div_u64(1000000000ULL, sec_inc);
846 
847 	/* Store sub second increment for later use */
848 	priv->sub_second_inc = sec_inc;
849 
850 	/* Calculate the default addend value:
851 	 * formula is:
852 	 * addend = (2^32)/freq_div_ratio;
853 	 * where freq_div_ratio = 1e9 ns / sec_inc
854 	 */
855 	temp = (u64)(temp << 32);
856 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
857 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
858 
859 	/* initialize system time */
860 	ktime_get_real_ts64(&now);
861 
862 	/* lower 32 bits of tv_sec are safe until y2106 */
863 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
864 
865 	return 0;
866 }
867 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
868 
869 /**
870  * stmmac_init_ptp - init PTP
871  * @priv: driver private structure
872  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
873  * This is done by looking at the HW cap. register.
874  * This function also registers the ptp driver.
875  */
876 static int stmmac_init_ptp(struct stmmac_priv *priv)
877 {
878 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
879 	int ret;
880 
881 	if (priv->plat->ptp_clk_freq_config)
882 		priv->plat->ptp_clk_freq_config(priv);
883 
884 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
885 	if (ret)
886 		return ret;
887 
888 	priv->adv_ts = 0;
889 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
890 	if (xmac && priv->dma_cap.atime_stamp)
891 		priv->adv_ts = 1;
892 	/* Dwmac 3.x core with extend_desc can support adv_ts */
893 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
894 		priv->adv_ts = 1;
895 
896 	if (priv->dma_cap.time_stamp)
897 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
898 
899 	if (priv->adv_ts)
900 		netdev_info(priv->dev,
901 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
902 
903 	priv->hwts_tx_en = 0;
904 	priv->hwts_rx_en = 0;
905 
906 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
907 		stmmac_hwtstamp_correct_latency(priv, priv);
908 
909 	return 0;
910 }
911 
912 static void stmmac_release_ptp(struct stmmac_priv *priv)
913 {
914 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
915 	stmmac_ptp_unregister(priv);
916 }
917 
918 /**
919  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
920  *  @priv: driver private structure
921  *  @duplex: duplex passed to the next function
922  *  Description: It is used for configuring the flow control in all queues
923  */
924 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
925 {
926 	u32 tx_cnt = priv->plat->tx_queues_to_use;
927 
928 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
929 			priv->pause, tx_cnt);
930 }
931 
932 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
933 					 phy_interface_t interface)
934 {
935 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
936 
937 	/* Refresh the MAC-specific capabilities */
938 	stmmac_mac_update_caps(priv);
939 
940 	config->mac_capabilities = priv->hw->link.caps;
941 
942 	if (priv->plat->max_speed)
943 		phylink_limit_mac_speed(config, priv->plat->max_speed);
944 
945 	return config->mac_capabilities;
946 }
947 
948 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
949 						 phy_interface_t interface)
950 {
951 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
952 	struct phylink_pcs *pcs;
953 
954 	if (priv->plat->select_pcs) {
955 		pcs = priv->plat->select_pcs(priv, interface);
956 		if (!IS_ERR(pcs))
957 			return pcs;
958 	}
959 
960 	return NULL;
961 }
962 
963 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
964 			      const struct phylink_link_state *state)
965 {
966 	/* Nothing to do, xpcs_config() handles everything */
967 }
968 
969 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
970 {
971 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
972 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
973 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
974 	bool *hs_enable = &fpe_cfg->hs_enable;
975 
976 	if (is_up && *hs_enable) {
977 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
978 					MPACKET_VERIFY);
979 	} else {
980 		*lo_state = FPE_STATE_OFF;
981 		*lp_state = FPE_STATE_OFF;
982 	}
983 }
984 
985 static void stmmac_mac_link_down(struct phylink_config *config,
986 				 unsigned int mode, phy_interface_t interface)
987 {
988 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
989 
990 	stmmac_mac_set(priv, priv->ioaddr, false);
991 	priv->eee_active = false;
992 	priv->tx_lpi_enabled = false;
993 	priv->eee_enabled = stmmac_eee_init(priv);
994 	stmmac_set_eee_pls(priv, priv->hw, false);
995 
996 	if (priv->dma_cap.fpesel)
997 		stmmac_fpe_link_state_handle(priv, false);
998 }
999 
1000 static void stmmac_mac_link_up(struct phylink_config *config,
1001 			       struct phy_device *phy,
1002 			       unsigned int mode, phy_interface_t interface,
1003 			       int speed, int duplex,
1004 			       bool tx_pause, bool rx_pause)
1005 {
1006 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1007 	u32 old_ctrl, ctrl;
1008 
1009 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1010 	    priv->plat->serdes_powerup)
1011 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1012 
1013 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1014 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1015 
1016 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1017 		switch (speed) {
1018 		case SPEED_10000:
1019 			ctrl |= priv->hw->link.xgmii.speed10000;
1020 			break;
1021 		case SPEED_5000:
1022 			ctrl |= priv->hw->link.xgmii.speed5000;
1023 			break;
1024 		case SPEED_2500:
1025 			ctrl |= priv->hw->link.xgmii.speed2500;
1026 			break;
1027 		default:
1028 			return;
1029 		}
1030 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1031 		switch (speed) {
1032 		case SPEED_100000:
1033 			ctrl |= priv->hw->link.xlgmii.speed100000;
1034 			break;
1035 		case SPEED_50000:
1036 			ctrl |= priv->hw->link.xlgmii.speed50000;
1037 			break;
1038 		case SPEED_40000:
1039 			ctrl |= priv->hw->link.xlgmii.speed40000;
1040 			break;
1041 		case SPEED_25000:
1042 			ctrl |= priv->hw->link.xlgmii.speed25000;
1043 			break;
1044 		case SPEED_10000:
1045 			ctrl |= priv->hw->link.xgmii.speed10000;
1046 			break;
1047 		case SPEED_2500:
1048 			ctrl |= priv->hw->link.speed2500;
1049 			break;
1050 		case SPEED_1000:
1051 			ctrl |= priv->hw->link.speed1000;
1052 			break;
1053 		default:
1054 			return;
1055 		}
1056 	} else {
1057 		switch (speed) {
1058 		case SPEED_2500:
1059 			ctrl |= priv->hw->link.speed2500;
1060 			break;
1061 		case SPEED_1000:
1062 			ctrl |= priv->hw->link.speed1000;
1063 			break;
1064 		case SPEED_100:
1065 			ctrl |= priv->hw->link.speed100;
1066 			break;
1067 		case SPEED_10:
1068 			ctrl |= priv->hw->link.speed10;
1069 			break;
1070 		default:
1071 			return;
1072 		}
1073 	}
1074 
1075 	priv->speed = speed;
1076 
1077 	if (priv->plat->fix_mac_speed)
1078 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1079 
1080 	if (!duplex)
1081 		ctrl &= ~priv->hw->link.duplex;
1082 	else
1083 		ctrl |= priv->hw->link.duplex;
1084 
1085 	/* Flow Control operation */
1086 	if (rx_pause && tx_pause)
1087 		priv->flow_ctrl = FLOW_AUTO;
1088 	else if (rx_pause && !tx_pause)
1089 		priv->flow_ctrl = FLOW_RX;
1090 	else if (!rx_pause && tx_pause)
1091 		priv->flow_ctrl = FLOW_TX;
1092 	else
1093 		priv->flow_ctrl = FLOW_OFF;
1094 
1095 	stmmac_mac_flow_ctrl(priv, duplex);
1096 
1097 	if (ctrl != old_ctrl)
1098 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1099 
1100 	stmmac_mac_set(priv, priv->ioaddr, true);
1101 	if (phy && priv->dma_cap.eee) {
1102 		priv->eee_active =
1103 			phy_init_eee(phy, !(priv->plat->flags &
1104 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1105 		priv->eee_enabled = stmmac_eee_init(priv);
1106 		priv->tx_lpi_enabled = priv->eee_enabled;
1107 		stmmac_set_eee_pls(priv, priv->hw, true);
1108 	}
1109 
1110 	if (priv->dma_cap.fpesel)
1111 		stmmac_fpe_link_state_handle(priv, true);
1112 
1113 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1114 		stmmac_hwtstamp_correct_latency(priv, priv);
1115 }
1116 
1117 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1118 	.mac_get_caps = stmmac_mac_get_caps,
1119 	.mac_select_pcs = stmmac_mac_select_pcs,
1120 	.mac_config = stmmac_mac_config,
1121 	.mac_link_down = stmmac_mac_link_down,
1122 	.mac_link_up = stmmac_mac_link_up,
1123 };
1124 
1125 /**
1126  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1127  * @priv: driver private structure
1128  * Description: this is to verify if the HW supports the PCS.
1129  * The Physical Coding Sublayer (PCS) is an interface that can be used when
1130  * the MAC is configured for the TBI, RTBI, or SGMII PHY interface.
1131  */
1132 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1133 {
1134 	int interface = priv->plat->mac_interface;
1135 
1136 	if (priv->dma_cap.pcs) {
1137 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1138 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1139 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1140 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1141 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1142 			priv->hw->pcs = STMMAC_PCS_RGMII;
1143 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1144 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1145 			priv->hw->pcs = STMMAC_PCS_SGMII;
1146 		}
1147 	}
1148 }
1149 
1150 /**
1151  * stmmac_init_phy - PHY initialization
1152  * @dev: net device structure
1153  * Description: it initializes the driver's PHY state, and attaches the PHY
1154  * to the mac driver.
1155  *  Return value:
1156  *  0 on success
1157  */
1158 static int stmmac_init_phy(struct net_device *dev)
1159 {
1160 	struct stmmac_priv *priv = netdev_priv(dev);
1161 	struct fwnode_handle *phy_fwnode;
1162 	struct fwnode_handle *fwnode;
1163 	int ret;
1164 
1165 	if (!phylink_expects_phy(priv->phylink))
1166 		return 0;
1167 
1168 	fwnode = priv->plat->port_node;
1169 	if (!fwnode)
1170 		fwnode = dev_fwnode(priv->device);
1171 
1172 	if (fwnode)
1173 		phy_fwnode = fwnode_get_phy_node(fwnode);
1174 	else
1175 		phy_fwnode = NULL;
1176 
1177 	/* Some DT bindings do not set up the PHY handle. Let's try to
1178 	 * manually parse it
1179 	 */
1180 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1181 		int addr = priv->plat->phy_addr;
1182 		struct phy_device *phydev;
1183 
1184 		if (addr < 0) {
1185 			netdev_err(priv->dev, "no phy found\n");
1186 			return -ENODEV;
1187 		}
1188 
1189 		phydev = mdiobus_get_phy(priv->mii, addr);
1190 		if (!phydev) {
1191 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1192 			return -ENODEV;
1193 		}
1194 
1195 		ret = phylink_connect_phy(priv->phylink, phydev);
1196 	} else {
1197 		fwnode_handle_put(phy_fwnode);
1198 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1199 	}
1200 
1201 	if (!priv->plat->pmt) {
1202 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1203 
1204 		phylink_ethtool_get_wol(priv->phylink, &wol);
1205 		device_set_wakeup_capable(priv->device, !!wol.supported);
1206 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1207 	}
1208 
1209 	return ret;
1210 }
1211 
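/**
 * stmmac_phy_setup - create the phylink instance
 * @priv: driver private structure
 * Description: fills in the phylink configuration (supported interfaces,
 * in-band AN defaults, RX clock requirement) and creates the phylink
 * instance used by the driver. Returns 0 on success or a negative errno.
 */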
1212 static int stmmac_phy_setup(struct stmmac_priv *priv)
1213 {
1214 	struct stmmac_mdio_bus_data *mdio_bus_data;
1215 	int mode = priv->plat->phy_interface;
1216 	struct fwnode_handle *fwnode;
1217 	struct phylink *phylink;
1218 
1219 	priv->phylink_config.dev = &priv->dev->dev;
1220 	priv->phylink_config.type = PHYLINK_NETDEV;
1221 	priv->phylink_config.mac_managed_pm = true;
1222 
1223 	/* Stmmac always requires an RX clock for hardware initialization */
1224 	priv->phylink_config.mac_requires_rxc = true;
1225 
1226 	mdio_bus_data = priv->plat->mdio_bus_data;
1227 	if (mdio_bus_data)
1228 		priv->phylink_config.default_an_inband =
1229 			mdio_bus_data->default_an_inband;
1230 
1231 	/* Set the platform/firmware specified interface mode. Note, phylink
1232 	 * deals with the PHY interface mode, not the MAC interface mode.
1233 	 */
1234 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1235 
1236 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1237 	if (priv->hw->xpcs)
1238 		xpcs_get_interfaces(priv->hw->xpcs,
1239 				    priv->phylink_config.supported_interfaces);
1240 
1241 	fwnode = priv->plat->port_node;
1242 	if (!fwnode)
1243 		fwnode = dev_fwnode(priv->device);
1244 
1245 	phylink = phylink_create(&priv->phylink_config, fwnode,
1246 				 mode, &stmmac_phylink_mac_ops);
1247 	if (IS_ERR(phylink))
1248 		return PTR_ERR(phylink);
1249 
1250 	priv->phylink = phylink;
1251 	return 0;
1252 }
1253 
1254 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1255 				    struct stmmac_dma_conf *dma_conf)
1256 {
1257 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1258 	unsigned int desc_size;
1259 	void *head_rx;
1260 	u32 queue;
1261 
1262 	/* Display RX rings */
1263 	for (queue = 0; queue < rx_cnt; queue++) {
1264 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1265 
1266 		pr_info("\tRX Queue %u rings\n", queue);
1267 
1268 		if (priv->extend_desc) {
1269 			head_rx = (void *)rx_q->dma_erx;
1270 			desc_size = sizeof(struct dma_extended_desc);
1271 		} else {
1272 			head_rx = (void *)rx_q->dma_rx;
1273 			desc_size = sizeof(struct dma_desc);
1274 		}
1275 
1276 		/* Display RX ring */
1277 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1278 				    rx_q->dma_rx_phy, desc_size);
1279 	}
1280 }
1281 
1282 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1283 				    struct stmmac_dma_conf *dma_conf)
1284 {
1285 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1286 	unsigned int desc_size;
1287 	void *head_tx;
1288 	u32 queue;
1289 
1290 	/* Display TX rings */
1291 	for (queue = 0; queue < tx_cnt; queue++) {
1292 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1293 
1294 		pr_info("\tTX Queue %d rings\n", queue);
1295 
1296 		if (priv->extend_desc) {
1297 			head_tx = (void *)tx_q->dma_etx;
1298 			desc_size = sizeof(struct dma_extended_desc);
1299 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1300 			head_tx = (void *)tx_q->dma_entx;
1301 			desc_size = sizeof(struct dma_edesc);
1302 		} else {
1303 			head_tx = (void *)tx_q->dma_tx;
1304 			desc_size = sizeof(struct dma_desc);
1305 		}
1306 
1307 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1308 				    tx_q->dma_tx_phy, desc_size);
1309 	}
1310 }
1311 
1312 static void stmmac_display_rings(struct stmmac_priv *priv,
1313 				 struct stmmac_dma_conf *dma_conf)
1314 {
1315 	/* Display RX ring */
1316 	stmmac_display_rx_rings(priv, dma_conf);
1317 
1318 	/* Display TX ring */
1319 	stmmac_display_tx_rings(priv, dma_conf);
1320 }
1321 
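/**
 * stmmac_set_bfsize - map the MTU to a DMA buffer size
 * @mtu: current MTU
 * @bufsize: current buffer size
 * Description: returns the smallest supported DMA buffer size (2K, 4K, 8K or
 * 16K) that can hold @mtu, or DEFAULT_BUFSIZE for small MTUs.
 */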
1322 static int stmmac_set_bfsize(int mtu, int bufsize)
1323 {
1324 	int ret = bufsize;
1325 
1326 	if (mtu >= BUF_SIZE_8KiB)
1327 		ret = BUF_SIZE_16KiB;
1328 	else if (mtu >= BUF_SIZE_4KiB)
1329 		ret = BUF_SIZE_8KiB;
1330 	else if (mtu >= BUF_SIZE_2KiB)
1331 		ret = BUF_SIZE_4KiB;
1332 	else if (mtu > DEFAULT_BUFSIZE)
1333 		ret = BUF_SIZE_2KiB;
1334 	else
1335 		ret = DEFAULT_BUFSIZE;
1336 
1337 	return ret;
1338 }
1339 
1340 /**
1341  * stmmac_clear_rx_descriptors - clear RX descriptors
1342  * @priv: driver private structure
1343  * @dma_conf: structure to take the dma data
1344  * @queue: RX queue index
1345  * Description: this function is called to clear the RX descriptors.
1346  * It handles both basic and extended descriptors.
1347  */
1348 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1349 					struct stmmac_dma_conf *dma_conf,
1350 					u32 queue)
1351 {
1352 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1353 	int i;
1354 
1355 	/* Clear the RX descriptors */
1356 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1357 		if (priv->extend_desc)
1358 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1359 					priv->use_riwt, priv->mode,
1360 					(i == dma_conf->dma_rx_size - 1),
1361 					dma_conf->dma_buf_sz);
1362 		else
1363 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1364 					priv->use_riwt, priv->mode,
1365 					(i == dma_conf->dma_rx_size - 1),
1366 					dma_conf->dma_buf_sz);
1367 }
1368 
1369 /**
1370  * stmmac_clear_tx_descriptors - clear tx descriptors
1371  * @priv: driver private structure
1372  * @dma_conf: structure to take the dma data
1373  * @queue: TX queue index.
1374  * Description: this function is called to clear the TX descriptors.
1375  * It handles both basic and extended descriptors.
1376  */
1377 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1378 					struct stmmac_dma_conf *dma_conf,
1379 					u32 queue)
1380 {
1381 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1382 	int i;
1383 
1384 	/* Clear the TX descriptors */
1385 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1386 		int last = (i == (dma_conf->dma_tx_size - 1));
1387 		struct dma_desc *p;
1388 
1389 		if (priv->extend_desc)
1390 			p = &tx_q->dma_etx[i].basic;
1391 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1392 			p = &tx_q->dma_entx[i].basic;
1393 		else
1394 			p = &tx_q->dma_tx[i];
1395 
1396 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1397 	}
1398 }
1399 
1400 /**
1401  * stmmac_clear_descriptors - clear descriptors
1402  * @priv: driver private structure
1403  * @dma_conf: structure to take the dma data
1404  * Description: this function is called to clear the TX and RX descriptors.
1405  * It handles both basic and extended descriptors.
1406  */
1407 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1408 				     struct stmmac_dma_conf *dma_conf)
1409 {
1410 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1411 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1412 	u32 queue;
1413 
1414 	/* Clear the RX descriptors */
1415 	for (queue = 0; queue < rx_queue_cnt; queue++)
1416 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1417 
1418 	/* Clear the TX descriptors */
1419 	for (queue = 0; queue < tx_queue_cnt; queue++)
1420 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1421 }
1422 
1423 /**
1424  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1425  * @priv: driver private structure
1426  * @dma_conf: structure to take the dma data
1427  * @p: descriptor pointer
1428  * @i: descriptor index
1429  * @flags: gfp flag
1430  * @queue: RX queue index
1431  * Description: this function is called to allocate a receive buffer, perform
1432  * the DMA mapping and init the descriptor.
1433  */
1434 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1435 				  struct stmmac_dma_conf *dma_conf,
1436 				  struct dma_desc *p,
1437 				  int i, gfp_t flags, u32 queue)
1438 {
1439 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1440 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1441 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1442 
1443 	if (priv->dma_cap.host_dma_width <= 32)
1444 		gfp |= GFP_DMA32;
1445 
1446 	if (!buf->page) {
1447 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1448 		if (!buf->page)
1449 			return -ENOMEM;
1450 		buf->page_offset = stmmac_rx_offset(priv);
1451 	}
1452 
1453 	if (priv->sph && !buf->sec_page) {
1454 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1455 		if (!buf->sec_page)
1456 			return -ENOMEM;
1457 
1458 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1459 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1460 	} else {
1461 		buf->sec_page = NULL;
1462 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1463 	}
1464 
1465 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1466 
1467 	stmmac_set_desc_addr(priv, p, buf->addr);
1468 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1469 		stmmac_init_desc3(priv, p);
1470 
1471 	return 0;
1472 }
1473 
1474 /**
1475  * stmmac_free_rx_buffer - free an RX DMA buffer
1476  * @priv: private structure
1477  * @rx_q: RX queue
1478  * @i: buffer index.
1479  */
1480 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1481 				  struct stmmac_rx_queue *rx_q,
1482 				  int i)
1483 {
1484 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1485 
1486 	if (buf->page)
1487 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1488 	buf->page = NULL;
1489 
1490 	if (buf->sec_page)
1491 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1492 	buf->sec_page = NULL;
1493 }
1494 
1495 /**
1496  * stmmac_free_tx_buffer - free a TX DMA buffer
1497  * @priv: private structure
1498  * @dma_conf: structure to take the dma data
1499  * @queue: TX queue index
1500  * @i: buffer index.
1501  */
1502 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1503 				  struct stmmac_dma_conf *dma_conf,
1504 				  u32 queue, int i)
1505 {
1506 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1507 
1508 	if (tx_q->tx_skbuff_dma[i].buf &&
1509 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1510 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1511 			dma_unmap_page(priv->device,
1512 				       tx_q->tx_skbuff_dma[i].buf,
1513 				       tx_q->tx_skbuff_dma[i].len,
1514 				       DMA_TO_DEVICE);
1515 		else
1516 			dma_unmap_single(priv->device,
1517 					 tx_q->tx_skbuff_dma[i].buf,
1518 					 tx_q->tx_skbuff_dma[i].len,
1519 					 DMA_TO_DEVICE);
1520 	}
1521 
1522 	if (tx_q->xdpf[i] &&
1523 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1524 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1525 		xdp_return_frame(tx_q->xdpf[i]);
1526 		tx_q->xdpf[i] = NULL;
1527 	}
1528 
1529 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1530 		tx_q->xsk_frames_done++;
1531 
1532 	if (tx_q->tx_skbuff[i] &&
1533 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1534 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1535 		tx_q->tx_skbuff[i] = NULL;
1536 	}
1537 
1538 	tx_q->tx_skbuff_dma[i].buf = 0;
1539 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1540 }
1541 
1542 /**
1543  * dma_free_rx_skbufs - free RX dma buffers
1544  * @priv: private structure
1545  * @dma_conf: structure to take the dma data
1546  * @queue: RX queue index
1547  */
1548 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1549 			       struct stmmac_dma_conf *dma_conf,
1550 			       u32 queue)
1551 {
1552 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1553 	int i;
1554 
1555 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1556 		stmmac_free_rx_buffer(priv, rx_q, i);
1557 }
1558 
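/**
 * stmmac_alloc_rx_buffers - allocate page pool backed RX buffers
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag
 * Description: populate every RX descriptor of @queue with a buffer from the
 * page pool, mirroring what dma_free_rx_skbufs() tears down.
 */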
1559 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1560 				   struct stmmac_dma_conf *dma_conf,
1561 				   u32 queue, gfp_t flags)
1562 {
1563 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 	int i;
1565 
1566 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1567 		struct dma_desc *p;
1568 		int ret;
1569 
1570 		if (priv->extend_desc)
1571 			p = &((rx_q->dma_erx + i)->basic);
1572 		else
1573 			p = rx_q->dma_rx + i;
1574 
1575 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1576 					     queue);
1577 		if (ret)
1578 			return ret;
1579 
1580 		rx_q->buf_alloc_num++;
1581 	}
1582 
1583 	return 0;
1584 }
1585 
1586 /**
1587  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1588  * @priv: private structure
1589  * @dma_conf: structure to take the dma data
1590  * @queue: RX queue index
1591  */
1592 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1593 				struct stmmac_dma_conf *dma_conf,
1594 				u32 queue)
1595 {
1596 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1597 	int i;
1598 
1599 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1600 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1601 
1602 		if (!buf->xdp)
1603 			continue;
1604 
1605 		xsk_buff_free(buf->xdp);
1606 		buf->xdp = NULL;
1607 	}
1608 }
1609 
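/**
 * stmmac_alloc_rx_buffers_zc - allocate RX buffers from the XSK pool
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: zero-copy counterpart of stmmac_alloc_rx_buffers(); each
 * descriptor is filled with an xdp_buff taken from the queue's XSK pool.
 */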
1610 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1611 				      struct stmmac_dma_conf *dma_conf,
1612 				      u32 queue)
1613 {
1614 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1615 	int i;
1616 
1617 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1618 	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1619 	 * use this macro to make sure there are no size violations.
1620 	 */
1621 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1622 
1623 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1624 		struct stmmac_rx_buffer *buf;
1625 		dma_addr_t dma_addr;
1626 		struct dma_desc *p;
1627 
1628 		if (priv->extend_desc)
1629 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1630 		else
1631 			p = rx_q->dma_rx + i;
1632 
1633 		buf = &rx_q->buf_pool[i];
1634 
1635 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1636 		if (!buf->xdp)
1637 			return -ENOMEM;
1638 
1639 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1640 		stmmac_set_desc_addr(priv, p, dma_addr);
1641 		rx_q->buf_alloc_num++;
1642 	}
1643 
1644 	return 0;
1645 }
1646 
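/* Return the XSK buffer pool bound to @queue when XDP is enabled and the
 * queue runs in AF_XDP zero-copy mode, NULL otherwise.
 */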
1647 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1648 {
1649 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1650 		return NULL;
1651 
1652 	return xsk_get_pool_from_qid(priv->dev, queue);
1653 }
1654 
1655 /**
1656  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1657  * @priv: driver private structure
1658  * @dma_conf: structure to take the dma data
1659  * @queue: RX queue index
1660  * @flags: gfp flag.
1661  * Description: this function initializes the DMA RX descriptors
1662  * and allocates the socket buffers. It supports the chained and ring
1663  * modes.
1664  */
1665 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1666 				    struct stmmac_dma_conf *dma_conf,
1667 				    u32 queue, gfp_t flags)
1668 {
1669 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1670 	int ret;
1671 
1672 	netif_dbg(priv, probe, priv->dev,
1673 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1674 		  (u32)rx_q->dma_rx_phy);
1675 
1676 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1677 
1678 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1679 
1680 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1681 
1682 	if (rx_q->xsk_pool) {
1683 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1684 						   MEM_TYPE_XSK_BUFF_POOL,
1685 						   NULL));
1686 		netdev_info(priv->dev,
1687 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1688 			    rx_q->queue_index);
1689 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1690 	} else {
1691 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1692 						   MEM_TYPE_PAGE_POOL,
1693 						   rx_q->page_pool));
1694 		netdev_info(priv->dev,
1695 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1696 			    rx_q->queue_index);
1697 	}
1698 
1699 	if (rx_q->xsk_pool) {
1700 		/* RX XDP ZC buffer pool may not be populated, e.g.
1701 		 * xdpsock TX-only.
1702 		 */
1703 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1704 	} else {
1705 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1706 		if (ret < 0)
1707 			return -ENOMEM;
1708 	}
1709 
1710 	/* Setup the chained descriptor addresses */
1711 	if (priv->mode == STMMAC_CHAIN_MODE) {
1712 		if (priv->extend_desc)
1713 			stmmac_mode_init(priv, rx_q->dma_erx,
1714 					 rx_q->dma_rx_phy,
1715 					 dma_conf->dma_rx_size, 1);
1716 		else
1717 			stmmac_mode_init(priv, rx_q->dma_rx,
1718 					 rx_q->dma_rx_phy,
1719 					 dma_conf->dma_rx_size, 0);
1720 	}
1721 
1722 	return 0;
1723 }
1724 
1725 static int init_dma_rx_desc_rings(struct net_device *dev,
1726 				  struct stmmac_dma_conf *dma_conf,
1727 				  gfp_t flags)
1728 {
1729 	struct stmmac_priv *priv = netdev_priv(dev);
1730 	u32 rx_count = priv->plat->rx_queues_to_use;
1731 	int queue;
1732 	int ret;
1733 
1734 	/* RX INITIALIZATION */
1735 	netif_dbg(priv, probe, priv->dev,
1736 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1737 
1738 	for (queue = 0; queue < rx_count; queue++) {
1739 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1740 		if (ret)
1741 			goto err_init_rx_buffers;
1742 	}
1743 
1744 	return 0;
1745 
1746 err_init_rx_buffers:
1747 	while (queue >= 0) {
1748 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1749 
1750 		if (rx_q->xsk_pool)
1751 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1752 		else
1753 			dma_free_rx_skbufs(priv, dma_conf, queue);
1754 
1755 		rx_q->buf_alloc_num = 0;
1756 		rx_q->xsk_pool = NULL;
1757 
1758 		queue--;
1759 	}
1760 
1761 	return ret;
1762 }
1763 
1764 /**
1765  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1766  * @priv: driver private structure
1767  * @dma_conf: structure to take the dma data
1768  * @queue: TX queue index
1769  * Description: this function initializes the DMA TX descriptors
1770  * and allocates the socket buffers. It supports the chained and ring
1771  * modes.
1772  */
1773 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1774 				    struct stmmac_dma_conf *dma_conf,
1775 				    u32 queue)
1776 {
1777 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1778 	int i;
1779 
1780 	netif_dbg(priv, probe, priv->dev,
1781 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1782 		  (u32)tx_q->dma_tx_phy);
1783 
1784 	/* Setup the chained descriptor addresses */
1785 	if (priv->mode == STMMAC_CHAIN_MODE) {
1786 		if (priv->extend_desc)
1787 			stmmac_mode_init(priv, tx_q->dma_etx,
1788 					 tx_q->dma_tx_phy,
1789 					 dma_conf->dma_tx_size, 1);
1790 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1791 			stmmac_mode_init(priv, tx_q->dma_tx,
1792 					 tx_q->dma_tx_phy,
1793 					 dma_conf->dma_tx_size, 0);
1794 	}
1795 
1796 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1797 
1798 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1799 		struct dma_desc *p;
1800 
1801 		if (priv->extend_desc)
1802 			p = &((tx_q->dma_etx + i)->basic);
1803 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1804 			p = &((tx_q->dma_entx + i)->basic);
1805 		else
1806 			p = tx_q->dma_tx + i;
1807 
1808 		stmmac_clear_desc(priv, p);
1809 
1810 		tx_q->tx_skbuff_dma[i].buf = 0;
1811 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1812 		tx_q->tx_skbuff_dma[i].len = 0;
1813 		tx_q->tx_skbuff_dma[i].last_segment = false;
1814 		tx_q->tx_skbuff[i] = NULL;
1815 	}
1816 
1817 	return 0;
1818 }
1819 
1820 static int init_dma_tx_desc_rings(struct net_device *dev,
1821 				  struct stmmac_dma_conf *dma_conf)
1822 {
1823 	struct stmmac_priv *priv = netdev_priv(dev);
1824 	u32 tx_queue_cnt;
1825 	u32 queue;
1826 
1827 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1828 
1829 	for (queue = 0; queue < tx_queue_cnt; queue++)
1830 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1831 
1832 	return 0;
1833 }
1834 
1835 /**
1836  * init_dma_desc_rings - init the RX/TX descriptor rings
1837  * @dev: net device structure
1838  * @dma_conf: structure to take the dma data
1839  * @flags: gfp flag.
1840  * Description: this function initializes the DMA RX/TX descriptors
1841  * and allocates the socket buffers. It supports the chained and ring
1842  * modes.
1843  */
1844 static int init_dma_desc_rings(struct net_device *dev,
1845 			       struct stmmac_dma_conf *dma_conf,
1846 			       gfp_t flags)
1847 {
1848 	struct stmmac_priv *priv = netdev_priv(dev);
1849 	int ret;
1850 
1851 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1852 	if (ret)
1853 		return ret;
1854 
1855 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1856 
1857 	stmmac_clear_descriptors(priv, dma_conf);
1858 
1859 	if (netif_msg_hw(priv))
1860 		stmmac_display_rings(priv, dma_conf);
1861 
1862 	return ret;
1863 }
1864 
1865 /**
1866  * dma_free_tx_skbufs - free TX dma buffers
1867  * @priv: private structure
1868  * @dma_conf: structure to take the dma data
1869  * @queue: TX queue index
1870  */
1871 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1872 			       struct stmmac_dma_conf *dma_conf,
1873 			       u32 queue)
1874 {
1875 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1876 	int i;
1877 
1878 	tx_q->xsk_frames_done = 0;
1879 
1880 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1881 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1882 
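	/* Report the frames reclaimed above back to the XSK pool before
	 * detaching it from this queue.
	 */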
1883 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1884 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1885 		tx_q->xsk_frames_done = 0;
1886 		tx_q->xsk_pool = NULL;
1887 	}
1888 }
1889 
1890 /**
1891  * stmmac_free_tx_skbufs - free TX skb buffers
1892  * @priv: private structure
1893  */
1894 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1895 {
1896 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1897 	u32 queue;
1898 
1899 	for (queue = 0; queue < tx_queue_cnt; queue++)
1900 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1901 }
1902 
1903 /**
1904  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1905  * @priv: private structure
1906  * @dma_conf: structure to take the dma data
1907  * @queue: RX queue index
1908  */
1909 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1910 					 struct stmmac_dma_conf *dma_conf,
1911 					 u32 queue)
1912 {
1913 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1914 
1915 	/* Release the DMA RX socket buffers */
1916 	if (rx_q->xsk_pool)
1917 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1918 	else
1919 		dma_free_rx_skbufs(priv, dma_conf, queue);
1920 
1921 	rx_q->buf_alloc_num = 0;
1922 	rx_q->xsk_pool = NULL;
1923 
1924 	/* Free DMA regions of consistent memory previously allocated */
1925 	if (!priv->extend_desc)
1926 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1927 				  sizeof(struct dma_desc),
1928 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1929 	else
1930 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1931 				  sizeof(struct dma_extended_desc),
1932 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1933 
1934 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1935 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1936 
1937 	kfree(rx_q->buf_pool);
1938 	if (rx_q->page_pool)
1939 		page_pool_destroy(rx_q->page_pool);
1940 }
1941 
1942 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1943 				       struct stmmac_dma_conf *dma_conf)
1944 {
1945 	u32 rx_count = priv->plat->rx_queues_to_use;
1946 	u32 queue;
1947 
1948 	/* Free RX queue resources */
1949 	for (queue = 0; queue < rx_count; queue++)
1950 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1951 }
1952 
1953 /**
1954  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1955  * @priv: private structure
1956  * @dma_conf: structure to take the dma data
1957  * @queue: TX queue index
1958  */
1959 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1960 					 struct stmmac_dma_conf *dma_conf,
1961 					 u32 queue)
1962 {
1963 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1964 	size_t size;
1965 	void *addr;
1966 
1967 	/* Release the DMA TX socket buffers */
1968 	dma_free_tx_skbufs(priv, dma_conf, queue);
1969 
1970 	if (priv->extend_desc) {
1971 		size = sizeof(struct dma_extended_desc);
1972 		addr = tx_q->dma_etx;
1973 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1974 		size = sizeof(struct dma_edesc);
1975 		addr = tx_q->dma_entx;
1976 	} else {
1977 		size = sizeof(struct dma_desc);
1978 		addr = tx_q->dma_tx;
1979 	}
1980 
1981 	size *= dma_conf->dma_tx_size;
1982 
1983 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1984 
1985 	kfree(tx_q->tx_skbuff_dma);
1986 	kfree(tx_q->tx_skbuff);
1987 }
1988 
1989 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1990 				       struct stmmac_dma_conf *dma_conf)
1991 {
1992 	u32 tx_count = priv->plat->tx_queues_to_use;
1993 	u32 queue;
1994 
1995 	/* Free TX queue resources */
1996 	for (queue = 0; queue < tx_count; queue++)
1997 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
1998 }
1999 
2000 /**
2001  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2002  * @priv: private structure
2003  * @dma_conf: structure to take the dma data
2004  * @queue: RX queue index
2005  * Description: according to which descriptor can be used (extended or basic)
2006  * this function allocates the resources for the RX path: the page pool,
2007  * the buffer bookkeeping array and the descriptor ring, enabling the
2008  * zero-copy mechanism on reception.
2009  */
2010 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2011 					 struct stmmac_dma_conf *dma_conf,
2012 					 u32 queue)
2013 {
2014 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2015 	struct stmmac_channel *ch = &priv->channel[queue];
2016 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2017 	struct page_pool_params pp_params = { 0 };
2018 	unsigned int num_pages;
2019 	unsigned int napi_id;
2020 	int ret;
2021 
2022 	rx_q->queue_index = queue;
2023 	rx_q->priv_data = priv;
2024 
2025 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2026 	pp_params.pool_size = dma_conf->dma_rx_size;
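	/* dma_buf_sz may exceed PAGE_SIZE; derive the page allocation order
	 * from the number of pages a single buffer needs.
	 */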
2027 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2028 	pp_params.order = ilog2(num_pages);
2029 	pp_params.nid = dev_to_node(priv->device);
2030 	pp_params.dev = priv->device;
2031 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2032 	pp_params.offset = stmmac_rx_offset(priv);
2033 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2034 
2035 	rx_q->page_pool = page_pool_create(&pp_params);
2036 	if (IS_ERR(rx_q->page_pool)) {
2037 		ret = PTR_ERR(rx_q->page_pool);
2038 		rx_q->page_pool = NULL;
2039 		return ret;
2040 	}
2041 
2042 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2043 				 sizeof(*rx_q->buf_pool),
2044 				 GFP_KERNEL);
2045 	if (!rx_q->buf_pool)
2046 		return -ENOMEM;
2047 
2048 	if (priv->extend_desc) {
2049 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2050 						   dma_conf->dma_rx_size *
2051 						   sizeof(struct dma_extended_desc),
2052 						   &rx_q->dma_rx_phy,
2053 						   GFP_KERNEL);
2054 		if (!rx_q->dma_erx)
2055 			return -ENOMEM;
2056 
2057 	} else {
2058 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2059 						  dma_conf->dma_rx_size *
2060 						  sizeof(struct dma_desc),
2061 						  &rx_q->dma_rx_phy,
2062 						  GFP_KERNEL);
2063 		if (!rx_q->dma_rx)
2064 			return -ENOMEM;
2065 	}
2066 
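	/* AF_XDP zero-copy queues are serviced by the combined rx/tx NAPI,
	 * so register the xdp_rxq with that NAPI id.
	 */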
2067 	if (stmmac_xdp_is_enabled(priv) &&
2068 	    test_bit(queue, priv->af_xdp_zc_qps))
2069 		napi_id = ch->rxtx_napi.napi_id;
2070 	else
2071 		napi_id = ch->rx_napi.napi_id;
2072 
2073 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2074 			       rx_q->queue_index,
2075 			       napi_id);
2076 	if (ret) {
2077 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2078 		return -EINVAL;
2079 	}
2080 
2081 	return 0;
2082 }
2083 
2084 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2085 				       struct stmmac_dma_conf *dma_conf)
2086 {
2087 	u32 rx_count = priv->plat->rx_queues_to_use;
2088 	u32 queue;
2089 	int ret;
2090 
2091 	/* RX queues buffers and DMA */
2092 	for (queue = 0; queue < rx_count; queue++) {
2093 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2094 		if (ret)
2095 			goto err_dma;
2096 	}
2097 
2098 	return 0;
2099 
2100 err_dma:
2101 	free_dma_rx_desc_resources(priv, dma_conf);
2102 
2103 	return ret;
2104 }
2105 
2106 /**
2107  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2108  * @priv: private structure
2109  * @dma_conf: structure to take the dma data
2110  * @queue: TX queue index
2111  * Description: according to which descriptor can be used (extended or basic)
2112  * this function allocates the resources for the TX path, i.e. the
2113  * descriptor ring and the per-descriptor bookkeeping arrays used by the
2114  * driver.
2115  */
2116 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2117 					 struct stmmac_dma_conf *dma_conf,
2118 					 u32 queue)
2119 {
2120 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2121 	size_t size;
2122 	void *addr;
2123 
2124 	tx_q->queue_index = queue;
2125 	tx_q->priv_data = priv;
2126 
2127 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2128 				      sizeof(*tx_q->tx_skbuff_dma),
2129 				      GFP_KERNEL);
2130 	if (!tx_q->tx_skbuff_dma)
2131 		return -ENOMEM;
2132 
2133 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2134 				  sizeof(struct sk_buff *),
2135 				  GFP_KERNEL);
2136 	if (!tx_q->tx_skbuff)
2137 		return -ENOMEM;
2138 
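	/* The descriptor size depends on whether extended descriptors or
	 * TBS (enhanced) descriptors are in use for this queue.
	 */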
2139 	if (priv->extend_desc)
2140 		size = sizeof(struct dma_extended_desc);
2141 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2142 		size = sizeof(struct dma_edesc);
2143 	else
2144 		size = sizeof(struct dma_desc);
2145 
2146 	size *= dma_conf->dma_tx_size;
2147 
2148 	addr = dma_alloc_coherent(priv->device, size,
2149 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2150 	if (!addr)
2151 		return -ENOMEM;
2152 
2153 	if (priv->extend_desc)
2154 		tx_q->dma_etx = addr;
2155 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2156 		tx_q->dma_entx = addr;
2157 	else
2158 		tx_q->dma_tx = addr;
2159 
2160 	return 0;
2161 }
2162 
2163 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2164 				       struct stmmac_dma_conf *dma_conf)
2165 {
2166 	u32 tx_count = priv->plat->tx_queues_to_use;
2167 	u32 queue;
2168 	int ret;
2169 
2170 	/* TX queues buffers and DMA */
2171 	for (queue = 0; queue < tx_count; queue++) {
2172 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2173 		if (ret)
2174 			goto err_dma;
2175 	}
2176 
2177 	return 0;
2178 
2179 err_dma:
2180 	free_dma_tx_desc_resources(priv, dma_conf);
2181 	return ret;
2182 }
2183 
2184 /**
2185  * alloc_dma_desc_resources - alloc TX/RX resources.
2186  * @priv: private structure
2187  * @dma_conf: structure to take the dma data
2188  * Description: according to which descriptor can be used (extended or basic)
2189  * this function allocates the resources for the TX and RX paths. In case of
2190  * reception, for example, it pre-allocates the RX socket buffers in order to
2191  * allow the zero-copy mechanism.
2192  */
2193 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2194 				    struct stmmac_dma_conf *dma_conf)
2195 {
2196 	/* RX Allocation */
2197 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2198 
2199 	if (ret)
2200 		return ret;
2201 
2202 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2203 
2204 	return ret;
2205 }
2206 
2207 /**
2208  * free_dma_desc_resources - free dma desc resources
2209  * @priv: private structure
2210  * @dma_conf: structure to take the dma data
2211  */
2212 static void free_dma_desc_resources(struct stmmac_priv *priv,
2213 				    struct stmmac_dma_conf *dma_conf)
2214 {
2215 	/* Release the DMA TX socket buffers */
2216 	free_dma_tx_desc_resources(priv, dma_conf);
2217 
2218 	/* Release the DMA RX socket buffers later
2219 	 * to ensure all pending XDP_TX buffers are returned.
2220 	 */
2221 	free_dma_rx_desc_resources(priv, dma_conf);
2222 }
2223 
2224 /**
2225  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2226  *  @priv: driver private structure
2227  *  Description: It is used for enabling the rx queues in the MAC
2228  */
2229 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2230 {
2231 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2232 	int queue;
2233 	u8 mode;
2234 
2235 	for (queue = 0; queue < rx_queues_count; queue++) {
2236 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2237 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2238 	}
2239 }
2240 
2241 /**
2242  * stmmac_start_rx_dma - start RX DMA channel
2243  * @priv: driver private structure
2244  * @chan: RX channel index
2245  * Description:
2246  * This starts an RX DMA channel
2247  */
2248 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2249 {
2250 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2251 	stmmac_start_rx(priv, priv->ioaddr, chan);
2252 }
2253 
2254 /**
2255  * stmmac_start_tx_dma - start TX DMA channel
2256  * @priv: driver private structure
2257  * @chan: TX channel index
2258  * Description:
2259  * This starts a TX DMA channel
2260  */
2261 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2262 {
2263 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2264 	stmmac_start_tx(priv, priv->ioaddr, chan);
2265 }
2266 
2267 /**
2268  * stmmac_stop_rx_dma - stop RX DMA channel
2269  * @priv: driver private structure
2270  * @chan: RX channel index
2271  * Description:
2272  * This stops an RX DMA channel
2273  */
2274 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2275 {
2276 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2277 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2278 }
2279 
2280 /**
2281  * stmmac_stop_tx_dma - stop TX DMA channel
2282  * @priv: driver private structure
2283  * @chan: TX channel index
2284  * Description:
2285  * This stops a TX DMA channel
2286  */
2287 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2288 {
2289 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2290 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2291 }
2292 
2293 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2294 {
2295 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2296 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2297 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2298 	u32 chan;
2299 
2300 	for (chan = 0; chan < dma_csr_ch; chan++) {
2301 		struct stmmac_channel *ch = &priv->channel[chan];
2302 		unsigned long flags;
2303 
2304 		spin_lock_irqsave(&ch->lock, flags);
2305 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2306 		spin_unlock_irqrestore(&ch->lock, flags);
2307 	}
2308 }
2309 
2310 /**
2311  * stmmac_start_all_dma - start all RX and TX DMA channels
2312  * @priv: driver private structure
2313  * Description:
2314  * This starts all the RX and TX DMA channels
2315  */
2316 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2317 {
2318 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2319 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2320 	u32 chan = 0;
2321 
2322 	for (chan = 0; chan < rx_channels_count; chan++)
2323 		stmmac_start_rx_dma(priv, chan);
2324 
2325 	for (chan = 0; chan < tx_channels_count; chan++)
2326 		stmmac_start_tx_dma(priv, chan);
2327 }
2328 
2329 /**
2330  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2331  * @priv: driver private structure
2332  * Description:
2333  * This stops the RX and TX DMA channels
2334  */
2335 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2336 {
2337 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2338 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2339 	u32 chan = 0;
2340 
2341 	for (chan = 0; chan < rx_channels_count; chan++)
2342 		stmmac_stop_rx_dma(priv, chan);
2343 
2344 	for (chan = 0; chan < tx_channels_count; chan++)
2345 		stmmac_stop_tx_dma(priv, chan);
2346 }
2347 
2348 /**
2349  *  stmmac_dma_operation_mode - HW DMA operation mode
2350  *  @priv: driver private structure
2351  *  Description: it is used for configuring the DMA operation mode register in
2352  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2353  */
2354 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2355 {
2356 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2357 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2358 	int rxfifosz = priv->plat->rx_fifo_size;
2359 	int txfifosz = priv->plat->tx_fifo_size;
2360 	u32 txmode = 0;
2361 	u32 rxmode = 0;
2362 	u32 chan = 0;
2363 	u8 qmode = 0;
2364 
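	/* Fall back to the FIFO sizes reported by the HW capability register
	 * when the platform does not provide them.
	 */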
2365 	if (rxfifosz == 0)
2366 		rxfifosz = priv->dma_cap.rx_fifo_size;
2367 	if (txfifosz == 0)
2368 		txfifosz = priv->dma_cap.tx_fifo_size;
2369 
2370 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2371 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2372 		rxfifosz /= rx_channels_count;
2373 		txfifosz /= tx_channels_count;
2374 	}
2375 
2376 	if (priv->plat->force_thresh_dma_mode) {
2377 		txmode = tc;
2378 		rxmode = tc;
2379 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2380 		/*
2381 		 * In case of GMAC, SF mode can be enabled
2382 		 * to perform the TX COE in HW. This depends on:
2383 		 * 1) TX COE is actually supported
2384 		 * 2) there is no buggy Jumbo frame support
2385 		 *    that requires not inserting the csum in the TDES.
2386 		 */
2387 		txmode = SF_DMA_MODE;
2388 		rxmode = SF_DMA_MODE;
2389 		priv->xstats.threshold = SF_DMA_MODE;
2390 	} else {
2391 		txmode = tc;
2392 		rxmode = SF_DMA_MODE;
2393 	}
2394 
2395 	/* configure all channels */
2396 	for (chan = 0; chan < rx_channels_count; chan++) {
2397 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2398 		u32 buf_size;
2399 
2400 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2401 
2402 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2403 				rxfifosz, qmode);
2404 
2405 		if (rx_q->xsk_pool) {
2406 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2407 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2408 					      buf_size,
2409 					      chan);
2410 		} else {
2411 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2412 					      priv->dma_conf.dma_buf_sz,
2413 					      chan);
2414 		}
2415 	}
2416 
2417 	for (chan = 0; chan < tx_channels_count; chan++) {
2418 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2419 
2420 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2421 				txfifosz, qmode);
2422 	}
2423 }
2424 
2425 static void stmmac_xsk_request_timestamp(void *_priv)
2426 {
2427 	struct stmmac_metadata_request *meta_req = _priv;
2428 
2429 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2430 	*meta_req->set_ic = true;
2431 }
2432 
2433 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2434 {
2435 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2436 	struct stmmac_priv *priv = tx_compl->priv;
2437 	struct dma_desc *desc = tx_compl->desc;
2438 	bool found = false;
2439 	u64 ns = 0;
2440 
2441 	if (!priv->hwts_tx_en)
2442 		return 0;
2443 
2444 	/* check tx tstamp status */
2445 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2446 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2447 		found = true;
2448 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2449 		found = true;
2450 	}
2451 
2452 	if (found) {
2453 		ns -= priv->plat->cdc_error_adj;
2454 		return ns_to_ktime(ns);
2455 	}
2456 
2457 	return 0;
2458 }
2459 
2460 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2461 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2462 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2463 };
2464 
2465 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2466 {
2467 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2468 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2469 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2470 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2471 	unsigned int entry = tx_q->cur_tx;
2472 	struct dma_desc *tx_desc = NULL;
2473 	struct xdp_desc xdp_desc;
2474 	bool work_done = true;
2475 	u32 tx_set_ic_bit = 0;
2476 
2477 	/* Avoid TX time-out as we share the queue with the slow path */
2478 	txq_trans_cond_update(nq);
2479 
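	/* Never try to send more frames than there are free TX descriptors */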
2480 	budget = min(budget, stmmac_tx_avail(priv, queue));
2481 
2482 	while (budget-- > 0) {
2483 		struct stmmac_metadata_request meta_req;
2484 		struct xsk_tx_metadata *meta = NULL;
2485 		dma_addr_t dma_addr;
2486 		bool set_ic;
2487 
2488 		/* We share the ring with the slow path, so stop XSK TX desc
2489 		 * submission when the available TX ring space is below the threshold.
2490 		 */
2491 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2492 		    !netif_carrier_ok(priv->dev)) {
2493 			work_done = false;
2494 			break;
2495 		}
2496 
2497 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2498 			break;
2499 
2500 		if (priv->est && priv->est->enable &&
2501 		    priv->est->max_sdu[queue] &&
2502 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2503 			priv->xstats.max_sdu_txq_drop[queue]++;
2504 			continue;
2505 		}
2506 
2507 		if (likely(priv->extend_desc))
2508 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2509 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2510 			tx_desc = &tx_q->dma_entx[entry].basic;
2511 		else
2512 			tx_desc = tx_q->dma_tx + entry;
2513 
2514 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2515 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2516 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2517 
2518 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2519 
2520 		/* To return the XDP buffer to the XSK pool, we simply call
2521 		 * xsk_tx_completed(), so we don't need to fill up
2522 		 * 'buf' and 'xdpf'.
2523 		 */
2524 		tx_q->tx_skbuff_dma[entry].buf = 0;
2525 		tx_q->xdpf[entry] = NULL;
2526 
2527 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2528 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2529 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2530 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2531 
2532 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2533 
2534 		tx_q->tx_count_frames++;
2535 
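		/* Coalesce completion interrupts: by default request one only
		 * every tx_coal_frames[queue] frames; the metadata request
		 * below may force it as well.
		 */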
2536 		if (!priv->tx_coal_frames[queue])
2537 			set_ic = false;
2538 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2539 			set_ic = true;
2540 		else
2541 			set_ic = false;
2542 
2543 		meta_req.priv = priv;
2544 		meta_req.tx_desc = tx_desc;
2545 		meta_req.set_ic = &set_ic;
2546 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2547 					&meta_req);
2548 		if (set_ic) {
2549 			tx_q->tx_count_frames = 0;
2550 			stmmac_set_tx_ic(priv, tx_desc);
2551 			tx_set_ic_bit++;
2552 		}
2553 
2554 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2555 				       true, priv->mode, true, true,
2556 				       xdp_desc.len);
2557 
2558 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2559 
2560 		xsk_tx_metadata_to_compl(meta,
2561 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2562 
2563 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2564 		entry = tx_q->cur_tx;
2565 	}
2566 	u64_stats_update_begin(&txq_stats->napi_syncp);
2567 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2568 	u64_stats_update_end(&txq_stats->napi_syncp);
2569 
2570 	if (tx_desc) {
2571 		stmmac_flush_tx_descriptors(priv, queue);
2572 		xsk_tx_release(pool);
2573 	}
2574 
2575 	/* Return true if both of the following conditions are met
2576 	 *  a) TX budget is still available
2577 	 *  b) work_done == true, i.e. the XSK TX desc peek found no more
2578 	 *     pending XSK TX frames for transmission
2579 	 */
2580 	return !!budget && work_done;
2581 }
2582 
2583 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2584 {
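	/* Bump the TX/RX threshold in steps of 64 as long as it is at or
	 * below 256 and Store-and-Forward mode is not already in use.
	 */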
2585 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2586 		tc += 64;
2587 
2588 		if (priv->plat->force_thresh_dma_mode)
2589 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2590 		else
2591 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2592 						      chan);
2593 
2594 		priv->xstats.threshold = tc;
2595 	}
2596 }
2597 
2598 /**
2599  * stmmac_tx_clean - to manage the transmission completion
2600  * @priv: driver private structure
2601  * @budget: napi budget limiting this function's packet handling
2602  * @queue: TX queue index
2603  * @pending_packets: signal to arm the TX coal timer
2604  * Description: it reclaims the transmit resources after transmission completes.
2605  * If some packets still need to be handled due to TX coalescing, set
2606  * pending_packets to true to make NAPI arm the TX coal timer.
2607  */
2608 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2609 			   bool *pending_packets)
2610 {
2611 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2612 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2613 	unsigned int bytes_compl = 0, pkts_compl = 0;
2614 	unsigned int entry, xmits = 0, count = 0;
2615 	u32 tx_packets = 0, tx_errors = 0;
2616 
2617 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2618 
2619 	tx_q->xsk_frames_done = 0;
2620 
2621 	entry = tx_q->dirty_tx;
2622 
2623 	/* Try to clean all completed TX frames in one shot */
2624 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2625 		struct xdp_frame *xdpf;
2626 		struct sk_buff *skb;
2627 		struct dma_desc *p;
2628 		int status;
2629 
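		/* Recover the skb or XDP frame associated with this
		 * descriptor, if any.
		 */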
2630 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2631 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2632 			xdpf = tx_q->xdpf[entry];
2633 			skb = NULL;
2634 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2635 			xdpf = NULL;
2636 			skb = tx_q->tx_skbuff[entry];
2637 		} else {
2638 			xdpf = NULL;
2639 			skb = NULL;
2640 		}
2641 
2642 		if (priv->extend_desc)
2643 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2644 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2645 			p = &tx_q->dma_entx[entry].basic;
2646 		else
2647 			p = tx_q->dma_tx + entry;
2648 
2649 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2650 		/* Check if the descriptor is owned by the DMA */
2651 		if (unlikely(status & tx_dma_own))
2652 			break;
2653 
2654 		count++;
2655 
2656 		/* Make sure descriptor fields are read after reading
2657 		 * the own bit.
2658 		 */
2659 		dma_rmb();
2660 
2661 		/* Just consider the last segment and ...*/
2662 		if (likely(!(status & tx_not_ls))) {
2663 			/* ... verify the status error condition */
2664 			if (unlikely(status & tx_err)) {
2665 				tx_errors++;
2666 				if (unlikely(status & tx_err_bump_tc))
2667 					stmmac_bump_dma_threshold(priv, queue);
2668 			} else {
2669 				tx_packets++;
2670 			}
2671 			if (skb) {
2672 				stmmac_get_tx_hwtstamp(priv, p, skb);
2673 			} else if (tx_q->xsk_pool &&
2674 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2675 				struct stmmac_xsk_tx_complete tx_compl = {
2676 					.priv = priv,
2677 					.desc = p,
2678 				};
2679 
2680 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2681 							 &stmmac_xsk_tx_metadata_ops,
2682 							 &tx_compl);
2683 			}
2684 		}
2685 
2686 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2687 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2688 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2689 				dma_unmap_page(priv->device,
2690 					       tx_q->tx_skbuff_dma[entry].buf,
2691 					       tx_q->tx_skbuff_dma[entry].len,
2692 					       DMA_TO_DEVICE);
2693 			else
2694 				dma_unmap_single(priv->device,
2695 						 tx_q->tx_skbuff_dma[entry].buf,
2696 						 tx_q->tx_skbuff_dma[entry].len,
2697 						 DMA_TO_DEVICE);
2698 			tx_q->tx_skbuff_dma[entry].buf = 0;
2699 			tx_q->tx_skbuff_dma[entry].len = 0;
2700 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2701 		}
2702 
2703 		stmmac_clean_desc3(priv, tx_q, p);
2704 
2705 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2706 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2707 
2708 		if (xdpf &&
2709 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2710 			xdp_return_frame_rx_napi(xdpf);
2711 			tx_q->xdpf[entry] = NULL;
2712 		}
2713 
2714 		if (xdpf &&
2715 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2716 			xdp_return_frame(xdpf);
2717 			tx_q->xdpf[entry] = NULL;
2718 		}
2719 
2720 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2721 			tx_q->xsk_frames_done++;
2722 
2723 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2724 			if (likely(skb)) {
2725 				pkts_compl++;
2726 				bytes_compl += skb->len;
2727 				dev_consume_skb_any(skb);
2728 				tx_q->tx_skbuff[entry] = NULL;
2729 			}
2730 		}
2731 
2732 		stmmac_release_tx_desc(priv, p, priv->mode);
2733 
2734 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2735 	}
2736 	tx_q->dirty_tx = entry;
2737 
2738 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2739 				  pkts_compl, bytes_compl);
2740 
2741 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2742 								queue))) &&
2743 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2744 
2745 		netif_dbg(priv, tx_done, priv->dev,
2746 			  "%s: restart transmit\n", __func__);
2747 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2748 	}
2749 
2750 	if (tx_q->xsk_pool) {
2751 		bool work_done;
2752 
2753 		if (tx_q->xsk_frames_done)
2754 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2755 
2756 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2757 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2758 
2759 		/* For XSK TX, we try to send as many as possible.
2760 		 * If XSK work done (XSK TX desc empty and budget still
2761 		 * available), return "budget - 1" to reenable TX IRQ.
2762 		 * Else, return "budget" to make NAPI continue polling.
2763 		 */
2764 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2765 					       STMMAC_XSK_TX_BUDGET_MAX);
2766 		if (work_done)
2767 			xmits = budget - 1;
2768 		else
2769 			xmits = budget;
2770 	}
2771 
2772 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2773 	    priv->eee_sw_timer_en) {
2774 		if (stmmac_enable_eee_mode(priv))
2775 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2776 	}
2777 
2778 	/* We still have pending packets, let's call for a new scheduling */
2779 	if (tx_q->dirty_tx != tx_q->cur_tx)
2780 		*pending_packets = true;
2781 
2782 	u64_stats_update_begin(&txq_stats->napi_syncp);
2783 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2784 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2785 	u64_stats_inc(&txq_stats->napi.tx_clean);
2786 	u64_stats_update_end(&txq_stats->napi_syncp);
2787 
2788 	priv->xstats.tx_errors += tx_errors;
2789 
2790 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2791 
2792 	/* Combine decisions from TX clean and XSK TX */
2793 	return max(count, xmits);
2794 }
2795 
2796 /**
2797  * stmmac_tx_err - to manage the tx error
2798  * @priv: driver private structure
2799  * @chan: channel index
2800  * Description: it cleans the descriptors and restarts the transmission
2801  * in case of transmission errors.
2802  */
2803 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2804 {
2805 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2806 
2807 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2808 
2809 	stmmac_stop_tx_dma(priv, chan);
2810 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2811 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2812 	stmmac_reset_tx_queue(priv, chan);
2813 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2814 			    tx_q->dma_tx_phy, chan);
2815 	stmmac_start_tx_dma(priv, chan);
2816 
2817 	priv->xstats.tx_errors++;
2818 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2819 }
2820 
2821 /**
2822  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2823  *  @priv: driver private structure
2824  *  @txmode: TX operating mode
2825  *  @rxmode: RX operating mode
2826  *  @chan: channel index
2827  *  Description: it is used for configuring the DMA operation mode at
2828  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2829  *  mode.
2830  */
2831 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2832 					  u32 rxmode, u32 chan)
2833 {
2834 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2835 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2836 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2837 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2838 	int rxfifosz = priv->plat->rx_fifo_size;
2839 	int txfifosz = priv->plat->tx_fifo_size;
2840 
2841 	if (rxfifosz == 0)
2842 		rxfifosz = priv->dma_cap.rx_fifo_size;
2843 	if (txfifosz == 0)
2844 		txfifosz = priv->dma_cap.tx_fifo_size;
2845 
2846 	/* Adjust for real per queue fifo size */
2847 	rxfifosz /= rx_channels_count;
2848 	txfifosz /= tx_channels_count;
2849 
2850 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2851 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2852 }
2853 
2854 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2855 {
2856 	int ret;
2857 
2858 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2859 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2860 	if (ret && (ret != -EINVAL)) {
2861 		stmmac_global_err(priv);
2862 		return true;
2863 	}
2864 
2865 	return false;
2866 }
2867 
2868 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2869 {
2870 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2871 						 &priv->xstats, chan, dir);
2872 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2873 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2874 	struct stmmac_channel *ch = &priv->channel[chan];
2875 	struct napi_struct *rx_napi;
2876 	struct napi_struct *tx_napi;
2877 	unsigned long flags;
2878 
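	/* Queues backed by an XSK buffer pool are polled by the combined
	 * rx/tx NAPI instance.
	 */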
2879 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2880 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2881 
2882 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2883 		if (napi_schedule_prep(rx_napi)) {
2884 			spin_lock_irqsave(&ch->lock, flags);
2885 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2886 			spin_unlock_irqrestore(&ch->lock, flags);
2887 			__napi_schedule(rx_napi);
2888 		}
2889 	}
2890 
2891 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2892 		if (napi_schedule_prep(tx_napi)) {
2893 			spin_lock_irqsave(&ch->lock, flags);
2894 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2895 			spin_unlock_irqrestore(&ch->lock, flags);
2896 			__napi_schedule(tx_napi);
2897 		}
2898 	}
2899 
2900 	return status;
2901 }
2902 
2903 /**
2904  * stmmac_dma_interrupt - DMA ISR
2905  * @priv: driver private structure
2906  * Description: this is the DMA ISR. It is called by the main ISR.
2907  * It calls the dwmac dma routine and schedules the poll method in case
2908  * some work can be done.
2909  */
2910 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2911 {
2912 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2913 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2914 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2915 				tx_channel_count : rx_channel_count;
2916 	u32 chan;
2917 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2918 
2919 	/* Make sure we never check beyond our status buffer. */
2920 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2921 		channels_to_check = ARRAY_SIZE(status);
2922 
2923 	for (chan = 0; chan < channels_to_check; chan++)
2924 		status[chan] = stmmac_napi_check(priv, chan,
2925 						 DMA_DIR_RXTX);
2926 
2927 	for (chan = 0; chan < tx_channel_count; chan++) {
2928 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2929 			/* Try to bump up the dma threshold on this failure */
2930 			stmmac_bump_dma_threshold(priv, chan);
2931 		} else if (unlikely(status[chan] == tx_hard_error)) {
2932 			stmmac_tx_err(priv, chan);
2933 		}
2934 	}
2935 }
2936 
2937 /**
2938  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2939  * @priv: driver private structure
2940  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2941  */
2942 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2943 {
2944 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2945 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2946 
2947 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2948 
2949 	if (priv->dma_cap.rmon) {
2950 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2951 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2952 	} else
2953 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2954 }
2955 
2956 /**
2957  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2958  * @priv: driver private structure
2959  * Description:
2960  *  newer GMAC chip generations have a register that indicates the
2961  *  presence of the optional features/functions.
2962  *  This can also be used to override the value passed through the
2963  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2964  */
2965 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2966 {
2967 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2968 }
2969 
2970 /**
2971  * stmmac_check_ether_addr - check if the MAC addr is valid
2972  * @priv: driver private structure
2973  * Description:
2974  * it verifies that the MAC address is valid; in case of failure it
2975  * generates a random MAC address
2976  */
2977 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2978 {
2979 	u8 addr[ETH_ALEN];
2980 
2981 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2982 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2983 		if (is_valid_ether_addr(addr))
2984 			eth_hw_addr_set(priv->dev, addr);
2985 		else
2986 			eth_hw_addr_random(priv->dev);
2987 		dev_info(priv->device, "device MAC address %pM\n",
2988 			 priv->dev->dev_addr);
2989 	}
2990 }
2991 
2992 /**
2993  * stmmac_init_dma_engine - DMA init.
2994  * @priv: driver private structure
2995  * Description:
2996  * It inits the DMA by invoking the specific MAC/GMAC callback.
2997  * Some DMA parameters can be passed from the platform;
2998  * in case these are not passed, a default is kept for the MAC or GMAC.
2999  */
3000 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3001 {
3002 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3003 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3004 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3005 	struct stmmac_rx_queue *rx_q;
3006 	struct stmmac_tx_queue *tx_q;
3007 	u32 chan = 0;
3008 	int ret = 0;
3009 
3010 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3011 		dev_err(priv->device, "Invalid DMA configuration\n");
3012 		return -EINVAL;
3013 	}
3014 
3015 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3016 		priv->plat->dma_cfg->atds = 1;
3017 
3018 	ret = stmmac_reset(priv, priv->ioaddr);
3019 	if (ret) {
3020 		dev_err(priv->device, "Failed to reset the dma\n");
3021 		return ret;
3022 	}
3023 
3024 	/* DMA Configuration */
3025 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3026 
3027 	if (priv->plat->axi)
3028 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3029 
3030 	/* DMA CSR Channel configuration */
3031 	for (chan = 0; chan < dma_csr_ch; chan++) {
3032 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3033 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3034 	}
3035 
3036 	/* DMA RX Channel Configuration */
3037 	for (chan = 0; chan < rx_channels_count; chan++) {
3038 		rx_q = &priv->dma_conf.rx_queue[chan];
3039 
3040 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3041 				    rx_q->dma_rx_phy, chan);
3042 
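		/* Place the RX tail pointer right after the last descriptor
		 * that currently owns a buffer.
		 */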
3043 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3044 				     (rx_q->buf_alloc_num *
3045 				      sizeof(struct dma_desc));
3046 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3047 				       rx_q->rx_tail_addr, chan);
3048 	}
3049 
3050 	/* DMA TX Channel Configuration */
3051 	for (chan = 0; chan < tx_channels_count; chan++) {
3052 		tx_q = &priv->dma_conf.tx_queue[chan];
3053 
3054 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3055 				    tx_q->dma_tx_phy, chan);
3056 
3057 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3058 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3059 				       tx_q->tx_tail_addr, chan);
3060 	}
3061 
3062 	return ret;
3063 }
3064 
3065 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3066 {
3067 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3068 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3069 	struct stmmac_channel *ch;
3070 	struct napi_struct *napi;
3071 
3072 	if (!tx_coal_timer)
3073 		return;
3074 
3075 	ch = &priv->channel[tx_q->queue_index];
3076 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3077 
3078 	/* Arm the timer only if napi is not already scheduled.
3079 	 * If napi is scheduled, try to cancel any pending timer; it will be
3080 	 * armed again by the next scheduled napi.
3081 	 */
3082 	if (unlikely(!napi_is_scheduled(napi)))
3083 		hrtimer_start(&tx_q->txtimer,
3084 			      STMMAC_COAL_TIMER(tx_coal_timer),
3085 			      HRTIMER_MODE_REL);
3086 	else
3087 		hrtimer_try_to_cancel(&tx_q->txtimer);
3088 }
3089 
3090 /**
3091  * stmmac_tx_timer - mitigation sw timer for tx.
3092  * @t: pointer to the hrtimer embedded in the TX queue
3093  * Description:
3094  * This is the timer handler that schedules the TX NAPI, which runs stmmac_tx_clean.
3095  */
3096 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3097 {
3098 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3099 	struct stmmac_priv *priv = tx_q->priv_data;
3100 	struct stmmac_channel *ch;
3101 	struct napi_struct *napi;
3102 
3103 	ch = &priv->channel[tx_q->queue_index];
3104 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3105 
3106 	if (likely(napi_schedule_prep(napi))) {
3107 		unsigned long flags;
3108 
3109 		spin_lock_irqsave(&ch->lock, flags);
3110 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3111 		spin_unlock_irqrestore(&ch->lock, flags);
3112 		__napi_schedule(napi);
3113 	}
3114 
3115 	return HRTIMER_NORESTART;
3116 }
3117 
3118 /**
3119  * stmmac_init_coalesce - init mitigation options.
3120  * @priv: driver private structure
3121  * Description:
3122  * This inits the coalesce parameters: i.e. timer rate,
3123  * timer handler and default threshold used for enabling the
3124  * interrupt on completion bit.
3125  */
3126 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3127 {
3128 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3129 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3130 	u32 chan;
3131 
3132 	for (chan = 0; chan < tx_channel_count; chan++) {
3133 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3134 
3135 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3136 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3137 
3138 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3139 		tx_q->txtimer.function = stmmac_tx_timer;
3140 	}
3141 
3142 	for (chan = 0; chan < rx_channel_count; chan++)
3143 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3144 }
3145 
3146 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3147 {
3148 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3149 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3150 	u32 chan;
3151 
3152 	/* set TX ring length */
3153 	for (chan = 0; chan < tx_channels_count; chan++)
3154 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3155 				       (priv->dma_conf.dma_tx_size - 1), chan);
3156 
3157 	/* set RX ring length */
3158 	for (chan = 0; chan < rx_channels_count; chan++)
3159 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3160 				       (priv->dma_conf.dma_rx_size - 1), chan);
3161 }
3162 
3163 /**
3164  *  stmmac_set_tx_queue_weight - Set TX queue weight
3165  *  @priv: driver private structure
3166  *  Description: It is used for setting the TX queue weights
3167  */
3168 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3169 {
3170 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3171 	u32 weight;
3172 	u32 queue;
3173 
3174 	for (queue = 0; queue < tx_queues_count; queue++) {
3175 		weight = priv->plat->tx_queues_cfg[queue].weight;
3176 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3177 	}
3178 }
3179 
3180 /**
3181  *  stmmac_configure_cbs - Configure CBS in TX queue
3182  *  @priv: driver private structure
3183  *  Description: It is used for configuring CBS in AVB TX queues
3184  */
3185 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3186 {
3187 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3188 	u32 mode_to_use;
3189 	u32 queue;
3190 
3191 	/* queue 0 is reserved for legacy traffic */
3192 	for (queue = 1; queue < tx_queues_count; queue++) {
3193 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3194 		if (mode_to_use == MTL_QUEUE_DCB)
3195 			continue;
3196 
3197 		stmmac_config_cbs(priv, priv->hw,
3198 				priv->plat->tx_queues_cfg[queue].send_slope,
3199 				priv->plat->tx_queues_cfg[queue].idle_slope,
3200 				priv->plat->tx_queues_cfg[queue].high_credit,
3201 				priv->plat->tx_queues_cfg[queue].low_credit,
3202 				queue);
3203 	}
3204 }
3205 
3206 /**
3207  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3208  *  @priv: driver private structure
3209  *  Description: It is used for mapping RX queues to RX dma channels
3210  */
3211 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3212 {
3213 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3214 	u32 queue;
3215 	u32 chan;
3216 
3217 	for (queue = 0; queue < rx_queues_count; queue++) {
3218 		chan = priv->plat->rx_queues_cfg[queue].chan;
3219 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3220 	}
3221 }
3222 
3223 /**
3224  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3225  *  @priv: driver private structure
3226  *  Description: It is used for configuring the RX Queue Priority
3227  */
3228 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3229 {
3230 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3231 	u32 queue;
3232 	u32 prio;
3233 
3234 	for (queue = 0; queue < rx_queues_count; queue++) {
3235 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3236 			continue;
3237 
3238 		prio = priv->plat->rx_queues_cfg[queue].prio;
3239 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3240 	}
3241 }
3242 
3243 /**
3244  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3245  *  @priv: driver private structure
3246  *  Description: It is used for configuring the TX Queue Priority
3247  */
3248 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3249 {
3250 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3251 	u32 queue;
3252 	u32 prio;
3253 
3254 	for (queue = 0; queue < tx_queues_count; queue++) {
3255 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3256 			continue;
3257 
3258 		prio = priv->plat->tx_queues_cfg[queue].prio;
3259 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3260 	}
3261 }
3262 
3263 /**
3264  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3265  *  @priv: driver private structure
3266  *  Description: It is used for configuring the RX queue routing
3267  */
3268 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3269 {
3270 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3271 	u32 queue;
3272 	u8 packet;
3273 
3274 	for (queue = 0; queue < rx_queues_count; queue++) {
3275 		/* no specific packet type routing specified for the queue */
3276 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3277 			continue;
3278 
3279 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3280 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3281 	}
3282 }
3283 
3284 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3285 {
3286 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3287 		priv->rss.enable = false;
3288 		return;
3289 	}
3290 
3291 	if (priv->dev->features & NETIF_F_RXHASH)
3292 		priv->rss.enable = true;
3293 	else
3294 		priv->rss.enable = false;
3295 
3296 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3297 			     priv->plat->rx_queues_to_use);
3298 }
3299 
3300 /**
3301  *  stmmac_mtl_configuration - Configure MTL
3302  *  @priv: driver private structure
3303  *  Description: It is used for configuring MTL
3304  */
3305 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3306 {
3307 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3308 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3309 
3310 	if (tx_queues_count > 1)
3311 		stmmac_set_tx_queue_weight(priv);
3312 
3313 	/* Configure MTL RX algorithms */
3314 	if (rx_queues_count > 1)
3315 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3316 				priv->plat->rx_sched_algorithm);
3317 
3318 	/* Configure MTL TX algorithms */
3319 	if (tx_queues_count > 1)
3320 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3321 				priv->plat->tx_sched_algorithm);
3322 
3323 	/* Configure CBS in AVB TX queues */
3324 	if (tx_queues_count > 1)
3325 		stmmac_configure_cbs(priv);
3326 
3327 	/* Map RX MTL to DMA channels */
3328 	stmmac_rx_queue_dma_chan_map(priv);
3329 
3330 	/* Enable MAC RX Queues */
3331 	stmmac_mac_enable_rx_queues(priv);
3332 
3333 	/* Set RX priorities */
3334 	if (rx_queues_count > 1)
3335 		stmmac_mac_config_rx_queues_prio(priv);
3336 
3337 	/* Set TX priorities */
3338 	if (tx_queues_count > 1)
3339 		stmmac_mac_config_tx_queues_prio(priv);
3340 
3341 	/* Set RX routing */
3342 	if (rx_queues_count > 1)
3343 		stmmac_mac_config_rx_queues_routing(priv);
3344 
3345 	/* Receive Side Scaling */
3346 	if (rx_queues_count > 1)
3347 		stmmac_mac_config_rss(priv);
3348 }
3349 
3350 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3351 {
3352 	if (priv->dma_cap.asp) {
3353 		netdev_info(priv->dev, "Enabling Safety Features\n");
3354 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3355 					  priv->plat->safety_feat_cfg);
3356 	} else {
3357 		netdev_info(priv->dev, "No Safety Features support found\n");
3358 	}
3359 }
3360 
3361 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3362 {
3363 	char *name;
3364 
3365 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3366 	clear_bit(__FPE_REMOVING,  &priv->fpe_task_state);
3367 
3368 	name = priv->wq_name;
3369 	sprintf(name, "%s-fpe", priv->dev->name);
3370 
3371 	priv->fpe_wq = create_singlethread_workqueue(name);
3372 	if (!priv->fpe_wq) {
3373 		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3374 
3375 		return -ENOMEM;
3376 	}
3377 	netdev_info(priv->dev, "FPE workqueue start");
3378 
3379 	return 0;
3380 }
3381 
3382 /**
3383  * stmmac_hw_setup - setup mac in a usable state.
3384  *  @dev: pointer to the device structure.
3385  *  @ptp_register: register PTP if set
3386  *  Description:
3387  *  this is the main function to setup the HW in a usable state: the
3388  *  DMA engine is reset and the core registers are configured (e.g. AXI,
3389  *  checksum features, timers). On return the DMA is ready to start
3390  *  receiving and transmitting.
3391  *  Return value:
3392  *  0 on success and an appropriate negative integer as defined in errno.h
3393  *  on failure.
3394  */
3395 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3396 {
3397 	struct stmmac_priv *priv = netdev_priv(dev);
3398 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3399 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3400 	bool sph_en;
3401 	u32 chan;
3402 	int ret;
3403 
3404 	/* Make sure RX clock is enabled */
3405 	if (priv->hw->phylink_pcs)
3406 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3407 
3408 	/* DMA initialization and SW reset */
3409 	ret = stmmac_init_dma_engine(priv);
3410 	if (ret < 0) {
3411 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3412 			   __func__);
3413 		return ret;
3414 	}
3415 
3416 	/* Copy the MAC addr into the HW  */
3417 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3418 
3419 	/* PS and related bits will be programmed according to the speed */
3420 	if (priv->hw->pcs) {
3421 		int speed = priv->plat->mac_port_sel_speed;
3422 
3423 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3424 		    (speed == SPEED_1000)) {
3425 			priv->hw->ps = speed;
3426 		} else {
3427 			dev_warn(priv->device, "invalid port speed\n");
3428 			priv->hw->ps = 0;
3429 		}
3430 	}
3431 
3432 	/* Initialize the MAC Core */
3433 	stmmac_core_init(priv, priv->hw, dev);
3434 
3435 	/* Initialize MTL */
3436 	stmmac_mtl_configuration(priv);
3437 
3438 	/* Initialize Safety Features */
3439 	stmmac_safety_feat_configuration(priv);
3440 
3441 	ret = stmmac_rx_ipc(priv, priv->hw);
3442 	if (!ret) {
3443 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3444 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3445 		priv->hw->rx_csum = 0;
3446 	}
3447 
3448 	/* Enable the MAC Rx/Tx */
3449 	stmmac_mac_set(priv, priv->ioaddr, true);
3450 
3451 	/* Set the HW DMA mode and the COE */
3452 	stmmac_dma_operation_mode(priv);
3453 
3454 	stmmac_mmc_setup(priv);
3455 
3456 	if (ptp_register) {
3457 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3458 		if (ret < 0)
3459 			netdev_warn(priv->dev,
3460 				    "failed to enable PTP reference clock: %pe\n",
3461 				    ERR_PTR(ret));
3462 	}
3463 
3464 	ret = stmmac_init_ptp(priv);
3465 	if (ret == -EOPNOTSUPP)
3466 		netdev_info(priv->dev, "PTP not supported by HW\n");
3467 	else if (ret)
3468 		netdev_warn(priv->dev, "PTP init failed\n");
3469 	else if (ptp_register)
3470 		stmmac_ptp_register(priv);
3471 
3472 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3473 
3474 	/* Convert the timer from msec to usec */
3475 	if (!priv->tx_lpi_timer)
3476 		priv->tx_lpi_timer = eee_timer * 1000;
3477 
3478 	if (priv->use_riwt) {
3479 		u32 queue;
3480 
3481 		for (queue = 0; queue < rx_cnt; queue++) {
3482 			if (!priv->rx_riwt[queue])
3483 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3484 
3485 			stmmac_rx_watchdog(priv, priv->ioaddr,
3486 					   priv->rx_riwt[queue], queue);
3487 		}
3488 	}
3489 
3490 	if (priv->hw->pcs)
3491 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3492 
3493 	/* set TX and RX rings length */
3494 	stmmac_set_rings_length(priv);
3495 
3496 	/* Enable TSO */
3497 	if (priv->tso) {
3498 		for (chan = 0; chan < tx_cnt; chan++) {
3499 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3500 
3501 			/* TSO and TBS cannot co-exist */
3502 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3503 				continue;
3504 
3505 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3506 		}
3507 	}
3508 
3509 	/* Enable Split Header */
3510 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3511 	for (chan = 0; chan < rx_cnt; chan++)
3512 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3513 
3514 
3515 	/* VLAN Tag Insertion */
3516 	if (priv->dma_cap.vlins)
3517 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3518 
3519 	/* TBS */
3520 	for (chan = 0; chan < tx_cnt; chan++) {
3521 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3522 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3523 
3524 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3525 	}
3526 
3527 	/* Configure real RX and TX queues */
3528 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3529 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3530 
3531 	/* Start the ball rolling... */
3532 	stmmac_start_all_dma(priv);
3533 
3534 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3535 
3536 	if (priv->dma_cap.fpesel) {
3537 		stmmac_fpe_start_wq(priv);
3538 
3539 		if (priv->plat->fpe_cfg->enable)
3540 			stmmac_fpe_handshake(priv, true);
3541 	}
3542 
3543 	return 0;
3544 }
3545 
3546 static void stmmac_hw_teardown(struct net_device *dev)
3547 {
3548 	struct stmmac_priv *priv = netdev_priv(dev);
3549 
3550 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3551 }
3552 
3553 static void stmmac_free_irq(struct net_device *dev,
3554 			    enum request_irq_err irq_err, int irq_idx)
3555 {
3556 	struct stmmac_priv *priv = netdev_priv(dev);
3557 	int j;
3558 
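	/* Starting from the point where the IRQ request failed (or from
	 * REQ_IRQ_ERR_ALL when tearing everything down), fall through the
	 * cases below to free every IRQ that was successfully requested.
	 */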
3559 	switch (irq_err) {
3560 	case REQ_IRQ_ERR_ALL:
3561 		irq_idx = priv->plat->tx_queues_to_use;
3562 		fallthrough;
3563 	case REQ_IRQ_ERR_TX:
3564 		for (j = irq_idx - 1; j >= 0; j--) {
3565 			if (priv->tx_irq[j] > 0) {
3566 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3567 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3568 			}
3569 		}
3570 		irq_idx = priv->plat->rx_queues_to_use;
3571 		fallthrough;
3572 	case REQ_IRQ_ERR_RX:
3573 		for (j = irq_idx - 1; j >= 0; j--) {
3574 			if (priv->rx_irq[j] > 0) {
3575 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3576 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3577 			}
3578 		}
3579 
3580 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3581 			free_irq(priv->sfty_ue_irq, dev);
3582 		fallthrough;
3583 	case REQ_IRQ_ERR_SFTY_UE:
3584 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3585 			free_irq(priv->sfty_ce_irq, dev);
3586 		fallthrough;
3587 	case REQ_IRQ_ERR_SFTY_CE:
3588 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3589 			free_irq(priv->lpi_irq, dev);
3590 		fallthrough;
3591 	case REQ_IRQ_ERR_LPI:
3592 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3593 			free_irq(priv->wol_irq, dev);
3594 		fallthrough;
3595 	case REQ_IRQ_ERR_SFTY:
3596 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3597 			free_irq(priv->sfty_irq, dev);
3598 		fallthrough;
3599 	case REQ_IRQ_ERR_WOL:
3600 		free_irq(dev->irq, dev);
3601 		fallthrough;
3602 	case REQ_IRQ_ERR_MAC:
3603 	case REQ_IRQ_ERR_NO:
3604 		/* If MAC IRQ request error, no more IRQ to free */
3605 		break;
3606 	}
3607 }
3608 
3609 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3610 {
3611 	struct stmmac_priv *priv = netdev_priv(dev);
3612 	enum request_irq_err irq_err;
3613 	cpumask_t cpu_mask;
3614 	int irq_idx = 0;
3615 	char *int_name;
3616 	int ret;
3617 	int i;
3618 
3619 	/* For common interrupt */
3620 	int_name = priv->int_name_mac;
3621 	sprintf(int_name, "%s:%s", dev->name, "mac");
3622 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3623 			  0, int_name, dev);
3624 	if (unlikely(ret < 0)) {
3625 		netdev_err(priv->dev,
3626 			   "%s: alloc mac MSI %d (error: %d)\n",
3627 			   __func__, dev->irq, ret);
3628 		irq_err = REQ_IRQ_ERR_MAC;
3629 		goto irq_error;
3630 	}
3631 
3632 	/* Request the Wake IRQ in case a separate line
3633 	 * is used for WoL
3634 	 */
3635 	priv->wol_irq_disabled = true;
3636 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3637 		int_name = priv->int_name_wol;
3638 		sprintf(int_name, "%s:%s", dev->name, "wol");
3639 		ret = request_irq(priv->wol_irq,
3640 				  stmmac_mac_interrupt,
3641 				  0, int_name, dev);
3642 		if (unlikely(ret < 0)) {
3643 			netdev_err(priv->dev,
3644 				   "%s: alloc wol MSI %d (error: %d)\n",
3645 				   __func__, priv->wol_irq, ret);
3646 			irq_err = REQ_IRQ_ERR_WOL;
3647 			goto irq_error;
3648 		}
3649 	}
3650 
3651 	/* Request the LPI IRQ in case a separate line
3652 	 * is used for LPI
3653 	 */
3654 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3655 		int_name = priv->int_name_lpi;
3656 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3657 		ret = request_irq(priv->lpi_irq,
3658 				  stmmac_mac_interrupt,
3659 				  0, int_name, dev);
3660 		if (unlikely(ret < 0)) {
3661 			netdev_err(priv->dev,
3662 				   "%s: alloc lpi MSI %d (error: %d)\n",
3663 				   __func__, priv->lpi_irq, ret);
3664 			irq_err = REQ_IRQ_ERR_LPI;
3665 			goto irq_error;
3666 		}
3667 	}
3668 
3669 	/* Request the common Safety Feature Correctable/Uncorrectable
3670 	 * Error line in case a separate line is used
3671 	 */
3672 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3673 		int_name = priv->int_name_sfty;
3674 		sprintf(int_name, "%s:%s", dev->name, "safety");
3675 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3676 				  0, int_name, dev);
3677 		if (unlikely(ret < 0)) {
3678 			netdev_err(priv->dev,
3679 				   "%s: alloc sfty MSI %d (error: %d)\n",
3680 				   __func__, priv->sfty_irq, ret);
3681 			irq_err = REQ_IRQ_ERR_SFTY;
3682 			goto irq_error;
3683 		}
3684 	}
3685 
3686 	/* Request the Safety Feature Correctable Error line in
3687 	 * case a separate line is used
3688 	 */
3689 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3690 		int_name = priv->int_name_sfty_ce;
3691 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3692 		ret = request_irq(priv->sfty_ce_irq,
3693 				  stmmac_safety_interrupt,
3694 				  0, int_name, dev);
3695 		if (unlikely(ret < 0)) {
3696 			netdev_err(priv->dev,
3697 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3698 				   __func__, priv->sfty_ce_irq, ret);
3699 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3700 			goto irq_error;
3701 		}
3702 	}
3703 
3704 	/* Request the Safety Feature Uncorrectable Error line in
3705 	 * case a separate line is used
3706 	 */
3707 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3708 		int_name = priv->int_name_sfty_ue;
3709 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3710 		ret = request_irq(priv->sfty_ue_irq,
3711 				  stmmac_safety_interrupt,
3712 				  0, int_name, dev);
3713 		if (unlikely(ret < 0)) {
3714 			netdev_err(priv->dev,
3715 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3716 				   __func__, priv->sfty_ue_irq, ret);
3717 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3718 			goto irq_error;
3719 		}
3720 	}
3721 
3722 	/* Request Rx MSI irq */
3723 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3724 		if (i >= MTL_MAX_RX_QUEUES)
3725 			break;
3726 		if (priv->rx_irq[i] == 0)
3727 			continue;
3728 
3729 		int_name = priv->int_name_rx_irq[i];
3730 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3731 		ret = request_irq(priv->rx_irq[i],
3732 				  stmmac_msi_intr_rx,
3733 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3734 		if (unlikely(ret < 0)) {
3735 			netdev_err(priv->dev,
3736 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3737 				   __func__, i, priv->rx_irq[i], ret);
3738 			irq_err = REQ_IRQ_ERR_RX;
3739 			irq_idx = i;
3740 			goto irq_error;
3741 		}
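		/* Spread the RX queue interrupts across the online CPUs */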
3742 		cpumask_clear(&cpu_mask);
3743 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3744 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3745 	}
3746 
3747 	/* Request Tx MSI irq */
3748 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3749 		if (i >= MTL_MAX_TX_QUEUES)
3750 			break;
3751 		if (priv->tx_irq[i] == 0)
3752 			continue;
3753 
3754 		int_name = priv->int_name_tx_irq[i];
3755 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3756 		ret = request_irq(priv->tx_irq[i],
3757 				  stmmac_msi_intr_tx,
3758 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3759 		if (unlikely(ret < 0)) {
3760 			netdev_err(priv->dev,
3761 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3762 				   __func__, i, priv->tx_irq[i], ret);
3763 			irq_err = REQ_IRQ_ERR_TX;
3764 			irq_idx = i;
3765 			goto irq_error;
3766 		}
3767 		cpumask_clear(&cpu_mask);
3768 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3769 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3770 	}
3771 
3772 	return 0;
3773 
3774 irq_error:
3775 	stmmac_free_irq(dev, irq_err, irq_idx);
3776 	return ret;
3777 }
3778 
3779 static int stmmac_request_irq_single(struct net_device *dev)
3780 {
3781 	struct stmmac_priv *priv = netdev_priv(dev);
3782 	enum request_irq_err irq_err;
3783 	int ret;
3784 
3785 	ret = request_irq(dev->irq, stmmac_interrupt,
3786 			  IRQF_SHARED, dev->name, dev);
3787 	if (unlikely(ret < 0)) {
3788 		netdev_err(priv->dev,
3789 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3790 			   __func__, dev->irq, ret);
3791 		irq_err = REQ_IRQ_ERR_MAC;
3792 		goto irq_error;
3793 	}
3794 
3795 	/* Request the Wake IRQ in case a separate line
3796 	 * is used for WoL
3797 	 */
3798 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3799 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3800 				  IRQF_SHARED, dev->name, dev);
3801 		if (unlikely(ret < 0)) {
3802 			netdev_err(priv->dev,
3803 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3804 				   __func__, priv->wol_irq, ret);
3805 			irq_err = REQ_IRQ_ERR_WOL;
3806 			goto irq_error;
3807 		}
3808 	}
3809 
3810 	/* Request the LPI IRQ in case a separate line is used for LPI */
3811 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3812 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3813 				  IRQF_SHARED, dev->name, dev);
3814 		if (unlikely(ret < 0)) {
3815 			netdev_err(priv->dev,
3816 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3817 				   __func__, priv->lpi_irq, ret);
3818 			irq_err = REQ_IRQ_ERR_LPI;
3819 			goto irq_error;
3820 		}
3821 	}
3822 
3823 	/* Request the common Safety Feature Correctable/Uncorrectable
3824 	 * Error line in case a separate line is used
3825 	 */
3826 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3827 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3828 				  IRQF_SHARED, dev->name, dev);
3829 		if (unlikely(ret < 0)) {
3830 			netdev_err(priv->dev,
3831 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3832 				   __func__, priv->sfty_irq, ret);
3833 			irq_err = REQ_IRQ_ERR_SFTY;
3834 			goto irq_error;
3835 		}
3836 	}
3837 
3838 	return 0;
3839 
3840 irq_error:
3841 	stmmac_free_irq(dev, irq_err, 0);
3842 	return ret;
3843 }
3844 
3845 static int stmmac_request_irq(struct net_device *dev)
3846 {
3847 	struct stmmac_priv *priv = netdev_priv(dev);
3848 	int ret;
3849 
3850 	/* Request the IRQ lines */
3851 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3852 		ret = stmmac_request_irq_multi_msi(dev);
3853 	else
3854 		ret = stmmac_request_irq_single(dev);
3855 
3856 	return ret;
3857 }
3858 
3859 /**
3860  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3861  *  @priv: driver private structure
3862  *  @mtu: MTU to setup the dma queue and buf with
3863  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3864  *  Allocate the Tx/Rx DMA queue and init them.
3865  *  Return value:
3866  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3867  */
3868 static struct stmmac_dma_conf *
3869 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3870 {
3871 	struct stmmac_dma_conf *dma_conf;
3872 	int chan, bfsize, ret;
3873 
3874 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3875 	if (!dma_conf) {
3876 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3877 			   __func__);
3878 		return ERR_PTR(-ENOMEM);
3879 	}
3880 
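	/* Pick the RX buffer size: use 16KiB buffers when the MTU and the
	 * ring mode allow it, otherwise derive the size from the MTU.
	 */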
3881 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3882 	if (bfsize < 0)
3883 		bfsize = 0;
3884 
3885 	if (bfsize < BUF_SIZE_16KiB)
3886 		bfsize = stmmac_set_bfsize(mtu, 0);
3887 
3888 	dma_conf->dma_buf_sz = bfsize;
3889 	/* Choose the TX/RX ring sizes from the ones already defined in the
3890 	 * priv struct, if any.
3891 	 */
3892 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3893 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3894 
3895 	if (!dma_conf->dma_tx_size)
3896 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3897 	if (!dma_conf->dma_rx_size)
3898 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3899 
3900 	/* Earlier check for TBS */
3901 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3902 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3903 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3904 
3905 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3906 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3907 	}
3908 
3909 	ret = alloc_dma_desc_resources(priv, dma_conf);
3910 	if (ret < 0) {
3911 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3912 			   __func__);
3913 		goto alloc_error;
3914 	}
3915 
3916 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3917 	if (ret < 0) {
3918 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3919 			   __func__);
3920 		goto init_error;
3921 	}
3922 
3923 	return dma_conf;
3924 
3925 init_error:
3926 	free_dma_desc_resources(priv, dma_conf);
3927 alloc_error:
3928 	kfree(dma_conf);
3929 	return ERR_PTR(ret);
3930 }
3931 
3932 /**
3933  *  __stmmac_open - open entry point of the driver
3934  *  @dev : pointer to the device structure.
3935  *  @dma_conf :  structure to take the dma data
3936  *  Description:
3937  *  This function is the open entry point of the driver.
3938  *  Return value:
3939  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3940  *  file on failure.
3941  */
3942 static int __stmmac_open(struct net_device *dev,
3943 			 struct stmmac_dma_conf *dma_conf)
3944 {
3945 	struct stmmac_priv *priv = netdev_priv(dev);
3946 	int mode = priv->plat->phy_interface;
3947 	u32 chan;
3948 	int ret;
3949 
3950 	ret = pm_runtime_resume_and_get(priv->device);
3951 	if (ret < 0)
3952 		return ret;
3953 
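	/* Only attach a PHY when there is no XPCS or when the XPCS is not
	 * using Clause 73 auto-negotiation.
	 */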
3954 	if ((!priv->hw->xpcs ||
3955 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3956 		ret = stmmac_init_phy(dev);
3957 		if (ret) {
3958 			netdev_err(priv->dev,
3959 				   "%s: Cannot attach to PHY (error: %d)\n",
3960 				   __func__, ret);
3961 			goto init_phy_error;
3962 		}
3963 	}
3964 
3965 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3966 
3967 	buf_sz = dma_conf->dma_buf_sz;
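	/* Carry the per-queue TBS enable state over into the new dma_conf
	 * before it replaces the current one.
	 */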
3968 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3969 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3970 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3971 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3972 
3973 	stmmac_reset_queues_param(priv);
3974 
3975 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3976 	    priv->plat->serdes_powerup) {
3977 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3978 		if (ret < 0) {
3979 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3980 				   __func__);
3981 			goto init_error;
3982 		}
3983 	}
3984 
3985 	ret = stmmac_hw_setup(dev, true);
3986 	if (ret < 0) {
3987 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3988 		goto init_error;
3989 	}
3990 
3991 	stmmac_init_coalesce(priv);
3992 
3993 	phylink_start(priv->phylink);
3994 	/* We may have called phylink_speed_down before */
3995 	phylink_speed_up(priv->phylink);
3996 
3997 	ret = stmmac_request_irq(dev);
3998 	if (ret)
3999 		goto irq_error;
4000 
4001 	stmmac_enable_all_queues(priv);
4002 	netif_tx_start_all_queues(priv->dev);
4003 	stmmac_enable_all_dma_irq(priv);
4004 
4005 	return 0;
4006 
4007 irq_error:
4008 	phylink_stop(priv->phylink);
4009 
4010 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4011 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4012 
4013 	stmmac_hw_teardown(dev);
4014 init_error:
4015 	phylink_disconnect_phy(priv->phylink);
4016 init_phy_error:
4017 	pm_runtime_put(priv->device);
4018 	return ret;
4019 }
4020 
4021 static int stmmac_open(struct net_device *dev)
4022 {
4023 	struct stmmac_priv *priv = netdev_priv(dev);
4024 	struct stmmac_dma_conf *dma_conf;
4025 	int ret;
4026 
4027 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4028 	if (IS_ERR(dma_conf))
4029 		return PTR_ERR(dma_conf);
4030 
4031 	ret = __stmmac_open(dev, dma_conf);
4032 	if (ret)
4033 		free_dma_desc_resources(priv, dma_conf);
4034 
4035 	kfree(dma_conf);
4036 	return ret;
4037 }
4038 
4039 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4040 {
4041 	set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4042 
4043 	if (priv->fpe_wq) {
4044 		destroy_workqueue(priv->fpe_wq);
4045 		priv->fpe_wq = NULL;
4046 	}
4047 
4048 	netdev_info(priv->dev, "FPE workqueue stop");
4049 }
4050 
4051 /**
4052  *  stmmac_release - close entry point of the driver
4053  *  @dev : device pointer.
4054  *  Description:
4055  *  This is the stop (close) entry point of the driver.
4056  */
4057 static int stmmac_release(struct net_device *dev)
4058 {
4059 	struct stmmac_priv *priv = netdev_priv(dev);
4060 	u32 chan;
4061 
4062 	if (device_may_wakeup(priv->device))
4063 		phylink_speed_down(priv->phylink, false);
4064 	/* Stop and disconnect the PHY */
4065 	phylink_stop(priv->phylink);
4066 	phylink_disconnect_phy(priv->phylink);
4067 
4068 	stmmac_disable_all_queues(priv);
4069 
4070 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4071 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4072 
4073 	netif_tx_disable(dev);
4074 
4075 	/* Free the IRQ lines */
4076 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4077 
4078 	if (priv->eee_enabled) {
4079 		priv->tx_path_in_lpi_mode = false;
4080 		del_timer_sync(&priv->eee_ctrl_timer);
4081 	}
4082 
4083 	/* Stop TX/RX DMA and clear the descriptors */
4084 	stmmac_stop_all_dma(priv);
4085 
4086 	/* Release and free the Rx/Tx resources */
4087 	free_dma_desc_resources(priv, &priv->dma_conf);
4088 
4089 	/* Disable the MAC Rx/Tx */
4090 	stmmac_mac_set(priv, priv->ioaddr, false);
4091 
4092 	/* Powerdown Serdes if there is */
4093 	if (priv->plat->serdes_powerdown)
4094 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4095 
4096 	stmmac_release_ptp(priv);
4097 
4098 	pm_runtime_put(priv->device);
4099 
4100 	if (priv->dma_cap.fpesel)
4101 		stmmac_fpe_stop_wq(priv);
4102 
4103 	return 0;
4104 }
4105 
4106 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4107 			       struct stmmac_tx_queue *tx_q)
4108 {
4109 	u16 tag = 0x0, inner_tag = 0x0;
4110 	u32 inner_type = 0x0;
4111 	struct dma_desc *p;
4112 
4113 	if (!priv->dma_cap.vlins)
4114 		return false;
4115 	if (!skb_vlan_tag_present(skb))
4116 		return false;
4117 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4118 		inner_tag = skb_vlan_tag_get(skb);
4119 		inner_type = STMMAC_VLAN_INSERT;
4120 	}
4121 
4122 	tag = skb_vlan_tag_get(skb);
4123 
4124 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4125 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4126 	else
4127 		p = &tx_q->dma_tx[tx_q->cur_tx];
4128 
4129 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4130 		return false;
4131 
4132 	stmmac_set_tx_owner(priv, p);
4133 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4134 	return true;
4135 }
4136 
4137 /**
4138  *  stmmac_tso_allocator - fill TSO descriptors for a payload buffer
4139  *  @priv: driver private structure
4140  *  @des: buffer start address
4141  *  @total_len: total length to fill in descriptors
4142  *  @last_segment: condition for the last descriptor
4143  *  @queue: TX queue index
4144  *  Description:
4145  *  This function fills descriptors, taking new ones from the ring as
4146  *  needed, until the whole buffer length has been consumed.
4147  */
4148 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4149 				 int total_len, bool last_segment, u32 queue)
4150 {
4151 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4152 	struct dma_desc *desc;
4153 	u32 buff_size;
4154 	int tmp_len;
4155 
4156 	tmp_len = total_len;
4157 
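	/* Consume the payload in chunks of at most TSO_MAX_BUFF_SIZE bytes,
	 * using one descriptor per chunk.
	 */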
4158 	while (tmp_len > 0) {
4159 		dma_addr_t curr_addr;
4160 
4161 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4162 						priv->dma_conf.dma_tx_size);
4163 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4164 
4165 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4166 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4167 		else
4168 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4169 
4170 		curr_addr = des + (total_len - tmp_len);
4171 		if (priv->dma_cap.addr64 <= 32)
4172 			desc->des0 = cpu_to_le32(curr_addr);
4173 		else
4174 			stmmac_set_desc_addr(priv, desc, curr_addr);
4175 
4176 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4177 			    TSO_MAX_BUFF_SIZE : tmp_len;
4178 
4179 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4180 				0, 1,
4181 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4182 				0, 0);
4183 
4184 		tmp_len -= TSO_MAX_BUFF_SIZE;
4185 	}
4186 }
4187 
4188 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4189 {
4190 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4191 	int desc_size;
4192 
4193 	if (likely(priv->extend_desc))
4194 		desc_size = sizeof(struct dma_extended_desc);
4195 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4196 		desc_size = sizeof(struct dma_edesc);
4197 	else
4198 		desc_size = sizeof(struct dma_desc);
4199 
4200 	/* The own bit must be the last setting done when preparing the
4201 	 * descriptor, and a barrier is needed to make sure everything
4202 	 * is coherent before handing the descriptors to the DMA engine.
4203 	 */
4204 	wmb();
4205 
4206 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4207 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4208 }
4209 
4210 /**
4211  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4212  *  @skb : the socket buffer
4213  *  @dev : device pointer
4214  *  Description: this is the transmit function that is called on TSO frames
4215  *  (support available on GMAC4 and newer chips).
4216  *  Diagram below show the ring programming in case of TSO frames:
4217  *
4218  *  First Descriptor
4219  *   --------
4220  *   | DES0 |---> buffer1 = L2/L3/L4 header
4221  *   | DES1 |---> TCP Payload (can continue on next descr...)
4222  *   | DES2 |---> buffer 1 and 2 len
4223  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4224  *   --------
4225  *	|
4226  *     ...
4227  *	|
4228  *   --------
4229  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4230  *   | DES1 | --|
4231  *   | DES2 | --> buffer 1 and 2 len
4232  *   | DES3 |
4233  *   --------
4234  *
4235  * MSS is fixed when TSO is enabled, so the TDES3 ctx field is not programmed per frame.
4236  */
4237 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4238 {
4239 	struct dma_desc *desc, *first, *mss_desc = NULL;
4240 	struct stmmac_priv *priv = netdev_priv(dev);
4241 	int tmp_pay_len = 0, first_tx, nfrags;
4242 	unsigned int first_entry, tx_packets;
4243 	struct stmmac_txq_stats *txq_stats;
4244 	struct stmmac_tx_queue *tx_q;
4245 	u32 pay_len, mss, queue;
4246 	u8 proto_hdr_len, hdr;
4247 	dma_addr_t des;
4248 	bool set_ic;
4249 	int i;
4250 
4251 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4252 	 *
4253 	 * Never let the HW insert the VLAN tag, since segments split by the
4254 	 * TSO engine would otherwise be left untagged by mistake.
4255 	 */
4256 	if (skb_vlan_tag_present(skb)) {
4257 		skb = __vlan_hwaccel_push_inside(skb);
4258 		if (unlikely(!skb)) {
4259 			priv->xstats.tx_dropped++;
4260 			return NETDEV_TX_OK;
4261 		}
4262 	}
4263 
4264 	nfrags = skb_shinfo(skb)->nr_frags;
4265 	queue = skb_get_queue_mapping(skb);
4266 
4267 	tx_q = &priv->dma_conf.tx_queue[queue];
4268 	txq_stats = &priv->xstats.txq_stats[queue];
4269 	first_tx = tx_q->cur_tx;
4270 
4271 	/* Compute header lengths */
4272 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4273 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4274 		hdr = sizeof(struct udphdr);
4275 	} else {
4276 		proto_hdr_len = skb_tcp_all_headers(skb);
4277 		hdr = tcp_hdrlen(skb);
4278 	}
4279 
4280 	/* Desc availability based on threshold should be enough safe */
4281 	if (unlikely(stmmac_tx_avail(priv, queue) <
4282 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4283 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4284 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4285 								queue));
4286 			/* This is a hard error, log it. */
4287 			netdev_err(priv->dev,
4288 				   "%s: Tx Ring full when queue awake\n",
4289 				   __func__);
4290 		}
4291 		return NETDEV_TX_BUSY;
4292 	}
4293 
4294 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4295 
4296 	mss = skb_shinfo(skb)->gso_size;
4297 
4298 	/* set new MSS value if needed */
4299 	if (mss != tx_q->mss) {
4300 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4301 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4302 		else
4303 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4304 
4305 		stmmac_set_mss(priv, mss_desc, mss);
4306 		tx_q->mss = mss;
4307 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4308 						priv->dma_conf.dma_tx_size);
4309 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4310 	}
4311 
4312 	if (netif_msg_tx_queued(priv)) {
4313 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4314 			__func__, hdr, proto_hdr_len, pay_len, mss);
4315 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4316 			skb->data_len);
4317 	}
4318 
4319 	first_entry = tx_q->cur_tx;
4320 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4321 
4322 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4323 		desc = &tx_q->dma_entx[first_entry].basic;
4324 	else
4325 		desc = &tx_q->dma_tx[first_entry];
4326 	first = desc;
4327 
4328 	/* first descriptor: fill Headers on Buf1 */
4329 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4330 			     DMA_TO_DEVICE);
4331 	if (dma_mapping_error(priv->device, des))
4332 		goto dma_map_err;
4333 
4334 	tx_q->tx_skbuff_dma[first_entry].buf = des;
4335 	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4336 	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4337 	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4338 
4339 	if (priv->dma_cap.addr64 <= 32) {
4340 		first->des0 = cpu_to_le32(des);
4341 
4342 		/* Fill start of payload in buff2 of first descriptor */
4343 		if (pay_len)
4344 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4345 
4346 		/* If needed take extra descriptors to fill the remaining payload */
4347 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4348 	} else {
4349 		stmmac_set_desc_addr(priv, first, des);
4350 		tmp_pay_len = pay_len;
4351 		des += proto_hdr_len;
4352 		pay_len = 0;
4353 	}
4354 
4355 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4356 
4357 	/* Prepare fragments */
4358 	for (i = 0; i < nfrags; i++) {
4359 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4360 
4361 		des = skb_frag_dma_map(priv->device, frag, 0,
4362 				       skb_frag_size(frag),
4363 				       DMA_TO_DEVICE);
4364 		if (dma_mapping_error(priv->device, des))
4365 			goto dma_map_err;
4366 
4367 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4368 				     (i == nfrags - 1), queue);
4369 
4370 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4371 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4372 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4373 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4374 	}
4375 
4376 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4377 
4378 	/* Only the last descriptor gets to point to the skb. */
4379 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4380 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4381 
4382 	/* Manage tx mitigation */
4383 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4384 	tx_q->tx_count_frames += tx_packets;
4385 
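	/* Decide whether to set the Interrupt-on-Completion bit: always when a
	 * HW timestamp is pending, otherwise based on the tx_coal_frames
	 * coalescing threshold for this queue.
	 */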
4386 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4387 		set_ic = true;
4388 	else if (!priv->tx_coal_frames[queue])
4389 		set_ic = false;
4390 	else if (tx_packets > priv->tx_coal_frames[queue])
4391 		set_ic = true;
4392 	else if ((tx_q->tx_count_frames %
4393 		  priv->tx_coal_frames[queue]) < tx_packets)
4394 		set_ic = true;
4395 	else
4396 		set_ic = false;
4397 
4398 	if (set_ic) {
4399 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4400 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4401 		else
4402 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4403 
4404 		tx_q->tx_count_frames = 0;
4405 		stmmac_set_tx_ic(priv, desc);
4406 	}
4407 
4408 	/* We've used all descriptors we need for this skb, however,
4409 	 * advance cur_tx so that it references a fresh descriptor.
4410 	 * ndo_start_xmit will fill this descriptor the next time it's
4411 	 * called and stmmac_tx_clean may clean up to this descriptor.
4412 	 */
4413 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4414 
4415 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4416 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4417 			  __func__);
4418 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4419 	}
4420 
4421 	u64_stats_update_begin(&txq_stats->q_syncp);
4422 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4423 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4424 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4425 	if (set_ic)
4426 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4427 	u64_stats_update_end(&txq_stats->q_syncp);
4428 
4429 	if (priv->sarc_type)
4430 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4431 
4432 	skb_tx_timestamp(skb);
4433 
4434 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4435 		     priv->hwts_tx_en)) {
4436 		/* declare that device is doing timestamping */
4437 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4438 		stmmac_enable_tx_timestamp(priv, first);
4439 	}
4440 
4441 	/* Complete the first descriptor before granting the DMA */
4442 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4443 			proto_hdr_len,
4444 			pay_len,
4445 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4446 			hdr / 4, (skb->len - proto_hdr_len));
4447 
4448 	/* If context desc is used to change MSS */
4449 	if (mss_desc) {
4450 		/* Make sure that first descriptor has been completely
4451 		 * written, including its own bit. This is because MSS is
4452 		 * actually before first descriptor, so we need to make
4453 		 * sure that MSS's own bit is the last thing written.
4454 		 */
4455 		dma_wmb();
4456 		stmmac_set_tx_owner(priv, mss_desc);
4457 	}
4458 
4459 	if (netif_msg_pktdata(priv)) {
4460 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4461 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4462 			tx_q->cur_tx, first, nfrags);
4463 		pr_info(">>> frame to be transmitted: ");
4464 		print_pkt(skb->data, skb_headlen(skb));
4465 	}
4466 
4467 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4468 
4469 	stmmac_flush_tx_descriptors(priv, queue);
4470 	stmmac_tx_timer_arm(priv, queue);
4471 
4472 	return NETDEV_TX_OK;
4473 
4474 dma_map_err:
4475 	dev_err(priv->device, "Tx dma map failed\n");
4476 	dev_kfree_skb(skb);
4477 	priv->xstats.tx_dropped++;
4478 	return NETDEV_TX_OK;
4479 }
4480 
4481 /**
4482  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4483  * @skb: socket buffer to check
4484  *
4485  * Check if a packet has an ethertype that will trigger the IP header checks
4486  * and IP/TCP checksum engine of the stmmac core.
4487  *
4488  * Return: true if the ethertype can trigger the checksum engine, false
4489  * otherwise
4490  */
4491 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4492 {
4493 	int depth = 0;
4494 	__be16 proto;
4495 
4496 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4497 				    &depth);
4498 
4499 	return (depth <= ETH_HLEN) &&
4500 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4501 }
4502 
4503 /**
4504  *  stmmac_xmit - Tx entry point of the driver
4505  *  @skb : the socket buffer
4506  *  @dev : device pointer
4507  *  Description : this is the tx entry point of the driver.
4508  *  It programs the chain or the ring and supports oversized frames
4509  *  and SG feature.
4510  */
4511 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4512 {
4513 	unsigned int first_entry, tx_packets, enh_desc;
4514 	struct stmmac_priv *priv = netdev_priv(dev);
4515 	unsigned int nopaged_len = skb_headlen(skb);
4516 	int i, csum_insertion = 0, is_jumbo = 0;
4517 	u32 queue = skb_get_queue_mapping(skb);
4518 	int nfrags = skb_shinfo(skb)->nr_frags;
4519 	int gso = skb_shinfo(skb)->gso_type;
4520 	struct stmmac_txq_stats *txq_stats;
4521 	struct dma_edesc *tbs_desc = NULL;
4522 	struct dma_desc *desc, *first;
4523 	struct stmmac_tx_queue *tx_q;
4524 	bool has_vlan, set_ic;
4525 	int entry, first_tx;
4526 	dma_addr_t des;
4527 
4528 	tx_q = &priv->dma_conf.tx_queue[queue];
4529 	txq_stats = &priv->xstats.txq_stats[queue];
4530 	first_tx = tx_q->cur_tx;
4531 
4532 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4533 		stmmac_disable_eee_mode(priv);
4534 
4535 	/* Manage oversized TCP frames for GMAC4 device */
4536 	if (skb_is_gso(skb) && priv->tso) {
4537 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4538 			return stmmac_tso_xmit(skb, dev);
4539 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4540 			return stmmac_tso_xmit(skb, dev);
4541 	}
4542 
4543 	if (priv->est && priv->est->enable &&
4544 	    priv->est->max_sdu[queue] &&
4545 	    skb->len > priv->est->max_sdu[queue]) {
4546 		priv->xstats.max_sdu_txq_drop[queue]++;
4547 		goto max_sdu_err;
4548 	}
4549 
4550 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4551 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4552 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4553 								queue));
4554 			/* This is a hard error, log it. */
4555 			netdev_err(priv->dev,
4556 				   "%s: Tx Ring full when queue awake\n",
4557 				   __func__);
4558 		}
4559 		return NETDEV_TX_BUSY;
4560 	}
4561 
4562 	/* Check if VLAN can be inserted by HW */
4563 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4564 
4565 	entry = tx_q->cur_tx;
4566 	first_entry = entry;
4567 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4568 
4569 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4570 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4571 	 * queues. In that case, checksum offloading for those queues that don't
4572 	 * support tx coe needs to fall back to software checksum calculation.
4573 	 *
4574 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4575 	 * also have to be checksummed in software.
4576 	 */
4577 	if (csum_insertion &&
4578 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4579 	     !stmmac_has_ip_ethertype(skb))) {
4580 		if (unlikely(skb_checksum_help(skb)))
4581 			goto dma_map_err;
4582 		csum_insertion = !csum_insertion;
4583 	}
4584 
4585 	if (likely(priv->extend_desc))
4586 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4587 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4588 		desc = &tx_q->dma_entx[entry].basic;
4589 	else
4590 		desc = tx_q->dma_tx + entry;
4591 
4592 	first = desc;
4593 
4594 	if (has_vlan)
4595 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4596 
4597 	enh_desc = priv->plat->enh_desc;
4598 	/* To program the descriptors according to the size of the frame */
4599 	if (enh_desc)
4600 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4601 
4602 	if (unlikely(is_jumbo)) {
4603 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4604 		if (unlikely(entry < 0) && (entry != -EINVAL))
4605 			goto dma_map_err;
4606 	}
4607 
4608 	for (i = 0; i < nfrags; i++) {
4609 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4610 		int len = skb_frag_size(frag);
4611 		bool last_segment = (i == (nfrags - 1));
4612 
4613 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4614 		WARN_ON(tx_q->tx_skbuff[entry]);
4615 
4616 		if (likely(priv->extend_desc))
4617 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4618 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4619 			desc = &tx_q->dma_entx[entry].basic;
4620 		else
4621 			desc = tx_q->dma_tx + entry;
4622 
4623 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4624 				       DMA_TO_DEVICE);
4625 		if (dma_mapping_error(priv->device, des))
4626 			goto dma_map_err; /* should reuse desc w/o issues */
4627 
4628 		tx_q->tx_skbuff_dma[entry].buf = des;
4629 
4630 		stmmac_set_desc_addr(priv, desc, des);
4631 
4632 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4633 		tx_q->tx_skbuff_dma[entry].len = len;
4634 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4635 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4636 
4637 		/* Prepare the descriptor and set the own bit too */
4638 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4639 				priv->mode, 1, last_segment, skb->len);
4640 	}
4641 
4642 	/* Only the last descriptor gets to point to the skb. */
4643 	tx_q->tx_skbuff[entry] = skb;
4644 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4645 
4646 	/* According to the coalesce parameter, the IC bit of the latest
4647 	 * segment is cleared and the timer is re-armed to clean the tx status.
4648 	 * This approach also takes care of the fragments: desc points to the
4649 	 * first element when there is no SG.
4650 	 */
4651 	tx_packets = (entry + 1) - first_tx;
4652 	tx_q->tx_count_frames += tx_packets;
4653 
4654 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4655 		set_ic = true;
4656 	else if (!priv->tx_coal_frames[queue])
4657 		set_ic = false;
4658 	else if (tx_packets > priv->tx_coal_frames[queue])
4659 		set_ic = true;
4660 	else if ((tx_q->tx_count_frames %
4661 		  priv->tx_coal_frames[queue]) < tx_packets)
4662 		set_ic = true;
4663 	else
4664 		set_ic = false;
4665 
4666 	if (set_ic) {
4667 		if (likely(priv->extend_desc))
4668 			desc = &tx_q->dma_etx[entry].basic;
4669 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4670 			desc = &tx_q->dma_entx[entry].basic;
4671 		else
4672 			desc = &tx_q->dma_tx[entry];
4673 
4674 		tx_q->tx_count_frames = 0;
4675 		stmmac_set_tx_ic(priv, desc);
4676 	}
4677 
4678 	/* We've used all descriptors we need for this skb, however,
4679 	 * advance cur_tx so that it references a fresh descriptor.
4680 	 * ndo_start_xmit will fill this descriptor the next time it's
4681 	 * called and stmmac_tx_clean may clean up to this descriptor.
4682 	 */
4683 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4684 	tx_q->cur_tx = entry;
4685 
4686 	if (netif_msg_pktdata(priv)) {
4687 		netdev_dbg(priv->dev,
4688 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4689 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4690 			   entry, first, nfrags);
4691 
4692 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4693 		print_pkt(skb->data, skb->len);
4694 	}
4695 
4696 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4697 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4698 			  __func__);
4699 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4700 	}
4701 
4702 	u64_stats_update_begin(&txq_stats->q_syncp);
4703 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4704 	if (set_ic)
4705 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4706 	u64_stats_update_end(&txq_stats->q_syncp);
4707 
4708 	if (priv->sarc_type)
4709 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4710 
4711 	skb_tx_timestamp(skb);
4712 
4713 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4714 	 * problems because all the descriptors are actually ready to be
4715 	 * passed to the DMA engine.
4716 	 */
4717 	if (likely(!is_jumbo)) {
4718 		bool last_segment = (nfrags == 0);
4719 
4720 		des = dma_map_single(priv->device, skb->data,
4721 				     nopaged_len, DMA_TO_DEVICE);
4722 		if (dma_mapping_error(priv->device, des))
4723 			goto dma_map_err;
4724 
4725 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4726 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4727 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4728 
4729 		stmmac_set_desc_addr(priv, first, des);
4730 
4731 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4732 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4733 
4734 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4735 			     priv->hwts_tx_en)) {
4736 			/* declare that device is doing timestamping */
4737 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4738 			stmmac_enable_tx_timestamp(priv, first);
4739 		}
4740 
4741 		/* Prepare the first descriptor setting the OWN bit too */
4742 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4743 				csum_insertion, priv->mode, 0, last_segment,
4744 				skb->len);
4745 	}
4746 
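	/* With TBS enabled, program skb->tstamp as the packet launch time */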
4747 	if (tx_q->tbs & STMMAC_TBS_EN) {
4748 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4749 
4750 		tbs_desc = &tx_q->dma_entx[first_entry];
4751 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4752 	}
4753 
4754 	stmmac_set_tx_owner(priv, first);
4755 
4756 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4757 
4758 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4759 
4760 	stmmac_flush_tx_descriptors(priv, queue);
4761 	stmmac_tx_timer_arm(priv, queue);
4762 
4763 	return NETDEV_TX_OK;
4764 
4765 dma_map_err:
4766 	netdev_err(priv->dev, "Tx DMA map failed\n");
4767 max_sdu_err:
4768 	dev_kfree_skb(skb);
4769 	priv->xstats.tx_dropped++;
4770 	return NETDEV_TX_OK;
4771 }
4772 
4773 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4774 {
4775 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4776 	__be16 vlan_proto = veth->h_vlan_proto;
4777 	u16 vlanid;
4778 
4779 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4780 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4781 	    (vlan_proto == htons(ETH_P_8021AD) &&
4782 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4783 		/* pop the vlan tag */
4784 		vlanid = ntohs(veth->h_vlan_TCI);
4785 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4786 		skb_pull(skb, VLAN_HLEN);
4787 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4788 	}
4789 }
4790 
4791 /**
4792  * stmmac_rx_refill - refill the used RX buffers
4793  * @priv: driver private structure
4794  * @queue: RX queue index
4795  * Description : this is to reallocate the RX buffers, taken from the page
4796  * pool, for the reception process that is based on zero-copy.
4797  */
4798 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4799 {
4800 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4801 	int dirty = stmmac_rx_dirty(priv, queue);
4802 	unsigned int entry = rx_q->dirty_rx;
4803 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4804 
4805 	if (priv->dma_cap.host_dma_width <= 32)
4806 		gfp |= GFP_DMA32;
4807 
4808 	while (dirty-- > 0) {
4809 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4810 		struct dma_desc *p;
4811 		bool use_rx_wd;
4812 
4813 		if (priv->extend_desc)
4814 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4815 		else
4816 			p = rx_q->dma_rx + entry;
4817 
4818 		if (!buf->page) {
4819 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4820 			if (!buf->page)
4821 				break;
4822 		}
4823 
4824 		if (priv->sph && !buf->sec_page) {
4825 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4826 			if (!buf->sec_page)
4827 				break;
4828 
4829 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4830 		}
4831 
4832 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4833 
4834 		stmmac_set_desc_addr(priv, p, buf->addr);
4835 		if (priv->sph)
4836 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4837 		else
4838 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4839 		stmmac_refill_desc3(priv, rx_q, p);
4840 
4841 		rx_q->rx_count_frames++;
4842 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4843 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4844 			rx_q->rx_count_frames = 0;
4845 
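		/* When use_rx_wd is set, the descriptor is handed to the HW
		 * without the interrupt-on-completion bit, relying on the RIWT
		 * watchdog and frame-count coalescing instead.
		 */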
4846 		use_rx_wd = !priv->rx_coal_frames[queue];
4847 		use_rx_wd |= rx_q->rx_count_frames > 0;
4848 		if (!priv->use_riwt)
4849 			use_rx_wd = false;
4850 
4851 		dma_wmb();
4852 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4853 
4854 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4855 	}
4856 	rx_q->dirty_rx = entry;
4857 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4858 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4859 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4860 }
4861 
4862 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4863 				       struct dma_desc *p,
4864 				       int status, unsigned int len)
4865 {
4866 	unsigned int plen = 0, hlen = 0;
4867 	int coe = priv->hw->rx_csum;
4868 
4869 	/* Not first descriptor, buffer is always zero */
4870 	if (priv->sph && len)
4871 		return 0;
4872 
4873 	/* First descriptor, get split header length */
4874 	stmmac_get_rx_header_len(priv, p, &hlen);
4875 	if (priv->sph && hlen) {
4876 		priv->xstats.rx_split_hdr_pkt_n++;
4877 		return hlen;
4878 	}
4879 
4880 	/* First descriptor, not last descriptor and not split header */
4881 	if (status & rx_not_ls)
4882 		return priv->dma_conf.dma_buf_sz;
4883 
4884 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4885 
4886 	/* First descriptor and last descriptor and not split header */
4887 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4888 }
4889 
4890 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4891 				       struct dma_desc *p,
4892 				       int status, unsigned int len)
4893 {
4894 	int coe = priv->hw->rx_csum;
4895 	unsigned int plen = 0;
4896 
4897 	/* Not split header, buffer is not available */
4898 	if (!priv->sph)
4899 		return 0;
4900 
4901 	/* Not last descriptor */
4902 	if (status & rx_not_ls)
4903 		return priv->dma_conf.dma_buf_sz;
4904 
4905 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4906 
4907 	/* Last descriptor */
4908 	return plen - len;
4909 }
4910 
4911 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4912 				struct xdp_frame *xdpf, bool dma_map)
4913 {
4914 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4915 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4916 	unsigned int entry = tx_q->cur_tx;
4917 	struct dma_desc *tx_desc;
4918 	dma_addr_t dma_addr;
4919 	bool set_ic;
4920 
4921 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4922 		return STMMAC_XDP_CONSUMED;
4923 
4924 	if (priv->est && priv->est->enable &&
4925 	    priv->est->max_sdu[queue] &&
4926 	    xdpf->len > priv->est->max_sdu[queue]) {
4927 		priv->xstats.max_sdu_txq_drop[queue]++;
4928 		return STMMAC_XDP_CONSUMED;
4929 	}
4930 
4931 	if (likely(priv->extend_desc))
4932 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4933 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4934 		tx_desc = &tx_q->dma_entx[entry].basic;
4935 	else
4936 		tx_desc = tx_q->dma_tx + entry;
4937 
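	/* Frames from ndo_xdp_xmit (dma_map == true) must be DMA-mapped here;
	 * XDP_TX frames already live in page-pool memory, so a DMA sync is
	 * enough.
	 */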
4938 	if (dma_map) {
4939 		dma_addr = dma_map_single(priv->device, xdpf->data,
4940 					  xdpf->len, DMA_TO_DEVICE);
4941 		if (dma_mapping_error(priv->device, dma_addr))
4942 			return STMMAC_XDP_CONSUMED;
4943 
4944 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4945 	} else {
4946 		struct page *page = virt_to_page(xdpf->data);
4947 
4948 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4949 			   xdpf->headroom;
4950 		dma_sync_single_for_device(priv->device, dma_addr,
4951 					   xdpf->len, DMA_BIDIRECTIONAL);
4952 
4953 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4954 	}
4955 
4956 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4957 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4958 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4959 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4960 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4961 
4962 	tx_q->xdpf[entry] = xdpf;
4963 
4964 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4965 
4966 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4967 			       true, priv->mode, true, true,
4968 			       xdpf->len);
4969 
4970 	tx_q->tx_count_frames++;
4971 
4972 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4973 		set_ic = true;
4974 	else
4975 		set_ic = false;
4976 
4977 	if (set_ic) {
4978 		tx_q->tx_count_frames = 0;
4979 		stmmac_set_tx_ic(priv, tx_desc);
4980 		u64_stats_update_begin(&txq_stats->q_syncp);
4981 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4982 		u64_stats_update_end(&txq_stats->q_syncp);
4983 	}
4984 
4985 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4986 
4987 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4988 	tx_q->cur_tx = entry;
4989 
4990 	return STMMAC_XDP_TX;
4991 }
4992 
4993 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4994 				   int cpu)
4995 {
4996 	int index = cpu;
4997 
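	/* Map the current CPU to a TX queue, wrapping around when there are
	 * fewer TX queues than CPUs.
	 */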
4998 	if (unlikely(index < 0))
4999 		index = 0;
5000 
5001 	while (index >= priv->plat->tx_queues_to_use)
5002 		index -= priv->plat->tx_queues_to_use;
5003 
5004 	return index;
5005 }
5006 
5007 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5008 				struct xdp_buff *xdp)
5009 {
5010 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5011 	int cpu = smp_processor_id();
5012 	struct netdev_queue *nq;
5013 	int queue;
5014 	int res;
5015 
5016 	if (unlikely(!xdpf))
5017 		return STMMAC_XDP_CONSUMED;
5018 
5019 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5020 	nq = netdev_get_tx_queue(priv->dev, queue);
5021 
5022 	__netif_tx_lock(nq, cpu);
5023 	/* Avoids TX time-out as we are sharing with slow path */
5024 	txq_trans_cond_update(nq);
5025 
5026 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5027 	if (res == STMMAC_XDP_TX)
5028 		stmmac_flush_tx_descriptors(priv, queue);
5029 
5030 	__netif_tx_unlock(nq);
5031 
5032 	return res;
5033 }
5034 
5035 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5036 				 struct bpf_prog *prog,
5037 				 struct xdp_buff *xdp)
5038 {
5039 	u32 act;
5040 	int res;
5041 
5042 	act = bpf_prog_run_xdp(prog, xdp);
5043 	switch (act) {
5044 	case XDP_PASS:
5045 		res = STMMAC_XDP_PASS;
5046 		break;
5047 	case XDP_TX:
5048 		res = stmmac_xdp_xmit_back(priv, xdp);
5049 		break;
5050 	case XDP_REDIRECT:
5051 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5052 			res = STMMAC_XDP_CONSUMED;
5053 		else
5054 			res = STMMAC_XDP_REDIRECT;
5055 		break;
5056 	default:
5057 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5058 		fallthrough;
5059 	case XDP_ABORTED:
5060 		trace_xdp_exception(priv->dev, prog, act);
5061 		fallthrough;
5062 	case XDP_DROP:
5063 		res = STMMAC_XDP_CONSUMED;
5064 		break;
5065 	}
5066 
5067 	return res;
5068 }
5069 
5070 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5071 					   struct xdp_buff *xdp)
5072 {
5073 	struct bpf_prog *prog;
5074 	int res;
5075 
5076 	prog = READ_ONCE(priv->xdp_prog);
5077 	if (!prog) {
5078 		res = STMMAC_XDP_PASS;
5079 		goto out;
5080 	}
5081 
5082 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5083 out:
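	/* Encode the verdict as an ERR_PTR: XDP_PASS (0) becomes NULL, any
	 * other verdict is recovered by the caller with -PTR_ERR().
	 */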
5084 	return ERR_PTR(-res);
5085 }
5086 
5087 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5088 				   int xdp_status)
5089 {
5090 	int cpu = smp_processor_id();
5091 	int queue;
5092 
5093 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5094 
5095 	if (xdp_status & STMMAC_XDP_TX)
5096 		stmmac_tx_timer_arm(priv, queue);
5097 
5098 	if (xdp_status & STMMAC_XDP_REDIRECT)
5099 		xdp_do_flush();
5100 }
5101 
5102 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5103 					       struct xdp_buff *xdp)
5104 {
5105 	unsigned int metasize = xdp->data - xdp->data_meta;
5106 	unsigned int datasize = xdp->data_end - xdp->data;
5107 	struct sk_buff *skb;
5108 
5109 	skb = napi_alloc_skb(&ch->rxtx_napi,
5110 			     xdp->data_end - xdp->data_hard_start);
5111 	if (unlikely(!skb))
5112 		return NULL;
5113 
5114 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5115 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5116 	if (metasize)
5117 		skb_metadata_set(skb, metasize);
5118 
5119 	return skb;
5120 }
5121 
5122 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5123 				   struct dma_desc *p, struct dma_desc *np,
5124 				   struct xdp_buff *xdp)
5125 {
5126 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5127 	struct stmmac_channel *ch = &priv->channel[queue];
5128 	unsigned int len = xdp->data_end - xdp->data;
5129 	enum pkt_hash_types hash_type;
5130 	int coe = priv->hw->rx_csum;
5131 	struct sk_buff *skb;
5132 	u32 hash;
5133 
5134 	skb = stmmac_construct_skb_zc(ch, xdp);
5135 	if (!skb) {
5136 		priv->xstats.rx_dropped++;
5137 		return;
5138 	}
5139 
5140 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5141 	if (priv->hw->hw_vlan_en)
5142 		/* MAC level stripping. */
5143 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5144 	else
5145 		/* Driver level stripping. */
5146 		stmmac_rx_vlan(priv->dev, skb);
5147 	skb->protocol = eth_type_trans(skb, priv->dev);
5148 
5149 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5150 		skb_checksum_none_assert(skb);
5151 	else
5152 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5153 
5154 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5155 		skb_set_hash(skb, hash, hash_type);
5156 
5157 	skb_record_rx_queue(skb, queue);
5158 	napi_gro_receive(&ch->rxtx_napi, skb);
5159 
5160 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5161 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5162 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5163 	u64_stats_update_end(&rxq_stats->napi_syncp);
5164 }
5165 
5166 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5167 {
5168 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5169 	unsigned int entry = rx_q->dirty_rx;
5170 	struct dma_desc *rx_desc = NULL;
5171 	bool ret = true;
5172 
5173 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5174 
5175 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5176 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5177 		dma_addr_t dma_addr;
5178 		bool use_rx_wd;
5179 
5180 		if (!buf->xdp) {
5181 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5182 			if (!buf->xdp) {
5183 				ret = false;
5184 				break;
5185 			}
5186 		}
5187 
5188 		if (priv->extend_desc)
5189 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5190 		else
5191 			rx_desc = rx_q->dma_rx + entry;
5192 
5193 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5194 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5195 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5196 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5197 
5198 		rx_q->rx_count_frames++;
5199 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5200 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5201 			rx_q->rx_count_frames = 0;
5202 
5203 		use_rx_wd = !priv->rx_coal_frames[queue];
5204 		use_rx_wd |= rx_q->rx_count_frames > 0;
5205 		if (!priv->use_riwt)
5206 			use_rx_wd = false;
5207 
5208 		dma_wmb();
5209 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5210 
5211 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5212 	}
5213 
5214 	if (rx_desc) {
5215 		rx_q->dirty_rx = entry;
5216 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5217 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5218 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5219 	}
5220 
5221 	return ret;
5222 }
5223 
5224 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5225 {
5226 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5227 	 * to represent incoming packet, whereas cb field in the same structure
5228 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5229 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5230 	 */
5231 	return (struct stmmac_xdp_buff *)xdp;
5232 }
5233 
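/* Zero-copy (XSK) counterpart of stmmac_rx(): process up to @limit frames,
 * run the XDP program on each XSK buffer and refill the ring as it drains.
 */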
5234 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5235 {
5236 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5237 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5238 	unsigned int count = 0, error = 0, len = 0;
5239 	int dirty = stmmac_rx_dirty(priv, queue);
5240 	unsigned int next_entry = rx_q->cur_rx;
5241 	u32 rx_errors = 0, rx_dropped = 0;
5242 	unsigned int desc_size;
5243 	struct bpf_prog *prog;
5244 	bool failure = false;
5245 	int xdp_status = 0;
5246 	int status = 0;
5247 
5248 	if (netif_msg_rx_status(priv)) {
5249 		void *rx_head;
5250 
5251 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5252 		if (priv->extend_desc) {
5253 			rx_head = (void *)rx_q->dma_erx;
5254 			desc_size = sizeof(struct dma_extended_desc);
5255 		} else {
5256 			rx_head = (void *)rx_q->dma_rx;
5257 			desc_size = sizeof(struct dma_desc);
5258 		}
5259 
5260 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5261 				    rx_q->dma_rx_phy, desc_size);
5262 	}
5263 	while (count < limit) {
5264 		struct stmmac_rx_buffer *buf;
5265 		struct stmmac_xdp_buff *ctx;
5266 		unsigned int buf1_len = 0;
5267 		struct dma_desc *np, *p;
5268 		int entry;
5269 		int res;
5270 
5271 		if (!count && rx_q->state_saved) {
5272 			error = rx_q->state.error;
5273 			len = rx_q->state.len;
5274 		} else {
5275 			rx_q->state_saved = false;
5276 			error = 0;
5277 			len = 0;
5278 		}
5279 
5280 		if (count >= limit)
5281 			break;
5282 
5283 read_again:
5284 		buf1_len = 0;
5285 		entry = next_entry;
5286 		buf = &rx_q->buf_pool[entry];
5287 
5288 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5289 			failure = failure ||
5290 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5291 			dirty = 0;
5292 		}
5293 
5294 		if (priv->extend_desc)
5295 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5296 		else
5297 			p = rx_q->dma_rx + entry;
5298 
5299 		/* read the status of the incoming frame */
5300 		status = stmmac_rx_status(priv, &priv->xstats, p);
5301 		/* check if managed by the DMA otherwise go ahead */
5302 		if (unlikely(status & dma_own))
5303 			break;
5304 
5305 		/* Prefetch the next RX descriptor */
5306 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5307 						priv->dma_conf.dma_rx_size);
5308 		next_entry = rx_q->cur_rx;
5309 
5310 		if (priv->extend_desc)
5311 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5312 		else
5313 			np = rx_q->dma_rx + next_entry;
5314 
5315 		prefetch(np);
5316 
5317 		/* Ensure a valid XSK buffer before proceeding */
5318 		if (!buf->xdp)
5319 			break;
5320 
5321 		if (priv->extend_desc)
5322 			stmmac_rx_extended_status(priv, &priv->xstats,
5323 						  rx_q->dma_erx + entry);
5324 		if (unlikely(status == discard_frame)) {
5325 			xsk_buff_free(buf->xdp);
5326 			buf->xdp = NULL;
5327 			dirty++;
5328 			error = 1;
5329 			if (!priv->hwts_rx_en)
5330 				rx_errors++;
5331 		}
5332 
5333 		if (unlikely(error && (status & rx_not_ls)))
5334 			goto read_again;
5335 		if (unlikely(error)) {
5336 			count++;
5337 			continue;
5338 		}
5339 
5340 		/* XSK pool expects RX frames to be 1:1 mapped to XSK buffers */
5341 		if (likely(status & rx_not_ls)) {
5342 			xsk_buff_free(buf->xdp);
5343 			buf->xdp = NULL;
5344 			dirty++;
5345 			count++;
5346 			goto read_again;
5347 		}
5348 
5349 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5350 		ctx->priv = priv;
5351 		ctx->desc = p;
5352 		ctx->ndesc = np;
5353 
5354 		/* XDP ZC frames only support the primary buffer for now */
5355 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5356 		len += buf1_len;
5357 
5358 		/* ACS is disabled; strip manually. */
5359 		if (likely(!(status & rx_not_ls))) {
5360 			buf1_len -= ETH_FCS_LEN;
5361 			len -= ETH_FCS_LEN;
5362 		}
5363 
5364 		/* RX buffer is good and fits into an XSK pool buffer */
5365 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5366 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5367 
5368 		prog = READ_ONCE(priv->xdp_prog);
5369 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5370 
5371 		switch (res) {
5372 		case STMMAC_XDP_PASS:
5373 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5374 			xsk_buff_free(buf->xdp);
5375 			break;
5376 		case STMMAC_XDP_CONSUMED:
5377 			xsk_buff_free(buf->xdp);
5378 			rx_dropped++;
5379 			break;
5380 		case STMMAC_XDP_TX:
5381 		case STMMAC_XDP_REDIRECT:
5382 			xdp_status |= res;
5383 			break;
5384 		}
5385 
5386 		buf->xdp = NULL;
5387 		dirty++;
5388 		count++;
5389 	}
5390 
5391 	if (status & rx_not_ls) {
5392 		rx_q->state_saved = true;
5393 		rx_q->state.error = error;
5394 		rx_q->state.len = len;
5395 	}
5396 
5397 	stmmac_finalize_xdp_rx(priv, xdp_status);
5398 
5399 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5400 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5401 	u64_stats_update_end(&rxq_stats->napi_syncp);
5402 
5403 	priv->xstats.rx_dropped += rx_dropped;
5404 	priv->xstats.rx_errors += rx_errors;
5405 
5406 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5407 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5408 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5409 		else
5410 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5411 
5412 		return (int)count;
5413 	}
5414 
5415 	return failure ? limit : (int)count;
5416 }
5417 
5418 /**
5419  * stmmac_rx - manage the receive process
5420  * @priv: driver private structure
5421  * @limit: napi budget
5422  * @queue: RX queue index.
5423  * Description: this is the function called by the napi poll method.
5424  * It gets all the frames inside the ring.
5425  */
5426 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5427 {
5428 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5429 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5430 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5431 	struct stmmac_channel *ch = &priv->channel[queue];
5432 	unsigned int count = 0, error = 0, len = 0;
5433 	int status = 0, coe = priv->hw->rx_csum;
5434 	unsigned int next_entry = rx_q->cur_rx;
5435 	enum dma_data_direction dma_dir;
5436 	unsigned int desc_size;
5437 	struct sk_buff *skb = NULL;
5438 	struct stmmac_xdp_buff ctx;
5439 	int xdp_status = 0;
5440 	int buf_sz;
5441 
5442 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5443 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5444 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5445 
5446 	if (netif_msg_rx_status(priv)) {
5447 		void *rx_head;
5448 
5449 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5450 		if (priv->extend_desc) {
5451 			rx_head = (void *)rx_q->dma_erx;
5452 			desc_size = sizeof(struct dma_extended_desc);
5453 		} else {
5454 			rx_head = (void *)rx_q->dma_rx;
5455 			desc_size = sizeof(struct dma_desc);
5456 		}
5457 
5458 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5459 				    rx_q->dma_rx_phy, desc_size);
5460 	}
5461 	while (count < limit) {
5462 		unsigned int buf1_len = 0, buf2_len = 0;
5463 		enum pkt_hash_types hash_type;
5464 		struct stmmac_rx_buffer *buf;
5465 		struct dma_desc *np, *p;
5466 		int entry;
5467 		u32 hash;
5468 
5469 		if (!count && rx_q->state_saved) {
5470 			skb = rx_q->state.skb;
5471 			error = rx_q->state.error;
5472 			len = rx_q->state.len;
5473 		} else {
5474 			rx_q->state_saved = false;
5475 			skb = NULL;
5476 			error = 0;
5477 			len = 0;
5478 		}
5479 
5480 read_again:
5481 		if (count >= limit)
5482 			break;
5483 
5484 		buf1_len = 0;
5485 		buf2_len = 0;
5486 		entry = next_entry;
5487 		buf = &rx_q->buf_pool[entry];
5488 
5489 		if (priv->extend_desc)
5490 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5491 		else
5492 			p = rx_q->dma_rx + entry;
5493 
5494 		/* read the status of the incoming frame */
5495 		status = stmmac_rx_status(priv, &priv->xstats, p);
5496 		/* check if managed by the DMA otherwise go ahead */
5497 		if (unlikely(status & dma_own))
5498 			break;
5499 
5500 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5501 						priv->dma_conf.dma_rx_size);
5502 		next_entry = rx_q->cur_rx;
5503 
5504 		if (priv->extend_desc)
5505 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5506 		else
5507 			np = rx_q->dma_rx + next_entry;
5508 
5509 		prefetch(np);
5510 
5511 		if (priv->extend_desc)
5512 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5513 		if (unlikely(status == discard_frame)) {
5514 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5515 			buf->page = NULL;
5516 			error = 1;
5517 			if (!priv->hwts_rx_en)
5518 				rx_errors++;
5519 		}
5520 
5521 		if (unlikely(error && (status & rx_not_ls)))
5522 			goto read_again;
5523 		if (unlikely(error)) {
5524 			dev_kfree_skb(skb);
5525 			skb = NULL;
5526 			count++;
5527 			continue;
5528 		}
5529 
5530 		/* Buffer is good. Go on. */
5531 
5532 		prefetch(page_address(buf->page) + buf->page_offset);
5533 		if (buf->sec_page)
5534 			prefetch(page_address(buf->sec_page));
5535 
5536 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5537 		len += buf1_len;
5538 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5539 		len += buf2_len;
5540 
5541 		/* ACS is disabled; strip manually. */
5542 		if (likely(!(status & rx_not_ls))) {
5543 			if (buf2_len) {
5544 				buf2_len -= ETH_FCS_LEN;
5545 				len -= ETH_FCS_LEN;
5546 			} else if (buf1_len) {
5547 				buf1_len -= ETH_FCS_LEN;
5548 				len -= ETH_FCS_LEN;
5549 			}
5550 		}
5551 
5552 		if (!skb) {
5553 			unsigned int pre_len, sync_len;
5554 
5555 			dma_sync_single_for_cpu(priv->device, buf->addr,
5556 						buf1_len, dma_dir);
5557 
5558 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5559 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5560 					 buf->page_offset, buf1_len, true);
5561 
5562 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5563 				  buf->page_offset;
5564 
5565 			ctx.priv = priv;
5566 			ctx.desc = p;
5567 			ctx.ndesc = np;
5568 
5569 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5570 			/* Due to xdp_adjust_tail: the DMA sync for_device must
5571 			 * cover the maximum length the CPU touched
5572 			 */
5573 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5574 				   buf->page_offset;
5575 			sync_len = max(sync_len, pre_len);
5576 
5577 			/* For non-XDP_PASS verdicts */
5578 			if (IS_ERR(skb)) {
5579 				unsigned int xdp_res = -PTR_ERR(skb);
5580 
5581 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5582 					page_pool_put_page(rx_q->page_pool,
5583 							   virt_to_head_page(ctx.xdp.data),
5584 							   sync_len, true);
5585 					buf->page = NULL;
5586 					rx_dropped++;
5587 
5588 					/* Clear skb as it was set to the
5589 					 * error status by the XDP program.
5590 					 */
5591 					skb = NULL;
5592 
5593 					if (unlikely((status & rx_not_ls)))
5594 						goto read_again;
5595 
5596 					count++;
5597 					continue;
5598 				} else if (xdp_res & (STMMAC_XDP_TX |
5599 						      STMMAC_XDP_REDIRECT)) {
5600 					xdp_status |= xdp_res;
5601 					buf->page = NULL;
5602 					skb = NULL;
5603 					count++;
5604 					continue;
5605 				}
5606 			}
5607 		}
5608 
5609 		if (!skb) {
5610 			/* XDP program may expand or reduce tail */
5611 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5612 
5613 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5614 			if (!skb) {
5615 				rx_dropped++;
5616 				count++;
5617 				goto drain_data;
5618 			}
5619 
5620 			/* XDP program may adjust header */
5621 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5622 			skb_put(skb, buf1_len);
5623 
5624 			/* Data payload copied into SKB, page ready for recycle */
5625 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5626 			buf->page = NULL;
5627 		} else if (buf1_len) {
5628 			dma_sync_single_for_cpu(priv->device, buf->addr,
5629 						buf1_len, dma_dir);
5630 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5631 					buf->page, buf->page_offset, buf1_len,
5632 					priv->dma_conf.dma_buf_sz);
5633 
5634 			/* Data payload appended into SKB */
5635 			skb_mark_for_recycle(skb);
5636 			buf->page = NULL;
5637 		}
5638 
5639 		if (buf2_len) {
5640 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5641 						buf2_len, dma_dir);
5642 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5643 					buf->sec_page, 0, buf2_len,
5644 					priv->dma_conf.dma_buf_sz);
5645 
5646 			/* Data payload appended into SKB */
5647 			skb_mark_for_recycle(skb);
5648 			buf->sec_page = NULL;
5649 		}
5650 
5651 drain_data:
5652 		if (likely(status & rx_not_ls))
5653 			goto read_again;
5654 		if (!skb)
5655 			continue;
5656 
5657 		/* Got entire packet into SKB. Finish it. */
5658 
5659 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5660 
5661 		if (priv->hw->hw_vlan_en)
5662 			/* MAC level stripping. */
5663 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5664 		else
5665 			/* Driver level stripping. */
5666 			stmmac_rx_vlan(priv->dev, skb);
5667 
5668 		skb->protocol = eth_type_trans(skb, priv->dev);
5669 
5670 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5671 			skb_checksum_none_assert(skb);
5672 		else
5673 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5674 
5675 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5676 			skb_set_hash(skb, hash, hash_type);
5677 
5678 		skb_record_rx_queue(skb, queue);
5679 		napi_gro_receive(&ch->rx_napi, skb);
5680 		skb = NULL;
5681 
5682 		rx_packets++;
5683 		rx_bytes += len;
5684 		count++;
5685 	}
5686 
5687 	if (status & rx_not_ls || skb) {
5688 		rx_q->state_saved = true;
5689 		rx_q->state.skb = skb;
5690 		rx_q->state.error = error;
5691 		rx_q->state.len = len;
5692 	}
5693 
5694 	stmmac_finalize_xdp_rx(priv, xdp_status);
5695 
5696 	stmmac_rx_refill(priv, queue);
5697 
5698 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5699 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5700 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5701 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5702 	u64_stats_update_end(&rxq_stats->napi_syncp);
5703 
5704 	priv->xstats.rx_dropped += rx_dropped;
5705 	priv->xstats.rx_errors += rx_errors;
5706 
5707 	return count;
5708 }
5709 
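/* NAPI poll handler for RX-only channels: clean the RX ring and re-enable
 * the RX DMA interrupt once the budget is no longer exhausted.
 */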
5710 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5711 {
5712 	struct stmmac_channel *ch =
5713 		container_of(napi, struct stmmac_channel, rx_napi);
5714 	struct stmmac_priv *priv = ch->priv_data;
5715 	struct stmmac_rxq_stats *rxq_stats;
5716 	u32 chan = ch->index;
5717 	int work_done;
5718 
5719 	rxq_stats = &priv->xstats.rxq_stats[chan];
5720 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5721 	u64_stats_inc(&rxq_stats->napi.poll);
5722 	u64_stats_update_end(&rxq_stats->napi_syncp);
5723 
5724 	work_done = stmmac_rx(priv, budget, chan);
5725 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5726 		unsigned long flags;
5727 
5728 		spin_lock_irqsave(&ch->lock, flags);
5729 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5730 		spin_unlock_irqrestore(&ch->lock, flags);
5731 	}
5732 
5733 	return work_done;
5734 }
5735 
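/* NAPI poll handler for TX-only channels: reclaim completed TX descriptors
 * and re-enable the TX DMA interrupt when all work is done.
 */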
5736 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5737 {
5738 	struct stmmac_channel *ch =
5739 		container_of(napi, struct stmmac_channel, tx_napi);
5740 	struct stmmac_priv *priv = ch->priv_data;
5741 	struct stmmac_txq_stats *txq_stats;
5742 	bool pending_packets = false;
5743 	u32 chan = ch->index;
5744 	int work_done;
5745 
5746 	txq_stats = &priv->xstats.txq_stats[chan];
5747 	u64_stats_update_begin(&txq_stats->napi_syncp);
5748 	u64_stats_inc(&txq_stats->napi.poll);
5749 	u64_stats_update_end(&txq_stats->napi_syncp);
5750 
5751 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5752 	work_done = min(work_done, budget);
5753 
5754 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5755 		unsigned long flags;
5756 
5757 		spin_lock_irqsave(&ch->lock, flags);
5758 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5759 		spin_unlock_irqrestore(&ch->lock, flags);
5760 	}
5761 
5762 	/* TX still has packets to handle, check if we need to arm the tx timer */
5763 	if (pending_packets)
5764 		stmmac_tx_timer_arm(priv, chan);
5765 
5766 	return work_done;
5767 }
5768 
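/* NAPI poll handler used when an XSK pool is attached to the channel: both
 * TX completion and zero-copy RX are serviced from the same rxtx context.
 */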
5769 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5770 {
5771 	struct stmmac_channel *ch =
5772 		container_of(napi, struct stmmac_channel, rxtx_napi);
5773 	struct stmmac_priv *priv = ch->priv_data;
5774 	bool tx_pending_packets = false;
5775 	int rx_done, tx_done, rxtx_done;
5776 	struct stmmac_rxq_stats *rxq_stats;
5777 	struct stmmac_txq_stats *txq_stats;
5778 	u32 chan = ch->index;
5779 
5780 	rxq_stats = &priv->xstats.rxq_stats[chan];
5781 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5782 	u64_stats_inc(&rxq_stats->napi.poll);
5783 	u64_stats_update_end(&rxq_stats->napi_syncp);
5784 
5785 	txq_stats = &priv->xstats.txq_stats[chan];
5786 	u64_stats_update_begin(&txq_stats->napi_syncp);
5787 	u64_stats_inc(&txq_stats->napi.poll);
5788 	u64_stats_update_end(&txq_stats->napi_syncp);
5789 
5790 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5791 	tx_done = min(tx_done, budget);
5792 
5793 	rx_done = stmmac_rx_zc(priv, budget, chan);
5794 
5795 	rxtx_done = max(tx_done, rx_done);
5796 
5797 	/* If either TX or RX work is not complete, return budget
5798 	 * and keep polling
5799 	 */
5800 	if (rxtx_done >= budget)
5801 		return budget;
5802 
5803 	/* all work done, exit the polling mode */
5804 	if (napi_complete_done(napi, rxtx_done)) {
5805 		unsigned long flags;
5806 
5807 		spin_lock_irqsave(&ch->lock, flags);
5808 		/* Both RX and TX work are complete,
5809 		 * so enable both RX & TX IRQs.
5810 		 */
5811 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5812 		spin_unlock_irqrestore(&ch->lock, flags);
5813 	}
5814 
5815 	/* TX still has packets to handle, check if we need to arm the tx timer */
5816 	if (tx_pending_packets)
5817 		stmmac_tx_timer_arm(priv, chan);
5818 
5819 	return min(rxtx_done, budget - 1);
5820 }
5821 
5822 /**
5823  *  stmmac_tx_timeout
5824  *  @dev : Pointer to net device structure
5825  *  @txqueue: the index of the hanging transmit queue
5826  *  Description: this function is called when a packet transmission fails to
5827  *   complete within a reasonable time. The driver will mark the error in the
5828  *   netdev structure and arrange for the device to be reset to a sane state
5829  *   in order to transmit a new packet.
5830  */
5831 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5832 {
5833 	struct stmmac_priv *priv = netdev_priv(dev);
5834 
5835 	stmmac_global_err(priv);
5836 }
5837 
5838 /**
5839  *  stmmac_set_rx_mode - entry point for multicast addressing
5840  *  @dev : pointer to the device structure
5841  *  Description:
5842  *  This function is a driver entry point which gets called by the kernel
5843  *  whenever multicast addresses must be enabled/disabled.
5844  *  Return value:
5845  *  void.
5846  */
5847 static void stmmac_set_rx_mode(struct net_device *dev)
5848 {
5849 	struct stmmac_priv *priv = netdev_priv(dev);
5850 
5851 	stmmac_set_filter(priv, priv->hw, dev);
5852 }
5853 
5854 /**
5855  *  stmmac_change_mtu - entry point to change MTU size for the device.
5856  *  @dev : device pointer.
5857  *  @new_mtu : the new MTU size for the device.
5858  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5859  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5860  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5861  *  Return value:
5862  *  0 on success and an appropriate negative error code as defined in
5863  *  errno.h on failure.
5864  */
5865 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5866 {
5867 	struct stmmac_priv *priv = netdev_priv(dev);
5868 	int txfifosz = priv->plat->tx_fifo_size;
5869 	struct stmmac_dma_conf *dma_conf;
5870 	const int mtu = new_mtu;
5871 	int ret;
5872 
5873 	if (txfifosz == 0)
5874 		txfifosz = priv->dma_cap.tx_fifo_size;
5875 
5876 	txfifosz /= priv->plat->tx_queues_to_use;
5877 
5878 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5879 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5880 		return -EINVAL;
5881 	}
5882 
5883 	new_mtu = STMMAC_ALIGN(new_mtu);
5884 
5885 	/* If condition true, FIFO is too small or MTU too large */
5886 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5887 		return -EINVAL;
5888 
5889 	if (netif_running(dev)) {
5890 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5891 		/* Try to allocate the new DMA conf with the new mtu */
5892 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5893 		if (IS_ERR(dma_conf)) {
5894 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5895 				   mtu);
5896 			return PTR_ERR(dma_conf);
5897 		}
5898 
5899 		stmmac_release(dev);
5900 
5901 		ret = __stmmac_open(dev, dma_conf);
5902 		if (ret) {
5903 			free_dma_desc_resources(priv, dma_conf);
5904 			kfree(dma_conf);
5905 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5906 			return ret;
5907 		}
5908 
5909 		kfree(dma_conf);
5910 
5911 		stmmac_set_rx_mode(dev);
5912 	}
5913 
5914 	WRITE_ONCE(dev->mtu, mtu);
5915 	netdev_update_features(dev);
5916 
5917 	return 0;
5918 }
5919 
5920 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5921 					     netdev_features_t features)
5922 {
5923 	struct stmmac_priv *priv = netdev_priv(dev);
5924 
5925 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5926 		features &= ~NETIF_F_RXCSUM;
5927 
5928 	if (!priv->plat->tx_coe)
5929 		features &= ~NETIF_F_CSUM_MASK;
5930 
5931 	/* Some GMAC devices have buggy Jumbo frame support that
5932 	 * needs to have the Tx COE disabled for oversized frames
5933 	 * (due to limited buffer sizes). In this case we disable
5934 	 * the TX csum insertion in the TDES and do not use SF.
5935 	 */
5936 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5937 		features &= ~NETIF_F_CSUM_MASK;
5938 
5939 	/* Disable tso if asked by ethtool */
5940 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5941 		if (features & NETIF_F_TSO)
5942 			priv->tso = true;
5943 		else
5944 			priv->tso = false;
5945 	}
5946 
5947 	return features;
5948 }
5949 
5950 static int stmmac_set_features(struct net_device *netdev,
5951 			       netdev_features_t features)
5952 {
5953 	struct stmmac_priv *priv = netdev_priv(netdev);
5954 
5955 	/* Keep the COE Type in case checksumming is supported */
5956 	if (features & NETIF_F_RXCSUM)
5957 		priv->hw->rx_csum = priv->plat->rx_coe;
5958 	else
5959 		priv->hw->rx_csum = 0;
5960 	/* No check needed because rx_coe has been set before and it will be
5961 	 * fixed in case of an issue.
5962 	 */
5963 	stmmac_rx_ipc(priv, priv->hw);
5964 
5965 	if (priv->sph_cap) {
5966 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5967 		u32 chan;
5968 
5969 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5970 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5971 	}
5972 
5973 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5974 		priv->hw->hw_vlan_en = true;
5975 	else
5976 		priv->hw->hw_vlan_en = false;
5977 
5978 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5979 
5980 	return 0;
5981 }
5982 
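/* Track the Frame Preemption (FPE) verify/response mPacket exchange with the
 * link partner and schedule the FPE workqueue to complete the handshake.
 */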
5983 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5984 {
5985 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5986 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5987 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5988 	bool *hs_enable = &fpe_cfg->hs_enable;
5989 
5990 	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5991 		return;
5992 
5993 	/* If LP has sent verify mPacket, LP is FPE capable */
5994 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5995 		if (*lp_state < FPE_STATE_CAPABLE)
5996 			*lp_state = FPE_STATE_CAPABLE;
5997 
5998 		/* If the user has requested FPE enable, respond quickly */
5999 		if (*hs_enable)
6000 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6001 						fpe_cfg,
6002 						MPACKET_RESPONSE);
6003 	}
6004 
6005 	/* If Local has sent verify mPacket, Local is FPE capable */
6006 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
6007 		if (*lo_state < FPE_STATE_CAPABLE)
6008 			*lo_state = FPE_STATE_CAPABLE;
6009 	}
6010 
6011 	/* If LP has sent response mPacket, LP is entering FPE ON */
6012 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
6013 		*lp_state = FPE_STATE_ENTERING_ON;
6014 
6015 	/* If Local has sent response mPacket, Local is entering FPE ON */
6016 	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
6017 		*lo_state = FPE_STATE_ENTERING_ON;
6018 
6019 	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
6020 	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
6021 	    priv->fpe_wq) {
6022 		queue_work(priv->fpe_wq, &priv->fpe_task);
6023 	}
6024 }
6025 
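/* Handle the interrupt sources shared by the main and MAC IRQ lines:
 * wake-up events, EST/FPE status, MAC/MTL events, PCS link and timestamping.
 */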
6026 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6027 {
6028 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6029 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6030 	u32 queues_count;
6031 	u32 queue;
6032 	bool xmac;
6033 
6034 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6035 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6036 
6037 	if (priv->irq_wake)
6038 		pm_wakeup_event(priv->device, 0);
6039 
6040 	if (priv->dma_cap.estsel)
6041 		stmmac_est_irq_status(priv, priv, priv->dev,
6042 				      &priv->xstats, tx_cnt);
6043 
6044 	if (priv->dma_cap.fpesel) {
6045 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6046 						   priv->dev);
6047 
6048 		stmmac_fpe_event_status(priv, status);
6049 	}
6050 
6051 	/* To handle the GMAC's own interrupts */
6052 	if ((priv->plat->has_gmac) || xmac) {
6053 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6054 
6055 		if (unlikely(status)) {
6056 			/* For LPI we need to save the tx status */
6057 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6058 				priv->tx_path_in_lpi_mode = true;
6059 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6060 				priv->tx_path_in_lpi_mode = false;
6061 		}
6062 
6063 		for (queue = 0; queue < queues_count; queue++)
6064 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6065 
6066 		/* PCS link status */
6067 		if (priv->hw->pcs &&
6068 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6069 			if (priv->xstats.pcs_link)
6070 				netif_carrier_on(priv->dev);
6071 			else
6072 				netif_carrier_off(priv->dev);
6073 		}
6074 
6075 		stmmac_timestamp_interrupt(priv, priv);
6076 	}
6077 }
6078 
6079 /**
6080  *  stmmac_interrupt - main ISR
6081  *  @irq: interrupt number.
6082  *  @dev_id: to pass the net device pointer.
6083  *  Description: this is the main driver interrupt service routine.
6084  *  It can call:
6085  *  o DMA service routine (to manage incoming frame reception and transmission
6086  *    status)
6087  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6088  *    interrupts.
6089  */
6090 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6091 {
6092 	struct net_device *dev = (struct net_device *)dev_id;
6093 	struct stmmac_priv *priv = netdev_priv(dev);
6094 
6095 	/* Check if adapter is up */
6096 	if (test_bit(STMMAC_DOWN, &priv->state))
6097 		return IRQ_HANDLED;
6098 
6099 	/* Check ASP error if it isn't delivered via an individual IRQ */
6100 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6101 		return IRQ_HANDLED;
6102 
6103 	/* To handle Common interrupts */
6104 	stmmac_common_interrupt(priv);
6105 
6106 	/* To handle DMA interrupts */
6107 	stmmac_dma_interrupt(priv);
6108 
6109 	return IRQ_HANDLED;
6110 }
6111 
6112 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6113 {
6114 	struct net_device *dev = (struct net_device *)dev_id;
6115 	struct stmmac_priv *priv = netdev_priv(dev);
6116 
6117 	/* Check if adapter is up */
6118 	if (test_bit(STMMAC_DOWN, &priv->state))
6119 		return IRQ_HANDLED;
6120 
6121 	/* To handle Common interrupts */
6122 	stmmac_common_interrupt(priv);
6123 
6124 	return IRQ_HANDLED;
6125 }
6126 
6127 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6128 {
6129 	struct net_device *dev = (struct net_device *)dev_id;
6130 	struct stmmac_priv *priv = netdev_priv(dev);
6131 
6132 	/* Check if adapter is up */
6133 	if (test_bit(STMMAC_DOWN, &priv->state))
6134 		return IRQ_HANDLED;
6135 
6136 	/* Check if a fatal error happened */
6137 	stmmac_safety_feat_interrupt(priv);
6138 
6139 	return IRQ_HANDLED;
6140 }
6141 
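/* Per-channel TX MSI handler: schedule NAPI for the channel and recover from
 * TX DMA errors by bumping the threshold or restarting the channel.
 */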
6142 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6143 {
6144 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6145 	struct stmmac_dma_conf *dma_conf;
6146 	int chan = tx_q->queue_index;
6147 	struct stmmac_priv *priv;
6148 	int status;
6149 
6150 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6151 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6152 
6153 	/* Check if adapter is up */
6154 	if (test_bit(STMMAC_DOWN, &priv->state))
6155 		return IRQ_HANDLED;
6156 
6157 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6158 
6159 	if (unlikely(status & tx_hard_error_bump_tc)) {
6160 		/* Try to bump up the dma threshold on this failure */
6161 		stmmac_bump_dma_threshold(priv, chan);
6162 	} else if (unlikely(status == tx_hard_error)) {
6163 		stmmac_tx_err(priv, chan);
6164 	}
6165 
6166 	return IRQ_HANDLED;
6167 }
6168 
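/* Per-channel RX MSI handler: just schedule NAPI for the channel. */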
6169 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6170 {
6171 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6172 	struct stmmac_dma_conf *dma_conf;
6173 	int chan = rx_q->queue_index;
6174 	struct stmmac_priv *priv;
6175 
6176 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6177 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6178 
6179 	/* Check if adapter is up */
6180 	if (test_bit(STMMAC_DOWN, &priv->state))
6181 		return IRQ_HANDLED;
6182 
6183 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6184 
6185 	return IRQ_HANDLED;
6186 }
6187 
6188 /**
6189  *  stmmac_ioctl - Entry point for the Ioctl
6190  *  @dev: Device pointer.
6191  *  @rq: An IOCTL-specific structure that can contain a pointer to
6192  *  a proprietary structure used to pass information to the driver.
6193  *  @cmd: IOCTL command
6194  *  Description:
6195  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6196  */
6197 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6198 {
6199 	struct stmmac_priv *priv = netdev_priv(dev);
6200 	int ret = -EOPNOTSUPP;
6201 
6202 	if (!netif_running(dev))
6203 		return -EINVAL;
6204 
6205 	switch (cmd) {
6206 	case SIOCGMIIPHY:
6207 	case SIOCGMIIREG:
6208 	case SIOCSMIIREG:
6209 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6210 		break;
6211 	case SIOCSHWTSTAMP:
6212 		ret = stmmac_hwtstamp_set(dev, rq);
6213 		break;
6214 	case SIOCGHWTSTAMP:
6215 		ret = stmmac_hwtstamp_get(dev, rq);
6216 		break;
6217 	default:
6218 		break;
6219 	}
6220 
6221 	return ret;
6222 }
6223 
6224 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6225 				    void *cb_priv)
6226 {
6227 	struct stmmac_priv *priv = cb_priv;
6228 	int ret = -EOPNOTSUPP;
6229 
6230 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6231 		return ret;
6232 
6233 	__stmmac_disable_all_queues(priv);
6234 
6235 	switch (type) {
6236 	case TC_SETUP_CLSU32:
6237 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6238 		break;
6239 	case TC_SETUP_CLSFLOWER:
6240 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6241 		break;
6242 	default:
6243 		break;
6244 	}
6245 
6246 	stmmac_enable_all_queues(priv);
6247 	return ret;
6248 }
6249 
6250 static LIST_HEAD(stmmac_block_cb_list);
6251 
6252 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6253 			   void *type_data)
6254 {
6255 	struct stmmac_priv *priv = netdev_priv(ndev);
6256 
6257 	switch (type) {
6258 	case TC_QUERY_CAPS:
6259 		return stmmac_tc_query_caps(priv, priv, type_data);
6260 	case TC_SETUP_BLOCK:
6261 		return flow_block_cb_setup_simple(type_data,
6262 						  &stmmac_block_cb_list,
6263 						  stmmac_setup_tc_block_cb,
6264 						  priv, priv, true);
6265 	case TC_SETUP_QDISC_CBS:
6266 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6267 	case TC_SETUP_QDISC_TAPRIO:
6268 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6269 	case TC_SETUP_QDISC_ETF:
6270 		return stmmac_tc_setup_etf(priv, priv, type_data);
6271 	default:
6272 		return -EOPNOTSUPP;
6273 	}
6274 }
6275 
6276 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6277 			       struct net_device *sb_dev)
6278 {
6279 	int gso = skb_shinfo(skb)->gso_type;
6280 
6281 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6282 		/*
6283 		 * There is no way to determine the number of TSO/USO
6284 		 * capable Queues. Let's always use Queue 0
6285 		 * because if TSO/USO is supported then at least this
6286 		 * one will be capable.
6287 		 */
6288 		return 0;
6289 	}
6290 
6291 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6292 }
6293 
6294 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6295 {
6296 	struct stmmac_priv *priv = netdev_priv(ndev);
6297 	int ret = 0;
6298 
6299 	ret = pm_runtime_resume_and_get(priv->device);
6300 	if (ret < 0)
6301 		return ret;
6302 
6303 	ret = eth_mac_addr(ndev, addr);
6304 	if (ret)
6305 		goto set_mac_error;
6306 
6307 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6308 
6309 set_mac_error:
6310 	pm_runtime_put(priv->device);
6311 
6312 	return ret;
6313 }
6314 
6315 #ifdef CONFIG_DEBUG_FS
6316 static struct dentry *stmmac_fs_dir;
6317 
6318 static void sysfs_display_ring(void *head, int size, int extend_desc,
6319 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6320 {
6321 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6322 	struct dma_desc *p = (struct dma_desc *)head;
6323 	unsigned int desc_size;
6324 	dma_addr_t dma_addr;
6325 	int i;
6326 
6327 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6328 	for (i = 0; i < size; i++) {
6329 		dma_addr = dma_phy_addr + i * desc_size;
6330 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6331 				i, &dma_addr,
6332 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6333 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6334 		if (extend_desc)
6335 			p = &(++ep)->basic;
6336 		else
6337 			p++;
6338 	}
6339 }
6340 
6341 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6342 {
6343 	struct net_device *dev = seq->private;
6344 	struct stmmac_priv *priv = netdev_priv(dev);
6345 	u32 rx_count = priv->plat->rx_queues_to_use;
6346 	u32 tx_count = priv->plat->tx_queues_to_use;
6347 	u32 queue;
6348 
6349 	if ((dev->flags & IFF_UP) == 0)
6350 		return 0;
6351 
6352 	for (queue = 0; queue < rx_count; queue++) {
6353 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6354 
6355 		seq_printf(seq, "RX Queue %d:\n", queue);
6356 
6357 		if (priv->extend_desc) {
6358 			seq_printf(seq, "Extended descriptor ring:\n");
6359 			sysfs_display_ring((void *)rx_q->dma_erx,
6360 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6361 		} else {
6362 			seq_printf(seq, "Descriptor ring:\n");
6363 			sysfs_display_ring((void *)rx_q->dma_rx,
6364 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6365 		}
6366 	}
6367 
6368 	for (queue = 0; queue < tx_count; queue++) {
6369 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6370 
6371 		seq_printf(seq, "TX Queue %d:\n", queue);
6372 
6373 		if (priv->extend_desc) {
6374 			seq_printf(seq, "Extended descriptor ring:\n");
6375 			sysfs_display_ring((void *)tx_q->dma_etx,
6376 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6377 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6378 			seq_printf(seq, "Descriptor ring:\n");
6379 			sysfs_display_ring((void *)tx_q->dma_tx,
6380 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6381 		}
6382 	}
6383 
6384 	return 0;
6385 }
6386 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6387 
6388 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6389 {
6390 	static const char * const dwxgmac_timestamp_source[] = {
6391 		"None",
6392 		"Internal",
6393 		"External",
6394 		"Both",
6395 	};
6396 	static const char * const dwxgmac_safety_feature_desc[] = {
6397 		"No",
6398 		"All Safety Features with ECC and Parity",
6399 		"All Safety Features without ECC or Parity",
6400 		"All Safety Features with Parity Only",
6401 		"ECC Only",
6402 		"UNDEFINED",
6403 		"UNDEFINED",
6404 		"UNDEFINED",
6405 	};
6406 	struct net_device *dev = seq->private;
6407 	struct stmmac_priv *priv = netdev_priv(dev);
6408 
6409 	if (!priv->hw_cap_support) {
6410 		seq_printf(seq, "DMA HW features not supported\n");
6411 		return 0;
6412 	}
6413 
6414 	seq_printf(seq, "==============================\n");
6415 	seq_printf(seq, "\tDMA HW features\n");
6416 	seq_printf(seq, "==============================\n");
6417 
6418 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6419 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6420 	seq_printf(seq, "\t1000 Mbps: %s\n",
6421 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6422 	seq_printf(seq, "\tHalf duplex: %s\n",
6423 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6424 	if (priv->plat->has_xgmac) {
6425 		seq_printf(seq,
6426 			   "\tNumber of Additional MAC address registers: %d\n",
6427 			   priv->dma_cap.multi_addr);
6428 	} else {
6429 		seq_printf(seq, "\tHash Filter: %s\n",
6430 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6431 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6432 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6433 	}
6434 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6435 		   (priv->dma_cap.pcs) ? "Y" : "N");
6436 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6437 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6438 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6439 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6440 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6441 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6442 	seq_printf(seq, "\tRMON module: %s\n",
6443 		   (priv->dma_cap.rmon) ? "Y" : "N");
6444 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6445 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6446 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6447 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6448 	if (priv->plat->has_xgmac)
6449 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6450 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6451 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6452 		   (priv->dma_cap.eee) ? "Y" : "N");
6453 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6454 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6455 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6456 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6457 	    priv->plat->has_xgmac) {
6458 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6459 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6460 	} else {
6461 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6462 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6463 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6464 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6465 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6466 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6467 	}
6468 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6469 		   priv->dma_cap.number_rx_channel);
6470 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6471 		   priv->dma_cap.number_tx_channel);
6472 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6473 		   priv->dma_cap.number_rx_queues);
6474 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6475 		   priv->dma_cap.number_tx_queues);
6476 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6477 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6478 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6479 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6480 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6481 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6482 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6483 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6484 		   priv->dma_cap.pps_out_num);
6485 	seq_printf(seq, "\tSafety Features: %s\n",
6486 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6487 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6488 		   priv->dma_cap.frpsel ? "Y" : "N");
6489 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6490 		   priv->dma_cap.host_dma_width);
6491 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6492 		   priv->dma_cap.rssen ? "Y" : "N");
6493 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6494 		   priv->dma_cap.vlhash ? "Y" : "N");
6495 	seq_printf(seq, "\tSplit Header: %s\n",
6496 		   priv->dma_cap.sphen ? "Y" : "N");
6497 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6498 		   priv->dma_cap.vlins ? "Y" : "N");
6499 	seq_printf(seq, "\tDouble VLAN: %s\n",
6500 		   priv->dma_cap.dvlan ? "Y" : "N");
6501 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6502 		   priv->dma_cap.l3l4fnum);
6503 	seq_printf(seq, "\tARP Offloading: %s\n",
6504 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6505 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6506 		   priv->dma_cap.estsel ? "Y" : "N");
6507 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6508 		   priv->dma_cap.fpesel ? "Y" : "N");
6509 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6510 		   priv->dma_cap.tbssel ? "Y" : "N");
6511 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6512 		   priv->dma_cap.tbs_ch_num);
6513 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6514 		   priv->dma_cap.sgfsel ? "Y" : "N");
6515 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6516 		   BIT(priv->dma_cap.ttsfd) >> 1);
6517 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6518 		   priv->dma_cap.numtc);
6519 	seq_printf(seq, "\tDCB Feature: %s\n",
6520 		   priv->dma_cap.dcben ? "Y" : "N");
6521 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6522 		   priv->dma_cap.advthword ? "Y" : "N");
6523 	seq_printf(seq, "\tPTP Offload: %s\n",
6524 		   priv->dma_cap.ptoen ? "Y" : "N");
6525 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6526 		   priv->dma_cap.osten ? "Y" : "N");
6527 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6528 		   priv->dma_cap.pfcen ? "Y" : "N");
6529 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6530 		   BIT(priv->dma_cap.frpes) << 6);
6531 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6532 		   BIT(priv->dma_cap.frpbs) << 6);
6533 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6534 		   priv->dma_cap.frppipe_num);
6535 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6536 		   priv->dma_cap.nrvf_num ?
6537 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6538 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6539 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6540 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6541 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6542 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6543 		   priv->dma_cap.cbtisel ? "Y" : "N");
6544 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6545 		   priv->dma_cap.aux_snapshot_n);
6546 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6547 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6548 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6549 		   priv->dma_cap.edma ? "Y" : "N");
6550 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6551 		   priv->dma_cap.ediffc ? "Y" : "N");
6552 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6553 		   priv->dma_cap.vxn ? "Y" : "N");
6554 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6555 		   priv->dma_cap.dbgmem ? "Y" : "N");
6556 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6557 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6558 	return 0;
6559 }
6560 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6561 
6562 /* Use network device events to rename debugfs file entries.
6563  */
6564 static int stmmac_device_event(struct notifier_block *unused,
6565 			       unsigned long event, void *ptr)
6566 {
6567 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6568 	struct stmmac_priv *priv = netdev_priv(dev);
6569 
6570 	if (dev->netdev_ops != &stmmac_netdev_ops)
6571 		goto done;
6572 
6573 	switch (event) {
6574 	case NETDEV_CHANGENAME:
6575 		if (priv->dbgfs_dir)
6576 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6577 							 priv->dbgfs_dir,
6578 							 stmmac_fs_dir,
6579 							 dev->name);
6580 		break;
6581 	}
6582 done:
6583 	return NOTIFY_DONE;
6584 }
6585 
6586 static struct notifier_block stmmac_notifier = {
6587 	.notifier_call = stmmac_device_event,
6588 };
6589 
6590 static void stmmac_init_fs(struct net_device *dev)
6591 {
6592 	struct stmmac_priv *priv = netdev_priv(dev);
6593 
6594 	rtnl_lock();
6595 
6596 	/* Create per netdev entries */
6597 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6598 
6599 	/* Entry to report DMA RX/TX rings */
6600 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6601 			    &stmmac_rings_status_fops);
6602 
6603 	/* Entry to report the DMA HW features */
6604 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6605 			    &stmmac_dma_cap_fops);
6606 
6607 	rtnl_unlock();
6608 }
6609 
6610 static void stmmac_exit_fs(struct net_device *dev)
6611 {
6612 	struct stmmac_priv *priv = netdev_priv(dev);
6613 
6614 	debugfs_remove_recursive(priv->dbgfs_dir);
6615 }
6616 #endif /* CONFIG_DEBUG_FS */
6617 
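/* Compute the CRC-32 (little-endian bit order) of a VLAN ID, as used by the
 * hardware VLAN hash filter.
 */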
6618 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6619 {
6620 	unsigned char *data = (unsigned char *)&vid_le;
6621 	unsigned char data_byte = 0;
6622 	u32 crc = ~0x0;
6623 	u32 temp = 0;
6624 	int i, bits;
6625 
6626 	bits = get_bitmask_order(VLAN_VID_MASK);
6627 	for (i = 0; i < bits; i++) {
6628 		if ((i % 8) == 0)
6629 			data_byte = data[i / 8];
6630 
6631 		temp = ((crc & 1) ^ data_byte) & 1;
6632 		crc >>= 1;
6633 		data_byte >>= 1;
6634 
6635 		if (temp)
6636 			crc ^= 0xedb88320;
6637 	}
6638 
6639 	return crc;
6640 }
6641 
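/* Rebuild the VLAN hash filter from the set of active VIDs, falling back to
 * a single perfect-match entry when the hardware has no VLAN hash support.
 */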
6642 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6643 {
6644 	u32 crc, hash = 0;
6645 	u16 pmatch = 0;
6646 	int count = 0;
6647 	u16 vid = 0;
6648 
6649 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6650 		__le16 vid_le = cpu_to_le16(vid);
6651 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6652 		hash |= (1 << crc);
6653 		count++;
6654 	}
6655 
6656 	if (!priv->dma_cap.vlhash) {
6657 		if (count > 2) /* VID = 0 always passes filter */
6658 			return -EOPNOTSUPP;
6659 
6660 		pmatch = vid;
6661 		hash = 0;
6662 	}
6663 
6664 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6665 }
6666 
6667 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6668 {
6669 	struct stmmac_priv *priv = netdev_priv(ndev);
6670 	bool is_double = false;
6671 	int ret;
6672 
6673 	ret = pm_runtime_resume_and_get(priv->device);
6674 	if (ret < 0)
6675 		return ret;
6676 
6677 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6678 		is_double = true;
6679 
6680 	set_bit(vid, priv->active_vlans);
6681 	ret = stmmac_vlan_update(priv, is_double);
6682 	if (ret) {
6683 		clear_bit(vid, priv->active_vlans);
6684 		goto err_pm_put;
6685 	}
6686 
6687 	if (priv->hw->num_vlan) {
6688 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6689 		if (ret)
6690 			goto err_pm_put;
6691 	}
6692 err_pm_put:
6693 	pm_runtime_put(priv->device);
6694 
6695 	return ret;
6696 }
6697 
6698 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6699 {
6700 	struct stmmac_priv *priv = netdev_priv(ndev);
6701 	bool is_double = false;
6702 	int ret;
6703 
6704 	ret = pm_runtime_resume_and_get(priv->device);
6705 	if (ret < 0)
6706 		return ret;
6707 
6708 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6709 		is_double = true;
6710 
6711 	clear_bit(vid, priv->active_vlans);
6712 
6713 	if (priv->hw->num_vlan) {
6714 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6715 		if (ret)
6716 			goto del_vlan_error;
6717 	}
6718 
6719 	ret = stmmac_vlan_update(priv, is_double);
6720 
6721 del_vlan_error:
6722 	pm_runtime_put(priv->device);
6723 
6724 	return ret;
6725 }
6726 
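/* ndo_bpf() handler: attach/detach an XDP program or an AF_XDP buffer pool. */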
6727 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6728 {
6729 	struct stmmac_priv *priv = netdev_priv(dev);
6730 
6731 	switch (bpf->command) {
6732 	case XDP_SETUP_PROG:
6733 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6734 	case XDP_SETUP_XSK_POOL:
6735 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6736 					     bpf->xsk.queue_id);
6737 	default:
6738 		return -EOPNOTSUPP;
6739 	}
6740 }
6741 
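/* ndo_xdp_xmit() handler: transmit a batch of XDP frames on the TX queue
 * selected for the current CPU, sharing its lock with the slow path.
 */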
6742 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6743 			   struct xdp_frame **frames, u32 flags)
6744 {
6745 	struct stmmac_priv *priv = netdev_priv(dev);
6746 	int cpu = smp_processor_id();
6747 	struct netdev_queue *nq;
6748 	int i, nxmit = 0;
6749 	int queue;
6750 
6751 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6752 		return -ENETDOWN;
6753 
6754 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6755 		return -EINVAL;
6756 
6757 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6758 	nq = netdev_get_tx_queue(priv->dev, queue);
6759 
6760 	__netif_tx_lock(nq, cpu);
6761 	/* Avoid TX time-out as we are sharing with the slow path */
6762 	txq_trans_cond_update(nq);
6763 
6764 	for (i = 0; i < num_frames; i++) {
6765 		int res;
6766 
6767 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6768 		if (res == STMMAC_XDP_CONSUMED)
6769 			break;
6770 
6771 		nxmit++;
6772 	}
6773 
6774 	if (flags & XDP_XMIT_FLUSH) {
6775 		stmmac_flush_tx_descriptors(priv, queue);
6776 		stmmac_tx_timer_arm(priv, queue);
6777 	}
6778 
6779 	__netif_tx_unlock(nq);
6780 
6781 	return nxmit;
6782 }
6783 
6784 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6785 {
6786 	struct stmmac_channel *ch = &priv->channel[queue];
6787 	unsigned long flags;
6788 
6789 	spin_lock_irqsave(&ch->lock, flags);
6790 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6791 	spin_unlock_irqrestore(&ch->lock, flags);
6792 
6793 	stmmac_stop_rx_dma(priv, queue);
6794 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6795 }
6796 
6797 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6798 {
6799 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6800 	struct stmmac_channel *ch = &priv->channel[queue];
6801 	unsigned long flags;
6802 	u32 buf_size;
6803 	int ret;
6804 
6805 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6806 	if (ret) {
6807 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6808 		return;
6809 	}
6810 
6811 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6812 	if (ret) {
6813 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6814 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6815 		return;
6816 	}
6817 
6818 	stmmac_reset_rx_queue(priv, queue);
6819 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6820 
6821 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6822 			    rx_q->dma_rx_phy, rx_q->queue_index);
6823 
6824 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6825 			     sizeof(struct dma_desc));
6826 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6827 			       rx_q->rx_tail_addr, rx_q->queue_index);
6828 
6829 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6830 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6831 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6832 				      buf_size,
6833 				      rx_q->queue_index);
6834 	} else {
6835 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6836 				      priv->dma_conf.dma_buf_sz,
6837 				      rx_q->queue_index);
6838 	}
6839 
6840 	stmmac_start_rx_dma(priv, queue);
6841 
6842 	spin_lock_irqsave(&ch->lock, flags);
6843 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6844 	spin_unlock_irqrestore(&ch->lock, flags);
6845 }
6846 
6847 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6848 {
6849 	struct stmmac_channel *ch = &priv->channel[queue];
6850 	unsigned long flags;
6851 
6852 	spin_lock_irqsave(&ch->lock, flags);
6853 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6854 	spin_unlock_irqrestore(&ch->lock, flags);
6855 
6856 	stmmac_stop_tx_dma(priv, queue);
6857 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6858 }
6859 
6860 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6861 {
6862 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6863 	struct stmmac_channel *ch = &priv->channel[queue];
6864 	unsigned long flags;
6865 	int ret;
6866 
6867 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6868 	if (ret) {
6869 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6870 		return;
6871 	}
6872 
6873 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6874 	if (ret) {
6875 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6876 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6877 		return;
6878 	}
6879 
6880 	stmmac_reset_tx_queue(priv, queue);
6881 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6882 
6883 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6884 			    tx_q->dma_tx_phy, tx_q->queue_index);
6885 
6886 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6887 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6888 
6889 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6890 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6891 			       tx_q->tx_tail_addr, tx_q->queue_index);
6892 
6893 	stmmac_start_tx_dma(priv, queue);
6894 
6895 	spin_lock_irqsave(&ch->lock, flags);
6896 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6897 	spin_unlock_irqrestore(&ch->lock, flags);
6898 }
6899 
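/* Quiesce the interface for an XDP reconfiguration: stop TX, NAPI and DMA,
 * free IRQs and descriptor resources, and disable the MAC.
 */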
6900 void stmmac_xdp_release(struct net_device *dev)
6901 {
6902 	struct stmmac_priv *priv = netdev_priv(dev);
6903 	u32 chan;
6904 
6905 	/* Ensure tx function is not running */
6906 	netif_tx_disable(dev);
6907 
6908 	/* Disable NAPI process */
6909 	stmmac_disable_all_queues(priv);
6910 
6911 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6912 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6913 
6914 	/* Free the IRQ lines */
6915 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6916 
6917 	/* Stop TX/RX DMA channels */
6918 	stmmac_stop_all_dma(priv);
6919 
6920 	/* Release and free the Rx/Tx resources */
6921 	free_dma_desc_resources(priv, &priv->dma_conf);
6922 
6923 	/* Disable the MAC Rx/Tx */
6924 	stmmac_mac_set(priv, priv->ioaddr, false);
6925 
6926 	/* set trans_start so we don't get spurious
6927 	 * watchdogs during reset
6928 	 */
6929 	netif_trans_update(dev);
6930 	netif_carrier_off(dev);
6931 }
6932 
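/* Re-open the interface after an XDP reconfiguration: reallocate the
 * descriptor rings, reprogram every DMA channel and restart MAC, DMA and NAPI.
 */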
6933 int stmmac_xdp_open(struct net_device *dev)
6934 {
6935 	struct stmmac_priv *priv = netdev_priv(dev);
6936 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6937 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6938 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6939 	struct stmmac_rx_queue *rx_q;
6940 	struct stmmac_tx_queue *tx_q;
6941 	u32 buf_size;
6942 	bool sph_en;
6943 	u32 chan;
6944 	int ret;
6945 
6946 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6947 	if (ret < 0) {
6948 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6949 			   __func__);
6950 		goto dma_desc_error;
6951 	}
6952 
6953 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6954 	if (ret < 0) {
6955 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6956 			   __func__);
6957 		goto init_error;
6958 	}
6959 
6960 	stmmac_reset_queues_param(priv);
6961 
6962 	/* DMA CSR Channel configuration */
6963 	for (chan = 0; chan < dma_csr_ch; chan++) {
6964 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6965 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6966 	}
6967 
6968 	/* Adjust Split header */
6969 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6970 
6971 	/* DMA RX Channel Configuration */
6972 	for (chan = 0; chan < rx_cnt; chan++) {
6973 		rx_q = &priv->dma_conf.rx_queue[chan];
6974 
6975 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6976 				    rx_q->dma_rx_phy, chan);
6977 
6978 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6979 				     (rx_q->buf_alloc_num *
6980 				      sizeof(struct dma_desc));
6981 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6982 				       rx_q->rx_tail_addr, chan);
6983 
6984 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6985 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6986 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6987 					      buf_size,
6988 					      rx_q->queue_index);
6989 		} else {
6990 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6991 					      priv->dma_conf.dma_buf_sz,
6992 					      rx_q->queue_index);
6993 		}
6994 
6995 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6996 	}
6997 
6998 	/* DMA TX Channel Configuration */
6999 	for (chan = 0; chan < tx_cnt; chan++) {
7000 		tx_q = &priv->dma_conf.tx_queue[chan];
7001 
7002 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7003 				    tx_q->dma_tx_phy, chan);
7004 
7005 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7006 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7007 				       tx_q->tx_tail_addr, chan);
7008 
7009 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7010 		tx_q->txtimer.function = stmmac_tx_timer;
7011 	}
7012 
7013 	/* Enable the MAC Rx/Tx */
7014 	stmmac_mac_set(priv, priv->ioaddr, true);
7015 
7016 	/* Start Rx & Tx DMA Channels */
7017 	stmmac_start_all_dma(priv);
7018 
7019 	ret = stmmac_request_irq(dev);
7020 	if (ret)
7021 		goto irq_error;
7022 
7023 	/* Enable NAPI process */
7024 	stmmac_enable_all_queues(priv);
7025 	netif_carrier_on(dev);
7026 	netif_tx_start_all_queues(dev);
7027 	stmmac_enable_all_dma_irq(priv);
7028 
7029 	return 0;
7030 
7031 irq_error:
7032 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7033 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7034 
7035 	stmmac_hw_teardown(dev);
7036 init_error:
7037 	free_dma_desc_resources(priv, &priv->dma_conf);
7038 dma_desc_error:
7039 	return ret;
7040 }
7041 
7042 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7043 {
7044 	struct stmmac_priv *priv = netdev_priv(dev);
7045 	struct stmmac_rx_queue *rx_q;
7046 	struct stmmac_tx_queue *tx_q;
7047 	struct stmmac_channel *ch;
7048 
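	/* Validate that the interface is up, XDP is enabled and the target queue has an XSK pool bound */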
7049 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7050 	    !netif_carrier_ok(priv->dev))
7051 		return -ENETDOWN;
7052 
7053 	if (!stmmac_xdp_is_enabled(priv))
7054 		return -EINVAL;
7055 
7056 	if (queue >= priv->plat->rx_queues_to_use ||
7057 	    queue >= priv->plat->tx_queues_to_use)
7058 		return -EINVAL;
7059 
7060 	rx_q = &priv->dma_conf.rx_queue[queue];
7061 	tx_q = &priv->dma_conf.tx_queue[queue];
7062 	ch = &priv->channel[queue];
7063 
7064 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7065 		return -EINVAL;
7066 
7067 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7068 		/* EQoS does not have per-DMA channel SW interrupt,
7069 		 * so we schedule RX NAPI straight-away.
7070 		 */
7071 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7072 			__napi_schedule(&ch->rxtx_napi);
7073 	}
7074 
7075 	return 0;
7076 }
7077 
7078 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7079 {
7080 	struct stmmac_priv *priv = netdev_priv(dev);
7081 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7082 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7083 	unsigned int start;
7084 	int q;
7085 
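	/* Snapshot the per-queue packet/byte counters under their u64_stats seqcounts */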
7086 	for (q = 0; q < tx_cnt; q++) {
7087 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7088 		u64 tx_packets;
7089 		u64 tx_bytes;
7090 
7091 		do {
7092 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7093 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7094 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7095 		do {
7096 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7097 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7098 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7099 
7100 		stats->tx_packets += tx_packets;
7101 		stats->tx_bytes += tx_bytes;
7102 	}
7103 
7104 	for (q = 0; q < rx_cnt; q++) {
7105 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7106 		u64 rx_packets;
7107 		u64 rx_bytes;
7108 
7109 		do {
7110 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7111 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7112 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7113 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7114 
7115 		stats->rx_packets += rx_packets;
7116 		stats->rx_bytes += rx_bytes;
7117 	}
7118 
7119 	stats->rx_dropped = priv->xstats.rx_dropped;
7120 	stats->rx_errors = priv->xstats.rx_errors;
7121 	stats->tx_dropped = priv->xstats.tx_dropped;
7122 	stats->tx_errors = priv->xstats.tx_errors;
7123 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7124 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7125 	stats->rx_length_errors = priv->xstats.rx_length;
7126 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7127 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7128 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7129 }
7130 
7131 static const struct net_device_ops stmmac_netdev_ops = {
7132 	.ndo_open = stmmac_open,
7133 	.ndo_start_xmit = stmmac_xmit,
7134 	.ndo_stop = stmmac_release,
7135 	.ndo_change_mtu = stmmac_change_mtu,
7136 	.ndo_fix_features = stmmac_fix_features,
7137 	.ndo_set_features = stmmac_set_features,
7138 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7139 	.ndo_tx_timeout = stmmac_tx_timeout,
7140 	.ndo_eth_ioctl = stmmac_ioctl,
7141 	.ndo_get_stats64 = stmmac_get_stats64,
7142 	.ndo_setup_tc = stmmac_setup_tc,
7143 	.ndo_select_queue = stmmac_select_queue,
7144 	.ndo_set_mac_address = stmmac_set_mac_address,
7145 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7146 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7147 	.ndo_bpf = stmmac_bpf,
7148 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7149 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7150 };
7151 
7152 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7153 {
7154 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7155 		return;
7156 	if (test_bit(STMMAC_DOWN, &priv->state))
7157 		return;
7158 
7159 	netdev_err(priv->dev, "Reset adapter.\n");
7160 
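	/* Serialize against concurrent resets and bounce the interface under rtnl to reinitialize the HW */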
7161 	rtnl_lock();
7162 	netif_trans_update(priv->dev);
7163 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7164 		usleep_range(1000, 2000);
7165 
7166 	set_bit(STMMAC_DOWN, &priv->state);
7167 	dev_close(priv->dev);
7168 	dev_open(priv->dev, NULL);
7169 	clear_bit(STMMAC_DOWN, &priv->state);
7170 	clear_bit(STMMAC_RESETING, &priv->state);
7171 	rtnl_unlock();
7172 }
7173 
7174 static void stmmac_service_task(struct work_struct *work)
7175 {
7176 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7177 			service_task);
7178 
7179 	stmmac_reset_subtask(priv);
7180 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7181 }
7182 
7183 /**
7184  *  stmmac_hw_init - Init the MAC device
7185  *  @priv: driver private structure
7186  *  Description: this function configures the MAC device according to
7187  *  some platform parameters or the HW capability register. It prepares the
7188  *  driver to use either ring or chain mode and to set up either enhanced or
7189  *  normal descriptors.
7190  */
7191 static int stmmac_hw_init(struct stmmac_priv *priv)
7192 {
7193 	int ret;
7194 
7195 	/* dwmac-sun8i only works in chain mode */
7196 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7197 		chain_mode = 1;
7198 	priv->chain_mode = chain_mode;
7199 
7200 	/* Initialize HW Interface */
7201 	ret = stmmac_hwif_init(priv);
7202 	if (ret)
7203 		return ret;
7204 
7205 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7206 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7207 	if (priv->hw_cap_support) {
7208 		dev_info(priv->device, "DMA HW capability register supported\n");
7209 
7210 		/* We can override some gmac/dma configuration fields
7211 		 * (e.g. enh_desc, tx_coe) that are passed through the
7212 		 * platform with the values from the HW capability
7213 		 * register (if supported).
7214 		 */
7215 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7216 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7217 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7218 		priv->hw->pmt = priv->plat->pmt;
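		/* hash_tb_sz encodes the hash table size; derive the number of multicast filter bins and its log2 */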
7219 		if (priv->dma_cap.hash_tb_sz) {
7220 			priv->hw->multicast_filter_bins =
7221 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7222 			priv->hw->mcast_bits_log2 =
7223 					ilog2(priv->hw->multicast_filter_bins);
7224 		}
7225 
7226 		/* TXCOE doesn't work in thresh DMA mode */
7227 		if (priv->plat->force_thresh_dma_mode)
7228 			priv->plat->tx_coe = 0;
7229 		else
7230 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7231 
7232 		/* In case of GMAC4, rx_coe comes from the HW cap register. */
7233 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7234 
7235 		if (priv->dma_cap.rx_coe_type2)
7236 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7237 		else if (priv->dma_cap.rx_coe_type1)
7238 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7239 
7240 	} else {
7241 		dev_info(priv->device, "No HW DMA feature register supported\n");
7242 	}
7243 
7244 	if (priv->plat->rx_coe) {
7245 		priv->hw->rx_csum = priv->plat->rx_coe;
7246 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7247 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7248 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7249 	}
7250 	if (priv->plat->tx_coe)
7251 		dev_info(priv->device, "TX Checksum insertion supported\n");
7252 
7253 	if (priv->plat->pmt) {
7254 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7255 		device_set_wakeup_capable(priv->device, 1);
7256 	}
7257 
7258 	if (priv->dma_cap.tsoen)
7259 		dev_info(priv->device, "TSO supported\n");
7260 
7261 	priv->hw->vlan_fail_q_en =
7262 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7263 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7264 
7265 	/* Run HW quirks, if any */
7266 	if (priv->hwif_quirks) {
7267 		ret = priv->hwif_quirks(priv);
7268 		if (ret)
7269 			return ret;
7270 	}
7271 
7272 	/* Rx Watchdog is available in cores newer than 3.40.
7273 	 * In some cases, for example on buggy HW, this feature
7274 	 * has to be disabled; this can be done by passing the
7275 	 * riwt_off field from the platform.
7276 	 */
7277 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7278 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7279 		priv->use_riwt = 1;
7280 		dev_info(priv->device,
7281 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7282 	}
7283 
7284 	return 0;
7285 }
7286 
7287 static void stmmac_napi_add(struct net_device *dev)
7288 {
7289 	struct stmmac_priv *priv = netdev_priv(dev);
7290 	u32 queue, maxq;
7291 
7292 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7293 
7294 	for (queue = 0; queue < maxq; queue++) {
7295 		struct stmmac_channel *ch = &priv->channel[queue];
7296 
7297 		ch->priv_data = priv;
7298 		ch->index = queue;
7299 		spin_lock_init(&ch->lock);
7300 
7301 		if (queue < priv->plat->rx_queues_to_use) {
7302 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7303 		}
7304 		if (queue < priv->plat->tx_queues_to_use) {
7305 			netif_napi_add_tx(dev, &ch->tx_napi,
7306 					  stmmac_napi_poll_tx);
7307 		}
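		/* The combined rx/tx NAPI instance services channels running the AF_XDP zero-copy path */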
7308 		if (queue < priv->plat->rx_queues_to_use &&
7309 		    queue < priv->plat->tx_queues_to_use) {
7310 			netif_napi_add(dev, &ch->rxtx_napi,
7311 				       stmmac_napi_poll_rxtx);
7312 		}
7313 	}
7314 }
7315 
7316 static void stmmac_napi_del(struct net_device *dev)
7317 {
7318 	struct stmmac_priv *priv = netdev_priv(dev);
7319 	u32 queue, maxq;
7320 
7321 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7322 
7323 	for (queue = 0; queue < maxq; queue++) {
7324 		struct stmmac_channel *ch = &priv->channel[queue];
7325 
7326 		if (queue < priv->plat->rx_queues_to_use)
7327 			netif_napi_del(&ch->rx_napi);
7328 		if (queue < priv->plat->tx_queues_to_use)
7329 			netif_napi_del(&ch->tx_napi);
7330 		if (queue < priv->plat->rx_queues_to_use &&
7331 		    queue < priv->plat->tx_queues_to_use) {
7332 			netif_napi_del(&ch->rxtx_napi);
7333 		}
7334 	}
7335 }
7336 
7337 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7338 {
7339 	struct stmmac_priv *priv = netdev_priv(dev);
7340 	int ret = 0, i;
7341 
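	/* Bring the interface down, resize the queue/NAPI setup, then restore it */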
7342 	if (netif_running(dev))
7343 		stmmac_release(dev);
7344 
7345 	stmmac_napi_del(dev);
7346 
7347 	priv->plat->rx_queues_to_use = rx_cnt;
7348 	priv->plat->tx_queues_to_use = tx_cnt;
7349 	if (!netif_is_rxfh_configured(dev))
7350 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7351 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7352 									rx_cnt);
7353 
7354 	stmmac_napi_add(dev);
7355 
7356 	if (netif_running(dev))
7357 		ret = stmmac_open(dev);
7358 
7359 	return ret;
7360 }
7361 
7362 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7363 {
7364 	struct stmmac_priv *priv = netdev_priv(dev);
7365 	int ret = 0;
7366 
7367 	if (netif_running(dev))
7368 		stmmac_release(dev);
7369 
7370 	priv->dma_conf.dma_rx_size = rx_size;
7371 	priv->dma_conf.dma_tx_size = tx_size;
7372 
7373 	if (netif_running(dev))
7374 		ret = stmmac_open(dev);
7375 
7376 	return ret;
7377 }
7378 
7379 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
7380 static void stmmac_fpe_lp_task(struct work_struct *work)
7381 {
7382 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7383 						fpe_task);
7384 	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7385 	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7386 	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7387 	bool *hs_enable = &fpe_cfg->hs_enable;
7388 	bool *enable = &fpe_cfg->enable;
7389 	int retries = 20;
7390 
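	/* Poll for up to ~10 seconds (20 retries x 500 ms) for the FPE handshake to complete */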
7391 	while (retries-- > 0) {
7392 		/* Bail out immediately if FPE handshake is OFF */
7393 		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7394 			break;
7395 
7396 		if (*lo_state == FPE_STATE_ENTERING_ON &&
7397 		    *lp_state == FPE_STATE_ENTERING_ON) {
7398 			stmmac_fpe_configure(priv, priv->ioaddr,
7399 					     fpe_cfg,
7400 					     priv->plat->tx_queues_to_use,
7401 					     priv->plat->rx_queues_to_use,
7402 					     *enable);
7403 
7404 			netdev_info(priv->dev, "configured FPE\n");
7405 
7406 			*lo_state = FPE_STATE_ON;
7407 			*lp_state = FPE_STATE_ON;
7408 			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7409 			break;
7410 		}
7411 
7412 		if ((*lo_state == FPE_STATE_CAPABLE ||
7413 		     *lo_state == FPE_STATE_ENTERING_ON) &&
7414 		     *lp_state != FPE_STATE_ON) {
7415 			netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
7416 				    *lo_state, *lp_state);
7417 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7418 						fpe_cfg,
7419 						MPACKET_VERIFY);
7420 		}
7421 		/* Sleep then retry */
7422 		msleep(500);
7423 	}
7424 
7425 	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7426 }
7427 
7428 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7429 {
7430 	if (priv->plat->fpe_cfg->hs_enable != enable) {
7431 		if (enable) {
7432 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7433 						priv->plat->fpe_cfg,
7434 						MPACKET_VERIFY);
7435 		} else {
7436 			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7437 			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7438 		}
7439 
7440 		priv->plat->fpe_cfg->hs_enable = enable;
7441 	}
7442 }
7443 
7444 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7445 {
7446 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7447 	struct dma_desc *desc_contains_ts = ctx->desc;
7448 	struct stmmac_priv *priv = ctx->priv;
7449 	struct dma_desc *ndesc = ctx->ndesc;
7450 	struct dma_desc *desc = ctx->desc;
7451 	u64 ns = 0;
7452 
7453 	if (!priv->hwts_rx_en)
7454 		return -ENODATA;
7455 
7456 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7457 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7458 		desc_contains_ts = ndesc;
7459 
7460 	/* Check if timestamp is available */
7461 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7462 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7463 		ns -= priv->plat->cdc_error_adj;
7464 		*timestamp = ns_to_ktime(ns);
7465 		return 0;
7466 	}
7467 
7468 	return -ENODATA;
7469 }
7470 
7471 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7472 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7473 };
7474 
7475 /**
7476  * stmmac_dvr_probe
7477  * @device: device pointer
7478  * @plat_dat: platform data pointer
7479  * @res: stmmac resource pointer
7480  * Description: this is the main probe function used to
7481  * call alloc_etherdev and allocate the priv structure.
7482  * Return:
7483  * returns 0 on success, otherwise errno.
7484  */
7485 int stmmac_dvr_probe(struct device *device,
7486 		     struct plat_stmmacenet_data *plat_dat,
7487 		     struct stmmac_resources *res)
7488 {
7489 	struct net_device *ndev = NULL;
7490 	struct stmmac_priv *priv;
7491 	u32 rxq;
7492 	int i, ret = 0;
7493 
7494 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7495 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7496 	if (!ndev)
7497 		return -ENOMEM;
7498 
7499 	SET_NETDEV_DEV(ndev, device);
7500 
7501 	priv = netdev_priv(ndev);
7502 	priv->device = device;
7503 	priv->dev = ndev;
7504 
7505 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7506 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7507 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7508 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7509 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7510 	}
7511 
7512 	priv->xstats.pcpu_stats =
7513 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7514 	if (!priv->xstats.pcpu_stats)
7515 		return -ENOMEM;
7516 
7517 	stmmac_set_ethtool_ops(ndev);
7518 	priv->pause = pause;
7519 	priv->plat = plat_dat;
7520 	priv->ioaddr = res->addr;
7521 	priv->dev->base_addr = (unsigned long)res->addr;
7522 	priv->plat->dma_cfg->multi_msi_en =
7523 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7524 
7525 	priv->dev->irq = res->irq;
7526 	priv->wol_irq = res->wol_irq;
7527 	priv->lpi_irq = res->lpi_irq;
7528 	priv->sfty_irq = res->sfty_irq;
7529 	priv->sfty_ce_irq = res->sfty_ce_irq;
7530 	priv->sfty_ue_irq = res->sfty_ue_irq;
7531 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7532 		priv->rx_irq[i] = res->rx_irq[i];
7533 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7534 		priv->tx_irq[i] = res->tx_irq[i];
7535 
7536 	if (!is_zero_ether_addr(res->mac))
7537 		eth_hw_addr_set(priv->dev, res->mac);
7538 
7539 	dev_set_drvdata(device, priv->dev);
7540 
7541 	/* Verify driver arguments */
7542 	stmmac_verify_args();
7543 
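	/* Bitmap tracking which queues have an AF_XDP zero-copy pool attached */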
7544 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7545 	if (!priv->af_xdp_zc_qps)
7546 		return -ENOMEM;
7547 
7548 	/* Allocate workqueue */
7549 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7550 	if (!priv->wq) {
7551 		dev_err(priv->device, "failed to create workqueue\n");
7552 		ret = -ENOMEM;
7553 		goto error_wq_init;
7554 	}
7555 
7556 	INIT_WORK(&priv->service_task, stmmac_service_task);
7557 
7558 	/* Initialize Link Partner FPE workqueue */
7559 	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7560 
7561 	/* Override with kernel parameters if supplied XXX CRS XXX
7562 	 * this needs to have multiple instances
7563 	 */
7564 	if ((phyaddr >= 0) && (phyaddr <= 31))
7565 		priv->plat->phy_addr = phyaddr;
7566 
7567 	if (priv->plat->stmmac_rst) {
7568 		ret = reset_control_assert(priv->plat->stmmac_rst);
7569 		reset_control_deassert(priv->plat->stmmac_rst);
7570 		/* Some reset controllers have only a reset callback instead of
7571 		 * an assert + deassert callback pair.
7572 		 */
7573 		if (ret == -ENOTSUPP)
7574 			reset_control_reset(priv->plat->stmmac_rst);
7575 	}
7576 
7577 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7578 	if (ret == -ENOTSUPP)
7579 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7580 			ERR_PTR(ret));
7581 
7582 	/* Wait a bit for the reset to take effect */
7583 	udelay(10);
7584 
7585 	/* Init MAC and get the capabilities */
7586 	ret = stmmac_hw_init(priv);
7587 	if (ret)
7588 		goto error_hw_init;
7589 
7590 	/* Only DWMAC core versions 5.20 and onwards support HW descriptor prefetch.
7591 	 */
7592 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7593 		priv->plat->dma_cfg->dche = false;
7594 
7595 	stmmac_check_ether_addr(priv);
7596 
7597 	ndev->netdev_ops = &stmmac_netdev_ops;
7598 
7599 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7600 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7601 
7602 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7603 			    NETIF_F_RXCSUM;
7604 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7605 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7606 
7607 	ret = stmmac_tc_init(priv, priv);
7608 	if (!ret) {
7609 		ndev->hw_features |= NETIF_F_HW_TC;
7610 	}
7611 
7612 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7613 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7614 		if (priv->plat->has_gmac4)
7615 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7616 		priv->tso = true;
7617 		dev_info(priv->device, "TSO feature enabled\n");
7618 	}
7619 
7620 	if (priv->dma_cap.sphen &&
7621 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7622 		ndev->hw_features |= NETIF_F_GRO;
7623 		priv->sph_cap = true;
7624 		priv->sph = priv->sph_cap;
7625 		dev_info(priv->device, "SPH feature enabled\n");
7626 	}
7627 
7628 	/* Ideally our host DMA address width is the same as for the
7629 	 * device. However, it may differ and then we have to use our
7630 	 * host DMA width for allocation and the device DMA width for
7631 	 * register handling.
7632 	 */
7633 	if (priv->plat->host_dma_width)
7634 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7635 	else
7636 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7637 
7638 	if (priv->dma_cap.host_dma_width) {
7639 		ret = dma_set_mask_and_coherent(device,
7640 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7641 		if (!ret) {
7642 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7643 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7644 
7645 			/*
7646 			 * If more than 32 bits can be addressed, make sure to
7647 			 * enable enhanced addressing mode.
7648 			 */
7649 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7650 				priv->plat->dma_cfg->eame = true;
7651 		} else {
7652 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7653 			if (ret) {
7654 				dev_err(priv->device, "Failed to set DMA Mask\n");
7655 				goto error_hw_init;
7656 			}
7657 
7658 			priv->dma_cap.host_dma_width = 32;
7659 		}
7660 	}
7661 
7662 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7663 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7664 #ifdef STMMAC_VLAN_TAG_USED
7665 	/* Both mac100 and gmac support receive VLAN tag detection */
7666 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7667 	if (priv->plat->has_gmac4) {
7668 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7669 		priv->hw->hw_vlan_en = true;
7670 	}
7671 	if (priv->dma_cap.vlhash) {
7672 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7673 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7674 	}
7675 	if (priv->dma_cap.vlins) {
7676 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7677 		if (priv->dma_cap.dvlan)
7678 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7679 	}
7680 #endif
7681 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7682 
7683 	priv->xstats.threshold = tc;
7684 
7685 	/* Initialize RSS */
7686 	rxq = priv->plat->rx_queues_to_use;
7687 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7688 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7689 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7690 
7691 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7692 		ndev->features |= NETIF_F_RXHASH;
7693 
7694 	ndev->vlan_features |= ndev->features;
7695 
7696 	/* MTU range: 46 - hw-specific max */
7697 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7698 	if (priv->plat->has_xgmac)
7699 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7700 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7701 		ndev->max_mtu = JUMBO_LEN;
7702 	else
7703 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7704 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7705 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7706 	 */
7707 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7708 	    (priv->plat->maxmtu >= ndev->min_mtu))
7709 		ndev->max_mtu = priv->plat->maxmtu;
7710 	else if (priv->plat->maxmtu < ndev->min_mtu)
7711 		dev_warn(priv->device,
7712 			 "%s: warning: maxmtu having invalid value (%d)\n",
7713 			 __func__, priv->plat->maxmtu);
7714 
7715 	if (flow_ctrl)
7716 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7717 
7718 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7719 
7720 	/* Setup channels NAPI */
7721 	stmmac_napi_add(ndev);
7722 
7723 	mutex_init(&priv->lock);
7724 
7725 	/* If a specific clk_csr value is passed from the platform,
7726 	 * this means that the CSR Clock Range selection cannot be
7727 	 * changed at run-time and is fixed. Otherwise, the driver will
7728 	 * try to set the MDC clock dynamically according to the
7729 	 * actual CSR clock input.
7730 	 */
7731 	if (priv->plat->clk_csr >= 0)
7732 		priv->clk_csr = priv->plat->clk_csr;
7733 	else
7734 		stmmac_clk_csr_set(priv);
7735 
7736 	stmmac_check_pcs_mode(priv);
7737 
7738 	pm_runtime_get_noresume(device);
7739 	pm_runtime_set_active(device);
7740 	if (!pm_runtime_enabled(device))
7741 		pm_runtime_enable(device);
7742 
7743 	ret = stmmac_mdio_register(ndev);
7744 	if (ret < 0) {
7745 		dev_err_probe(priv->device, ret,
7746 			      "MDIO bus (id: %d) registration failed\n",
7747 			      priv->plat->bus_id);
7748 		goto error_mdio_register;
7749 	}
7750 
7751 	if (priv->plat->speed_mode_2500)
7752 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7753 
7754 	ret = stmmac_pcs_setup(ndev);
7755 	if (ret)
7756 		goto error_pcs_setup;
7757 
7758 	ret = stmmac_phy_setup(priv);
7759 	if (ret) {
7760 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7761 		goto error_phy_setup;
7762 	}
7763 
7764 	ret = register_netdev(ndev);
7765 	if (ret) {
7766 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7767 			__func__, ret);
7768 		goto error_netdev_register;
7769 	}
7770 
7771 #ifdef CONFIG_DEBUG_FS
7772 	stmmac_init_fs(ndev);
7773 #endif
7774 
7775 	if (priv->plat->dump_debug_regs)
7776 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7777 
7778 	/* Let pm_runtime_put() disable the clocks.
7779 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7780 	 */
7781 	pm_runtime_put(device);
7782 
7783 	return ret;
7784 
7785 error_netdev_register:
7786 	phylink_destroy(priv->phylink);
7787 error_phy_setup:
7788 	stmmac_pcs_clean(ndev);
7789 error_pcs_setup:
7790 	stmmac_mdio_unregister(ndev);
7791 error_mdio_register:
7792 	stmmac_napi_del(ndev);
7793 error_hw_init:
7794 	destroy_workqueue(priv->wq);
7795 error_wq_init:
7796 	bitmap_free(priv->af_xdp_zc_qps);
7797 
7798 	return ret;
7799 }
7800 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7801 
7802 /**
7803  * stmmac_dvr_remove
7804  * @dev: device pointer
7805  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7806  * changes the link status and releases the DMA descriptor rings.
7807  */
7808 void stmmac_dvr_remove(struct device *dev)
7809 {
7810 	struct net_device *ndev = dev_get_drvdata(dev);
7811 	struct stmmac_priv *priv = netdev_priv(ndev);
7812 
7813 	netdev_info(priv->dev, "%s: removing driver", __func__);
7814 
7815 	pm_runtime_get_sync(dev);
7816 
7817 	stmmac_stop_all_dma(priv);
7818 	stmmac_mac_set(priv, priv->ioaddr, false);
7819 	unregister_netdev(ndev);
7820 
7821 #ifdef CONFIG_DEBUG_FS
7822 	stmmac_exit_fs(ndev);
7823 #endif
7824 	phylink_destroy(priv->phylink);
7825 	if (priv->plat->stmmac_rst)
7826 		reset_control_assert(priv->plat->stmmac_rst);
7827 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7828 
7829 	stmmac_pcs_clean(ndev);
7830 	stmmac_mdio_unregister(ndev);
7831 
7832 	destroy_workqueue(priv->wq);
7833 	mutex_destroy(&priv->lock);
7834 	bitmap_free(priv->af_xdp_zc_qps);
7835 
7836 	pm_runtime_disable(dev);
7837 	pm_runtime_put_noidle(dev);
7838 }
7839 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7840 
7841 /**
7842  * stmmac_suspend - suspend callback
7843  * @dev: device pointer
7844  * Description: this is the function to suspend the device; it is called
7845  * by the platform driver to stop the network queue, release the resources,
7846  * program the PMT register (for WoL) and clean up driver resources.
7847  */
7848 int stmmac_suspend(struct device *dev)
7849 {
7850 	struct net_device *ndev = dev_get_drvdata(dev);
7851 	struct stmmac_priv *priv = netdev_priv(ndev);
7852 	u32 chan;
7853 
7854 	if (!ndev || !netif_running(ndev))
7855 		return 0;
7856 
7857 	mutex_lock(&priv->lock);
7858 
7859 	netif_device_detach(ndev);
7860 
7861 	stmmac_disable_all_queues(priv);
7862 
7863 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7864 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7865 
7866 	if (priv->eee_enabled) {
7867 		priv->tx_path_in_lpi_mode = false;
7868 		del_timer_sync(&priv->eee_ctrl_timer);
7869 	}
7870 
7871 	/* Stop TX/RX DMA */
7872 	stmmac_stop_all_dma(priv);
7873 
7874 	if (priv->plat->serdes_powerdown)
7875 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7876 
7877 	/* Enable Power down mode by programming the PMT regs */
7878 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7879 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7880 		priv->irq_wake = 1;
7881 	} else {
7882 		stmmac_mac_set(priv, priv->ioaddr, false);
7883 		pinctrl_pm_select_sleep_state(priv->device);
7884 	}
7885 
7886 	mutex_unlock(&priv->lock);
7887 
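	/* With MAC-based Wake-on-LAN keep the link up via phylink; otherwise slow the PHY down (if it must wake the system) and suspend it */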
7888 	rtnl_lock();
7889 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7890 		phylink_suspend(priv->phylink, true);
7891 	} else {
7892 		if (device_may_wakeup(priv->device))
7893 			phylink_speed_down(priv->phylink, false);
7894 		phylink_suspend(priv->phylink, false);
7895 	}
7896 	rtnl_unlock();
7897 
7898 	if (priv->dma_cap.fpesel) {
7899 		/* Disable FPE */
7900 		stmmac_fpe_configure(priv, priv->ioaddr,
7901 				     priv->plat->fpe_cfg,
7902 				     priv->plat->tx_queues_to_use,
7903 				     priv->plat->rx_queues_to_use, false);
7904 
7905 		stmmac_fpe_handshake(priv, false);
7906 		stmmac_fpe_stop_wq(priv);
7907 	}
7908 
7909 	priv->speed = SPEED_UNKNOWN;
7910 	return 0;
7911 }
7912 EXPORT_SYMBOL_GPL(stmmac_suspend);
7913 
7914 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7915 {
7916 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7917 
7918 	rx_q->cur_rx = 0;
7919 	rx_q->dirty_rx = 0;
7920 }
7921 
7922 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7923 {
7924 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7925 
7926 	tx_q->cur_tx = 0;
7927 	tx_q->dirty_tx = 0;
7928 	tx_q->mss = 0;
7929 
7930 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7931 }
7932 
7933 /**
7934  * stmmac_reset_queues_param - reset queue parameters
7935  * @priv: device pointer
7936  */
7937 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7938 {
7939 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7940 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7941 	u32 queue;
7942 
7943 	for (queue = 0; queue < rx_cnt; queue++)
7944 		stmmac_reset_rx_queue(priv, queue);
7945 
7946 	for (queue = 0; queue < tx_cnt; queue++)
7947 		stmmac_reset_tx_queue(priv, queue);
7948 }
7949 
7950 /**
7951  * stmmac_resume - resume callback
7952  * @dev: device pointer
7953  * Description: when resuming, this function is invoked to set up the DMA and
7954  * CORE in a usable state.
7955  */
7956 int stmmac_resume(struct device *dev)
7957 {
7958 	struct net_device *ndev = dev_get_drvdata(dev);
7959 	struct stmmac_priv *priv = netdev_priv(ndev);
7960 	int ret;
7961 
7962 	if (!netif_running(ndev))
7963 		return 0;
7964 
7965 	/* The Power Down bit in the PM register is cleared
7966 	 * automatically as soon as a magic packet or a Wake-up frame
7967 	 * is received. Anyway, it's better to manually clear
7968 	 * this bit because it can generate problems while resuming
7969 	 * from other devices (e.g. serial console).
7970 	 */
7971 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7972 		mutex_lock(&priv->lock);
7973 		stmmac_pmt(priv, priv->hw, 0);
7974 		mutex_unlock(&priv->lock);
7975 		priv->irq_wake = 0;
7976 	} else {
7977 		pinctrl_pm_select_default_state(priv->device);
7978 		/* reset the phy so that it's ready */
7979 		if (priv->mii)
7980 			stmmac_mdio_reset(priv->mii);
7981 	}
7982 
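	/* Power up the SerDes now unless the platform brings it up only after the PHY link comes up */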
7983 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7984 	    priv->plat->serdes_powerup) {
7985 		ret = priv->plat->serdes_powerup(ndev,
7986 						 priv->plat->bsp_priv);
7987 
7988 		if (ret < 0)
7989 			return ret;
7990 	}
7991 
7992 	rtnl_lock();
7993 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7994 		phylink_resume(priv->phylink);
7995 	} else {
7996 		phylink_resume(priv->phylink);
7997 		if (device_may_wakeup(priv->device))
7998 			phylink_speed_up(priv->phylink);
7999 	}
8000 	rtnl_unlock();
8001 
8002 	rtnl_lock();
8003 	mutex_lock(&priv->lock);
8004 
8005 	stmmac_reset_queues_param(priv);
8006 
8007 	stmmac_free_tx_skbufs(priv);
8008 	stmmac_clear_descriptors(priv, &priv->dma_conf);
8009 
8010 	stmmac_hw_setup(ndev, false);
8011 	stmmac_init_coalesce(priv);
8012 	stmmac_set_rx_mode(ndev);
8013 
8014 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8015 
8016 	stmmac_enable_all_queues(priv);
8017 	stmmac_enable_all_dma_irq(priv);
8018 
8019 	mutex_unlock(&priv->lock);
8020 	rtnl_unlock();
8021 
8022 	netif_device_attach(ndev);
8023 
8024 	return 0;
8025 }
8026 EXPORT_SYMBOL_GPL(stmmac_resume);
8027 
8028 #ifndef MODULE
8029 static int __init stmmac_cmdline_opt(char *str)
8030 {
8031 	char *opt;
8032 
8033 	if (!str || !*str)
8034 		return 1;
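	/* Options are comma separated, each in the form name:value */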
8035 	while ((opt = strsep(&str, ",")) != NULL) {
8036 		if (!strncmp(opt, "debug:", 6)) {
8037 			if (kstrtoint(opt + 6, 0, &debug))
8038 				goto err;
8039 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8040 			if (kstrtoint(opt + 8, 0, &phyaddr))
8041 				goto err;
8042 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8043 			if (kstrtoint(opt + 7, 0, &buf_sz))
8044 				goto err;
8045 		} else if (!strncmp(opt, "tc:", 3)) {
8046 			if (kstrtoint(opt + 3, 0, &tc))
8047 				goto err;
8048 		} else if (!strncmp(opt, "watchdog:", 9)) {
8049 			if (kstrtoint(opt + 9, 0, &watchdog))
8050 				goto err;
8051 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8052 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8053 				goto err;
8054 		} else if (!strncmp(opt, "pause:", 6)) {
8055 			if (kstrtoint(opt + 6, 0, &pause))
8056 				goto err;
8057 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8058 			if (kstrtoint(opt + 10, 0, &eee_timer))
8059 				goto err;
8060 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8061 			if (kstrtoint(opt + 11, 0, &chain_mode))
8062 				goto err;
8063 		}
8064 	}
8065 	return 1;
8066 
8067 err:
8068 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8069 	return 1;
8070 }
8071 
8072 __setup("stmmaceth=", stmmac_cmdline_opt);
8073 #endif /* MODULE */
8074 
8075 static int __init stmmac_init(void)
8076 {
8077 #ifdef CONFIG_DEBUG_FS
8078 	/* Create debugfs main directory if it doesn't exist yet */
8079 	if (!stmmac_fs_dir)
8080 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8081 	register_netdevice_notifier(&stmmac_notifier);
8082 #endif
8083 
8084 	return 0;
8085 }
8086 
8087 static void __exit stmmac_exit(void)
8088 {
8089 #ifdef CONFIG_DEBUG_FS
8090 	unregister_netdevice_notifier(&stmmac_notifier);
8091 	debugfs_remove_recursive(stmmac_fs_dir);
8092 #endif
8093 }
8094 
8095 module_init(stmmac_init)
8096 module_exit(stmmac_exit)
8097 
8098 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8099 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8100 MODULE_LICENSE("GPL");
8101