xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision a182a62ff77f705f7dd3d98cf05cb3d03751a8f0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/pm_wakeirq.h>
33 #include <linux/prefetch.h>
34 #include <linux/pinctrl/consumer.h>
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38 #endif /* CONFIG_DEBUG_FS */
39 #include <linux/net_tstamp.h>
40 #include <linux/phylink.h>
41 #include <linux/udp.h>
42 #include <linux/bpf_trace.h>
43 #include <net/devlink.h>
44 #include <net/page_pool/helpers.h>
45 #include <net/pkt_cls.h>
46 #include <net/xdp_sock_drv.h>
47 #include "stmmac_ptp.h"
48 #include "stmmac_fpe.h"
49 #include "stmmac.h"
50 #include "stmmac_pcs.h"
51 #include "stmmac_xdp.h"
52 #include <linux/reset.h>
53 #include <linux/of_mdio.h>
54 #include "dwmac1000.h"
55 #include "dwxgmac2.h"
56 #include "hwif.h"
57 
58 /* As long as the interface is active, we keep the timestamping counter enabled
59  * with fine resolution and binary rollover. This avoid non-monotonic behavior
60  * (clock jumps) when changing timestamping settings at runtime.
61  */
62 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR)
63 
64 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
65 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
66 
67 /* Module parameters */
68 #define TX_TIMEO	5000
69 static int watchdog = TX_TIMEO;
70 module_param(watchdog, int, 0644);
71 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
72 
73 static int debug = -1;
74 module_param(debug, int, 0644);
75 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
76 
77 static int phyaddr = -1;
78 module_param(phyaddr, int, 0444);
79 MODULE_PARM_DESC(phyaddr, "Physical device address");
80 
81 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
82 
83 /* Limit to make sure XDP TX and slow path can coexist */
84 #define STMMAC_XSK_TX_BUDGET_MAX	256
85 #define STMMAC_TX_XSK_AVAIL		16
86 #define STMMAC_RX_FILL_BATCH		16
87 
88 #define STMMAC_XDP_PASS		0
89 #define STMMAC_XDP_CONSUMED	BIT(0)
90 #define STMMAC_XDP_TX		BIT(1)
91 #define STMMAC_XDP_REDIRECT	BIT(2)
92 #define STMMAC_XSK_CONSUMED	BIT(3)
93 
94 static int flow_ctrl = 0xdead;
95 module_param(flow_ctrl, int, 0644);
96 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
97 
98 static int pause = PAUSE_TIME;
99 module_param(pause, int, 0644);
100 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
101 
102 #define TC_DEFAULT 64
103 static int tc = TC_DEFAULT;
104 module_param(tc, int, 0644);
105 MODULE_PARM_DESC(tc, "DMA threshold control value");
106 
107 /* This is unused */
108 #define	DEFAULT_BUFSIZE	1536
109 static int buf_sz = DEFAULT_BUFSIZE;
110 module_param(buf_sz, int, 0644);
111 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
112 
113 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
114 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
115 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
116 
117 #define STMMAC_DEFAULT_LPI_TIMER	1000
118 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
119 module_param(eee_timer, uint, 0644);
120 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
121 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
122 
123 /* By default the driver will use the ring mode to manage tx and rx descriptors,
124  * but allows the user to force the use of chain mode instead of ring mode.
125  */
126 static unsigned int chain_mode;
127 module_param(chain_mode, int, 0444);
128 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
129 
130 static const char *stmmac_dwmac_actphyif[8] = {
131 	[PHY_INTF_SEL_GMII_MII]	= "GMII/MII",
132 	[PHY_INTF_SEL_RGMII]	= "RGMII",
133 	[PHY_INTF_SEL_SGMII]	= "SGMII",
134 	[PHY_INTF_SEL_TBI]	= "TBI",
135 	[PHY_INTF_SEL_RMII]	= "RMII",
136 	[PHY_INTF_SEL_RTBI]	= "RTBI",
137 	[PHY_INTF_SEL_SMII]	= "SMII",
138 	[PHY_INTF_SEL_REVMII]	= "REVMII",
139 };
140 
141 static const char *stmmac_dwxgmac_phyif[4] = {
142 	[PHY_INTF_GMII]		= "GMII",
143 	[PHY_INTF_RGMII]	= "RGMII",
144 };
145 
146 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
147 /* For MSI interrupts handling */
148 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
149 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
150 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
151 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
152 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
153 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
154 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
155 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
156 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
157 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
158 					  u32 rxmode, u32 chan);
159 
160 #ifdef CONFIG_DEBUG_FS
161 static const struct net_device_ops stmmac_netdev_ops;
162 static void stmmac_init_fs(struct net_device *dev);
163 static void stmmac_exit_fs(struct net_device *dev);
164 #endif
165 
166 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
167 
168 struct stmmac_devlink_priv {
169 	struct stmmac_priv *stmmac_priv;
170 };
171 
172 enum stmmac_dl_param_id {
173 	STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
174 	STMMAC_DEVLINK_PARAM_ID_TS_COARSE,
175 };
176 
177 /**
178  * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
179  * @bsp_priv: BSP private data structure (unused)
180  * @clk_tx_i: the transmit clock
181  * @interface: the selected interface mode
182  * @speed: the speed that the MAC will be operating at
183  *
184  * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
185  * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
186  * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
187  * the plat_data->set_clk_tx_rate method directly, call it via their own
188  * implementation, or implement their own method should they have more
189  * complex requirements. It is intended to only be used in this method.
190  *
191  * plat_data->clk_tx_i must be filled in.
192  */
193 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
194 			   phy_interface_t interface, int speed)
195 {
196 	long rate = rgmii_clock(speed);
197 
198 	/* Silently ignore unsupported speeds as rgmii_clock() only
199 	 * supports 10, 100 and 1000Mbps. We do not want to spit
200 	 * errors for 2500 and higher speeds here.
201 	 */
202 	if (rate < 0)
203 		return 0;
204 
205 	return clk_set_rate(clk_tx_i, rate);
206 }
207 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
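
/* Illustrative sketch only: a platform glue driver might wire this helper up
 * from its probe path roughly as below. The "tx" clock name and the plat_dat
 * and pdev variables are placeholders for the glue driver's own naming, not
 * something defined here.
 *
 *	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
 *	if (IS_ERR(plat_dat->clk_tx_i))
 *		return PTR_ERR(plat_dat->clk_tx_i);
 *	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
 */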
208 
209 /**
210  * stmmac_axi_blen_to_mask() - convert a burst length array to reg value
211  * @regval: pointer to a u32 for the resulting register value
212  * @blen: pointer to an array of u32 containing the burst length values in bytes
213  * @len: the number of entries in the @blen array
214  */
215 void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
216 {
217 	size_t i;
218 	u32 val;
219 
220 	for (val = i = 0; i < len; i++) {
221 		u32 burst = blen[i];
222 
223 		/* Burst values of zero must be skipped. */
224 		if (!burst)
225 			continue;
226 
227 		/* The valid range for the burst length is 4 to 256 inclusive,
228 		 * and it must be a power of two.
229 		 */
230 		if (burst < 4 || burst > 256 || !is_power_of_2(burst)) {
231 			pr_err("stmmac: invalid burst length %u at index %zu\n",
232 			       burst, i);
233 			continue;
234 		}
235 
236 		/* Since burst is a power of two, and the register field starts
237 		 * with burst = 4, shift right by two bits so bit 0 of the field
238 		 * corresponds with the minimum value.
239 		 */
240 		val |= burst >> 2;
241 	}
242 
243 	*regval = FIELD_PREP(DMA_AXI_BLEN_MASK, val);
244 }
245 EXPORT_SYMBOL_GPL(stmmac_axi_blen_to_mask);
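
/* Worked example, as a sketch only: with blen = { 4, 8, 16 } the loop above
 * accumulates 4 >> 2 = 0x1, 8 >> 2 = 0x2 and 16 >> 2 = 0x4, so val becomes
 * 0x7 before FIELD_PREP() positions it in the burst-length field, with bit 0
 * of the field standing for the minimum burst length of 4.
 */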
246 
247 /**
248  * stmmac_verify_args - verify the driver parameters.
249  * Description: it checks the driver parameters and sets default values in
250  * case of errors.
251  */
252 static void stmmac_verify_args(void)
253 {
254 	if (unlikely(watchdog < 0))
255 		watchdog = TX_TIMEO;
256 	if (unlikely((pause < 0) || (pause > 0xffff)))
257 		pause = PAUSE_TIME;
258 
259 	if (flow_ctrl != 0xdead)
260 		pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
261 }
262 
263 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
264 {
265 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
266 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
267 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
268 	u32 queue;
269 
270 	for (queue = 0; queue < maxq; queue++) {
271 		struct stmmac_channel *ch = &priv->channel[queue];
272 
273 		if (stmmac_xdp_is_enabled(priv) &&
274 		    test_bit(queue, priv->af_xdp_zc_qps)) {
275 			napi_disable(&ch->rxtx_napi);
276 			continue;
277 		}
278 
279 		if (queue < rx_queues_cnt)
280 			napi_disable(&ch->rx_napi);
281 		if (queue < tx_queues_cnt)
282 			napi_disable(&ch->tx_napi);
283 	}
284 }
285 
286 /**
287  * stmmac_disable_all_queues - Disable all queues
288  * @priv: driver private structure
289  */
290 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
291 {
292 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
293 	struct stmmac_rx_queue *rx_q;
294 	u32 queue;
295 
296 	/* synchronize_rcu() needed for pending XDP buffers to drain */
297 	for (queue = 0; queue < rx_queues_cnt; queue++) {
298 		rx_q = &priv->dma_conf.rx_queue[queue];
299 		if (rx_q->xsk_pool) {
300 			synchronize_rcu();
301 			break;
302 		}
303 	}
304 
305 	__stmmac_disable_all_queues(priv);
306 }
307 
308 /**
309  * stmmac_enable_all_queues - Enable all queues
310  * @priv: driver private structure
311  */
312 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
313 {
314 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
315 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
316 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
317 	u32 queue;
318 
319 	for (queue = 0; queue < maxq; queue++) {
320 		struct stmmac_channel *ch = &priv->channel[queue];
321 
322 		if (stmmac_xdp_is_enabled(priv) &&
323 		    test_bit(queue, priv->af_xdp_zc_qps)) {
324 			napi_enable(&ch->rxtx_napi);
325 			continue;
326 		}
327 
328 		if (queue < rx_queues_cnt)
329 			napi_enable(&ch->rx_napi);
330 		if (queue < tx_queues_cnt)
331 			napi_enable(&ch->tx_napi);
332 	}
333 }
334 
335 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
336 {
337 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
338 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
339 		queue_work(priv->wq, &priv->service_task);
340 }
341 
342 static void stmmac_global_err(struct stmmac_priv *priv)
343 {
344 	netif_carrier_off(priv->dev);
345 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
346 	stmmac_service_event_schedule(priv);
347 }
348 
349 static void print_pkt(unsigned char *buf, int len)
350 {
351 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
352 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
353 }
354 
355 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
356 {
357 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
358 	u32 avail;
359 
360 	if (tx_q->dirty_tx > tx_q->cur_tx)
361 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
362 	else
363 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
364 
365 	return avail;
366 }
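
/* Worked example for the ring arithmetic above (values are illustrative):
 * with dma_tx_size = 512, cur_tx = 508 and dirty_tx = 4, 504 descriptors are
 * still in flight and the second branch yields 512 - 508 + 4 - 1 = 7 free
 * descriptors. The "- 1" keeps one slot unused so that cur_tx can never
 * catch up with dirty_tx and make a full ring look empty.
 */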
367 
368 /**
369  * stmmac_rx_dirty - Get the number of dirty RX descriptors
370  * @priv: driver private structure
371  * @queue: RX queue index
372  */
373 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
374 {
375 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
376 	u32 dirty;
377 
378 	if (rx_q->dirty_rx <= rx_q->cur_rx)
379 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
380 	else
381 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
382 
383 	return dirty;
384 }
385 
386 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
387 {
388 	u32 tx_cnt = priv->plat->tx_queues_to_use;
389 	u32 queue;
390 
391 	/* check if all TX queues have finished their work */
392 	for (queue = 0; queue < tx_cnt; queue++) {
393 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
394 
395 		if (tx_q->dirty_tx != tx_q->cur_tx)
396 			return true; /* still unfinished work */
397 	}
398 
399 	return false;
400 }
401 
402 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
403 {
404 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
405 }
406 
407 /**
408  * stmmac_try_to_start_sw_lpi - check and enter LPI mode
409  * @priv: driver private structure
410  * Description: check whether the TX path is idle and, if so, enter LPI mode
411  * when EEE is enabled.
412  */
413 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
414 {
415 	if (stmmac_eee_tx_busy(priv)) {
416 		stmmac_restart_sw_lpi_timer(priv);
417 		return;
418 	}
419 
420 	/* Check and enter in LPI mode */
421 	if (!priv->tx_path_in_lpi_mode)
422 		stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
423 				    priv->tx_lpi_clk_stop, 0);
424 }
425 
426 /**
427  * stmmac_stop_sw_lpi - stop transmitting LPI
428  * @priv: driver private structure
429  * Description: When using software-controlled LPI, stop transmitting LPI state.
430  */
431 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
432 {
433 	timer_delete_sync(&priv->eee_ctrl_timer);
434 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
435 	priv->tx_path_in_lpi_mode = false;
436 }
437 
438 /**
439  * stmmac_eee_ctrl_timer - EEE TX SW timer.
440  * @t:  timer_list struct containing private info
441  * Description:
442  *  if there is no data transfer and if we are not in LPI state,
443  *  then the MAC transmitter can be moved to LPI state.
444  */
445 static void stmmac_eee_ctrl_timer(struct timer_list *t)
446 {
447 	struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
448 
449 	stmmac_try_to_start_sw_lpi(priv);
450 }
451 
452 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
453  * @priv: driver private structure
454  * @p : descriptor pointer
455  * @skb : the socket buffer
456  * Description:
457  * This function reads the timestamp from the descriptor, performs some
458  * sanity checks and then passes it to the stack.
459  */
460 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
461 				   struct dma_desc *p, struct sk_buff *skb)
462 {
463 	struct skb_shared_hwtstamps shhwtstamp;
464 	bool found = false;
465 	u64 ns = 0;
466 
467 	if (!priv->hwts_tx_en)
468 		return;
469 
470 	/* exit if skb doesn't support hw tstamp */
471 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
472 		return;
473 
474 	/* check tx tstamp status */
475 	if (stmmac_get_tx_timestamp_status(priv, p)) {
476 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
477 		found = true;
478 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
479 		found = true;
480 	}
481 
482 	if (found) {
483 		ns -= priv->plat->cdc_error_adj;
484 
485 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
486 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
487 
488 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
489 		/* pass tstamp to stack */
490 		skb_tstamp_tx(skb, &shhwtstamp);
491 	}
492 }
493 
494 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
495  * @priv: driver private structure
496  * @p : descriptor pointer
497  * @np : next descriptor pointer
498  * @skb : the socket buffer
499  * Description:
500  * This function reads the received packet's timestamp from the descriptor,
501  * performs some sanity checks and passes it to the stack.
502  */
503 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
504 				   struct dma_desc *np, struct sk_buff *skb)
505 {
506 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
507 	struct dma_desc *desc = p;
508 	u64 ns = 0;
509 
510 	if (!priv->hwts_rx_en)
511 		return;
512 	/* For GMAC4, the valid timestamp is from CTX next desc. */
513 	if (dwmac_is_xmac(priv->plat->core_type))
514 		desc = np;
515 
516 	/* Check if timestamp is available */
517 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
518 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
519 
520 		ns -= priv->plat->cdc_error_adj;
521 
522 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
523 		shhwtstamp = skb_hwtstamps(skb);
524 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
525 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
526 	} else  {
527 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
528 	}
529 }
530 
531 static void stmmac_update_subsecond_increment(struct stmmac_priv *priv)
532 {
533 	bool xmac = dwmac_is_xmac(priv->plat->core_type);
534 	u32 sec_inc = 0;
535 	u64 temp = 0;
536 
537 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
538 
539 	/* program Sub Second Increment reg */
540 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
541 					   priv->plat->clk_ptp_rate,
542 					   xmac, &sec_inc);
543 	temp = div_u64(1000000000ULL, sec_inc);
544 
545 	/* Store sub second increment for later use */
546 	priv->sub_second_inc = sec_inc;
547 
548 	/* Calculate the default addend value:
549 	 * addend = (2^32) / freq_div_ratio
550 	 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc), i.e. the code
551 	 * below computes addend = (2^32 * (1e9 / sec_inc)) / clk_ptp_rate.
552 	 */
553 	temp = (u64)(temp << 32);
554 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
555 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
556 }
557 
558 /**
559  *  stmmac_hwtstamp_set - control hardware timestamping.
560  *  @dev: device pointer.
561  *  @config: the timestamping configuration.
562  *  @extack: netlink extended ack structure for error reporting.
563  *  Description:
564  *  This function configures the MAC to enable/disable both outgoing (TX)
565  *  and incoming (RX) packet timestamping based on user input.
566  *  Return Value:
567  *  0 on success and a negative error code on failure.
568  */
569 static int stmmac_hwtstamp_set(struct net_device *dev,
570 			       struct kernel_hwtstamp_config *config,
571 			       struct netlink_ext_ack *extack)
572 {
573 	struct stmmac_priv *priv = netdev_priv(dev);
574 	u32 ptp_v2 = 0;
575 	u32 tstamp_all = 0;
576 	u32 ptp_over_ipv4_udp = 0;
577 	u32 ptp_over_ipv6_udp = 0;
578 	u32 ptp_over_ethernet = 0;
579 	u32 snap_type_sel = 0;
580 	u32 ts_master_en = 0;
581 	u32 ts_event_en = 0;
582 
583 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
584 		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
585 		priv->hwts_tx_en = 0;
586 		priv->hwts_rx_en = 0;
587 
588 		return -EOPNOTSUPP;
589 	}
590 
591 	if (!netif_running(dev)) {
592 		NL_SET_ERR_MSG_MOD(extack,
593 				   "Cannot change timestamping configuration while down");
594 		return -ENODEV;
595 	}
596 
597 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
598 		   __func__, config->flags, config->tx_type, config->rx_filter);
599 
600 	if (config->tx_type != HWTSTAMP_TX_OFF &&
601 	    config->tx_type != HWTSTAMP_TX_ON)
602 		return -ERANGE;
603 
604 	if (priv->adv_ts) {
605 		switch (config->rx_filter) {
606 		case HWTSTAMP_FILTER_NONE:
607 			/* do not timestamp any incoming packets */
608 			config->rx_filter = HWTSTAMP_FILTER_NONE;
609 			break;
610 
611 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
612 			/* PTP v1, UDP, any kind of event packet */
613 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
614 			/* 'xmac' hardware can support Sync, Pdelay_Req and
615 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
616 			 * This leaves Delay_Req timestamps out.
617 			 * Enable all events *and* general purpose message
618 			 * timestamping
619 			 */
620 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
621 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
622 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
623 			break;
624 
625 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
626 			/* PTP v1, UDP, Sync packet */
627 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
628 			/* take time stamp for SYNC messages only */
629 			ts_event_en = PTP_TCR_TSEVNTENA;
630 
631 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
632 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
633 			break;
634 
635 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
636 			/* PTP v1, UDP, Delay_req packet */
637 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
638 			/* take time stamp for Delay_Req messages only */
639 			ts_master_en = PTP_TCR_TSMSTRENA;
640 			ts_event_en = PTP_TCR_TSEVNTENA;
641 
642 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
643 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
644 			break;
645 
646 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
647 			/* PTP v2, UDP, any kind of event packet */
648 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
649 			ptp_v2 = PTP_TCR_TSVER2ENA;
650 			/* take time stamp for all event messages */
651 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
652 
653 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
654 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
655 			break;
656 
657 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
658 			/* PTP v2, UDP, Sync packet */
659 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
660 			ptp_v2 = PTP_TCR_TSVER2ENA;
661 			/* take time stamp for SYNC messages only */
662 			ts_event_en = PTP_TCR_TSEVNTENA;
663 
664 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
665 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
666 			break;
667 
668 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
669 			/* PTP v2, UDP, Delay_req packet */
670 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
671 			ptp_v2 = PTP_TCR_TSVER2ENA;
672 			/* take time stamp for Delay_Req messages only */
673 			ts_master_en = PTP_TCR_TSMSTRENA;
674 			ts_event_en = PTP_TCR_TSEVNTENA;
675 
676 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678 			break;
679 
680 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
681 			/* PTP v2/802.AS1 any layer, any kind of event packet */
682 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
683 			ptp_v2 = PTP_TCR_TSVER2ENA;
684 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
685 			if (priv->synopsys_id < DWMAC_CORE_4_10)
686 				ts_event_en = PTP_TCR_TSEVNTENA;
687 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
688 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
689 			ptp_over_ethernet = PTP_TCR_TSIPENA;
690 			break;
691 
692 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
693 			/* PTP v2/802.AS1, any layer, Sync packet */
694 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
695 			ptp_v2 = PTP_TCR_TSVER2ENA;
696 			/* take time stamp for SYNC messages only */
697 			ts_event_en = PTP_TCR_TSEVNTENA;
698 
699 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 			ptp_over_ethernet = PTP_TCR_TSIPENA;
702 			break;
703 
704 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
705 			/* PTP v2/802.AS1, any layer, Delay_req packet */
706 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
707 			ptp_v2 = PTP_TCR_TSVER2ENA;
708 			/* take time stamp for Delay_Req messages only */
709 			ts_master_en = PTP_TCR_TSMSTRENA;
710 			ts_event_en = PTP_TCR_TSEVNTENA;
711 
712 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
713 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
714 			ptp_over_ethernet = PTP_TCR_TSIPENA;
715 			break;
716 
717 		case HWTSTAMP_FILTER_NTP_ALL:
718 		case HWTSTAMP_FILTER_ALL:
719 			/* time stamp any incoming packet */
720 			config->rx_filter = HWTSTAMP_FILTER_ALL;
721 			tstamp_all = PTP_TCR_TSENALL;
722 			break;
723 
724 		default:
725 			return -ERANGE;
726 		}
727 	} else {
728 		switch (config->rx_filter) {
729 		case HWTSTAMP_FILTER_NONE:
730 			config->rx_filter = HWTSTAMP_FILTER_NONE;
731 			break;
732 		default:
733 			/* PTP v1, UDP, any kind of event packet */
734 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
735 			break;
736 		}
737 	}
738 	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
739 	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
740 
741 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
742 	if (!priv->tsfupdt_coarse)
743 		priv->systime_flags |= PTP_TCR_TSCFUPDT;
744 
745 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
746 		priv->systime_flags |= tstamp_all | ptp_v2 |
747 				       ptp_over_ethernet | ptp_over_ipv6_udp |
748 				       ptp_over_ipv4_udp | ts_event_en |
749 				       ts_master_en | snap_type_sel;
750 	}
751 
752 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
753 
754 	priv->tstamp_config = *config;
755 
756 	return 0;
757 }
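
/* For illustration only: this path is normally reached from userspace via
 * the SIOCSHWTSTAMP ioctl (or the equivalent ethtool netlink request). A
 * minimal userspace sketch, where sock_fd is an open socket and "eth0" is a
 * placeholder interface name, could look like:
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	err = ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */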
758 
759 /**
760  *  stmmac_hwtstamp_get - read hardware timestamping.
761  *  @dev: device pointer.
762  *  @config: the timestamping configuration.
763  *  Description:
764  *  This function obtains the current hardware timestamping settings
765  *  as requested.
766  */
767 static int stmmac_hwtstamp_get(struct net_device *dev,
768 			       struct kernel_hwtstamp_config *config)
769 {
770 	struct stmmac_priv *priv = netdev_priv(dev);
771 
772 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
773 		return -EOPNOTSUPP;
774 
775 	*config = priv->tstamp_config;
776 
777 	return 0;
778 }
779 
780 /**
781  * stmmac_init_tstamp_counter - init hardware timestamping counter
782  * @priv: driver private structure
783  * @systime_flags: timestamping flags
784  * Description:
785  * Initialize hardware counter for packet timestamping.
786  * This is valid as long as the interface is open and not suspended.
787  * Will be rerun after resuming from suspend, in which case the timestamping
788  * flags updated by stmmac_hwtstamp_set() also need to be restored.
789  */
790 static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
791 				      u32 systime_flags)
792 {
793 	struct timespec64 now;
794 
795 	if (!priv->plat->clk_ptp_rate) {
796 		netdev_err(priv->dev, "Invalid PTP clock rate");
797 		return -EINVAL;
798 	}
799 
800 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
801 	priv->systime_flags = systime_flags;
802 
803 	stmmac_update_subsecond_increment(priv);
804 
805 	/* initialize system time */
806 	ktime_get_real_ts64(&now);
807 
808 	/* lower 32 bits of tv_sec are safe until y2106 */
809 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
810 
811 	return 0;
812 }
813 
814 /**
815  * stmmac_init_timestamping - initialise timestamping
816  * @priv: driver private structure
817  * Description: verify whether the HW supports PTPv1 or PTPv2 by looking at
818  * the HW capability register and initialise the timestamping counter.
819  * The PTP clock driver itself is registered from stmmac_setup_ptp().
820  */
821 static int stmmac_init_timestamping(struct stmmac_priv *priv)
822 {
823 	bool xmac = dwmac_is_xmac(priv->plat->core_type);
824 	int ret;
825 
826 	if (priv->plat->ptp_clk_freq_config)
827 		priv->plat->ptp_clk_freq_config(priv);
828 
829 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
830 		netdev_info(priv->dev, "PTP not supported by HW\n");
831 		return -EOPNOTSUPP;
832 	}
833 
834 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE |
835 					       PTP_TCR_TSCFUPDT);
836 	if (ret) {
837 		netdev_warn(priv->dev, "PTP init failed\n");
838 		return ret;
839 	}
840 
841 	priv->adv_ts = 0;
842 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
843 	if (xmac && priv->dma_cap.atime_stamp)
844 		priv->adv_ts = 1;
845 	/* Dwmac 3.x core with extend_desc can support adv_ts */
846 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
847 		priv->adv_ts = 1;
848 
849 	if (priv->dma_cap.time_stamp)
850 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
851 
852 	if (priv->adv_ts)
853 		netdev_info(priv->dev,
854 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
855 
856 	priv->hwts_tx_en = 0;
857 	priv->hwts_rx_en = 0;
858 
859 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
860 		stmmac_hwtstamp_correct_latency(priv, priv);
861 
862 	return 0;
863 }
864 
865 static void stmmac_setup_ptp(struct stmmac_priv *priv)
866 {
867 	int ret;
868 
869 	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
870 	if (ret < 0)
871 		netdev_warn(priv->dev,
872 			    "failed to enable PTP reference clock: %pe\n",
873 			    ERR_PTR(ret));
874 
875 	if (stmmac_init_timestamping(priv) == 0)
876 		stmmac_ptp_register(priv);
877 }
878 
879 static void stmmac_release_ptp(struct stmmac_priv *priv)
880 {
881 	stmmac_ptp_unregister(priv);
882 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
883 }
884 
885 static void stmmac_legacy_serdes_power_down(struct stmmac_priv *priv)
886 {
887 	if (priv->plat->serdes_powerdown && priv->legacy_serdes_is_powered)
888 		priv->plat->serdes_powerdown(priv->dev, priv->plat->bsp_priv);
889 
890 	priv->legacy_serdes_is_powered = false;
891 }
892 
893 static int stmmac_legacy_serdes_power_up(struct stmmac_priv *priv)
894 {
895 	int ret;
896 
897 	if (!priv->plat->serdes_powerup)
898 		return 0;
899 
900 	ret = priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
901 	if (ret < 0)
902 		netdev_err(priv->dev, "SerDes powerup failed\n");
903 	else
904 		priv->legacy_serdes_is_powered = true;
905 
906 	return ret;
907 }
908 
909 /**
910  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
911  *  @priv: driver private structure
912  *  @duplex: duplex passed to the next function
913  *  @flow_ctrl: desired flow control modes
914  *  Description: It is used for configuring the flow control in all queues
915  */
916 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
917 				 unsigned int flow_ctrl)
918 {
919 	u32 tx_cnt = priv->plat->tx_queues_to_use;
920 
921 	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
922 			 tx_cnt);
923 }
924 
925 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
926 					 phy_interface_t interface)
927 {
928 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
929 
930 	/* Refresh the MAC-specific capabilities */
931 	stmmac_mac_update_caps(priv);
932 
933 	if (priv->hw_cap_support && !priv->dma_cap.half_duplex)
934 		priv->hw->link.caps &= ~(MAC_1000HD | MAC_100HD | MAC_10HD);
935 
936 	config->mac_capabilities = priv->hw->link.caps;
937 
938 	if (priv->plat->max_speed)
939 		phylink_limit_mac_speed(config, priv->plat->max_speed);
940 
941 	return config->mac_capabilities;
942 }
943 
944 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
945 						 phy_interface_t interface)
946 {
947 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
948 	struct phylink_pcs *pcs;
949 
950 	if (priv->plat->select_pcs) {
951 		pcs = priv->plat->select_pcs(priv, interface);
952 		if (!IS_ERR(pcs))
953 			return pcs;
954 	}
955 
956 	/* The PCS control register is only relevant for SGMII, TBI and RTBI
957 	 * modes. We no longer support TBI or RTBI, so only configure this
958 	 * register when operating in SGMII mode with the integrated PCS.
959 	 */
960 	if (priv->hw->pcs & STMMAC_PCS_SGMII && priv->integrated_pcs)
961 		return &priv->integrated_pcs->pcs;
962 
963 	return NULL;
964 }
965 
966 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
967 			      const struct phylink_link_state *state)
968 {
969 	/* Nothing to do, xpcs_config() handles everything */
970 }
971 
972 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
973 			     phy_interface_t interface)
974 {
975 	struct net_device *ndev = to_net_dev(config->dev);
976 	struct stmmac_priv *priv = netdev_priv(ndev);
977 
978 	if (priv->plat->mac_finish)
979 		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
980 
981 	return 0;
982 }
983 
984 static void stmmac_mac_link_down(struct phylink_config *config,
985 				 unsigned int mode, phy_interface_t interface)
986 {
987 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
988 
989 	stmmac_mac_set(priv, priv->ioaddr, false);
990 	if (priv->dma_cap.eee)
991 		stmmac_set_eee_pls(priv, priv->hw, false);
992 
993 	if (stmmac_fpe_supported(priv))
994 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
995 }
996 
997 static void stmmac_mac_link_up(struct phylink_config *config,
998 			       struct phy_device *phy,
999 			       unsigned int mode, phy_interface_t interface,
1000 			       int speed, int duplex,
1001 			       bool tx_pause, bool rx_pause)
1002 {
1003 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1004 	unsigned int flow_ctrl;
1005 	u32 old_ctrl, ctrl;
1006 	int ret;
1007 
1008 	if (priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)
1009 		stmmac_legacy_serdes_power_up(priv);
1010 
1011 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1012 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1013 
1014 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1015 		switch (speed) {
1016 		case SPEED_10000:
1017 			ctrl |= priv->hw->link.xgmii.speed10000;
1018 			break;
1019 		case SPEED_5000:
1020 			ctrl |= priv->hw->link.xgmii.speed5000;
1021 			break;
1022 		case SPEED_2500:
1023 			ctrl |= priv->hw->link.xgmii.speed2500;
1024 			break;
1025 		default:
1026 			return;
1027 		}
1028 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1029 		switch (speed) {
1030 		case SPEED_100000:
1031 			ctrl |= priv->hw->link.xlgmii.speed100000;
1032 			break;
1033 		case SPEED_50000:
1034 			ctrl |= priv->hw->link.xlgmii.speed50000;
1035 			break;
1036 		case SPEED_40000:
1037 			ctrl |= priv->hw->link.xlgmii.speed40000;
1038 			break;
1039 		case SPEED_25000:
1040 			ctrl |= priv->hw->link.xlgmii.speed25000;
1041 			break;
1042 		case SPEED_10000:
1043 			ctrl |= priv->hw->link.xgmii.speed10000;
1044 			break;
1045 		case SPEED_2500:
1046 			ctrl |= priv->hw->link.speed2500;
1047 			break;
1048 		case SPEED_1000:
1049 			ctrl |= priv->hw->link.speed1000;
1050 			break;
1051 		default:
1052 			return;
1053 		}
1054 	} else {
1055 		switch (speed) {
1056 		case SPEED_2500:
1057 			ctrl |= priv->hw->link.speed2500;
1058 			break;
1059 		case SPEED_1000:
1060 			ctrl |= priv->hw->link.speed1000;
1061 			break;
1062 		case SPEED_100:
1063 			ctrl |= priv->hw->link.speed100;
1064 			break;
1065 		case SPEED_10:
1066 			ctrl |= priv->hw->link.speed10;
1067 			break;
1068 		default:
1069 			return;
1070 		}
1071 	}
1072 
1073 	if (priv->plat->fix_mac_speed)
1074 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1075 
1076 	if (!duplex)
1077 		ctrl &= ~priv->hw->link.duplex;
1078 	else
1079 		ctrl |= priv->hw->link.duplex;
1080 
1081 	/* Flow Control operation */
1082 	if (rx_pause && tx_pause)
1083 		flow_ctrl = FLOW_AUTO;
1084 	else if (rx_pause && !tx_pause)
1085 		flow_ctrl = FLOW_RX;
1086 	else if (!rx_pause && tx_pause)
1087 		flow_ctrl = FLOW_TX;
1088 	else
1089 		flow_ctrl = FLOW_OFF;
1090 
1091 	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
1092 
1093 	if (ctrl != old_ctrl)
1094 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1095 
1096 	if (priv->plat->set_clk_tx_rate) {
1097 		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
1098 						priv->plat->clk_tx_i,
1099 						interface, speed);
1100 		if (ret < 0)
1101 			netdev_err(priv->dev,
1102 				   "failed to configure %s transmit clock for %dMbps: %pe\n",
1103 				   phy_modes(interface), speed, ERR_PTR(ret));
1104 	}
1105 
1106 	stmmac_mac_set(priv, priv->ioaddr, true);
1107 	if (priv->dma_cap.eee)
1108 		stmmac_set_eee_pls(priv, priv->hw, true);
1109 
1110 	if (stmmac_fpe_supported(priv))
1111 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
1112 
1113 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1114 		stmmac_hwtstamp_correct_latency(priv, priv);
1115 }
1116 
1117 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1118 {
1119 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1120 
1121 	priv->eee_active = false;
1122 
1123 	mutex_lock(&priv->lock);
1124 
1125 	priv->eee_enabled = false;
1126 
1127 	netdev_dbg(priv->dev, "disable EEE\n");
1128 	priv->eee_sw_timer_en = false;
1129 	timer_delete_sync(&priv->eee_ctrl_timer);
1130 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1131 	priv->tx_path_in_lpi_mode = false;
1132 
1133 	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1134 	mutex_unlock(&priv->lock);
1135 }
1136 
1137 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1138 				    bool tx_clk_stop)
1139 {
1140 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1141 	int ret;
1142 
1143 	priv->tx_lpi_timer = timer;
1144 	priv->eee_active = true;
1145 
1146 	mutex_lock(&priv->lock);
1147 
1148 	priv->eee_enabled = true;
1149 
1150 	/* Update the transmit clock stop according to PHY capability if
1151 	 * the platform allows
1152 	 */
1153 	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1154 		priv->tx_lpi_clk_stop = tx_clk_stop;
1155 
1156 	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1157 			     STMMAC_DEFAULT_TWT_LS);
1158 
1159 	/* Try to configure the hardware timer. */
1160 	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1161 				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1162 
1163 	if (ret) {
1164 		/* Hardware timer mode not supported, or value out of range.
1165 		 * Fall back to using software LPI mode
1166 		 */
1167 		priv->eee_sw_timer_en = true;
1168 		stmmac_restart_sw_lpi_timer(priv);
1169 	}
1170 
1171 	mutex_unlock(&priv->lock);
1172 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1173 
1174 	return 0;
1175 }
1176 
1177 static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts,
1178 			      const u8 *sopass)
1179 {
1180 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1181 
1182 	device_set_wakeup_enable(priv->device, !!wolopts);
1183 
1184 	mutex_lock(&priv->lock);
1185 	priv->wolopts = wolopts;
1186 	mutex_unlock(&priv->lock);
1187 
1188 	return 0;
1189 }
1190 
1191 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1192 	.mac_get_caps = stmmac_mac_get_caps,
1193 	.mac_select_pcs = stmmac_mac_select_pcs,
1194 	.mac_config = stmmac_mac_config,
1195 	.mac_finish = stmmac_mac_finish,
1196 	.mac_link_down = stmmac_mac_link_down,
1197 	.mac_link_up = stmmac_mac_link_up,
1198 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1199 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1200 	.mac_wol_set = stmmac_mac_wol_set,
1201 };
1202 
1203 /**
1204  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1205  * @priv: driver private structure
1206  * Description: this is to verify whether the HW supports the Physical
1207  * Coding Sublayer (PCS) interface, which can be used when the MAC is
1208  * configured for the TBI, RTBI, or SGMII PHY interface.
1209  */
1210 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1211 {
1212 	int interface = priv->plat->phy_interface;
1213 	int speed = priv->plat->mac_port_sel_speed;
1214 
1215 	if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) {
1216 		netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1217 		priv->hw->pcs = STMMAC_PCS_SGMII;
1218 
1219 		switch (speed) {
1220 		case SPEED_10:
1221 		case SPEED_100:
1222 		case SPEED_1000:
1223 			priv->hw->reverse_sgmii_enable = true;
1224 			break;
1225 
1226 		default:
1227 			dev_warn(priv->device, "invalid port speed\n");
1228 			fallthrough;
1229 		case 0:
1230 			priv->hw->reverse_sgmii_enable = false;
1231 			break;
1232 		}
1233 	}
1234 }
1235 
1236 /**
1237  * stmmac_init_phy - PHY initialization
1238  * @dev: net device structure
1239  * Description: it initializes the driver's PHY state, and attaches the PHY
1240  * to the MAC driver.
1241  *  Return value:
1242  *  0 on success
1243  */
1244 static int stmmac_init_phy(struct net_device *dev)
1245 {
1246 	struct stmmac_priv *priv = netdev_priv(dev);
1247 	int mode = priv->plat->phy_interface;
1248 	struct fwnode_handle *phy_fwnode;
1249 	struct fwnode_handle *fwnode;
1250 	struct ethtool_keee eee;
1251 	u32 dev_flags = 0;
1252 	int ret;
1253 
1254 	if (!phylink_expects_phy(priv->phylink))
1255 		return 0;
1256 
1257 	if (priv->hw->xpcs &&
1258 	    xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
1259 		return 0;
1260 
1261 	fwnode = priv->plat->port_node;
1262 	if (!fwnode)
1263 		fwnode = dev_fwnode(priv->device);
1264 
1265 	if (fwnode)
1266 		phy_fwnode = fwnode_get_phy_node(fwnode);
1267 	else
1268 		phy_fwnode = NULL;
1269 
1270 	if (priv->plat->flags & STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD)
1271 		dev_flags |= PHY_F_KEEP_PREAMBLE_BEFORE_SFD;
1272 
1273 	/* Some DT bindings do not set up the PHY handle. Let's try to
1274 	 * manually parse it
1275 	 */
1276 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1277 		int addr = priv->plat->phy_addr;
1278 		struct phy_device *phydev;
1279 
1280 		if (addr < 0) {
1281 			netdev_err(priv->dev, "no phy found\n");
1282 			return -ENODEV;
1283 		}
1284 
1285 		phydev = mdiobus_get_phy(priv->mii, addr);
1286 		if (!phydev) {
1287 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1288 			return -ENODEV;
1289 		}
1290 
1291 		phydev->dev_flags |= dev_flags;
1292 
1293 		ret = phylink_connect_phy(priv->phylink, phydev);
1294 	} else {
1295 		fwnode_handle_put(phy_fwnode);
1296 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, dev_flags);
1297 	}
1298 
1299 	if (ret) {
1300 		netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
1301 			   ERR_PTR(ret));
1302 		return ret;
1303 	}
1304 
1305 	/* Configure phylib's copy of the LPI timer. Normally,
1306 	 * phylink_config.lpi_timer_default would do this, but there is a
1307 	 * chance that userspace could change the eee_timer setting via sysfs
1308 	 * before the first open. Thus, preserve existing behaviour.
1309 	 */
1310 	if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1311 		eee.tx_lpi_timer = priv->tx_lpi_timer;
1312 		phylink_ethtool_set_eee(priv->phylink, &eee);
1313 	}
1314 
1315 	return 0;
1316 }
1317 
1318 static int stmmac_phylink_setup(struct stmmac_priv *priv)
1319 {
1320 	struct stmmac_mdio_bus_data *mdio_bus_data;
1321 	struct phylink_config *config;
1322 	struct fwnode_handle *fwnode;
1323 	struct phylink_pcs *pcs;
1324 	struct phylink *phylink;
1325 
1326 	config = &priv->phylink_config;
1327 
1328 	config->dev = &priv->dev->dev;
1329 	config->type = PHYLINK_NETDEV;
1330 	config->mac_managed_pm = true;
1331 
1332 	/* Stmmac always requires an RX clock for hardware initialization */
1333 	config->mac_requires_rxc = true;
1334 
1335 	/* Disable EEE RX clock stop to ensure VLAN register access works
1336 	 * correctly.
1337 	 */
1338 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI) &&
1339 	    !(priv->dev->features & NETIF_F_VLAN_FEATURES))
1340 		config->eee_rx_clk_stop_enable = true;
1341 
1342 	/* Set the default transmit clock stop bit based on the platform glue */
1343 	priv->tx_lpi_clk_stop = priv->plat->flags &
1344 				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1345 
1346 	mdio_bus_data = priv->plat->mdio_bus_data;
1347 	if (mdio_bus_data)
1348 		config->default_an_inband = mdio_bus_data->default_an_inband;
1349 
1350 	/* Get the PHY interface modes (at the PHY end of the link) that
1351 	 * are supported by the platform.
1352 	 */
1353 	if (priv->plat->get_interfaces)
1354 		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1355 					   config->supported_interfaces);
1356 
1357 	/* If the supported interfaces have not already been provided, fall back
1358 	 * to the platform/firmware specified interface mode given by
1359 	 * phy_interface as a last resort.
1360 	 */
1361 	if (phy_interface_empty(config->supported_interfaces))
1362 		__set_bit(priv->plat->phy_interface,
1363 			  config->supported_interfaces);
1364 
1365 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1366 	if (priv->hw->xpcs)
1367 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1368 	else
1369 		pcs = priv->hw->phylink_pcs;
1370 
1371 	if (pcs)
1372 		phy_interface_or(config->supported_interfaces,
1373 				 config->supported_interfaces,
1374 				 pcs->supported_interfaces);
1375 
1376 	if (priv->dma_cap.eee) {
1377 		/* Assume all supported interfaces also support LPI */
1378 		memcpy(config->lpi_interfaces, config->supported_interfaces,
1379 		       sizeof(config->lpi_interfaces));
1380 
1381 		/* All full duplex speeds of 100Mbps and above are supported */
1382 		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
1383 		config->lpi_timer_default = eee_timer * 1000;
1384 		config->eee_enabled_default = true;
1385 	}
1386 
1387 	config->wol_phy_speed_ctrl = true;
1388 	if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) {
1389 		config->wol_phy_legacy = true;
1390 	} else {
1391 		if (priv->dma_cap.pmt_remote_wake_up)
1392 			config->wol_mac_support |= WAKE_UCAST;
1393 		if (priv->dma_cap.pmt_magic_frame)
1394 			config->wol_mac_support |= WAKE_MAGIC;
1395 	}
1396 
1397 	fwnode = priv->plat->port_node;
1398 	if (!fwnode)
1399 		fwnode = dev_fwnode(priv->device);
1400 
1401 	phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1402 				 &stmmac_phylink_mac_ops);
1403 	if (IS_ERR(phylink))
1404 		return PTR_ERR(phylink);
1405 
1406 	priv->phylink = phylink;
1407 	return 0;
1408 }
1409 
1410 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1411 				    struct stmmac_dma_conf *dma_conf)
1412 {
1413 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1414 	unsigned int desc_size;
1415 	void *head_rx;
1416 	u32 queue;
1417 
1418 	/* Display RX rings */
1419 	for (queue = 0; queue < rx_cnt; queue++) {
1420 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1421 
1422 		pr_info("\tRX Queue %u rings\n", queue);
1423 
1424 		if (priv->extend_desc) {
1425 			head_rx = (void *)rx_q->dma_erx;
1426 			desc_size = sizeof(struct dma_extended_desc);
1427 		} else {
1428 			head_rx = (void *)rx_q->dma_rx;
1429 			desc_size = sizeof(struct dma_desc);
1430 		}
1431 
1432 		/* Display RX ring */
1433 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1434 				    rx_q->dma_rx_phy, desc_size);
1435 	}
1436 }
1437 
1438 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1439 				    struct stmmac_dma_conf *dma_conf)
1440 {
1441 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1442 	unsigned int desc_size;
1443 	void *head_tx;
1444 	u32 queue;
1445 
1446 	/* Display TX rings */
1447 	for (queue = 0; queue < tx_cnt; queue++) {
1448 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1449 
1450 		pr_info("\tTX Queue %d rings\n", queue);
1451 
1452 		if (priv->extend_desc) {
1453 			head_tx = (void *)tx_q->dma_etx;
1454 			desc_size = sizeof(struct dma_extended_desc);
1455 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1456 			head_tx = (void *)tx_q->dma_entx;
1457 			desc_size = sizeof(struct dma_edesc);
1458 		} else {
1459 			head_tx = (void *)tx_q->dma_tx;
1460 			desc_size = sizeof(struct dma_desc);
1461 		}
1462 
1463 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1464 				    tx_q->dma_tx_phy, desc_size);
1465 	}
1466 }
1467 
1468 static void stmmac_display_rings(struct stmmac_priv *priv,
1469 				 struct stmmac_dma_conf *dma_conf)
1470 {
1471 	/* Display RX ring */
1472 	stmmac_display_rx_rings(priv, dma_conf);
1473 
1474 	/* Display TX ring */
1475 	stmmac_display_tx_rings(priv, dma_conf);
1476 }
1477 
1478 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1479 {
1480 	if (stmmac_xdp_is_enabled(priv))
1481 		return XDP_PACKET_HEADROOM;
1482 
1483 	return NET_SKB_PAD;
1484 }
1485 
1486 static int stmmac_set_bfsize(int mtu)
1487 {
1488 	int ret;
1489 
1490 	if (mtu >= BUF_SIZE_8KiB)
1491 		ret = BUF_SIZE_16KiB;
1492 	else if (mtu >= BUF_SIZE_4KiB)
1493 		ret = BUF_SIZE_8KiB;
1494 	else if (mtu >= BUF_SIZE_2KiB)
1495 		ret = BUF_SIZE_4KiB;
1496 	else if (mtu > DEFAULT_BUFSIZE)
1497 		ret = BUF_SIZE_2KiB;
1498 	else
1499 		ret = DEFAULT_BUFSIZE;
1500 
1501 	return ret;
1502 }
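
/* Worked examples for the mapping above: an MTU of 1500 keeps the default
 * 1536 byte buffer, an MTU of 3000 selects BUF_SIZE_4KiB and a jumbo MTU of
 * 9000 selects BUF_SIZE_16KiB.
 */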
1503 
1504 /**
1505  * stmmac_clear_rx_descriptors - clear RX descriptors
1506  * @priv: driver private structure
1507  * @dma_conf: structure to take the dma data
1508  * @queue: RX queue index
1509  * Description: this function is called to clear the RX descriptors
1510  * whether basic or extended descriptors are used.
1511  */
1512 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1513 					struct stmmac_dma_conf *dma_conf,
1514 					u32 queue)
1515 {
1516 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1517 	int i;
1518 
1519 	/* Clear the RX descriptors */
1520 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1521 		if (priv->extend_desc)
1522 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1523 					priv->use_riwt, priv->mode,
1524 					(i == dma_conf->dma_rx_size - 1),
1525 					dma_conf->dma_buf_sz);
1526 		else
1527 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1528 					priv->use_riwt, priv->mode,
1529 					(i == dma_conf->dma_rx_size - 1),
1530 					dma_conf->dma_buf_sz);
1531 }
1532 
1533 /**
1534  * stmmac_clear_tx_descriptors - clear tx descriptors
1535  * @priv: driver private structure
1536  * @dma_conf: structure to take the dma data
1537  * @queue: TX queue index.
1538  * Description: this function is called to clear the TX descriptors
1539  * whether basic or extended descriptors are used.
1540  */
1541 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1542 					struct stmmac_dma_conf *dma_conf,
1543 					u32 queue)
1544 {
1545 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1546 	int i;
1547 
1548 	/* Clear the TX descriptors */
1549 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1550 		int last = (i == (dma_conf->dma_tx_size - 1));
1551 		struct dma_desc *p;
1552 
1553 		if (priv->extend_desc)
1554 			p = &tx_q->dma_etx[i].basic;
1555 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1556 			p = &tx_q->dma_entx[i].basic;
1557 		else
1558 			p = &tx_q->dma_tx[i];
1559 
1560 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1561 	}
1562 }
1563 
1564 /**
1565  * stmmac_clear_descriptors - clear descriptors
1566  * @priv: driver private structure
1567  * @dma_conf: structure to take the dma data
1568  * Description: this function is called to clear the TX and RX descriptors
1569  * whether basic or extended descriptors are used.
1570  */
1571 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1572 				     struct stmmac_dma_conf *dma_conf)
1573 {
1574 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1575 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1576 	u32 queue;
1577 
1578 	/* Clear the RX descriptors */
1579 	for (queue = 0; queue < rx_queue_cnt; queue++)
1580 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1581 
1582 	/* Clear the TX descriptors */
1583 	for (queue = 0; queue < tx_queue_cnt; queue++)
1584 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1585 }
1586 
1587 /**
1588  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1589  * @priv: driver private structure
1590  * @dma_conf: structure to take the dma data
1591  * @p: descriptor pointer
1592  * @i: descriptor index
1593  * @flags: gfp flag
1594  * @queue: RX queue index
1595  * Description: this function is called to allocate a receive buffer, perform
1596  * the DMA mapping and init the descriptor.
1597  */
1598 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1599 				  struct stmmac_dma_conf *dma_conf,
1600 				  struct dma_desc *p,
1601 				  int i, gfp_t flags, u32 queue)
1602 {
1603 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1604 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1605 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1606 
1607 	if (priv->dma_cap.host_dma_width <= 32)
1608 		gfp |= GFP_DMA32;
1609 
1610 	if (!buf->page) {
1611 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1612 		if (!buf->page)
1613 			return -ENOMEM;
1614 		buf->page_offset = stmmac_rx_offset(priv);
1615 	}
1616 
1617 	if (priv->sph_active && !buf->sec_page) {
1618 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1619 		if (!buf->sec_page)
1620 			return -ENOMEM;
1621 
1622 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1623 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1624 	} else {
1625 		buf->sec_page = NULL;
1626 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1627 	}
1628 
1629 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1630 
1631 	stmmac_set_desc_addr(priv, p, buf->addr);
1632 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1633 		stmmac_init_desc3(priv, p);
1634 
1635 	return 0;
1636 }
1637 
1638 /**
1639  * stmmac_free_rx_buffer - free RX dma buffers
1640  * @priv: private structure
1641  * @rx_q: RX queue
1642  * @i: buffer index.
1643  */
1644 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1645 				  struct stmmac_rx_queue *rx_q,
1646 				  int i)
1647 {
1648 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1649 
1650 	if (buf->page)
1651 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1652 	buf->page = NULL;
1653 
1654 	if (buf->sec_page)
1655 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1656 	buf->sec_page = NULL;
1657 }
1658 
1659 /**
1660  * stmmac_free_tx_buffer - free TX dma buffers
1661  * @priv: private structure
1662  * @dma_conf: structure to take the dma data
1663  * @queue: TX queue index
1664  * @i: buffer index.
1665  */
1666 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1667 				  struct stmmac_dma_conf *dma_conf,
1668 				  u32 queue, int i)
1669 {
1670 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1671 
1672 	if (tx_q->tx_skbuff_dma[i].buf &&
1673 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1674 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1675 			dma_unmap_page(priv->device,
1676 				       tx_q->tx_skbuff_dma[i].buf,
1677 				       tx_q->tx_skbuff_dma[i].len,
1678 				       DMA_TO_DEVICE);
1679 		else
1680 			dma_unmap_single(priv->device,
1681 					 tx_q->tx_skbuff_dma[i].buf,
1682 					 tx_q->tx_skbuff_dma[i].len,
1683 					 DMA_TO_DEVICE);
1684 	}
1685 
1686 	if (tx_q->xdpf[i] &&
1687 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1688 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1689 		xdp_return_frame(tx_q->xdpf[i]);
1690 		tx_q->xdpf[i] = NULL;
1691 	}
1692 
1693 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1694 		tx_q->xsk_frames_done++;
1695 
1696 	if (tx_q->tx_skbuff[i] &&
1697 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1698 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1699 		tx_q->tx_skbuff[i] = NULL;
1700 	}
1701 
1702 	tx_q->tx_skbuff_dma[i].buf = 0;
1703 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1704 }
1705 
1706 /**
1707  * dma_free_rx_skbufs - free RX dma buffers
1708  * @priv: private structure
1709  * @dma_conf: structure to take the dma data
1710  * @queue: RX queue index
1711  */
1712 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1713 			       struct stmmac_dma_conf *dma_conf,
1714 			       u32 queue)
1715 {
1716 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1717 	int i;
1718 
1719 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1720 		stmmac_free_rx_buffer(priv, rx_q, i);
1721 }
1722 
1723 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1724 				   struct stmmac_dma_conf *dma_conf,
1725 				   u32 queue, gfp_t flags)
1726 {
1727 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1728 	int i;
1729 
1730 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1731 		struct dma_desc *p;
1732 		int ret;
1733 
1734 		if (priv->extend_desc)
1735 			p = &((rx_q->dma_erx + i)->basic);
1736 		else
1737 			p = rx_q->dma_rx + i;
1738 
1739 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1740 					     queue);
1741 		if (ret)
1742 			return ret;
1743 
1744 		rx_q->buf_alloc_num++;
1745 	}
1746 
1747 	return 0;
1748 }
1749 
1750 /**
1751  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1752  * @priv: private structure
1753  * @dma_conf: structure to take the dma data
1754  * @queue: RX queue index
1755  */
1756 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1757 				struct stmmac_dma_conf *dma_conf,
1758 				u32 queue)
1759 {
1760 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1761 	int i;
1762 
1763 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1764 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1765 
1766 		if (!buf->xdp)
1767 			continue;
1768 
1769 		xsk_buff_free(buf->xdp);
1770 		buf->xdp = NULL;
1771 	}
1772 }
1773 
1774 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1775 				      struct stmmac_dma_conf *dma_conf,
1776 				      u32 queue)
1777 {
1778 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1779 	int i;
1780 
	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
	 * in struct xdp_buff_xsk to stash driver-specific information. Use
	 * this macro to make sure there are no size violations.
	 */
1785 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1786 
1787 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1788 		struct stmmac_rx_buffer *buf;
1789 		dma_addr_t dma_addr;
1790 		struct dma_desc *p;
1791 
1792 		if (priv->extend_desc)
1793 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1794 		else
1795 			p = rx_q->dma_rx + i;
1796 
1797 		buf = &rx_q->buf_pool[i];
1798 
1799 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1800 		if (!buf->xdp)
1801 			return -ENOMEM;
1802 
1803 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1804 		stmmac_set_desc_addr(priv, p, dma_addr);
1805 		rx_q->buf_alloc_num++;
1806 	}
1807 
1808 	return 0;
1809 }
1810 
1811 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1812 {
1813 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1814 		return NULL;
1815 
1816 	return xsk_get_pool_from_qid(priv->dev, queue);
1817 }
1818 
1819 /**
1820  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1821  * @priv: driver private structure
1822  * @dma_conf: structure to take the dma data
1823  * @queue: RX queue index
1824  * @flags: gfp flag.
1825  * Description: this function initializes the DMA RX descriptors
1826  * and allocates the socket buffers. It supports the chained and ring
1827  * modes.
1828  */
1829 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1830 				    struct stmmac_dma_conf *dma_conf,
1831 				    u32 queue, gfp_t flags)
1832 {
1833 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1834 	int ret;
1835 
1836 	netif_dbg(priv, probe, priv->dev,
1837 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1838 		  (u32)rx_q->dma_rx_phy);
1839 
1840 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1841 
1842 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1843 
1844 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1845 
1846 	if (rx_q->xsk_pool) {
1847 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1848 						   MEM_TYPE_XSK_BUFF_POOL,
1849 						   NULL));
1850 		netdev_info(priv->dev,
1851 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1852 			    rx_q->queue_index);
1853 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1854 	} else {
1855 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1856 						   MEM_TYPE_PAGE_POOL,
1857 						   rx_q->page_pool));
1858 		netdev_info(priv->dev,
1859 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1860 			    rx_q->queue_index);
1861 	}
1862 
1863 	if (rx_q->xsk_pool) {
1864 		/* RX XDP ZC buffer pool may not be populated, e.g.
1865 		 * xdpsock TX-only.
1866 		 */
1867 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1868 	} else {
1869 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1870 		if (ret < 0)
1871 			return -ENOMEM;
1872 	}
1873 
1874 	/* Setup the chained descriptor addresses */
1875 	if (priv->mode == STMMAC_CHAIN_MODE) {
1876 		if (priv->extend_desc)
1877 			stmmac_mode_init(priv, rx_q->dma_erx,
1878 					 rx_q->dma_rx_phy,
1879 					 dma_conf->dma_rx_size, 1);
1880 		else
1881 			stmmac_mode_init(priv, rx_q->dma_rx,
1882 					 rx_q->dma_rx_phy,
1883 					 dma_conf->dma_rx_size, 0);
1884 	}
1885 
1886 	return 0;
1887 }
1888 
1889 static int init_dma_rx_desc_rings(struct net_device *dev,
1890 				  struct stmmac_dma_conf *dma_conf,
1891 				  gfp_t flags)
1892 {
1893 	struct stmmac_priv *priv = netdev_priv(dev);
1894 	u32 rx_count = priv->plat->rx_queues_to_use;
1895 	int queue;
1896 	int ret;
1897 
1898 	/* RX INITIALIZATION */
1899 	netif_dbg(priv, probe, priv->dev,
1900 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1901 
1902 	for (queue = 0; queue < rx_count; queue++) {
1903 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1904 		if (ret)
1905 			goto err_init_rx_buffers;
1906 	}
1907 
1908 	return 0;
1909 
1910 err_init_rx_buffers:
1911 	while (queue >= 0) {
1912 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1913 
1914 		if (rx_q->xsk_pool)
1915 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1916 		else
1917 			dma_free_rx_skbufs(priv, dma_conf, queue);
1918 
1919 		rx_q->buf_alloc_num = 0;
1920 		rx_q->xsk_pool = NULL;
1921 
1922 		queue--;
1923 	}
1924 
1925 	return ret;
1926 }
1927 
1928 /**
1929  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1930  * @priv: driver private structure
1931  * @dma_conf: structure to take the dma data
1932  * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors and the
 * per-descriptor bookkeeping. It supports the chained and ring modes.
1936  */
1937 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1938 				    struct stmmac_dma_conf *dma_conf,
1939 				    u32 queue)
1940 {
1941 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1942 	int i;
1943 
1944 	netif_dbg(priv, probe, priv->dev,
1945 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1946 		  (u32)tx_q->dma_tx_phy);
1947 
1948 	/* Setup the chained descriptor addresses */
1949 	if (priv->mode == STMMAC_CHAIN_MODE) {
1950 		if (priv->extend_desc)
1951 			stmmac_mode_init(priv, tx_q->dma_etx,
1952 					 tx_q->dma_tx_phy,
1953 					 dma_conf->dma_tx_size, 1);
1954 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1955 			stmmac_mode_init(priv, tx_q->dma_tx,
1956 					 tx_q->dma_tx_phy,
1957 					 dma_conf->dma_tx_size, 0);
1958 	}
1959 
1960 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1961 
1962 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1963 		struct dma_desc *p;
1964 
1965 		if (priv->extend_desc)
1966 			p = &((tx_q->dma_etx + i)->basic);
1967 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1968 			p = &((tx_q->dma_entx + i)->basic);
1969 		else
1970 			p = tx_q->dma_tx + i;
1971 
1972 		stmmac_clear_desc(priv, p);
1973 
1974 		tx_q->tx_skbuff_dma[i].buf = 0;
1975 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1976 		tx_q->tx_skbuff_dma[i].len = 0;
1977 		tx_q->tx_skbuff_dma[i].last_segment = false;
1978 		tx_q->tx_skbuff[i] = NULL;
1979 	}
1980 
1981 	return 0;
1982 }
1983 
1984 static int init_dma_tx_desc_rings(struct net_device *dev,
1985 				  struct stmmac_dma_conf *dma_conf)
1986 {
1987 	struct stmmac_priv *priv = netdev_priv(dev);
1988 	u32 tx_queue_cnt;
1989 	u32 queue;
1990 
1991 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1992 
1993 	for (queue = 0; queue < tx_queue_cnt; queue++)
1994 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1995 
1996 	return 0;
1997 }
1998 
1999 /**
2000  * init_dma_desc_rings - init the RX/TX descriptor rings
2001  * @dev: net device structure
2002  * @dma_conf: structure to take the dma data
2003  * @flags: gfp flag.
2004  * Description: this function initializes the DMA RX/TX descriptors
2005  * and allocates the socket buffers. It supports the chained and ring
2006  * modes.
2007  */
2008 static int init_dma_desc_rings(struct net_device *dev,
2009 			       struct stmmac_dma_conf *dma_conf,
2010 			       gfp_t flags)
2011 {
2012 	struct stmmac_priv *priv = netdev_priv(dev);
2013 	int ret;
2014 
2015 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
2016 	if (ret)
2017 		return ret;
2018 
2019 	ret = init_dma_tx_desc_rings(dev, dma_conf);
2020 
2021 	stmmac_clear_descriptors(priv, dma_conf);
2022 
2023 	if (netif_msg_hw(priv))
2024 		stmmac_display_rings(priv, dma_conf);
2025 
2026 	return ret;
2027 }
2028 
2029 /**
2030  * dma_free_tx_skbufs - free TX dma buffers
2031  * @priv: private structure
2032  * @dma_conf: structure to take the dma data
2033  * @queue: TX queue index
2034  */
2035 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
2036 			       struct stmmac_dma_conf *dma_conf,
2037 			       u32 queue)
2038 {
2039 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2040 	int i;
2041 
2042 	tx_q->xsk_frames_done = 0;
2043 
2044 	for (i = 0; i < dma_conf->dma_tx_size; i++)
2045 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
2046 
2047 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
2048 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2049 		tx_q->xsk_frames_done = 0;
2050 		tx_q->xsk_pool = NULL;
2051 	}
2052 }
2053 
2054 /**
2055  * stmmac_free_tx_skbufs - free TX skb buffers
2056  * @priv: private structure
2057  */
2058 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
2059 {
2060 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
2061 	u32 queue;
2062 
2063 	for (queue = 0; queue < tx_queue_cnt; queue++)
2064 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
2065 }
2066 
2067 /**
2068  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
2069  * @priv: private structure
2070  * @dma_conf: structure to take the dma data
2071  * @queue: RX queue index
2072  */
2073 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
2074 					 struct stmmac_dma_conf *dma_conf,
2075 					 u32 queue)
2076 {
2077 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2078 
2079 	/* Release the DMA RX socket buffers */
2080 	if (rx_q->xsk_pool)
2081 		dma_free_rx_xskbufs(priv, dma_conf, queue);
2082 	else
2083 		dma_free_rx_skbufs(priv, dma_conf, queue);
2084 
2085 	rx_q->buf_alloc_num = 0;
2086 	rx_q->xsk_pool = NULL;
2087 
2088 	/* Free DMA regions of consistent memory previously allocated */
2089 	if (!priv->extend_desc)
2090 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2091 				  sizeof(struct dma_desc),
2092 				  rx_q->dma_rx, rx_q->dma_rx_phy);
2093 	else
2094 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2095 				  sizeof(struct dma_extended_desc),
2096 				  rx_q->dma_erx, rx_q->dma_rx_phy);
2097 
2098 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
2099 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
2100 
2101 	kfree(rx_q->buf_pool);
2102 	if (rx_q->page_pool)
2103 		page_pool_destroy(rx_q->page_pool);
2104 }
2105 
2106 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2107 				       struct stmmac_dma_conf *dma_conf)
2108 {
2109 	u32 rx_count = priv->plat->rx_queues_to_use;
2110 	u32 queue;
2111 
2112 	/* Free RX queue resources */
2113 	for (queue = 0; queue < rx_count; queue++)
2114 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
2115 }
2116 
2117 /**
2118  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2119  * @priv: private structure
2120  * @dma_conf: structure to take the dma data
2121  * @queue: TX queue index
2122  */
2123 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2124 					 struct stmmac_dma_conf *dma_conf,
2125 					 u32 queue)
2126 {
2127 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2128 	size_t size;
2129 	void *addr;
2130 
2131 	/* Release the DMA TX socket buffers */
2132 	dma_free_tx_skbufs(priv, dma_conf, queue);
2133 
2134 	if (priv->extend_desc) {
2135 		size = sizeof(struct dma_extended_desc);
2136 		addr = tx_q->dma_etx;
2137 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2138 		size = sizeof(struct dma_edesc);
2139 		addr = tx_q->dma_entx;
2140 	} else {
2141 		size = sizeof(struct dma_desc);
2142 		addr = tx_q->dma_tx;
2143 	}
2144 
2145 	size *= dma_conf->dma_tx_size;
2146 
2147 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2148 
2149 	kfree(tx_q->tx_skbuff_dma);
2150 	kfree(tx_q->tx_skbuff);
2151 }
2152 
2153 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2154 				       struct stmmac_dma_conf *dma_conf)
2155 {
2156 	u32 tx_count = priv->plat->tx_queues_to_use;
2157 	u32 queue;
2158 
2159 	/* Free TX queue resources */
2160 	for (queue = 0; queue < tx_count; queue++)
2161 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2162 }
2163 
2164 /**
2165  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2166  * @priv: private structure
2167  * @dma_conf: structure to take the dma data
2168  * @queue: RX queue index
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the RX resources for the given queue: the page
 * pool backing the receive buffers, the buffer bookkeeping array, the
 * descriptor ring and the XDP RxQ info.
2173  */
2174 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2175 					 struct stmmac_dma_conf *dma_conf,
2176 					 u32 queue)
2177 {
2178 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2179 	struct stmmac_channel *ch = &priv->channel[queue];
2180 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2181 	struct page_pool_params pp_params = { 0 };
2182 	unsigned int dma_buf_sz_pad, num_pages;
2183 	unsigned int napi_id;
2184 	int ret;
2185 
2186 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2187 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2188 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2189 
2190 	rx_q->queue_index = queue;
2191 	rx_q->priv_data = priv;
2192 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2193 
2194 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2195 	pp_params.pool_size = dma_conf->dma_rx_size;
2196 	pp_params.order = order_base_2(num_pages);
2197 	pp_params.nid = dev_to_node(priv->device);
2198 	pp_params.dev = priv->device;
2199 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2200 	pp_params.offset = stmmac_rx_offset(priv);
2201 	pp_params.max_len = dma_conf->dma_buf_sz;
2202 
2203 	if (priv->sph_active) {
2204 		pp_params.offset = 0;
2205 		pp_params.max_len += stmmac_rx_offset(priv);
2206 	}
2207 
2208 	rx_q->page_pool = page_pool_create(&pp_params);
2209 	if (IS_ERR(rx_q->page_pool)) {
2210 		ret = PTR_ERR(rx_q->page_pool);
2211 		rx_q->page_pool = NULL;
2212 		return ret;
2213 	}
2214 
2215 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2216 				 sizeof(*rx_q->buf_pool),
2217 				 GFP_KERNEL);
2218 	if (!rx_q->buf_pool)
2219 		return -ENOMEM;
2220 
2221 	if (priv->extend_desc) {
2222 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2223 						   dma_conf->dma_rx_size *
2224 						   sizeof(struct dma_extended_desc),
2225 						   &rx_q->dma_rx_phy,
2226 						   GFP_KERNEL);
2227 		if (!rx_q->dma_erx)
2228 			return -ENOMEM;
2229 
2230 	} else {
2231 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2232 						  dma_conf->dma_rx_size *
2233 						  sizeof(struct dma_desc),
2234 						  &rx_q->dma_rx_phy,
2235 						  GFP_KERNEL);
2236 		if (!rx_q->dma_rx)
2237 			return -ENOMEM;
2238 	}
2239 
2240 	if (stmmac_xdp_is_enabled(priv) &&
2241 	    test_bit(queue, priv->af_xdp_zc_qps))
2242 		napi_id = ch->rxtx_napi.napi_id;
2243 	else
2244 		napi_id = ch->rx_napi.napi_id;
2245 
2246 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2247 			       rx_q->queue_index,
2248 			       napi_id);
2249 	if (ret) {
2250 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2251 		return -EINVAL;
2252 	}
2253 
2254 	return 0;
2255 }
2256 
2257 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2258 				       struct stmmac_dma_conf *dma_conf)
2259 {
2260 	u32 rx_count = priv->plat->rx_queues_to_use;
2261 	u32 queue;
2262 	int ret;
2263 
2264 	/* RX queues buffers and DMA */
2265 	for (queue = 0; queue < rx_count; queue++) {
2266 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2267 		if (ret)
2268 			goto err_dma;
2269 	}
2270 
2271 	return 0;
2272 
2273 err_dma:
2274 	free_dma_rx_desc_resources(priv, dma_conf);
2275 
2276 	return ret;
2277 }
2278 
2279 /**
2280  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2281  * @priv: private structure
2282  * @dma_conf: structure to take the dma data
2283  * @queue: TX queue index
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the TX resources for the given queue: the
 * descriptor ring and the per-descriptor bookkeeping arrays used to track
 * skbs and DMA mappings.
2288  */
2289 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2290 					 struct stmmac_dma_conf *dma_conf,
2291 					 u32 queue)
2292 {
2293 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2294 	size_t size;
2295 	void *addr;
2296 
2297 	tx_q->queue_index = queue;
2298 	tx_q->priv_data = priv;
2299 
2300 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2301 				      sizeof(*tx_q->tx_skbuff_dma),
2302 				      GFP_KERNEL);
2303 	if (!tx_q->tx_skbuff_dma)
2304 		return -ENOMEM;
2305 
2306 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2307 				  sizeof(struct sk_buff *),
2308 				  GFP_KERNEL);
2309 	if (!tx_q->tx_skbuff)
2310 		return -ENOMEM;
2311 
2312 	if (priv->extend_desc)
2313 		size = sizeof(struct dma_extended_desc);
2314 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2315 		size = sizeof(struct dma_edesc);
2316 	else
2317 		size = sizeof(struct dma_desc);
2318 
2319 	size *= dma_conf->dma_tx_size;
2320 
2321 	addr = dma_alloc_coherent(priv->device, size,
2322 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2323 	if (!addr)
2324 		return -ENOMEM;
2325 
2326 	if (priv->extend_desc)
2327 		tx_q->dma_etx = addr;
2328 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2329 		tx_q->dma_entx = addr;
2330 	else
2331 		tx_q->dma_tx = addr;
2332 
2333 	return 0;
2334 }
2335 
2336 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2337 				       struct stmmac_dma_conf *dma_conf)
2338 {
2339 	u32 tx_count = priv->plat->tx_queues_to_use;
2340 	u32 queue;
2341 	int ret;
2342 
2343 	/* TX queues buffers and DMA */
2344 	for (queue = 0; queue < tx_count; queue++) {
2345 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2346 		if (ret)
2347 			goto err_dma;
2348 	}
2349 
2350 	return 0;
2351 
2352 err_dma:
2353 	free_dma_tx_desc_resources(priv, dma_conf);
2354 	return ret;
2355 }
2356 
2357 /**
2358  * alloc_dma_desc_resources - alloc TX/RX resources.
2359  * @priv: private structure
2360  * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow a zero-copy mechanism.
2365  */
2366 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2367 				    struct stmmac_dma_conf *dma_conf)
2368 {
2369 	/* RX Allocation */
2370 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2371 
2372 	if (ret)
2373 		return ret;
2374 
2375 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2376 
2377 	return ret;
2378 }
2379 
2380 /**
2381  * free_dma_desc_resources - free dma desc resources
2382  * @priv: private structure
2383  * @dma_conf: structure to take the dma data
2384  */
2385 static void free_dma_desc_resources(struct stmmac_priv *priv,
2386 				    struct stmmac_dma_conf *dma_conf)
2387 {
2388 	/* Release the DMA TX socket buffers */
2389 	free_dma_tx_desc_resources(priv, dma_conf);
2390 
2391 	/* Release the DMA RX socket buffers later
2392 	 * to ensure all pending XDP_TX buffers are returned.
2393 	 */
2394 	free_dma_rx_desc_resources(priv, dma_conf);
2395 }
2396 
2397 /**
2398  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2399  *  @priv: driver private structure
2400  *  Description: It is used for enabling the rx queues in the MAC
2401  */
2402 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2403 {
2404 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2405 	int queue;
2406 	u8 mode;
2407 
2408 	for (queue = 0; queue < rx_queues_count; queue++) {
2409 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2410 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2411 	}
2412 }
2413 
2414 /**
2415  * stmmac_start_rx_dma - start RX DMA channel
2416  * @priv: driver private structure
2417  * @chan: RX channel index
2418  * Description:
 * This starts an RX DMA channel
2420  */
2421 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2422 {
2423 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2424 	stmmac_start_rx(priv, priv->ioaddr, chan);
2425 }
2426 
2427 /**
2428  * stmmac_start_tx_dma - start TX DMA channel
2429  * @priv: driver private structure
2430  * @chan: TX channel index
2431  * Description:
2432  * This starts a TX DMA channel
2433  */
2434 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2435 {
2436 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2437 	stmmac_start_tx(priv, priv->ioaddr, chan);
2438 }
2439 
2440 /**
2441  * stmmac_stop_rx_dma - stop RX DMA channel
2442  * @priv: driver private structure
2443  * @chan: RX channel index
2444  * Description:
 * This stops an RX DMA channel
2446  */
2447 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2448 {
2449 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2450 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2451 }
2452 
2453 /**
2454  * stmmac_stop_tx_dma - stop TX DMA channel
2455  * @priv: driver private structure
2456  * @chan: TX channel index
2457  * Description:
2458  * This stops a TX DMA channel
2459  */
2460 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2461 {
2462 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2463 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2464 }
2465 
2466 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2467 {
2468 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2469 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2470 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2471 	u32 chan;
2472 
2473 	for (chan = 0; chan < dma_csr_ch; chan++) {
2474 		struct stmmac_channel *ch = &priv->channel[chan];
2475 		unsigned long flags;
2476 
2477 		spin_lock_irqsave(&ch->lock, flags);
2478 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2479 		spin_unlock_irqrestore(&ch->lock, flags);
2480 	}
2481 }
2482 
2483 /**
2484  * stmmac_start_all_dma - start all RX and TX DMA channels
2485  * @priv: driver private structure
2486  * Description:
2487  * This starts all the RX and TX DMA channels
2488  */
2489 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2490 {
2491 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2492 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2493 	u32 chan = 0;
2494 
2495 	for (chan = 0; chan < rx_channels_count; chan++)
2496 		stmmac_start_rx_dma(priv, chan);
2497 
2498 	for (chan = 0; chan < tx_channels_count; chan++)
2499 		stmmac_start_tx_dma(priv, chan);
2500 }
2501 
2502 /**
2503  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2504  * @priv: driver private structure
2505  * Description:
2506  * This stops the RX and TX DMA channels
2507  */
2508 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2509 {
2510 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2511 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2512 	u32 chan = 0;
2513 
2514 	for (chan = 0; chan < rx_channels_count; chan++)
2515 		stmmac_stop_rx_dma(priv, chan);
2516 
2517 	for (chan = 0; chan < tx_channels_count; chan++)
2518 		stmmac_stop_tx_dma(priv, chan);
2519 }
2520 
2521 /**
2522  *  stmmac_dma_operation_mode - HW DMA operation mode
2523  *  @priv: driver private structure
2524  *  Description: it is used for configuring the DMA operation mode register in
2525  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2526  */
2527 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2528 {
2529 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2530 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2531 	int rxfifosz = priv->plat->rx_fifo_size;
2532 	int txfifosz = priv->plat->tx_fifo_size;
2533 	u32 txmode = 0;
2534 	u32 rxmode = 0;
2535 	u32 chan = 0;
2536 	u8 qmode = 0;
2537 
2538 	if (rxfifosz == 0)
2539 		rxfifosz = priv->dma_cap.rx_fifo_size;
2540 	if (txfifosz == 0)
2541 		txfifosz = priv->dma_cap.tx_fifo_size;
2542 
2543 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2544 	if (dwmac_is_xmac(priv->plat->core_type)) {
2545 		rxfifosz /= rx_channels_count;
2546 		txfifosz /= tx_channels_count;
2547 	}
2548 
2549 	if (priv->plat->force_thresh_dma_mode) {
2550 		txmode = tc;
2551 		rxmode = tc;
2552 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2553 		/*
2554 		 * In case of GMAC, SF mode can be enabled
2555 		 * to perform the TX COE in HW. This depends on:
2556 		 * 1) TX COE if actually supported
2557 		 * 2) There is no bugged Jumbo frame support
2558 		 *    that needs to not insert csum in the TDES.
2559 		 */
2560 		txmode = SF_DMA_MODE;
2561 		rxmode = SF_DMA_MODE;
2562 		priv->xstats.threshold = SF_DMA_MODE;
2563 	} else {
2564 		txmode = tc;
2565 		rxmode = SF_DMA_MODE;
2566 	}
2567 
2568 	/* configure all channels */
2569 	for (chan = 0; chan < rx_channels_count; chan++) {
2570 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2571 		u32 buf_size;
2572 
2573 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2574 
2575 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2576 				rxfifosz, qmode);
2577 
2578 		if (rx_q->xsk_pool) {
2579 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2580 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2581 					      buf_size,
2582 					      chan);
2583 		} else {
2584 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2585 					      priv->dma_conf.dma_buf_sz,
2586 					      chan);
2587 		}
2588 	}
2589 
2590 	for (chan = 0; chan < tx_channels_count; chan++) {
2591 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2592 
2593 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2594 				txfifosz, qmode);
2595 	}
2596 }
2597 
2598 static void stmmac_xsk_request_timestamp(void *_priv)
2599 {
2600 	struct stmmac_metadata_request *meta_req = _priv;
2601 
2602 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2603 	*meta_req->set_ic = true;
2604 }
2605 
2606 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2607 {
2608 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2609 	struct stmmac_priv *priv = tx_compl->priv;
2610 	struct dma_desc *desc = tx_compl->desc;
2611 	bool found = false;
2612 	u64 ns = 0;
2613 
2614 	if (!priv->hwts_tx_en)
2615 		return 0;
2616 
2617 	/* check tx tstamp status */
2618 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2619 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2620 		found = true;
2621 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2622 		found = true;
2623 	}
2624 
2625 	if (found) {
2626 		ns -= priv->plat->cdc_error_adj;
2627 		return ns_to_ktime(ns);
2628 	}
2629 
2630 	return 0;
2631 }
2632 
2633 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2634 {
2635 	struct timespec64 ts = ns_to_timespec64(launch_time);
2636 	struct stmmac_metadata_request *meta_req = _priv;
2637 
2638 	if (meta_req->tbs & STMMAC_TBS_EN)
2639 		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2640 				    ts.tv_nsec);
2641 }
2642 
2643 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2644 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2645 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2646 	.tmo_request_launch_time	= stmmac_xsk_request_launch_time,
2647 };
2648 
2649 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2650 {
2651 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2652 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2653 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2654 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
2655 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2656 	unsigned int entry = tx_q->cur_tx;
2657 	struct dma_desc *tx_desc = NULL;
2658 	struct xdp_desc xdp_desc;
2659 	bool work_done = true;
2660 	u32 tx_set_ic_bit = 0;
2661 
2662 	/* Avoids TX time-out as we are sharing with slow path */
2663 	txq_trans_cond_update(nq);
2664 
2665 	budget = min(budget, stmmac_tx_avail(priv, queue));
2666 
2667 	for (; budget > 0; budget--) {
2668 		struct stmmac_metadata_request meta_req;
2669 		struct xsk_tx_metadata *meta = NULL;
2670 		dma_addr_t dma_addr;
2671 		bool set_ic;
2672 
		/* We are sharing the ring with the slow path, so stop XSK TX
		 * descriptor submission when the available TX ring space drops
		 * below the threshold.
		 */
2676 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2677 		    !netif_carrier_ok(priv->dev)) {
2678 			work_done = false;
2679 			break;
2680 		}
2681 
2682 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2683 			break;
2684 
2685 		if (priv->est && priv->est->enable &&
2686 		    priv->est->max_sdu[queue] &&
2687 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2688 			priv->xstats.max_sdu_txq_drop[queue]++;
2689 			continue;
2690 		}
2691 
2692 		if (likely(priv->extend_desc))
2693 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2694 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2695 			tx_desc = &tx_q->dma_entx[entry].basic;
2696 		else
2697 			tx_desc = tx_q->dma_tx + entry;
2698 
2699 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2700 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2701 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2702 
2703 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2704 
		/* To return the XDP buffer to the XSK pool, we simply call
2706 		 * xsk_tx_completed(), so we don't need to fill up
2707 		 * 'buf' and 'xdpf'.
2708 		 */
2709 		tx_q->tx_skbuff_dma[entry].buf = 0;
2710 		tx_q->xdpf[entry] = NULL;
2711 
2712 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2713 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2714 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2715 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2716 
2717 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2718 
2719 		tx_q->tx_count_frames++;
2720 
2721 		if (!priv->tx_coal_frames[queue])
2722 			set_ic = false;
2723 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2724 			set_ic = true;
2725 		else
2726 			set_ic = false;
2727 
2728 		meta_req.priv = priv;
2729 		meta_req.tx_desc = tx_desc;
2730 		meta_req.set_ic = &set_ic;
2731 		meta_req.tbs = tx_q->tbs;
2732 		meta_req.edesc = &tx_q->dma_entx[entry];
2733 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2734 					&meta_req);
2735 		if (set_ic) {
2736 			tx_q->tx_count_frames = 0;
2737 			stmmac_set_tx_ic(priv, tx_desc);
2738 			tx_set_ic_bit++;
2739 		}
2740 
2741 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2742 				       csum, priv->mode, true, true,
2743 				       xdp_desc.len);
2744 
2745 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2746 
2747 		xsk_tx_metadata_to_compl(meta,
2748 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2749 
2750 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2751 		entry = tx_q->cur_tx;
2752 	}
2753 	u64_stats_update_begin(&txq_stats->napi_syncp);
2754 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2755 	u64_stats_update_end(&txq_stats->napi_syncp);
2756 
2757 	if (tx_desc) {
2758 		stmmac_flush_tx_descriptors(priv, queue);
2759 		xsk_tx_release(pool);
2760 	}
2761 
	/* Return true only if both conditions are met:
	 *  a) TX budget is still available
	 *  b) work_done is true, i.e. the XSK TX descriptor peek came up
	 *     empty (no more pending XSK TX frames to transmit)
2766 	 */
2767 	return !!budget && work_done;
2768 }
2769 
2770 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2771 {
2772 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2773 		tc += 64;
2774 
2775 		if (priv->plat->force_thresh_dma_mode)
2776 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2777 		else
2778 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2779 						      chan);
2780 
2781 		priv->xstats.threshold = tc;
2782 	}
2783 }
2784 
2785 /**
2786  * stmmac_tx_clean - to manage the transmission completion
2787  * @priv: driver private structure
2788  * @budget: napi budget limiting this functions packet handling
2789  * @queue: TX queue index
2790  * @pending_packets: signal to arm the TX coal timer
2791  * Description: it reclaims the transmit resources after transmission completes.
 * If some packets still need to be handled due to TX coalescing,
 * pending_packets is set to true so that NAPI arms the TX coalesce timer.
2794  */
2795 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2796 			   bool *pending_packets)
2797 {
2798 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2799 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2800 	unsigned int bytes_compl = 0, pkts_compl = 0;
2801 	unsigned int entry, xmits = 0, count = 0;
2802 	u32 tx_packets = 0, tx_errors = 0;
2803 
2804 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2805 
2806 	tx_q->xsk_frames_done = 0;
2807 
2808 	entry = tx_q->dirty_tx;
2809 
	/* Try to clean all completed TX frames in one shot */
2811 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2812 		struct xdp_frame *xdpf;
2813 		struct sk_buff *skb;
2814 		struct dma_desc *p;
2815 		int status;
2816 
2817 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2818 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2819 			xdpf = tx_q->xdpf[entry];
2820 			skb = NULL;
2821 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2822 			xdpf = NULL;
2823 			skb = tx_q->tx_skbuff[entry];
2824 		} else {
2825 			xdpf = NULL;
2826 			skb = NULL;
2827 		}
2828 
2829 		if (priv->extend_desc)
2830 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2831 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2832 			p = &tx_q->dma_entx[entry].basic;
2833 		else
2834 			p = tx_q->dma_tx + entry;
2835 
2836 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2837 		/* Check if the descriptor is owned by the DMA */
2838 		if (unlikely(status & tx_dma_own))
2839 			break;
2840 
2841 		count++;
2842 
2843 		/* Make sure descriptor fields are read after reading
2844 		 * the own bit.
2845 		 */
2846 		dma_rmb();
2847 
2848 		/* Just consider the last segment and ...*/
2849 		if (likely(!(status & tx_not_ls))) {
2850 			/* ... verify the status error condition */
2851 			if (unlikely(status & tx_err)) {
2852 				tx_errors++;
2853 				if (unlikely(status & tx_err_bump_tc))
2854 					stmmac_bump_dma_threshold(priv, queue);
2855 			} else {
2856 				tx_packets++;
2857 			}
2858 			if (skb) {
2859 				stmmac_get_tx_hwtstamp(priv, p, skb);
2860 			} else if (tx_q->xsk_pool &&
2861 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2862 				struct stmmac_xsk_tx_complete tx_compl = {
2863 					.priv = priv,
2864 					.desc = p,
2865 				};
2866 
2867 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2868 							 &stmmac_xsk_tx_metadata_ops,
2869 							 &tx_compl);
2870 			}
2871 		}
2872 
2873 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2874 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2875 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2876 				dma_unmap_page(priv->device,
2877 					       tx_q->tx_skbuff_dma[entry].buf,
2878 					       tx_q->tx_skbuff_dma[entry].len,
2879 					       DMA_TO_DEVICE);
2880 			else
2881 				dma_unmap_single(priv->device,
2882 						 tx_q->tx_skbuff_dma[entry].buf,
2883 						 tx_q->tx_skbuff_dma[entry].len,
2884 						 DMA_TO_DEVICE);
2885 			tx_q->tx_skbuff_dma[entry].buf = 0;
2886 			tx_q->tx_skbuff_dma[entry].len = 0;
2887 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2888 		}
2889 
2890 		stmmac_clean_desc3(priv, tx_q, p);
2891 
2892 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2893 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2894 
2895 		if (xdpf &&
2896 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2897 			xdp_return_frame_rx_napi(xdpf);
2898 			tx_q->xdpf[entry] = NULL;
2899 		}
2900 
2901 		if (xdpf &&
2902 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2903 			xdp_return_frame(xdpf);
2904 			tx_q->xdpf[entry] = NULL;
2905 		}
2906 
2907 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2908 			tx_q->xsk_frames_done++;
2909 
2910 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2911 			if (likely(skb)) {
2912 				pkts_compl++;
2913 				bytes_compl += skb->len;
2914 				dev_consume_skb_any(skb);
2915 				tx_q->tx_skbuff[entry] = NULL;
2916 			}
2917 		}
2918 
2919 		stmmac_release_tx_desc(priv, p, priv->mode);
2920 
2921 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2922 	}
2923 	tx_q->dirty_tx = entry;
2924 
2925 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2926 				  pkts_compl, bytes_compl);
2927 
2928 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2929 								queue))) &&
2930 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2931 
2932 		netif_dbg(priv, tx_done, priv->dev,
2933 			  "%s: restart transmit\n", __func__);
2934 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2935 	}
2936 
2937 	if (tx_q->xsk_pool) {
2938 		bool work_done;
2939 
2940 		if (tx_q->xsk_frames_done)
2941 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2942 
2943 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2944 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2945 
		/* For XSK TX, we try to send as many frames as possible.
		 * If the XSK work is done (XSK TX descriptors are empty and
		 * budget is still available), return "budget - 1" to
		 * re-enable the TX IRQ.
		 * Else, return "budget" to make NAPI continue polling.
2950 		 */
2951 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2952 					       STMMAC_XSK_TX_BUDGET_MAX);
2953 		if (work_done)
2954 			xmits = budget - 1;
2955 		else
2956 			xmits = budget;
2957 	}
2958 
2959 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2960 		stmmac_restart_sw_lpi_timer(priv);
2961 
	/* We still have pending packets; signal the caller to arm the
	 * TX coalesce timer again.
	 */
2963 	if (tx_q->dirty_tx != tx_q->cur_tx)
2964 		*pending_packets = true;
2965 
2966 	u64_stats_update_begin(&txq_stats->napi_syncp);
2967 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2968 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2969 	u64_stats_inc(&txq_stats->napi.tx_clean);
2970 	u64_stats_update_end(&txq_stats->napi_syncp);
2971 
2972 	priv->xstats.tx_errors += tx_errors;
2973 
2974 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2975 
2976 	/* Combine decisions from TX clean and XSK TX */
2977 	return max(count, xmits);
2978 }
2979 
2980 /**
2981  * stmmac_tx_err - to manage the tx error
2982  * @priv: driver private structure
2983  * @chan: channel index
2984  * Description: it cleans the descriptors and restarts the transmission
2985  * in case of transmission errors.
2986  */
2987 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2988 {
2989 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2990 
2991 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2992 
2993 	stmmac_stop_tx_dma(priv, chan);
2994 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2995 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2996 	stmmac_reset_tx_queue(priv, chan);
2997 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2998 			    tx_q->dma_tx_phy, chan);
2999 	stmmac_start_tx_dma(priv, chan);
3000 
3001 	priv->xstats.tx_errors++;
3002 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
3003 }
3004 
3005 /**
3006  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
3007  *  @priv: driver private structure
3008  *  @txmode: TX operating mode
3009  *  @rxmode: RX operating mode
3010  *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
3012  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
3013  *  mode.
3014  */
3015 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
3016 					  u32 rxmode, u32 chan)
3017 {
3018 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
3019 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
3020 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3021 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3022 	int rxfifosz = priv->plat->rx_fifo_size;
3023 	int txfifosz = priv->plat->tx_fifo_size;
3024 
3025 	if (rxfifosz == 0)
3026 		rxfifosz = priv->dma_cap.rx_fifo_size;
3027 	if (txfifosz == 0)
3028 		txfifosz = priv->dma_cap.tx_fifo_size;
3029 
3030 	/* Adjust for real per queue fifo size */
3031 	rxfifosz /= rx_channels_count;
3032 	txfifosz /= tx_channels_count;
3033 
3034 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
3035 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
3036 }
3037 
3038 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
3039 {
3040 	int ret;
3041 
3042 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
3043 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
3044 	if (ret && (ret != -EINVAL)) {
3045 		stmmac_global_err(priv);
3046 		return true;
3047 	}
3048 
3049 	return false;
3050 }
3051 
3052 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
3053 {
3054 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3055 						 &priv->xstats, chan, dir);
3056 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
3057 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3058 	struct stmmac_channel *ch = &priv->channel[chan];
3059 	struct napi_struct *rx_napi;
3060 	struct napi_struct *tx_napi;
3061 	unsigned long flags;
3062 
3063 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
3064 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3065 
3066 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
3067 		if (napi_schedule_prep(rx_napi)) {
3068 			spin_lock_irqsave(&ch->lock, flags);
3069 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3070 			spin_unlock_irqrestore(&ch->lock, flags);
3071 			__napi_schedule(rx_napi);
3072 		}
3073 	}
3074 
3075 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
3076 		if (napi_schedule_prep(tx_napi)) {
3077 			spin_lock_irqsave(&ch->lock, flags);
3078 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3079 			spin_unlock_irqrestore(&ch->lock, flags);
3080 			__napi_schedule(tx_napi);
3081 		}
3082 	}
3083 
3084 	return status;
3085 }
3086 
3087 /**
3088  * stmmac_dma_interrupt - DMA ISR
3089  * @priv: driver private structure
3090  * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
3093  */
3094 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3095 {
3096 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3097 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3098 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
3099 				tx_channel_count : rx_channel_count;
3100 	u32 chan;
3101 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3102 
3103 	/* Make sure we never check beyond our status buffer. */
3104 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3105 		channels_to_check = ARRAY_SIZE(status);
3106 
3107 	for (chan = 0; chan < channels_to_check; chan++)
3108 		status[chan] = stmmac_napi_check(priv, chan,
3109 						 DMA_DIR_RXTX);
3110 
3111 	for (chan = 0; chan < tx_channel_count; chan++) {
3112 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3113 			/* Try to bump up the dma threshold on this failure */
3114 			stmmac_bump_dma_threshold(priv, chan);
3115 		} else if (unlikely(status[chan] == tx_hard_error)) {
3116 			stmmac_tx_err(priv, chan);
3117 		}
3118 	}
3119 }
3120 
3121 /**
3122  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3123  * @priv: driver private structure
 * Description: this masks the MMC irq since the counters are managed in SW.
3125  */
3126 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3127 {
3128 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3129 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3130 
3131 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3132 
3133 	if (priv->dma_cap.rmon) {
3134 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3135 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3136 	} else
3137 		netdev_info(priv->dev, "No MAC Management Counters available\n");
3138 }
3139 
3140 /**
3141  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3142  * @priv: driver private structure
3143  * Description:
 *  new GMAC chip generations have a dedicated register to indicate the
 *  presence of the optional features/functions.
 *  It can also be used to override the values passed through the
 *  platform, which are still necessary for old MAC10/100 and GMAC chips.
3148  */
3149 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3150 {
3151 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3152 }
3153 
3154 /**
3155  * stmmac_check_ether_addr - check if the MAC addr is valid
3156  * @priv: driver private structure
3157  * Description:
 * it verifies that the MAC address is valid; if it is not, a random
 * MAC address is generated
3160  */
3161 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3162 {
3163 	u8 addr[ETH_ALEN];
3164 
3165 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3166 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3167 		if (is_valid_ether_addr(addr))
3168 			eth_hw_addr_set(priv->dev, addr);
3169 		else
3170 			eth_hw_addr_random(priv->dev);
3171 		dev_info(priv->device, "device MAC address %pM\n",
3172 			 priv->dev->dev_addr);
3173 	}
3174 }
3175 
3176 int stmmac_get_phy_intf_sel(phy_interface_t interface)
3177 {
3178 	int phy_intf_sel = -EINVAL;
3179 
3180 	if (interface == PHY_INTERFACE_MODE_MII ||
3181 	    interface == PHY_INTERFACE_MODE_GMII)
3182 		phy_intf_sel = PHY_INTF_SEL_GMII_MII;
3183 	else if (phy_interface_mode_is_rgmii(interface))
3184 		phy_intf_sel = PHY_INTF_SEL_RGMII;
3185 	else if (interface == PHY_INTERFACE_MODE_RMII)
3186 		phy_intf_sel = PHY_INTF_SEL_RMII;
3187 	else if (interface == PHY_INTERFACE_MODE_REVMII)
3188 		phy_intf_sel = PHY_INTF_SEL_REVMII;
3189 
3190 	return phy_intf_sel;
3191 }
3192 EXPORT_SYMBOL_GPL(stmmac_get_phy_intf_sel);
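
/*
 * Example: stmmac_get_phy_intf_sel(PHY_INTERFACE_MODE_RGMII_ID) returns
 * PHY_INTF_SEL_RGMII, since phy_interface_mode_is_rgmii() matches all four
 * RGMII variants. A platform's set_phy_intf_sel() callback would typically
 * latch this value into a syscon register, e.g. (hypothetical glue code,
 * FOO_* names are placeholders):
 *
 *	static int foo_set_phy_intf_sel(void *bsp_priv, int phy_intf_sel)
 *	{
 *		struct foo_dwmac *dwmac = bsp_priv;
 *
 *		return regmap_update_bits(dwmac->regmap, FOO_SYSCFG_REG,
 *					  FOO_INTF_SEL_MASK,
 *					  FIELD_PREP(FOO_INTF_SEL_MASK,
 *						     phy_intf_sel));
 *	}
 */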
3193 
3194 static int stmmac_prereset_configure(struct stmmac_priv *priv)
3195 {
3196 	struct plat_stmmacenet_data *plat_dat = priv->plat;
3197 	phy_interface_t interface;
3198 	struct phylink_pcs *pcs;
3199 	int phy_intf_sel, ret;
3200 
3201 	if (!plat_dat->set_phy_intf_sel)
3202 		return 0;
3203 
3204 	interface = plat_dat->phy_interface;
3205 
3206 	/* Check whether this mode uses a PCS */
3207 	pcs = stmmac_mac_select_pcs(&priv->phylink_config, interface);
3208 	if (priv->integrated_pcs && pcs == &priv->integrated_pcs->pcs) {
3209 		/* Request the phy_intf_sel from the integrated PCS */
3210 		phy_intf_sel = stmmac_integrated_pcs_get_phy_intf_sel(pcs,
3211 								    interface);
3212 	} else {
3213 		phy_intf_sel = stmmac_get_phy_intf_sel(interface);
3214 	}
3215 
3216 	if (phy_intf_sel < 0) {
3217 		netdev_err(priv->dev,
3218 			   "failed to get phy_intf_sel for %s: %pe\n",
3219 			   phy_modes(interface), ERR_PTR(phy_intf_sel));
3220 		return phy_intf_sel;
3221 	}
3222 
3223 	ret = plat_dat->set_phy_intf_sel(plat_dat->bsp_priv, phy_intf_sel);
3224 	if (ret == -EINVAL)
3225 		netdev_err(priv->dev, "platform does not support %s\n",
3226 			   phy_modes(interface));
3227 	else if (ret < 0)
3228 		netdev_err(priv->dev,
3229 			   "platform failed to set interface %s: %pe\n",
3230 			   phy_modes(interface), ERR_PTR(ret));
3231 
3232 	return ret;
3233 }
3234 
3235 /**
3236  * stmmac_init_dma_engine - DMA init.
3237  * @priv: driver private structure
3238  * Description:
 * It inits the DMA by invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case they are not passed, a default is kept for the MAC or GMAC.
3242  */
3243 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3244 {
3245 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3246 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3247 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3248 	struct stmmac_rx_queue *rx_q;
3249 	struct stmmac_tx_queue *tx_q;
3250 	u32 chan = 0;
3251 	int ret = 0;
3252 
3253 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3254 		netdev_err(priv->dev, "Invalid DMA configuration\n");
3255 		return -EINVAL;
3256 	}
3257 
3258 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3259 		priv->plat->dma_cfg->atds = 1;
3260 
3261 	ret = stmmac_prereset_configure(priv);
3262 	if (ret)
3263 		return ret;
3264 
3265 	ret = stmmac_reset(priv);
3266 	if (ret) {
3267 		netdev_err(priv->dev, "Failed to reset the dma\n");
3268 		return ret;
3269 	}
3270 
3271 	/* DMA Configuration */
3272 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3273 
3274 	if (priv->plat->axi)
3275 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3276 
3277 	/* DMA CSR Channel configuration */
3278 	for (chan = 0; chan < dma_csr_ch; chan++) {
3279 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3280 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3281 	}
3282 
3283 	/* DMA RX Channel Configuration */
3284 	for (chan = 0; chan < rx_channels_count; chan++) {
3285 		rx_q = &priv->dma_conf.rx_queue[chan];
3286 
3287 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3288 				    rx_q->dma_rx_phy, chan);
3289 
3290 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3291 				     (rx_q->buf_alloc_num *
3292 				      sizeof(struct dma_desc));
3293 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3294 				       rx_q->rx_tail_addr, chan);
3295 	}
3296 
3297 	/* DMA TX Channel Configuration */
3298 	for (chan = 0; chan < tx_channels_count; chan++) {
3299 		tx_q = &priv->dma_conf.tx_queue[chan];
3300 
3301 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3302 				    tx_q->dma_tx_phy, chan);
3303 
3304 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3305 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3306 				       tx_q->tx_tail_addr, chan);
3307 	}
3308 
3309 	return ret;
3310 }
3311 
3312 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3313 {
3314 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3315 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3316 	struct stmmac_channel *ch;
3317 	struct napi_struct *napi;
3318 
3319 	if (!tx_coal_timer)
3320 		return;
3321 
3322 	ch = &priv->channel[tx_q->queue_index];
3323 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3324 
	/* Arm the timer only if napi is not already scheduled.
	 * If napi is scheduled, try to cancel any pending timer; it will be
	 * armed again from the next scheduled napi.
3328 	 */
3329 	if (unlikely(!napi_is_scheduled(napi)))
3330 		hrtimer_start(&tx_q->txtimer,
3331 			      STMMAC_COAL_TIMER(tx_coal_timer),
3332 			      HRTIMER_MODE_REL);
3333 	else
3334 		hrtimer_try_to_cancel(&tx_q->txtimer);
3335 }
3336 
3337 /**
3338  * stmmac_tx_timer - mitigation sw timer for tx.
3339  * @t: data pointer
3340  * Description:
3341  * This is the timer handler to directly invoke the stmmac_tx_clean.
3342  */
3343 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3344 {
3345 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3346 	struct stmmac_priv *priv = tx_q->priv_data;
3347 	struct stmmac_channel *ch;
3348 	struct napi_struct *napi;
3349 
3350 	ch = &priv->channel[tx_q->queue_index];
3351 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3352 
3353 	if (likely(napi_schedule_prep(napi))) {
3354 		unsigned long flags;
3355 
3356 		spin_lock_irqsave(&ch->lock, flags);
3357 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3358 		spin_unlock_irqrestore(&ch->lock, flags);
3359 		__napi_schedule(napi);
3360 	}
3361 
3362 	return HRTIMER_NORESTART;
3363 }
3364 
3365 /**
3366  * stmmac_init_coalesce - init mitigation options.
3367  * @priv: driver private structure
3368  * Description:
3369  * This inits the coalesce parameters: i.e. timer rate,
3370  * timer handler and default threshold used for enabling the
3371  * interrupt on completion bit.
3372  */
3373 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3374 {
3375 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3376 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3377 	u32 chan;
3378 
3379 	for (chan = 0; chan < tx_channel_count; chan++) {
3380 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3381 
3382 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3383 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3384 
3385 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3386 	}
3387 
3388 	for (chan = 0; chan < rx_channel_count; chan++)
3389 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3390 }
3391 
3392 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3393 {
3394 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3395 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3396 	u32 chan;
3397 
3398 	/* set TX ring length */
3399 	for (chan = 0; chan < tx_channels_count; chan++)
3400 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3401 				       (priv->dma_conf.dma_tx_size - 1), chan);
3402 
3403 	/* set RX ring length */
3404 	for (chan = 0; chan < rx_channels_count; chan++)
3405 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3406 				       (priv->dma_conf.dma_rx_size - 1), chan);
3407 }
3408 
3409 /**
3410  *  stmmac_set_tx_queue_weight - Set TX queue weight
3411  *  @priv: driver private structure
3412  *  Description: It is used for setting TX queues weight
3413  */
3414 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3415 {
3416 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3417 	u32 weight;
3418 	u32 queue;
3419 
3420 	for (queue = 0; queue < tx_queues_count; queue++) {
3421 		weight = priv->plat->tx_queues_cfg[queue].weight;
3422 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3423 	}
3424 }
3425 
3426 /**
3427  *  stmmac_configure_cbs - Configure CBS in TX queue
3428  *  @priv: driver private structure
3429  *  Description: It is used for configuring CBS in AVB TX queues
3430  */
3431 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3432 {
3433 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3434 	u32 mode_to_use;
3435 	u32 queue;
3436 
3437 	/* queue 0 is reserved for legacy traffic */
3438 	for (queue = 1; queue < tx_queues_count; queue++) {
3439 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3440 		if (mode_to_use == MTL_QUEUE_DCB)
3441 			continue;
3442 
3443 		stmmac_config_cbs(priv, priv->hw,
3444 				priv->plat->tx_queues_cfg[queue].send_slope,
3445 				priv->plat->tx_queues_cfg[queue].idle_slope,
3446 				priv->plat->tx_queues_cfg[queue].high_credit,
3447 				priv->plat->tx_queues_cfg[queue].low_credit,
3448 				queue);
3449 	}
3450 }
3451 
3452 /**
3453  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3454  *  @priv: driver private structure
3455  *  Description: It is used for mapping RX queues to RX dma channels
3456  */
3457 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3458 {
3459 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3460 	u32 queue;
3461 	u32 chan;
3462 
3463 	for (queue = 0; queue < rx_queues_count; queue++) {
3464 		chan = priv->plat->rx_queues_cfg[queue].chan;
3465 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3466 	}
3467 }
3468 
3469 /**
3470  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3471  *  @priv: driver private structure
3472  *  Description: It is used for configuring the RX Queue Priority
3473  */
3474 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3475 {
3476 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3477 	u32 queue;
3478 	u32 prio;
3479 
3480 	for (queue = 0; queue < rx_queues_count; queue++) {
3481 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3482 			continue;
3483 
3484 		prio = priv->plat->rx_queues_cfg[queue].prio;
3485 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3486 	}
3487 }
3488 
3489 /**
3490  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3491  *  @priv: driver private structure
3492  *  Description: It is used for configuring the TX Queue Priority
3493  */
3494 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3495 {
3496 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3497 	u32 queue;
3498 	u32 prio;
3499 
3500 	for (queue = 0; queue < tx_queues_count; queue++) {
3501 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3502 			continue;
3503 
3504 		prio = priv->plat->tx_queues_cfg[queue].prio;
3505 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3506 	}
3507 }
3508 
3509 /**
3510  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3511  *  @priv: driver private structure
3512  *  Description: It is used for configuring the RX queue routing
3513  */
3514 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3515 {
3516 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3517 	u32 queue;
3518 	u8 packet;
3519 
3520 	for (queue = 0; queue < rx_queues_count; queue++) {
3521 		/* no specific packet type routing specified for the queue */
3522 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3523 			continue;
3524 
3525 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3526 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3527 	}
3528 }
3529 
3530 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3531 {
3532 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3533 		priv->rss.enable = false;
3534 		return;
3535 	}
3536 
3537 	if (priv->dev->features & NETIF_F_RXHASH)
3538 		priv->rss.enable = true;
3539 	else
3540 		priv->rss.enable = false;
3541 
3542 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3543 			     priv->plat->rx_queues_to_use);
3544 }
3545 
3546 /**
3547  *  stmmac_mtl_configuration - Configure MTL
3548  *  @priv: driver private structure
3549  *  Description: It is used for configuring MTL
3550  */
3551 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3552 {
3553 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3554 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3555 
3556 	if (tx_queues_count > 1)
3557 		stmmac_set_tx_queue_weight(priv);
3558 
3559 	/* Configure MTL RX algorithms */
3560 	if (rx_queues_count > 1)
3561 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3562 				priv->plat->rx_sched_algorithm);
3563 
3564 	/* Configure MTL TX algorithms */
3565 	if (tx_queues_count > 1)
3566 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3567 				priv->plat->tx_sched_algorithm);
3568 
3569 	/* Configure CBS in AVB TX queues */
3570 	if (tx_queues_count > 1)
3571 		stmmac_configure_cbs(priv);
3572 
3573 	/* Map RX MTL to DMA channels */
3574 	stmmac_rx_queue_dma_chan_map(priv);
3575 
3576 	/* Enable MAC RX Queues */
3577 	stmmac_mac_enable_rx_queues(priv);
3578 
3579 	/* Set RX priorities */
3580 	if (rx_queues_count > 1)
3581 		stmmac_mac_config_rx_queues_prio(priv);
3582 
3583 	/* Set TX priorities */
3584 	if (tx_queues_count > 1)
3585 		stmmac_mac_config_tx_queues_prio(priv);
3586 
3587 	/* Set RX routing */
3588 	if (rx_queues_count > 1)
3589 		stmmac_mac_config_rx_queues_routing(priv);
3590 
3591 	/* Receive Side Scaling */
3592 	if (rx_queues_count > 1)
3593 		stmmac_mac_config_rss(priv);
3594 }
3595 
3596 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3597 {
3598 	if (priv->dma_cap.asp) {
3599 		netdev_info(priv->dev, "Enabling Safety Features\n");
3600 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3601 					  priv->plat->safety_feat_cfg);
3602 	} else {
3603 		netdev_info(priv->dev, "No Safety Features support found\n");
3604 	}
3605 }
3606 
3607 /**
3608  * stmmac_hw_setup - setup mac in a usable state.
3609  *  @dev : pointer to the device structure.
3610  *  Description:
3611  *  this is the main function to set up the HW in a usable state: the
3612  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3613  *  checksum features, timers), and the DMA is ready to start receiving
3614  *  and transmitting.
3615  *  Return value:
3616  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3617  *  file on failure.
3618  */
3619 static int stmmac_hw_setup(struct net_device *dev)
3620 {
3621 	struct stmmac_priv *priv = netdev_priv(dev);
3622 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3623 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3624 	bool sph_en;
3625 	u32 chan;
3626 	int ret;
3627 
3628 	/* Make sure RX clock is enabled */
3629 	if (priv->hw->phylink_pcs)
3630 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3631 
3632 	/* Note that clk_rx_i must be running for reset to complete. This
3633 	 * clock may also be required when setting the MAC address.
3634 	 *
3635 	 * Block the receive clock stop for LPI mode at the PHY in case
3636 	 * the link is established with EEE mode active.
3637 	 */
3638 	phylink_rx_clk_stop_block(priv->phylink);
3639 
3640 	/* DMA initialization and SW reset */
3641 	ret = stmmac_init_dma_engine(priv);
3642 	if (ret < 0) {
3643 		phylink_rx_clk_stop_unblock(priv->phylink);
3644 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3645 			   __func__);
3646 		return ret;
3647 	}
3648 
3649 	/* Copy the MAC addr into the HW  */
3650 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3651 	phylink_rx_clk_stop_unblock(priv->phylink);
3652 
3653 	/* Initialize the MAC Core */
3654 	stmmac_core_init(priv, priv->hw, dev);
3655 
3656 	/* Initialize MTL*/
3657 	stmmac_mtl_configuration(priv);
3658 
3659 	/* Initialize Safety Features */
3660 	stmmac_safety_feat_configuration(priv);
3661 
3662 	ret = stmmac_rx_ipc(priv, priv->hw);
3663 	if (!ret) {
3664 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3665 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3666 		priv->hw->rx_csum = 0;
3667 	}
3668 
3669 	/* Enable the MAC Rx/Tx */
3670 	stmmac_mac_set(priv, priv->ioaddr, true);
3671 
3672 	/* Set the HW DMA mode and the COE */
3673 	stmmac_dma_operation_mode(priv);
3674 
3675 	stmmac_mmc_setup(priv);
3676 
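	/* Program the per-queue RX interrupt watchdog when RX coalescing by
	 * time is in use; queues without an explicit value fall back to
	 * DEF_DMA_RIWT.
	 */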
3677 	if (priv->use_riwt) {
3678 		u32 queue;
3679 
3680 		for (queue = 0; queue < rx_cnt; queue++) {
3681 			if (!priv->rx_riwt[queue])
3682 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3683 
3684 			stmmac_rx_watchdog(priv, priv->ioaddr,
3685 					   priv->rx_riwt[queue], queue);
3686 		}
3687 	}
3688 
3689 	/* set TX and RX rings length */
3690 	stmmac_set_rings_length(priv);
3691 
3692 	/* Enable TSO */
3693 	if (priv->tso) {
3694 		for (chan = 0; chan < tx_cnt; chan++) {
3695 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3696 
3697 			/* TSO and TBS cannot co-exist */
3698 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3699 				continue;
3700 
3701 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3702 		}
3703 	}
3704 
3705 	/* Enable Split Header */
3706 	sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
3707 	for (chan = 0; chan < rx_cnt; chan++)
3708 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3709 
3710 
3711 	/* VLAN Tag Insertion */
3712 	if (priv->dma_cap.vlins)
3713 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3714 
3715 	/* TBS */
3716 	for (chan = 0; chan < tx_cnt; chan++) {
3717 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3718 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3719 
3720 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3721 	}
3722 
3723 	/* Configure real RX and TX queues */
3724 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3725 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3726 
3727 	/* Start the ball rolling... */
3728 	stmmac_start_all_dma(priv);
3729 
3730 	phylink_rx_clk_stop_block(priv->phylink);
3731 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3732 	phylink_rx_clk_stop_unblock(priv->phylink);
3733 
3734 	return 0;
3735 }
3736 
3737 static void stmmac_free_irq(struct net_device *dev,
3738 			    enum request_irq_err irq_err, int irq_idx)
3739 {
3740 	struct stmmac_priv *priv = netdev_priv(dev);
3741 	int j;
3742 
3743 	switch (irq_err) {
3744 	case REQ_IRQ_ERR_ALL:
3745 		irq_idx = priv->plat->tx_queues_to_use;
3746 		fallthrough;
3747 	case REQ_IRQ_ERR_TX:
3748 		for (j = irq_idx - 1; j >= 0; j--) {
3749 			if (priv->tx_irq[j] > 0) {
3750 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3751 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3752 			}
3753 		}
3754 		irq_idx = priv->plat->rx_queues_to_use;
3755 		fallthrough;
3756 	case REQ_IRQ_ERR_RX:
3757 		for (j = irq_idx - 1; j >= 0; j--) {
3758 			if (priv->rx_irq[j] > 0) {
3759 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3760 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3761 			}
3762 		}
3763 
3764 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3765 			free_irq(priv->sfty_ue_irq, dev);
3766 		fallthrough;
3767 	case REQ_IRQ_ERR_SFTY_UE:
3768 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3769 			free_irq(priv->sfty_ce_irq, dev);
3770 		fallthrough;
3771 	case REQ_IRQ_ERR_SFTY_CE:
3772 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3773 			free_irq(priv->sfty_irq, dev);
3774 		fallthrough;
3775 	case REQ_IRQ_ERR_SFTY:
3776 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3777 			free_irq(priv->wol_irq, dev);
3778 		fallthrough;
3779 	case REQ_IRQ_ERR_WOL:
3780 		free_irq(dev->irq, dev);
3781 		fallthrough;
3782 	case REQ_IRQ_ERR_MAC:
3783 	case REQ_IRQ_ERR_NO:
3784 		/* If MAC IRQ request error, no more IRQ to free */
3785 		break;
3786 	}
3787 }
3788 
3789 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3790 {
3791 	struct stmmac_priv *priv = netdev_priv(dev);
3792 	enum request_irq_err irq_err;
3793 	int irq_idx = 0;
3794 	char *int_name;
3795 	int ret;
3796 	int i;
3797 
3798 	/* For common interrupt */
3799 	int_name = priv->int_name_mac;
3800 	sprintf(int_name, "%s:%s", dev->name, "mac");
3801 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3802 			  0, int_name, dev);
3803 	if (unlikely(ret < 0)) {
3804 		netdev_err(priv->dev,
3805 			   "%s: alloc mac MSI %d (error: %d)\n",
3806 			   __func__, dev->irq, ret);
3807 		irq_err = REQ_IRQ_ERR_MAC;
3808 		goto irq_error;
3809 	}
3810 
3811 	/* Request the Wake IRQ in case another line
3812 	 * is used for WoL
3813 	 */
3814 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3815 		int_name = priv->int_name_wol;
3816 		sprintf(int_name, "%s:%s", dev->name, "wol");
3817 		ret = request_irq(priv->wol_irq,
3818 				  stmmac_mac_interrupt,
3819 				  0, int_name, dev);
3820 		if (unlikely(ret < 0)) {
3821 			netdev_err(priv->dev,
3822 				   "%s: alloc wol MSI %d (error: %d)\n",
3823 				   __func__, priv->wol_irq, ret);
3824 			irq_err = REQ_IRQ_ERR_WOL;
3825 			goto irq_error;
3826 		}
3827 	}
3828 
3829 	/* Request the common Safety Feature Correctable/Uncorrectable
3830 	 * Error line in case another line is used
3831 	 */
3832 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3833 		int_name = priv->int_name_sfty;
3834 		sprintf(int_name, "%s:%s", dev->name, "safety");
3835 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3836 				  0, int_name, dev);
3837 		if (unlikely(ret < 0)) {
3838 			netdev_err(priv->dev,
3839 				   "%s: alloc sfty MSI %d (error: %d)\n",
3840 				   __func__, priv->sfty_irq, ret);
3841 			irq_err = REQ_IRQ_ERR_SFTY;
3842 			goto irq_error;
3843 		}
3844 	}
3845 
3846 	/* Request the Safety Feature Correctable Error line in
3847 	 * case another line is used
3848 	 */
3849 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3850 		int_name = priv->int_name_sfty_ce;
3851 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3852 		ret = request_irq(priv->sfty_ce_irq,
3853 				  stmmac_safety_interrupt,
3854 				  0, int_name, dev);
3855 		if (unlikely(ret < 0)) {
3856 			netdev_err(priv->dev,
3857 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3858 				   __func__, priv->sfty_ce_irq, ret);
3859 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3860 			goto irq_error;
3861 		}
3862 	}
3863 
3864 	/* Request the Safety Feature Uncorrectable Error line in
3865 	 * case another line is used
3866 	 */
3867 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3868 		int_name = priv->int_name_sfty_ue;
3869 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3870 		ret = request_irq(priv->sfty_ue_irq,
3871 				  stmmac_safety_interrupt,
3872 				  0, int_name, dev);
3873 		if (unlikely(ret < 0)) {
3874 			netdev_err(priv->dev,
3875 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3876 				   __func__, priv->sfty_ue_irq, ret);
3877 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3878 			goto irq_error;
3879 		}
3880 	}
3881 
3882 	/* Request Rx MSI irq */
3883 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3884 		if (i >= MTL_MAX_RX_QUEUES)
3885 			break;
3886 		if (priv->rx_irq[i] == 0)
3887 			continue;
3888 
3889 		int_name = priv->int_name_rx_irq[i];
3890 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3891 		ret = request_irq(priv->rx_irq[i],
3892 				  stmmac_msi_intr_rx,
3893 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3894 		if (unlikely(ret < 0)) {
3895 			netdev_err(priv->dev,
3896 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3897 				   __func__, i, priv->rx_irq[i], ret);
3898 			irq_err = REQ_IRQ_ERR_RX;
3899 			irq_idx = i;
3900 			goto irq_error;
3901 		}
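		/* Give the RX queue interrupt an initial affinity hint that
		 * spreads queues across the online CPUs; this is only a hint
		 * and can be changed from userspace.
		 */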
3902 		irq_set_affinity_hint(priv->rx_irq[i],
3903 				      cpumask_of(i % num_online_cpus()));
3904 	}
3905 
3906 	/* Request Tx MSI irq */
3907 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3908 		if (i >= MTL_MAX_TX_QUEUES)
3909 			break;
3910 		if (priv->tx_irq[i] == 0)
3911 			continue;
3912 
3913 		int_name = priv->int_name_tx_irq[i];
3914 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3915 		ret = request_irq(priv->tx_irq[i],
3916 				  stmmac_msi_intr_tx,
3917 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3918 		if (unlikely(ret < 0)) {
3919 			netdev_err(priv->dev,
3920 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3921 				   __func__, i, priv->tx_irq[i], ret);
3922 			irq_err = REQ_IRQ_ERR_TX;
3923 			irq_idx = i;
3924 			goto irq_error;
3925 		}
3926 		irq_set_affinity_hint(priv->tx_irq[i],
3927 				      cpumask_of(i % num_online_cpus()));
3928 	}
3929 
3930 	return 0;
3931 
3932 irq_error:
3933 	stmmac_free_irq(dev, irq_err, irq_idx);
3934 	return ret;
3935 }
3936 
3937 static int stmmac_request_irq_single(struct net_device *dev)
3938 {
3939 	struct stmmac_priv *priv = netdev_priv(dev);
3940 	enum request_irq_err irq_err;
3941 	int ret;
3942 
3943 	ret = request_irq(dev->irq, stmmac_interrupt,
3944 			  IRQF_SHARED, dev->name, dev);
3945 	if (unlikely(ret < 0)) {
3946 		netdev_err(priv->dev,
3947 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3948 			   __func__, dev->irq, ret);
3949 		irq_err = REQ_IRQ_ERR_MAC;
3950 		goto irq_error;
3951 	}
3952 
3953 	/* Request the Wake IRQ in case another line
3954 	 * is used for WoL
3955 	 */
3956 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3957 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3958 				  IRQF_SHARED, dev->name, dev);
3959 		if (unlikely(ret < 0)) {
3960 			netdev_err(priv->dev,
3961 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3962 				   __func__, priv->wol_irq, ret);
3963 			irq_err = REQ_IRQ_ERR_WOL;
3964 			goto irq_error;
3965 		}
3966 	}
3967 
3968 	/* Request the common Safety Feature Correctable/Uncorrectable
3969 	 * Error line in case another line is used
3970 	 */
3971 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3972 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3973 				  IRQF_SHARED, dev->name, dev);
3974 		if (unlikely(ret < 0)) {
3975 			netdev_err(priv->dev,
3976 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3977 				   __func__, priv->sfty_irq, ret);
3978 			irq_err = REQ_IRQ_ERR_SFTY;
3979 			goto irq_error;
3980 		}
3981 	}
3982 
3983 	return 0;
3984 
3985 irq_error:
3986 	stmmac_free_irq(dev, irq_err, 0);
3987 	return ret;
3988 }
3989 
3990 static int stmmac_request_irq(struct net_device *dev)
3991 {
3992 	struct stmmac_priv *priv = netdev_priv(dev);
3993 	int ret;
3994 
3995 	/* Request the IRQ lines */
3996 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3997 		ret = stmmac_request_irq_multi_msi(dev);
3998 	else
3999 		ret = stmmac_request_irq_single(dev);
4000 
4001 	return ret;
4002 }
4003 
4004 /**
4005  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
4006  *  @priv: driver private structure
4007  *  @mtu: MTU to setup the dma queue and buf with
4008  *  Description: Allocate and generate a dma_conf based on the provided MTU.
4009  *  Allocate the Tx/Rx DMA queue and init them.
4010  *  Return value:
4011  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
4012  */
4013 static struct stmmac_dma_conf *
4014 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
4015 {
4016 	struct stmmac_dma_conf *dma_conf;
4017 	int chan, bfsize, ret;
4018 
4019 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
4020 	if (!dma_conf) {
4021 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
4022 			   __func__);
4023 		return ERR_PTR(-ENOMEM);
4024 	}
4025 
4026 	/* Returns 0 or BUF_SIZE_16KiB if mtu > 8KiB and dwmac4 or ring mode */
4027 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
4028 	if (bfsize < 0)
4029 		bfsize = 0;
4030 
4031 	if (bfsize < BUF_SIZE_16KiB)
4032 		bfsize = stmmac_set_bfsize(mtu);
4033 
4034 	dma_conf->dma_buf_sz = bfsize;
4035 	/* Choose the tx/rx size from the one already defined in the
4036 	 * priv struct (if defined).
4037 	 */
4038 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
4039 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
4040 
4041 	if (!dma_conf->dma_tx_size)
4042 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
4043 	if (!dma_conf->dma_rx_size)
4044 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
4045 
4046 	/* Earlier check for TBS */
4047 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
4048 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
4049 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
4050 
4051 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
4052 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
4053 	}
4054 
4055 	ret = alloc_dma_desc_resources(priv, dma_conf);
4056 	if (ret < 0) {
4057 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
4058 			   __func__);
4059 		goto alloc_error;
4060 	}
4061 
4062 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
4063 	if (ret < 0) {
4064 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4065 			   __func__);
4066 		goto init_error;
4067 	}
4068 
4069 	return dma_conf;
4070 
4071 init_error:
4072 	free_dma_desc_resources(priv, dma_conf);
4073 alloc_error:
4074 	kfree(dma_conf);
4075 	return ERR_PTR(ret);
4076 }
4077 
4078 /**
4079  *  __stmmac_open - open entry point of the driver
4080  *  @dev : pointer to the device structure.
4081  *  @dma_conf :  structure to take the dma data
4082  *  Description:
4083  *  This function is the open entry point of the driver.
4084  *  Return value:
4085  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4086  *  file on failure.
4087  */
4088 static int __stmmac_open(struct net_device *dev,
4089 			 struct stmmac_dma_conf *dma_conf)
4090 {
4091 	struct stmmac_priv *priv = netdev_priv(dev);
4092 	u32 chan;
4093 	int ret;
4094 
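	/* Carry the TBS enable state over from the previous configuration
	 * (typically set through the etf qdisc offload) so that reopening
	 * the interface does not silently drop it; the freshly built
	 * dma_conf only has the AVAIL bit populated.
	 */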
4095 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
4096 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4097 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4098 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4099 
4100 	stmmac_reset_queues_param(priv);
4101 
4102 	ret = stmmac_hw_setup(dev);
4103 	if (ret < 0) {
4104 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4105 		goto init_error;
4106 	}
4107 
4108 	stmmac_setup_ptp(priv);
4109 
4110 	stmmac_init_coalesce(priv);
4111 
4112 	phylink_start(priv->phylink);
4113 
4114 	ret = stmmac_request_irq(dev);
4115 	if (ret)
4116 		goto irq_error;
4117 
4118 	stmmac_enable_all_queues(priv);
4119 	netif_tx_start_all_queues(priv->dev);
4120 	stmmac_enable_all_dma_irq(priv);
4121 
4122 	return 0;
4123 
4124 irq_error:
4125 	phylink_stop(priv->phylink);
4126 
4127 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4128 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4129 
4130 	stmmac_release_ptp(priv);
4131 init_error:
4132 	return ret;
4133 }
4134 
4135 static int stmmac_open(struct net_device *dev)
4136 {
4137 	struct stmmac_priv *priv = netdev_priv(dev);
4138 	struct stmmac_dma_conf *dma_conf;
4139 	int ret;
4140 
4141 	/* Initialise the tx lpi timer, converting from msec to usec */
4142 	if (!priv->tx_lpi_timer)
4143 		priv->tx_lpi_timer = eee_timer * 1000;
4144 
4145 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4146 	if (IS_ERR(dma_conf))
4147 		return PTR_ERR(dma_conf);
4148 
4149 	ret = pm_runtime_resume_and_get(priv->device);
4150 	if (ret < 0)
4151 		goto err_dma_resources;
4152 
4153 	ret = stmmac_init_phy(dev);
4154 	if (ret)
4155 		goto err_runtime_pm;
4156 
4157 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)) {
4158 		ret = stmmac_legacy_serdes_power_up(priv);
4159 		if (ret < 0)
4160 			goto err_disconnect_phy;
4161 	}
4162 
4163 	ret = __stmmac_open(dev, dma_conf);
4164 	if (ret)
4165 		goto err_serdes;
4166 
4167 	kfree(dma_conf);
4168 
4169 	/* We may have called phylink_speed_down before */
4170 	phylink_speed_up(priv->phylink);
4171 
4172 	return ret;
4173 
4174 err_serdes:
4175 	stmmac_legacy_serdes_power_down(priv);
4176 err_disconnect_phy:
4177 	phylink_disconnect_phy(priv->phylink);
4178 err_runtime_pm:
4179 	pm_runtime_put(priv->device);
4180 err_dma_resources:
4181 	free_dma_desc_resources(priv, dma_conf);
4182 	kfree(dma_conf);
4183 	return ret;
4184 }
4185 
4186 static void __stmmac_release(struct net_device *dev)
4187 {
4188 	struct stmmac_priv *priv = netdev_priv(dev);
4189 	u32 chan;
4190 
4191 	/* Stop and disconnect the PHY */
4192 	phylink_stop(priv->phylink);
4193 
4194 	stmmac_disable_all_queues(priv);
4195 
4196 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4197 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4198 
4199 	netif_tx_disable(dev);
4200 
4201 	/* Free the IRQ lines */
4202 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4203 
4204 	/* Stop TX/RX DMA and clear the descriptors */
4205 	stmmac_stop_all_dma(priv);
4206 
4207 	/* Release and free the Rx/Tx resources */
4208 	free_dma_desc_resources(priv, &priv->dma_conf);
4209 
4210 	stmmac_release_ptp(priv);
4211 
4212 	if (stmmac_fpe_supported(priv))
4213 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4214 }
4215 
4216 /**
4217  *  stmmac_release - close entry point of the driver
4218  *  @dev : device pointer.
4219  *  Description:
4220  *  This is the stop entry point of the driver.
4221  */
4222 static int stmmac_release(struct net_device *dev)
4223 {
4224 	struct stmmac_priv *priv = netdev_priv(dev);
4225 
4226 	/* If the PHY or MAC has WoL enabled, then the PHY will not be
4227 	 * suspended when phylink_stop() is called below. Set the PHY
4228 	 * to its slowest speed to save power.
4229 	 */
4230 	if (device_may_wakeup(priv->device))
4231 		phylink_speed_down(priv->phylink, false);
4232 
4233 	__stmmac_release(dev);
4234 
4235 	stmmac_legacy_serdes_power_down(priv);
4236 	phylink_disconnect_phy(priv->phylink);
4237 	pm_runtime_put(priv->device);
4238 
4239 	return 0;
4240 }
4241 
4242 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4243 			       struct stmmac_tx_queue *tx_q)
4244 {
4245 	struct dma_desc *p;
4246 	u16 tag = 0x0;
4247 
4248 	if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
4249 		return false;
4250 
4251 	tag = skb_vlan_tag_get(skb);
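	/* The tag is written into a descriptor of its own and cur_tx is
	 * advanced past it, so HW VLAN insertion consumes one extra entry
	 * in the TX ring.
	 */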
4252 
4253 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4254 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4255 	else
4256 		p = &tx_q->dma_tx[tx_q->cur_tx];
4257 
4258 	if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
4259 		return false;
4260 
4261 	stmmac_set_tx_owner(priv, p);
4262 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4263 	return true;
4264 }
4265 
4266 /**
4267  *  stmmac_tso_allocator - Allocate TSO TX descriptors for a buffer
4268  *  @priv: driver private structure
4269  *  @des: buffer start address
4270  *  @total_len: total length to fill in descriptors
4271  *  @last_segment: condition for the last descriptor
4272  *  @queue: TX queue index
4273  *  Description:
4274  *  This function fills one or more descriptors, requesting a new descriptor
4275  *  whenever the remaining buffer length exceeds what one descriptor can hold.
4276  */
4277 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4278 				 int total_len, bool last_segment, u32 queue)
4279 {
4280 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4281 	struct dma_desc *desc;
4282 	u32 buff_size;
4283 	int tmp_len;
4284 
4285 	tmp_len = total_len;
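	/* Walk the payload in TSO_MAX_BUFF_SIZE chunks, one descriptor per
	 * chunk; only the descriptor carrying the final bytes of the last
	 * segment is flagged as last.
	 */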
4286 
4287 	while (tmp_len > 0) {
4288 		dma_addr_t curr_addr;
4289 
4290 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4291 						priv->dma_conf.dma_tx_size);
4292 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4293 
4294 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4295 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4296 		else
4297 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4298 
4299 		curr_addr = des + (total_len - tmp_len);
4300 		stmmac_set_desc_addr(priv, desc, curr_addr);
4301 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4302 			    TSO_MAX_BUFF_SIZE : tmp_len;
4303 
4304 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4305 				0, 1,
4306 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4307 				0, 0);
4308 
4309 		tmp_len -= TSO_MAX_BUFF_SIZE;
4310 	}
4311 }
4312 
4313 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4314 {
4315 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4316 	int desc_size;
4317 
4318 	if (likely(priv->extend_desc))
4319 		desc_size = sizeof(struct dma_extended_desc);
4320 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4321 		desc_size = sizeof(struct dma_edesc);
4322 	else
4323 		desc_size = sizeof(struct dma_desc);
4324 
4325 	/* The own bit must be the last setting done when preparing the
4326 	 * descriptor, and a barrier is needed to make sure everything is
4327 	 * coherent before handing the descriptors over to the DMA engine.
4328 	 */
4329 	wmb();
4330 
4331 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4332 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4333 }
4334 
4335 /**
4336  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4337  *  @skb : the socket buffer
4338  *  @dev : device pointer
4339  *  Description: this is the transmit function that is called on TSO frames
4340  *  (support available on GMAC4 and newer chips).
4341  *  The diagram below shows the ring programming in case of TSO frames:
4342  *
4343  *  First Descriptor
4344  *   --------
4345  *   | DES0 |---> buffer1 = L2/L3/L4 header
4346  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4347  *   |      |     width is 32-bit, but we never use it.
4348  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4349  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4350  *   |      |     or 48-bit, and we always use it.
4351  *   | DES2 |---> buffer1 len
4352  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4353  *   --------
4354  *   --------
4355  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4356  *   | DES1 |---> same as the First Descriptor
4357  *   | DES2 |---> buffer1 len
4358  *   | DES3 |
4359  *   --------
4360  *	|
4361  *     ...
4362  *	|
4363  *   --------
4364  *   | DES0 |---> buffer1 = Split TCP Payload
4365  *   | DES1 |---> same as the First Descriptor
4366  *   | DES2 |---> buffer1 len
4367  *   | DES3 |
4368  *   --------
4369  *
4370  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4371  */
4372 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4373 {
4374 	struct dma_desc *desc, *first, *mss_desc = NULL;
4375 	struct stmmac_priv *priv = netdev_priv(dev);
4376 	unsigned int first_entry, tx_packets;
4377 	struct stmmac_txq_stats *txq_stats;
4378 	struct stmmac_tx_queue *tx_q;
4379 	bool set_ic, is_last_segment;
4380 	u32 pay_len, mss, queue;
4381 	int i, first_tx, nfrags;
4382 	u8 proto_hdr_len, hdr;
4383 	dma_addr_t des;
4384 
4385 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4386 	 *
4387 	 * Never let the HW insert the VLAN tag, since segments split by the
4388 	 * TSO engine would otherwise be un-tagged by mistake.
4389 	 */
4390 	if (skb_vlan_tag_present(skb)) {
4391 		skb = __vlan_hwaccel_push_inside(skb);
4392 		if (unlikely(!skb)) {
4393 			priv->xstats.tx_dropped++;
4394 			return NETDEV_TX_OK;
4395 		}
4396 	}
4397 
4398 	nfrags = skb_shinfo(skb)->nr_frags;
4399 	queue = skb_get_queue_mapping(skb);
4400 
4401 	tx_q = &priv->dma_conf.tx_queue[queue];
4402 	txq_stats = &priv->xstats.txq_stats[queue];
4403 	first_tx = tx_q->cur_tx;
4404 
4405 	/* Compute header lengths */
4406 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4407 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4408 		hdr = sizeof(struct udphdr);
4409 	} else {
4410 		proto_hdr_len = skb_tcp_all_headers(skb);
4411 		hdr = tcp_hdrlen(skb);
4412 	}
4413 
4414 	/* Descriptor availability based on the threshold should be safe enough */
4415 	if (unlikely(stmmac_tx_avail(priv, queue) <
4416 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4417 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4418 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4419 								queue));
4420 			/* This is a hard error, log it. */
4421 			netdev_err(priv->dev,
4422 				   "%s: Tx Ring full when queue awake\n",
4423 				   __func__);
4424 		}
4425 		return NETDEV_TX_BUSY;
4426 	}
4427 
4428 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4429 
4430 	mss = skb_shinfo(skb)->gso_size;
4431 
4432 	/* set new MSS value if needed */
4433 	if (mss != tx_q->mss) {
4434 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4435 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4436 		else
4437 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4438 
4439 		stmmac_set_mss(priv, mss_desc, mss);
4440 		tx_q->mss = mss;
4441 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4442 						priv->dma_conf.dma_tx_size);
4443 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4444 	}
4445 
4446 	if (netif_msg_tx_queued(priv)) {
4447 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4448 			__func__, hdr, proto_hdr_len, pay_len, mss);
4449 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4450 			skb->data_len);
4451 	}
4452 
4453 	first_entry = tx_q->cur_tx;
4454 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4455 
4456 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4457 		desc = &tx_q->dma_entx[first_entry].basic;
4458 	else
4459 		desc = &tx_q->dma_tx[first_entry];
4460 	first = desc;
4461 
4462 	/* first descriptor: fill Headers on Buf1 */
4463 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4464 			     DMA_TO_DEVICE);
4465 	if (dma_mapping_error(priv->device, des))
4466 		goto dma_map_err;
4467 
4468 	stmmac_set_desc_addr(priv, first, des);
4469 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4470 			     (nfrags == 0), queue);
4471 
4472 	/* In case two or more DMA transmit descriptors are allocated for this
4473 	 * non-paged SKB data, the DMA buffer address should be saved to
4474 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4475 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4476 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4477 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4478 	 * sooner or later.
4479 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4480 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4481 	 * this DMA buffer right after the DMA engine completely finishes the
4482 	 * full buffer transmission.
4483 	 */
4484 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4485 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4486 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4487 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4488 
4489 	/* Prepare fragments */
4490 	for (i = 0; i < nfrags; i++) {
4491 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4492 
4493 		des = skb_frag_dma_map(priv->device, frag, 0,
4494 				       skb_frag_size(frag),
4495 				       DMA_TO_DEVICE);
4496 		if (dma_mapping_error(priv->device, des))
4497 			goto dma_map_err;
4498 
4499 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4500 				     (i == nfrags - 1), queue);
4501 
4502 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4503 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4504 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4505 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4506 	}
4507 
4508 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4509 
4510 	/* Only the last descriptor gets to point to the skb. */
4511 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4512 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4513 
4514 	/* Manage tx mitigation */
4515 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4516 	tx_q->tx_count_frames += tx_packets;
4517 
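	/* Decide whether this packet gets the Interrupt-on-Completion bit:
	 * always for HW-timestamped packets, otherwise roughly once every
	 * tx_coal_frames packets, with the armed TX timer covering the
	 * remaining descriptors.
	 */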
4518 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4519 		set_ic = true;
4520 	else if (!priv->tx_coal_frames[queue])
4521 		set_ic = false;
4522 	else if (tx_packets > priv->tx_coal_frames[queue])
4523 		set_ic = true;
4524 	else if ((tx_q->tx_count_frames %
4525 		  priv->tx_coal_frames[queue]) < tx_packets)
4526 		set_ic = true;
4527 	else
4528 		set_ic = false;
4529 
4530 	if (set_ic) {
4531 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4532 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4533 		else
4534 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4535 
4536 		tx_q->tx_count_frames = 0;
4537 		stmmac_set_tx_ic(priv, desc);
4538 	}
4539 
4540 	/* We've used all descriptors we need for this skb, however,
4541 	 * advance cur_tx so that it references a fresh descriptor.
4542 	 * ndo_start_xmit will fill this descriptor the next time it's
4543 	 * called and stmmac_tx_clean may clean up to this descriptor.
4544 	 */
4545 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4546 
4547 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4548 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4549 			  __func__);
4550 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4551 	}
4552 
4553 	u64_stats_update_begin(&txq_stats->q_syncp);
4554 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4555 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4556 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4557 	if (set_ic)
4558 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4559 	u64_stats_update_end(&txq_stats->q_syncp);
4560 
4561 	if (priv->sarc_type)
4562 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4563 
4564 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4565 		     priv->hwts_tx_en)) {
4566 		/* declare that device is doing timestamping */
4567 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4568 		stmmac_enable_tx_timestamp(priv, first);
4569 	}
4570 
4571 	/* If we only have one entry used, then the first entry is the last
4572 	 * segment.
4573 	 */
4574 	is_last_segment = ((tx_q->cur_tx - first_entry) &
4575 			   (priv->dma_conf.dma_tx_size - 1)) == 1;
4576 
4577 	/* Complete the first descriptor before granting the DMA */
4578 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4579 				   is_last_segment, hdr / 4,
4580 				   skb->len - proto_hdr_len);
4581 
4582 	/* If context desc is used to change MSS */
4583 	if (mss_desc) {
4584 		/* Make sure that first descriptor has been completely
4585 		 * written, including its own bit. This is because MSS is
4586 		 * actually before first descriptor, so we need to make
4587 		 * sure that MSS's own bit is the last thing written.
4588 		 */
4589 		dma_wmb();
4590 		stmmac_set_tx_owner(priv, mss_desc);
4591 	}
4592 
4593 	if (netif_msg_pktdata(priv)) {
4594 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4595 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4596 			tx_q->cur_tx, first, nfrags);
4597 		pr_info(">>> frame to be transmitted: ");
4598 		print_pkt(skb->data, skb_headlen(skb));
4599 	}
4600 
4601 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4602 	skb_tx_timestamp(skb);
4603 
4604 	stmmac_flush_tx_descriptors(priv, queue);
4605 	stmmac_tx_timer_arm(priv, queue);
4606 
4607 	return NETDEV_TX_OK;
4608 
4609 dma_map_err:
4610 	dev_err(priv->device, "Tx dma map failed\n");
4611 	dev_kfree_skb(skb);
4612 	priv->xstats.tx_dropped++;
4613 	return NETDEV_TX_OK;
4614 }
4615 
4616 /**
4617  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4618  * @skb: socket buffer to check
4619  *
4620  * Check if a packet has an ethertype that will trigger the IP header checks
4621  * and IP/TCP checksum engine of the stmmac core.
4622  *
4623  * Return: true if the ethertype can trigger the checksum engine, false
4624  * otherwise
4625  */
4626 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4627 {
4628 	int depth = 0;
4629 	__be16 proto;
4630 
4631 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4632 				    &depth);
4633 
4634 	return (depth <= ETH_HLEN) &&
4635 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4636 }
4637 
4638 /**
4639  *  stmmac_xmit - Tx entry point of the driver
4640  *  @skb : the socket buffer
4641  *  @dev : device pointer
4642  *  Description : this is the tx entry point of the driver.
4643  *  It programs the chain or the ring and supports oversized frames
4644  *  and SG feature.
4645  */
4646 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4647 {
4648 	bool enh_desc, has_vlan, set_ic, is_jumbo = false;
4649 	struct stmmac_priv *priv = netdev_priv(dev);
4650 	unsigned int nopaged_len = skb_headlen(skb);
4651 	u32 queue = skb_get_queue_mapping(skb);
4652 	int nfrags = skb_shinfo(skb)->nr_frags;
4653 	unsigned int first_entry, tx_packets;
4654 	int gso = skb_shinfo(skb)->gso_type;
4655 	struct stmmac_txq_stats *txq_stats;
4656 	struct dma_edesc *tbs_desc = NULL;
4657 	struct dma_desc *desc, *first;
4658 	struct stmmac_tx_queue *tx_q;
4659 	int i, csum_insertion = 0;
4660 	int entry, first_tx;
4661 	dma_addr_t des;
4662 	u32 sdu_len;
4663 
4664 	tx_q = &priv->dma_conf.tx_queue[queue];
4665 	txq_stats = &priv->xstats.txq_stats[queue];
4666 	first_tx = tx_q->cur_tx;
4667 
4668 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4669 		stmmac_stop_sw_lpi(priv);
4670 
4671 	/* Manage oversized TCP frames for GMAC4 device */
4672 	if (skb_is_gso(skb) && priv->tso) {
4673 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4674 			return stmmac_tso_xmit(skb, dev);
4675 		if (priv->plat->core_type == DWMAC_CORE_GMAC4 &&
4676 		    (gso & SKB_GSO_UDP_L4))
4677 			return stmmac_tso_xmit(skb, dev);
4678 	}
4679 
4680 	if (priv->est && priv->est->enable &&
4681 	    priv->est->max_sdu[queue]) {
4682 		sdu_len = skb->len;
4683 		/* Add VLAN tag length if VLAN tag insertion offload is requested */
4684 		if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
4685 			sdu_len += VLAN_HLEN;
4686 		if (sdu_len > priv->est->max_sdu[queue]) {
4687 			priv->xstats.max_sdu_txq_drop[queue]++;
4688 			goto max_sdu_err;
4689 		}
4690 	}
4691 
4692 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4693 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4694 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4695 								queue));
4696 			/* This is a hard error, log it. */
4697 			netdev_err(priv->dev,
4698 				   "%s: Tx Ring full when queue awake\n",
4699 				   __func__);
4700 		}
4701 		return NETDEV_TX_BUSY;
4702 	}
4703 
4704 	/* Check if VLAN can be inserted by HW */
4705 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4706 
4707 	entry = tx_q->cur_tx;
4708 	first_entry = entry;
4709 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4710 
4711 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4712 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4713 	 * queues. In that case, checksum offloading for those queues that don't
4714 	 * support tx coe needs to fall back to software checksum calculation.
4715 	 *
4716 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4717 	 * also have to be checksummed in software.
4718 	 */
4719 	if (csum_insertion &&
4720 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4721 	     !stmmac_has_ip_ethertype(skb))) {
4722 		if (unlikely(skb_checksum_help(skb)))
4723 			goto dma_map_err;
4724 		csum_insertion = !csum_insertion;
4725 	}
4726 
4727 	if (likely(priv->extend_desc))
4728 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4729 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4730 		desc = &tx_q->dma_entx[entry].basic;
4731 	else
4732 		desc = tx_q->dma_tx + entry;
4733 
4734 	first = desc;
4735 
4736 	if (has_vlan)
4737 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4738 
4739 	enh_desc = priv->plat->enh_desc;
4740 	/* To program the descriptors according to the size of the frame */
4741 	if (enh_desc)
4742 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4743 
4744 	if (unlikely(is_jumbo)) {
4745 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4746 		if (unlikely(entry < 0) && (entry != -EINVAL))
4747 			goto dma_map_err;
4748 	}
4749 
4750 	for (i = 0; i < nfrags; i++) {
4751 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4752 		int len = skb_frag_size(frag);
4753 		bool last_segment = (i == (nfrags - 1));
4754 
4755 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4756 		WARN_ON(tx_q->tx_skbuff[entry]);
4757 
4758 		if (likely(priv->extend_desc))
4759 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4760 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4761 			desc = &tx_q->dma_entx[entry].basic;
4762 		else
4763 			desc = tx_q->dma_tx + entry;
4764 
4765 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4766 				       DMA_TO_DEVICE);
4767 		if (dma_mapping_error(priv->device, des))
4768 			goto dma_map_err; /* should reuse desc w/o issues */
4769 
4770 		tx_q->tx_skbuff_dma[entry].buf = des;
4771 
4772 		stmmac_set_desc_addr(priv, desc, des);
4773 
4774 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4775 		tx_q->tx_skbuff_dma[entry].len = len;
4776 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4777 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4778 
4779 		/* Prepare the descriptor and set the own bit too */
4780 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4781 				priv->mode, 1, last_segment, skb->len);
4782 	}
4783 
4784 	/* Only the last descriptor gets to point to the skb. */
4785 	tx_q->tx_skbuff[entry] = skb;
4786 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4787 
4788 	/* According to the coalesce parameter the IC bit for the latest
4789 	 * segment is reset and the timer re-started to clean the tx status.
4790 	 * This approach takes care of the fragments: desc is the first
4791 	 * element when there is no SG.
4792 	 */
4793 	tx_packets = (entry + 1) - first_tx;
4794 	tx_q->tx_count_frames += tx_packets;
4795 
4796 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4797 		set_ic = true;
4798 	else if (!priv->tx_coal_frames[queue])
4799 		set_ic = false;
4800 	else if (tx_packets > priv->tx_coal_frames[queue])
4801 		set_ic = true;
4802 	else if ((tx_q->tx_count_frames %
4803 		  priv->tx_coal_frames[queue]) < tx_packets)
4804 		set_ic = true;
4805 	else
4806 		set_ic = false;
4807 
4808 	if (set_ic) {
4809 		if (likely(priv->extend_desc))
4810 			desc = &tx_q->dma_etx[entry].basic;
4811 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4812 			desc = &tx_q->dma_entx[entry].basic;
4813 		else
4814 			desc = &tx_q->dma_tx[entry];
4815 
4816 		tx_q->tx_count_frames = 0;
4817 		stmmac_set_tx_ic(priv, desc);
4818 	}
4819 
4820 	/* We've used all descriptors we need for this skb, however,
4821 	 * advance cur_tx so that it references a fresh descriptor.
4822 	 * ndo_start_xmit will fill this descriptor the next time it's
4823 	 * called and stmmac_tx_clean may clean up to this descriptor.
4824 	 */
4825 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4826 	tx_q->cur_tx = entry;
4827 
4828 	if (netif_msg_pktdata(priv)) {
4829 		netdev_dbg(priv->dev,
4830 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4831 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4832 			   entry, first, nfrags);
4833 
4834 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4835 		print_pkt(skb->data, skb->len);
4836 	}
4837 
4838 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4839 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4840 			  __func__);
4841 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4842 	}
4843 
4844 	u64_stats_update_begin(&txq_stats->q_syncp);
4845 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4846 	if (set_ic)
4847 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4848 	u64_stats_update_end(&txq_stats->q_syncp);
4849 
4850 	if (priv->sarc_type)
4851 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4852 
4853 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4854 	 * problems because all the descriptors are actually ready to be
4855 	 * passed to the DMA engine.
4856 	 */
4857 	if (likely(!is_jumbo)) {
4858 		bool last_segment = (nfrags == 0);
4859 
4860 		des = dma_map_single(priv->device, skb->data,
4861 				     nopaged_len, DMA_TO_DEVICE);
4862 		if (dma_mapping_error(priv->device, des))
4863 			goto dma_map_err;
4864 
4865 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4866 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4867 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4868 
4869 		stmmac_set_desc_addr(priv, first, des);
4870 
4871 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4872 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4873 
4874 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4875 			     priv->hwts_tx_en)) {
4876 			/* declare that device is doing timestamping */
4877 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4878 			stmmac_enable_tx_timestamp(priv, first);
4879 		}
4880 
4881 		/* Prepare the first descriptor setting the OWN bit too */
4882 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4883 				csum_insertion, priv->mode, 0, last_segment,
4884 				skb->len);
4885 	}
4886 
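	/* When time-based scheduling is enabled on this queue, program the
	 * packet's launch time (skb->tstamp) into the enhanced descriptor.
	 */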
4887 	if (tx_q->tbs & STMMAC_TBS_EN) {
4888 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4889 
4890 		tbs_desc = &tx_q->dma_entx[first_entry];
4891 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4892 	}
4893 
4894 	stmmac_set_tx_owner(priv, first);
4895 
4896 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4897 
4898 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4899 	skb_tx_timestamp(skb);
4900 	stmmac_flush_tx_descriptors(priv, queue);
4901 	stmmac_tx_timer_arm(priv, queue);
4902 
4903 	return NETDEV_TX_OK;
4904 
4905 dma_map_err:
4906 	netdev_err(priv->dev, "Tx DMA map failed\n");
4907 max_sdu_err:
4908 	dev_kfree_skb(skb);
4909 	priv->xstats.tx_dropped++;
4910 	return NETDEV_TX_OK;
4911 }
4912 
4913 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4914 {
4915 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4916 	__be16 vlan_proto = veth->h_vlan_proto;
4917 	u16 vlanid;
4918 
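	/* Pop the 802.1Q/802.1ad header from the frame and hand the tag to
	 * the stack via the hwaccel path, but only when the corresponding
	 * RX VLAN offload feature is enabled.
	 */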
4919 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4920 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4921 	    (vlan_proto == htons(ETH_P_8021AD) &&
4922 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4923 		/* pop the vlan tag */
4924 		vlanid = ntohs(veth->h_vlan_TCI);
4925 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4926 		skb_pull(skb, VLAN_HLEN);
4927 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4928 	}
4929 }
4930 
4931 /**
4932  * stmmac_rx_refill - refill the preallocated RX buffers
4933  * @priv: driver private structure
4934  * @queue: RX queue index
4935  * Description : this reallocates the page pool buffers used by the
4936  * zero-copy reception process.
4937  */
4938 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4939 {
4940 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4941 	int dirty = stmmac_rx_dirty(priv, queue);
4942 	unsigned int entry = rx_q->dirty_rx;
4943 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4944 
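	/* Hosts with a DMA address width of 32 bits or less can only reach
	 * pages below 4 GiB, so restrict the page pool allocations.
	 */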
4945 	if (priv->dma_cap.host_dma_width <= 32)
4946 		gfp |= GFP_DMA32;
4947 
4948 	while (dirty-- > 0) {
4949 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4950 		struct dma_desc *p;
4951 		bool use_rx_wd;
4952 
4953 		if (priv->extend_desc)
4954 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4955 		else
4956 			p = rx_q->dma_rx + entry;
4957 
4958 		if (!buf->page) {
4959 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4960 			if (!buf->page)
4961 				break;
4962 		}
4963 
4964 		if (priv->sph_active && !buf->sec_page) {
4965 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4966 			if (!buf->sec_page)
4967 				break;
4968 
4969 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4970 		}
4971 
4972 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4973 
4974 		stmmac_set_desc_addr(priv, p, buf->addr);
4975 		if (priv->sph_active)
4976 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4977 		else
4978 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4979 		stmmac_refill_desc3(priv, rx_q, p);
4980 
4981 		rx_q->rx_count_frames++;
4982 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4983 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4984 			rx_q->rx_count_frames = 0;
4985 
4986 		use_rx_wd = !priv->rx_coal_frames[queue];
4987 		use_rx_wd |= rx_q->rx_count_frames > 0;
4988 		if (!priv->use_riwt)
4989 			use_rx_wd = false;
4990 
4991 		dma_wmb();
4992 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4993 
4994 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4995 	}
4996 	rx_q->dirty_rx = entry;
4997 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4998 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4999 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5000 	/* Wake up Rx DMA from the suspend state if required */
5001 	stmmac_enable_dma_reception(priv, priv->ioaddr, queue);
5002 }
5003 
5004 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
5005 				       struct dma_desc *p,
5006 				       int status, unsigned int len)
5007 {
5008 	unsigned int plen = 0, hlen = 0;
5009 	int coe = priv->hw->rx_csum;
5010 
5011 	/* Not first descriptor, buffer is always zero */
5012 	if (priv->sph_active && len)
5013 		return 0;
5014 
5015 	/* First descriptor, get split header length */
5016 	stmmac_get_rx_header_len(priv, p, &hlen);
5017 	if (priv->sph_active && hlen) {
5018 		priv->xstats.rx_split_hdr_pkt_n++;
5019 		return hlen;
5020 	}
5021 
5022 	/* First descriptor, not last descriptor and not split header */
5023 	if (status & rx_not_ls)
5024 		return priv->dma_conf.dma_buf_sz;
5025 
5026 	plen = stmmac_get_rx_frame_len(priv, p, coe);
5027 
5028 	/* First descriptor and last descriptor and not split header */
5029 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
5030 }
5031 
5032 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
5033 				       struct dma_desc *p,
5034 				       int status, unsigned int len)
5035 {
5036 	int coe = priv->hw->rx_csum;
5037 	unsigned int plen = 0;
5038 
5039 	/* Not split header, buffer is not available */
5040 	if (!priv->sph_active)
5041 		return 0;
5042 
5043 	/* Not last descriptor */
5044 	if (status & rx_not_ls)
5045 		return priv->dma_conf.dma_buf_sz;
5046 
5047 	plen = stmmac_get_rx_frame_len(priv, p, coe);
5048 
5049 	/* Last descriptor */
5050 	return plen - len;
5051 }
5052 
5053 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
5054 				struct xdp_frame *xdpf, bool dma_map)
5055 {
5056 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
5057 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
5058 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
5059 	unsigned int entry = tx_q->cur_tx;
5060 	struct dma_desc *tx_desc;
5061 	dma_addr_t dma_addr;
5062 	bool set_ic;
5063 
5064 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
5065 		return STMMAC_XDP_CONSUMED;
5066 
5067 	if (priv->est && priv->est->enable &&
5068 	    priv->est->max_sdu[queue] &&
5069 	    xdpf->len > priv->est->max_sdu[queue]) {
5070 		priv->xstats.max_sdu_txq_drop[queue]++;
5071 		return STMMAC_XDP_CONSUMED;
5072 	}
5073 
5074 	if (likely(priv->extend_desc))
5075 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
5076 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
5077 		tx_desc = &tx_q->dma_entx[entry].basic;
5078 	else
5079 		tx_desc = tx_q->dma_tx + entry;
5080 
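	/* When dma_map is set (e.g. the zero-copy XDP_TX case handled in
	 * stmmac_xdp_xmit_back()) the frame memory is not backed by this
	 * driver's RX page pool and must be mapped here; otherwise the
	 * buffer already has a page pool DMA address and only needs a
	 * cache sync for the device.
	 */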
5081 	if (dma_map) {
5082 		dma_addr = dma_map_single(priv->device, xdpf->data,
5083 					  xdpf->len, DMA_TO_DEVICE);
5084 		if (dma_mapping_error(priv->device, dma_addr))
5085 			return STMMAC_XDP_CONSUMED;
5086 
5087 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
5088 	} else {
5089 		struct page *page = virt_to_page(xdpf->data);
5090 
5091 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
5092 			   xdpf->headroom;
5093 		dma_sync_single_for_device(priv->device, dma_addr,
5094 					   xdpf->len, DMA_BIDIRECTIONAL);
5095 
5096 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
5097 	}
5098 
5099 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
5100 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
5101 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
5102 	tx_q->tx_skbuff_dma[entry].last_segment = true;
5103 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
5104 
5105 	tx_q->xdpf[entry] = xdpf;
5106 
5107 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
5108 
5109 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
5110 			       csum, priv->mode, true, true,
5111 			       xdpf->len);
5112 
5113 	tx_q->tx_count_frames++;
5114 
5115 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5116 		set_ic = true;
5117 	else
5118 		set_ic = false;
5119 
5120 	if (set_ic) {
5121 		tx_q->tx_count_frames = 0;
5122 		stmmac_set_tx_ic(priv, tx_desc);
5123 		u64_stats_update_begin(&txq_stats->q_syncp);
5124 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
5125 		u64_stats_update_end(&txq_stats->q_syncp);
5126 	}
5127 
5128 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5129 
5130 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5131 	tx_q->cur_tx = entry;
5132 
5133 	return STMMAC_XDP_TX;
5134 }
5135 
5136 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5137 				   int cpu)
5138 {
5139 	int index = cpu;
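	/* Fold the current CPU number onto the available TX queues so that
	 * XDP transmissions from different CPUs are spread across the
	 * configured queues.
	 */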
5140 
5141 	if (unlikely(index < 0))
5142 		index = 0;
5143 
5144 	while (index >= priv->plat->tx_queues_to_use)
5145 		index -= priv->plat->tx_queues_to_use;
5146 
5147 	return index;
5148 }
5149 
5150 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5151 				struct xdp_buff *xdp)
5152 {
5153 	bool zc = !!(xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL);
5154 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5155 	int cpu = smp_processor_id();
5156 	struct netdev_queue *nq;
5157 	int queue;
5158 	int res;
5159 
5160 	if (unlikely(!xdpf))
5161 		return STMMAC_XDP_CONSUMED;
5162 
5163 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5164 	nq = netdev_get_tx_queue(priv->dev, queue);
5165 
5166 	__netif_tx_lock(nq, cpu);
5167 	/* Avoids TX time-out as we are sharing with slow path */
5168 	txq_trans_cond_update(nq);
5169 
5170 	/* For zero copy XDP_TX action, dma_map is true */
5171 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, zc);
5172 	if (res == STMMAC_XDP_TX) {
5173 		stmmac_flush_tx_descriptors(priv, queue);
5174 	} else if (res == STMMAC_XDP_CONSUMED && zc) {
5175 		/* xdp has been freed by xdp_convert_buff_to_frame(),
5176 		 * no need to call xsk_buff_free() again, so return
5177 		 * STMMAC_XSK_CONSUMED.
5178 		 */
5179 		res = STMMAC_XSK_CONSUMED;
5180 		xdp_return_frame(xdpf);
5181 	}
5182 
5183 	__netif_tx_unlock(nq);
5184 
5185 	return res;
5186 }
5187 
5188 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5189 				 struct bpf_prog *prog,
5190 				 struct xdp_buff *xdp)
5191 {
5192 	u32 act;
5193 	int res;
5194 
5195 	act = bpf_prog_run_xdp(prog, xdp);
5196 	switch (act) {
5197 	case XDP_PASS:
5198 		res = STMMAC_XDP_PASS;
5199 		break;
5200 	case XDP_TX:
5201 		res = stmmac_xdp_xmit_back(priv, xdp);
5202 		break;
5203 	case XDP_REDIRECT:
5204 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5205 			res = STMMAC_XDP_CONSUMED;
5206 		else
5207 			res = STMMAC_XDP_REDIRECT;
5208 		break;
5209 	default:
5210 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5211 		fallthrough;
5212 	case XDP_ABORTED:
5213 		trace_xdp_exception(priv->dev, prog, act);
5214 		fallthrough;
5215 	case XDP_DROP:
5216 		res = STMMAC_XDP_CONSUMED;
5217 		break;
5218 	}
5219 
5220 	return res;
5221 }
5222 
5223 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5224 					   struct xdp_buff *xdp)
5225 {
5226 	struct bpf_prog *prog;
5227 	int res;
5228 
5229 	prog = READ_ONCE(priv->xdp_prog);
5230 	if (!prog) {
5231 		res = STMMAC_XDP_PASS;
5232 		goto out;
5233 	}
5234 
5235 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5236 out:
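	/* Encode the XDP verdict as an error pointer so the RX path can tell
	 * it apart from a real skb: STMMAC_XDP_PASS (0) yields NULL, any other
	 * verdict is recovered by the caller via -PTR_ERR().
	 */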
5237 	return ERR_PTR(-res);
5238 }
5239 
5240 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5241 				   int xdp_status)
5242 {
5243 	int cpu = smp_processor_id();
5244 	int queue;
5245 
5246 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5247 
5248 	if (xdp_status & STMMAC_XDP_TX)
5249 		stmmac_tx_timer_arm(priv, queue);
5250 
5251 	if (xdp_status & STMMAC_XDP_REDIRECT)
5252 		xdp_do_flush();
5253 }
5254 
5255 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5256 					       struct xdp_buff *xdp)
5257 {
5258 	unsigned int metasize = xdp->data - xdp->data_meta;
5259 	unsigned int datasize = xdp->data_end - xdp->data;
5260 	struct sk_buff *skb;
5261 
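	/* Zero-copy buffers belong to the XSK pool, so copy the payload (and
	 * any metadata) into a freshly allocated skb before passing it up.
	 */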
5262 	skb = napi_alloc_skb(&ch->rxtx_napi,
5263 			     xdp->data_end - xdp->data_hard_start);
5264 	if (unlikely(!skb))
5265 		return NULL;
5266 
5267 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5268 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5269 	if (metasize)
5270 		skb_metadata_set(skb, metasize);
5271 
5272 	return skb;
5273 }
5274 
5275 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5276 				   struct dma_desc *p, struct dma_desc *np,
5277 				   struct xdp_buff *xdp)
5278 {
5279 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5280 	struct stmmac_channel *ch = &priv->channel[queue];
5281 	unsigned int len = xdp->data_end - xdp->data;
5282 	enum pkt_hash_types hash_type;
5283 	int coe = priv->hw->rx_csum;
5284 	struct sk_buff *skb;
5285 	u32 hash;
5286 
5287 	skb = stmmac_construct_skb_zc(ch, xdp);
5288 	if (!skb) {
5289 		priv->xstats.rx_dropped++;
5290 		return;
5291 	}
5292 
5293 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5294 	if (priv->hw->hw_vlan_en)
5295 		/* MAC level stripping. */
5296 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5297 	else
5298 		/* Driver level stripping. */
5299 		stmmac_rx_vlan(priv->dev, skb);
5300 	skb->protocol = eth_type_trans(skb, priv->dev);
5301 
5302 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5303 		skb_checksum_none_assert(skb);
5304 	else
5305 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5306 
5307 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5308 		skb_set_hash(skb, hash, hash_type);
5309 
5310 	skb_record_rx_queue(skb, queue);
5311 	napi_gro_receive(&ch->rxtx_napi, skb);
5312 
5313 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5314 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5315 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5316 	u64_stats_update_end(&rxq_stats->napi_syncp);
5317 }
5318 
5319 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5320 {
5321 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5322 	unsigned int entry = rx_q->dirty_rx;
5323 	struct dma_desc *rx_desc = NULL;
5324 	bool ret = true;
5325 
5326 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5327 
5328 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5329 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5330 		dma_addr_t dma_addr;
5331 		bool use_rx_wd;
5332 
5333 		if (!buf->xdp) {
5334 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5335 			if (!buf->xdp) {
5336 				ret = false;
5337 				break;
5338 			}
5339 		}
5340 
5341 		if (priv->extend_desc)
5342 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5343 		else
5344 			rx_desc = rx_q->dma_rx + entry;
5345 
5346 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5347 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5348 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5349 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5350 
5351 		rx_q->rx_count_frames++;
5352 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5353 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5354 			rx_q->rx_count_frames = 0;
5355 
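		/* Decide whether to rely on the RX watchdog (RIWT) instead of a
		 * per-descriptor completion interrupt.
		 */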
5356 		use_rx_wd = !priv->rx_coal_frames[queue];
5357 		use_rx_wd |= rx_q->rx_count_frames > 0;
5358 		if (!priv->use_riwt)
5359 			use_rx_wd = false;
5360 
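		/* Make all descriptor writes visible before handing ownership
		 * back to the DMA engine.
		 */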
5361 		dma_wmb();
5362 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5363 
5364 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5365 	}
5366 
5367 	if (rx_desc) {
5368 		rx_q->dirty_rx = entry;
5369 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5370 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5371 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5372 	}
5373 
5374 	return ret;
5375 }
5376 
5377 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5378 {
5379 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5380 	 * to represent incoming packet, whereas cb field in the same structure
5381 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5382 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5383 	 */
5384 	return (struct stmmac_xdp_buff *)xdp;
5385 }
5386 
5387 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5388 {
5389 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5390 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5391 	unsigned int count = 0, error = 0, len = 0;
5392 	int dirty = stmmac_rx_dirty(priv, queue);
5393 	unsigned int next_entry = rx_q->cur_rx;
5394 	u32 rx_errors = 0, rx_dropped = 0;
5395 	unsigned int desc_size;
5396 	struct bpf_prog *prog;
5397 	bool failure = false;
5398 	int xdp_status = 0;
5399 	int status = 0;
5400 
5401 	if (netif_msg_rx_status(priv)) {
5402 		void *rx_head;
5403 
5404 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5405 		if (priv->extend_desc) {
5406 			rx_head = (void *)rx_q->dma_erx;
5407 			desc_size = sizeof(struct dma_extended_desc);
5408 		} else {
5409 			rx_head = (void *)rx_q->dma_rx;
5410 			desc_size = sizeof(struct dma_desc);
5411 		}
5412 
5413 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5414 				    rx_q->dma_rx_phy, desc_size);
5415 	}
5416 	while (count < limit) {
5417 		struct stmmac_rx_buffer *buf;
5418 		struct stmmac_xdp_buff *ctx;
5419 		unsigned int buf1_len = 0;
5420 		struct dma_desc *np, *p;
5421 		int entry;
5422 		int res;
5423 
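		/* Resume from the state saved when a frame spanned the
		 * previous NAPI poll, otherwise start clean.
		 */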
5424 		if (!count && rx_q->state_saved) {
5425 			error = rx_q->state.error;
5426 			len = rx_q->state.len;
5427 		} else {
5428 			rx_q->state_saved = false;
5429 			error = 0;
5430 			len = 0;
5431 		}
5432 
5433 read_again:
5434 		if (count >= limit)
5435 			break;
5436 
5437 		buf1_len = 0;
5438 		entry = next_entry;
5439 		buf = &rx_q->buf_pool[entry];
5440 
5441 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5442 			failure = failure ||
5443 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5444 			dirty = 0;
5445 		}
5446 
5447 		if (priv->extend_desc)
5448 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5449 		else
5450 			p = rx_q->dma_rx + entry;
5451 
5452 		/* read the status of the incoming frame */
5453 		status = stmmac_rx_status(priv, &priv->xstats, p);
5454 		/* stop here if the descriptor is still owned by the DMA */
5455 		if (unlikely(status & dma_own))
5456 			break;
5457 
5458 		/* Prefetch the next RX descriptor */
5459 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5460 						priv->dma_conf.dma_rx_size);
5461 		next_entry = rx_q->cur_rx;
5462 
5463 		if (priv->extend_desc)
5464 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5465 		else
5466 			np = rx_q->dma_rx + next_entry;
5467 
5468 		prefetch(np);
5469 
5470 		/* Ensure a valid XSK buffer before proceeding */
5471 		if (!buf->xdp)
5472 			break;
5473 
5474 		if (priv->extend_desc)
5475 			stmmac_rx_extended_status(priv, &priv->xstats,
5476 						  rx_q->dma_erx + entry);
5477 		if (unlikely(status == discard_frame)) {
5478 			xsk_buff_free(buf->xdp);
5479 			buf->xdp = NULL;
5480 			dirty++;
5481 			error = 1;
5482 			if (!priv->hwts_rx_en)
5483 				rx_errors++;
5484 		}
5485 
5486 		if (unlikely(error && (status & rx_not_ls)))
5487 			goto read_again;
5488 		if (unlikely(error)) {
5489 			count++;
5490 			continue;
5491 		}
5492 
5493 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5494 		if (likely(status & rx_not_ls)) {
5495 			xsk_buff_free(buf->xdp);
5496 			buf->xdp = NULL;
5497 			dirty++;
5498 			count++;
5499 			goto read_again;
5500 		}
5501 
5502 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5503 		ctx->priv = priv;
5504 		ctx->desc = p;
5505 		ctx->ndesc = np;
5506 
5507 		/* XDP ZC frames only support primary buffers for now */
5508 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5509 		len += buf1_len;
5510 
5511 		/* ACS is disabled; strip manually. */
5512 		if (likely(!(status & rx_not_ls))) {
5513 			buf1_len -= ETH_FCS_LEN;
5514 			len -= ETH_FCS_LEN;
5515 		}
5516 
5517 		/* RX buffer is good and fits into an XSK pool buffer */
5518 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5519 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5520 
5521 		prog = READ_ONCE(priv->xdp_prog);
5522 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5523 
5524 		switch (res) {
5525 		case STMMAC_XDP_PASS:
5526 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5527 			xsk_buff_free(buf->xdp);
5528 			break;
5529 		case STMMAC_XDP_CONSUMED:
5530 			xsk_buff_free(buf->xdp);
5531 			fallthrough;
5532 		case STMMAC_XSK_CONSUMED:
5533 			rx_dropped++;
5534 			break;
5535 		case STMMAC_XDP_TX:
5536 		case STMMAC_XDP_REDIRECT:
5537 			xdp_status |= res;
5538 			break;
5539 		}
5540 
5541 		buf->xdp = NULL;
5542 		dirty++;
5543 		count++;
5544 	}
5545 
5546 	if (status & rx_not_ls) {
5547 		rx_q->state_saved = true;
5548 		rx_q->state.error = error;
5549 		rx_q->state.len = len;
5550 	}
5551 
5552 	stmmac_finalize_xdp_rx(priv, xdp_status);
5553 
5554 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5555 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5556 	u64_stats_update_end(&rxq_stats->napi_syncp);
5557 
5558 	priv->xstats.rx_dropped += rx_dropped;
5559 	priv->xstats.rx_errors += rx_errors;
5560 
5561 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5562 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5563 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5564 		else
5565 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5566 
5567 		return (int)count;
5568 	}
5569 
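	/* On a buffer allocation failure return the full budget so NAPI keeps
	 * polling and the refill can be retried.
	 */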
5570 	return failure ? limit : (int)count;
5571 }
5572 
5573 /**
5574  * stmmac_rx - manage the receive process
5575  * @priv: driver private structure
5576  * @limit: napi budget
5577  * @queue: RX queue index.
5578  * Description: this is the function called by the NAPI poll method.
5579  * It gets all the frames inside the ring.
5580  */
5581 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5582 {
5583 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5584 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5585 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5586 	struct stmmac_channel *ch = &priv->channel[queue];
5587 	unsigned int count = 0, error = 0, len = 0;
5588 	int status = 0, coe = priv->hw->rx_csum;
5589 	unsigned int next_entry = rx_q->cur_rx;
5590 	enum dma_data_direction dma_dir;
5591 	unsigned int desc_size;
5592 	struct sk_buff *skb = NULL;
5593 	struct stmmac_xdp_buff ctx;
5594 	int xdp_status = 0;
5595 	int bufsz;
5596 
5597 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5598 	bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
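	/* Never walk more descriptors than the RX ring holds (minus one) in a
	 * single poll.
	 */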
5599 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5600 
5601 	if (netif_msg_rx_status(priv)) {
5602 		void *rx_head;
5603 
5604 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5605 		if (priv->extend_desc) {
5606 			rx_head = (void *)rx_q->dma_erx;
5607 			desc_size = sizeof(struct dma_extended_desc);
5608 		} else {
5609 			rx_head = (void *)rx_q->dma_rx;
5610 			desc_size = sizeof(struct dma_desc);
5611 		}
5612 
5613 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5614 				    rx_q->dma_rx_phy, desc_size);
5615 	}
5616 	while (count < limit) {
5617 		unsigned int buf1_len = 0, buf2_len = 0;
5618 		enum pkt_hash_types hash_type;
5619 		struct stmmac_rx_buffer *buf;
5620 		struct dma_desc *np, *p;
5621 		int entry;
5622 		u32 hash;
5623 
5624 		if (!count && rx_q->state_saved) {
5625 			skb = rx_q->state.skb;
5626 			error = rx_q->state.error;
5627 			len = rx_q->state.len;
5628 		} else {
5629 			rx_q->state_saved = false;
5630 			skb = NULL;
5631 			error = 0;
5632 			len = 0;
5633 		}
5634 
5635 read_again:
5636 		if (count >= limit)
5637 			break;
5638 
5639 		buf1_len = 0;
5640 		buf2_len = 0;
5641 		entry = next_entry;
5642 		buf = &rx_q->buf_pool[entry];
5643 
5644 		if (priv->extend_desc)
5645 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5646 		else
5647 			p = rx_q->dma_rx + entry;
5648 
5649 		/* read the status of the incoming frame */
5650 		status = stmmac_rx_status(priv, &priv->xstats, p);
5651 		/* stop here if the descriptor is still owned by the DMA */
5652 		if (unlikely(status & dma_own))
5653 			break;
5654 
5655 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5656 						priv->dma_conf.dma_rx_size);
5657 		next_entry = rx_q->cur_rx;
5658 
5659 		if (priv->extend_desc)
5660 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5661 		else
5662 			np = rx_q->dma_rx + next_entry;
5663 
5664 		prefetch(np);
5665 
5666 		if (priv->extend_desc)
5667 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5668 		if (unlikely(status == discard_frame)) {
5669 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5670 			buf->page = NULL;
5671 			error = 1;
5672 			if (!priv->hwts_rx_en)
5673 				rx_errors++;
5674 		}
5675 
5676 		if (unlikely(error && (status & rx_not_ls)))
5677 			goto read_again;
5678 		if (unlikely(error)) {
5679 			dev_kfree_skb(skb);
5680 			skb = NULL;
5681 			count++;
5682 			continue;
5683 		}
5684 
5685 		/* Buffer is good. Go on. */
5686 
5687 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5688 		len += buf1_len;
5689 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5690 		len += buf2_len;
5691 
5692 		/* ACS is disabled; strip manually. */
5693 		if (likely(!(status & rx_not_ls))) {
5694 			if (buf2_len) {
5695 				buf2_len -= ETH_FCS_LEN;
5696 				len -= ETH_FCS_LEN;
5697 			} else if (buf1_len) {
5698 				buf1_len -= ETH_FCS_LEN;
5699 				len -= ETH_FCS_LEN;
5700 			}
5701 		}
5702 
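		/* Run the XDP program only on the first buffer of a frame,
		 * before any skb has been built; fragments are appended below.
		 */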
5703 		if (!skb) {
5704 			unsigned int pre_len, sync_len;
5705 
5706 			dma_sync_single_for_cpu(priv->device, buf->addr,
5707 						buf1_len, dma_dir);
5708 			net_prefetch(page_address(buf->page) +
5709 				     buf->page_offset);
5710 
5711 			xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5712 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5713 					 buf->page_offset, buf1_len, true);
5714 
5715 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5716 				  buf->page_offset;
5717 
5718 			ctx.priv = priv;
5719 			ctx.desc = p;
5720 			ctx.ndesc = np;
5721 
5722 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5723 			/* Due to xdp_adjust_tail, the DMA sync for_device must
5724 			 * cover the maximum length the CPU may have touched
5725 			 */
5726 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5727 				   buf->page_offset;
5728 			sync_len = max(sync_len, pre_len);
5729 
5730 			/* For any verdict other than XDP_PASS */
5731 			if (IS_ERR(skb)) {
5732 				unsigned int xdp_res = -PTR_ERR(skb);
5733 
5734 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5735 					page_pool_put_page(rx_q->page_pool,
5736 							   virt_to_head_page(ctx.xdp.data),
5737 							   sync_len, true);
5738 					buf->page = NULL;
5739 					rx_dropped++;
5740 
5741 					/* Clear skb, which only carried the
5742 					 * XDP verdict as an error pointer.
5743 					 */
5744 					skb = NULL;
5745 
5746 					if (unlikely((status & rx_not_ls)))
5747 						goto read_again;
5748 
5749 					count++;
5750 					continue;
5751 				} else if (xdp_res & (STMMAC_XDP_TX |
5752 						      STMMAC_XDP_REDIRECT)) {
5753 					xdp_status |= xdp_res;
5754 					buf->page = NULL;
5755 					skb = NULL;
5756 					count++;
5757 					continue;
5758 				}
5759 			}
5760 		}
5761 
5762 		if (!skb) {
5763 			unsigned int head_pad_len;
5764 
5765 			/* XDP program may expand or reduce tail */
5766 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5767 
5768 			skb = napi_build_skb(page_address(buf->page),
5769 					     rx_q->napi_skb_frag_size);
5770 			if (!skb) {
5771 				page_pool_recycle_direct(rx_q->page_pool,
5772 							 buf->page);
5773 				rx_dropped++;
5774 				count++;
5775 				goto drain_data;
5776 			}
5777 
5778 			/* XDP program may adjust header */
5779 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5780 			skb_reserve(skb, head_pad_len);
5781 			skb_put(skb, buf1_len);
5782 			skb_mark_for_recycle(skb);
5783 			buf->page = NULL;
5784 		} else if (buf1_len) {
5785 			dma_sync_single_for_cpu(priv->device, buf->addr,
5786 						buf1_len, dma_dir);
5787 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5788 					buf->page, buf->page_offset, buf1_len,
5789 					priv->dma_conf.dma_buf_sz);
5790 			buf->page = NULL;
5791 		}
5792 
5793 		if (buf2_len) {
5794 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5795 						buf2_len, dma_dir);
5796 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5797 					buf->sec_page, 0, buf2_len,
5798 					priv->dma_conf.dma_buf_sz);
5799 			buf->sec_page = NULL;
5800 		}
5801 
5802 drain_data:
5803 		if (likely(status & rx_not_ls))
5804 			goto read_again;
5805 		if (!skb)
5806 			continue;
5807 
5808 		/* Got entire packet into SKB. Finish it. */
5809 
5810 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5811 
5812 		if (priv->hw->hw_vlan_en)
5813 			/* MAC level stripping. */
5814 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5815 		else
5816 			/* Driver level stripping. */
5817 			stmmac_rx_vlan(priv->dev, skb);
5818 
5819 		skb->protocol = eth_type_trans(skb, priv->dev);
5820 
5821 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5822 		    (status & csum_none))
5823 			skb_checksum_none_assert(skb);
5824 		else
5825 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5826 
5827 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5828 			skb_set_hash(skb, hash, hash_type);
5829 
5830 		skb_record_rx_queue(skb, queue);
5831 		napi_gro_receive(&ch->rx_napi, skb);
5832 		skb = NULL;
5833 
5834 		rx_packets++;
5835 		rx_bytes += len;
5836 		count++;
5837 	}
5838 
5839 	if (status & rx_not_ls || skb) {
5840 		rx_q->state_saved = true;
5841 		rx_q->state.skb = skb;
5842 		rx_q->state.error = error;
5843 		rx_q->state.len = len;
5844 	}
5845 
5846 	stmmac_finalize_xdp_rx(priv, xdp_status);
5847 
5848 	stmmac_rx_refill(priv, queue);
5849 
5850 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5851 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5852 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5853 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5854 	u64_stats_update_end(&rxq_stats->napi_syncp);
5855 
5856 	priv->xstats.rx_dropped += rx_dropped;
5857 	priv->xstats.rx_errors += rx_errors;
5858 
5859 	return count;
5860 }
5861 
5862 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5863 {
5864 	struct stmmac_channel *ch =
5865 		container_of(napi, struct stmmac_channel, rx_napi);
5866 	struct stmmac_priv *priv = ch->priv_data;
5867 	struct stmmac_rxq_stats *rxq_stats;
5868 	u32 chan = ch->index;
5869 	int work_done;
5870 
5871 	rxq_stats = &priv->xstats.rxq_stats[chan];
5872 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5873 	u64_stats_inc(&rxq_stats->napi.poll);
5874 	u64_stats_update_end(&rxq_stats->napi_syncp);
5875 
5876 	work_done = stmmac_rx(priv, budget, chan);
5877 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5878 		unsigned long flags;
5879 
5880 		spin_lock_irqsave(&ch->lock, flags);
5881 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5882 		spin_unlock_irqrestore(&ch->lock, flags);
5883 	}
5884 
5885 	return work_done;
5886 }
5887 
5888 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5889 {
5890 	struct stmmac_channel *ch =
5891 		container_of(napi, struct stmmac_channel, tx_napi);
5892 	struct stmmac_priv *priv = ch->priv_data;
5893 	struct stmmac_txq_stats *txq_stats;
5894 	bool pending_packets = false;
5895 	u32 chan = ch->index;
5896 	int work_done;
5897 
5898 	txq_stats = &priv->xstats.txq_stats[chan];
5899 	u64_stats_update_begin(&txq_stats->napi_syncp);
5900 	u64_stats_inc(&txq_stats->napi.poll);
5901 	u64_stats_update_end(&txq_stats->napi_syncp);
5902 
5903 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5904 	work_done = min(work_done, budget);
5905 
5906 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5907 		unsigned long flags;
5908 
5909 		spin_lock_irqsave(&ch->lock, flags);
5910 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5911 		spin_unlock_irqrestore(&ch->lock, flags);
5912 	}
5913 
5914 	/* TX still has packets to handle; check if we need to arm the TX timer */
5915 	if (pending_packets)
5916 		stmmac_tx_timer_arm(priv, chan);
5917 
5918 	return work_done;
5919 }
5920 
5921 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5922 {
5923 	struct stmmac_channel *ch =
5924 		container_of(napi, struct stmmac_channel, rxtx_napi);
5925 	struct stmmac_priv *priv = ch->priv_data;
5926 	bool tx_pending_packets = false;
5927 	int rx_done, tx_done, rxtx_done;
5928 	struct stmmac_rxq_stats *rxq_stats;
5929 	struct stmmac_txq_stats *txq_stats;
5930 	u32 chan = ch->index;
5931 
5932 	rxq_stats = &priv->xstats.rxq_stats[chan];
5933 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5934 	u64_stats_inc(&rxq_stats->napi.poll);
5935 	u64_stats_update_end(&rxq_stats->napi_syncp);
5936 
5937 	txq_stats = &priv->xstats.txq_stats[chan];
5938 	u64_stats_update_begin(&txq_stats->napi_syncp);
5939 	u64_stats_inc(&txq_stats->napi.poll);
5940 	u64_stats_update_end(&txq_stats->napi_syncp);
5941 
5942 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5943 	tx_done = min(tx_done, budget);
5944 
5945 	rx_done = stmmac_rx_zc(priv, budget, chan);
5946 
5947 	rxtx_done = max(tx_done, rx_done);
5948 
5949 	/* If either TX or RX work is not complete, return budget
5950 	 * and keep polling
5951 	 */
5952 	if (rxtx_done >= budget)
5953 		return budget;
5954 
5955 	/* all work done, exit the polling mode */
5956 	if (napi_complete_done(napi, rxtx_done)) {
5957 		unsigned long flags;
5958 
5959 		spin_lock_irqsave(&ch->lock, flags);
5960 		/* Both RX and TX work are complete,
5961 		 * so enable both RX & TX IRQs.
5962 		 */
5963 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5964 		spin_unlock_irqrestore(&ch->lock, flags);
5965 	}
5966 
5967 	/* TX still has packets to handle; check if we need to arm the TX timer */
5968 	if (tx_pending_packets)
5969 		stmmac_tx_timer_arm(priv, chan);
5970 
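	/* Return less than the full budget so NAPI treats this poll as done. */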
5971 	return min(rxtx_done, budget - 1);
5972 }
5973 
5974 /**
5975  *  stmmac_tx_timeout
5976  *  @dev : Pointer to net device structure
5977  *  @txqueue: the index of the hanging transmit queue
5978  *  Description: this function is called when a packet transmission fails to
5979  *   complete within a reasonable time. The driver will mark the error in the
5980  *   netdev structure and arrange for the device to be reset to a sane state
5981  *   in order to transmit a new packet.
5982  */
5983 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5984 {
5985 	struct stmmac_priv *priv = netdev_priv(dev);
5986 
5987 	stmmac_global_err(priv);
5988 }
5989 
5990 /**
5991  *  stmmac_set_rx_mode - entry point for multicast addressing
5992  *  @dev : pointer to the device structure
5993  *  Description:
5994  *  This function is a driver entry point which gets called by the kernel
5995  *  whenever multicast addresses must be enabled/disabled.
5996  *  Return value:
5997  *  void.
5998  *
5999  *  FIXME: This may need RXC to be running, but it may be called with BH
6000  *  disabled, which means we can't call phylink_rx_clk_stop*().
6001  */
6002 static void stmmac_set_rx_mode(struct net_device *dev)
6003 {
6004 	struct stmmac_priv *priv = netdev_priv(dev);
6005 
6006 	stmmac_set_filter(priv, priv->hw, dev);
6007 }
6008 
6009 /**
6010  *  stmmac_change_mtu - entry point to change MTU size for the device.
6011  *  @dev : device pointer.
6012  *  @new_mtu : the new MTU size for the device.
6013  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
6014  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
6015  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
6016  *  Return value:
6017  *  0 on success and an appropriate (-)ve integer as defined in errno.h
6018  *  file on failure.
6019  */
6020 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
6021 {
6022 	struct stmmac_priv *priv = netdev_priv(dev);
6023 	int txfifosz = priv->plat->tx_fifo_size;
6024 	struct stmmac_dma_conf *dma_conf;
6025 	const int mtu = new_mtu;
6026 	int ret;
6027 
6028 	if (txfifosz == 0)
6029 		txfifosz = priv->dma_cap.tx_fifo_size;
6030 
6031 	txfifosz /= priv->plat->tx_queues_to_use;
6032 
6033 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
6034 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
6035 		return -EINVAL;
6036 	}
6037 
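	/* Keep the requested MTU in 'mtu'; the aligned value is only used for
	 * the FIFO and maximum-size checks below.
	 */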
6038 	new_mtu = STMMAC_ALIGN(new_mtu);
6039 
6040 	/* Reject the new MTU if the TX FIFO is too small or the MTU is too large */
6041 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
6042 		return -EINVAL;
6043 
6044 	if (netif_running(dev)) {
6045 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
6046 		/* Try to allocate the new DMA conf with the new mtu */
6047 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
6048 		if (IS_ERR(dma_conf)) {
6049 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
6050 				   mtu);
6051 			return PTR_ERR(dma_conf);
6052 		}
6053 
6054 		__stmmac_release(dev);
6055 
6056 		ret = __stmmac_open(dev, dma_conf);
6057 		if (ret) {
6058 			free_dma_desc_resources(priv, dma_conf);
6059 			kfree(dma_conf);
6060 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
6061 			return ret;
6062 		}
6063 
6064 		kfree(dma_conf);
6065 
6066 		stmmac_set_rx_mode(dev);
6067 	}
6068 
6069 	WRITE_ONCE(dev->mtu, mtu);
6070 	netdev_update_features(dev);
6071 
6072 	return 0;
6073 }
6074 
6075 static netdev_features_t stmmac_fix_features(struct net_device *dev,
6076 					     netdev_features_t features)
6077 {
6078 	struct stmmac_priv *priv = netdev_priv(dev);
6079 
6080 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
6081 		features &= ~NETIF_F_RXCSUM;
6082 
6083 	if (!priv->plat->tx_coe)
6084 		features &= ~NETIF_F_CSUM_MASK;
6085 
6086 	/* Some GMAC devices have buggy Jumbo frame support that
6087 	 * requires the Tx COE to be disabled for oversized frames
6088 	 * (due to limited buffer sizes). In this case we disable
6089 	 * TX csum insertion in the TDES and do not use SF.
6090 	 */
6091 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
6092 		features &= ~NETIF_F_CSUM_MASK;
6093 
6094 	/* Enable or disable TSO as requested via ethtool */
6095 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
6096 		if (features & NETIF_F_TSO)
6097 			priv->tso = true;
6098 		else
6099 			priv->tso = false;
6100 	}
6101 
6102 	return features;
6103 }
6104 
6105 static int stmmac_set_features(struct net_device *netdev,
6106 			       netdev_features_t features)
6107 {
6108 	struct stmmac_priv *priv = netdev_priv(netdev);
6109 
6110 	/* Keep the COE type if RX checksum offload is supported */
6111 	if (features & NETIF_F_RXCSUM)
6112 		priv->hw->rx_csum = priv->plat->rx_coe;
6113 	else
6114 		priv->hw->rx_csum = 0;
6115 	/* No return check needed because rx_coe has been validated before and
6116 	 * will be corrected in case of issue.
6117 	 */
6118 	stmmac_rx_ipc(priv, priv->hw);
6119 
6120 	if (priv->sph_capable) {
6121 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
6122 		u32 chan;
6123 
6124 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
6125 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6126 	}
6127 
6128 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
6129 		priv->hw->hw_vlan_en = true;
6130 	else
6131 		priv->hw->hw_vlan_en = false;
6132 
6133 	phylink_rx_clk_stop_block(priv->phylink);
6134 	stmmac_set_hw_vlan_mode(priv, priv->hw);
6135 	phylink_rx_clk_stop_unblock(priv->phylink);
6136 
6137 	return 0;
6138 }
6139 
6140 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6141 {
6142 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6143 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6144 	u32 queues_count;
6145 	u32 queue;
6146 	bool xmac;
6147 
6148 	xmac = dwmac_is_xmac(priv->plat->core_type);
6149 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6150 
6151 	if (priv->irq_wake)
6152 		pm_wakeup_event(priv->device, 0);
6153 
6154 	if (priv->dma_cap.estsel)
6155 		stmmac_est_irq_status(priv, priv, priv->dev,
6156 				      &priv->xstats, tx_cnt);
6157 
6158 	if (stmmac_fpe_supported(priv))
6159 		stmmac_fpe_irq_status(priv);
6160 
6161 	/* To handle the GMAC's own interrupts */
6162 	if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
6163 		int status = stmmac_host_irq_status(priv, &priv->xstats);
6164 
6165 		if (unlikely(status)) {
6166 			/* For LPI we need to save the tx status */
6167 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6168 				priv->tx_path_in_lpi_mode = true;
6169 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6170 				priv->tx_path_in_lpi_mode = false;
6171 		}
6172 
6173 		for (queue = 0; queue < queues_count; queue++)
6174 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6175 
6176 		stmmac_timestamp_interrupt(priv, priv);
6177 	}
6178 }
6179 
6180 /**
6181  *  stmmac_interrupt - main ISR
6182  *  @irq: interrupt number.
6183  *  @dev_id: to pass the net device pointer.
6184  *  Description: this is the main driver interrupt service routine.
6185  *  It can call:
6186  *  o DMA service routine (to manage incoming frame reception and transmission
6187  *    status)
6188  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6189  *    interrupts.
6190  */
6191 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6192 {
6193 	struct net_device *dev = (struct net_device *)dev_id;
6194 	struct stmmac_priv *priv = netdev_priv(dev);
6195 
6196 	/* Check if adapter is up */
6197 	if (test_bit(STMMAC_DOWN, &priv->state))
6198 		return IRQ_HANDLED;
6199 
6200 	/* Check ASP error if it isn't delivered via an individual IRQ */
6201 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6202 		return IRQ_HANDLED;
6203 
6204 	/* To handle Common interrupts */
6205 	stmmac_common_interrupt(priv);
6206 
6207 	/* To handle DMA interrupts */
6208 	stmmac_dma_interrupt(priv);
6209 
6210 	return IRQ_HANDLED;
6211 }
6212 
6213 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6214 {
6215 	struct net_device *dev = (struct net_device *)dev_id;
6216 	struct stmmac_priv *priv = netdev_priv(dev);
6217 
6218 	/* Check if adapter is up */
6219 	if (test_bit(STMMAC_DOWN, &priv->state))
6220 		return IRQ_HANDLED;
6221 
6222 	/* To handle Common interrupts */
6223 	stmmac_common_interrupt(priv);
6224 
6225 	return IRQ_HANDLED;
6226 }
6227 
6228 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6229 {
6230 	struct net_device *dev = (struct net_device *)dev_id;
6231 	struct stmmac_priv *priv = netdev_priv(dev);
6232 
6233 	/* Check if adapter is up */
6234 	if (test_bit(STMMAC_DOWN, &priv->state))
6235 		return IRQ_HANDLED;
6236 
6237 	/* Check if a fatal error happened */
6238 	stmmac_safety_feat_interrupt(priv);
6239 
6240 	return IRQ_HANDLED;
6241 }
6242 
6243 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6244 {
6245 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6246 	struct stmmac_dma_conf *dma_conf;
6247 	int chan = tx_q->queue_index;
6248 	struct stmmac_priv *priv;
6249 	int status;
6250 
6251 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6252 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6253 
6254 	/* Check if adapter is up */
6255 	if (test_bit(STMMAC_DOWN, &priv->state))
6256 		return IRQ_HANDLED;
6257 
6258 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6259 
6260 	if (unlikely(status & tx_hard_error_bump_tc)) {
6261 		/* Try to bump up the dma threshold on this failure */
6262 		stmmac_bump_dma_threshold(priv, chan);
6263 	} else if (unlikely(status == tx_hard_error)) {
6264 		stmmac_tx_err(priv, chan);
6265 	}
6266 
6267 	return IRQ_HANDLED;
6268 }
6269 
6270 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6271 {
6272 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6273 	struct stmmac_dma_conf *dma_conf;
6274 	int chan = rx_q->queue_index;
6275 	struct stmmac_priv *priv;
6276 
6277 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6278 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6279 
6280 	/* Check if adapter is up */
6281 	if (test_bit(STMMAC_DOWN, &priv->state))
6282 		return IRQ_HANDLED;
6283 
6284 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6285 
6286 	return IRQ_HANDLED;
6287 }
6288 
6289 /**
6290  *  stmmac_ioctl - Entry point for the Ioctl
6291  *  @dev: Device pointer.
6292  *  @rq: An IOCTL specific structure, that can contain a pointer to
6293  *  a proprietary structure used to pass information to the driver.
6294  *  @cmd: IOCTL command
6295  *  Description:
6296  *  Currently it only supports the phy_mii_ioctl(...) requests.
6297  */
6298 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6299 {
6300 	struct stmmac_priv *priv = netdev_priv(dev);
6301 	int ret = -EOPNOTSUPP;
6302 
6303 	if (!netif_running(dev))
6304 		return -EINVAL;
6305 
6306 	switch (cmd) {
6307 	case SIOCGMIIPHY:
6308 	case SIOCGMIIREG:
6309 	case SIOCSMIIREG:
6310 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6311 		break;
6312 	default:
6313 		break;
6314 	}
6315 
6316 	return ret;
6317 }
6318 
6319 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6320 				    void *cb_priv)
6321 {
6322 	struct stmmac_priv *priv = cb_priv;
6323 	int ret = -EOPNOTSUPP;
6324 
6325 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6326 		return ret;
6327 
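	/* Quiesce NAPI while the classifier is installed or removed. */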
6328 	__stmmac_disable_all_queues(priv);
6329 
6330 	switch (type) {
6331 	case TC_SETUP_CLSU32:
6332 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6333 		break;
6334 	case TC_SETUP_CLSFLOWER:
6335 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6336 		break;
6337 	default:
6338 		break;
6339 	}
6340 
6341 	stmmac_enable_all_queues(priv);
6342 	return ret;
6343 }
6344 
6345 static LIST_HEAD(stmmac_block_cb_list);
6346 
6347 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6348 			   void *type_data)
6349 {
6350 	struct stmmac_priv *priv = netdev_priv(ndev);
6351 
6352 	switch (type) {
6353 	case TC_QUERY_CAPS:
6354 		return stmmac_tc_query_caps(priv, priv, type_data);
6355 	case TC_SETUP_QDISC_MQPRIO:
6356 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6357 	case TC_SETUP_BLOCK:
6358 		return flow_block_cb_setup_simple(type_data,
6359 						  &stmmac_block_cb_list,
6360 						  stmmac_setup_tc_block_cb,
6361 						  priv, priv, true);
6362 	case TC_SETUP_QDISC_CBS:
6363 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6364 	case TC_SETUP_QDISC_TAPRIO:
6365 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6366 	case TC_SETUP_QDISC_ETF:
6367 		return stmmac_tc_setup_etf(priv, priv, type_data);
6368 	default:
6369 		return -EOPNOTSUPP;
6370 	}
6371 }
6372 
6373 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6374 			       struct net_device *sb_dev)
6375 {
6376 	int gso = skb_shinfo(skb)->gso_type;
6377 
6378 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6379 		/*
6380 		 * There is no way to determine the number of TSO/USO
6381 		 * capable queues. Let's always use Queue 0
6382 		 * because if TSO/USO is supported then at least this
6383 		 * one will be capable.
6384 		 */
6385 		return 0;
6386 	}
6387 
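	/* Fold the generic pick into the range of enabled TX queues. */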
6388 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6389 }
6390 
6391 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6392 {
6393 	struct stmmac_priv *priv = netdev_priv(ndev);
6394 	int ret = 0;
6395 
6396 	ret = pm_runtime_resume_and_get(priv->device);
6397 	if (ret < 0)
6398 		return ret;
6399 
6400 	ret = eth_mac_addr(ndev, addr);
6401 	if (ret)
6402 		goto set_mac_error;
6403 
6404 	phylink_rx_clk_stop_block(priv->phylink);
6405 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6406 	phylink_rx_clk_stop_unblock(priv->phylink);
6407 
6408 set_mac_error:
6409 	pm_runtime_put(priv->device);
6410 
6411 	return ret;
6412 }
6413 
6414 #ifdef CONFIG_DEBUG_FS
6415 static struct dentry *stmmac_fs_dir;
6416 
6417 static void sysfs_display_ring(void *head, int size, int extend_desc,
6418 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6419 {
6420 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6421 	struct dma_desc *p = (struct dma_desc *)head;
6422 	unsigned int desc_size;
6423 	dma_addr_t dma_addr;
6424 	int i;
6425 
6426 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6427 	for (i = 0; i < size; i++) {
6428 		dma_addr = dma_phy_addr + i * desc_size;
6429 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6430 				i, &dma_addr,
6431 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6432 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6433 		if (extend_desc)
6434 			p = &(++ep)->basic;
6435 		else
6436 			p++;
6437 	}
6438 }
6439 
6440 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6441 {
6442 	struct net_device *dev = seq->private;
6443 	struct stmmac_priv *priv = netdev_priv(dev);
6444 	u32 rx_count = priv->plat->rx_queues_to_use;
6445 	u32 tx_count = priv->plat->tx_queues_to_use;
6446 	u32 queue;
6447 
6448 	if ((dev->flags & IFF_UP) == 0)
6449 		return 0;
6450 
6451 	for (queue = 0; queue < rx_count; queue++) {
6452 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6453 
6454 		seq_printf(seq, "RX Queue %d:\n", queue);
6455 
6456 		if (priv->extend_desc) {
6457 			seq_printf(seq, "Extended descriptor ring:\n");
6458 			sysfs_display_ring((void *)rx_q->dma_erx,
6459 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6460 		} else {
6461 			seq_printf(seq, "Descriptor ring:\n");
6462 			sysfs_display_ring((void *)rx_q->dma_rx,
6463 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6464 		}
6465 	}
6466 
6467 	for (queue = 0; queue < tx_count; queue++) {
6468 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6469 
6470 		seq_printf(seq, "TX Queue %d:\n", queue);
6471 
6472 		if (priv->extend_desc) {
6473 			seq_printf(seq, "Extended descriptor ring:\n");
6474 			sysfs_display_ring((void *)tx_q->dma_etx,
6475 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6476 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6477 			seq_printf(seq, "Descriptor ring:\n");
6478 			sysfs_display_ring((void *)tx_q->dma_tx,
6479 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6480 		}
6481 	}
6482 
6483 	return 0;
6484 }
6485 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6486 
6487 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6488 {
6489 	static const char * const dwxgmac_timestamp_source[] = {
6490 		"None",
6491 		"Internal",
6492 		"External",
6493 		"Both",
6494 	};
6495 	static const char * const dwxgmac_safety_feature_desc[] = {
6496 		"No",
6497 		"All Safety Features with ECC and Parity",
6498 		"All Safety Features without ECC or Parity",
6499 		"All Safety Features with Parity Only",
6500 		"ECC Only",
6501 		"UNDEFINED",
6502 		"UNDEFINED",
6503 		"UNDEFINED",
6504 	};
6505 	struct net_device *dev = seq->private;
6506 	struct stmmac_priv *priv = netdev_priv(dev);
6507 
6508 	if (!priv->hw_cap_support) {
6509 		seq_printf(seq, "DMA HW features not supported\n");
6510 		return 0;
6511 	}
6512 
6513 	seq_printf(seq, "==============================\n");
6514 	seq_printf(seq, "\tDMA HW features\n");
6515 	seq_printf(seq, "==============================\n");
6516 
6517 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6518 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6519 	seq_printf(seq, "\t1000 Mbps: %s\n",
6520 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6521 	seq_printf(seq, "\tHalf duplex: %s\n",
6522 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6523 	if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
6524 		seq_printf(seq,
6525 			   "\tNumber of Additional MAC address registers: %d\n",
6526 			   priv->dma_cap.multi_addr);
6527 	} else {
6528 		seq_printf(seq, "\tHash Filter: %s\n",
6529 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6530 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6531 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6532 	}
6533 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6534 		   (priv->dma_cap.pcs) ? "Y" : "N");
6535 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6536 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6537 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6538 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6539 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6540 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6541 	seq_printf(seq, "\tRMON module: %s\n",
6542 		   (priv->dma_cap.rmon) ? "Y" : "N");
6543 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6544 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6545 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6546 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6547 	if (priv->plat->core_type == DWMAC_CORE_XGMAC)
6548 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6549 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6550 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6551 		   (priv->dma_cap.eee) ? "Y" : "N");
6552 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6553 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6554 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6555 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6556 	    priv->plat->core_type == DWMAC_CORE_XGMAC) {
6557 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6558 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6559 	} else {
6560 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6561 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6562 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6563 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6564 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6565 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6566 	}
6567 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6568 		   priv->dma_cap.number_rx_channel);
6569 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6570 		   priv->dma_cap.number_tx_channel);
6571 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6572 		   priv->dma_cap.number_rx_queues);
6573 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6574 		   priv->dma_cap.number_tx_queues);
6575 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6576 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6577 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6578 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6579 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6580 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6581 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6582 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6583 		   priv->dma_cap.pps_out_num);
6584 	seq_printf(seq, "\tSafety Features: %s\n",
6585 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6586 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6587 		   priv->dma_cap.frpsel ? "Y" : "N");
6588 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6589 		   priv->dma_cap.host_dma_width);
6590 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6591 		   priv->dma_cap.rssen ? "Y" : "N");
6592 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6593 		   priv->dma_cap.vlhash ? "Y" : "N");
6594 	seq_printf(seq, "\tSplit Header: %s\n",
6595 		   priv->dma_cap.sphen ? "Y" : "N");
6596 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6597 		   priv->dma_cap.vlins ? "Y" : "N");
6598 	seq_printf(seq, "\tDouble VLAN: %s\n",
6599 		   priv->dma_cap.dvlan ? "Y" : "N");
6600 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6601 		   priv->dma_cap.l3l4fnum);
6602 	seq_printf(seq, "\tARP Offloading: %s\n",
6603 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6604 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6605 		   priv->dma_cap.estsel ? "Y" : "N");
6606 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6607 		   priv->dma_cap.fpesel ? "Y" : "N");
6608 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6609 		   priv->dma_cap.tbssel ? "Y" : "N");
6610 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6611 		   priv->dma_cap.tbs_ch_num);
6612 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6613 		   priv->dma_cap.sgfsel ? "Y" : "N");
6614 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6615 		   BIT(priv->dma_cap.ttsfd) >> 1);
6616 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6617 		   priv->dma_cap.numtc);
6618 	seq_printf(seq, "\tDCB Feature: %s\n",
6619 		   priv->dma_cap.dcben ? "Y" : "N");
6620 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6621 		   priv->dma_cap.advthword ? "Y" : "N");
6622 	seq_printf(seq, "\tPTP Offload: %s\n",
6623 		   priv->dma_cap.ptoen ? "Y" : "N");
6624 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6625 		   priv->dma_cap.osten ? "Y" : "N");
6626 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6627 		   priv->dma_cap.pfcen ? "Y" : "N");
6628 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6629 		   BIT(priv->dma_cap.frpes) << 6);
6630 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6631 		   BIT(priv->dma_cap.frpbs) << 6);
6632 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6633 		   priv->dma_cap.frppipe_num);
6634 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6635 		   priv->dma_cap.nrvf_num ?
6636 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6637 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6638 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6639 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6640 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6641 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6642 		   priv->dma_cap.cbtisel ? "Y" : "N");
6643 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6644 		   priv->dma_cap.aux_snapshot_n);
6645 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6646 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6647 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6648 		   priv->dma_cap.edma ? "Y" : "N");
6649 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6650 		   priv->dma_cap.ediffc ? "Y" : "N");
6651 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6652 		   priv->dma_cap.vxn ? "Y" : "N");
6653 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6654 		   priv->dma_cap.dbgmem ? "Y" : "N");
6655 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6656 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6657 	return 0;
6658 }
6659 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6660 
6661 /* Use network device events to rename debugfs file entries.
6662  */
6663 static int stmmac_device_event(struct notifier_block *unused,
6664 			       unsigned long event, void *ptr)
6665 {
6666 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6667 	struct stmmac_priv *priv = netdev_priv(dev);
6668 
6669 	if (dev->netdev_ops != &stmmac_netdev_ops)
6670 		goto done;
6671 
6672 	switch (event) {
6673 	case NETDEV_CHANGENAME:
6674 		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6675 		break;
6676 	}
6677 done:
6678 	return NOTIFY_DONE;
6679 }
6680 
6681 static struct notifier_block stmmac_notifier = {
6682 	.notifier_call = stmmac_device_event,
6683 };
6684 
6685 static void stmmac_init_fs(struct net_device *dev)
6686 {
6687 	struct stmmac_priv *priv = netdev_priv(dev);
6688 
6689 	rtnl_lock();
6690 
6691 	/* Create per netdev entries */
6692 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6693 
6694 	/* Entry to report DMA RX/TX rings */
6695 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6696 			    &stmmac_rings_status_fops);
6697 
6698 	/* Entry to report the DMA HW features */
6699 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6700 			    &stmmac_dma_cap_fops);
6701 
6702 	rtnl_unlock();
6703 }
6704 
6705 static void stmmac_exit_fs(struct net_device *dev)
6706 {
6707 	struct stmmac_priv *priv = netdev_priv(dev);
6708 
6709 	debugfs_remove_recursive(priv->dbgfs_dir);
6710 }
6711 #endif /* CONFIG_DEBUG_FS */
6712 
6713 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6714 {
6715 	unsigned char *data = (unsigned char *)&vid_le;
6716 	unsigned char data_byte = 0;
6717 	u32 crc = ~0x0;
6718 	u32 temp = 0;
6719 	int i, bits;
6720 
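	/* Bit-serial little-endian CRC-32 (polynomial 0xedb88320) over the
	 * 12-bit VLAN ID, as used by the MAC's VLAN hash filter.
	 */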
6721 	bits = get_bitmask_order(VLAN_VID_MASK);
6722 	for (i = 0; i < bits; i++) {
6723 		if ((i % 8) == 0)
6724 			data_byte = data[i / 8];
6725 
6726 		temp = ((crc & 1) ^ data_byte) & 1;
6727 		crc >>= 1;
6728 		data_byte >>= 1;
6729 
6730 		if (temp)
6731 			crc ^= 0xedb88320;
6732 	}
6733 
6734 	return crc;
6735 }
6736 
6737 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6738 {
6739 	u32 crc, hash = 0;
6740 	u16 pmatch = 0;
6741 	int count = 0;
6742 	u16 vid = 0;
6743 
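	/* Build the 16-bin VLAN hash: the top 4 bits of the bit-reversed CRC
	 * of each active VID select the bin to set.
	 */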
6744 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6745 		__le16 vid_le = cpu_to_le16(vid);
6746 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6747 		hash |= (1 << crc);
6748 		count++;
6749 	}
6750 
6751 	if (!priv->dma_cap.vlhash) {
6752 		if (count > 2) /* VID = 0 always passes filter */
6753 			return -EOPNOTSUPP;
6754 
6755 		pmatch = vid;
6756 		hash = 0;
6757 	}
6758 
6759 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6760 }
6761 
6762 /* FIXME: This may need RXC to be running, but it may be called with BH
6763  * disabled, which means we can't call phylink_rx_clk_stop*().
6764  */
6765 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6766 {
6767 	struct stmmac_priv *priv = netdev_priv(ndev);
6768 	bool is_double = false;
6769 	int ret;
6770 
6771 	ret = pm_runtime_resume_and_get(priv->device);
6772 	if (ret < 0)
6773 		return ret;
6774 
6775 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6776 		is_double = true;
6777 
6778 	set_bit(vid, priv->active_vlans);
6779 	ret = stmmac_vlan_update(priv, is_double);
6780 	if (ret) {
6781 		clear_bit(vid, priv->active_vlans);
6782 		goto err_pm_put;
6783 	}
6784 
6785 	if (priv->hw->num_vlan) {
6786 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6787 		if (ret)
6788 			goto err_pm_put;
6789 	}
6790 err_pm_put:
6791 	pm_runtime_put(priv->device);
6792 
6793 	return ret;
6794 }
6795 
6796 /* FIXME: This may need RXC to be running, but it may be called with BH
6797  * disabled, which means we can't call phylink_rx_clk_stop*().
6798  */
6799 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6800 {
6801 	struct stmmac_priv *priv = netdev_priv(ndev);
6802 	bool is_double = false;
6803 	int ret;
6804 
6805 	ret = pm_runtime_resume_and_get(priv->device);
6806 	if (ret < 0)
6807 		return ret;
6808 
6809 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6810 		is_double = true;
6811 
6812 	clear_bit(vid, priv->active_vlans);
6813 
6814 	if (priv->hw->num_vlan) {
6815 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6816 		if (ret)
6817 			goto del_vlan_error;
6818 	}
6819 
6820 	ret = stmmac_vlan_update(priv, is_double);
6821 
6822 del_vlan_error:
6823 	pm_runtime_put(priv->device);
6824 
6825 	return ret;
6826 }
6827 
6828 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6829 {
6830 	struct stmmac_priv *priv = netdev_priv(dev);
6831 
6832 	switch (bpf->command) {
6833 	case XDP_SETUP_PROG:
6834 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6835 	case XDP_SETUP_XSK_POOL:
6836 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6837 					     bpf->xsk.queue_id);
6838 	default:
6839 		return -EOPNOTSUPP;
6840 	}
6841 }
6842 
6843 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6844 			   struct xdp_frame **frames, u32 flags)
6845 {
6846 	struct stmmac_priv *priv = netdev_priv(dev);
6847 	int cpu = smp_processor_id();
6848 	struct netdev_queue *nq;
6849 	int i, nxmit = 0;
6850 	int queue;
6851 
6852 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6853 		return -ENETDOWN;
6854 
6855 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6856 		return -EINVAL;
6857 
6858 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6859 	nq = netdev_get_tx_queue(priv->dev, queue);
6860 
6861 	__netif_tx_lock(nq, cpu);
6862 	/* Avoids TX time-out as we are sharing with slow path */
6863 	txq_trans_cond_update(nq);
6864 
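	/* Stop at the first frame the TX ring cannot accept; the caller frees
	 * the frames that were not transmitted based on the return value.
	 */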
6865 	for (i = 0; i < num_frames; i++) {
6866 		int res;
6867 
6868 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6869 		if (res == STMMAC_XDP_CONSUMED)
6870 			break;
6871 
6872 		nxmit++;
6873 	}
6874 
6875 	if (flags & XDP_XMIT_FLUSH) {
6876 		stmmac_flush_tx_descriptors(priv, queue);
6877 		stmmac_tx_timer_arm(priv, queue);
6878 	}
6879 
6880 	__netif_tx_unlock(nq);
6881 
6882 	return nxmit;
6883 }
6884 
6885 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6886 {
6887 	struct stmmac_channel *ch = &priv->channel[queue];
6888 	unsigned long flags;
6889 
6890 	spin_lock_irqsave(&ch->lock, flags);
6891 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6892 	spin_unlock_irqrestore(&ch->lock, flags);
6893 
6894 	stmmac_stop_rx_dma(priv, queue);
6895 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6896 }
6897 
6898 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6899 {
6900 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6901 	struct stmmac_channel *ch = &priv->channel[queue];
6902 	unsigned long flags;
6903 	u32 buf_size;
6904 	int ret;
6905 
6906 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6907 	if (ret) {
6908 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6909 		return;
6910 	}
6911 
6912 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6913 	if (ret) {
6914 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6915 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6916 		return;
6917 	}
6918 
6919 	stmmac_reset_rx_queue(priv, queue);
6920 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6921 
6922 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6923 			    rx_q->dma_rx_phy, rx_q->queue_index);
6924 
6925 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6926 			     sizeof(struct dma_desc));
6927 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6928 			       rx_q->rx_tail_addr, rx_q->queue_index);
6929 
6930 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6931 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6932 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6933 				      buf_size,
6934 				      rx_q->queue_index);
6935 	} else {
6936 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6937 				      priv->dma_conf.dma_buf_sz,
6938 				      rx_q->queue_index);
6939 	}
6940 
6941 	stmmac_start_rx_dma(priv, queue);
6942 
6943 	spin_lock_irqsave(&ch->lock, flags);
6944 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6945 	spin_unlock_irqrestore(&ch->lock, flags);
6946 }
6947 
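/**
 * stmmac_disable_tx_queue - quiesce a single TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: mask the TX interrupt of the DMA channel, stop the TX DMA
 * and free the queue's descriptor resources.
 */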
6948 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6949 {
6950 	struct stmmac_channel *ch = &priv->channel[queue];
6951 	unsigned long flags;
6952 
6953 	spin_lock_irqsave(&ch->lock, flags);
6954 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6955 	spin_unlock_irqrestore(&ch->lock, flags);
6956 
6957 	stmmac_stop_tx_dma(priv, queue);
6958 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6959 }
6960 
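/**
 * stmmac_enable_tx_queue - (re)start a single TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: allocate and initialise the TX descriptor ring, program the
 * DMA channel (descriptor base and tail pointer, TBS when available), then
 * start the TX DMA and re-enable the TX interrupt of the channel.
 */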
6961 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6962 {
6963 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6964 	struct stmmac_channel *ch = &priv->channel[queue];
6965 	unsigned long flags;
6966 	int ret;
6967 
6968 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6969 	if (ret) {
6970 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6971 		return;
6972 	}
6973 
6974 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6975 	if (ret) {
6976 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6977 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6978 		return;
6979 	}
6980 
6981 	stmmac_reset_tx_queue(priv, queue);
6982 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6983 
6984 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6985 			    tx_q->dma_tx_phy, tx_q->queue_index);
6986 
6987 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6988 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6989 
6990 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6991 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6992 			       tx_q->tx_tail_addr, tx_q->queue_index);
6993 
6994 	stmmac_start_tx_dma(priv, queue);
6995 
6996 	spin_lock_irqsave(&ch->lock, flags);
6997 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6998 	spin_unlock_irqrestore(&ch->lock, flags);
6999 }
7000 
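/**
 * stmmac_xdp_release - tear down the datapath for an XDP reconfiguration
 * @dev: network device pointer
 * Description: stop the TX queues and NAPI instances, cancel the TX timers,
 * free the IRQ lines, stop all DMA channels, release the descriptor
 * resources and disable the MAC, so that the datapath can be brought back
 * up with stmmac_xdp_open().
 */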
7001 void stmmac_xdp_release(struct net_device *dev)
7002 {
7003 	struct stmmac_priv *priv = netdev_priv(dev);
7004 	u32 chan;
7005 
7006 	/* Ensure tx function is not running */
7007 	netif_tx_disable(dev);
7008 
7009 	/* Disable NAPI process */
7010 	stmmac_disable_all_queues(priv);
7011 
7012 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7013 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7014 
7015 	/* Free the IRQ lines */
7016 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
7017 
7018 	/* Stop TX/RX DMA channels */
7019 	stmmac_stop_all_dma(priv);
7020 
7021 	/* Release and free the Rx/Tx resources */
7022 	free_dma_desc_resources(priv, &priv->dma_conf);
7023 
7024 	/* Disable the MAC Rx/Tx */
7025 	stmmac_mac_set(priv, priv->ioaddr, false);
7026 
7027 	/* set trans_start so we don't get spurious
7028 	 * watchdogs during reset
7029 	 */
7030 	netif_trans_update(dev);
7031 	netif_carrier_off(dev);
7032 }
7033 
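/**
 * stmmac_xdp_open - bring the datapath back up after an XDP reconfiguration
 * @dev: network device pointer
 * Description: counterpart of stmmac_xdp_release(): allocate and initialise
 * the descriptor rings, reprogram the RX/TX DMA channels, enable the MAC,
 * request the IRQ lines and restart the NAPI instances and TX queues.
 * Returns 0 on success, otherwise a negative errno.
 */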
7034 int stmmac_xdp_open(struct net_device *dev)
7035 {
7036 	struct stmmac_priv *priv = netdev_priv(dev);
7037 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7038 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7039 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
7040 	struct stmmac_rx_queue *rx_q;
7041 	struct stmmac_tx_queue *tx_q;
7042 	u32 buf_size;
7043 	bool sph_en;
7044 	u32 chan;
7045 	int ret;
7046 
7047 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
7048 	if (ret < 0) {
7049 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
7050 			   __func__);
7051 		goto dma_desc_error;
7052 	}
7053 
7054 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
7055 	if (ret < 0) {
7056 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
7057 			   __func__);
7058 		goto init_error;
7059 	}
7060 
7061 	stmmac_reset_queues_param(priv);
7062 
7063 	/* DMA CSR Channel configuration */
7064 	for (chan = 0; chan < dma_csr_ch; chan++) {
7065 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
7066 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
7067 	}
7068 
7069 	/* Adjust Split header */
7070 	sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
7071 
7072 	/* DMA RX Channel Configuration */
7073 	for (chan = 0; chan < rx_cnt; chan++) {
7074 		rx_q = &priv->dma_conf.rx_queue[chan];
7075 
7076 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7077 				    rx_q->dma_rx_phy, chan);
7078 
7079 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
7080 				     (rx_q->buf_alloc_num *
7081 				      sizeof(struct dma_desc));
7082 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
7083 				       rx_q->rx_tail_addr, chan);
7084 
7085 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
7086 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
7087 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7088 					      buf_size,
7089 					      rx_q->queue_index);
7090 		} else {
7091 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7092 					      priv->dma_conf.dma_buf_sz,
7093 					      rx_q->queue_index);
7094 		}
7095 
7096 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
7097 	}
7098 
7099 	/* DMA TX Channel Configuration */
7100 	for (chan = 0; chan < tx_cnt; chan++) {
7101 		tx_q = &priv->dma_conf.tx_queue[chan];
7102 
7103 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7104 				    tx_q->dma_tx_phy, chan);
7105 
7106 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7107 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7108 				       tx_q->tx_tail_addr, chan);
7109 
7110 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7111 	}
7112 
7113 	/* Enable the MAC Rx/Tx */
7114 	stmmac_mac_set(priv, priv->ioaddr, true);
7115 
7116 	/* Start Rx & Tx DMA Channels */
7117 	stmmac_start_all_dma(priv);
7118 
7119 	ret = stmmac_request_irq(dev);
7120 	if (ret)
7121 		goto irq_error;
7122 
7123 	/* Enable NAPI process */
7124 	stmmac_enable_all_queues(priv);
7125 	netif_carrier_on(dev);
7126 	netif_tx_start_all_queues(dev);
7127 	stmmac_enable_all_dma_irq(priv);
7128 
7129 	return 0;
7130 
7131 irq_error:
7132 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7133 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7134 
7135 init_error:
7136 	free_dma_desc_resources(priv, &priv->dma_conf);
7137 dma_desc_error:
7138 	return ret;
7139 }
7140 
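/**
 * stmmac_xsk_wakeup - ndo_xsk_wakeup callback
 * @dev: network device pointer
 * @queue: queue index to wake
 * @flags: XDP_WAKEUP_* flags
 * Description: kick the rxtx NAPI instance of an AF_XDP enabled queue so
 * that pending zero-copy RX/TX work gets processed.
 */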
7141 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7142 {
7143 	struct stmmac_priv *priv = netdev_priv(dev);
7144 	struct stmmac_rx_queue *rx_q;
7145 	struct stmmac_tx_queue *tx_q;
7146 	struct stmmac_channel *ch;
7147 
7148 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7149 	    !netif_carrier_ok(priv->dev))
7150 		return -ENETDOWN;
7151 
7152 	if (!stmmac_xdp_is_enabled(priv))
7153 		return -EINVAL;
7154 
7155 	if (queue >= priv->plat->rx_queues_to_use ||
7156 	    queue >= priv->plat->tx_queues_to_use)
7157 		return -EINVAL;
7158 
7159 	rx_q = &priv->dma_conf.rx_queue[queue];
7160 	tx_q = &priv->dma_conf.tx_queue[queue];
7161 	ch = &priv->channel[queue];
7162 
7163 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7164 		return -EINVAL;
7165 
7166 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7167 		/* EQoS does not have a per-DMA channel SW interrupt,
7168 		 * so we schedule the rxtx NAPI right away.
7169 		 */
7170 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7171 			__napi_schedule(&ch->rxtx_napi);
7172 	}
7173 
7174 	return 0;
7175 }
7176 
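/**
 * stmmac_get_stats64 - ndo_get_stats64 callback
 * @dev: network device pointer
 * @stats: structure to fill with the accumulated statistics
 * Description: aggregate the per-queue packet and byte counters (read under
 * their u64_stats syncp) and copy the error counters from priv->xstats.
 */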
7177 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7178 {
7179 	struct stmmac_priv *priv = netdev_priv(dev);
7180 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7181 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7182 	unsigned int start;
7183 	int q;
7184 
7185 	for (q = 0; q < tx_cnt; q++) {
7186 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7187 		u64 tx_packets;
7188 		u64 tx_bytes;
7189 
7190 		do {
7191 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7192 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7193 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7194 		do {
7195 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7196 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7197 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7198 
7199 		stats->tx_packets += tx_packets;
7200 		stats->tx_bytes += tx_bytes;
7201 	}
7202 
7203 	for (q = 0; q < rx_cnt; q++) {
7204 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7205 		u64 rx_packets;
7206 		u64 rx_bytes;
7207 
7208 		do {
7209 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7210 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7211 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7212 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7213 
7214 		stats->rx_packets += rx_packets;
7215 		stats->rx_bytes += rx_bytes;
7216 	}
7217 
7218 	stats->rx_dropped = priv->xstats.rx_dropped;
7219 	stats->rx_errors = priv->xstats.rx_errors;
7220 	stats->tx_dropped = priv->xstats.tx_dropped;
7221 	stats->tx_errors = priv->xstats.tx_errors;
7222 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7223 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7224 	stats->rx_length_errors = priv->xstats.rx_length;
7225 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7226 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7227 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7228 }
7229 
7230 static const struct net_device_ops stmmac_netdev_ops = {
7231 	.ndo_open = stmmac_open,
7232 	.ndo_start_xmit = stmmac_xmit,
7233 	.ndo_stop = stmmac_release,
7234 	.ndo_change_mtu = stmmac_change_mtu,
7235 	.ndo_fix_features = stmmac_fix_features,
7236 	.ndo_set_features = stmmac_set_features,
7237 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7238 	.ndo_tx_timeout = stmmac_tx_timeout,
7239 	.ndo_eth_ioctl = stmmac_ioctl,
7240 	.ndo_get_stats64 = stmmac_get_stats64,
7241 	.ndo_setup_tc = stmmac_setup_tc,
7242 	.ndo_select_queue = stmmac_select_queue,
7243 	.ndo_set_mac_address = stmmac_set_mac_address,
7244 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7245 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7246 	.ndo_bpf = stmmac_bpf,
7247 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7248 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7249 	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
7250 	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
7251 };
7252 
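/**
 * stmmac_reset_subtask - restart the interface after a requested reset
 * @priv: driver private structure
 * Description: if a reset has been requested and the interface is not
 * already down, close and reopen the device under the rtnl lock to fully
 * reinitialise the hardware.
 */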
7253 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7254 {
7255 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7256 		return;
7257 	if (test_bit(STMMAC_DOWN, &priv->state))
7258 		return;
7259 
7260 	netdev_err(priv->dev, "Reset adapter.\n");
7261 
7262 	rtnl_lock();
7263 	netif_trans_update(priv->dev);
7264 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7265 		usleep_range(1000, 2000);
7266 
7267 	set_bit(STMMAC_DOWN, &priv->state);
7268 	dev_close(priv->dev);
7269 	dev_open(priv->dev, NULL);
7270 	clear_bit(STMMAC_DOWN, &priv->state);
7271 	clear_bit(STMMAC_RESETING, &priv->state);
7272 	rtnl_unlock();
7273 }
7274 
7275 static void stmmac_service_task(struct work_struct *work)
7276 {
7277 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7278 			service_task);
7279 
7280 	stmmac_reset_subtask(priv);
7281 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7282 }
7283 
7284 static void stmmac_print_actphyif(struct stmmac_priv *priv)
7285 {
7286 	const char **phyif_table;
7287 	const char *actphyif_str;
7288 	size_t phyif_table_size;
7289 
7290 	switch (priv->plat->core_type) {
7291 	case DWMAC_CORE_MAC100:
7292 		return;
7293 
7294 	case DWMAC_CORE_GMAC:
7295 	case DWMAC_CORE_GMAC4:
7296 		phyif_table = stmmac_dwmac_actphyif;
7297 		phyif_table_size = ARRAY_SIZE(stmmac_dwmac_actphyif);
7298 		break;
7299 
7300 	case DWMAC_CORE_XGMAC:
7301 		phyif_table = stmmac_dwxgmac_phyif;
7302 		phyif_table_size = ARRAY_SIZE(stmmac_dwxgmac_phyif);
7303 		break;
7304 	}
7305 
7306 	if (priv->dma_cap.actphyif < phyif_table_size)
7307 		actphyif_str = phyif_table[priv->dma_cap.actphyif];
7308 	else
7309 		actphyif_str = NULL;
7310 
7311 	if (!actphyif_str)
7312 		actphyif_str = "unknown";
7313 
7314 	dev_info(priv->device, "Active PHY interface: %s (%u)\n",
7315 		 actphyif_str, priv->dma_cap.actphyif);
7316 }
7317 
7318 /**
7319  *  stmmac_hw_init - Init the MAC device
7320  *  @priv: driver private structure
7321  *  Description: this function configures the MAC device according to
7322  *  platform parameters and the HW capability register. It prepares the
7323  *  driver to use either ring or chain mode and to use either enhanced or
7324  *  normal descriptors.
7325  */
7326 static int stmmac_hw_init(struct stmmac_priv *priv)
7327 {
7328 	int ret;
7329 
7330 	/* dwmac-sun8i only works in chain mode */
7331 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7332 		chain_mode = 1;
7333 	priv->chain_mode = chain_mode;
7334 
7335 	/* Initialize HW Interface */
7336 	ret = stmmac_hwif_init(priv);
7337 	if (ret)
7338 		return ret;
7339 
7340 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7341 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7342 	if (priv->hw_cap_support) {
7343 		dev_info(priv->device, "DMA HW capability register supported\n");
7344 
7345 		/* We can override some GMAC/DMA configuration fields that
7346 		 * are passed through the platform (e.g. enh_desc, tx_coe)
7347 		 * with the values from the HW capability register
7348 		 * (if supported).
7349 		 */
7350 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7351 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7352 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7353 		if (priv->dma_cap.hash_tb_sz) {
7354 			priv->hw->multicast_filter_bins =
7355 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7356 			priv->hw->mcast_bits_log2 =
7357 					ilog2(priv->hw->multicast_filter_bins);
7358 		}
7359 
7360 		/* TXCOE doesn't work in thresh DMA mode */
7361 		if (priv->plat->force_thresh_dma_mode)
7362 			priv->plat->tx_coe = 0;
7363 		else
7364 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7365 
7366 		/* In case of GMAC4 rx_coe is from HW cap register. */
7367 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7368 
7369 		if (priv->dma_cap.rx_coe_type2)
7370 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7371 		else if (priv->dma_cap.rx_coe_type1)
7372 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7373 
7374 		stmmac_print_actphyif(priv);
7375 	} else {
7376 		dev_info(priv->device, "No HW DMA feature register supported\n");
7377 	}
7378 
7379 	if (priv->plat->rx_coe) {
7380 		priv->hw->rx_csum = priv->plat->rx_coe;
7381 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7382 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7383 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7384 	}
7385 	if (priv->plat->tx_coe)
7386 		dev_info(priv->device, "TX Checksum insertion supported\n");
7387 
7388 	if (priv->plat->pmt) {
7389 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7390 		device_set_wakeup_capable(priv->device, 1);
7391 		devm_pm_set_wake_irq(priv->device, priv->wol_irq);
7392 	}
7393 
7394 	if (priv->dma_cap.tsoen)
7395 		dev_info(priv->device, "TSO supported\n");
7396 
7397 	if (priv->dma_cap.number_rx_queues &&
7398 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7399 		dev_warn(priv->device,
7400 			 "Number of Rx queues (%u) exceeds dma capability\n",
7401 			 priv->plat->rx_queues_to_use);
7402 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7403 	}
7404 	if (priv->dma_cap.number_tx_queues &&
7405 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7406 		dev_warn(priv->device,
7407 			 "Number of Tx queues (%u) exceeds dma capability\n",
7408 			 priv->plat->tx_queues_to_use);
7409 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7410 	}
7411 
7412 	if (priv->dma_cap.rx_fifo_size &&
7413 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7414 		dev_warn(priv->device,
7415 			 "Rx FIFO size (%u) exceeds dma capability\n",
7416 			 priv->plat->rx_fifo_size);
7417 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7418 	}
7419 	if (priv->dma_cap.tx_fifo_size &&
7420 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7421 		dev_warn(priv->device,
7422 			 "Tx FIFO size (%u) exceeds dma capability\n",
7423 			 priv->plat->tx_fifo_size);
7424 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7425 	}
7426 
7427 	priv->hw->vlan_fail_q_en =
7428 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7429 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7430 
7431 	/* Run HW quirks, if any */
7432 	if (priv->hwif_quirks) {
7433 		ret = priv->hwif_quirks(priv);
7434 		if (ret)
7435 			return ret;
7436 	}
7437 
7438 	/* Rx Watchdog is available in cores newer than 3.40.
7439 	 * In some cases, for example on buggy HW, this feature
7440 	 * has to be disabled; this can be done by setting the
7441 	 * riwt_off field in the platform data.
7442 	 */
7443 	if ((priv->synopsys_id >= DWMAC_CORE_3_50 ||
7444 	     priv->plat->core_type == DWMAC_CORE_XGMAC) &&
7445 	    !priv->plat->riwt_off) {
7446 		priv->use_riwt = 1;
7447 		dev_info(priv->device,
7448 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7449 	}
7450 
7451 	/* Unimplemented PCS init (as indicated by stmmac_do_callback()
7452 	 * perversely returning -EINVAL) is non-fatal.
7453 	 */
7454 	ret = stmmac_mac_pcs_init(priv);
7455 	if (ret != -EINVAL)
7456 		return ret;
7457 
7458 	return 0;
7459 }
7460 
7461 static void stmmac_napi_add(struct net_device *dev)
7462 {
7463 	struct stmmac_priv *priv = netdev_priv(dev);
7464 	u32 queue, maxq;
7465 
7466 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7467 
7468 	for (queue = 0; queue < maxq; queue++) {
7469 		struct stmmac_channel *ch = &priv->channel[queue];
7470 
7471 		ch->priv_data = priv;
7472 		ch->index = queue;
7473 		spin_lock_init(&ch->lock);
7474 
7475 		if (queue < priv->plat->rx_queues_to_use) {
7476 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7477 		}
7478 		if (queue < priv->plat->tx_queues_to_use) {
7479 			netif_napi_add_tx(dev, &ch->tx_napi,
7480 					  stmmac_napi_poll_tx);
7481 		}
7482 		if (queue < priv->plat->rx_queues_to_use &&
7483 		    queue < priv->plat->tx_queues_to_use) {
7484 			netif_napi_add(dev, &ch->rxtx_napi,
7485 				       stmmac_napi_poll_rxtx);
7486 		}
7487 	}
7488 }
7489 
7490 static void stmmac_napi_del(struct net_device *dev)
7491 {
7492 	struct stmmac_priv *priv = netdev_priv(dev);
7493 	u32 queue, maxq;
7494 
7495 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7496 
7497 	for (queue = 0; queue < maxq; queue++) {
7498 		struct stmmac_channel *ch = &priv->channel[queue];
7499 
7500 		if (queue < priv->plat->rx_queues_to_use)
7501 			netif_napi_del(&ch->rx_napi);
7502 		if (queue < priv->plat->tx_queues_to_use)
7503 			netif_napi_del(&ch->tx_napi);
7504 		if (queue < priv->plat->rx_queues_to_use &&
7505 		    queue < priv->plat->tx_queues_to_use) {
7506 			netif_napi_del(&ch->rxtx_napi);
7507 		}
7508 	}
7509 }
7510 
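/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: network device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: close the interface if it is running, re-create the NAPI
 * instances for the new queue counts, refresh the default RSS indirection
 * table (unless user-configured) and reopen the interface.
 */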
7511 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7512 {
7513 	struct stmmac_priv *priv = netdev_priv(dev);
7514 	int ret = 0, i;
7515 
7516 	if (netif_running(dev))
7517 		stmmac_release(dev);
7518 
7519 	stmmac_napi_del(dev);
7520 
7521 	priv->plat->rx_queues_to_use = rx_cnt;
7522 	priv->plat->tx_queues_to_use = tx_cnt;
7523 	if (!netif_is_rxfh_configured(dev))
7524 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7525 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7526 									rx_cnt);
7527 
7528 	stmmac_napi_add(dev);
7529 
7530 	if (netif_running(dev))
7531 		ret = stmmac_open(dev);
7532 
7533 	return ret;
7534 }
7535 
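/**
 * stmmac_reinit_ringparam - change the RX/TX descriptor ring sizes
 * @dev: network device pointer
 * @rx_size: new RX ring size
 * @tx_size: new TX ring size
 * Description: close the interface if it is running, update the DMA ring
 * sizes and reopen the interface so that the new sizes take effect.
 */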
7536 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7537 {
7538 	struct stmmac_priv *priv = netdev_priv(dev);
7539 	int ret = 0;
7540 
7541 	if (netif_running(dev))
7542 		stmmac_release(dev);
7543 
7544 	priv->dma_conf.dma_rx_size = rx_size;
7545 	priv->dma_conf.dma_tx_size = tx_size;
7546 
7547 	if (netif_running(dev))
7548 		ret = stmmac_open(dev);
7549 
7550 	return ret;
7551 }
7552 
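/**
 * stmmac_xdp_rx_timestamp - xmo_rx_timestamp XDP metadata callback
 * @_ctx: XDP buffer context
 * @timestamp: where to store the RX hardware timestamp
 * Description: extract the RX hardware timestamp from the descriptor that
 * carries it (the context/next descriptor on DWMAC4/XGMAC cores), corrected
 * by the CDC error adjustment. Returns -ENODATA if RX timestamping is
 * disabled or no timestamp is available.
 */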
7553 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7554 {
7555 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7556 	struct dma_desc *desc_contains_ts = ctx->desc;
7557 	struct stmmac_priv *priv = ctx->priv;
7558 	struct dma_desc *ndesc = ctx->ndesc;
7559 	struct dma_desc *desc = ctx->desc;
7560 	u64 ns = 0;
7561 
7562 	if (!priv->hwts_rx_en)
7563 		return -ENODATA;
7564 
7565 	/* For GMAC4/XGMAC, the valid timestamp is in the context (next) desc. */
7566 	if (dwmac_is_xmac(priv->plat->core_type))
7567 		desc_contains_ts = ndesc;
7568 
7569 	/* Check if timestamp is available */
7570 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7571 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7572 		ns -= priv->plat->cdc_error_adj;
7573 		*timestamp = ns_to_ktime(ns);
7574 		return 0;
7575 	}
7576 
7577 	return -ENODATA;
7578 }
7579 
7580 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7581 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7582 };
7583 
7584 static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
7585 				   struct devlink_param_gset_ctx *ctx,
7586 				   struct netlink_ext_ack *extack)
7587 {
7588 	struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
7589 	struct stmmac_priv *priv = dl_priv->stmmac_priv;
7590 
7591 	priv->tsfupdt_coarse = ctx->val.vbool;
7592 
7593 	if (priv->tsfupdt_coarse)
7594 		priv->systime_flags &= ~PTP_TCR_TSCFUPDT;
7595 	else
7596 		priv->systime_flags |= PTP_TCR_TSCFUPDT;
7597 
7598 	/* In coarse mode we can use a smaller sub-second increment, so
7599 	 * reconfigure the systime, sub-second increment and addend.
7600 	 */
7601 	stmmac_update_subsecond_increment(priv);
7602 
7603 	return 0;
7604 }
7605 
7606 static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
7607 				   struct devlink_param_gset_ctx *ctx,
7608 				   struct netlink_ext_ack *extack)
7609 {
7610 	struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
7611 	struct stmmac_priv *priv = dl_priv->stmmac_priv;
7612 
7613 	ctx->val.vbool = priv->tsfupdt_coarse;
7614 
7615 	return 0;
7616 }
7617 
7618 static const struct devlink_param stmmac_devlink_params[] = {
7619 	DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "phc_coarse_adj",
7620 			     DEVLINK_PARAM_TYPE_BOOL,
7621 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
7622 			     stmmac_dl_ts_coarse_get,
7623 			     stmmac_dl_ts_coarse_set, NULL),
7624 };
7625 
7626 /* None of the generic devlink parameters are implemented */
7627 static const struct devlink_ops stmmac_devlink_ops = {};
7628 
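/**
 * stmmac_register_devlink - expose driver parameters over devlink
 * @priv: driver private structure
 * Description: allocate and register a devlink instance carrying the stmmac
 * devlink parameters (currently only the coarse PHC adjustment knob). Only
 * done when hardware timestamping and a valid PTP clock rate are available.
 */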
7629 static int stmmac_register_devlink(struct stmmac_priv *priv)
7630 {
7631 	struct stmmac_devlink_priv *dl_priv;
7632 	int ret;
7633 
7634 	/* For now, what is exposed over devlink is only relevant when
7635 	 * timestamping is available and we have a valid PTP clock rate.
7636 	 */
7637 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) ||
7638 	    !priv->plat->clk_ptp_rate)
7639 		return 0;
7640 
7641 	priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv),
7642 				      priv->device);
7643 	if (!priv->devlink)
7644 		return -ENOMEM;
7645 
7646 	dl_priv = devlink_priv(priv->devlink);
7647 	dl_priv->stmmac_priv = priv;
7648 
7649 	ret = devlink_params_register(priv->devlink, stmmac_devlink_params,
7650 				      ARRAY_SIZE(stmmac_devlink_params));
7651 	if (ret)
7652 		goto dl_free;
7653 
7654 	devlink_register(priv->devlink);
7655 	return 0;
7656 
7657 dl_free:
7658 	devlink_free(priv->devlink);
7659 
7660 	return ret;
7661 }
7662 
7663 static void stmmac_unregister_devlink(struct stmmac_priv *priv)
7664 {
7665 	if (!priv->devlink)
7666 		return;
7667 
7668 	devlink_unregister(priv->devlink);
7669 	devlink_params_unregister(priv->devlink, stmmac_devlink_params,
7670 				  ARRAY_SIZE(stmmac_devlink_params));
7671 	devlink_free(priv->devlink);
7672 }
7673 
7674 struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev)
7675 {
7676 	struct plat_stmmacenet_data *plat_dat;
7677 	int i;
7678 
7679 	plat_dat = devm_kzalloc(dev, sizeof(*plat_dat), GFP_KERNEL);
7680 	if (!plat_dat)
7681 		return NULL;
7682 
7683 	/* Set the defaults:
7684 	 * - phy autodetection
7685 	 * - determine GMII_Address CR field from CSR clock
7686 	 * - allow MTU up to JUMBO_LEN
7687 	 * - hash table size
7688 	 * - one unicast filter entry
7689 	 */
7690 	plat_dat->phy_addr = -1;
7691 	plat_dat->clk_csr = -1;
7692 	plat_dat->maxmtu = JUMBO_LEN;
7693 	plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
7694 	plat_dat->unicast_filter_entries = 1;
7695 
7696 	/* Set the mtl defaults */
7697 	plat_dat->tx_queues_to_use = 1;
7698 	plat_dat->rx_queues_to_use = 1;
7699 
7700 	/* Setup the default RX queue channel map */
7701 	for (i = 0; i < ARRAY_SIZE(plat_dat->rx_queues_cfg); i++)
7702 		plat_dat->rx_queues_cfg[i].chan = i;
7703 
7704 	return plat_dat;
7705 }
7706 EXPORT_SYMBOL_GPL(stmmac_plat_dat_alloc);
7707 
7708 static int __stmmac_dvr_probe(struct device *device,
7709 			      struct plat_stmmacenet_data *plat_dat,
7710 			      struct stmmac_resources *res)
7711 {
7712 	struct net_device *ndev = NULL;
7713 	struct stmmac_priv *priv;
7714 	u32 rxq;
7715 	int i, ret = 0;
7716 
7717 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7718 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7719 	if (!ndev)
7720 		return -ENOMEM;
7721 
7722 	SET_NETDEV_DEV(ndev, device);
7723 
7724 	priv = netdev_priv(ndev);
7725 	priv->device = device;
7726 	priv->dev = ndev;
7727 
7728 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7729 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7730 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7731 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7732 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7733 	}
7734 
7735 	priv->xstats.pcpu_stats =
7736 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7737 	if (!priv->xstats.pcpu_stats)
7738 		return -ENOMEM;
7739 
7740 	stmmac_set_ethtool_ops(ndev);
7741 	priv->pause_time = pause;
7742 	priv->plat = plat_dat;
7743 	priv->ioaddr = res->addr;
7744 	priv->dev->base_addr = (unsigned long)res->addr;
7745 	priv->plat->dma_cfg->multi_msi_en =
7746 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7747 
7748 	priv->dev->irq = res->irq;
7749 	priv->wol_irq = res->wol_irq;
7750 	priv->sfty_irq = res->sfty_irq;
7751 	priv->sfty_ce_irq = res->sfty_ce_irq;
7752 	priv->sfty_ue_irq = res->sfty_ue_irq;
7753 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7754 		priv->rx_irq[i] = res->rx_irq[i];
7755 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7756 		priv->tx_irq[i] = res->tx_irq[i];
7757 
7758 	if (!is_zero_ether_addr(res->mac))
7759 		eth_hw_addr_set(priv->dev, res->mac);
7760 
7761 	dev_set_drvdata(device, priv->dev);
7762 
7763 	/* Verify driver arguments */
7764 	stmmac_verify_args();
7765 
7766 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7767 	if (!priv->af_xdp_zc_qps)
7768 		return -ENOMEM;
7769 
7770 	/* Allocate workqueue */
7771 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7772 	if (!priv->wq) {
7773 		dev_err(priv->device, "failed to create workqueue\n");
7774 		ret = -ENOMEM;
7775 		goto error_wq_init;
7776 	}
7777 
7778 	INIT_WORK(&priv->service_task, stmmac_service_task);
7779 
7780 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7781 
7782 	/* Override with kernel parameters if supplied XXX CRS XXX
7783 	 * this needs to have multiple instances
7784 	 */
7785 	if ((phyaddr >= 0) && (phyaddr <= 31))
7786 		priv->plat->phy_addr = phyaddr;
7787 
7788 	if (priv->plat->stmmac_rst) {
7789 		ret = reset_control_assert(priv->plat->stmmac_rst);
7790 		reset_control_deassert(priv->plat->stmmac_rst);
7791 		/* Some reset controllers only have a reset callback instead
7792 		 * of an assert + deassert callback pair.
7793 		 */
7794 		if (ret == -ENOTSUPP)
7795 			reset_control_reset(priv->plat->stmmac_rst);
7796 	}
7797 
7798 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7799 	if (ret == -ENOTSUPP)
7800 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7801 			ERR_PTR(ret));
7802 
7803 	/* Wait a bit for the reset to take effect */
7804 	udelay(10);
7805 
7806 	/* Init MAC and get the capabilities */
7807 	ret = stmmac_hw_init(priv);
7808 	if (ret)
7809 		goto error_hw_init;
7810 
7811 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7812 	 */
7813 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7814 		priv->plat->dma_cfg->dche = false;
7815 
7816 	stmmac_check_ether_addr(priv);
7817 
7818 	ndev->netdev_ops = &stmmac_netdev_ops;
7819 
7820 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7821 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7822 
7823 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7824 			    NETIF_F_RXCSUM;
7825 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7826 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7827 
7828 	ret = stmmac_tc_init(priv, priv);
7829 	if (!ret) {
7830 		ndev->hw_features |= NETIF_F_HW_TC;
7831 	}
7832 
7833 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7834 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7835 		if (priv->plat->core_type == DWMAC_CORE_GMAC4)
7836 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7837 		priv->tso = true;
7838 		dev_info(priv->device, "TSO feature enabled\n");
7839 	}
7840 
7841 	if (priv->dma_cap.sphen &&
7842 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7843 		ndev->hw_features |= NETIF_F_GRO;
7844 		priv->sph_capable = true;
7845 		priv->sph_active = priv->sph_capable;
7846 		dev_info(priv->device, "SPH feature enabled\n");
7847 	}
7848 
7849 	/* Ideally our host DMA address width is the same as for the
7850 	 * device. However, it may differ and then we have to use our
7851 	 * host DMA width for allocation and the device DMA width for
7852 	 * register handling.
7853 	 */
7854 	if (priv->plat->host_dma_width)
7855 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7856 	else
7857 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7858 
7859 	if (priv->dma_cap.host_dma_width) {
7860 		ret = dma_set_mask_and_coherent(device,
7861 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7862 		if (!ret) {
7863 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7864 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7865 
7866 			/*
7867 			 * If more than 32 bits can be addressed, make sure to
7868 			 * enable enhanced addressing mode.
7869 			 */
7870 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7871 				priv->plat->dma_cfg->eame = true;
7872 		} else {
7873 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7874 			if (ret) {
7875 				dev_err(priv->device, "Failed to set DMA Mask\n");
7876 				goto error_hw_init;
7877 			}
7878 
7879 			priv->dma_cap.host_dma_width = 32;
7880 		}
7881 	}
7882 
7883 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7884 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7885 #ifdef STMMAC_VLAN_TAG_USED
7886 	/* Both mac100 and gmac support receive VLAN tag detection */
7887 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7888 	if (dwmac_is_xmac(priv->plat->core_type)) {
7889 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7890 		priv->hw->hw_vlan_en = true;
7891 	}
7892 	if (priv->dma_cap.vlhash) {
7893 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7894 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7895 	}
7896 	if (priv->dma_cap.vlins)
7897 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7898 #endif
7899 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7900 
7901 	priv->xstats.threshold = tc;
7902 
7903 	/* Initialize RSS */
7904 	rxq = priv->plat->rx_queues_to_use;
7905 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7906 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7907 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7908 
7909 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7910 		ndev->features |= NETIF_F_RXHASH;
7911 
7912 	ndev->vlan_features |= ndev->features;
7913 
7914 	/* MTU range: 46 - hw-specific max */
7915 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7916 
7917 	if (priv->plat->core_type == DWMAC_CORE_XGMAC)
7918 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7919 	else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00)
7920 		ndev->max_mtu = JUMBO_LEN;
7921 	else
7922 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7923 
7924 	/* Warn if the platform's maxmtu is smaller than the minimum MTU,
7925 	 * otherwise clamp the maximum MTU above to the platform's maxmtu.
7926 	 */
7927 	if (priv->plat->maxmtu < ndev->min_mtu)
7928 		dev_warn(priv->device,
7929 			 "%s: warning: maxmtu having invalid value (%d)\n",
7930 			 __func__, priv->plat->maxmtu);
7931 	else if (priv->plat->maxmtu < ndev->max_mtu)
7932 		ndev->max_mtu = priv->plat->maxmtu;
7933 
7934 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7935 
7936 	/* Setup channels NAPI */
7937 	stmmac_napi_add(ndev);
7938 
7939 	mutex_init(&priv->lock);
7940 
7941 	stmmac_fpe_init(priv);
7942 
7943 	stmmac_check_pcs_mode(priv);
7944 
7945 	pm_runtime_get_noresume(device);
7946 	pm_runtime_set_active(device);
7947 	if (!pm_runtime_enabled(device))
7948 		pm_runtime_enable(device);
7949 
7950 	ret = stmmac_mdio_register(ndev);
7951 	if (ret < 0) {
7952 		dev_err_probe(priv->device, ret,
7953 			      "MDIO bus (id: %d) registration failed\n",
7954 			      priv->plat->bus_id);
7955 		goto error_mdio_register;
7956 	}
7957 
7958 	ret = stmmac_pcs_setup(ndev);
7959 	if (ret)
7960 		goto error_pcs_setup;
7961 
7962 	ret = stmmac_phylink_setup(priv);
7963 	if (ret) {
7964 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7965 		goto error_phy_setup;
7966 	}
7967 
7968 	ret = stmmac_register_devlink(priv);
7969 	if (ret)
7970 		goto error_devlink_setup;
7971 
7972 	ret = register_netdev(ndev);
7973 	if (ret) {
7974 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7975 			__func__, ret);
7976 		goto error_netdev_register;
7977 	}
7978 
7979 #ifdef CONFIG_DEBUG_FS
7980 	stmmac_init_fs(ndev);
7981 #endif
7982 
7983 	if (priv->plat->dump_debug_regs)
7984 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7985 
7986 	/* Let pm_runtime_put() disable the clocks.
7987 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7988 	 */
7989 	pm_runtime_put(device);
7990 
7991 	return ret;
7992 
7993 error_netdev_register:
7994 	stmmac_unregister_devlink(priv);
7995 error_devlink_setup:
7996 	phylink_destroy(priv->phylink);
7997 error_phy_setup:
7998 	stmmac_pcs_clean(ndev);
7999 error_pcs_setup:
8000 	stmmac_mdio_unregister(ndev);
8001 error_mdio_register:
8002 	stmmac_napi_del(ndev);
8003 error_hw_init:
8004 	destroy_workqueue(priv->wq);
8005 error_wq_init:
8006 	bitmap_free(priv->af_xdp_zc_qps);
8007 
8008 	return ret;
8009 }
8010 
8011 /**
8012  * stmmac_dvr_probe
8013  * @dev: device pointer
8014  * @plat_dat: platform data pointer
8015  * @res: stmmac resource pointer
8016  * Description: this is the main probe function; it allocates the
8017  * net_device and the driver private structure and registers the
8018  * device.
8019  * Return: 0 on success, otherwise a negative errno.
8020  */
8021 int stmmac_dvr_probe(struct device *dev, struct plat_stmmacenet_data *plat_dat,
8022 		     struct stmmac_resources *res)
8023 {
8024 	int ret;
8025 
8026 	if (plat_dat->init) {
8027 		ret = plat_dat->init(dev, plat_dat->bsp_priv);
8028 		if (ret)
8029 			return ret;
8030 	}
8031 
8032 	ret = __stmmac_dvr_probe(dev, plat_dat, res);
8033 	if (ret && plat_dat->exit)
8034 		plat_dat->exit(dev, plat_dat->bsp_priv);
8035 
8036 	return ret;
8037 }
8038 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
8039 
8040 /**
8041  * stmmac_dvr_remove
8042  * @dev: device pointer
8043  * Description: this function resets the TX/RX processes, disables the MAC
8044  * RX/TX, changes the link status and releases the DMA descriptor rings.
8045  */
8046 void stmmac_dvr_remove(struct device *dev)
8047 {
8048 	struct net_device *ndev = dev_get_drvdata(dev);
8049 	struct stmmac_priv *priv = netdev_priv(ndev);
8050 
8051 	netdev_info(priv->dev, "%s: removing driver", __func__);
8052 
8053 	pm_runtime_get_sync(dev);
8054 
8055 	unregister_netdev(ndev);
8056 
8057 #ifdef CONFIG_DEBUG_FS
8058 	stmmac_exit_fs(ndev);
8059 #endif
8060 	stmmac_unregister_devlink(priv);
8061 
8062 	phylink_destroy(priv->phylink);
8063 	if (priv->plat->stmmac_rst)
8064 		reset_control_assert(priv->plat->stmmac_rst);
8065 	reset_control_assert(priv->plat->stmmac_ahb_rst);
8066 
8067 	stmmac_pcs_clean(ndev);
8068 	stmmac_mdio_unregister(ndev);
8069 
8070 	destroy_workqueue(priv->wq);
8071 	mutex_destroy(&priv->lock);
8072 	bitmap_free(priv->af_xdp_zc_qps);
8073 
8074 	pm_runtime_disable(dev);
8075 	pm_runtime_put_noidle(dev);
8076 
8077 	if (priv->plat->exit)
8078 		priv->plat->exit(dev, priv->plat->bsp_priv);
8079 }
8080 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
8081 
8082 /**
8083  * stmmac_suspend - suspend callback
8084  * @dev: device pointer
8085  * Description: this function suspends the device; it is called by the
8086  * platform driver to stop the network queues, program the PMT register
8087  * (for WoL) and release the driver resources.
8088  */
8089 int stmmac_suspend(struct device *dev)
8090 {
8091 	struct net_device *ndev = dev_get_drvdata(dev);
8092 	struct stmmac_priv *priv = netdev_priv(ndev);
8093 	u32 chan;
8094 
8095 	if (!ndev || !netif_running(ndev))
8096 		goto suspend_bsp;
8097 
8098 	mutex_lock(&priv->lock);
8099 
8100 	netif_device_detach(ndev);
8101 
8102 	stmmac_disable_all_queues(priv);
8103 
8104 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
8105 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
8106 
8107 	if (priv->eee_sw_timer_en) {
8108 		priv->tx_path_in_lpi_mode = false;
8109 		timer_delete_sync(&priv->eee_ctrl_timer);
8110 	}
8111 
8112 	/* Stop TX/RX DMA */
8113 	stmmac_stop_all_dma(priv);
8114 
8115 	stmmac_legacy_serdes_power_down(priv);
8116 
8117 	/* Enable Power down mode by programming the PMT regs */
8118 	if (priv->wolopts) {
8119 		stmmac_pmt(priv, priv->hw, priv->wolopts);
8120 		priv->irq_wake = 1;
8121 	} else {
8122 		stmmac_mac_set(priv, priv->ioaddr, false);
8123 		pinctrl_pm_select_sleep_state(priv->device);
8124 	}
8125 
8126 	mutex_unlock(&priv->lock);
8127 
8128 	rtnl_lock();
8129 	phylink_suspend(priv->phylink, !!priv->wolopts);
8130 	rtnl_unlock();
8131 
8132 	if (stmmac_fpe_supported(priv))
8133 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
8134 
8135 suspend_bsp:
8136 	if (priv->plat->suspend)
8137 		return priv->plat->suspend(dev, priv->plat->bsp_priv);
8138 
8139 	return 0;
8140 }
8141 EXPORT_SYMBOL_GPL(stmmac_suspend);
8142 
8143 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
8144 {
8145 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
8146 
8147 	rx_q->cur_rx = 0;
8148 	rx_q->dirty_rx = 0;
8149 }
8150 
8151 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
8152 {
8153 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
8154 
8155 	tx_q->cur_tx = 0;
8156 	tx_q->dirty_tx = 0;
8157 	tx_q->mss = 0;
8158 
8159 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
8160 }
8161 
8162 /**
8163  * stmmac_reset_queues_param - reset queue parameters
8164  * @priv: device pointer
8165  */
8166 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
8167 {
8168 	u32 rx_cnt = priv->plat->rx_queues_to_use;
8169 	u32 tx_cnt = priv->plat->tx_queues_to_use;
8170 	u32 queue;
8171 
8172 	for (queue = 0; queue < rx_cnt; queue++)
8173 		stmmac_reset_rx_queue(priv, queue);
8174 
8175 	for (queue = 0; queue < tx_cnt; queue++)
8176 		stmmac_reset_tx_queue(priv, queue);
8177 }
8178 
8179 /**
8180  * stmmac_resume - resume callback
8181  * @dev: device pointer
8182  * Description: on resume, this function is invoked to set up the DMA and the
8183  * core in a usable state.
8184  */
8185 int stmmac_resume(struct device *dev)
8186 {
8187 	struct net_device *ndev = dev_get_drvdata(dev);
8188 	struct stmmac_priv *priv = netdev_priv(ndev);
8189 	int ret;
8190 
8191 	if (priv->plat->resume) {
8192 		ret = priv->plat->resume(dev, priv->plat->bsp_priv);
8193 		if (ret)
8194 			return ret;
8195 	}
8196 
8197 	if (!netif_running(ndev))
8198 		return 0;
8199 
8200 	/* The Power Down bit in the PMT register is cleared
8201 	 * automatically as soon as a magic packet or a Wake-up frame
8202 	 * is received. It is nevertheless better to clear this bit
8203 	 * manually because it can cause problems while resuming
8204 	 * from other devices (e.g. a serial console).
8205 	 */
8206 	if (priv->wolopts) {
8207 		mutex_lock(&priv->lock);
8208 		stmmac_pmt(priv, priv->hw, 0);
8209 		mutex_unlock(&priv->lock);
8210 		priv->irq_wake = 0;
8211 	} else {
8212 		pinctrl_pm_select_default_state(priv->device);
8213 		/* reset the phy so that it's ready */
8214 		if (priv->mii)
8215 			stmmac_mdio_reset(priv->mii);
8216 	}
8217 
8218 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)) {
8219 		ret = stmmac_legacy_serdes_power_up(priv);
8220 		if (ret < 0)
8221 			return ret;
8222 	}
8223 
8224 	rtnl_lock();
8225 
8226 	/* Prepare the PHY to resume, ensuring that its clocks, which are
8227 	 * necessary for the MAC DMA reset to complete, are running.
8228 	 */
8229 	phylink_prepare_resume(priv->phylink);
8230 
8231 	mutex_lock(&priv->lock);
8232 
8233 	stmmac_reset_queues_param(priv);
8234 
8235 	stmmac_free_tx_skbufs(priv);
8236 	stmmac_clear_descriptors(priv, &priv->dma_conf);
8237 
8238 	ret = stmmac_hw_setup(ndev);
8239 	if (ret < 0) {
8240 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
8241 		stmmac_legacy_serdes_power_down(priv);
8242 		mutex_unlock(&priv->lock);
8243 		rtnl_unlock();
8244 		return ret;
8245 	}
8246 
8247 	stmmac_init_timestamping(priv);
8248 
8249 	stmmac_init_coalesce(priv);
8250 	phylink_rx_clk_stop_block(priv->phylink);
8251 	stmmac_set_rx_mode(ndev);
8252 
8253 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8254 	phylink_rx_clk_stop_unblock(priv->phylink);
8255 
8256 	stmmac_enable_all_queues(priv);
8257 	stmmac_enable_all_dma_irq(priv);
8258 
8259 	mutex_unlock(&priv->lock);
8260 
8261 	/* phylink_resume() must be called after the hardware has been
8262 	 * initialised because it may bring the link up immediately in a
8263 	 * workqueue thread, which will race with initialisation.
8264 	 */
8265 	phylink_resume(priv->phylink);
8266 	rtnl_unlock();
8267 
8268 	netif_device_attach(ndev);
8269 
8270 	return 0;
8271 }
8272 EXPORT_SYMBOL_GPL(stmmac_resume);
8273 
8274 /* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
8275 DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
8276 EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
8277 
8278 #ifndef MODULE
8279 static int __init stmmac_cmdline_opt(char *str)
8280 {
8281 	char *opt;
8282 
8283 	if (!str || !*str)
8284 		return 1;
8285 	while ((opt = strsep(&str, ",")) != NULL) {
8286 		if (!strncmp(opt, "debug:", 6)) {
8287 			if (kstrtoint(opt + 6, 0, &debug))
8288 				goto err;
8289 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8290 			if (kstrtoint(opt + 8, 0, &phyaddr))
8291 				goto err;
8292 		} else if (!strncmp(opt, "tc:", 3)) {
8293 			if (kstrtoint(opt + 3, 0, &tc))
8294 				goto err;
8295 		} else if (!strncmp(opt, "watchdog:", 9)) {
8296 			if (kstrtoint(opt + 9, 0, &watchdog))
8297 				goto err;
8298 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8299 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8300 				goto err;
8301 		} else if (!strncmp(opt, "pause:", 6)) {
8302 			if (kstrtoint(opt + 6, 0, &pause))
8303 				goto err;
8304 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8305 			if (kstrtoint(opt + 10, 0, &eee_timer))
8306 				goto err;
8307 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8308 			if (kstrtoint(opt + 11, 0, &chain_mode))
8309 				goto err;
8310 		}
8311 	}
8312 	return 1;
8313 
8314 err:
8315 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8316 	return 1;
8317 }
8318 
8319 __setup("stmmaceth=", stmmac_cmdline_opt);
8320 #endif /* MODULE */
8321 
8322 static int __init stmmac_init(void)
8323 {
8324 #ifdef CONFIG_DEBUG_FS
8325 	/* Create debugfs main directory if it doesn't exist yet */
8326 	if (!stmmac_fs_dir)
8327 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8328 	register_netdevice_notifier(&stmmac_notifier);
8329 #endif
8330 
8331 	return 0;
8332 }
8333 
8334 static void __exit stmmac_exit(void)
8335 {
8336 #ifdef CONFIG_DEBUG_FS
8337 	unregister_netdevice_notifier(&stmmac_notifier);
8338 	debugfs_remove_recursive(stmmac_fs_dir);
8339 #endif
8340 }
8341 
8342 module_init(stmmac_init)
8343 module_exit(stmmac_exit)
8344 
8345 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8346 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8347 MODULE_LICENSE("GPL");
8348