xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision fc3a2810412c163b5df1b377d332e048860f45db)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/pm_wakeirq.h>
33 #include <linux/prefetch.h>
34 #include <linux/pinctrl/consumer.h>
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38 #endif /* CONFIG_DEBUG_FS */
39 #include <linux/net_tstamp.h>
40 #include <linux/phylink.h>
41 #include <linux/udp.h>
42 #include <linux/bpf_trace.h>
43 #include <net/page_pool/helpers.h>
44 #include <net/pkt_cls.h>
45 #include <net/xdp_sock_drv.h>
46 #include "stmmac_ptp.h"
47 #include "stmmac_fpe.h"
48 #include "stmmac.h"
49 #include "stmmac_xdp.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 #include "dwxgmac2.h"
54 #include "hwif.h"
55 
56 /* As long as the interface is active, we keep the timestamping counter enabled
57  * with fine resolution and binary rollover. This avoid non-monotonic behavior
58  * (clock jumps) when changing timestamping settings at runtime.
59  */
60 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
61 				 PTP_TCR_TSCTRLSSR)
62 
63 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
64 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
65 
66 /* Module parameters */
67 #define TX_TIMEO	5000
68 static int watchdog = TX_TIMEO;
69 module_param(watchdog, int, 0644);
70 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
71 
72 static int debug = -1;
73 module_param(debug, int, 0644);
74 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
75 
76 static int phyaddr = -1;
77 module_param(phyaddr, int, 0444);
78 MODULE_PARM_DESC(phyaddr, "Physical device address");
79 
80 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
81 
82 /* Limit to make sure XDP TX and slow path can coexist */
83 #define STMMAC_XSK_TX_BUDGET_MAX	256
84 #define STMMAC_TX_XSK_AVAIL		16
85 #define STMMAC_RX_FILL_BATCH		16
86 
87 #define STMMAC_XDP_PASS		0
88 #define STMMAC_XDP_CONSUMED	BIT(0)
89 #define STMMAC_XDP_TX		BIT(1)
90 #define STMMAC_XDP_REDIRECT	BIT(2)
91 
92 static int flow_ctrl = 0xdead;
93 module_param(flow_ctrl, int, 0644);
94 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
95 
96 static int pause = PAUSE_TIME;
97 module_param(pause, int, 0644);
98 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
99 
100 #define TC_DEFAULT 64
101 static int tc = TC_DEFAULT;
102 module_param(tc, int, 0644);
103 MODULE_PARM_DESC(tc, "DMA threshold control value");
104 
105 /* This is unused */
106 #define	DEFAULT_BUFSIZE	1536
107 static int buf_sz = DEFAULT_BUFSIZE;
108 module_param(buf_sz, int, 0644);
109 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, uint, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allow user to force to use the chain instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	struct plat_stmmacenet_data *plat_dat = priv->plat;
153 	int ret;
154 
155 	if (enabled) {
156 		ret = clk_prepare_enable(plat_dat->stmmac_clk);
157 		if (ret)
158 			return ret;
159 		ret = clk_prepare_enable(plat_dat->pclk);
160 		if (ret) {
161 			clk_disable_unprepare(plat_dat->stmmac_clk);
162 			return ret;
163 		}
164 		if (plat_dat->clks_config) {
165 			ret = plat_dat->clks_config(plat_dat->bsp_priv, enabled);
166 			if (ret) {
167 				clk_disable_unprepare(plat_dat->stmmac_clk);
168 				clk_disable_unprepare(plat_dat->pclk);
169 				return ret;
170 			}
171 		}
172 	} else {
173 		clk_disable_unprepare(plat_dat->stmmac_clk);
174 		clk_disable_unprepare(plat_dat->pclk);
175 		if (plat_dat->clks_config)
176 			plat_dat->clks_config(plat_dat->bsp_priv, enabled);
177 	}
178 
179 	return 0;
180 }
181 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
182 
183 /**
184  * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
185  * @bsp_priv: BSP private data structure (unused)
186  * @clk_tx_i: the transmit clock
187  * @interface: the selected interface mode
188  * @speed: the speed that the MAC will be operating at
189  *
190  * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
191  * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
192  * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
193  * the plat_data->set_clk_tx_rate method directly, call it via their own
194  * implementation, or implement their own method should they have more
195  * complex requirements. It is intended to only be used in this method.
196  *
197  * plat_data->clk_tx_i must be filled in.
198  */
199 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
200 			   phy_interface_t interface, int speed)
201 {
202 	long rate = rgmii_clock(speed);
203 
204 	/* Silently ignore unsupported speeds as rgmii_clock() only
205 	 * supports 10, 100 and 1000Mbps. We do not want to spit
206 	 * errors for 2500 and higher speeds here.
207 	 */
208 	if (rate < 0)
209 		return 0;
210 
211 	return clk_set_rate(clk_tx_i, rate);
212 }
213 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
214 
215 /**
216  * stmmac_verify_args - verify the driver parameters.
217  * Description: it checks the driver parameters and set a default in case of
218  * errors.
219  */
220 static void stmmac_verify_args(void)
221 {
222 	if (unlikely(watchdog < 0))
223 		watchdog = TX_TIMEO;
224 	if (unlikely((pause < 0) || (pause > 0xffff)))
225 		pause = PAUSE_TIME;
226 
227 	if (flow_ctrl != 0xdead)
228 		pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
229 }
230 
231 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
232 {
233 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
234 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
235 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
236 	u32 queue;
237 
238 	for (queue = 0; queue < maxq; queue++) {
239 		struct stmmac_channel *ch = &priv->channel[queue];
240 
241 		if (stmmac_xdp_is_enabled(priv) &&
242 		    test_bit(queue, priv->af_xdp_zc_qps)) {
243 			napi_disable(&ch->rxtx_napi);
244 			continue;
245 		}
246 
247 		if (queue < rx_queues_cnt)
248 			napi_disable(&ch->rx_napi);
249 		if (queue < tx_queues_cnt)
250 			napi_disable(&ch->tx_napi);
251 	}
252 }
253 
254 /**
255  * stmmac_disable_all_queues - Disable all queues
256  * @priv: driver private structure
257  */
258 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
259 {
260 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
261 	struct stmmac_rx_queue *rx_q;
262 	u32 queue;
263 
264 	/* synchronize_rcu() needed for pending XDP buffers to drain */
265 	for (queue = 0; queue < rx_queues_cnt; queue++) {
266 		rx_q = &priv->dma_conf.rx_queue[queue];
267 		if (rx_q->xsk_pool) {
268 			synchronize_rcu();
269 			break;
270 		}
271 	}
272 
273 	__stmmac_disable_all_queues(priv);
274 }
275 
276 /**
277  * stmmac_enable_all_queues - Enable all queues
278  * @priv: driver private structure
279  */
280 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
281 {
282 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
283 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
284 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
285 	u32 queue;
286 
287 	for (queue = 0; queue < maxq; queue++) {
288 		struct stmmac_channel *ch = &priv->channel[queue];
289 
290 		if (stmmac_xdp_is_enabled(priv) &&
291 		    test_bit(queue, priv->af_xdp_zc_qps)) {
292 			napi_enable(&ch->rxtx_napi);
293 			continue;
294 		}
295 
296 		if (queue < rx_queues_cnt)
297 			napi_enable(&ch->rx_napi);
298 		if (queue < tx_queues_cnt)
299 			napi_enable(&ch->tx_napi);
300 	}
301 }
302 
303 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
304 {
305 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
306 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
307 		queue_work(priv->wq, &priv->service_task);
308 }
309 
310 static void stmmac_global_err(struct stmmac_priv *priv)
311 {
312 	netif_carrier_off(priv->dev);
313 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
314 	stmmac_service_event_schedule(priv);
315 }
316 
317 static void print_pkt(unsigned char *buf, int len)
318 {
319 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
320 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
321 }
322 
323 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
324 {
325 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
326 	u32 avail;
327 
328 	if (tx_q->dirty_tx > tx_q->cur_tx)
329 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
330 	else
331 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
332 
333 	return avail;
334 }
335 
336 /**
337  * stmmac_rx_dirty - Get RX queue dirty
338  * @priv: driver private structure
339  * @queue: RX queue index
340  */
341 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
342 {
343 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
344 	u32 dirty;
345 
346 	if (rx_q->dirty_rx <= rx_q->cur_rx)
347 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
348 	else
349 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
350 
351 	return dirty;
352 }
353 
354 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
355 {
356 	u32 tx_cnt = priv->plat->tx_queues_to_use;
357 	u32 queue;
358 
359 	/* check if all TX queues have the work finished */
360 	for (queue = 0; queue < tx_cnt; queue++) {
361 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
362 
363 		if (tx_q->dirty_tx != tx_q->cur_tx)
364 			return true; /* still unfinished work */
365 	}
366 
367 	return false;
368 }
369 
370 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
371 {
372 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
373 }
374 
375 /**
376  * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
377  * @priv: driver private structure
378  * Description: this function is to verify and enter in LPI mode in case of
379  * EEE.
380  */
381 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
382 {
383 	if (stmmac_eee_tx_busy(priv)) {
384 		stmmac_restart_sw_lpi_timer(priv);
385 		return;
386 	}
387 
388 	/* Check and enter in LPI mode */
389 	if (!priv->tx_path_in_lpi_mode)
390 		stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
391 				    priv->tx_lpi_clk_stop, 0);
392 }
393 
394 /**
395  * stmmac_stop_sw_lpi - stop transmitting LPI
396  * @priv: driver private structure
397  * Description: When using software-controlled LPI, stop transmitting LPI state.
398  */
399 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
400 {
401 	timer_delete_sync(&priv->eee_ctrl_timer);
402 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
403 	priv->tx_path_in_lpi_mode = false;
404 }
405 
406 /**
407  * stmmac_eee_ctrl_timer - EEE TX SW timer.
408  * @t:  timer_list struct containing private info
409  * Description:
410  *  if there is no data transfer and if we are not in LPI state,
411  *  then MAC Transmitter can be moved to LPI state.
412  */
413 static void stmmac_eee_ctrl_timer(struct timer_list *t)
414 {
415 	struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
416 
417 	stmmac_try_to_start_sw_lpi(priv);
418 }
419 
420 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
421  * @priv: driver private structure
422  * @p : descriptor pointer
423  * @skb : the socket buffer
424  * Description :
425  * This function will read timestamp from the descriptor & pass it to stack.
426  * and also perform some sanity checks.
427  */
428 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
429 				   struct dma_desc *p, struct sk_buff *skb)
430 {
431 	struct skb_shared_hwtstamps shhwtstamp;
432 	bool found = false;
433 	u64 ns = 0;
434 
435 	if (!priv->hwts_tx_en)
436 		return;
437 
438 	/* exit if skb doesn't support hw tstamp */
439 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
440 		return;
441 
442 	/* check tx tstamp status */
443 	if (stmmac_get_tx_timestamp_status(priv, p)) {
444 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
445 		found = true;
446 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
447 		found = true;
448 	}
449 
450 	if (found) {
451 		ns -= priv->plat->cdc_error_adj;
452 
453 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
454 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
455 
456 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
457 		/* pass tstamp to stack */
458 		skb_tstamp_tx(skb, &shhwtstamp);
459 	}
460 }
461 
462 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
463  * @priv: driver private structure
464  * @p : descriptor pointer
465  * @np : next descriptor pointer
466  * @skb : the socket buffer
467  * Description :
468  * This function will read received packet's timestamp from the descriptor
469  * and pass it to stack. It also perform some sanity checks.
470  */
471 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
472 				   struct dma_desc *np, struct sk_buff *skb)
473 {
474 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
475 	struct dma_desc *desc = p;
476 	u64 ns = 0;
477 
478 	if (!priv->hwts_rx_en)
479 		return;
480 	/* For GMAC4, the valid timestamp is from CTX next desc. */
481 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
482 		desc = np;
483 
484 	/* Check if timestamp is available */
485 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
486 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
487 
488 		ns -= priv->plat->cdc_error_adj;
489 
490 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
491 		shhwtstamp = skb_hwtstamps(skb);
492 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
493 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
494 	} else  {
495 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
496 	}
497 }
498 
499 /**
500  *  stmmac_hwtstamp_set - control hardware timestamping.
501  *  @dev: device pointer.
502  *  @config: the timestamping configuration.
503  *  @extack: netlink extended ack structure for error reporting.
504  *  Description:
505  *  This function configures the MAC to enable/disable both outgoing(TX)
506  *  and incoming(RX) packets time stamping based on user input.
507  *  Return Value:
508  *  0 on success and an appropriate -ve integer on failure.
509  */
510 static int stmmac_hwtstamp_set(struct net_device *dev,
511 			       struct kernel_hwtstamp_config *config,
512 			       struct netlink_ext_ack *extack)
513 {
514 	struct stmmac_priv *priv = netdev_priv(dev);
515 	u32 ptp_v2 = 0;
516 	u32 tstamp_all = 0;
517 	u32 ptp_over_ipv4_udp = 0;
518 	u32 ptp_over_ipv6_udp = 0;
519 	u32 ptp_over_ethernet = 0;
520 	u32 snap_type_sel = 0;
521 	u32 ts_master_en = 0;
522 	u32 ts_event_en = 0;
523 
524 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
525 		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
526 		priv->hwts_tx_en = 0;
527 		priv->hwts_rx_en = 0;
528 
529 		return -EOPNOTSUPP;
530 	}
531 
532 	if (!netif_running(dev)) {
533 		NL_SET_ERR_MSG_MOD(extack,
534 				   "Cannot change timestamping configuration while down");
535 		return -ENODEV;
536 	}
537 
538 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
539 		   __func__, config->flags, config->tx_type, config->rx_filter);
540 
541 	if (config->tx_type != HWTSTAMP_TX_OFF &&
542 	    config->tx_type != HWTSTAMP_TX_ON)
543 		return -ERANGE;
544 
545 	if (priv->adv_ts) {
546 		switch (config->rx_filter) {
547 		case HWTSTAMP_FILTER_NONE:
548 			/* time stamp no incoming packet at all */
549 			config->rx_filter = HWTSTAMP_FILTER_NONE;
550 			break;
551 
552 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
553 			/* PTP v1, UDP, any kind of event packet */
554 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
555 			/* 'xmac' hardware can support Sync, Pdelay_Req and
556 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
557 			 * This leaves Delay_Req timestamps out.
558 			 * Enable all events *and* general purpose message
559 			 * timestamping
560 			 */
561 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
562 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
563 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
564 			break;
565 
566 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
567 			/* PTP v1, UDP, Sync packet */
568 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
569 			/* take time stamp for SYNC messages only */
570 			ts_event_en = PTP_TCR_TSEVNTENA;
571 
572 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
573 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
574 			break;
575 
576 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
577 			/* PTP v1, UDP, Delay_req packet */
578 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
579 			/* take time stamp for Delay_Req messages only */
580 			ts_master_en = PTP_TCR_TSMSTRENA;
581 			ts_event_en = PTP_TCR_TSEVNTENA;
582 
583 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585 			break;
586 
587 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
588 			/* PTP v2, UDP, any kind of event packet */
589 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
590 			ptp_v2 = PTP_TCR_TSVER2ENA;
591 			/* take time stamp for all event messages */
592 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
593 
594 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
595 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
596 			break;
597 
598 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
599 			/* PTP v2, UDP, Sync packet */
600 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
601 			ptp_v2 = PTP_TCR_TSVER2ENA;
602 			/* take time stamp for SYNC messages only */
603 			ts_event_en = PTP_TCR_TSEVNTENA;
604 
605 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
606 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
607 			break;
608 
609 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
610 			/* PTP v2, UDP, Delay_req packet */
611 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
612 			ptp_v2 = PTP_TCR_TSVER2ENA;
613 			/* take time stamp for Delay_Req messages only */
614 			ts_master_en = PTP_TCR_TSMSTRENA;
615 			ts_event_en = PTP_TCR_TSEVNTENA;
616 
617 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
618 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
619 			break;
620 
621 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
622 			/* PTP v2/802.AS1 any layer, any kind of event packet */
623 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
624 			ptp_v2 = PTP_TCR_TSVER2ENA;
625 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
626 			if (priv->synopsys_id < DWMAC_CORE_4_10)
627 				ts_event_en = PTP_TCR_TSEVNTENA;
628 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
629 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
630 			ptp_over_ethernet = PTP_TCR_TSIPENA;
631 			break;
632 
633 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
634 			/* PTP v2/802.AS1, any layer, Sync packet */
635 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
636 			ptp_v2 = PTP_TCR_TSVER2ENA;
637 			/* take time stamp for SYNC messages only */
638 			ts_event_en = PTP_TCR_TSEVNTENA;
639 
640 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
641 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
642 			ptp_over_ethernet = PTP_TCR_TSIPENA;
643 			break;
644 
645 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
646 			/* PTP v2/802.AS1, any layer, Delay_req packet */
647 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
648 			ptp_v2 = PTP_TCR_TSVER2ENA;
649 			/* take time stamp for Delay_Req messages only */
650 			ts_master_en = PTP_TCR_TSMSTRENA;
651 			ts_event_en = PTP_TCR_TSEVNTENA;
652 
653 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
654 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
655 			ptp_over_ethernet = PTP_TCR_TSIPENA;
656 			break;
657 
658 		case HWTSTAMP_FILTER_NTP_ALL:
659 		case HWTSTAMP_FILTER_ALL:
660 			/* time stamp any incoming packet */
661 			config->rx_filter = HWTSTAMP_FILTER_ALL;
662 			tstamp_all = PTP_TCR_TSENALL;
663 			break;
664 
665 		default:
666 			return -ERANGE;
667 		}
668 	} else {
669 		switch (config->rx_filter) {
670 		case HWTSTAMP_FILTER_NONE:
671 			config->rx_filter = HWTSTAMP_FILTER_NONE;
672 			break;
673 		default:
674 			/* PTP v1, UDP, any kind of event packet */
675 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
676 			break;
677 		}
678 	}
679 	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
680 	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
681 
682 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
683 
684 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
685 		priv->systime_flags |= tstamp_all | ptp_v2 |
686 				       ptp_over_ethernet | ptp_over_ipv6_udp |
687 				       ptp_over_ipv4_udp | ts_event_en |
688 				       ts_master_en | snap_type_sel;
689 	}
690 
691 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
692 
693 	priv->tstamp_config = *config;
694 
695 	return 0;
696 }
697 
698 /**
699  *  stmmac_hwtstamp_get - read hardware timestamping.
700  *  @dev: device pointer.
701  *  @config: the timestamping configuration.
702  *  Description:
703  *  This function obtain the current hardware timestamping settings
704  *  as requested.
705  */
706 static int stmmac_hwtstamp_get(struct net_device *dev,
707 			       struct kernel_hwtstamp_config *config)
708 {
709 	struct stmmac_priv *priv = netdev_priv(dev);
710 
711 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
712 		return -EOPNOTSUPP;
713 
714 	*config = priv->tstamp_config;
715 
716 	return 0;
717 }
718 
719 /**
720  * stmmac_init_tstamp_counter - init hardware timestamping counter
721  * @priv: driver private structure
722  * @systime_flags: timestamping flags
723  * Description:
724  * Initialize hardware counter for packet timestamping.
725  * This is valid as long as the interface is open and not suspended.
726  * Will be rerun after resuming from suspend, case in which the timestamping
727  * flags updated by stmmac_hwtstamp_set() also need to be restored.
728  */
729 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
730 {
731 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
732 	struct timespec64 now;
733 	u32 sec_inc = 0;
734 	u64 temp = 0;
735 
736 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
737 		return -EOPNOTSUPP;
738 
739 	if (!priv->plat->clk_ptp_rate) {
740 		netdev_err(priv->dev, "Invalid PTP clock rate");
741 		return -EINVAL;
742 	}
743 
744 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
745 	priv->systime_flags = systime_flags;
746 
747 	/* program Sub Second Increment reg */
748 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
749 					   priv->plat->clk_ptp_rate,
750 					   xmac, &sec_inc);
751 	temp = div_u64(1000000000ULL, sec_inc);
752 
753 	/* Store sub second increment for later use */
754 	priv->sub_second_inc = sec_inc;
755 
756 	/* calculate default added value:
757 	 * formula is :
758 	 * addend = (2^32)/freq_div_ratio;
759 	 * where, freq_div_ratio = 1e9ns/sec_inc
760 	 */
761 	temp = (u64)(temp << 32);
762 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
763 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
764 
765 	/* initialize system time */
766 	ktime_get_real_ts64(&now);
767 
768 	/* lower 32 bits of tv_sec are safe until y2106 */
769 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
770 
771 	return 0;
772 }
773 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
774 
775 /**
776  * stmmac_init_ptp - init PTP
777  * @priv: driver private structure
778  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
779  * This is done by looking at the HW cap. register.
780  * This function also registers the ptp driver.
781  */
782 static int stmmac_init_ptp(struct stmmac_priv *priv)
783 {
784 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
785 	int ret;
786 
787 	if (priv->plat->ptp_clk_freq_config)
788 		priv->plat->ptp_clk_freq_config(priv);
789 
790 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
791 	if (ret)
792 		return ret;
793 
794 	priv->adv_ts = 0;
795 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
796 	if (xmac && priv->dma_cap.atime_stamp)
797 		priv->adv_ts = 1;
798 	/* Dwmac 3.x core with extend_desc can support adv_ts */
799 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
800 		priv->adv_ts = 1;
801 
802 	if (priv->dma_cap.time_stamp)
803 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
804 
805 	if (priv->adv_ts)
806 		netdev_info(priv->dev,
807 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
808 
809 	priv->hwts_tx_en = 0;
810 	priv->hwts_rx_en = 0;
811 
812 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
813 		stmmac_hwtstamp_correct_latency(priv, priv);
814 
815 	return 0;
816 }
817 
818 static void stmmac_release_ptp(struct stmmac_priv *priv)
819 {
820 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
821 	stmmac_ptp_unregister(priv);
822 }
823 
824 /**
825  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
826  *  @priv: driver private structure
827  *  @duplex: duplex passed to the next function
828  *  @flow_ctrl: desired flow control modes
829  *  Description: It is used for configuring the flow control in all queues
830  */
831 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
832 				 unsigned int flow_ctrl)
833 {
834 	u32 tx_cnt = priv->plat->tx_queues_to_use;
835 
836 	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
837 			 tx_cnt);
838 }
839 
840 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
841 					 phy_interface_t interface)
842 {
843 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
844 
845 	/* Refresh the MAC-specific capabilities */
846 	stmmac_mac_update_caps(priv);
847 
848 	config->mac_capabilities = priv->hw->link.caps;
849 
850 	if (priv->plat->max_speed)
851 		phylink_limit_mac_speed(config, priv->plat->max_speed);
852 
853 	return config->mac_capabilities;
854 }
855 
856 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
857 						 phy_interface_t interface)
858 {
859 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
860 	struct phylink_pcs *pcs;
861 
862 	if (priv->plat->select_pcs) {
863 		pcs = priv->plat->select_pcs(priv, interface);
864 		if (!IS_ERR(pcs))
865 			return pcs;
866 	}
867 
868 	return NULL;
869 }
870 
871 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
872 			      const struct phylink_link_state *state)
873 {
874 	/* Nothing to do, xpcs_config() handles everything */
875 }
876 
877 static void stmmac_mac_link_down(struct phylink_config *config,
878 				 unsigned int mode, phy_interface_t interface)
879 {
880 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
881 
882 	stmmac_mac_set(priv, priv->ioaddr, false);
883 	if (priv->dma_cap.eee)
884 		stmmac_set_eee_pls(priv, priv->hw, false);
885 
886 	if (stmmac_fpe_supported(priv))
887 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
888 }
889 
890 static void stmmac_mac_link_up(struct phylink_config *config,
891 			       struct phy_device *phy,
892 			       unsigned int mode, phy_interface_t interface,
893 			       int speed, int duplex,
894 			       bool tx_pause, bool rx_pause)
895 {
896 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
897 	unsigned int flow_ctrl;
898 	u32 old_ctrl, ctrl;
899 	int ret;
900 
901 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
902 	    priv->plat->serdes_powerup)
903 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
904 
905 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
906 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
907 
908 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
909 		switch (speed) {
910 		case SPEED_10000:
911 			ctrl |= priv->hw->link.xgmii.speed10000;
912 			break;
913 		case SPEED_5000:
914 			ctrl |= priv->hw->link.xgmii.speed5000;
915 			break;
916 		case SPEED_2500:
917 			ctrl |= priv->hw->link.xgmii.speed2500;
918 			break;
919 		default:
920 			return;
921 		}
922 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
923 		switch (speed) {
924 		case SPEED_100000:
925 			ctrl |= priv->hw->link.xlgmii.speed100000;
926 			break;
927 		case SPEED_50000:
928 			ctrl |= priv->hw->link.xlgmii.speed50000;
929 			break;
930 		case SPEED_40000:
931 			ctrl |= priv->hw->link.xlgmii.speed40000;
932 			break;
933 		case SPEED_25000:
934 			ctrl |= priv->hw->link.xlgmii.speed25000;
935 			break;
936 		case SPEED_10000:
937 			ctrl |= priv->hw->link.xgmii.speed10000;
938 			break;
939 		case SPEED_2500:
940 			ctrl |= priv->hw->link.speed2500;
941 			break;
942 		case SPEED_1000:
943 			ctrl |= priv->hw->link.speed1000;
944 			break;
945 		default:
946 			return;
947 		}
948 	} else {
949 		switch (speed) {
950 		case SPEED_2500:
951 			ctrl |= priv->hw->link.speed2500;
952 			break;
953 		case SPEED_1000:
954 			ctrl |= priv->hw->link.speed1000;
955 			break;
956 		case SPEED_100:
957 			ctrl |= priv->hw->link.speed100;
958 			break;
959 		case SPEED_10:
960 			ctrl |= priv->hw->link.speed10;
961 			break;
962 		default:
963 			return;
964 		}
965 	}
966 
967 	if (priv->plat->fix_mac_speed)
968 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
969 
970 	if (!duplex)
971 		ctrl &= ~priv->hw->link.duplex;
972 	else
973 		ctrl |= priv->hw->link.duplex;
974 
975 	/* Flow Control operation */
976 	if (rx_pause && tx_pause)
977 		flow_ctrl = FLOW_AUTO;
978 	else if (rx_pause && !tx_pause)
979 		flow_ctrl = FLOW_RX;
980 	else if (!rx_pause && tx_pause)
981 		flow_ctrl = FLOW_TX;
982 	else
983 		flow_ctrl = FLOW_OFF;
984 
985 	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
986 
987 	if (ctrl != old_ctrl)
988 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
989 
990 	if (priv->plat->set_clk_tx_rate) {
991 		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
992 						priv->plat->clk_tx_i,
993 						interface, speed);
994 		if (ret < 0)
995 			netdev_err(priv->dev,
996 				   "failed to configure %s transmit clock for %dMbps: %pe\n",
997 				   phy_modes(interface), speed, ERR_PTR(ret));
998 	}
999 
1000 	stmmac_mac_set(priv, priv->ioaddr, true);
1001 	if (priv->dma_cap.eee)
1002 		stmmac_set_eee_pls(priv, priv->hw, true);
1003 
1004 	if (stmmac_fpe_supported(priv))
1005 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
1006 
1007 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1008 		stmmac_hwtstamp_correct_latency(priv, priv);
1009 }
1010 
1011 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1012 {
1013 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1014 
1015 	priv->eee_active = false;
1016 
1017 	mutex_lock(&priv->lock);
1018 
1019 	priv->eee_enabled = false;
1020 
1021 	netdev_dbg(priv->dev, "disable EEE\n");
1022 	priv->eee_sw_timer_en = false;
1023 	timer_delete_sync(&priv->eee_ctrl_timer);
1024 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1025 	priv->tx_path_in_lpi_mode = false;
1026 
1027 	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1028 	mutex_unlock(&priv->lock);
1029 }
1030 
1031 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1032 				    bool tx_clk_stop)
1033 {
1034 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1035 	int ret;
1036 
1037 	priv->tx_lpi_timer = timer;
1038 	priv->eee_active = true;
1039 
1040 	mutex_lock(&priv->lock);
1041 
1042 	priv->eee_enabled = true;
1043 
1044 	/* Update the transmit clock stop according to PHY capability if
1045 	 * the platform allows
1046 	 */
1047 	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1048 		priv->tx_lpi_clk_stop = tx_clk_stop;
1049 
1050 	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1051 			     STMMAC_DEFAULT_TWT_LS);
1052 
1053 	/* Try to cnfigure the hardware timer. */
1054 	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1055 				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1056 
1057 	if (ret) {
1058 		/* Hardware timer mode not supported, or value out of range.
1059 		 * Fall back to using software LPI mode
1060 		 */
1061 		priv->eee_sw_timer_en = true;
1062 		stmmac_restart_sw_lpi_timer(priv);
1063 	}
1064 
1065 	mutex_unlock(&priv->lock);
1066 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1067 
1068 	return 0;
1069 }
1070 
1071 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1072 			     phy_interface_t interface)
1073 {
1074 	struct net_device *ndev = to_net_dev(config->dev);
1075 	struct stmmac_priv *priv = netdev_priv(ndev);
1076 
1077 	if (priv->plat->mac_finish)
1078 		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1079 
1080 	return 0;
1081 }
1082 
1083 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1084 	.mac_get_caps = stmmac_mac_get_caps,
1085 	.mac_select_pcs = stmmac_mac_select_pcs,
1086 	.mac_config = stmmac_mac_config,
1087 	.mac_link_down = stmmac_mac_link_down,
1088 	.mac_link_up = stmmac_mac_link_up,
1089 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1090 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1091 	.mac_finish = stmmac_mac_finish,
1092 };
1093 
1094 /**
1095  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1096  * @priv: driver private structure
1097  * Description: this is to verify if the HW supports the PCS.
1098  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1099  * configured for the TBI, RTBI, or SGMII PHY interface.
1100  */
1101 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1102 {
1103 	int interface = priv->plat->mac_interface;
1104 
1105 	if (priv->dma_cap.pcs) {
1106 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1107 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1108 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1109 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1110 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1111 			priv->hw->pcs = STMMAC_PCS_RGMII;
1112 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1113 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1114 			priv->hw->pcs = STMMAC_PCS_SGMII;
1115 		}
1116 	}
1117 }
1118 
1119 /**
1120  * stmmac_init_phy - PHY initialization
1121  * @dev: net device structure
1122  * Description: it initializes the driver's PHY state, and attaches the PHY
1123  * to the mac driver.
1124  *  Return value:
1125  *  0 on success
1126  */
1127 static int stmmac_init_phy(struct net_device *dev)
1128 {
1129 	struct stmmac_priv *priv = netdev_priv(dev);
1130 	struct fwnode_handle *phy_fwnode;
1131 	struct fwnode_handle *fwnode;
1132 	int ret;
1133 
1134 	if (!phylink_expects_phy(priv->phylink))
1135 		return 0;
1136 
1137 	fwnode = priv->plat->port_node;
1138 	if (!fwnode)
1139 		fwnode = dev_fwnode(priv->device);
1140 
1141 	if (fwnode)
1142 		phy_fwnode = fwnode_get_phy_node(fwnode);
1143 	else
1144 		phy_fwnode = NULL;
1145 
1146 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1147 	 * manually parse it
1148 	 */
1149 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1150 		int addr = priv->plat->phy_addr;
1151 		struct phy_device *phydev;
1152 
1153 		if (addr < 0) {
1154 			netdev_err(priv->dev, "no phy found\n");
1155 			return -ENODEV;
1156 		}
1157 
1158 		phydev = mdiobus_get_phy(priv->mii, addr);
1159 		if (!phydev) {
1160 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1161 			return -ENODEV;
1162 		}
1163 
1164 		ret = phylink_connect_phy(priv->phylink, phydev);
1165 	} else {
1166 		fwnode_handle_put(phy_fwnode);
1167 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1168 	}
1169 
1170 	if (ret == 0) {
1171 		struct ethtool_keee eee;
1172 
1173 		/* Configure phylib's copy of the LPI timer. Normally,
1174 		 * phylink_config.lpi_timer_default would do this, but there is
1175 		 * a chance that userspace could change the eee_timer setting
1176 		 * via sysfs before the first open. Thus, preserve existing
1177 		 * behaviour.
1178 		 */
1179 		if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1180 			eee.tx_lpi_timer = priv->tx_lpi_timer;
1181 			phylink_ethtool_set_eee(priv->phylink, &eee);
1182 		}
1183 	}
1184 
1185 	if (!priv->plat->pmt) {
1186 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1187 
1188 		phylink_ethtool_get_wol(priv->phylink, &wol);
1189 		device_set_wakeup_capable(priv->device, !!wol.supported);
1190 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1191 	}
1192 
1193 	return ret;
1194 }
1195 
1196 static int stmmac_phy_setup(struct stmmac_priv *priv)
1197 {
1198 	struct stmmac_mdio_bus_data *mdio_bus_data;
1199 	struct phylink_config *config;
1200 	struct fwnode_handle *fwnode;
1201 	struct phylink_pcs *pcs;
1202 	struct phylink *phylink;
1203 
1204 	config = &priv->phylink_config;
1205 
1206 	config->dev = &priv->dev->dev;
1207 	config->type = PHYLINK_NETDEV;
1208 	config->mac_managed_pm = true;
1209 
1210 	/* Stmmac always requires an RX clock for hardware initialization */
1211 	config->mac_requires_rxc = true;
1212 
1213 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1214 		config->eee_rx_clk_stop_enable = true;
1215 
1216 	/* Set the default transmit clock stop bit based on the platform glue */
1217 	priv->tx_lpi_clk_stop = priv->plat->flags &
1218 				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1219 
1220 	mdio_bus_data = priv->plat->mdio_bus_data;
1221 	if (mdio_bus_data)
1222 		config->default_an_inband = mdio_bus_data->default_an_inband;
1223 
1224 	/* Get the PHY interface modes (at the PHY end of the link) that
1225 	 * are supported by the platform.
1226 	 */
1227 	if (priv->plat->get_interfaces)
1228 		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1229 					   config->supported_interfaces);
1230 
1231 	/* Set the platform/firmware specified interface mode if the
1232 	 * supported interfaces have not already been provided using
1233 	 * phy_interface as a last resort.
1234 	 */
1235 	if (phy_interface_empty(config->supported_interfaces))
1236 		__set_bit(priv->plat->phy_interface,
1237 			  config->supported_interfaces);
1238 
1239 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1240 	if (priv->hw->xpcs)
1241 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1242 	else
1243 		pcs = priv->hw->phylink_pcs;
1244 
1245 	if (pcs)
1246 		phy_interface_or(config->supported_interfaces,
1247 				 config->supported_interfaces,
1248 				 pcs->supported_interfaces);
1249 
1250 	if (priv->dma_cap.eee) {
1251 		/* Assume all supported interfaces also support LPI */
1252 		memcpy(config->lpi_interfaces, config->supported_interfaces,
1253 		       sizeof(config->lpi_interfaces));
1254 
1255 		/* All full duplex speeds above 100Mbps are supported */
1256 		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
1257 		config->lpi_timer_default = eee_timer * 1000;
1258 		config->eee_enabled_default = true;
1259 	}
1260 
1261 	fwnode = priv->plat->port_node;
1262 	if (!fwnode)
1263 		fwnode = dev_fwnode(priv->device);
1264 
1265 	phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1266 				 &stmmac_phylink_mac_ops);
1267 	if (IS_ERR(phylink))
1268 		return PTR_ERR(phylink);
1269 
1270 	priv->phylink = phylink;
1271 	return 0;
1272 }
1273 
1274 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1275 				    struct stmmac_dma_conf *dma_conf)
1276 {
1277 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1278 	unsigned int desc_size;
1279 	void *head_rx;
1280 	u32 queue;
1281 
1282 	/* Display RX rings */
1283 	for (queue = 0; queue < rx_cnt; queue++) {
1284 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1285 
1286 		pr_info("\tRX Queue %u rings\n", queue);
1287 
1288 		if (priv->extend_desc) {
1289 			head_rx = (void *)rx_q->dma_erx;
1290 			desc_size = sizeof(struct dma_extended_desc);
1291 		} else {
1292 			head_rx = (void *)rx_q->dma_rx;
1293 			desc_size = sizeof(struct dma_desc);
1294 		}
1295 
1296 		/* Display RX ring */
1297 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1298 				    rx_q->dma_rx_phy, desc_size);
1299 	}
1300 }
1301 
1302 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1303 				    struct stmmac_dma_conf *dma_conf)
1304 {
1305 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1306 	unsigned int desc_size;
1307 	void *head_tx;
1308 	u32 queue;
1309 
1310 	/* Display TX rings */
1311 	for (queue = 0; queue < tx_cnt; queue++) {
1312 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1313 
1314 		pr_info("\tTX Queue %d rings\n", queue);
1315 
1316 		if (priv->extend_desc) {
1317 			head_tx = (void *)tx_q->dma_etx;
1318 			desc_size = sizeof(struct dma_extended_desc);
1319 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1320 			head_tx = (void *)tx_q->dma_entx;
1321 			desc_size = sizeof(struct dma_edesc);
1322 		} else {
1323 			head_tx = (void *)tx_q->dma_tx;
1324 			desc_size = sizeof(struct dma_desc);
1325 		}
1326 
1327 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1328 				    tx_q->dma_tx_phy, desc_size);
1329 	}
1330 }
1331 
1332 static void stmmac_display_rings(struct stmmac_priv *priv,
1333 				 struct stmmac_dma_conf *dma_conf)
1334 {
1335 	/* Display RX ring */
1336 	stmmac_display_rx_rings(priv, dma_conf);
1337 
1338 	/* Display TX ring */
1339 	stmmac_display_tx_rings(priv, dma_conf);
1340 }
1341 
1342 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1343 {
1344 	if (stmmac_xdp_is_enabled(priv))
1345 		return XDP_PACKET_HEADROOM;
1346 
1347 	return NET_SKB_PAD;
1348 }
1349 
1350 static int stmmac_set_bfsize(int mtu, int bufsize)
1351 {
1352 	int ret = bufsize;
1353 
1354 	if (mtu >= BUF_SIZE_8KiB)
1355 		ret = BUF_SIZE_16KiB;
1356 	else if (mtu >= BUF_SIZE_4KiB)
1357 		ret = BUF_SIZE_8KiB;
1358 	else if (mtu >= BUF_SIZE_2KiB)
1359 		ret = BUF_SIZE_4KiB;
1360 	else if (mtu > DEFAULT_BUFSIZE)
1361 		ret = BUF_SIZE_2KiB;
1362 	else
1363 		ret = DEFAULT_BUFSIZE;
1364 
1365 	return ret;
1366 }
1367 
1368 /**
1369  * stmmac_clear_rx_descriptors - clear RX descriptors
1370  * @priv: driver private structure
1371  * @dma_conf: structure to take the dma data
1372  * @queue: RX queue index
1373  * Description: this function is called to clear the RX descriptors
1374  * in case of both basic and extended descriptors are used.
1375  */
1376 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1377 					struct stmmac_dma_conf *dma_conf,
1378 					u32 queue)
1379 {
1380 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1381 	int i;
1382 
1383 	/* Clear the RX descriptors */
1384 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1385 		if (priv->extend_desc)
1386 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1387 					priv->use_riwt, priv->mode,
1388 					(i == dma_conf->dma_rx_size - 1),
1389 					dma_conf->dma_buf_sz);
1390 		else
1391 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1392 					priv->use_riwt, priv->mode,
1393 					(i == dma_conf->dma_rx_size - 1),
1394 					dma_conf->dma_buf_sz);
1395 }
1396 
1397 /**
1398  * stmmac_clear_tx_descriptors - clear tx descriptors
1399  * @priv: driver private structure
1400  * @dma_conf: structure to take the dma data
1401  * @queue: TX queue index.
1402  * Description: this function is called to clear the TX descriptors
1403  * in case of both basic and extended descriptors are used.
1404  */
1405 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1406 					struct stmmac_dma_conf *dma_conf,
1407 					u32 queue)
1408 {
1409 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1410 	int i;
1411 
1412 	/* Clear the TX descriptors */
1413 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1414 		int last = (i == (dma_conf->dma_tx_size - 1));
1415 		struct dma_desc *p;
1416 
1417 		if (priv->extend_desc)
1418 			p = &tx_q->dma_etx[i].basic;
1419 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1420 			p = &tx_q->dma_entx[i].basic;
1421 		else
1422 			p = &tx_q->dma_tx[i];
1423 
1424 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1425 	}
1426 }
1427 
1428 /**
1429  * stmmac_clear_descriptors - clear descriptors
1430  * @priv: driver private structure
1431  * @dma_conf: structure to take the dma data
1432  * Description: this function is called to clear the TX and RX descriptors
1433  * in case of both basic and extended descriptors are used.
1434  */
1435 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1436 				     struct stmmac_dma_conf *dma_conf)
1437 {
1438 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1439 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1440 	u32 queue;
1441 
1442 	/* Clear the RX descriptors */
1443 	for (queue = 0; queue < rx_queue_cnt; queue++)
1444 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1445 
1446 	/* Clear the TX descriptors */
1447 	for (queue = 0; queue < tx_queue_cnt; queue++)
1448 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1449 }
1450 
1451 /**
1452  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1453  * @priv: driver private structure
1454  * @dma_conf: structure to take the dma data
1455  * @p: descriptor pointer
1456  * @i: descriptor index
1457  * @flags: gfp flag
1458  * @queue: RX queue index
1459  * Description: this function is called to allocate a receive buffer, perform
1460  * the DMA mapping and init the descriptor.
1461  */
1462 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1463 				  struct stmmac_dma_conf *dma_conf,
1464 				  struct dma_desc *p,
1465 				  int i, gfp_t flags, u32 queue)
1466 {
1467 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1468 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1469 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1470 
1471 	if (priv->dma_cap.host_dma_width <= 32)
1472 		gfp |= GFP_DMA32;
1473 
1474 	if (!buf->page) {
1475 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1476 		if (!buf->page)
1477 			return -ENOMEM;
1478 		buf->page_offset = stmmac_rx_offset(priv);
1479 	}
1480 
1481 	if (priv->sph && !buf->sec_page) {
1482 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1483 		if (!buf->sec_page)
1484 			return -ENOMEM;
1485 
1486 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1487 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1488 	} else {
1489 		buf->sec_page = NULL;
1490 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1491 	}
1492 
1493 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1494 
1495 	stmmac_set_desc_addr(priv, p, buf->addr);
1496 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1497 		stmmac_init_desc3(priv, p);
1498 
1499 	return 0;
1500 }
1501 
1502 /**
1503  * stmmac_free_rx_buffer - free RX dma buffers
1504  * @priv: private structure
1505  * @rx_q: RX queue
1506  * @i: buffer index.
1507  */
1508 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1509 				  struct stmmac_rx_queue *rx_q,
1510 				  int i)
1511 {
1512 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1513 
1514 	if (buf->page)
1515 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1516 	buf->page = NULL;
1517 
1518 	if (buf->sec_page)
1519 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1520 	buf->sec_page = NULL;
1521 }
1522 
1523 /**
1524  * stmmac_free_tx_buffer - free RX dma buffers
1525  * @priv: private structure
1526  * @dma_conf: structure to take the dma data
1527  * @queue: RX queue index
1528  * @i: buffer index.
1529  */
1530 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1531 				  struct stmmac_dma_conf *dma_conf,
1532 				  u32 queue, int i)
1533 {
1534 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1535 
1536 	if (tx_q->tx_skbuff_dma[i].buf &&
1537 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1538 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1539 			dma_unmap_page(priv->device,
1540 				       tx_q->tx_skbuff_dma[i].buf,
1541 				       tx_q->tx_skbuff_dma[i].len,
1542 				       DMA_TO_DEVICE);
1543 		else
1544 			dma_unmap_single(priv->device,
1545 					 tx_q->tx_skbuff_dma[i].buf,
1546 					 tx_q->tx_skbuff_dma[i].len,
1547 					 DMA_TO_DEVICE);
1548 	}
1549 
1550 	if (tx_q->xdpf[i] &&
1551 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1552 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1553 		xdp_return_frame(tx_q->xdpf[i]);
1554 		tx_q->xdpf[i] = NULL;
1555 	}
1556 
1557 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1558 		tx_q->xsk_frames_done++;
1559 
1560 	if (tx_q->tx_skbuff[i] &&
1561 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1562 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1563 		tx_q->tx_skbuff[i] = NULL;
1564 	}
1565 
1566 	tx_q->tx_skbuff_dma[i].buf = 0;
1567 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1568 }
1569 
1570 /**
1571  * dma_free_rx_skbufs - free RX dma buffers
1572  * @priv: private structure
1573  * @dma_conf: structure to take the dma data
1574  * @queue: RX queue index
1575  */
1576 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1577 			       struct stmmac_dma_conf *dma_conf,
1578 			       u32 queue)
1579 {
1580 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1581 	int i;
1582 
1583 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1584 		stmmac_free_rx_buffer(priv, rx_q, i);
1585 }
1586 
1587 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1588 				   struct stmmac_dma_conf *dma_conf,
1589 				   u32 queue, gfp_t flags)
1590 {
1591 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1592 	int i;
1593 
1594 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1595 		struct dma_desc *p;
1596 		int ret;
1597 
1598 		if (priv->extend_desc)
1599 			p = &((rx_q->dma_erx + i)->basic);
1600 		else
1601 			p = rx_q->dma_rx + i;
1602 
1603 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1604 					     queue);
1605 		if (ret)
1606 			return ret;
1607 
1608 		rx_q->buf_alloc_num++;
1609 	}
1610 
1611 	return 0;
1612 }
1613 
1614 /**
1615  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1616  * @priv: private structure
1617  * @dma_conf: structure to take the dma data
1618  * @queue: RX queue index
1619  */
1620 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1621 				struct stmmac_dma_conf *dma_conf,
1622 				u32 queue)
1623 {
1624 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1625 	int i;
1626 
1627 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1628 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1629 
1630 		if (!buf->xdp)
1631 			continue;
1632 
1633 		xsk_buff_free(buf->xdp);
1634 		buf->xdp = NULL;
1635 	}
1636 }
1637 
1638 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1639 				      struct stmmac_dma_conf *dma_conf,
1640 				      u32 queue)
1641 {
1642 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1643 	int i;
1644 
1645 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1646 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1647 	 * use this macro to make sure no size violations.
1648 	 */
1649 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1650 
1651 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1652 		struct stmmac_rx_buffer *buf;
1653 		dma_addr_t dma_addr;
1654 		struct dma_desc *p;
1655 
1656 		if (priv->extend_desc)
1657 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1658 		else
1659 			p = rx_q->dma_rx + i;
1660 
1661 		buf = &rx_q->buf_pool[i];
1662 
1663 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1664 		if (!buf->xdp)
1665 			return -ENOMEM;
1666 
1667 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1668 		stmmac_set_desc_addr(priv, p, dma_addr);
1669 		rx_q->buf_alloc_num++;
1670 	}
1671 
1672 	return 0;
1673 }
1674 
1675 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1676 {
1677 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1678 		return NULL;
1679 
1680 	return xsk_get_pool_from_qid(priv->dev, queue);
1681 }
1682 
1683 /**
1684  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1685  * @priv: driver private structure
1686  * @dma_conf: structure to take the dma data
1687  * @queue: RX queue index
1688  * @flags: gfp flag.
1689  * Description: this function initializes the DMA RX descriptors
1690  * and allocates the socket buffers. It supports the chained and ring
1691  * modes.
1692  */
1693 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1694 				    struct stmmac_dma_conf *dma_conf,
1695 				    u32 queue, gfp_t flags)
1696 {
1697 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1698 	int ret;
1699 
1700 	netif_dbg(priv, probe, priv->dev,
1701 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1702 		  (u32)rx_q->dma_rx_phy);
1703 
1704 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1705 
1706 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1707 
1708 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1709 
1710 	if (rx_q->xsk_pool) {
1711 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1712 						   MEM_TYPE_XSK_BUFF_POOL,
1713 						   NULL));
1714 		netdev_info(priv->dev,
1715 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1716 			    rx_q->queue_index);
1717 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1718 	} else {
1719 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1720 						   MEM_TYPE_PAGE_POOL,
1721 						   rx_q->page_pool));
1722 		netdev_info(priv->dev,
1723 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1724 			    rx_q->queue_index);
1725 	}
1726 
1727 	if (rx_q->xsk_pool) {
1728 		/* RX XDP ZC buffer pool may not be populated, e.g.
1729 		 * xdpsock TX-only.
1730 		 */
1731 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1732 	} else {
1733 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1734 		if (ret < 0)
1735 			return -ENOMEM;
1736 	}
1737 
1738 	/* Setup the chained descriptor addresses */
1739 	if (priv->mode == STMMAC_CHAIN_MODE) {
1740 		if (priv->extend_desc)
1741 			stmmac_mode_init(priv, rx_q->dma_erx,
1742 					 rx_q->dma_rx_phy,
1743 					 dma_conf->dma_rx_size, 1);
1744 		else
1745 			stmmac_mode_init(priv, rx_q->dma_rx,
1746 					 rx_q->dma_rx_phy,
1747 					 dma_conf->dma_rx_size, 0);
1748 	}
1749 
1750 	return 0;
1751 }
1752 
1753 static int init_dma_rx_desc_rings(struct net_device *dev,
1754 				  struct stmmac_dma_conf *dma_conf,
1755 				  gfp_t flags)
1756 {
1757 	struct stmmac_priv *priv = netdev_priv(dev);
1758 	u32 rx_count = priv->plat->rx_queues_to_use;
1759 	int queue;
1760 	int ret;
1761 
1762 	/* RX INITIALIZATION */
1763 	netif_dbg(priv, probe, priv->dev,
1764 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1765 
1766 	for (queue = 0; queue < rx_count; queue++) {
1767 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1768 		if (ret)
1769 			goto err_init_rx_buffers;
1770 	}
1771 
1772 	return 0;
1773 
1774 err_init_rx_buffers:
1775 	while (queue >= 0) {
1776 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1777 
1778 		if (rx_q->xsk_pool)
1779 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1780 		else
1781 			dma_free_rx_skbufs(priv, dma_conf, queue);
1782 
1783 		rx_q->buf_alloc_num = 0;
1784 		rx_q->xsk_pool = NULL;
1785 
1786 		queue--;
1787 	}
1788 
1789 	return ret;
1790 }
1791 
1792 /**
1793  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1794  * @priv: driver private structure
1795  * @dma_conf: structure to take the dma data
1796  * @queue: TX queue index
1797  * Description: this function initializes the DMA TX descriptors
1798  * and allocates the socket buffers. It supports the chained and ring
1799  * modes.
1800  */
1801 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1802 				    struct stmmac_dma_conf *dma_conf,
1803 				    u32 queue)
1804 {
1805 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1806 	int i;
1807 
1808 	netif_dbg(priv, probe, priv->dev,
1809 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1810 		  (u32)tx_q->dma_tx_phy);
1811 
1812 	/* Setup the chained descriptor addresses */
1813 	if (priv->mode == STMMAC_CHAIN_MODE) {
1814 		if (priv->extend_desc)
1815 			stmmac_mode_init(priv, tx_q->dma_etx,
1816 					 tx_q->dma_tx_phy,
1817 					 dma_conf->dma_tx_size, 1);
1818 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1819 			stmmac_mode_init(priv, tx_q->dma_tx,
1820 					 tx_q->dma_tx_phy,
1821 					 dma_conf->dma_tx_size, 0);
1822 	}
1823 
1824 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1825 
1826 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1827 		struct dma_desc *p;
1828 
1829 		if (priv->extend_desc)
1830 			p = &((tx_q->dma_etx + i)->basic);
1831 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1832 			p = &((tx_q->dma_entx + i)->basic);
1833 		else
1834 			p = tx_q->dma_tx + i;
1835 
1836 		stmmac_clear_desc(priv, p);
1837 
1838 		tx_q->tx_skbuff_dma[i].buf = 0;
1839 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1840 		tx_q->tx_skbuff_dma[i].len = 0;
1841 		tx_q->tx_skbuff_dma[i].last_segment = false;
1842 		tx_q->tx_skbuff[i] = NULL;
1843 	}
1844 
1845 	return 0;
1846 }
1847 
1848 static int init_dma_tx_desc_rings(struct net_device *dev,
1849 				  struct stmmac_dma_conf *dma_conf)
1850 {
1851 	struct stmmac_priv *priv = netdev_priv(dev);
1852 	u32 tx_queue_cnt;
1853 	u32 queue;
1854 
1855 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1856 
1857 	for (queue = 0; queue < tx_queue_cnt; queue++)
1858 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1859 
1860 	return 0;
1861 }
1862 
1863 /**
1864  * init_dma_desc_rings - init the RX/TX descriptor rings
1865  * @dev: net device structure
1866  * @dma_conf: structure to take the dma data
1867  * @flags: gfp flag.
1868  * Description: this function initializes the DMA RX/TX descriptors
1869  * and allocates the socket buffers. It supports the chained and ring
1870  * modes.
1871  */
1872 static int init_dma_desc_rings(struct net_device *dev,
1873 			       struct stmmac_dma_conf *dma_conf,
1874 			       gfp_t flags)
1875 {
1876 	struct stmmac_priv *priv = netdev_priv(dev);
1877 	int ret;
1878 
1879 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1880 	if (ret)
1881 		return ret;
1882 
1883 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1884 
1885 	stmmac_clear_descriptors(priv, dma_conf);
1886 
1887 	if (netif_msg_hw(priv))
1888 		stmmac_display_rings(priv, dma_conf);
1889 
1890 	return ret;
1891 }
1892 
1893 /**
1894  * dma_free_tx_skbufs - free TX dma buffers
1895  * @priv: private structure
1896  * @dma_conf: structure to take the dma data
1897  * @queue: TX queue index
1898  */
1899 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1900 			       struct stmmac_dma_conf *dma_conf,
1901 			       u32 queue)
1902 {
1903 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1904 	int i;
1905 
1906 	tx_q->xsk_frames_done = 0;
1907 
1908 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1909 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1910 
1911 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1912 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1913 		tx_q->xsk_frames_done = 0;
1914 		tx_q->xsk_pool = NULL;
1915 	}
1916 }
1917 
1918 /**
1919  * stmmac_free_tx_skbufs - free TX skb buffers
1920  * @priv: private structure
1921  */
1922 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1923 {
1924 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1925 	u32 queue;
1926 
1927 	for (queue = 0; queue < tx_queue_cnt; queue++)
1928 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1929 }
1930 
1931 /**
1932  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1933  * @priv: private structure
1934  * @dma_conf: structure to take the dma data
1935  * @queue: RX queue index
1936  */
1937 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1938 					 struct stmmac_dma_conf *dma_conf,
1939 					 u32 queue)
1940 {
1941 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1942 
1943 	/* Release the DMA RX socket buffers */
1944 	if (rx_q->xsk_pool)
1945 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1946 	else
1947 		dma_free_rx_skbufs(priv, dma_conf, queue);
1948 
1949 	rx_q->buf_alloc_num = 0;
1950 	rx_q->xsk_pool = NULL;
1951 
1952 	/* Free DMA regions of consistent memory previously allocated */
1953 	if (!priv->extend_desc)
1954 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1955 				  sizeof(struct dma_desc),
1956 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1957 	else
1958 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1959 				  sizeof(struct dma_extended_desc),
1960 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1961 
1962 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1963 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1964 
1965 	kfree(rx_q->buf_pool);
1966 	if (rx_q->page_pool)
1967 		page_pool_destroy(rx_q->page_pool);
1968 }
1969 
1970 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1971 				       struct stmmac_dma_conf *dma_conf)
1972 {
1973 	u32 rx_count = priv->plat->rx_queues_to_use;
1974 	u32 queue;
1975 
1976 	/* Free RX queue resources */
1977 	for (queue = 0; queue < rx_count; queue++)
1978 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1979 }
1980 
1981 /**
1982  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1983  * @priv: private structure
1984  * @dma_conf: structure to take the dma data
1985  * @queue: TX queue index
1986  */
1987 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1988 					 struct stmmac_dma_conf *dma_conf,
1989 					 u32 queue)
1990 {
1991 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1992 	size_t size;
1993 	void *addr;
1994 
1995 	/* Release the DMA TX socket buffers */
1996 	dma_free_tx_skbufs(priv, dma_conf, queue);
1997 
1998 	if (priv->extend_desc) {
1999 		size = sizeof(struct dma_extended_desc);
2000 		addr = tx_q->dma_etx;
2001 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2002 		size = sizeof(struct dma_edesc);
2003 		addr = tx_q->dma_entx;
2004 	} else {
2005 		size = sizeof(struct dma_desc);
2006 		addr = tx_q->dma_tx;
2007 	}
2008 
2009 	size *= dma_conf->dma_tx_size;
2010 
2011 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2012 
2013 	kfree(tx_q->tx_skbuff_dma);
2014 	kfree(tx_q->tx_skbuff);
2015 }
2016 
2017 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2018 				       struct stmmac_dma_conf *dma_conf)
2019 {
2020 	u32 tx_count = priv->plat->tx_queues_to_use;
2021 	u32 queue;
2022 
2023 	/* Free TX queue resources */
2024 	for (queue = 0; queue < tx_count; queue++)
2025 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2026 }
2027 
2028 /**
2029  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2030  * @priv: private structure
2031  * @dma_conf: structure to take the dma data
2032  * @queue: RX queue index
2033  * Description: according to which descriptor can be used (extend or basic)
2034  * this function allocates the resources for TX and RX paths. In case of
2035  * reception, for example, it pre-allocated the RX socket buffer in order to
2036  * allow zero-copy mechanism.
2037  */
2038 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2039 					 struct stmmac_dma_conf *dma_conf,
2040 					 u32 queue)
2041 {
2042 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2043 	struct stmmac_channel *ch = &priv->channel[queue];
2044 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2045 	struct page_pool_params pp_params = { 0 };
2046 	unsigned int dma_buf_sz_pad, num_pages;
2047 	unsigned int napi_id;
2048 	int ret;
2049 
2050 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2051 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2052 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2053 
2054 	rx_q->queue_index = queue;
2055 	rx_q->priv_data = priv;
2056 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2057 
2058 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2059 	pp_params.pool_size = dma_conf->dma_rx_size;
2060 	pp_params.order = order_base_2(num_pages);
2061 	pp_params.nid = dev_to_node(priv->device);
2062 	pp_params.dev = priv->device;
2063 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2064 	pp_params.offset = stmmac_rx_offset(priv);
2065 	pp_params.max_len = dma_conf->dma_buf_sz;
2066 
2067 	if (priv->sph) {
2068 		pp_params.offset = 0;
2069 		pp_params.max_len += stmmac_rx_offset(priv);
2070 	}
2071 
2072 	rx_q->page_pool = page_pool_create(&pp_params);
2073 	if (IS_ERR(rx_q->page_pool)) {
2074 		ret = PTR_ERR(rx_q->page_pool);
2075 		rx_q->page_pool = NULL;
2076 		return ret;
2077 	}
2078 
2079 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2080 				 sizeof(*rx_q->buf_pool),
2081 				 GFP_KERNEL);
2082 	if (!rx_q->buf_pool)
2083 		return -ENOMEM;
2084 
2085 	if (priv->extend_desc) {
2086 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2087 						   dma_conf->dma_rx_size *
2088 						   sizeof(struct dma_extended_desc),
2089 						   &rx_q->dma_rx_phy,
2090 						   GFP_KERNEL);
2091 		if (!rx_q->dma_erx)
2092 			return -ENOMEM;
2093 
2094 	} else {
2095 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2096 						  dma_conf->dma_rx_size *
2097 						  sizeof(struct dma_desc),
2098 						  &rx_q->dma_rx_phy,
2099 						  GFP_KERNEL);
2100 		if (!rx_q->dma_rx)
2101 			return -ENOMEM;
2102 	}
2103 
2104 	if (stmmac_xdp_is_enabled(priv) &&
2105 	    test_bit(queue, priv->af_xdp_zc_qps))
2106 		napi_id = ch->rxtx_napi.napi_id;
2107 	else
2108 		napi_id = ch->rx_napi.napi_id;
2109 
2110 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2111 			       rx_q->queue_index,
2112 			       napi_id);
2113 	if (ret) {
2114 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2115 		return -EINVAL;
2116 	}
2117 
2118 	return 0;
2119 }
2120 
2121 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2122 				       struct stmmac_dma_conf *dma_conf)
2123 {
2124 	u32 rx_count = priv->plat->rx_queues_to_use;
2125 	u32 queue;
2126 	int ret;
2127 
2128 	/* RX queues buffers and DMA */
2129 	for (queue = 0; queue < rx_count; queue++) {
2130 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2131 		if (ret)
2132 			goto err_dma;
2133 	}
2134 
2135 	return 0;
2136 
2137 err_dma:
2138 	free_dma_rx_desc_resources(priv, dma_conf);
2139 
2140 	return ret;
2141 }
2142 
2143 /**
2144  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2145  * @priv: private structure
2146  * @dma_conf: structure to take the dma data
2147  * @queue: TX queue index
2148  * Description: according to which descriptor can be used (extend or basic)
2149  * this function allocates the resources for TX and RX paths. In case of
2150  * reception, for example, it pre-allocated the RX socket buffer in order to
2151  * allow zero-copy mechanism.
2152  */
2153 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2154 					 struct stmmac_dma_conf *dma_conf,
2155 					 u32 queue)
2156 {
2157 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2158 	size_t size;
2159 	void *addr;
2160 
2161 	tx_q->queue_index = queue;
2162 	tx_q->priv_data = priv;
2163 
2164 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2165 				      sizeof(*tx_q->tx_skbuff_dma),
2166 				      GFP_KERNEL);
2167 	if (!tx_q->tx_skbuff_dma)
2168 		return -ENOMEM;
2169 
2170 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2171 				  sizeof(struct sk_buff *),
2172 				  GFP_KERNEL);
2173 	if (!tx_q->tx_skbuff)
2174 		return -ENOMEM;
2175 
2176 	if (priv->extend_desc)
2177 		size = sizeof(struct dma_extended_desc);
2178 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2179 		size = sizeof(struct dma_edesc);
2180 	else
2181 		size = sizeof(struct dma_desc);
2182 
2183 	size *= dma_conf->dma_tx_size;
2184 
2185 	addr = dma_alloc_coherent(priv->device, size,
2186 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2187 	if (!addr)
2188 		return -ENOMEM;
2189 
2190 	if (priv->extend_desc)
2191 		tx_q->dma_etx = addr;
2192 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2193 		tx_q->dma_entx = addr;
2194 	else
2195 		tx_q->dma_tx = addr;
2196 
2197 	return 0;
2198 }
2199 
2200 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2201 				       struct stmmac_dma_conf *dma_conf)
2202 {
2203 	u32 tx_count = priv->plat->tx_queues_to_use;
2204 	u32 queue;
2205 	int ret;
2206 
2207 	/* TX queues buffers and DMA */
2208 	for (queue = 0; queue < tx_count; queue++) {
2209 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2210 		if (ret)
2211 			goto err_dma;
2212 	}
2213 
2214 	return 0;
2215 
2216 err_dma:
2217 	free_dma_tx_desc_resources(priv, dma_conf);
2218 	return ret;
2219 }
2220 
2221 /**
2222  * alloc_dma_desc_resources - alloc TX/RX resources.
2223  * @priv: private structure
2224  * @dma_conf: structure to take the dma data
2225  * Description: according to which descriptor can be used (extend or basic)
2226  * this function allocates the resources for TX and RX paths. In case of
2227  * reception, for example, it pre-allocated the RX socket buffer in order to
2228  * allow zero-copy mechanism.
2229  */
2230 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2231 				    struct stmmac_dma_conf *dma_conf)
2232 {
2233 	/* RX Allocation */
2234 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2235 
2236 	if (ret)
2237 		return ret;
2238 
2239 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2240 
2241 	return ret;
2242 }
2243 
2244 /**
2245  * free_dma_desc_resources - free dma desc resources
2246  * @priv: private structure
2247  * @dma_conf: structure to take the dma data
2248  */
2249 static void free_dma_desc_resources(struct stmmac_priv *priv,
2250 				    struct stmmac_dma_conf *dma_conf)
2251 {
2252 	/* Release the DMA TX socket buffers */
2253 	free_dma_tx_desc_resources(priv, dma_conf);
2254 
2255 	/* Release the DMA RX socket buffers later
2256 	 * to ensure all pending XDP_TX buffers are returned.
2257 	 */
2258 	free_dma_rx_desc_resources(priv, dma_conf);
2259 }
2260 
2261 /**
2262  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2263  *  @priv: driver private structure
2264  *  Description: It is used for enabling the rx queues in the MAC
2265  */
2266 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2267 {
2268 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2269 	int queue;
2270 	u8 mode;
2271 
2272 	for (queue = 0; queue < rx_queues_count; queue++) {
2273 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2274 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2275 	}
2276 }
2277 
2278 /**
2279  * stmmac_start_rx_dma - start RX DMA channel
2280  * @priv: driver private structure
2281  * @chan: RX channel index
2282  * Description:
2283  * This starts a RX DMA channel
2284  */
2285 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2286 {
2287 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2288 	stmmac_start_rx(priv, priv->ioaddr, chan);
2289 }
2290 
2291 /**
2292  * stmmac_start_tx_dma - start TX DMA channel
2293  * @priv: driver private structure
2294  * @chan: TX channel index
2295  * Description:
2296  * This starts a TX DMA channel
2297  */
2298 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2299 {
2300 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2301 	stmmac_start_tx(priv, priv->ioaddr, chan);
2302 }
2303 
2304 /**
2305  * stmmac_stop_rx_dma - stop RX DMA channel
2306  * @priv: driver private structure
2307  * @chan: RX channel index
2308  * Description:
2309  * This stops a RX DMA channel
2310  */
2311 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2312 {
2313 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2314 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2315 }
2316 
2317 /**
2318  * stmmac_stop_tx_dma - stop TX DMA channel
2319  * @priv: driver private structure
2320  * @chan: TX channel index
2321  * Description:
2322  * This stops a TX DMA channel
2323  */
2324 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2325 {
2326 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2327 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2328 }
2329 
2330 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2331 {
2332 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2333 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2334 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2335 	u32 chan;
2336 
2337 	for (chan = 0; chan < dma_csr_ch; chan++) {
2338 		struct stmmac_channel *ch = &priv->channel[chan];
2339 		unsigned long flags;
2340 
2341 		spin_lock_irqsave(&ch->lock, flags);
2342 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2343 		spin_unlock_irqrestore(&ch->lock, flags);
2344 	}
2345 }
2346 
2347 /**
2348  * stmmac_start_all_dma - start all RX and TX DMA channels
2349  * @priv: driver private structure
2350  * Description:
2351  * This starts all the RX and TX DMA channels
2352  */
2353 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2354 {
2355 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2356 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2357 	u32 chan = 0;
2358 
2359 	for (chan = 0; chan < rx_channels_count; chan++)
2360 		stmmac_start_rx_dma(priv, chan);
2361 
2362 	for (chan = 0; chan < tx_channels_count; chan++)
2363 		stmmac_start_tx_dma(priv, chan);
2364 }
2365 
2366 /**
2367  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2368  * @priv: driver private structure
2369  * Description:
2370  * This stops the RX and TX DMA channels
2371  */
2372 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2373 {
2374 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2375 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2376 	u32 chan = 0;
2377 
2378 	for (chan = 0; chan < rx_channels_count; chan++)
2379 		stmmac_stop_rx_dma(priv, chan);
2380 
2381 	for (chan = 0; chan < tx_channels_count; chan++)
2382 		stmmac_stop_tx_dma(priv, chan);
2383 }
2384 
2385 /**
2386  *  stmmac_dma_operation_mode - HW DMA operation mode
2387  *  @priv: driver private structure
2388  *  Description: it is used for configuring the DMA operation mode register in
2389  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2390  */
2391 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2392 {
2393 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2394 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2395 	int rxfifosz = priv->plat->rx_fifo_size;
2396 	int txfifosz = priv->plat->tx_fifo_size;
2397 	u32 txmode = 0;
2398 	u32 rxmode = 0;
2399 	u32 chan = 0;
2400 	u8 qmode = 0;
2401 
2402 	if (rxfifosz == 0)
2403 		rxfifosz = priv->dma_cap.rx_fifo_size;
2404 	if (txfifosz == 0)
2405 		txfifosz = priv->dma_cap.tx_fifo_size;
2406 
2407 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2408 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2409 		rxfifosz /= rx_channels_count;
2410 		txfifosz /= tx_channels_count;
2411 	}
2412 
2413 	if (priv->plat->force_thresh_dma_mode) {
2414 		txmode = tc;
2415 		rxmode = tc;
2416 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2417 		/*
2418 		 * In case of GMAC, SF mode can be enabled
2419 		 * to perform the TX COE in HW. This depends on:
2420 		 * 1) TX COE if actually supported
2421 		 * 2) There is no bugged Jumbo frame support
2422 		 *    that needs to not insert csum in the TDES.
2423 		 */
2424 		txmode = SF_DMA_MODE;
2425 		rxmode = SF_DMA_MODE;
2426 		priv->xstats.threshold = SF_DMA_MODE;
2427 	} else {
2428 		txmode = tc;
2429 		rxmode = SF_DMA_MODE;
2430 	}
2431 
2432 	/* configure all channels */
2433 	for (chan = 0; chan < rx_channels_count; chan++) {
2434 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2435 		u32 buf_size;
2436 
2437 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2438 
2439 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2440 				rxfifosz, qmode);
2441 
2442 		if (rx_q->xsk_pool) {
2443 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2444 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2445 					      buf_size,
2446 					      chan);
2447 		} else {
2448 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2449 					      priv->dma_conf.dma_buf_sz,
2450 					      chan);
2451 		}
2452 	}
2453 
2454 	for (chan = 0; chan < tx_channels_count; chan++) {
2455 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2456 
2457 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2458 				txfifosz, qmode);
2459 	}
2460 }
2461 
2462 static void stmmac_xsk_request_timestamp(void *_priv)
2463 {
2464 	struct stmmac_metadata_request *meta_req = _priv;
2465 
2466 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2467 	*meta_req->set_ic = true;
2468 }
2469 
2470 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2471 {
2472 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2473 	struct stmmac_priv *priv = tx_compl->priv;
2474 	struct dma_desc *desc = tx_compl->desc;
2475 	bool found = false;
2476 	u64 ns = 0;
2477 
2478 	if (!priv->hwts_tx_en)
2479 		return 0;
2480 
2481 	/* check tx tstamp status */
2482 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2483 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2484 		found = true;
2485 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2486 		found = true;
2487 	}
2488 
2489 	if (found) {
2490 		ns -= priv->plat->cdc_error_adj;
2491 		return ns_to_ktime(ns);
2492 	}
2493 
2494 	return 0;
2495 }
2496 
2497 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2498 {
2499 	struct timespec64 ts = ns_to_timespec64(launch_time);
2500 	struct stmmac_metadata_request *meta_req = _priv;
2501 
2502 	if (meta_req->tbs & STMMAC_TBS_EN)
2503 		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2504 				    ts.tv_nsec);
2505 }
2506 
2507 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2508 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2509 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2510 	.tmo_request_launch_time	= stmmac_xsk_request_launch_time,
2511 };
2512 
2513 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2514 {
2515 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2516 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2517 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2518 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
2519 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2520 	unsigned int entry = tx_q->cur_tx;
2521 	struct dma_desc *tx_desc = NULL;
2522 	struct xdp_desc xdp_desc;
2523 	bool work_done = true;
2524 	u32 tx_set_ic_bit = 0;
2525 
2526 	/* Avoids TX time-out as we are sharing with slow path */
2527 	txq_trans_cond_update(nq);
2528 
2529 	budget = min(budget, stmmac_tx_avail(priv, queue));
2530 
2531 	for (; budget > 0; budget--) {
2532 		struct stmmac_metadata_request meta_req;
2533 		struct xsk_tx_metadata *meta = NULL;
2534 		dma_addr_t dma_addr;
2535 		bool set_ic;
2536 
2537 		/* We are sharing with slow path and stop XSK TX desc submission when
2538 		 * available TX ring is less than threshold.
2539 		 */
2540 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2541 		    !netif_carrier_ok(priv->dev)) {
2542 			work_done = false;
2543 			break;
2544 		}
2545 
2546 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2547 			break;
2548 
2549 		if (priv->est && priv->est->enable &&
2550 		    priv->est->max_sdu[queue] &&
2551 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2552 			priv->xstats.max_sdu_txq_drop[queue]++;
2553 			continue;
2554 		}
2555 
2556 		if (likely(priv->extend_desc))
2557 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2558 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2559 			tx_desc = &tx_q->dma_entx[entry].basic;
2560 		else
2561 			tx_desc = tx_q->dma_tx + entry;
2562 
2563 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2564 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2565 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2566 
2567 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2568 
2569 		/* To return XDP buffer to XSK pool, we simple call
2570 		 * xsk_tx_completed(), so we don't need to fill up
2571 		 * 'buf' and 'xdpf'.
2572 		 */
2573 		tx_q->tx_skbuff_dma[entry].buf = 0;
2574 		tx_q->xdpf[entry] = NULL;
2575 
2576 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2577 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2578 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2579 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2580 
2581 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2582 
2583 		tx_q->tx_count_frames++;
2584 
2585 		if (!priv->tx_coal_frames[queue])
2586 			set_ic = false;
2587 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2588 			set_ic = true;
2589 		else
2590 			set_ic = false;
2591 
2592 		meta_req.priv = priv;
2593 		meta_req.tx_desc = tx_desc;
2594 		meta_req.set_ic = &set_ic;
2595 		meta_req.tbs = tx_q->tbs;
2596 		meta_req.edesc = &tx_q->dma_entx[entry];
2597 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2598 					&meta_req);
2599 		if (set_ic) {
2600 			tx_q->tx_count_frames = 0;
2601 			stmmac_set_tx_ic(priv, tx_desc);
2602 			tx_set_ic_bit++;
2603 		}
2604 
2605 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2606 				       csum, priv->mode, true, true,
2607 				       xdp_desc.len);
2608 
2609 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2610 
2611 		xsk_tx_metadata_to_compl(meta,
2612 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2613 
2614 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2615 		entry = tx_q->cur_tx;
2616 	}
2617 	u64_stats_update_begin(&txq_stats->napi_syncp);
2618 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2619 	u64_stats_update_end(&txq_stats->napi_syncp);
2620 
2621 	if (tx_desc) {
2622 		stmmac_flush_tx_descriptors(priv, queue);
2623 		xsk_tx_release(pool);
2624 	}
2625 
2626 	/* Return true if all of the 3 conditions are met
2627 	 *  a) TX Budget is still available
2628 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2629 	 *     pending XSK TX for transmission)
2630 	 */
2631 	return !!budget && work_done;
2632 }
2633 
2634 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2635 {
2636 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2637 		tc += 64;
2638 
2639 		if (priv->plat->force_thresh_dma_mode)
2640 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2641 		else
2642 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2643 						      chan);
2644 
2645 		priv->xstats.threshold = tc;
2646 	}
2647 }
2648 
2649 /**
2650  * stmmac_tx_clean - to manage the transmission completion
2651  * @priv: driver private structure
2652  * @budget: napi budget limiting this functions packet handling
2653  * @queue: TX queue index
2654  * @pending_packets: signal to arm the TX coal timer
2655  * Description: it reclaims the transmit resources after transmission completes.
2656  * If some packets still needs to be handled, due to TX coalesce, set
2657  * pending_packets to true to make NAPI arm the TX coal timer.
2658  */
2659 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2660 			   bool *pending_packets)
2661 {
2662 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2663 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2664 	unsigned int bytes_compl = 0, pkts_compl = 0;
2665 	unsigned int entry, xmits = 0, count = 0;
2666 	u32 tx_packets = 0, tx_errors = 0;
2667 
2668 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2669 
2670 	tx_q->xsk_frames_done = 0;
2671 
2672 	entry = tx_q->dirty_tx;
2673 
2674 	/* Try to clean all TX complete frame in 1 shot */
2675 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2676 		struct xdp_frame *xdpf;
2677 		struct sk_buff *skb;
2678 		struct dma_desc *p;
2679 		int status;
2680 
2681 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2682 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2683 			xdpf = tx_q->xdpf[entry];
2684 			skb = NULL;
2685 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2686 			xdpf = NULL;
2687 			skb = tx_q->tx_skbuff[entry];
2688 		} else {
2689 			xdpf = NULL;
2690 			skb = NULL;
2691 		}
2692 
2693 		if (priv->extend_desc)
2694 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2695 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2696 			p = &tx_q->dma_entx[entry].basic;
2697 		else
2698 			p = tx_q->dma_tx + entry;
2699 
2700 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2701 		/* Check if the descriptor is owned by the DMA */
2702 		if (unlikely(status & tx_dma_own))
2703 			break;
2704 
2705 		count++;
2706 
2707 		/* Make sure descriptor fields are read after reading
2708 		 * the own bit.
2709 		 */
2710 		dma_rmb();
2711 
2712 		/* Just consider the last segment and ...*/
2713 		if (likely(!(status & tx_not_ls))) {
2714 			/* ... verify the status error condition */
2715 			if (unlikely(status & tx_err)) {
2716 				tx_errors++;
2717 				if (unlikely(status & tx_err_bump_tc))
2718 					stmmac_bump_dma_threshold(priv, queue);
2719 			} else {
2720 				tx_packets++;
2721 			}
2722 			if (skb) {
2723 				stmmac_get_tx_hwtstamp(priv, p, skb);
2724 			} else if (tx_q->xsk_pool &&
2725 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2726 				struct stmmac_xsk_tx_complete tx_compl = {
2727 					.priv = priv,
2728 					.desc = p,
2729 				};
2730 
2731 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2732 							 &stmmac_xsk_tx_metadata_ops,
2733 							 &tx_compl);
2734 			}
2735 		}
2736 
2737 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2738 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2739 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2740 				dma_unmap_page(priv->device,
2741 					       tx_q->tx_skbuff_dma[entry].buf,
2742 					       tx_q->tx_skbuff_dma[entry].len,
2743 					       DMA_TO_DEVICE);
2744 			else
2745 				dma_unmap_single(priv->device,
2746 						 tx_q->tx_skbuff_dma[entry].buf,
2747 						 tx_q->tx_skbuff_dma[entry].len,
2748 						 DMA_TO_DEVICE);
2749 			tx_q->tx_skbuff_dma[entry].buf = 0;
2750 			tx_q->tx_skbuff_dma[entry].len = 0;
2751 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2752 		}
2753 
2754 		stmmac_clean_desc3(priv, tx_q, p);
2755 
2756 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2757 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2758 
2759 		if (xdpf &&
2760 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2761 			xdp_return_frame_rx_napi(xdpf);
2762 			tx_q->xdpf[entry] = NULL;
2763 		}
2764 
2765 		if (xdpf &&
2766 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2767 			xdp_return_frame(xdpf);
2768 			tx_q->xdpf[entry] = NULL;
2769 		}
2770 
2771 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2772 			tx_q->xsk_frames_done++;
2773 
2774 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2775 			if (likely(skb)) {
2776 				pkts_compl++;
2777 				bytes_compl += skb->len;
2778 				dev_consume_skb_any(skb);
2779 				tx_q->tx_skbuff[entry] = NULL;
2780 			}
2781 		}
2782 
2783 		stmmac_release_tx_desc(priv, p, priv->mode);
2784 
2785 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2786 	}
2787 	tx_q->dirty_tx = entry;
2788 
2789 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2790 				  pkts_compl, bytes_compl);
2791 
2792 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2793 								queue))) &&
2794 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2795 
2796 		netif_dbg(priv, tx_done, priv->dev,
2797 			  "%s: restart transmit\n", __func__);
2798 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2799 	}
2800 
2801 	if (tx_q->xsk_pool) {
2802 		bool work_done;
2803 
2804 		if (tx_q->xsk_frames_done)
2805 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2806 
2807 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2808 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2809 
2810 		/* For XSK TX, we try to send as many as possible.
2811 		 * If XSK work done (XSK TX desc empty and budget still
2812 		 * available), return "budget - 1" to reenable TX IRQ.
2813 		 * Else, return "budget" to make NAPI continue polling.
2814 		 */
2815 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2816 					       STMMAC_XSK_TX_BUDGET_MAX);
2817 		if (work_done)
2818 			xmits = budget - 1;
2819 		else
2820 			xmits = budget;
2821 	}
2822 
2823 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2824 		stmmac_restart_sw_lpi_timer(priv);
2825 
2826 	/* We still have pending packets, let's call for a new scheduling */
2827 	if (tx_q->dirty_tx != tx_q->cur_tx)
2828 		*pending_packets = true;
2829 
2830 	u64_stats_update_begin(&txq_stats->napi_syncp);
2831 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2832 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2833 	u64_stats_inc(&txq_stats->napi.tx_clean);
2834 	u64_stats_update_end(&txq_stats->napi_syncp);
2835 
2836 	priv->xstats.tx_errors += tx_errors;
2837 
2838 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2839 
2840 	/* Combine decisions from TX clean and XSK TX */
2841 	return max(count, xmits);
2842 }
2843 
2844 /**
2845  * stmmac_tx_err - to manage the tx error
2846  * @priv: driver private structure
2847  * @chan: channel index
2848  * Description: it cleans the descriptors and restarts the transmission
2849  * in case of transmission errors.
2850  */
2851 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2852 {
2853 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2854 
2855 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2856 
2857 	stmmac_stop_tx_dma(priv, chan);
2858 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2859 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2860 	stmmac_reset_tx_queue(priv, chan);
2861 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2862 			    tx_q->dma_tx_phy, chan);
2863 	stmmac_start_tx_dma(priv, chan);
2864 
2865 	priv->xstats.tx_errors++;
2866 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2867 }
2868 
2869 /**
2870  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2871  *  @priv: driver private structure
2872  *  @txmode: TX operating mode
2873  *  @rxmode: RX operating mode
2874  *  @chan: channel index
2875  *  Description: it is used for configuring of the DMA operation mode in
2876  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2877  *  mode.
2878  */
2879 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2880 					  u32 rxmode, u32 chan)
2881 {
2882 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2883 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2884 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2885 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2886 	int rxfifosz = priv->plat->rx_fifo_size;
2887 	int txfifosz = priv->plat->tx_fifo_size;
2888 
2889 	if (rxfifosz == 0)
2890 		rxfifosz = priv->dma_cap.rx_fifo_size;
2891 	if (txfifosz == 0)
2892 		txfifosz = priv->dma_cap.tx_fifo_size;
2893 
2894 	/* Adjust for real per queue fifo size */
2895 	rxfifosz /= rx_channels_count;
2896 	txfifosz /= tx_channels_count;
2897 
2898 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2899 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2900 }
2901 
2902 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2903 {
2904 	int ret;
2905 
2906 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2907 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2908 	if (ret && (ret != -EINVAL)) {
2909 		stmmac_global_err(priv);
2910 		return true;
2911 	}
2912 
2913 	return false;
2914 }
2915 
2916 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2917 {
2918 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2919 						 &priv->xstats, chan, dir);
2920 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2921 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2922 	struct stmmac_channel *ch = &priv->channel[chan];
2923 	struct napi_struct *rx_napi;
2924 	struct napi_struct *tx_napi;
2925 	unsigned long flags;
2926 
2927 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2928 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2929 
2930 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2931 		if (napi_schedule_prep(rx_napi)) {
2932 			spin_lock_irqsave(&ch->lock, flags);
2933 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2934 			spin_unlock_irqrestore(&ch->lock, flags);
2935 			__napi_schedule(rx_napi);
2936 		}
2937 	}
2938 
2939 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2940 		if (napi_schedule_prep(tx_napi)) {
2941 			spin_lock_irqsave(&ch->lock, flags);
2942 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2943 			spin_unlock_irqrestore(&ch->lock, flags);
2944 			__napi_schedule(tx_napi);
2945 		}
2946 	}
2947 
2948 	return status;
2949 }
2950 
2951 /**
2952  * stmmac_dma_interrupt - DMA ISR
2953  * @priv: driver private structure
2954  * Description: this is the DMA ISR. It is called by the main ISR.
2955  * It calls the dwmac dma routine and schedule poll method in case of some
2956  * work can be done.
2957  */
2958 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2959 {
2960 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2961 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2962 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2963 				tx_channel_count : rx_channel_count;
2964 	u32 chan;
2965 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2966 
2967 	/* Make sure we never check beyond our status buffer. */
2968 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2969 		channels_to_check = ARRAY_SIZE(status);
2970 
2971 	for (chan = 0; chan < channels_to_check; chan++)
2972 		status[chan] = stmmac_napi_check(priv, chan,
2973 						 DMA_DIR_RXTX);
2974 
2975 	for (chan = 0; chan < tx_channel_count; chan++) {
2976 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2977 			/* Try to bump up the dma threshold on this failure */
2978 			stmmac_bump_dma_threshold(priv, chan);
2979 		} else if (unlikely(status[chan] == tx_hard_error)) {
2980 			stmmac_tx_err(priv, chan);
2981 		}
2982 	}
2983 }
2984 
2985 /**
2986  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2987  * @priv: driver private structure
2988  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2989  */
2990 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2991 {
2992 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2993 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2994 
2995 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2996 
2997 	if (priv->dma_cap.rmon) {
2998 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2999 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3000 	} else
3001 		netdev_info(priv->dev, "No MAC Management Counters available\n");
3002 }
3003 
3004 /**
3005  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3006  * @priv: driver private structure
3007  * Description:
3008  *  new GMAC chip generations have a new register to indicate the
3009  *  presence of the optional feature/functions.
3010  *  This can be also used to override the value passed through the
3011  *  platform and necessary for old MAC10/100 and GMAC chips.
3012  */
3013 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3014 {
3015 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3016 }
3017 
3018 /**
3019  * stmmac_check_ether_addr - check if the MAC addr is valid
3020  * @priv: driver private structure
3021  * Description:
3022  * it is to verify if the MAC address is valid, in case of failures it
3023  * generates a random MAC address
3024  */
3025 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3026 {
3027 	u8 addr[ETH_ALEN];
3028 
3029 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3030 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3031 		if (is_valid_ether_addr(addr))
3032 			eth_hw_addr_set(priv->dev, addr);
3033 		else
3034 			eth_hw_addr_random(priv->dev);
3035 		dev_info(priv->device, "device MAC address %pM\n",
3036 			 priv->dev->dev_addr);
3037 	}
3038 }
3039 
3040 /**
3041  * stmmac_init_dma_engine - DMA init.
3042  * @priv: driver private structure
3043  * Description:
3044  * It inits the DMA invoking the specific MAC/GMAC callback.
3045  * Some DMA parameters can be passed from the platform;
3046  * in case of these are not passed a default is kept for the MAC or GMAC.
3047  */
3048 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3049 {
3050 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3051 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3052 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3053 	struct stmmac_rx_queue *rx_q;
3054 	struct stmmac_tx_queue *tx_q;
3055 	u32 chan = 0;
3056 	int ret = 0;
3057 
3058 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3059 		netdev_err(priv->dev, "Invalid DMA configuration\n");
3060 		return -EINVAL;
3061 	}
3062 
3063 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3064 		priv->plat->dma_cfg->atds = 1;
3065 
3066 	ret = stmmac_reset(priv, priv->ioaddr);
3067 	if (ret) {
3068 		netdev_err(priv->dev, "Failed to reset the dma\n");
3069 		return ret;
3070 	}
3071 
3072 	/* DMA Configuration */
3073 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3074 
3075 	if (priv->plat->axi)
3076 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3077 
3078 	/* DMA CSR Channel configuration */
3079 	for (chan = 0; chan < dma_csr_ch; chan++) {
3080 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3081 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3082 	}
3083 
3084 	/* DMA RX Channel Configuration */
3085 	for (chan = 0; chan < rx_channels_count; chan++) {
3086 		rx_q = &priv->dma_conf.rx_queue[chan];
3087 
3088 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3089 				    rx_q->dma_rx_phy, chan);
3090 
3091 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3092 				     (rx_q->buf_alloc_num *
3093 				      sizeof(struct dma_desc));
3094 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3095 				       rx_q->rx_tail_addr, chan);
3096 	}
3097 
3098 	/* DMA TX Channel Configuration */
3099 	for (chan = 0; chan < tx_channels_count; chan++) {
3100 		tx_q = &priv->dma_conf.tx_queue[chan];
3101 
3102 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3103 				    tx_q->dma_tx_phy, chan);
3104 
3105 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3106 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3107 				       tx_q->tx_tail_addr, chan);
3108 	}
3109 
3110 	return ret;
3111 }
3112 
3113 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3114 {
3115 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3116 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3117 	struct stmmac_channel *ch;
3118 	struct napi_struct *napi;
3119 
3120 	if (!tx_coal_timer)
3121 		return;
3122 
3123 	ch = &priv->channel[tx_q->queue_index];
3124 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3125 
3126 	/* Arm timer only if napi is not already scheduled.
3127 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3128 	 * again in the next scheduled napi.
3129 	 */
3130 	if (unlikely(!napi_is_scheduled(napi)))
3131 		hrtimer_start(&tx_q->txtimer,
3132 			      STMMAC_COAL_TIMER(tx_coal_timer),
3133 			      HRTIMER_MODE_REL);
3134 	else
3135 		hrtimer_try_to_cancel(&tx_q->txtimer);
3136 }
3137 
3138 /**
3139  * stmmac_tx_timer - mitigation sw timer for tx.
3140  * @t: data pointer
3141  * Description:
3142  * This is the timer handler to directly invoke the stmmac_tx_clean.
3143  */
3144 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3145 {
3146 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3147 	struct stmmac_priv *priv = tx_q->priv_data;
3148 	struct stmmac_channel *ch;
3149 	struct napi_struct *napi;
3150 
3151 	ch = &priv->channel[tx_q->queue_index];
3152 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3153 
3154 	if (likely(napi_schedule_prep(napi))) {
3155 		unsigned long flags;
3156 
3157 		spin_lock_irqsave(&ch->lock, flags);
3158 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3159 		spin_unlock_irqrestore(&ch->lock, flags);
3160 		__napi_schedule(napi);
3161 	}
3162 
3163 	return HRTIMER_NORESTART;
3164 }
3165 
3166 /**
3167  * stmmac_init_coalesce - init mitigation options.
3168  * @priv: driver private structure
3169  * Description:
3170  * This inits the coalesce parameters: i.e. timer rate,
3171  * timer handler and default threshold used for enabling the
3172  * interrupt on completion bit.
3173  */
3174 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3175 {
3176 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3177 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3178 	u32 chan;
3179 
3180 	for (chan = 0; chan < tx_channel_count; chan++) {
3181 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3182 
3183 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3184 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3185 
3186 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3187 	}
3188 
3189 	for (chan = 0; chan < rx_channel_count; chan++)
3190 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3191 }
3192 
3193 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3194 {
3195 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3196 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3197 	u32 chan;
3198 
3199 	/* set TX ring length */
3200 	for (chan = 0; chan < tx_channels_count; chan++)
3201 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3202 				       (priv->dma_conf.dma_tx_size - 1), chan);
3203 
3204 	/* set RX ring length */
3205 	for (chan = 0; chan < rx_channels_count; chan++)
3206 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3207 				       (priv->dma_conf.dma_rx_size - 1), chan);
3208 }
3209 
3210 /**
3211  *  stmmac_set_tx_queue_weight - Set TX queue weight
3212  *  @priv: driver private structure
3213  *  Description: It is used for setting TX queues weight
3214  */
3215 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3216 {
3217 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3218 	u32 weight;
3219 	u32 queue;
3220 
3221 	for (queue = 0; queue < tx_queues_count; queue++) {
3222 		weight = priv->plat->tx_queues_cfg[queue].weight;
3223 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3224 	}
3225 }
3226 
3227 /**
3228  *  stmmac_configure_cbs - Configure CBS in TX queue
3229  *  @priv: driver private structure
3230  *  Description: It is used for configuring CBS in AVB TX queues
3231  */
3232 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3233 {
3234 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3235 	u32 mode_to_use;
3236 	u32 queue;
3237 
3238 	/* queue 0 is reserved for legacy traffic */
3239 	for (queue = 1; queue < tx_queues_count; queue++) {
3240 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3241 		if (mode_to_use == MTL_QUEUE_DCB)
3242 			continue;
3243 
3244 		stmmac_config_cbs(priv, priv->hw,
3245 				priv->plat->tx_queues_cfg[queue].send_slope,
3246 				priv->plat->tx_queues_cfg[queue].idle_slope,
3247 				priv->plat->tx_queues_cfg[queue].high_credit,
3248 				priv->plat->tx_queues_cfg[queue].low_credit,
3249 				queue);
3250 	}
3251 }
3252 
3253 /**
3254  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3255  *  @priv: driver private structure
3256  *  Description: It is used for mapping RX queues to RX dma channels
3257  */
3258 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3259 {
3260 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3261 	u32 queue;
3262 	u32 chan;
3263 
3264 	for (queue = 0; queue < rx_queues_count; queue++) {
3265 		chan = priv->plat->rx_queues_cfg[queue].chan;
3266 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3267 	}
3268 }
3269 
3270 /**
3271  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3272  *  @priv: driver private structure
3273  *  Description: It is used for configuring the RX Queue Priority
3274  */
3275 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3276 {
3277 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3278 	u32 queue;
3279 	u32 prio;
3280 
3281 	for (queue = 0; queue < rx_queues_count; queue++) {
3282 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3283 			continue;
3284 
3285 		prio = priv->plat->rx_queues_cfg[queue].prio;
3286 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3287 	}
3288 }
3289 
3290 /**
3291  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3292  *  @priv: driver private structure
3293  *  Description: It is used for configuring the TX Queue Priority
3294  */
3295 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3296 {
3297 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3298 	u32 queue;
3299 	u32 prio;
3300 
3301 	for (queue = 0; queue < tx_queues_count; queue++) {
3302 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3303 			continue;
3304 
3305 		prio = priv->plat->tx_queues_cfg[queue].prio;
3306 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3307 	}
3308 }
3309 
3310 /**
3311  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3312  *  @priv: driver private structure
3313  *  Description: It is used for configuring the RX queue routing
3314  */
3315 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3316 {
3317 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3318 	u32 queue;
3319 	u8 packet;
3320 
3321 	for (queue = 0; queue < rx_queues_count; queue++) {
3322 		/* no specific packet type routing specified for the queue */
3323 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3324 			continue;
3325 
3326 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3327 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3328 	}
3329 }
3330 
3331 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3332 {
3333 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3334 		priv->rss.enable = false;
3335 		return;
3336 	}
3337 
3338 	if (priv->dev->features & NETIF_F_RXHASH)
3339 		priv->rss.enable = true;
3340 	else
3341 		priv->rss.enable = false;
3342 
3343 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3344 			     priv->plat->rx_queues_to_use);
3345 }
3346 
3347 /**
3348  *  stmmac_mtl_configuration - Configure MTL
3349  *  @priv: driver private structure
3350  *  Description: It is used for configurring MTL
3351  */
3352 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3353 {
3354 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3355 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3356 
3357 	if (tx_queues_count > 1)
3358 		stmmac_set_tx_queue_weight(priv);
3359 
3360 	/* Configure MTL RX algorithms */
3361 	if (rx_queues_count > 1)
3362 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3363 				priv->plat->rx_sched_algorithm);
3364 
3365 	/* Configure MTL TX algorithms */
3366 	if (tx_queues_count > 1)
3367 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3368 				priv->plat->tx_sched_algorithm);
3369 
3370 	/* Configure CBS in AVB TX queues */
3371 	if (tx_queues_count > 1)
3372 		stmmac_configure_cbs(priv);
3373 
3374 	/* Map RX MTL to DMA channels */
3375 	stmmac_rx_queue_dma_chan_map(priv);
3376 
3377 	/* Enable MAC RX Queues */
3378 	stmmac_mac_enable_rx_queues(priv);
3379 
3380 	/* Set RX priorities */
3381 	if (rx_queues_count > 1)
3382 		stmmac_mac_config_rx_queues_prio(priv);
3383 
3384 	/* Set TX priorities */
3385 	if (tx_queues_count > 1)
3386 		stmmac_mac_config_tx_queues_prio(priv);
3387 
3388 	/* Set RX routing */
3389 	if (rx_queues_count > 1)
3390 		stmmac_mac_config_rx_queues_routing(priv);
3391 
3392 	/* Receive Side Scaling */
3393 	if (rx_queues_count > 1)
3394 		stmmac_mac_config_rss(priv);
3395 }
3396 
3397 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3398 {
3399 	if (priv->dma_cap.asp) {
3400 		netdev_info(priv->dev, "Enabling Safety Features\n");
3401 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3402 					  priv->plat->safety_feat_cfg);
3403 	} else {
3404 		netdev_info(priv->dev, "No Safety Features support found\n");
3405 	}
3406 }
3407 
3408 /**
3409  * stmmac_hw_setup - setup mac in a usable state.
3410  *  @dev : pointer to the device structure.
3411  *  @ptp_register: register PTP if set
3412  *  Description:
3413  *  this is the main function to setup the HW in a usable state because the
3414  *  dma engine is reset, the core registers are configured (e.g. AXI,
3415  *  Checksum features, timers). The DMA is ready to start receiving and
3416  *  transmitting.
3417  *  Return value:
3418  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3419  *  file on failure.
3420  */
3421 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3422 {
3423 	struct stmmac_priv *priv = netdev_priv(dev);
3424 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3425 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3426 	bool sph_en;
3427 	u32 chan;
3428 	int ret;
3429 
3430 	/* Make sure RX clock is enabled */
3431 	if (priv->hw->phylink_pcs)
3432 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3433 
3434 	/* Note that clk_rx_i must be running for reset to complete. This
3435 	 * clock may also be required when setting the MAC address.
3436 	 *
3437 	 * Block the receive clock stop for LPI mode at the PHY in case
3438 	 * the link is established with EEE mode active.
3439 	 */
3440 	phylink_rx_clk_stop_block(priv->phylink);
3441 
3442 	/* DMA initialization and SW reset */
3443 	ret = stmmac_init_dma_engine(priv);
3444 	if (ret < 0) {
3445 		phylink_rx_clk_stop_unblock(priv->phylink);
3446 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3447 			   __func__);
3448 		return ret;
3449 	}
3450 
3451 	/* Copy the MAC addr into the HW  */
3452 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3453 	phylink_rx_clk_stop_unblock(priv->phylink);
3454 
3455 	/* PS and related bits will be programmed according to the speed */
3456 	if (priv->hw->pcs) {
3457 		int speed = priv->plat->mac_port_sel_speed;
3458 
3459 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3460 		    (speed == SPEED_1000)) {
3461 			priv->hw->ps = speed;
3462 		} else {
3463 			dev_warn(priv->device, "invalid port speed\n");
3464 			priv->hw->ps = 0;
3465 		}
3466 	}
3467 
3468 	/* Initialize the MAC Core */
3469 	stmmac_core_init(priv, priv->hw, dev);
3470 
3471 	/* Initialize MTL*/
3472 	stmmac_mtl_configuration(priv);
3473 
3474 	/* Initialize Safety Features */
3475 	stmmac_safety_feat_configuration(priv);
3476 
3477 	ret = stmmac_rx_ipc(priv, priv->hw);
3478 	if (!ret) {
3479 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3480 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3481 		priv->hw->rx_csum = 0;
3482 	}
3483 
3484 	/* Enable the MAC Rx/Tx */
3485 	stmmac_mac_set(priv, priv->ioaddr, true);
3486 
3487 	/* Set the HW DMA mode and the COE */
3488 	stmmac_dma_operation_mode(priv);
3489 
3490 	stmmac_mmc_setup(priv);
3491 
3492 	if (ptp_register) {
3493 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3494 		if (ret < 0)
3495 			netdev_warn(priv->dev,
3496 				    "failed to enable PTP reference clock: %pe\n",
3497 				    ERR_PTR(ret));
3498 	}
3499 
3500 	ret = stmmac_init_ptp(priv);
3501 	if (ret == -EOPNOTSUPP)
3502 		netdev_info(priv->dev, "PTP not supported by HW\n");
3503 	else if (ret)
3504 		netdev_warn(priv->dev, "PTP init failed\n");
3505 	else if (ptp_register)
3506 		stmmac_ptp_register(priv);
3507 
3508 	if (priv->use_riwt) {
3509 		u32 queue;
3510 
3511 		for (queue = 0; queue < rx_cnt; queue++) {
3512 			if (!priv->rx_riwt[queue])
3513 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3514 
3515 			stmmac_rx_watchdog(priv, priv->ioaddr,
3516 					   priv->rx_riwt[queue], queue);
3517 		}
3518 	}
3519 
3520 	if (priv->hw->pcs)
3521 		stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
3522 
3523 	/* set TX and RX rings length */
3524 	stmmac_set_rings_length(priv);
3525 
3526 	/* Enable TSO */
3527 	if (priv->tso) {
3528 		for (chan = 0; chan < tx_cnt; chan++) {
3529 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3530 
3531 			/* TSO and TBS cannot co-exist */
3532 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3533 				continue;
3534 
3535 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3536 		}
3537 	}
3538 
3539 	/* Enable Split Header */
3540 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3541 	for (chan = 0; chan < rx_cnt; chan++)
3542 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3543 
3544 
3545 	/* VLAN Tag Insertion */
3546 	if (priv->dma_cap.vlins)
3547 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3548 
3549 	/* TBS */
3550 	for (chan = 0; chan < tx_cnt; chan++) {
3551 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3552 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3553 
3554 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3555 	}
3556 
3557 	/* Configure real RX and TX queues */
3558 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3559 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3560 
3561 	/* Start the ball rolling... */
3562 	stmmac_start_all_dma(priv);
3563 
3564 	phylink_rx_clk_stop_block(priv->phylink);
3565 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3566 	phylink_rx_clk_stop_unblock(priv->phylink);
3567 
3568 	return 0;
3569 }
3570 
3571 static void stmmac_hw_teardown(struct net_device *dev)
3572 {
3573 	struct stmmac_priv *priv = netdev_priv(dev);
3574 
3575 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3576 }
3577 
3578 static void stmmac_free_irq(struct net_device *dev,
3579 			    enum request_irq_err irq_err, int irq_idx)
3580 {
3581 	struct stmmac_priv *priv = netdev_priv(dev);
3582 	int j;
3583 
3584 	switch (irq_err) {
3585 	case REQ_IRQ_ERR_ALL:
3586 		irq_idx = priv->plat->tx_queues_to_use;
3587 		fallthrough;
3588 	case REQ_IRQ_ERR_TX:
3589 		for (j = irq_idx - 1; j >= 0; j--) {
3590 			if (priv->tx_irq[j] > 0) {
3591 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3592 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3593 			}
3594 		}
3595 		irq_idx = priv->plat->rx_queues_to_use;
3596 		fallthrough;
3597 	case REQ_IRQ_ERR_RX:
3598 		for (j = irq_idx - 1; j >= 0; j--) {
3599 			if (priv->rx_irq[j] > 0) {
3600 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3601 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3602 			}
3603 		}
3604 
3605 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3606 			free_irq(priv->sfty_ue_irq, dev);
3607 		fallthrough;
3608 	case REQ_IRQ_ERR_SFTY_UE:
3609 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3610 			free_irq(priv->sfty_ce_irq, dev);
3611 		fallthrough;
3612 	case REQ_IRQ_ERR_SFTY_CE:
3613 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3614 			free_irq(priv->lpi_irq, dev);
3615 		fallthrough;
3616 	case REQ_IRQ_ERR_LPI:
3617 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3618 			free_irq(priv->wol_irq, dev);
3619 		fallthrough;
3620 	case REQ_IRQ_ERR_SFTY:
3621 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3622 			free_irq(priv->sfty_irq, dev);
3623 		fallthrough;
3624 	case REQ_IRQ_ERR_WOL:
3625 		free_irq(dev->irq, dev);
3626 		fallthrough;
3627 	case REQ_IRQ_ERR_MAC:
3628 	case REQ_IRQ_ERR_NO:
3629 		/* If MAC IRQ request error, no more IRQ to free */
3630 		break;
3631 	}
3632 }
3633 
3634 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3635 {
3636 	struct stmmac_priv *priv = netdev_priv(dev);
3637 	enum request_irq_err irq_err;
3638 	int irq_idx = 0;
3639 	char *int_name;
3640 	int ret;
3641 	int i;
3642 
3643 	/* For common interrupt */
3644 	int_name = priv->int_name_mac;
3645 	sprintf(int_name, "%s:%s", dev->name, "mac");
3646 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3647 			  0, int_name, dev);
3648 	if (unlikely(ret < 0)) {
3649 		netdev_err(priv->dev,
3650 			   "%s: alloc mac MSI %d (error: %d)\n",
3651 			   __func__, dev->irq, ret);
3652 		irq_err = REQ_IRQ_ERR_MAC;
3653 		goto irq_error;
3654 	}
3655 
3656 	/* Request the Wake IRQ in case of another line
3657 	 * is used for WoL
3658 	 */
3659 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3660 		int_name = priv->int_name_wol;
3661 		sprintf(int_name, "%s:%s", dev->name, "wol");
3662 		ret = request_irq(priv->wol_irq,
3663 				  stmmac_mac_interrupt,
3664 				  0, int_name, dev);
3665 		if (unlikely(ret < 0)) {
3666 			netdev_err(priv->dev,
3667 				   "%s: alloc wol MSI %d (error: %d)\n",
3668 				   __func__, priv->wol_irq, ret);
3669 			irq_err = REQ_IRQ_ERR_WOL;
3670 			goto irq_error;
3671 		}
3672 	}
3673 
3674 	/* Request the LPI IRQ in case of another line
3675 	 * is used for LPI
3676 	 */
3677 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3678 		int_name = priv->int_name_lpi;
3679 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3680 		ret = request_irq(priv->lpi_irq,
3681 				  stmmac_mac_interrupt,
3682 				  0, int_name, dev);
3683 		if (unlikely(ret < 0)) {
3684 			netdev_err(priv->dev,
3685 				   "%s: alloc lpi MSI %d (error: %d)\n",
3686 				   __func__, priv->lpi_irq, ret);
3687 			irq_err = REQ_IRQ_ERR_LPI;
3688 			goto irq_error;
3689 		}
3690 	}
3691 
3692 	/* Request the common Safety Feature Correctible/Uncorrectible
3693 	 * Error line in case of another line is used
3694 	 */
3695 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3696 		int_name = priv->int_name_sfty;
3697 		sprintf(int_name, "%s:%s", dev->name, "safety");
3698 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3699 				  0, int_name, dev);
3700 		if (unlikely(ret < 0)) {
3701 			netdev_err(priv->dev,
3702 				   "%s: alloc sfty MSI %d (error: %d)\n",
3703 				   __func__, priv->sfty_irq, ret);
3704 			irq_err = REQ_IRQ_ERR_SFTY;
3705 			goto irq_error;
3706 		}
3707 	}
3708 
3709 	/* Request the Safety Feature Correctible Error line in
3710 	 * case of another line is used
3711 	 */
3712 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3713 		int_name = priv->int_name_sfty_ce;
3714 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3715 		ret = request_irq(priv->sfty_ce_irq,
3716 				  stmmac_safety_interrupt,
3717 				  0, int_name, dev);
3718 		if (unlikely(ret < 0)) {
3719 			netdev_err(priv->dev,
3720 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3721 				   __func__, priv->sfty_ce_irq, ret);
3722 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3723 			goto irq_error;
3724 		}
3725 	}
3726 
3727 	/* Request the Safety Feature Uncorrectible Error line in
3728 	 * case of another line is used
3729 	 */
3730 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3731 		int_name = priv->int_name_sfty_ue;
3732 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3733 		ret = request_irq(priv->sfty_ue_irq,
3734 				  stmmac_safety_interrupt,
3735 				  0, int_name, dev);
3736 		if (unlikely(ret < 0)) {
3737 			netdev_err(priv->dev,
3738 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3739 				   __func__, priv->sfty_ue_irq, ret);
3740 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3741 			goto irq_error;
3742 		}
3743 	}
3744 
3745 	/* Request Rx MSI irq */
3746 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3747 		if (i >= MTL_MAX_RX_QUEUES)
3748 			break;
3749 		if (priv->rx_irq[i] == 0)
3750 			continue;
3751 
3752 		int_name = priv->int_name_rx_irq[i];
3753 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3754 		ret = request_irq(priv->rx_irq[i],
3755 				  stmmac_msi_intr_rx,
3756 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3757 		if (unlikely(ret < 0)) {
3758 			netdev_err(priv->dev,
3759 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3760 				   __func__, i, priv->rx_irq[i], ret);
3761 			irq_err = REQ_IRQ_ERR_RX;
3762 			irq_idx = i;
3763 			goto irq_error;
3764 		}
3765 		irq_set_affinity_hint(priv->rx_irq[i],
3766 				      cpumask_of(i % num_online_cpus()));
3767 	}
3768 
3769 	/* Request Tx MSI irq */
3770 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3771 		if (i >= MTL_MAX_TX_QUEUES)
3772 			break;
3773 		if (priv->tx_irq[i] == 0)
3774 			continue;
3775 
3776 		int_name = priv->int_name_tx_irq[i];
3777 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3778 		ret = request_irq(priv->tx_irq[i],
3779 				  stmmac_msi_intr_tx,
3780 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3781 		if (unlikely(ret < 0)) {
3782 			netdev_err(priv->dev,
3783 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3784 				   __func__, i, priv->tx_irq[i], ret);
3785 			irq_err = REQ_IRQ_ERR_TX;
3786 			irq_idx = i;
3787 			goto irq_error;
3788 		}
3789 		irq_set_affinity_hint(priv->tx_irq[i],
3790 				      cpumask_of(i % num_online_cpus()));
3791 	}
3792 
3793 	return 0;
3794 
3795 irq_error:
3796 	stmmac_free_irq(dev, irq_err, irq_idx);
3797 	return ret;
3798 }
3799 
3800 static int stmmac_request_irq_single(struct net_device *dev)
3801 {
3802 	struct stmmac_priv *priv = netdev_priv(dev);
3803 	enum request_irq_err irq_err;
3804 	int ret;
3805 
3806 	ret = request_irq(dev->irq, stmmac_interrupt,
3807 			  IRQF_SHARED, dev->name, dev);
3808 	if (unlikely(ret < 0)) {
3809 		netdev_err(priv->dev,
3810 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3811 			   __func__, dev->irq, ret);
3812 		irq_err = REQ_IRQ_ERR_MAC;
3813 		goto irq_error;
3814 	}
3815 
3816 	/* Request the Wake IRQ in case of another line
3817 	 * is used for WoL
3818 	 */
3819 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3820 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3821 				  IRQF_SHARED, dev->name, dev);
3822 		if (unlikely(ret < 0)) {
3823 			netdev_err(priv->dev,
3824 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3825 				   __func__, priv->wol_irq, ret);
3826 			irq_err = REQ_IRQ_ERR_WOL;
3827 			goto irq_error;
3828 		}
3829 	}
3830 
3831 	/* Request the IRQ lines */
3832 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3833 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3834 				  IRQF_SHARED, dev->name, dev);
3835 		if (unlikely(ret < 0)) {
3836 			netdev_err(priv->dev,
3837 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3838 				   __func__, priv->lpi_irq, ret);
3839 			irq_err = REQ_IRQ_ERR_LPI;
3840 			goto irq_error;
3841 		}
3842 	}
3843 
3844 	/* Request the common Safety Feature Correctible/Uncorrectible
3845 	 * Error line in case of another line is used
3846 	 */
3847 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3848 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3849 				  IRQF_SHARED, dev->name, dev);
3850 		if (unlikely(ret < 0)) {
3851 			netdev_err(priv->dev,
3852 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3853 				   __func__, priv->sfty_irq, ret);
3854 			irq_err = REQ_IRQ_ERR_SFTY;
3855 			goto irq_error;
3856 		}
3857 	}
3858 
3859 	return 0;
3860 
3861 irq_error:
3862 	stmmac_free_irq(dev, irq_err, 0);
3863 	return ret;
3864 }
3865 
3866 static int stmmac_request_irq(struct net_device *dev)
3867 {
3868 	struct stmmac_priv *priv = netdev_priv(dev);
3869 	int ret;
3870 
3871 	/* Request the IRQ lines */
3872 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3873 		ret = stmmac_request_irq_multi_msi(dev);
3874 	else
3875 		ret = stmmac_request_irq_single(dev);
3876 
3877 	return ret;
3878 }
3879 
3880 /**
3881  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3882  *  @priv: driver private structure
3883  *  @mtu: MTU to setup the dma queue and buf with
3884  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3885  *  Allocate the Tx/Rx DMA queue and init them.
3886  *  Return value:
3887  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3888  */
3889 static struct stmmac_dma_conf *
3890 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3891 {
3892 	struct stmmac_dma_conf *dma_conf;
3893 	int chan, bfsize, ret;
3894 
3895 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3896 	if (!dma_conf) {
3897 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3898 			   __func__);
3899 		return ERR_PTR(-ENOMEM);
3900 	}
3901 
3902 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3903 	if (bfsize < 0)
3904 		bfsize = 0;
3905 
3906 	if (bfsize < BUF_SIZE_16KiB)
3907 		bfsize = stmmac_set_bfsize(mtu, 0);
3908 
3909 	dma_conf->dma_buf_sz = bfsize;
3910 	/* Chose the tx/rx size from the already defined one in the
3911 	 * priv struct. (if defined)
3912 	 */
3913 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3914 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3915 
3916 	if (!dma_conf->dma_tx_size)
3917 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3918 	if (!dma_conf->dma_rx_size)
3919 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3920 
3921 	/* Earlier check for TBS */
3922 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3923 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3924 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3925 
3926 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3927 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3928 	}
3929 
3930 	ret = alloc_dma_desc_resources(priv, dma_conf);
3931 	if (ret < 0) {
3932 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3933 			   __func__);
3934 		goto alloc_error;
3935 	}
3936 
3937 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3938 	if (ret < 0) {
3939 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3940 			   __func__);
3941 		goto init_error;
3942 	}
3943 
3944 	return dma_conf;
3945 
3946 init_error:
3947 	free_dma_desc_resources(priv, dma_conf);
3948 alloc_error:
3949 	kfree(dma_conf);
3950 	return ERR_PTR(ret);
3951 }
3952 
3953 /**
3954  *  __stmmac_open - open entry point of the driver
3955  *  @dev : pointer to the device structure.
3956  *  @dma_conf :  structure to take the dma data
3957  *  Description:
3958  *  This function is the open entry point of the driver.
3959  *  Return value:
3960  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3961  *  file on failure.
3962  */
3963 static int __stmmac_open(struct net_device *dev,
3964 			 struct stmmac_dma_conf *dma_conf)
3965 {
3966 	struct stmmac_priv *priv = netdev_priv(dev);
3967 	int mode = priv->plat->phy_interface;
3968 	u32 chan;
3969 	int ret;
3970 
3971 	/* Initialise the tx lpi timer, converting from msec to usec */
3972 	if (!priv->tx_lpi_timer)
3973 		priv->tx_lpi_timer = eee_timer * 1000;
3974 
3975 	ret = pm_runtime_resume_and_get(priv->device);
3976 	if (ret < 0)
3977 		return ret;
3978 
3979 	if ((!priv->hw->xpcs ||
3980 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3981 		ret = stmmac_init_phy(dev);
3982 		if (ret) {
3983 			netdev_err(priv->dev,
3984 				   "%s: Cannot attach to PHY (error: %d)\n",
3985 				   __func__, ret);
3986 			goto init_phy_error;
3987 		}
3988 	}
3989 
3990 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3991 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3992 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3993 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3994 
3995 	stmmac_reset_queues_param(priv);
3996 
3997 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3998 	    priv->plat->serdes_powerup) {
3999 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4000 		if (ret < 0) {
4001 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4002 				   __func__);
4003 			goto init_error;
4004 		}
4005 	}
4006 
4007 	ret = stmmac_hw_setup(dev, true);
4008 	if (ret < 0) {
4009 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4010 		goto init_error;
4011 	}
4012 
4013 	stmmac_init_coalesce(priv);
4014 
4015 	phylink_start(priv->phylink);
4016 	/* We may have called phylink_speed_down before */
4017 	phylink_speed_up(priv->phylink);
4018 
4019 	ret = stmmac_request_irq(dev);
4020 	if (ret)
4021 		goto irq_error;
4022 
4023 	stmmac_enable_all_queues(priv);
4024 	netif_tx_start_all_queues(priv->dev);
4025 	stmmac_enable_all_dma_irq(priv);
4026 
4027 	return 0;
4028 
4029 irq_error:
4030 	phylink_stop(priv->phylink);
4031 
4032 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4033 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4034 
4035 	stmmac_hw_teardown(dev);
4036 init_error:
4037 	phylink_disconnect_phy(priv->phylink);
4038 init_phy_error:
4039 	pm_runtime_put(priv->device);
4040 	return ret;
4041 }
4042 
4043 static int stmmac_open(struct net_device *dev)
4044 {
4045 	struct stmmac_priv *priv = netdev_priv(dev);
4046 	struct stmmac_dma_conf *dma_conf;
4047 	int ret;
4048 
4049 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4050 	if (IS_ERR(dma_conf))
4051 		return PTR_ERR(dma_conf);
4052 
4053 	ret = __stmmac_open(dev, dma_conf);
4054 	if (ret)
4055 		free_dma_desc_resources(priv, dma_conf);
4056 
4057 	kfree(dma_conf);
4058 	return ret;
4059 }
4060 
4061 /**
4062  *  stmmac_release - close entry point of the driver
4063  *  @dev : device pointer.
4064  *  Description:
4065  *  This is the stop entry point of the driver.
4066  */
4067 static int stmmac_release(struct net_device *dev)
4068 {
4069 	struct stmmac_priv *priv = netdev_priv(dev);
4070 	u32 chan;
4071 
4072 	/* If the PHY or MAC has WoL enabled, then the PHY will not be
4073 	 * suspended when phylink_stop() is called below. Set the PHY
4074 	 * to its slowest speed to save power.
4075 	 */
4076 	if (device_may_wakeup(priv->device))
4077 		phylink_speed_down(priv->phylink, false);
4078 
4079 	/* Stop and disconnect the PHY */
4080 	phylink_stop(priv->phylink);
4081 	phylink_disconnect_phy(priv->phylink);
4082 
4083 	stmmac_disable_all_queues(priv);
4084 
4085 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4086 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4087 
4088 	netif_tx_disable(dev);
4089 
4090 	/* Free the IRQ lines */
4091 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4092 
4093 	/* Stop TX/RX DMA and clear the descriptors */
4094 	stmmac_stop_all_dma(priv);
4095 
4096 	/* Release and free the Rx/Tx resources */
4097 	free_dma_desc_resources(priv, &priv->dma_conf);
4098 
4099 	/* Powerdown Serdes if there is */
4100 	if (priv->plat->serdes_powerdown)
4101 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4102 
4103 	stmmac_release_ptp(priv);
4104 
4105 	if (stmmac_fpe_supported(priv))
4106 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4107 
4108 	pm_runtime_put(priv->device);
4109 
4110 	return 0;
4111 }
4112 
4113 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4114 			       struct stmmac_tx_queue *tx_q)
4115 {
4116 	u16 tag = 0x0, inner_tag = 0x0;
4117 	u32 inner_type = 0x0;
4118 	struct dma_desc *p;
4119 
4120 	if (!priv->dma_cap.vlins)
4121 		return false;
4122 	if (!skb_vlan_tag_present(skb))
4123 		return false;
4124 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4125 		inner_tag = skb_vlan_tag_get(skb);
4126 		inner_type = STMMAC_VLAN_INSERT;
4127 	}
4128 
4129 	tag = skb_vlan_tag_get(skb);
4130 
4131 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4132 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4133 	else
4134 		p = &tx_q->dma_tx[tx_q->cur_tx];
4135 
4136 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4137 		return false;
4138 
4139 	stmmac_set_tx_owner(priv, p);
4140 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4141 	return true;
4142 }
4143 
4144 /**
4145  *  stmmac_tso_allocator - close entry point of the driver
4146  *  @priv: driver private structure
4147  *  @des: buffer start address
4148  *  @total_len: total length to fill in descriptors
4149  *  @last_segment: condition for the last descriptor
4150  *  @queue: TX queue index
4151  *  Description:
4152  *  This function fills descriptor and request new descriptors according to
4153  *  buffer length to fill
4154  */
4155 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4156 				 int total_len, bool last_segment, u32 queue)
4157 {
4158 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4159 	struct dma_desc *desc;
4160 	u32 buff_size;
4161 	int tmp_len;
4162 
4163 	tmp_len = total_len;
4164 
4165 	while (tmp_len > 0) {
4166 		dma_addr_t curr_addr;
4167 
4168 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4169 						priv->dma_conf.dma_tx_size);
4170 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4171 
4172 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4173 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4174 		else
4175 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4176 
4177 		curr_addr = des + (total_len - tmp_len);
4178 		stmmac_set_desc_addr(priv, desc, curr_addr);
4179 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4180 			    TSO_MAX_BUFF_SIZE : tmp_len;
4181 
4182 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4183 				0, 1,
4184 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4185 				0, 0);
4186 
4187 		tmp_len -= TSO_MAX_BUFF_SIZE;
4188 	}
4189 }
4190 
4191 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4192 {
4193 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4194 	int desc_size;
4195 
4196 	if (likely(priv->extend_desc))
4197 		desc_size = sizeof(struct dma_extended_desc);
4198 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4199 		desc_size = sizeof(struct dma_edesc);
4200 	else
4201 		desc_size = sizeof(struct dma_desc);
4202 
4203 	/* The own bit must be the latest setting done when prepare the
4204 	 * descriptor and then barrier is needed to make sure that
4205 	 * all is coherent before granting the DMA engine.
4206 	 */
4207 	wmb();
4208 
4209 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4210 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4211 }
4212 
4213 /**
4214  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4215  *  @skb : the socket buffer
4216  *  @dev : device pointer
4217  *  Description: this is the transmit function that is called on TSO frames
4218  *  (support available on GMAC4 and newer chips).
4219  *  Diagram below show the ring programming in case of TSO frames:
4220  *
4221  *  First Descriptor
4222  *   --------
4223  *   | DES0 |---> buffer1 = L2/L3/L4 header
4224  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4225  *   |      |     width is 32-bit, but we never use it.
4226  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4227  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4228  *   |      |     or 48-bit, and we always use it.
4229  *   | DES2 |---> buffer1 len
4230  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4231  *   --------
4232  *   --------
4233  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4234  *   | DES1 |---> same as the First Descriptor
4235  *   | DES2 |---> buffer1 len
4236  *   | DES3 |
4237  *   --------
4238  *	|
4239  *     ...
4240  *	|
4241  *   --------
4242  *   | DES0 |---> buffer1 = Split TCP Payload
4243  *   | DES1 |---> same as the First Descriptor
4244  *   | DES2 |---> buffer1 len
4245  *   | DES3 |
4246  *   --------
4247  *
4248  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4249  */
4250 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4251 {
4252 	struct dma_desc *desc, *first, *mss_desc = NULL;
4253 	struct stmmac_priv *priv = netdev_priv(dev);
4254 	unsigned int first_entry, tx_packets;
4255 	struct stmmac_txq_stats *txq_stats;
4256 	struct stmmac_tx_queue *tx_q;
4257 	u32 pay_len, mss, queue;
4258 	int i, first_tx, nfrags;
4259 	u8 proto_hdr_len, hdr;
4260 	dma_addr_t des;
4261 	bool set_ic;
4262 
4263 	/* Always insert VLAN tag to SKB payload for TSO frames.
4264 	 *
4265 	 * Never insert VLAN tag by HW, since segments splited by
4266 	 * TSO engine will be un-tagged by mistake.
4267 	 */
4268 	if (skb_vlan_tag_present(skb)) {
4269 		skb = __vlan_hwaccel_push_inside(skb);
4270 		if (unlikely(!skb)) {
4271 			priv->xstats.tx_dropped++;
4272 			return NETDEV_TX_OK;
4273 		}
4274 	}
4275 
4276 	nfrags = skb_shinfo(skb)->nr_frags;
4277 	queue = skb_get_queue_mapping(skb);
4278 
4279 	tx_q = &priv->dma_conf.tx_queue[queue];
4280 	txq_stats = &priv->xstats.txq_stats[queue];
4281 	first_tx = tx_q->cur_tx;
4282 
4283 	/* Compute header lengths */
4284 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4285 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4286 		hdr = sizeof(struct udphdr);
4287 	} else {
4288 		proto_hdr_len = skb_tcp_all_headers(skb);
4289 		hdr = tcp_hdrlen(skb);
4290 	}
4291 
4292 	/* Desc availability based on threshold should be enough safe */
4293 	if (unlikely(stmmac_tx_avail(priv, queue) <
4294 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4295 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4296 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4297 								queue));
4298 			/* This is a hard error, log it. */
4299 			netdev_err(priv->dev,
4300 				   "%s: Tx Ring full when queue awake\n",
4301 				   __func__);
4302 		}
4303 		return NETDEV_TX_BUSY;
4304 	}
4305 
4306 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4307 
4308 	mss = skb_shinfo(skb)->gso_size;
4309 
4310 	/* set new MSS value if needed */
4311 	if (mss != tx_q->mss) {
4312 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4313 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4314 		else
4315 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4316 
4317 		stmmac_set_mss(priv, mss_desc, mss);
4318 		tx_q->mss = mss;
4319 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4320 						priv->dma_conf.dma_tx_size);
4321 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4322 	}
4323 
4324 	if (netif_msg_tx_queued(priv)) {
4325 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4326 			__func__, hdr, proto_hdr_len, pay_len, mss);
4327 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4328 			skb->data_len);
4329 	}
4330 
4331 	first_entry = tx_q->cur_tx;
4332 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4333 
4334 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4335 		desc = &tx_q->dma_entx[first_entry].basic;
4336 	else
4337 		desc = &tx_q->dma_tx[first_entry];
4338 	first = desc;
4339 
4340 	/* first descriptor: fill Headers on Buf1 */
4341 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4342 			     DMA_TO_DEVICE);
4343 	if (dma_mapping_error(priv->device, des))
4344 		goto dma_map_err;
4345 
4346 	stmmac_set_desc_addr(priv, first, des);
4347 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4348 			     (nfrags == 0), queue);
4349 
4350 	/* In case two or more DMA transmit descriptors are allocated for this
4351 	 * non-paged SKB data, the DMA buffer address should be saved to
4352 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4353 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4354 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4355 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4356 	 * sooner or later.
4357 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4358 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4359 	 * this DMA buffer right after the DMA engine completely finishes the
4360 	 * full buffer transmission.
4361 	 */
4362 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4363 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4364 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4365 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4366 
4367 	/* Prepare fragments */
4368 	for (i = 0; i < nfrags; i++) {
4369 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4370 
4371 		des = skb_frag_dma_map(priv->device, frag, 0,
4372 				       skb_frag_size(frag),
4373 				       DMA_TO_DEVICE);
4374 		if (dma_mapping_error(priv->device, des))
4375 			goto dma_map_err;
4376 
4377 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4378 				     (i == nfrags - 1), queue);
4379 
4380 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4381 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4382 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4383 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4384 	}
4385 
4386 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4387 
4388 	/* Only the last descriptor gets to point to the skb. */
4389 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4390 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4391 
4392 	/* Manage tx mitigation */
4393 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4394 	tx_q->tx_count_frames += tx_packets;
4395 
4396 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4397 		set_ic = true;
4398 	else if (!priv->tx_coal_frames[queue])
4399 		set_ic = false;
4400 	else if (tx_packets > priv->tx_coal_frames[queue])
4401 		set_ic = true;
4402 	else if ((tx_q->tx_count_frames %
4403 		  priv->tx_coal_frames[queue]) < tx_packets)
4404 		set_ic = true;
4405 	else
4406 		set_ic = false;
4407 
4408 	if (set_ic) {
4409 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4410 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4411 		else
4412 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4413 
4414 		tx_q->tx_count_frames = 0;
4415 		stmmac_set_tx_ic(priv, desc);
4416 	}
4417 
4418 	/* We've used all descriptors we need for this skb, however,
4419 	 * advance cur_tx so that it references a fresh descriptor.
4420 	 * ndo_start_xmit will fill this descriptor the next time it's
4421 	 * called and stmmac_tx_clean may clean up to this descriptor.
4422 	 */
4423 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4424 
4425 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4426 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4427 			  __func__);
4428 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4429 	}
4430 
4431 	u64_stats_update_begin(&txq_stats->q_syncp);
4432 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4433 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4434 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4435 	if (set_ic)
4436 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4437 	u64_stats_update_end(&txq_stats->q_syncp);
4438 
4439 	if (priv->sarc_type)
4440 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4441 
4442 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4443 		     priv->hwts_tx_en)) {
4444 		/* declare that device is doing timestamping */
4445 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4446 		stmmac_enable_tx_timestamp(priv, first);
4447 	}
4448 
4449 	/* Complete the first descriptor before granting the DMA */
4450 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4451 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4452 				   hdr / 4, (skb->len - proto_hdr_len));
4453 
4454 	/* If context desc is used to change MSS */
4455 	if (mss_desc) {
4456 		/* Make sure that first descriptor has been completely
4457 		 * written, including its own bit. This is because MSS is
4458 		 * actually before first descriptor, so we need to make
4459 		 * sure that MSS's own bit is the last thing written.
4460 		 */
4461 		dma_wmb();
4462 		stmmac_set_tx_owner(priv, mss_desc);
4463 	}
4464 
4465 	if (netif_msg_pktdata(priv)) {
4466 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4467 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4468 			tx_q->cur_tx, first, nfrags);
4469 		pr_info(">>> frame to be transmitted: ");
4470 		print_pkt(skb->data, skb_headlen(skb));
4471 	}
4472 
4473 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4474 	skb_tx_timestamp(skb);
4475 
4476 	stmmac_flush_tx_descriptors(priv, queue);
4477 	stmmac_tx_timer_arm(priv, queue);
4478 
4479 	return NETDEV_TX_OK;
4480 
4481 dma_map_err:
4482 	dev_err(priv->device, "Tx dma map failed\n");
4483 	dev_kfree_skb(skb);
4484 	priv->xstats.tx_dropped++;
4485 	return NETDEV_TX_OK;
4486 }
4487 
4488 /**
4489  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4490  * @skb: socket buffer to check
4491  *
4492  * Check if a packet has an ethertype that will trigger the IP header checks
4493  * and IP/TCP checksum engine of the stmmac core.
4494  *
4495  * Return: true if the ethertype can trigger the checksum engine, false
4496  * otherwise
4497  */
4498 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4499 {
4500 	int depth = 0;
4501 	__be16 proto;
4502 
4503 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4504 				    &depth);
4505 
4506 	return (depth <= ETH_HLEN) &&
4507 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4508 }
4509 
4510 /**
4511  *  stmmac_xmit - Tx entry point of the driver
4512  *  @skb : the socket buffer
4513  *  @dev : device pointer
4514  *  Description : this is the tx entry point of the driver.
4515  *  It programs the chain or the ring and supports oversized frames
4516  *  and SG feature.
4517  */
4518 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4519 {
4520 	unsigned int first_entry, tx_packets, enh_desc;
4521 	struct stmmac_priv *priv = netdev_priv(dev);
4522 	unsigned int nopaged_len = skb_headlen(skb);
4523 	int i, csum_insertion = 0, is_jumbo = 0;
4524 	u32 queue = skb_get_queue_mapping(skb);
4525 	int nfrags = skb_shinfo(skb)->nr_frags;
4526 	int gso = skb_shinfo(skb)->gso_type;
4527 	struct stmmac_txq_stats *txq_stats;
4528 	struct dma_edesc *tbs_desc = NULL;
4529 	struct dma_desc *desc, *first;
4530 	struct stmmac_tx_queue *tx_q;
4531 	bool has_vlan, set_ic;
4532 	int entry, first_tx;
4533 	dma_addr_t des;
4534 
4535 	tx_q = &priv->dma_conf.tx_queue[queue];
4536 	txq_stats = &priv->xstats.txq_stats[queue];
4537 	first_tx = tx_q->cur_tx;
4538 
4539 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4540 		stmmac_stop_sw_lpi(priv);
4541 
4542 	/* Manage oversized TCP frames for GMAC4 device */
4543 	if (skb_is_gso(skb) && priv->tso) {
4544 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4545 			return stmmac_tso_xmit(skb, dev);
4546 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4547 			return stmmac_tso_xmit(skb, dev);
4548 	}
4549 
4550 	if (priv->est && priv->est->enable &&
4551 	    priv->est->max_sdu[queue] &&
4552 	    skb->len > priv->est->max_sdu[queue]){
4553 		priv->xstats.max_sdu_txq_drop[queue]++;
4554 		goto max_sdu_err;
4555 	}
4556 
4557 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4558 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4559 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4560 								queue));
4561 			/* This is a hard error, log it. */
4562 			netdev_err(priv->dev,
4563 				   "%s: Tx Ring full when queue awake\n",
4564 				   __func__);
4565 		}
4566 		return NETDEV_TX_BUSY;
4567 	}
4568 
4569 	/* Check if VLAN can be inserted by HW */
4570 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4571 
4572 	entry = tx_q->cur_tx;
4573 	first_entry = entry;
4574 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4575 
4576 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4577 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4578 	 * queues. In that case, checksum offloading for those queues that don't
4579 	 * support tx coe needs to fallback to software checksum calculation.
4580 	 *
4581 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4582 	 * also have to be checksummed in software.
4583 	 */
4584 	if (csum_insertion &&
4585 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4586 	     !stmmac_has_ip_ethertype(skb))) {
4587 		if (unlikely(skb_checksum_help(skb)))
4588 			goto dma_map_err;
4589 		csum_insertion = !csum_insertion;
4590 	}
4591 
4592 	if (likely(priv->extend_desc))
4593 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4594 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4595 		desc = &tx_q->dma_entx[entry].basic;
4596 	else
4597 		desc = tx_q->dma_tx + entry;
4598 
4599 	first = desc;
4600 
4601 	if (has_vlan)
4602 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4603 
4604 	enh_desc = priv->plat->enh_desc;
4605 	/* To program the descriptors according to the size of the frame */
4606 	if (enh_desc)
4607 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4608 
4609 	if (unlikely(is_jumbo)) {
4610 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4611 		if (unlikely(entry < 0) && (entry != -EINVAL))
4612 			goto dma_map_err;
4613 	}
4614 
4615 	for (i = 0; i < nfrags; i++) {
4616 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4617 		int len = skb_frag_size(frag);
4618 		bool last_segment = (i == (nfrags - 1));
4619 
4620 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4621 		WARN_ON(tx_q->tx_skbuff[entry]);
4622 
4623 		if (likely(priv->extend_desc))
4624 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4625 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4626 			desc = &tx_q->dma_entx[entry].basic;
4627 		else
4628 			desc = tx_q->dma_tx + entry;
4629 
4630 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4631 				       DMA_TO_DEVICE);
4632 		if (dma_mapping_error(priv->device, des))
4633 			goto dma_map_err; /* should reuse desc w/o issues */
4634 
4635 		tx_q->tx_skbuff_dma[entry].buf = des;
4636 
4637 		stmmac_set_desc_addr(priv, desc, des);
4638 
4639 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4640 		tx_q->tx_skbuff_dma[entry].len = len;
4641 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4642 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4643 
4644 		/* Prepare the descriptor and set the own bit too */
4645 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4646 				priv->mode, 1, last_segment, skb->len);
4647 	}
4648 
4649 	/* Only the last descriptor gets to point to the skb. */
4650 	tx_q->tx_skbuff[entry] = skb;
4651 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4652 
4653 	/* According to the coalesce parameter the IC bit for the latest
4654 	 * segment is reset and the timer re-started to clean the tx status.
4655 	 * This approach takes care about the fragments: desc is the first
4656 	 * element in case of no SG.
4657 	 */
4658 	tx_packets = (entry + 1) - first_tx;
4659 	tx_q->tx_count_frames += tx_packets;
4660 
4661 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4662 		set_ic = true;
4663 	else if (!priv->tx_coal_frames[queue])
4664 		set_ic = false;
4665 	else if (tx_packets > priv->tx_coal_frames[queue])
4666 		set_ic = true;
4667 	else if ((tx_q->tx_count_frames %
4668 		  priv->tx_coal_frames[queue]) < tx_packets)
4669 		set_ic = true;
4670 	else
4671 		set_ic = false;
4672 
4673 	if (set_ic) {
4674 		if (likely(priv->extend_desc))
4675 			desc = &tx_q->dma_etx[entry].basic;
4676 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4677 			desc = &tx_q->dma_entx[entry].basic;
4678 		else
4679 			desc = &tx_q->dma_tx[entry];
4680 
4681 		tx_q->tx_count_frames = 0;
4682 		stmmac_set_tx_ic(priv, desc);
4683 	}
4684 
4685 	/* We've used all descriptors we need for this skb, however,
4686 	 * advance cur_tx so that it references a fresh descriptor.
4687 	 * ndo_start_xmit will fill this descriptor the next time it's
4688 	 * called and stmmac_tx_clean may clean up to this descriptor.
4689 	 */
4690 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4691 	tx_q->cur_tx = entry;
4692 
4693 	if (netif_msg_pktdata(priv)) {
4694 		netdev_dbg(priv->dev,
4695 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4696 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4697 			   entry, first, nfrags);
4698 
4699 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4700 		print_pkt(skb->data, skb->len);
4701 	}
4702 
4703 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4704 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4705 			  __func__);
4706 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4707 	}
4708 
4709 	u64_stats_update_begin(&txq_stats->q_syncp);
4710 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4711 	if (set_ic)
4712 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4713 	u64_stats_update_end(&txq_stats->q_syncp);
4714 
4715 	if (priv->sarc_type)
4716 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4717 
4718 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4719 	 * problems because all the descriptors are actually ready to be
4720 	 * passed to the DMA engine.
4721 	 */
4722 	if (likely(!is_jumbo)) {
4723 		bool last_segment = (nfrags == 0);
4724 
4725 		des = dma_map_single(priv->device, skb->data,
4726 				     nopaged_len, DMA_TO_DEVICE);
4727 		if (dma_mapping_error(priv->device, des))
4728 			goto dma_map_err;
4729 
4730 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4731 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4732 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4733 
4734 		stmmac_set_desc_addr(priv, first, des);
4735 
4736 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4737 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4738 
4739 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4740 			     priv->hwts_tx_en)) {
4741 			/* declare that device is doing timestamping */
4742 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4743 			stmmac_enable_tx_timestamp(priv, first);
4744 		}
4745 
4746 		/* Prepare the first descriptor setting the OWN bit too */
4747 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4748 				csum_insertion, priv->mode, 0, last_segment,
4749 				skb->len);
4750 	}
4751 
4752 	if (tx_q->tbs & STMMAC_TBS_EN) {
4753 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4754 
4755 		tbs_desc = &tx_q->dma_entx[first_entry];
4756 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4757 	}
4758 
4759 	stmmac_set_tx_owner(priv, first);
4760 
4761 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4762 
4763 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4764 	skb_tx_timestamp(skb);
4765 	stmmac_flush_tx_descriptors(priv, queue);
4766 	stmmac_tx_timer_arm(priv, queue);
4767 
4768 	return NETDEV_TX_OK;
4769 
4770 dma_map_err:
4771 	netdev_err(priv->dev, "Tx DMA map failed\n");
4772 max_sdu_err:
4773 	dev_kfree_skb(skb);
4774 	priv->xstats.tx_dropped++;
4775 	return NETDEV_TX_OK;
4776 }
4777 
4778 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4779 {
4780 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4781 	__be16 vlan_proto = veth->h_vlan_proto;
4782 	u16 vlanid;
4783 
4784 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4785 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4786 	    (vlan_proto == htons(ETH_P_8021AD) &&
4787 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4788 		/* pop the vlan tag */
4789 		vlanid = ntohs(veth->h_vlan_TCI);
4790 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4791 		skb_pull(skb, VLAN_HLEN);
4792 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4793 	}
4794 }
4795 
4796 /**
4797  * stmmac_rx_refill - refill used skb preallocated buffers
4798  * @priv: driver private structure
4799  * @queue: RX queue index
4800  * Description : this is to reallocate the skb for the reception process
4801  * that is based on zero-copy.
4802  */
4803 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4804 {
4805 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4806 	int dirty = stmmac_rx_dirty(priv, queue);
4807 	unsigned int entry = rx_q->dirty_rx;
4808 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4809 
4810 	if (priv->dma_cap.host_dma_width <= 32)
4811 		gfp |= GFP_DMA32;
4812 
4813 	while (dirty-- > 0) {
4814 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4815 		struct dma_desc *p;
4816 		bool use_rx_wd;
4817 
4818 		if (priv->extend_desc)
4819 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4820 		else
4821 			p = rx_q->dma_rx + entry;
4822 
4823 		if (!buf->page) {
4824 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4825 			if (!buf->page)
4826 				break;
4827 		}
4828 
4829 		if (priv->sph && !buf->sec_page) {
4830 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4831 			if (!buf->sec_page)
4832 				break;
4833 
4834 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4835 		}
4836 
4837 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4838 
4839 		stmmac_set_desc_addr(priv, p, buf->addr);
4840 		if (priv->sph)
4841 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4842 		else
4843 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4844 		stmmac_refill_desc3(priv, rx_q, p);
4845 
4846 		rx_q->rx_count_frames++;
4847 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4848 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4849 			rx_q->rx_count_frames = 0;
4850 
4851 		use_rx_wd = !priv->rx_coal_frames[queue];
4852 		use_rx_wd |= rx_q->rx_count_frames > 0;
4853 		if (!priv->use_riwt)
4854 			use_rx_wd = false;
4855 
4856 		dma_wmb();
4857 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4858 
4859 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4860 	}
4861 	rx_q->dirty_rx = entry;
4862 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4863 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4864 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4865 }
4866 
4867 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4868 				       struct dma_desc *p,
4869 				       int status, unsigned int len)
4870 {
4871 	unsigned int plen = 0, hlen = 0;
4872 	int coe = priv->hw->rx_csum;
4873 
4874 	/* Not first descriptor, buffer is always zero */
4875 	if (priv->sph && len)
4876 		return 0;
4877 
4878 	/* First descriptor, get split header length */
4879 	stmmac_get_rx_header_len(priv, p, &hlen);
4880 	if (priv->sph && hlen) {
4881 		priv->xstats.rx_split_hdr_pkt_n++;
4882 		return hlen;
4883 	}
4884 
4885 	/* First descriptor, not last descriptor and not split header */
4886 	if (status & rx_not_ls)
4887 		return priv->dma_conf.dma_buf_sz;
4888 
4889 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4890 
4891 	/* First descriptor and last descriptor and not split header */
4892 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4893 }
4894 
4895 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4896 				       struct dma_desc *p,
4897 				       int status, unsigned int len)
4898 {
4899 	int coe = priv->hw->rx_csum;
4900 	unsigned int plen = 0;
4901 
4902 	/* Not split header, buffer is not available */
4903 	if (!priv->sph)
4904 		return 0;
4905 
4906 	/* Not last descriptor */
4907 	if (status & rx_not_ls)
4908 		return priv->dma_conf.dma_buf_sz;
4909 
4910 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4911 
4912 	/* Last descriptor */
4913 	return plen - len;
4914 }
4915 
4916 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4917 				struct xdp_frame *xdpf, bool dma_map)
4918 {
4919 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4920 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4921 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
4922 	unsigned int entry = tx_q->cur_tx;
4923 	struct dma_desc *tx_desc;
4924 	dma_addr_t dma_addr;
4925 	bool set_ic;
4926 
4927 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4928 		return STMMAC_XDP_CONSUMED;
4929 
4930 	if (priv->est && priv->est->enable &&
4931 	    priv->est->max_sdu[queue] &&
4932 	    xdpf->len > priv->est->max_sdu[queue]) {
4933 		priv->xstats.max_sdu_txq_drop[queue]++;
4934 		return STMMAC_XDP_CONSUMED;
4935 	}
4936 
4937 	if (likely(priv->extend_desc))
4938 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4939 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4940 		tx_desc = &tx_q->dma_entx[entry].basic;
4941 	else
4942 		tx_desc = tx_q->dma_tx + entry;
4943 
4944 	if (dma_map) {
4945 		dma_addr = dma_map_single(priv->device, xdpf->data,
4946 					  xdpf->len, DMA_TO_DEVICE);
4947 		if (dma_mapping_error(priv->device, dma_addr))
4948 			return STMMAC_XDP_CONSUMED;
4949 
4950 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4951 	} else {
4952 		struct page *page = virt_to_page(xdpf->data);
4953 
4954 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4955 			   xdpf->headroom;
4956 		dma_sync_single_for_device(priv->device, dma_addr,
4957 					   xdpf->len, DMA_BIDIRECTIONAL);
4958 
4959 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4960 	}
4961 
4962 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4963 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4964 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4965 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4966 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4967 
4968 	tx_q->xdpf[entry] = xdpf;
4969 
4970 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4971 
4972 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4973 			       csum, priv->mode, true, true,
4974 			       xdpf->len);
4975 
4976 	tx_q->tx_count_frames++;
4977 
4978 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4979 		set_ic = true;
4980 	else
4981 		set_ic = false;
4982 
4983 	if (set_ic) {
4984 		tx_q->tx_count_frames = 0;
4985 		stmmac_set_tx_ic(priv, tx_desc);
4986 		u64_stats_update_begin(&txq_stats->q_syncp);
4987 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4988 		u64_stats_update_end(&txq_stats->q_syncp);
4989 	}
4990 
4991 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4992 
4993 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4994 	tx_q->cur_tx = entry;
4995 
4996 	return STMMAC_XDP_TX;
4997 }
4998 
4999 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5000 				   int cpu)
5001 {
5002 	int index = cpu;
5003 
5004 	if (unlikely(index < 0))
5005 		index = 0;
5006 
5007 	while (index >= priv->plat->tx_queues_to_use)
5008 		index -= priv->plat->tx_queues_to_use;
5009 
5010 	return index;
5011 }
5012 
5013 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5014 				struct xdp_buff *xdp)
5015 {
5016 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5017 	int cpu = smp_processor_id();
5018 	struct netdev_queue *nq;
5019 	int queue;
5020 	int res;
5021 
5022 	if (unlikely(!xdpf))
5023 		return STMMAC_XDP_CONSUMED;
5024 
5025 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5026 	nq = netdev_get_tx_queue(priv->dev, queue);
5027 
5028 	__netif_tx_lock(nq, cpu);
5029 	/* Avoids TX time-out as we are sharing with slow path */
5030 	txq_trans_cond_update(nq);
5031 
5032 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5033 	if (res == STMMAC_XDP_TX)
5034 		stmmac_flush_tx_descriptors(priv, queue);
5035 
5036 	__netif_tx_unlock(nq);
5037 
5038 	return res;
5039 }
5040 
5041 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5042 				 struct bpf_prog *prog,
5043 				 struct xdp_buff *xdp)
5044 {
5045 	u32 act;
5046 	int res;
5047 
5048 	act = bpf_prog_run_xdp(prog, xdp);
5049 	switch (act) {
5050 	case XDP_PASS:
5051 		res = STMMAC_XDP_PASS;
5052 		break;
5053 	case XDP_TX:
5054 		res = stmmac_xdp_xmit_back(priv, xdp);
5055 		break;
5056 	case XDP_REDIRECT:
5057 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5058 			res = STMMAC_XDP_CONSUMED;
5059 		else
5060 			res = STMMAC_XDP_REDIRECT;
5061 		break;
5062 	default:
5063 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5064 		fallthrough;
5065 	case XDP_ABORTED:
5066 		trace_xdp_exception(priv->dev, prog, act);
5067 		fallthrough;
5068 	case XDP_DROP:
5069 		res = STMMAC_XDP_CONSUMED;
5070 		break;
5071 	}
5072 
5073 	return res;
5074 }
5075 
5076 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5077 					   struct xdp_buff *xdp)
5078 {
5079 	struct bpf_prog *prog;
5080 	int res;
5081 
5082 	prog = READ_ONCE(priv->xdp_prog);
5083 	if (!prog) {
5084 		res = STMMAC_XDP_PASS;
5085 		goto out;
5086 	}
5087 
5088 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5089 out:
5090 	return ERR_PTR(-res);
5091 }
5092 
5093 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5094 				   int xdp_status)
5095 {
5096 	int cpu = smp_processor_id();
5097 	int queue;
5098 
5099 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5100 
5101 	if (xdp_status & STMMAC_XDP_TX)
5102 		stmmac_tx_timer_arm(priv, queue);
5103 
5104 	if (xdp_status & STMMAC_XDP_REDIRECT)
5105 		xdp_do_flush();
5106 }
5107 
5108 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5109 					       struct xdp_buff *xdp)
5110 {
5111 	unsigned int metasize = xdp->data - xdp->data_meta;
5112 	unsigned int datasize = xdp->data_end - xdp->data;
5113 	struct sk_buff *skb;
5114 
5115 	skb = napi_alloc_skb(&ch->rxtx_napi,
5116 			     xdp->data_end - xdp->data_hard_start);
5117 	if (unlikely(!skb))
5118 		return NULL;
5119 
5120 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5121 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5122 	if (metasize)
5123 		skb_metadata_set(skb, metasize);
5124 
5125 	return skb;
5126 }
5127 
5128 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5129 				   struct dma_desc *p, struct dma_desc *np,
5130 				   struct xdp_buff *xdp)
5131 {
5132 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5133 	struct stmmac_channel *ch = &priv->channel[queue];
5134 	unsigned int len = xdp->data_end - xdp->data;
5135 	enum pkt_hash_types hash_type;
5136 	int coe = priv->hw->rx_csum;
5137 	struct sk_buff *skb;
5138 	u32 hash;
5139 
5140 	skb = stmmac_construct_skb_zc(ch, xdp);
5141 	if (!skb) {
5142 		priv->xstats.rx_dropped++;
5143 		return;
5144 	}
5145 
5146 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5147 	if (priv->hw->hw_vlan_en)
5148 		/* MAC level stripping. */
5149 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5150 	else
5151 		/* Driver level stripping. */
5152 		stmmac_rx_vlan(priv->dev, skb);
5153 	skb->protocol = eth_type_trans(skb, priv->dev);
5154 
5155 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5156 		skb_checksum_none_assert(skb);
5157 	else
5158 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5159 
5160 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5161 		skb_set_hash(skb, hash, hash_type);
5162 
5163 	skb_record_rx_queue(skb, queue);
5164 	napi_gro_receive(&ch->rxtx_napi, skb);
5165 
5166 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5167 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5168 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5169 	u64_stats_update_end(&rxq_stats->napi_syncp);
5170 }
5171 
5172 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5173 {
5174 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5175 	unsigned int entry = rx_q->dirty_rx;
5176 	struct dma_desc *rx_desc = NULL;
5177 	bool ret = true;
5178 
5179 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5180 
5181 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5182 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5183 		dma_addr_t dma_addr;
5184 		bool use_rx_wd;
5185 
5186 		if (!buf->xdp) {
5187 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5188 			if (!buf->xdp) {
5189 				ret = false;
5190 				break;
5191 			}
5192 		}
5193 
5194 		if (priv->extend_desc)
5195 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5196 		else
5197 			rx_desc = rx_q->dma_rx + entry;
5198 
5199 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5200 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5201 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5202 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5203 
5204 		rx_q->rx_count_frames++;
5205 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5206 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5207 			rx_q->rx_count_frames = 0;
5208 
5209 		use_rx_wd = !priv->rx_coal_frames[queue];
5210 		use_rx_wd |= rx_q->rx_count_frames > 0;
5211 		if (!priv->use_riwt)
5212 			use_rx_wd = false;
5213 
5214 		dma_wmb();
5215 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5216 
5217 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5218 	}
5219 
5220 	if (rx_desc) {
5221 		rx_q->dirty_rx = entry;
5222 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5223 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5224 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5225 	}
5226 
5227 	return ret;
5228 }
5229 
5230 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5231 {
5232 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5233 	 * to represent incoming packet, whereas cb field in the same structure
5234 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5235 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5236 	 */
5237 	return (struct stmmac_xdp_buff *)xdp;
5238 }
5239 
5240 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5241 {
5242 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5243 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5244 	unsigned int count = 0, error = 0, len = 0;
5245 	int dirty = stmmac_rx_dirty(priv, queue);
5246 	unsigned int next_entry = rx_q->cur_rx;
5247 	u32 rx_errors = 0, rx_dropped = 0;
5248 	unsigned int desc_size;
5249 	struct bpf_prog *prog;
5250 	bool failure = false;
5251 	int xdp_status = 0;
5252 	int status = 0;
5253 
5254 	if (netif_msg_rx_status(priv)) {
5255 		void *rx_head;
5256 
5257 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5258 		if (priv->extend_desc) {
5259 			rx_head = (void *)rx_q->dma_erx;
5260 			desc_size = sizeof(struct dma_extended_desc);
5261 		} else {
5262 			rx_head = (void *)rx_q->dma_rx;
5263 			desc_size = sizeof(struct dma_desc);
5264 		}
5265 
5266 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5267 				    rx_q->dma_rx_phy, desc_size);
5268 	}
5269 	while (count < limit) {
5270 		struct stmmac_rx_buffer *buf;
5271 		struct stmmac_xdp_buff *ctx;
5272 		unsigned int buf1_len = 0;
5273 		struct dma_desc *np, *p;
5274 		int entry;
5275 		int res;
5276 
5277 		if (!count && rx_q->state_saved) {
5278 			error = rx_q->state.error;
5279 			len = rx_q->state.len;
5280 		} else {
5281 			rx_q->state_saved = false;
5282 			error = 0;
5283 			len = 0;
5284 		}
5285 
5286 		if (count >= limit)
5287 			break;
5288 
5289 read_again:
5290 		buf1_len = 0;
5291 		entry = next_entry;
5292 		buf = &rx_q->buf_pool[entry];
5293 
5294 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5295 			failure = failure ||
5296 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5297 			dirty = 0;
5298 		}
5299 
5300 		if (priv->extend_desc)
5301 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5302 		else
5303 			p = rx_q->dma_rx + entry;
5304 
5305 		/* read the status of the incoming frame */
5306 		status = stmmac_rx_status(priv, &priv->xstats, p);
5307 		/* check if managed by the DMA otherwise go ahead */
5308 		if (unlikely(status & dma_own))
5309 			break;
5310 
5311 		/* Prefetch the next RX descriptor */
5312 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5313 						priv->dma_conf.dma_rx_size);
5314 		next_entry = rx_q->cur_rx;
5315 
5316 		if (priv->extend_desc)
5317 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5318 		else
5319 			np = rx_q->dma_rx + next_entry;
5320 
5321 		prefetch(np);
5322 
5323 		/* Ensure a valid XSK buffer before proceed */
5324 		if (!buf->xdp)
5325 			break;
5326 
5327 		if (priv->extend_desc)
5328 			stmmac_rx_extended_status(priv, &priv->xstats,
5329 						  rx_q->dma_erx + entry);
5330 		if (unlikely(status == discard_frame)) {
5331 			xsk_buff_free(buf->xdp);
5332 			buf->xdp = NULL;
5333 			dirty++;
5334 			error = 1;
5335 			if (!priv->hwts_rx_en)
5336 				rx_errors++;
5337 		}
5338 
5339 		if (unlikely(error && (status & rx_not_ls)))
5340 			goto read_again;
5341 		if (unlikely(error)) {
5342 			count++;
5343 			continue;
5344 		}
5345 
5346 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5347 		if (likely(status & rx_not_ls)) {
5348 			xsk_buff_free(buf->xdp);
5349 			buf->xdp = NULL;
5350 			dirty++;
5351 			count++;
5352 			goto read_again;
5353 		}
5354 
5355 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5356 		ctx->priv = priv;
5357 		ctx->desc = p;
5358 		ctx->ndesc = np;
5359 
5360 		/* XDP ZC Frame only support primary buffers for now */
5361 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5362 		len += buf1_len;
5363 
5364 		/* ACS is disabled; strip manually. */
5365 		if (likely(!(status & rx_not_ls))) {
5366 			buf1_len -= ETH_FCS_LEN;
5367 			len -= ETH_FCS_LEN;
5368 		}
5369 
5370 		/* RX buffer is good and fit into a XSK pool buffer */
5371 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5372 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5373 
5374 		prog = READ_ONCE(priv->xdp_prog);
5375 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5376 
5377 		switch (res) {
5378 		case STMMAC_XDP_PASS:
5379 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5380 			xsk_buff_free(buf->xdp);
5381 			break;
5382 		case STMMAC_XDP_CONSUMED:
5383 			xsk_buff_free(buf->xdp);
5384 			rx_dropped++;
5385 			break;
5386 		case STMMAC_XDP_TX:
5387 		case STMMAC_XDP_REDIRECT:
5388 			xdp_status |= res;
5389 			break;
5390 		}
5391 
5392 		buf->xdp = NULL;
5393 		dirty++;
5394 		count++;
5395 	}
5396 
5397 	if (status & rx_not_ls) {
5398 		rx_q->state_saved = true;
5399 		rx_q->state.error = error;
5400 		rx_q->state.len = len;
5401 	}
5402 
5403 	stmmac_finalize_xdp_rx(priv, xdp_status);
5404 
5405 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5406 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5407 	u64_stats_update_end(&rxq_stats->napi_syncp);
5408 
5409 	priv->xstats.rx_dropped += rx_dropped;
5410 	priv->xstats.rx_errors += rx_errors;
5411 
5412 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5413 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5414 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5415 		else
5416 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5417 
5418 		return (int)count;
5419 	}
5420 
5421 	return failure ? limit : (int)count;
5422 }
5423 
5424 /**
5425  * stmmac_rx - manage the receive process
5426  * @priv: driver private structure
5427  * @limit: napi bugget
5428  * @queue: RX queue index.
5429  * Description :  this the function called by the napi poll method.
5430  * It gets all the frames inside the ring.
5431  */
5432 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5433 {
5434 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5435 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5436 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5437 	struct stmmac_channel *ch = &priv->channel[queue];
5438 	unsigned int count = 0, error = 0, len = 0;
5439 	int status = 0, coe = priv->hw->rx_csum;
5440 	unsigned int next_entry = rx_q->cur_rx;
5441 	enum dma_data_direction dma_dir;
5442 	unsigned int desc_size;
5443 	struct sk_buff *skb = NULL;
5444 	struct stmmac_xdp_buff ctx;
5445 	int xdp_status = 0;
5446 	int bufsz;
5447 
5448 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5449 	bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5450 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5451 
5452 	if (netif_msg_rx_status(priv)) {
5453 		void *rx_head;
5454 
5455 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5456 		if (priv->extend_desc) {
5457 			rx_head = (void *)rx_q->dma_erx;
5458 			desc_size = sizeof(struct dma_extended_desc);
5459 		} else {
5460 			rx_head = (void *)rx_q->dma_rx;
5461 			desc_size = sizeof(struct dma_desc);
5462 		}
5463 
5464 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5465 				    rx_q->dma_rx_phy, desc_size);
5466 	}
5467 	while (count < limit) {
5468 		unsigned int buf1_len = 0, buf2_len = 0;
5469 		enum pkt_hash_types hash_type;
5470 		struct stmmac_rx_buffer *buf;
5471 		struct dma_desc *np, *p;
5472 		int entry;
5473 		u32 hash;
5474 
5475 		if (!count && rx_q->state_saved) {
5476 			skb = rx_q->state.skb;
5477 			error = rx_q->state.error;
5478 			len = rx_q->state.len;
5479 		} else {
5480 			rx_q->state_saved = false;
5481 			skb = NULL;
5482 			error = 0;
5483 			len = 0;
5484 		}
5485 
5486 read_again:
5487 		if (count >= limit)
5488 			break;
5489 
5490 		buf1_len = 0;
5491 		buf2_len = 0;
5492 		entry = next_entry;
5493 		buf = &rx_q->buf_pool[entry];
5494 
5495 		if (priv->extend_desc)
5496 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5497 		else
5498 			p = rx_q->dma_rx + entry;
5499 
5500 		/* read the status of the incoming frame */
5501 		status = stmmac_rx_status(priv, &priv->xstats, p);
5502 		/* check if managed by the DMA otherwise go ahead */
5503 		if (unlikely(status & dma_own))
5504 			break;
5505 
5506 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5507 						priv->dma_conf.dma_rx_size);
5508 		next_entry = rx_q->cur_rx;
5509 
5510 		if (priv->extend_desc)
5511 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5512 		else
5513 			np = rx_q->dma_rx + next_entry;
5514 
5515 		prefetch(np);
5516 
5517 		if (priv->extend_desc)
5518 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5519 		if (unlikely(status == discard_frame)) {
5520 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5521 			buf->page = NULL;
5522 			error = 1;
5523 			if (!priv->hwts_rx_en)
5524 				rx_errors++;
5525 		}
5526 
5527 		if (unlikely(error && (status & rx_not_ls)))
5528 			goto read_again;
5529 		if (unlikely(error)) {
5530 			dev_kfree_skb(skb);
5531 			skb = NULL;
5532 			count++;
5533 			continue;
5534 		}
5535 
5536 		/* Buffer is good. Go on. */
5537 
5538 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5539 		len += buf1_len;
5540 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5541 		len += buf2_len;
5542 
5543 		/* ACS is disabled; strip manually. */
5544 		if (likely(!(status & rx_not_ls))) {
5545 			if (buf2_len) {
5546 				buf2_len -= ETH_FCS_LEN;
5547 				len -= ETH_FCS_LEN;
5548 			} else if (buf1_len) {
5549 				buf1_len -= ETH_FCS_LEN;
5550 				len -= ETH_FCS_LEN;
5551 			}
5552 		}
5553 
5554 		if (!skb) {
5555 			unsigned int pre_len, sync_len;
5556 
5557 			dma_sync_single_for_cpu(priv->device, buf->addr,
5558 						buf1_len, dma_dir);
5559 			net_prefetch(page_address(buf->page) +
5560 				     buf->page_offset);
5561 
5562 			xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5563 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5564 					 buf->page_offset, buf1_len, true);
5565 
5566 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5567 				  buf->page_offset;
5568 
5569 			ctx.priv = priv;
5570 			ctx.desc = p;
5571 			ctx.ndesc = np;
5572 
5573 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5574 			/* Due xdp_adjust_tail: DMA sync for_device
5575 			 * cover max len CPU touch
5576 			 */
5577 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5578 				   buf->page_offset;
5579 			sync_len = max(sync_len, pre_len);
5580 
5581 			/* For Not XDP_PASS verdict */
5582 			if (IS_ERR(skb)) {
5583 				unsigned int xdp_res = -PTR_ERR(skb);
5584 
5585 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5586 					page_pool_put_page(rx_q->page_pool,
5587 							   virt_to_head_page(ctx.xdp.data),
5588 							   sync_len, true);
5589 					buf->page = NULL;
5590 					rx_dropped++;
5591 
5592 					/* Clear skb as it was set as
5593 					 * status by XDP program.
5594 					 */
5595 					skb = NULL;
5596 
5597 					if (unlikely((status & rx_not_ls)))
5598 						goto read_again;
5599 
5600 					count++;
5601 					continue;
5602 				} else if (xdp_res & (STMMAC_XDP_TX |
5603 						      STMMAC_XDP_REDIRECT)) {
5604 					xdp_status |= xdp_res;
5605 					buf->page = NULL;
5606 					skb = NULL;
5607 					count++;
5608 					continue;
5609 				}
5610 			}
5611 		}
5612 
5613 		if (!skb) {
5614 			unsigned int head_pad_len;
5615 
5616 			/* XDP program may expand or reduce tail */
5617 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5618 
5619 			skb = napi_build_skb(page_address(buf->page),
5620 					     rx_q->napi_skb_frag_size);
5621 			if (!skb) {
5622 				page_pool_recycle_direct(rx_q->page_pool,
5623 							 buf->page);
5624 				rx_dropped++;
5625 				count++;
5626 				goto drain_data;
5627 			}
5628 
5629 			/* XDP program may adjust header */
5630 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5631 			skb_reserve(skb, head_pad_len);
5632 			skb_put(skb, buf1_len);
5633 			skb_mark_for_recycle(skb);
5634 			buf->page = NULL;
5635 		} else if (buf1_len) {
5636 			dma_sync_single_for_cpu(priv->device, buf->addr,
5637 						buf1_len, dma_dir);
5638 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5639 					buf->page, buf->page_offset, buf1_len,
5640 					priv->dma_conf.dma_buf_sz);
5641 			buf->page = NULL;
5642 		}
5643 
5644 		if (buf2_len) {
5645 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5646 						buf2_len, dma_dir);
5647 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5648 					buf->sec_page, 0, buf2_len,
5649 					priv->dma_conf.dma_buf_sz);
5650 			buf->sec_page = NULL;
5651 		}
5652 
5653 drain_data:
5654 		if (likely(status & rx_not_ls))
5655 			goto read_again;
5656 		if (!skb)
5657 			continue;
5658 
5659 		/* Got entire packet into SKB. Finish it. */
5660 
5661 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5662 
5663 		if (priv->hw->hw_vlan_en)
5664 			/* MAC level stripping. */
5665 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5666 		else
5667 			/* Driver level stripping. */
5668 			stmmac_rx_vlan(priv->dev, skb);
5669 
5670 		skb->protocol = eth_type_trans(skb, priv->dev);
5671 
5672 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5673 		    (status & csum_none))
5674 			skb_checksum_none_assert(skb);
5675 		else
5676 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5677 
5678 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5679 			skb_set_hash(skb, hash, hash_type);
5680 
5681 		skb_record_rx_queue(skb, queue);
5682 		napi_gro_receive(&ch->rx_napi, skb);
5683 		skb = NULL;
5684 
5685 		rx_packets++;
5686 		rx_bytes += len;
5687 		count++;
5688 	}
5689 
5690 	if (status & rx_not_ls || skb) {
5691 		rx_q->state_saved = true;
5692 		rx_q->state.skb = skb;
5693 		rx_q->state.error = error;
5694 		rx_q->state.len = len;
5695 	}
5696 
5697 	stmmac_finalize_xdp_rx(priv, xdp_status);
5698 
5699 	stmmac_rx_refill(priv, queue);
5700 
5701 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5702 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5703 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5704 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5705 	u64_stats_update_end(&rxq_stats->napi_syncp);
5706 
5707 	priv->xstats.rx_dropped += rx_dropped;
5708 	priv->xstats.rx_errors += rx_errors;
5709 
5710 	return count;
5711 }
5712 
5713 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5714 {
5715 	struct stmmac_channel *ch =
5716 		container_of(napi, struct stmmac_channel, rx_napi);
5717 	struct stmmac_priv *priv = ch->priv_data;
5718 	struct stmmac_rxq_stats *rxq_stats;
5719 	u32 chan = ch->index;
5720 	int work_done;
5721 
5722 	rxq_stats = &priv->xstats.rxq_stats[chan];
5723 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5724 	u64_stats_inc(&rxq_stats->napi.poll);
5725 	u64_stats_update_end(&rxq_stats->napi_syncp);
5726 
5727 	work_done = stmmac_rx(priv, budget, chan);
5728 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5729 		unsigned long flags;
5730 
5731 		spin_lock_irqsave(&ch->lock, flags);
5732 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5733 		spin_unlock_irqrestore(&ch->lock, flags);
5734 	}
5735 
5736 	return work_done;
5737 }
5738 
5739 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5740 {
5741 	struct stmmac_channel *ch =
5742 		container_of(napi, struct stmmac_channel, tx_napi);
5743 	struct stmmac_priv *priv = ch->priv_data;
5744 	struct stmmac_txq_stats *txq_stats;
5745 	bool pending_packets = false;
5746 	u32 chan = ch->index;
5747 	int work_done;
5748 
5749 	txq_stats = &priv->xstats.txq_stats[chan];
5750 	u64_stats_update_begin(&txq_stats->napi_syncp);
5751 	u64_stats_inc(&txq_stats->napi.poll);
5752 	u64_stats_update_end(&txq_stats->napi_syncp);
5753 
5754 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5755 	work_done = min(work_done, budget);
5756 
5757 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5758 		unsigned long flags;
5759 
5760 		spin_lock_irqsave(&ch->lock, flags);
5761 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5762 		spin_unlock_irqrestore(&ch->lock, flags);
5763 	}
5764 
5765 	/* TX still have packet to handle, check if we need to arm tx timer */
5766 	if (pending_packets)
5767 		stmmac_tx_timer_arm(priv, chan);
5768 
5769 	return work_done;
5770 }
5771 
5772 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5773 {
5774 	struct stmmac_channel *ch =
5775 		container_of(napi, struct stmmac_channel, rxtx_napi);
5776 	struct stmmac_priv *priv = ch->priv_data;
5777 	bool tx_pending_packets = false;
5778 	int rx_done, tx_done, rxtx_done;
5779 	struct stmmac_rxq_stats *rxq_stats;
5780 	struct stmmac_txq_stats *txq_stats;
5781 	u32 chan = ch->index;
5782 
5783 	rxq_stats = &priv->xstats.rxq_stats[chan];
5784 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5785 	u64_stats_inc(&rxq_stats->napi.poll);
5786 	u64_stats_update_end(&rxq_stats->napi_syncp);
5787 
5788 	txq_stats = &priv->xstats.txq_stats[chan];
5789 	u64_stats_update_begin(&txq_stats->napi_syncp);
5790 	u64_stats_inc(&txq_stats->napi.poll);
5791 	u64_stats_update_end(&txq_stats->napi_syncp);
5792 
5793 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5794 	tx_done = min(tx_done, budget);
5795 
5796 	rx_done = stmmac_rx_zc(priv, budget, chan);
5797 
5798 	rxtx_done = max(tx_done, rx_done);
5799 
5800 	/* If either TX or RX work is not complete, return budget
5801 	 * and keep pooling
5802 	 */
5803 	if (rxtx_done >= budget)
5804 		return budget;
5805 
5806 	/* all work done, exit the polling mode */
5807 	if (napi_complete_done(napi, rxtx_done)) {
5808 		unsigned long flags;
5809 
5810 		spin_lock_irqsave(&ch->lock, flags);
5811 		/* Both RX and TX work done are compelte,
5812 		 * so enable both RX & TX IRQs.
5813 		 */
5814 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5815 		spin_unlock_irqrestore(&ch->lock, flags);
5816 	}
5817 
5818 	/* TX still have packet to handle, check if we need to arm tx timer */
5819 	if (tx_pending_packets)
5820 		stmmac_tx_timer_arm(priv, chan);
5821 
5822 	return min(rxtx_done, budget - 1);
5823 }
5824 
5825 /**
5826  *  stmmac_tx_timeout
5827  *  @dev : Pointer to net device structure
5828  *  @txqueue: the index of the hanging transmit queue
5829  *  Description: this function is called when a packet transmission fails to
5830  *   complete within a reasonable time. The driver will mark the error in the
5831  *   netdev structure and arrange for the device to be reset to a sane state
5832  *   in order to transmit a new packet.
5833  */
5834 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5835 {
5836 	struct stmmac_priv *priv = netdev_priv(dev);
5837 
5838 	stmmac_global_err(priv);
5839 }
5840 
5841 /**
5842  *  stmmac_set_rx_mode - entry point for multicast addressing
5843  *  @dev : pointer to the device structure
5844  *  Description:
5845  *  This function is a driver entry point which gets called by the kernel
5846  *  whenever multicast addresses must be enabled/disabled.
5847  *  Return value:
5848  *  void.
5849  *
5850  *  FIXME: This may need RXC to be running, but it may be called with BH
5851  *  disabled, which means we can't call phylink_rx_clk_stop*().
5852  */
5853 static void stmmac_set_rx_mode(struct net_device *dev)
5854 {
5855 	struct stmmac_priv *priv = netdev_priv(dev);
5856 
5857 	stmmac_set_filter(priv, priv->hw, dev);
5858 }
5859 
5860 /**
5861  *  stmmac_change_mtu - entry point to change MTU size for the device.
5862  *  @dev : device pointer.
5863  *  @new_mtu : the new MTU size for the device.
5864  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5865  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5866  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5867  *  Return value:
5868  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5869  *  file on failure.
5870  */
5871 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5872 {
5873 	struct stmmac_priv *priv = netdev_priv(dev);
5874 	int txfifosz = priv->plat->tx_fifo_size;
5875 	struct stmmac_dma_conf *dma_conf;
5876 	const int mtu = new_mtu;
5877 	int ret;
5878 
5879 	if (txfifosz == 0)
5880 		txfifosz = priv->dma_cap.tx_fifo_size;
5881 
5882 	txfifosz /= priv->plat->tx_queues_to_use;
5883 
5884 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5885 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5886 		return -EINVAL;
5887 	}
5888 
5889 	new_mtu = STMMAC_ALIGN(new_mtu);
5890 
5891 	/* If condition true, FIFO is too small or MTU too large */
5892 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5893 		return -EINVAL;
5894 
5895 	if (netif_running(dev)) {
5896 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5897 		/* Try to allocate the new DMA conf with the new mtu */
5898 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5899 		if (IS_ERR(dma_conf)) {
5900 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5901 				   mtu);
5902 			return PTR_ERR(dma_conf);
5903 		}
5904 
5905 		stmmac_release(dev);
5906 
5907 		ret = __stmmac_open(dev, dma_conf);
5908 		if (ret) {
5909 			free_dma_desc_resources(priv, dma_conf);
5910 			kfree(dma_conf);
5911 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5912 			return ret;
5913 		}
5914 
5915 		kfree(dma_conf);
5916 
5917 		stmmac_set_rx_mode(dev);
5918 	}
5919 
5920 	WRITE_ONCE(dev->mtu, mtu);
5921 	netdev_update_features(dev);
5922 
5923 	return 0;
5924 }
5925 
5926 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5927 					     netdev_features_t features)
5928 {
5929 	struct stmmac_priv *priv = netdev_priv(dev);
5930 
5931 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5932 		features &= ~NETIF_F_RXCSUM;
5933 
5934 	if (!priv->plat->tx_coe)
5935 		features &= ~NETIF_F_CSUM_MASK;
5936 
5937 	/* Some GMAC devices have a bugged Jumbo frame support that
5938 	 * needs to have the Tx COE disabled for oversized frames
5939 	 * (due to limited buffer sizes). In this case we disable
5940 	 * the TX csum insertion in the TDES and not use SF.
5941 	 */
5942 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5943 		features &= ~NETIF_F_CSUM_MASK;
5944 
5945 	/* Disable tso if asked by ethtool */
5946 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5947 		if (features & NETIF_F_TSO)
5948 			priv->tso = true;
5949 		else
5950 			priv->tso = false;
5951 	}
5952 
5953 	return features;
5954 }
5955 
5956 static int stmmac_set_features(struct net_device *netdev,
5957 			       netdev_features_t features)
5958 {
5959 	struct stmmac_priv *priv = netdev_priv(netdev);
5960 
5961 	/* Keep the COE Type in case of csum is supporting */
5962 	if (features & NETIF_F_RXCSUM)
5963 		priv->hw->rx_csum = priv->plat->rx_coe;
5964 	else
5965 		priv->hw->rx_csum = 0;
5966 	/* No check needed because rx_coe has been set before and it will be
5967 	 * fixed in case of issue.
5968 	 */
5969 	stmmac_rx_ipc(priv, priv->hw);
5970 
5971 	if (priv->sph_cap) {
5972 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5973 		u32 chan;
5974 
5975 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5976 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5977 	}
5978 
5979 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5980 		priv->hw->hw_vlan_en = true;
5981 	else
5982 		priv->hw->hw_vlan_en = false;
5983 
5984 	phylink_rx_clk_stop_block(priv->phylink);
5985 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5986 	phylink_rx_clk_stop_unblock(priv->phylink);
5987 
5988 	return 0;
5989 }
5990 
5991 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5992 {
5993 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5994 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5995 	u32 queues_count;
5996 	u32 queue;
5997 	bool xmac;
5998 
5999 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6000 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6001 
6002 	if (priv->irq_wake)
6003 		pm_wakeup_event(priv->device, 0);
6004 
6005 	if (priv->dma_cap.estsel)
6006 		stmmac_est_irq_status(priv, priv, priv->dev,
6007 				      &priv->xstats, tx_cnt);
6008 
6009 	if (stmmac_fpe_supported(priv))
6010 		stmmac_fpe_irq_status(priv);
6011 
6012 	/* To handle GMAC own interrupts */
6013 	if ((priv->plat->has_gmac) || xmac) {
6014 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6015 
6016 		if (unlikely(status)) {
6017 			/* For LPI we need to save the tx status */
6018 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6019 				priv->tx_path_in_lpi_mode = true;
6020 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6021 				priv->tx_path_in_lpi_mode = false;
6022 		}
6023 
6024 		for (queue = 0; queue < queues_count; queue++)
6025 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6026 
6027 		/* PCS link status */
6028 		if (priv->hw->pcs &&
6029 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6030 			if (priv->xstats.pcs_link)
6031 				netif_carrier_on(priv->dev);
6032 			else
6033 				netif_carrier_off(priv->dev);
6034 		}
6035 
6036 		stmmac_timestamp_interrupt(priv, priv);
6037 	}
6038 }
6039 
6040 /**
6041  *  stmmac_interrupt - main ISR
6042  *  @irq: interrupt number.
6043  *  @dev_id: to pass the net device pointer.
6044  *  Description: this is the main driver interrupt service routine.
6045  *  It can call:
6046  *  o DMA service routine (to manage incoming frame reception and transmission
6047  *    status)
6048  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6049  *    interrupts.
6050  */
6051 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6052 {
6053 	struct net_device *dev = (struct net_device *)dev_id;
6054 	struct stmmac_priv *priv = netdev_priv(dev);
6055 
6056 	/* Check if adapter is up */
6057 	if (test_bit(STMMAC_DOWN, &priv->state))
6058 		return IRQ_HANDLED;
6059 
6060 	/* Check ASP error if it isn't delivered via an individual IRQ */
6061 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6062 		return IRQ_HANDLED;
6063 
6064 	/* To handle Common interrupts */
6065 	stmmac_common_interrupt(priv);
6066 
6067 	/* To handle DMA interrupts */
6068 	stmmac_dma_interrupt(priv);
6069 
6070 	return IRQ_HANDLED;
6071 }
6072 
6073 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6074 {
6075 	struct net_device *dev = (struct net_device *)dev_id;
6076 	struct stmmac_priv *priv = netdev_priv(dev);
6077 
6078 	/* Check if adapter is up */
6079 	if (test_bit(STMMAC_DOWN, &priv->state))
6080 		return IRQ_HANDLED;
6081 
6082 	/* To handle Common interrupts */
6083 	stmmac_common_interrupt(priv);
6084 
6085 	return IRQ_HANDLED;
6086 }
6087 
6088 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6089 {
6090 	struct net_device *dev = (struct net_device *)dev_id;
6091 	struct stmmac_priv *priv = netdev_priv(dev);
6092 
6093 	/* Check if adapter is up */
6094 	if (test_bit(STMMAC_DOWN, &priv->state))
6095 		return IRQ_HANDLED;
6096 
6097 	/* Check if a fatal error happened */
6098 	stmmac_safety_feat_interrupt(priv);
6099 
6100 	return IRQ_HANDLED;
6101 }
6102 
6103 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6104 {
6105 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6106 	struct stmmac_dma_conf *dma_conf;
6107 	int chan = tx_q->queue_index;
6108 	struct stmmac_priv *priv;
6109 	int status;
6110 
6111 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6112 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6113 
6114 	/* Check if adapter is up */
6115 	if (test_bit(STMMAC_DOWN, &priv->state))
6116 		return IRQ_HANDLED;
6117 
6118 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6119 
6120 	if (unlikely(status & tx_hard_error_bump_tc)) {
6121 		/* Try to bump up the dma threshold on this failure */
6122 		stmmac_bump_dma_threshold(priv, chan);
6123 	} else if (unlikely(status == tx_hard_error)) {
6124 		stmmac_tx_err(priv, chan);
6125 	}
6126 
6127 	return IRQ_HANDLED;
6128 }
6129 
6130 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6131 {
6132 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6133 	struct stmmac_dma_conf *dma_conf;
6134 	int chan = rx_q->queue_index;
6135 	struct stmmac_priv *priv;
6136 
6137 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6138 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6139 
6140 	/* Check if adapter is up */
6141 	if (test_bit(STMMAC_DOWN, &priv->state))
6142 		return IRQ_HANDLED;
6143 
6144 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6145 
6146 	return IRQ_HANDLED;
6147 }
6148 
6149 /**
6150  *  stmmac_ioctl - Entry point for the Ioctl
6151  *  @dev: Device pointer.
6152  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6153  *  a proprietary structure used to pass information to the driver.
6154  *  @cmd: IOCTL command
6155  *  Description:
6156  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6157  */
6158 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6159 {
6160 	struct stmmac_priv *priv = netdev_priv (dev);
6161 	int ret = -EOPNOTSUPP;
6162 
6163 	if (!netif_running(dev))
6164 		return -EINVAL;
6165 
6166 	switch (cmd) {
6167 	case SIOCGMIIPHY:
6168 	case SIOCGMIIREG:
6169 	case SIOCSMIIREG:
6170 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6171 		break;
6172 	default:
6173 		break;
6174 	}
6175 
6176 	return ret;
6177 }
6178 
6179 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6180 				    void *cb_priv)
6181 {
6182 	struct stmmac_priv *priv = cb_priv;
6183 	int ret = -EOPNOTSUPP;
6184 
6185 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6186 		return ret;
6187 
6188 	__stmmac_disable_all_queues(priv);
6189 
6190 	switch (type) {
6191 	case TC_SETUP_CLSU32:
6192 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6193 		break;
6194 	case TC_SETUP_CLSFLOWER:
6195 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6196 		break;
6197 	default:
6198 		break;
6199 	}
6200 
6201 	stmmac_enable_all_queues(priv);
6202 	return ret;
6203 }
6204 
6205 static LIST_HEAD(stmmac_block_cb_list);
6206 
6207 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6208 			   void *type_data)
6209 {
6210 	struct stmmac_priv *priv = netdev_priv(ndev);
6211 
6212 	switch (type) {
6213 	case TC_QUERY_CAPS:
6214 		return stmmac_tc_query_caps(priv, priv, type_data);
6215 	case TC_SETUP_QDISC_MQPRIO:
6216 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6217 	case TC_SETUP_BLOCK:
6218 		return flow_block_cb_setup_simple(type_data,
6219 						  &stmmac_block_cb_list,
6220 						  stmmac_setup_tc_block_cb,
6221 						  priv, priv, true);
6222 	case TC_SETUP_QDISC_CBS:
6223 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6224 	case TC_SETUP_QDISC_TAPRIO:
6225 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6226 	case TC_SETUP_QDISC_ETF:
6227 		return stmmac_tc_setup_etf(priv, priv, type_data);
6228 	default:
6229 		return -EOPNOTSUPP;
6230 	}
6231 }
6232 
6233 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6234 			       struct net_device *sb_dev)
6235 {
6236 	int gso = skb_shinfo(skb)->gso_type;
6237 
6238 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6239 		/*
6240 		 * There is no way to determine the number of TSO/USO
6241 		 * capable Queues. Let's use always the Queue 0
6242 		 * because if TSO/USO is supported then at least this
6243 		 * one will be capable.
6244 		 */
6245 		return 0;
6246 	}
6247 
6248 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6249 }
6250 
6251 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6252 {
6253 	struct stmmac_priv *priv = netdev_priv(ndev);
6254 	int ret = 0;
6255 
6256 	ret = pm_runtime_resume_and_get(priv->device);
6257 	if (ret < 0)
6258 		return ret;
6259 
6260 	ret = eth_mac_addr(ndev, addr);
6261 	if (ret)
6262 		goto set_mac_error;
6263 
6264 	phylink_rx_clk_stop_block(priv->phylink);
6265 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6266 	phylink_rx_clk_stop_unblock(priv->phylink);
6267 
6268 set_mac_error:
6269 	pm_runtime_put(priv->device);
6270 
6271 	return ret;
6272 }
6273 
6274 #ifdef CONFIG_DEBUG_FS
6275 static struct dentry *stmmac_fs_dir;
6276 
6277 static void sysfs_display_ring(void *head, int size, int extend_desc,
6278 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6279 {
6280 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6281 	struct dma_desc *p = (struct dma_desc *)head;
6282 	unsigned int desc_size;
6283 	dma_addr_t dma_addr;
6284 	int i;
6285 
6286 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6287 	for (i = 0; i < size; i++) {
6288 		dma_addr = dma_phy_addr + i * desc_size;
6289 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6290 				i, &dma_addr,
6291 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6292 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6293 		if (extend_desc)
6294 			p = &(++ep)->basic;
6295 		else
6296 			p++;
6297 	}
6298 }
6299 
6300 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6301 {
6302 	struct net_device *dev = seq->private;
6303 	struct stmmac_priv *priv = netdev_priv(dev);
6304 	u32 rx_count = priv->plat->rx_queues_to_use;
6305 	u32 tx_count = priv->plat->tx_queues_to_use;
6306 	u32 queue;
6307 
6308 	if ((dev->flags & IFF_UP) == 0)
6309 		return 0;
6310 
6311 	for (queue = 0; queue < rx_count; queue++) {
6312 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6313 
6314 		seq_printf(seq, "RX Queue %d:\n", queue);
6315 
6316 		if (priv->extend_desc) {
6317 			seq_printf(seq, "Extended descriptor ring:\n");
6318 			sysfs_display_ring((void *)rx_q->dma_erx,
6319 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6320 		} else {
6321 			seq_printf(seq, "Descriptor ring:\n");
6322 			sysfs_display_ring((void *)rx_q->dma_rx,
6323 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6324 		}
6325 	}
6326 
6327 	for (queue = 0; queue < tx_count; queue++) {
6328 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6329 
6330 		seq_printf(seq, "TX Queue %d:\n", queue);
6331 
6332 		if (priv->extend_desc) {
6333 			seq_printf(seq, "Extended descriptor ring:\n");
6334 			sysfs_display_ring((void *)tx_q->dma_etx,
6335 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6336 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6337 			seq_printf(seq, "Descriptor ring:\n");
6338 			sysfs_display_ring((void *)tx_q->dma_tx,
6339 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6340 		}
6341 	}
6342 
6343 	return 0;
6344 }
6345 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6346 
6347 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6348 {
6349 	static const char * const dwxgmac_timestamp_source[] = {
6350 		"None",
6351 		"Internal",
6352 		"External",
6353 		"Both",
6354 	};
6355 	static const char * const dwxgmac_safety_feature_desc[] = {
6356 		"No",
6357 		"All Safety Features with ECC and Parity",
6358 		"All Safety Features without ECC or Parity",
6359 		"All Safety Features with Parity Only",
6360 		"ECC Only",
6361 		"UNDEFINED",
6362 		"UNDEFINED",
6363 		"UNDEFINED",
6364 	};
6365 	struct net_device *dev = seq->private;
6366 	struct stmmac_priv *priv = netdev_priv(dev);
6367 
6368 	if (!priv->hw_cap_support) {
6369 		seq_printf(seq, "DMA HW features not supported\n");
6370 		return 0;
6371 	}
6372 
6373 	seq_printf(seq, "==============================\n");
6374 	seq_printf(seq, "\tDMA HW features\n");
6375 	seq_printf(seq, "==============================\n");
6376 
6377 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6378 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6379 	seq_printf(seq, "\t1000 Mbps: %s\n",
6380 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6381 	seq_printf(seq, "\tHalf duplex: %s\n",
6382 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6383 	if (priv->plat->has_xgmac) {
6384 		seq_printf(seq,
6385 			   "\tNumber of Additional MAC address registers: %d\n",
6386 			   priv->dma_cap.multi_addr);
6387 	} else {
6388 		seq_printf(seq, "\tHash Filter: %s\n",
6389 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6390 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6391 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6392 	}
6393 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6394 		   (priv->dma_cap.pcs) ? "Y" : "N");
6395 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6396 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6397 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6398 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6399 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6400 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6401 	seq_printf(seq, "\tRMON module: %s\n",
6402 		   (priv->dma_cap.rmon) ? "Y" : "N");
6403 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6404 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6405 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6406 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6407 	if (priv->plat->has_xgmac)
6408 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6409 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6410 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6411 		   (priv->dma_cap.eee) ? "Y" : "N");
6412 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6413 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6414 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6415 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6416 	    priv->plat->has_xgmac) {
6417 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6418 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6419 	} else {
6420 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6421 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6422 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6423 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6424 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6425 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6426 	}
6427 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6428 		   priv->dma_cap.number_rx_channel);
6429 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6430 		   priv->dma_cap.number_tx_channel);
6431 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6432 		   priv->dma_cap.number_rx_queues);
6433 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6434 		   priv->dma_cap.number_tx_queues);
6435 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6436 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6437 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6438 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6439 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6440 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6441 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6442 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6443 		   priv->dma_cap.pps_out_num);
6444 	seq_printf(seq, "\tSafety Features: %s\n",
6445 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6446 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6447 		   priv->dma_cap.frpsel ? "Y" : "N");
6448 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6449 		   priv->dma_cap.host_dma_width);
6450 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6451 		   priv->dma_cap.rssen ? "Y" : "N");
6452 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6453 		   priv->dma_cap.vlhash ? "Y" : "N");
6454 	seq_printf(seq, "\tSplit Header: %s\n",
6455 		   priv->dma_cap.sphen ? "Y" : "N");
6456 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6457 		   priv->dma_cap.vlins ? "Y" : "N");
6458 	seq_printf(seq, "\tDouble VLAN: %s\n",
6459 		   priv->dma_cap.dvlan ? "Y" : "N");
6460 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6461 		   priv->dma_cap.l3l4fnum);
6462 	seq_printf(seq, "\tARP Offloading: %s\n",
6463 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6464 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6465 		   priv->dma_cap.estsel ? "Y" : "N");
6466 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6467 		   priv->dma_cap.fpesel ? "Y" : "N");
6468 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6469 		   priv->dma_cap.tbssel ? "Y" : "N");
6470 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6471 		   priv->dma_cap.tbs_ch_num);
6472 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6473 		   priv->dma_cap.sgfsel ? "Y" : "N");
6474 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6475 		   BIT(priv->dma_cap.ttsfd) >> 1);
6476 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6477 		   priv->dma_cap.numtc);
6478 	seq_printf(seq, "\tDCB Feature: %s\n",
6479 		   priv->dma_cap.dcben ? "Y" : "N");
6480 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6481 		   priv->dma_cap.advthword ? "Y" : "N");
6482 	seq_printf(seq, "\tPTP Offload: %s\n",
6483 		   priv->dma_cap.ptoen ? "Y" : "N");
6484 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6485 		   priv->dma_cap.osten ? "Y" : "N");
6486 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6487 		   priv->dma_cap.pfcen ? "Y" : "N");
6488 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6489 		   BIT(priv->dma_cap.frpes) << 6);
6490 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6491 		   BIT(priv->dma_cap.frpbs) << 6);
6492 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6493 		   priv->dma_cap.frppipe_num);
6494 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6495 		   priv->dma_cap.nrvf_num ?
6496 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6497 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6498 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6499 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6500 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6501 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6502 		   priv->dma_cap.cbtisel ? "Y" : "N");
6503 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6504 		   priv->dma_cap.aux_snapshot_n);
6505 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6506 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6507 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6508 		   priv->dma_cap.edma ? "Y" : "N");
6509 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6510 		   priv->dma_cap.ediffc ? "Y" : "N");
6511 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6512 		   priv->dma_cap.vxn ? "Y" : "N");
6513 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6514 		   priv->dma_cap.dbgmem ? "Y" : "N");
6515 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6516 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6517 	return 0;
6518 }
6519 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6520 
6521 /* Use network device events to rename debugfs file entries.
6522  */
6523 static int stmmac_device_event(struct notifier_block *unused,
6524 			       unsigned long event, void *ptr)
6525 {
6526 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6527 	struct stmmac_priv *priv = netdev_priv(dev);
6528 
6529 	if (dev->netdev_ops != &stmmac_netdev_ops)
6530 		goto done;
6531 
6532 	switch (event) {
6533 	case NETDEV_CHANGENAME:
6534 		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6535 		break;
6536 	}
6537 done:
6538 	return NOTIFY_DONE;
6539 }
6540 
6541 static struct notifier_block stmmac_notifier = {
6542 	.notifier_call = stmmac_device_event,
6543 };
6544 
6545 static void stmmac_init_fs(struct net_device *dev)
6546 {
6547 	struct stmmac_priv *priv = netdev_priv(dev);
6548 
6549 	rtnl_lock();
6550 
6551 	/* Create per netdev entries */
6552 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6553 
6554 	/* Entry to report DMA RX/TX rings */
6555 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6556 			    &stmmac_rings_status_fops);
6557 
6558 	/* Entry to report the DMA HW features */
6559 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6560 			    &stmmac_dma_cap_fops);
6561 
6562 	rtnl_unlock();
6563 }
6564 
6565 static void stmmac_exit_fs(struct net_device *dev)
6566 {
6567 	struct stmmac_priv *priv = netdev_priv(dev);
6568 
6569 	debugfs_remove_recursive(priv->dbgfs_dir);
6570 }
6571 #endif /* CONFIG_DEBUG_FS */
6572 
6573 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6574 {
6575 	unsigned char *data = (unsigned char *)&vid_le;
6576 	unsigned char data_byte = 0;
6577 	u32 crc = ~0x0;
6578 	u32 temp = 0;
6579 	int i, bits;
6580 
6581 	bits = get_bitmask_order(VLAN_VID_MASK);
6582 	for (i = 0; i < bits; i++) {
6583 		if ((i % 8) == 0)
6584 			data_byte = data[i / 8];
6585 
6586 		temp = ((crc & 1) ^ data_byte) & 1;
6587 		crc >>= 1;
6588 		data_byte >>= 1;
6589 
6590 		if (temp)
6591 			crc ^= 0xedb88320;
6592 	}
6593 
6594 	return crc;
6595 }
6596 
6597 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6598 {
6599 	u32 crc, hash = 0;
6600 	u16 pmatch = 0;
6601 	int count = 0;
6602 	u16 vid = 0;
6603 
6604 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6605 		__le16 vid_le = cpu_to_le16(vid);
6606 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6607 		hash |= (1 << crc);
6608 		count++;
6609 	}
6610 
6611 	if (!priv->dma_cap.vlhash) {
6612 		if (count > 2) /* VID = 0 always passes filter */
6613 			return -EOPNOTSUPP;
6614 
6615 		pmatch = vid;
6616 		hash = 0;
6617 	}
6618 
6619 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6620 }
6621 
6622 /* FIXME: This may need RXC to be running, but it may be called with BH
6623  * disabled, which means we can't call phylink_rx_clk_stop*().
6624  */
6625 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6626 {
6627 	struct stmmac_priv *priv = netdev_priv(ndev);
6628 	bool is_double = false;
6629 	int ret;
6630 
6631 	ret = pm_runtime_resume_and_get(priv->device);
6632 	if (ret < 0)
6633 		return ret;
6634 
6635 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6636 		is_double = true;
6637 
6638 	set_bit(vid, priv->active_vlans);
6639 	ret = stmmac_vlan_update(priv, is_double);
6640 	if (ret) {
6641 		clear_bit(vid, priv->active_vlans);
6642 		goto err_pm_put;
6643 	}
6644 
6645 	if (priv->hw->num_vlan) {
6646 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6647 		if (ret)
6648 			goto err_pm_put;
6649 	}
6650 err_pm_put:
6651 	pm_runtime_put(priv->device);
6652 
6653 	return ret;
6654 }
6655 
6656 /* FIXME: This may need RXC to be running, but it may be called with BH
6657  * disabled, which means we can't call phylink_rx_clk_stop*().
6658  */
6659 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6660 {
6661 	struct stmmac_priv *priv = netdev_priv(ndev);
6662 	bool is_double = false;
6663 	int ret;
6664 
6665 	ret = pm_runtime_resume_and_get(priv->device);
6666 	if (ret < 0)
6667 		return ret;
6668 
6669 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6670 		is_double = true;
6671 
6672 	clear_bit(vid, priv->active_vlans);
6673 
6674 	if (priv->hw->num_vlan) {
6675 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6676 		if (ret)
6677 			goto del_vlan_error;
6678 	}
6679 
6680 	ret = stmmac_vlan_update(priv, is_double);
6681 
6682 del_vlan_error:
6683 	pm_runtime_put(priv->device);
6684 
6685 	return ret;
6686 }
6687 
6688 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6689 {
6690 	struct stmmac_priv *priv = netdev_priv(dev);
6691 
6692 	switch (bpf->command) {
6693 	case XDP_SETUP_PROG:
6694 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6695 	case XDP_SETUP_XSK_POOL:
6696 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6697 					     bpf->xsk.queue_id);
6698 	default:
6699 		return -EOPNOTSUPP;
6700 	}
6701 }
6702 
6703 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6704 			   struct xdp_frame **frames, u32 flags)
6705 {
6706 	struct stmmac_priv *priv = netdev_priv(dev);
6707 	int cpu = smp_processor_id();
6708 	struct netdev_queue *nq;
6709 	int i, nxmit = 0;
6710 	int queue;
6711 
6712 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6713 		return -ENETDOWN;
6714 
6715 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6716 		return -EINVAL;
6717 
6718 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6719 	nq = netdev_get_tx_queue(priv->dev, queue);
6720 
6721 	__netif_tx_lock(nq, cpu);
6722 	/* Avoids TX time-out as we are sharing with slow path */
6723 	txq_trans_cond_update(nq);
6724 
6725 	for (i = 0; i < num_frames; i++) {
6726 		int res;
6727 
6728 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6729 		if (res == STMMAC_XDP_CONSUMED)
6730 			break;
6731 
6732 		nxmit++;
6733 	}
6734 
6735 	if (flags & XDP_XMIT_FLUSH) {
6736 		stmmac_flush_tx_descriptors(priv, queue);
6737 		stmmac_tx_timer_arm(priv, queue);
6738 	}
6739 
6740 	__netif_tx_unlock(nq);
6741 
6742 	return nxmit;
6743 }
6744 
6745 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6746 {
6747 	struct stmmac_channel *ch = &priv->channel[queue];
6748 	unsigned long flags;
6749 
6750 	spin_lock_irqsave(&ch->lock, flags);
6751 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6752 	spin_unlock_irqrestore(&ch->lock, flags);
6753 
6754 	stmmac_stop_rx_dma(priv, queue);
6755 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6756 }
6757 
6758 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6759 {
6760 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6761 	struct stmmac_channel *ch = &priv->channel[queue];
6762 	unsigned long flags;
6763 	u32 buf_size;
6764 	int ret;
6765 
6766 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6767 	if (ret) {
6768 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6769 		return;
6770 	}
6771 
6772 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6773 	if (ret) {
6774 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6775 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6776 		return;
6777 	}
6778 
6779 	stmmac_reset_rx_queue(priv, queue);
6780 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6781 
6782 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6783 			    rx_q->dma_rx_phy, rx_q->queue_index);
6784 
6785 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6786 			     sizeof(struct dma_desc));
6787 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6788 			       rx_q->rx_tail_addr, rx_q->queue_index);
6789 
6790 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6791 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6792 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6793 				      buf_size,
6794 				      rx_q->queue_index);
6795 	} else {
6796 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6797 				      priv->dma_conf.dma_buf_sz,
6798 				      rx_q->queue_index);
6799 	}
6800 
6801 	stmmac_start_rx_dma(priv, queue);
6802 
6803 	spin_lock_irqsave(&ch->lock, flags);
6804 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6805 	spin_unlock_irqrestore(&ch->lock, flags);
6806 }
6807 
6808 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6809 {
6810 	struct stmmac_channel *ch = &priv->channel[queue];
6811 	unsigned long flags;
6812 
6813 	spin_lock_irqsave(&ch->lock, flags);
6814 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6815 	spin_unlock_irqrestore(&ch->lock, flags);
6816 
6817 	stmmac_stop_tx_dma(priv, queue);
6818 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6819 }
6820 
6821 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6822 {
6823 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6824 	struct stmmac_channel *ch = &priv->channel[queue];
6825 	unsigned long flags;
6826 	int ret;
6827 
6828 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6829 	if (ret) {
6830 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6831 		return;
6832 	}
6833 
6834 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6835 	if (ret) {
6836 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6837 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6838 		return;
6839 	}
6840 
6841 	stmmac_reset_tx_queue(priv, queue);
6842 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6843 
6844 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6845 			    tx_q->dma_tx_phy, tx_q->queue_index);
6846 
6847 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6848 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6849 
6850 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6851 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6852 			       tx_q->tx_tail_addr, tx_q->queue_index);
6853 
6854 	stmmac_start_tx_dma(priv, queue);
6855 
6856 	spin_lock_irqsave(&ch->lock, flags);
6857 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6858 	spin_unlock_irqrestore(&ch->lock, flags);
6859 }
6860 
6861 void stmmac_xdp_release(struct net_device *dev)
6862 {
6863 	struct stmmac_priv *priv = netdev_priv(dev);
6864 	u32 chan;
6865 
6866 	/* Ensure tx function is not running */
6867 	netif_tx_disable(dev);
6868 
6869 	/* Disable NAPI process */
6870 	stmmac_disable_all_queues(priv);
6871 
6872 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6873 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6874 
6875 	/* Free the IRQ lines */
6876 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6877 
6878 	/* Stop TX/RX DMA channels */
6879 	stmmac_stop_all_dma(priv);
6880 
6881 	/* Release and free the Rx/Tx resources */
6882 	free_dma_desc_resources(priv, &priv->dma_conf);
6883 
6884 	/* Disable the MAC Rx/Tx */
6885 	stmmac_mac_set(priv, priv->ioaddr, false);
6886 
6887 	/* set trans_start so we don't get spurious
6888 	 * watchdogs during reset
6889 	 */
6890 	netif_trans_update(dev);
6891 	netif_carrier_off(dev);
6892 }
6893 
6894 int stmmac_xdp_open(struct net_device *dev)
6895 {
6896 	struct stmmac_priv *priv = netdev_priv(dev);
6897 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6898 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6899 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6900 	struct stmmac_rx_queue *rx_q;
6901 	struct stmmac_tx_queue *tx_q;
6902 	u32 buf_size;
6903 	bool sph_en;
6904 	u32 chan;
6905 	int ret;
6906 
6907 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6908 	if (ret < 0) {
6909 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6910 			   __func__);
6911 		goto dma_desc_error;
6912 	}
6913 
6914 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6915 	if (ret < 0) {
6916 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6917 			   __func__);
6918 		goto init_error;
6919 	}
6920 
6921 	stmmac_reset_queues_param(priv);
6922 
6923 	/* DMA CSR Channel configuration */
6924 	for (chan = 0; chan < dma_csr_ch; chan++) {
6925 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6926 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6927 	}
6928 
6929 	/* Adjust Split header */
6930 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6931 
6932 	/* DMA RX Channel Configuration */
6933 	for (chan = 0; chan < rx_cnt; chan++) {
6934 		rx_q = &priv->dma_conf.rx_queue[chan];
6935 
6936 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6937 				    rx_q->dma_rx_phy, chan);
6938 
6939 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6940 				     (rx_q->buf_alloc_num *
6941 				      sizeof(struct dma_desc));
6942 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6943 				       rx_q->rx_tail_addr, chan);
6944 
6945 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6946 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6947 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6948 					      buf_size,
6949 					      rx_q->queue_index);
6950 		} else {
6951 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6952 					      priv->dma_conf.dma_buf_sz,
6953 					      rx_q->queue_index);
6954 		}
6955 
6956 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6957 	}
6958 
6959 	/* DMA TX Channel Configuration */
6960 	for (chan = 0; chan < tx_cnt; chan++) {
6961 		tx_q = &priv->dma_conf.tx_queue[chan];
6962 
6963 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6964 				    tx_q->dma_tx_phy, chan);
6965 
6966 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6967 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6968 				       tx_q->tx_tail_addr, chan);
6969 
6970 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6971 	}
6972 
6973 	/* Enable the MAC Rx/Tx */
6974 	stmmac_mac_set(priv, priv->ioaddr, true);
6975 
6976 	/* Start Rx & Tx DMA Channels */
6977 	stmmac_start_all_dma(priv);
6978 
6979 	ret = stmmac_request_irq(dev);
6980 	if (ret)
6981 		goto irq_error;
6982 
6983 	/* Enable NAPI process*/
6984 	stmmac_enable_all_queues(priv);
6985 	netif_carrier_on(dev);
6986 	netif_tx_start_all_queues(dev);
6987 	stmmac_enable_all_dma_irq(priv);
6988 
6989 	return 0;
6990 
6991 irq_error:
6992 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6993 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6994 
6995 	stmmac_hw_teardown(dev);
6996 init_error:
6997 	free_dma_desc_resources(priv, &priv->dma_conf);
6998 dma_desc_error:
6999 	return ret;
7000 }
7001 
7002 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7003 {
7004 	struct stmmac_priv *priv = netdev_priv(dev);
7005 	struct stmmac_rx_queue *rx_q;
7006 	struct stmmac_tx_queue *tx_q;
7007 	struct stmmac_channel *ch;
7008 
7009 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7010 	    !netif_carrier_ok(priv->dev))
7011 		return -ENETDOWN;
7012 
7013 	if (!stmmac_xdp_is_enabled(priv))
7014 		return -EINVAL;
7015 
7016 	if (queue >= priv->plat->rx_queues_to_use ||
7017 	    queue >= priv->plat->tx_queues_to_use)
7018 		return -EINVAL;
7019 
7020 	rx_q = &priv->dma_conf.rx_queue[queue];
7021 	tx_q = &priv->dma_conf.tx_queue[queue];
7022 	ch = &priv->channel[queue];
7023 
7024 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7025 		return -EINVAL;
7026 
7027 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7028 		/* EQoS does not have per-DMA channel SW interrupt,
7029 		 * so we schedule RX Napi straight-away.
7030 		 */
7031 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7032 			__napi_schedule(&ch->rxtx_napi);
7033 	}
7034 
7035 	return 0;
7036 }
7037 
7038 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7039 {
7040 	struct stmmac_priv *priv = netdev_priv(dev);
7041 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7042 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7043 	unsigned int start;
7044 	int q;
7045 
7046 	for (q = 0; q < tx_cnt; q++) {
7047 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7048 		u64 tx_packets;
7049 		u64 tx_bytes;
7050 
7051 		do {
7052 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7053 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7054 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7055 		do {
7056 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7057 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7058 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7059 
7060 		stats->tx_packets += tx_packets;
7061 		stats->tx_bytes += tx_bytes;
7062 	}
7063 
7064 	for (q = 0; q < rx_cnt; q++) {
7065 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7066 		u64 rx_packets;
7067 		u64 rx_bytes;
7068 
7069 		do {
7070 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7071 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7072 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7073 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7074 
7075 		stats->rx_packets += rx_packets;
7076 		stats->rx_bytes += rx_bytes;
7077 	}
7078 
7079 	stats->rx_dropped = priv->xstats.rx_dropped;
7080 	stats->rx_errors = priv->xstats.rx_errors;
7081 	stats->tx_dropped = priv->xstats.tx_dropped;
7082 	stats->tx_errors = priv->xstats.tx_errors;
7083 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7084 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7085 	stats->rx_length_errors = priv->xstats.rx_length;
7086 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7087 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7088 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7089 }
7090 
7091 static const struct net_device_ops stmmac_netdev_ops = {
7092 	.ndo_open = stmmac_open,
7093 	.ndo_start_xmit = stmmac_xmit,
7094 	.ndo_stop = stmmac_release,
7095 	.ndo_change_mtu = stmmac_change_mtu,
7096 	.ndo_fix_features = stmmac_fix_features,
7097 	.ndo_set_features = stmmac_set_features,
7098 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7099 	.ndo_tx_timeout = stmmac_tx_timeout,
7100 	.ndo_eth_ioctl = stmmac_ioctl,
7101 	.ndo_get_stats64 = stmmac_get_stats64,
7102 	.ndo_setup_tc = stmmac_setup_tc,
7103 	.ndo_select_queue = stmmac_select_queue,
7104 	.ndo_set_mac_address = stmmac_set_mac_address,
7105 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7106 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7107 	.ndo_bpf = stmmac_bpf,
7108 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7109 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7110 	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
7111 	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
7112 };
7113 
7114 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7115 {
7116 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7117 		return;
7118 	if (test_bit(STMMAC_DOWN, &priv->state))
7119 		return;
7120 
7121 	netdev_err(priv->dev, "Reset adapter.\n");
7122 
7123 	rtnl_lock();
7124 	netif_trans_update(priv->dev);
7125 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7126 		usleep_range(1000, 2000);
7127 
7128 	set_bit(STMMAC_DOWN, &priv->state);
7129 	dev_close(priv->dev);
7130 	dev_open(priv->dev, NULL);
7131 	clear_bit(STMMAC_DOWN, &priv->state);
7132 	clear_bit(STMMAC_RESETING, &priv->state);
7133 	rtnl_unlock();
7134 }
7135 
7136 static void stmmac_service_task(struct work_struct *work)
7137 {
7138 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7139 			service_task);
7140 
7141 	stmmac_reset_subtask(priv);
7142 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7143 }
7144 
7145 /**
7146  *  stmmac_hw_init - Init the MAC device
7147  *  @priv: driver private structure
7148  *  Description: this function is to configure the MAC device according to
7149  *  some platform parameters or the HW capability register. It prepares the
7150  *  driver to use either ring or chain modes and to setup either enhanced or
7151  *  normal descriptors.
7152  */
7153 static int stmmac_hw_init(struct stmmac_priv *priv)
7154 {
7155 	int ret;
7156 
7157 	/* dwmac-sun8i only work in chain mode */
7158 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7159 		chain_mode = 1;
7160 	priv->chain_mode = chain_mode;
7161 
7162 	/* Initialize HW Interface */
7163 	ret = stmmac_hwif_init(priv);
7164 	if (ret)
7165 		return ret;
7166 
7167 	/* Get the HW capability (new GMAC newer than 3.50a) */
7168 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7169 	if (priv->hw_cap_support) {
7170 		dev_info(priv->device, "DMA HW capability register supported\n");
7171 
7172 		/* We can override some gmac/dma configuration fields: e.g.
7173 		 * enh_desc, tx_coe (e.g. that are passed through the
7174 		 * platform) with the values from the HW capability
7175 		 * register (if supported).
7176 		 */
7177 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7178 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7179 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7180 		if (priv->dma_cap.hash_tb_sz) {
7181 			priv->hw->multicast_filter_bins =
7182 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7183 			priv->hw->mcast_bits_log2 =
7184 					ilog2(priv->hw->multicast_filter_bins);
7185 		}
7186 
7187 		/* TXCOE doesn't work in thresh DMA mode */
7188 		if (priv->plat->force_thresh_dma_mode)
7189 			priv->plat->tx_coe = 0;
7190 		else
7191 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7192 
7193 		/* In case of GMAC4 rx_coe is from HW cap register. */
7194 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7195 
7196 		if (priv->dma_cap.rx_coe_type2)
7197 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7198 		else if (priv->dma_cap.rx_coe_type1)
7199 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7200 
7201 	} else {
7202 		dev_info(priv->device, "No HW DMA feature register supported\n");
7203 	}
7204 
7205 	if (priv->plat->rx_coe) {
7206 		priv->hw->rx_csum = priv->plat->rx_coe;
7207 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7208 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7209 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7210 	}
7211 	if (priv->plat->tx_coe)
7212 		dev_info(priv->device, "TX Checksum insertion supported\n");
7213 
7214 	if (priv->plat->pmt) {
7215 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7216 		device_set_wakeup_capable(priv->device, 1);
7217 		devm_pm_set_wake_irq(priv->device, priv->wol_irq);
7218 	}
7219 
7220 	if (priv->dma_cap.tsoen)
7221 		dev_info(priv->device, "TSO supported\n");
7222 
7223 	if (priv->dma_cap.number_rx_queues &&
7224 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7225 		dev_warn(priv->device,
7226 			 "Number of Rx queues (%u) exceeds dma capability\n",
7227 			 priv->plat->rx_queues_to_use);
7228 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7229 	}
7230 	if (priv->dma_cap.number_tx_queues &&
7231 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7232 		dev_warn(priv->device,
7233 			 "Number of Tx queues (%u) exceeds dma capability\n",
7234 			 priv->plat->tx_queues_to_use);
7235 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7236 	}
7237 
7238 	if (priv->dma_cap.rx_fifo_size &&
7239 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7240 		dev_warn(priv->device,
7241 			 "Rx FIFO size (%u) exceeds dma capability\n",
7242 			 priv->plat->rx_fifo_size);
7243 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7244 	}
7245 	if (priv->dma_cap.tx_fifo_size &&
7246 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7247 		dev_warn(priv->device,
7248 			 "Tx FIFO size (%u) exceeds dma capability\n",
7249 			 priv->plat->tx_fifo_size);
7250 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7251 	}
7252 
7253 	priv->hw->vlan_fail_q_en =
7254 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7255 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7256 
7257 	/* Run HW quirks, if any */
7258 	if (priv->hwif_quirks) {
7259 		ret = priv->hwif_quirks(priv);
7260 		if (ret)
7261 			return ret;
7262 	}
7263 
7264 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7265 	 * In some case, for example on bugged HW this feature
7266 	 * has to be disable and this can be done by passing the
7267 	 * riwt_off field from the platform.
7268 	 */
7269 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7270 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7271 		priv->use_riwt = 1;
7272 		dev_info(priv->device,
7273 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7274 	}
7275 
7276 	return 0;
7277 }
7278 
7279 static void stmmac_napi_add(struct net_device *dev)
7280 {
7281 	struct stmmac_priv *priv = netdev_priv(dev);
7282 	u32 queue, maxq;
7283 
7284 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7285 
7286 	for (queue = 0; queue < maxq; queue++) {
7287 		struct stmmac_channel *ch = &priv->channel[queue];
7288 
7289 		ch->priv_data = priv;
7290 		ch->index = queue;
7291 		spin_lock_init(&ch->lock);
7292 
7293 		if (queue < priv->plat->rx_queues_to_use) {
7294 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7295 		}
7296 		if (queue < priv->plat->tx_queues_to_use) {
7297 			netif_napi_add_tx(dev, &ch->tx_napi,
7298 					  stmmac_napi_poll_tx);
7299 		}
7300 		if (queue < priv->plat->rx_queues_to_use &&
7301 		    queue < priv->plat->tx_queues_to_use) {
7302 			netif_napi_add(dev, &ch->rxtx_napi,
7303 				       stmmac_napi_poll_rxtx);
7304 		}
7305 	}
7306 }
7307 
7308 static void stmmac_napi_del(struct net_device *dev)
7309 {
7310 	struct stmmac_priv *priv = netdev_priv(dev);
7311 	u32 queue, maxq;
7312 
7313 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7314 
7315 	for (queue = 0; queue < maxq; queue++) {
7316 		struct stmmac_channel *ch = &priv->channel[queue];
7317 
7318 		if (queue < priv->plat->rx_queues_to_use)
7319 			netif_napi_del(&ch->rx_napi);
7320 		if (queue < priv->plat->tx_queues_to_use)
7321 			netif_napi_del(&ch->tx_napi);
7322 		if (queue < priv->plat->rx_queues_to_use &&
7323 		    queue < priv->plat->tx_queues_to_use) {
7324 			netif_napi_del(&ch->rxtx_napi);
7325 		}
7326 	}
7327 }
7328 
7329 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7330 {
7331 	struct stmmac_priv *priv = netdev_priv(dev);
7332 	int ret = 0, i;
7333 
7334 	if (netif_running(dev))
7335 		stmmac_release(dev);
7336 
7337 	stmmac_napi_del(dev);
7338 
7339 	priv->plat->rx_queues_to_use = rx_cnt;
7340 	priv->plat->tx_queues_to_use = tx_cnt;
7341 	if (!netif_is_rxfh_configured(dev))
7342 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7343 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7344 									rx_cnt);
7345 
7346 	stmmac_napi_add(dev);
7347 
7348 	if (netif_running(dev))
7349 		ret = stmmac_open(dev);
7350 
7351 	return ret;
7352 }
7353 
7354 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7355 {
7356 	struct stmmac_priv *priv = netdev_priv(dev);
7357 	int ret = 0;
7358 
7359 	if (netif_running(dev))
7360 		stmmac_release(dev);
7361 
7362 	priv->dma_conf.dma_rx_size = rx_size;
7363 	priv->dma_conf.dma_tx_size = tx_size;
7364 
7365 	if (netif_running(dev))
7366 		ret = stmmac_open(dev);
7367 
7368 	return ret;
7369 }
7370 
7371 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7372 {
7373 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7374 	struct dma_desc *desc_contains_ts = ctx->desc;
7375 	struct stmmac_priv *priv = ctx->priv;
7376 	struct dma_desc *ndesc = ctx->ndesc;
7377 	struct dma_desc *desc = ctx->desc;
7378 	u64 ns = 0;
7379 
7380 	if (!priv->hwts_rx_en)
7381 		return -ENODATA;
7382 
7383 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7384 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7385 		desc_contains_ts = ndesc;
7386 
7387 	/* Check if timestamp is available */
7388 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7389 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7390 		ns -= priv->plat->cdc_error_adj;
7391 		*timestamp = ns_to_ktime(ns);
7392 		return 0;
7393 	}
7394 
7395 	return -ENODATA;
7396 }
7397 
7398 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7399 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7400 };
7401 
7402 /**
7403  * stmmac_dvr_probe
7404  * @device: device pointer
7405  * @plat_dat: platform data pointer
7406  * @res: stmmac resource pointer
7407  * Description: this is the main probe function used to
7408  * call the alloc_etherdev, allocate the priv structure.
7409  * Return:
7410  * returns 0 on success, otherwise errno.
7411  */
7412 int stmmac_dvr_probe(struct device *device,
7413 		     struct plat_stmmacenet_data *plat_dat,
7414 		     struct stmmac_resources *res)
7415 {
7416 	struct net_device *ndev = NULL;
7417 	struct stmmac_priv *priv;
7418 	u32 rxq;
7419 	int i, ret = 0;
7420 
7421 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7422 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7423 	if (!ndev)
7424 		return -ENOMEM;
7425 
7426 	SET_NETDEV_DEV(ndev, device);
7427 
7428 	priv = netdev_priv(ndev);
7429 	priv->device = device;
7430 	priv->dev = ndev;
7431 
7432 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7433 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7434 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7435 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7436 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7437 	}
7438 
7439 	priv->xstats.pcpu_stats =
7440 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7441 	if (!priv->xstats.pcpu_stats)
7442 		return -ENOMEM;
7443 
7444 	stmmac_set_ethtool_ops(ndev);
7445 	priv->pause_time = pause;
7446 	priv->plat = plat_dat;
7447 	priv->ioaddr = res->addr;
7448 	priv->dev->base_addr = (unsigned long)res->addr;
7449 	priv->plat->dma_cfg->multi_msi_en =
7450 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7451 
7452 	priv->dev->irq = res->irq;
7453 	priv->wol_irq = res->wol_irq;
7454 	priv->lpi_irq = res->lpi_irq;
7455 	priv->sfty_irq = res->sfty_irq;
7456 	priv->sfty_ce_irq = res->sfty_ce_irq;
7457 	priv->sfty_ue_irq = res->sfty_ue_irq;
7458 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7459 		priv->rx_irq[i] = res->rx_irq[i];
7460 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7461 		priv->tx_irq[i] = res->tx_irq[i];
7462 
7463 	if (!is_zero_ether_addr(res->mac))
7464 		eth_hw_addr_set(priv->dev, res->mac);
7465 
7466 	dev_set_drvdata(device, priv->dev);
7467 
7468 	/* Verify driver arguments */
7469 	stmmac_verify_args();
7470 
7471 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7472 	if (!priv->af_xdp_zc_qps)
7473 		return -ENOMEM;
7474 
7475 	/* Allocate workqueue */
7476 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7477 	if (!priv->wq) {
7478 		dev_err(priv->device, "failed to create workqueue\n");
7479 		ret = -ENOMEM;
7480 		goto error_wq_init;
7481 	}
7482 
7483 	INIT_WORK(&priv->service_task, stmmac_service_task);
7484 
7485 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7486 
7487 	/* Override with kernel parameters if supplied XXX CRS XXX
7488 	 * this needs to have multiple instances
7489 	 */
7490 	if ((phyaddr >= 0) && (phyaddr <= 31))
7491 		priv->plat->phy_addr = phyaddr;
7492 
7493 	if (priv->plat->stmmac_rst) {
7494 		ret = reset_control_assert(priv->plat->stmmac_rst);
7495 		reset_control_deassert(priv->plat->stmmac_rst);
7496 		/* Some reset controllers have only reset callback instead of
7497 		 * assert + deassert callbacks pair.
7498 		 */
7499 		if (ret == -ENOTSUPP)
7500 			reset_control_reset(priv->plat->stmmac_rst);
7501 	}
7502 
7503 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7504 	if (ret == -ENOTSUPP)
7505 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7506 			ERR_PTR(ret));
7507 
7508 	/* Wait a bit for the reset to take effect */
7509 	udelay(10);
7510 
7511 	/* Init MAC and get the capabilities */
7512 	ret = stmmac_hw_init(priv);
7513 	if (ret)
7514 		goto error_hw_init;
7515 
7516 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7517 	 */
7518 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7519 		priv->plat->dma_cfg->dche = false;
7520 
7521 	stmmac_check_ether_addr(priv);
7522 
7523 	ndev->netdev_ops = &stmmac_netdev_ops;
7524 
7525 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7526 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7527 
7528 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7529 			    NETIF_F_RXCSUM;
7530 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7531 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7532 
7533 	ret = stmmac_tc_init(priv, priv);
7534 	if (!ret) {
7535 		ndev->hw_features |= NETIF_F_HW_TC;
7536 	}
7537 
7538 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7539 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7540 		if (priv->plat->has_gmac4)
7541 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7542 		priv->tso = true;
7543 		dev_info(priv->device, "TSO feature enabled\n");
7544 	}
7545 
7546 	if (priv->dma_cap.sphen &&
7547 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7548 		ndev->hw_features |= NETIF_F_GRO;
7549 		priv->sph_cap = true;
7550 		priv->sph = priv->sph_cap;
7551 		dev_info(priv->device, "SPH feature enabled\n");
7552 	}
7553 
7554 	/* Ideally our host DMA address width is the same as for the
7555 	 * device. However, it may differ and then we have to use our
7556 	 * host DMA width for allocation and the device DMA width for
7557 	 * register handling.
7558 	 */
7559 	if (priv->plat->host_dma_width)
7560 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7561 	else
7562 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7563 
7564 	if (priv->dma_cap.host_dma_width) {
7565 		ret = dma_set_mask_and_coherent(device,
7566 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7567 		if (!ret) {
7568 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7569 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7570 
7571 			/*
7572 			 * If more than 32 bits can be addressed, make sure to
7573 			 * enable enhanced addressing mode.
7574 			 */
7575 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7576 				priv->plat->dma_cfg->eame = true;
7577 		} else {
7578 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7579 			if (ret) {
7580 				dev_err(priv->device, "Failed to set DMA Mask\n");
7581 				goto error_hw_init;
7582 			}
7583 
7584 			priv->dma_cap.host_dma_width = 32;
7585 		}
7586 	}
7587 
7588 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7589 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7590 #ifdef STMMAC_VLAN_TAG_USED
7591 	/* Both mac100 and gmac support receive VLAN tag detection */
7592 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7593 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
7594 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7595 		priv->hw->hw_vlan_en = true;
7596 	}
7597 	if (priv->dma_cap.vlhash) {
7598 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7599 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7600 	}
7601 	if (priv->dma_cap.vlins) {
7602 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7603 		if (priv->dma_cap.dvlan)
7604 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7605 	}
7606 #endif
7607 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7608 
7609 	priv->xstats.threshold = tc;
7610 
7611 	/* Initialize RSS */
7612 	rxq = priv->plat->rx_queues_to_use;
7613 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7614 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7615 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7616 
7617 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7618 		ndev->features |= NETIF_F_RXHASH;
7619 
7620 	ndev->vlan_features |= ndev->features;
7621 
7622 	/* MTU range: 46 - hw-specific max */
7623 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7624 	if (priv->plat->has_xgmac)
7625 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7626 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7627 		ndev->max_mtu = JUMBO_LEN;
7628 	else
7629 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7630 	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7631 	 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range.
7632 	 */
7633 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7634 	    (priv->plat->maxmtu >= ndev->min_mtu))
7635 		ndev->max_mtu = priv->plat->maxmtu;
7636 	else if (priv->plat->maxmtu < ndev->min_mtu)
7637 		dev_warn(priv->device,
7638 			 "%s: warning: maxmtu having invalid value (%d)\n",
7639 			 __func__, priv->plat->maxmtu);
7640 
7641 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7642 
7643 	/* Setup channels NAPI */
7644 	stmmac_napi_add(ndev);
7645 
7646 	mutex_init(&priv->lock);
7647 
7648 	stmmac_fpe_init(priv);
7649 
7650 	stmmac_check_pcs_mode(priv);
7651 
7652 	pm_runtime_get_noresume(device);
7653 	pm_runtime_set_active(device);
7654 	if (!pm_runtime_enabled(device))
7655 		pm_runtime_enable(device);
7656 
7657 	ret = stmmac_mdio_register(ndev);
7658 	if (ret < 0) {
7659 		dev_err_probe(priv->device, ret,
7660 			      "MDIO bus (id: %d) registration failed\n",
7661 			      priv->plat->bus_id);
7662 		goto error_mdio_register;
7663 	}
7664 
7665 	ret = stmmac_pcs_setup(ndev);
7666 	if (ret)
7667 		goto error_pcs_setup;
7668 
7669 	ret = stmmac_phy_setup(priv);
7670 	if (ret) {
7671 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7672 		goto error_phy_setup;
7673 	}
7674 
7675 	ret = register_netdev(ndev);
7676 	if (ret) {
7677 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7678 			__func__, ret);
7679 		goto error_netdev_register;
7680 	}
7681 
7682 #ifdef CONFIG_DEBUG_FS
7683 	stmmac_init_fs(ndev);
7684 #endif
7685 
7686 	if (priv->plat->dump_debug_regs)
7687 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7688 
7689 	/* Let pm_runtime_put() disable the clocks.
7690 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7691 	 */
7692 	pm_runtime_put(device);
7693 
7694 	return ret;
7695 
7696 error_netdev_register:
7697 	phylink_destroy(priv->phylink);
7698 error_phy_setup:
7699 	stmmac_pcs_clean(ndev);
7700 error_pcs_setup:
7701 	stmmac_mdio_unregister(ndev);
7702 error_mdio_register:
7703 	stmmac_napi_del(ndev);
7704 error_hw_init:
7705 	destroy_workqueue(priv->wq);
7706 error_wq_init:
7707 	bitmap_free(priv->af_xdp_zc_qps);
7708 
7709 	return ret;
7710 }
7711 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7712 
7713 /**
7714  * stmmac_dvr_remove
7715  * @dev: device pointer
7716  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7717  * changes the link status, releases the DMA descriptor rings.
7718  */
7719 void stmmac_dvr_remove(struct device *dev)
7720 {
7721 	struct net_device *ndev = dev_get_drvdata(dev);
7722 	struct stmmac_priv *priv = netdev_priv(ndev);
7723 
7724 	netdev_info(priv->dev, "%s: removing driver", __func__);
7725 
7726 	pm_runtime_get_sync(dev);
7727 
7728 	unregister_netdev(ndev);
7729 
7730 #ifdef CONFIG_DEBUG_FS
7731 	stmmac_exit_fs(ndev);
7732 #endif
7733 	phylink_destroy(priv->phylink);
7734 	if (priv->plat->stmmac_rst)
7735 		reset_control_assert(priv->plat->stmmac_rst);
7736 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7737 
7738 	stmmac_pcs_clean(ndev);
7739 	stmmac_mdio_unregister(ndev);
7740 
7741 	destroy_workqueue(priv->wq);
7742 	mutex_destroy(&priv->lock);
7743 	bitmap_free(priv->af_xdp_zc_qps);
7744 
7745 	pm_runtime_disable(dev);
7746 	pm_runtime_put_noidle(dev);
7747 }
7748 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7749 
7750 /**
7751  * stmmac_suspend - suspend callback
7752  * @dev: device pointer
7753  * Description: this is the function to suspend the device and it is called
7754  * by the platform driver to stop the network queue, release the resources,
7755  * program the PMT register (for WoL), clean and release driver resources.
7756  */
7757 int stmmac_suspend(struct device *dev)
7758 {
7759 	struct net_device *ndev = dev_get_drvdata(dev);
7760 	struct stmmac_priv *priv = netdev_priv(ndev);
7761 	u32 chan;
7762 
7763 	if (!ndev || !netif_running(ndev))
7764 		return 0;
7765 
7766 	mutex_lock(&priv->lock);
7767 
7768 	netif_device_detach(ndev);
7769 
7770 	stmmac_disable_all_queues(priv);
7771 
7772 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7773 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7774 
7775 	if (priv->eee_sw_timer_en) {
7776 		priv->tx_path_in_lpi_mode = false;
7777 		timer_delete_sync(&priv->eee_ctrl_timer);
7778 	}
7779 
7780 	/* Stop TX/RX DMA */
7781 	stmmac_stop_all_dma(priv);
7782 
7783 	if (priv->plat->serdes_powerdown)
7784 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7785 
7786 	/* Enable Power down mode by programming the PMT regs */
7787 	if (stmmac_wol_enabled_mac(priv)) {
7788 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7789 		priv->irq_wake = 1;
7790 	} else {
7791 		stmmac_mac_set(priv, priv->ioaddr, false);
7792 		pinctrl_pm_select_sleep_state(priv->device);
7793 	}
7794 
7795 	mutex_unlock(&priv->lock);
7796 
7797 	rtnl_lock();
7798 	if (stmmac_wol_enabled_phy(priv))
7799 		phylink_speed_down(priv->phylink, false);
7800 
7801 	phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv));
7802 	rtnl_unlock();
7803 
7804 	if (stmmac_fpe_supported(priv))
7805 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
7806 
7807 	if (priv->plat->suspend)
7808 		return priv->plat->suspend(dev, priv->plat->bsp_priv);
7809 
7810 	return 0;
7811 }
7812 EXPORT_SYMBOL_GPL(stmmac_suspend);
7813 
7814 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7815 {
7816 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7817 
7818 	rx_q->cur_rx = 0;
7819 	rx_q->dirty_rx = 0;
7820 }
7821 
7822 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7823 {
7824 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7825 
7826 	tx_q->cur_tx = 0;
7827 	tx_q->dirty_tx = 0;
7828 	tx_q->mss = 0;
7829 
7830 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7831 }
7832 
7833 /**
7834  * stmmac_reset_queues_param - reset queue parameters
7835  * @priv: device pointer
7836  */
7837 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7838 {
7839 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7840 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7841 	u32 queue;
7842 
7843 	for (queue = 0; queue < rx_cnt; queue++)
7844 		stmmac_reset_rx_queue(priv, queue);
7845 
7846 	for (queue = 0; queue < tx_cnt; queue++)
7847 		stmmac_reset_tx_queue(priv, queue);
7848 }
7849 
7850 /**
7851  * stmmac_resume - resume callback
7852  * @dev: device pointer
7853  * Description: when resume this function is invoked to setup the DMA and CORE
7854  * in a usable state.
7855  */
7856 int stmmac_resume(struct device *dev)
7857 {
7858 	struct net_device *ndev = dev_get_drvdata(dev);
7859 	struct stmmac_priv *priv = netdev_priv(ndev);
7860 	int ret;
7861 
7862 	if (priv->plat->resume) {
7863 		ret = priv->plat->resume(dev, priv->plat->bsp_priv);
7864 		if (ret)
7865 			return ret;
7866 	}
7867 
7868 	if (!netif_running(ndev))
7869 		return 0;
7870 
7871 	/* Power Down bit, into the PM register, is cleared
7872 	 * automatically as soon as a magic packet or a Wake-up frame
7873 	 * is received. Anyway, it's better to manually clear
7874 	 * this bit because it can generate problems while resuming
7875 	 * from another devices (e.g. serial console).
7876 	 */
7877 	if (stmmac_wol_enabled_mac(priv)) {
7878 		mutex_lock(&priv->lock);
7879 		stmmac_pmt(priv, priv->hw, 0);
7880 		mutex_unlock(&priv->lock);
7881 		priv->irq_wake = 0;
7882 	} else {
7883 		pinctrl_pm_select_default_state(priv->device);
7884 		/* reset the phy so that it's ready */
7885 		if (priv->mii)
7886 			stmmac_mdio_reset(priv->mii);
7887 	}
7888 
7889 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7890 	    priv->plat->serdes_powerup) {
7891 		ret = priv->plat->serdes_powerup(ndev,
7892 						 priv->plat->bsp_priv);
7893 
7894 		if (ret < 0)
7895 			return ret;
7896 	}
7897 
7898 	rtnl_lock();
7899 
7900 	/* Prepare the PHY to resume, ensuring that its clocks which are
7901 	 * necessary for the MAC DMA reset to complete are running
7902 	 */
7903 	phylink_prepare_resume(priv->phylink);
7904 
7905 	mutex_lock(&priv->lock);
7906 
7907 	stmmac_reset_queues_param(priv);
7908 
7909 	stmmac_free_tx_skbufs(priv);
7910 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7911 
7912 	ret = stmmac_hw_setup(ndev, false);
7913 	if (ret < 0) {
7914 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
7915 		mutex_unlock(&priv->lock);
7916 		rtnl_unlock();
7917 		return ret;
7918 	}
7919 
7920 	stmmac_init_coalesce(priv);
7921 	phylink_rx_clk_stop_block(priv->phylink);
7922 	stmmac_set_rx_mode(ndev);
7923 
7924 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7925 	phylink_rx_clk_stop_unblock(priv->phylink);
7926 
7927 	stmmac_enable_all_queues(priv);
7928 	stmmac_enable_all_dma_irq(priv);
7929 
7930 	mutex_unlock(&priv->lock);
7931 
7932 	/* phylink_resume() must be called after the hardware has been
7933 	 * initialised because it may bring the link up immediately in a
7934 	 * workqueue thread, which will race with initialisation.
7935 	 */
7936 	phylink_resume(priv->phylink);
7937 	if (stmmac_wol_enabled_phy(priv))
7938 		phylink_speed_up(priv->phylink);
7939 
7940 	rtnl_unlock();
7941 
7942 	netif_device_attach(ndev);
7943 
7944 	return 0;
7945 }
7946 EXPORT_SYMBOL_GPL(stmmac_resume);
7947 
7948 /* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
7949 DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
7950 EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
7951 
7952 #ifndef MODULE
7953 static int __init stmmac_cmdline_opt(char *str)
7954 {
7955 	char *opt;
7956 
7957 	if (!str || !*str)
7958 		return 1;
7959 	while ((opt = strsep(&str, ",")) != NULL) {
7960 		if (!strncmp(opt, "debug:", 6)) {
7961 			if (kstrtoint(opt + 6, 0, &debug))
7962 				goto err;
7963 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7964 			if (kstrtoint(opt + 8, 0, &phyaddr))
7965 				goto err;
7966 		} else if (!strncmp(opt, "tc:", 3)) {
7967 			if (kstrtoint(opt + 3, 0, &tc))
7968 				goto err;
7969 		} else if (!strncmp(opt, "watchdog:", 9)) {
7970 			if (kstrtoint(opt + 9, 0, &watchdog))
7971 				goto err;
7972 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7973 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7974 				goto err;
7975 		} else if (!strncmp(opt, "pause:", 6)) {
7976 			if (kstrtoint(opt + 6, 0, &pause))
7977 				goto err;
7978 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7979 			if (kstrtoint(opt + 10, 0, &eee_timer))
7980 				goto err;
7981 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7982 			if (kstrtoint(opt + 11, 0, &chain_mode))
7983 				goto err;
7984 		}
7985 	}
7986 	return 1;
7987 
7988 err:
7989 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7990 	return 1;
7991 }
7992 
7993 __setup("stmmaceth=", stmmac_cmdline_opt);
7994 #endif /* MODULE */
7995 
7996 static int __init stmmac_init(void)
7997 {
7998 #ifdef CONFIG_DEBUG_FS
7999 	/* Create debugfs main directory if it doesn't exist yet */
8000 	if (!stmmac_fs_dir)
8001 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8002 	register_netdevice_notifier(&stmmac_notifier);
8003 #endif
8004 
8005 	return 0;
8006 }
8007 
8008 static void __exit stmmac_exit(void)
8009 {
8010 #ifdef CONFIG_DEBUG_FS
8011 	unregister_netdevice_notifier(&stmmac_notifier);
8012 	debugfs_remove_recursive(stmmac_fs_dir);
8013 #endif
8014 }
8015 
8016 module_init(stmmac_init)
8017 module_exit(stmmac_exit)
8018 
8019 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8020 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8021 MODULE_LICENSE("GPL");
8022