xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision ccde82e909467abdf098a8ee6f63e1ecf9a47ce5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/pm_wakeirq.h>
33 #include <linux/prefetch.h>
34 #include <linux/pinctrl/consumer.h>
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38 #endif /* CONFIG_DEBUG_FS */
39 #include <linux/net_tstamp.h>
40 #include <linux/phylink.h>
41 #include <linux/udp.h>
42 #include <linux/bpf_trace.h>
43 #include <net/page_pool/helpers.h>
44 #include <net/pkt_cls.h>
45 #include <net/xdp_sock_drv.h>
46 #include "stmmac_ptp.h"
47 #include "stmmac_fpe.h"
48 #include "stmmac.h"
49 #include "stmmac_xdp.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 #include "dwxgmac2.h"
54 #include "hwif.h"
55 
56 /* As long as the interface is active, we keep the timestamping counter enabled
57  * with fine resolution and binary rollover. This avoids non-monotonic behavior
58  * (clock jumps) when changing timestamping settings at runtime.
59  */
60 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
61 				 PTP_TCR_TSCTRLSSR)
62 
63 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
64 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
65 
66 /* Module parameters */
67 #define TX_TIMEO	5000
68 static int watchdog = TX_TIMEO;
69 module_param(watchdog, int, 0644);
70 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
71 
72 static int debug = -1;
73 module_param(debug, int, 0644);
74 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
75 
76 static int phyaddr = -1;
77 module_param(phyaddr, int, 0444);
78 MODULE_PARM_DESC(phyaddr, "Physical device address");
79 
80 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
81 
82 /* Limit to make sure XDP TX and slow path can coexist */
83 #define STMMAC_XSK_TX_BUDGET_MAX	256
84 #define STMMAC_TX_XSK_AVAIL		16
85 #define STMMAC_RX_FILL_BATCH		16
86 
87 #define STMMAC_XDP_PASS		0
88 #define STMMAC_XDP_CONSUMED	BIT(0)
89 #define STMMAC_XDP_TX		BIT(1)
90 #define STMMAC_XDP_REDIRECT	BIT(2)
91 
92 static int flow_ctrl = 0xdead;
93 module_param(flow_ctrl, int, 0644);
94 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
95 
96 static int pause = PAUSE_TIME;
97 module_param(pause, int, 0644);
98 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
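/* For example: one pause quantum is 512 bit times, so at 1Gbps a quantum
 * lasts 512ns and the maximum value of 0xffff corresponds to roughly 33.5ms
 * of requested pause time.
 */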
99 
100 #define TC_DEFAULT 64
101 static int tc = TC_DEFAULT;
102 module_param(tc, int, 0644);
103 MODULE_PARM_DESC(tc, "DMA threshold control value");
104 
105 /* This is unused */
106 #define	DEFAULT_BUFSIZE	1536
107 static int buf_sz = DEFAULT_BUFSIZE;
108 module_param(buf_sz, int, 0644);
109 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, uint, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but allow user to force to use the chain instead of the ring
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	struct plat_stmmacenet_data *plat_dat = priv->plat;
153 	int ret;
154 
155 	if (enabled) {
156 		ret = clk_prepare_enable(plat_dat->stmmac_clk);
157 		if (ret)
158 			return ret;
159 		ret = clk_prepare_enable(plat_dat->pclk);
160 		if (ret) {
161 			clk_disable_unprepare(plat_dat->stmmac_clk);
162 			return ret;
163 		}
164 		if (plat_dat->clks_config) {
165 			ret = plat_dat->clks_config(plat_dat->bsp_priv, enabled);
166 			if (ret) {
167 				clk_disable_unprepare(plat_dat->stmmac_clk);
168 				clk_disable_unprepare(plat_dat->pclk);
169 				return ret;
170 			}
171 		}
172 	} else {
173 		clk_disable_unprepare(plat_dat->stmmac_clk);
174 		clk_disable_unprepare(plat_dat->pclk);
175 		if (plat_dat->clks_config)
176 			plat_dat->clks_config(plat_dat->bsp_priv, enabled);
177 	}
178 
179 	return 0;
180 }
181 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
182 
183 /**
184  * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
185  * @bsp_priv: BSP private data structure (unused)
186  * @clk_tx_i: the transmit clock
187  * @interface: the selected interface mode
188  * @speed: the speed that the MAC will be operating at
189  *
190  * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
191  * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
192  * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
193  * the plat_data->set_clk_tx_rate method directly, call it via their own
194  * implementation, or implement their own method should they have more
195  * complex requirements. It is only intended to be used via this method.
196  *
197  * plat_data->clk_tx_i must be filled in.
198  */
199 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
200 			   phy_interface_t interface, int speed)
201 {
202 	long rate = rgmii_clock(speed);
203 
204 	/* Silently ignore unsupported speeds as rgmii_clock() only
205 	 * supports 10, 100 and 1000Mbps. We do not want to spit
206 	 * errors for 2500 and higher speeds here.
207 	 */
208 	if (rate < 0)
209 		return 0;
210 
211 	return clk_set_rate(clk_tx_i, rate);
212 }
213 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
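
/* Illustrative sketch only: a platform glue driver with straightforward
 * clocking requirements could hook the generic helper above from its probe
 * function, e.g.:
 *
 *	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
 *	if (IS_ERR(plat_dat->clk_tx_i))
 *		return PTR_ERR(plat_dat->clk_tx_i);
 *	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
 *
 * The "tx" clock name is only an example; the actual clock comes from the
 * platform's binding.
 */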
214 
215 /**
216  * stmmac_verify_args - verify the driver parameters.
217  * Description: it checks the driver parameters and sets a default in case of
218  * errors.
219  */
220 static void stmmac_verify_args(void)
221 {
222 	if (unlikely(watchdog < 0))
223 		watchdog = TX_TIMEO;
224 	if (unlikely((pause < 0) || (pause > 0xffff)))
225 		pause = PAUSE_TIME;
226 
227 	if (flow_ctrl != 0xdead)
228 		pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
229 }
230 
231 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
232 {
233 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
234 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
235 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
236 	u32 queue;
237 
238 	for (queue = 0; queue < maxq; queue++) {
239 		struct stmmac_channel *ch = &priv->channel[queue];
240 
241 		if (stmmac_xdp_is_enabled(priv) &&
242 		    test_bit(queue, priv->af_xdp_zc_qps)) {
243 			napi_disable(&ch->rxtx_napi);
244 			continue;
245 		}
246 
247 		if (queue < rx_queues_cnt)
248 			napi_disable(&ch->rx_napi);
249 		if (queue < tx_queues_cnt)
250 			napi_disable(&ch->tx_napi);
251 	}
252 }
253 
254 /**
255  * stmmac_disable_all_queues - Disable all queues
256  * @priv: driver private structure
257  */
258 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
259 {
260 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
261 	struct stmmac_rx_queue *rx_q;
262 	u32 queue;
263 
264 	/* synchronize_rcu() needed for pending XDP buffers to drain */
265 	for (queue = 0; queue < rx_queues_cnt; queue++) {
266 		rx_q = &priv->dma_conf.rx_queue[queue];
267 		if (rx_q->xsk_pool) {
268 			synchronize_rcu();
269 			break;
270 		}
271 	}
272 
273 	__stmmac_disable_all_queues(priv);
274 }
275 
276 /**
277  * stmmac_enable_all_queues - Enable all queues
278  * @priv: driver private structure
279  */
280 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
281 {
282 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
283 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
284 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
285 	u32 queue;
286 
287 	for (queue = 0; queue < maxq; queue++) {
288 		struct stmmac_channel *ch = &priv->channel[queue];
289 
290 		if (stmmac_xdp_is_enabled(priv) &&
291 		    test_bit(queue, priv->af_xdp_zc_qps)) {
292 			napi_enable(&ch->rxtx_napi);
293 			continue;
294 		}
295 
296 		if (queue < rx_queues_cnt)
297 			napi_enable(&ch->rx_napi);
298 		if (queue < tx_queues_cnt)
299 			napi_enable(&ch->tx_napi);
300 	}
301 }
302 
303 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
304 {
305 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
306 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
307 		queue_work(priv->wq, &priv->service_task);
308 }
309 
310 static void stmmac_global_err(struct stmmac_priv *priv)
311 {
312 	netif_carrier_off(priv->dev);
313 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
314 	stmmac_service_event_schedule(priv);
315 }
316 
317 static void print_pkt(unsigned char *buf, int len)
318 {
319 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
320 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
321 }
322 
323 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
324 {
325 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
326 	u32 avail;
327 
328 	if (tx_q->dirty_tx > tx_q->cur_tx)
329 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
330 	else
331 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
332 
333 	return avail;
334 }
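
/* Example with illustrative numbers: for stmmac_tx_avail() above, with
 * dma_tx_size = 512, cur_tx = 10 and dirty_tx = 5, the second branch gives
 * 512 - 10 + 5 - 1 = 506 usable descriptors; the "- 1" keeps one slot unused
 * so a completely full ring is never mistaken for an empty one.
 */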
335 
336 /**
337  * stmmac_rx_dirty - Get RX queue dirty
338  * @priv: driver private structure
339  * @queue: RX queue index
340  */
341 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
342 {
343 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
344 	u32 dirty;
345 
346 	if (rx_q->dirty_rx <= rx_q->cur_rx)
347 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
348 	else
349 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
350 
351 	return dirty;
352 }
353 
354 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
355 {
356 	u32 tx_cnt = priv->plat->tx_queues_to_use;
357 	u32 queue;
358 
359 	/* check if all TX queues have the work finished */
360 	for (queue = 0; queue < tx_cnt; queue++) {
361 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
362 
363 		if (tx_q->dirty_tx != tx_q->cur_tx)
364 			return true; /* still unfinished work */
365 	}
366 
367 	return false;
368 }
369 
370 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
371 {
372 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
373 }
374 
375 /**
376  * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
377  * @priv: driver private structure
378  * Description: this function verifies that the TX path is idle and, if so,
379  * enters LPI mode (EEE).
380  */
381 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
382 {
383 	if (stmmac_eee_tx_busy(priv)) {
384 		stmmac_restart_sw_lpi_timer(priv);
385 		return;
386 	}
387 
388 	/* Check and enter in LPI mode */
389 	if (!priv->tx_path_in_lpi_mode)
390 		stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
391 				    priv->tx_lpi_clk_stop, 0);
392 }
393 
394 /**
395  * stmmac_stop_sw_lpi - stop transmitting LPI
396  * @priv: driver private structure
397  * Description: When using software-controlled LPI, stop transmitting LPI state.
398  */
399 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
400 {
401 	timer_delete_sync(&priv->eee_ctrl_timer);
402 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
403 	priv->tx_path_in_lpi_mode = false;
404 }
405 
406 /**
407  * stmmac_eee_ctrl_timer - EEE TX SW timer.
408  * @t:  timer_list struct containing private info
409  * Description:
410  *  if there is no data transfer and if we are not in LPI state,
411  *  then the MAC transmitter can be moved to the LPI state.
412  */
413 static void stmmac_eee_ctrl_timer(struct timer_list *t)
414 {
415 	struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
416 
417 	stmmac_try_to_start_sw_lpi(priv);
418 }
419 
420 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
421  * @priv: driver private structure
422  * @p : descriptor pointer
423  * @skb : the socket buffer
424  * Description:
425  * This function reads the timestamp from the descriptor, performs some sanity
426  * checks and then passes it to the stack.
427  */
428 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
429 				   struct dma_desc *p, struct sk_buff *skb)
430 {
431 	struct skb_shared_hwtstamps shhwtstamp;
432 	bool found = false;
433 	u64 ns = 0;
434 
435 	if (!priv->hwts_tx_en)
436 		return;
437 
438 	/* exit if skb doesn't support hw tstamp */
439 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
440 		return;
441 
442 	/* check tx tstamp status */
443 	if (stmmac_get_tx_timestamp_status(priv, p)) {
444 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
445 		found = true;
446 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
447 		found = true;
448 	}
449 
450 	if (found) {
451 		ns -= priv->plat->cdc_error_adj;
452 
453 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
454 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
455 
456 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
457 		/* pass tstamp to stack */
458 		skb_tstamp_tx(skb, &shhwtstamp);
459 	}
460 }
461 
462 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
463  * @priv: driver private structure
464  * @p : descriptor pointer
465  * @np : next descriptor pointer
466  * @skb : the socket buffer
467  * Description:
468  * This function reads the received packet's timestamp from the descriptor
469  * and passes it to the stack. It also performs some sanity checks.
470  */
471 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
472 				   struct dma_desc *np, struct sk_buff *skb)
473 {
474 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
475 	struct dma_desc *desc = p;
476 	u64 ns = 0;
477 
478 	if (!priv->hwts_rx_en)
479 		return;
480 	/* For GMAC4, the valid timestamp is from CTX next desc. */
481 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
482 		desc = np;
483 
484 	/* Check if timestamp is available */
485 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
486 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
487 
488 		ns -= priv->plat->cdc_error_adj;
489 
490 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
491 		shhwtstamp = skb_hwtstamps(skb);
492 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
493 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
494 	} else  {
495 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
496 	}
497 }
498 
499 /**
500  *  stmmac_hwtstamp_set - control hardware timestamping.
501  *  @dev: device pointer.
502  *  @config: the timestamping configuration.
503  *  @extack: netlink extended ack structure for error reporting.
504  *  Description:
505  *  This function configures the MAC to enable/disable both outgoing (TX)
506  *  and incoming (RX) packet timestamping based on user input.
507  *  Return Value:
508  *  0 on success and an appropriate -ve integer on failure.
509  */
510 static int stmmac_hwtstamp_set(struct net_device *dev,
511 			       struct kernel_hwtstamp_config *config,
512 			       struct netlink_ext_ack *extack)
513 {
514 	struct stmmac_priv *priv = netdev_priv(dev);
515 	u32 ptp_v2 = 0;
516 	u32 tstamp_all = 0;
517 	u32 ptp_over_ipv4_udp = 0;
518 	u32 ptp_over_ipv6_udp = 0;
519 	u32 ptp_over_ethernet = 0;
520 	u32 snap_type_sel = 0;
521 	u32 ts_master_en = 0;
522 	u32 ts_event_en = 0;
523 
524 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
525 		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
526 		priv->hwts_tx_en = 0;
527 		priv->hwts_rx_en = 0;
528 
529 		return -EOPNOTSUPP;
530 	}
531 
532 	if (!netif_running(dev)) {
533 		NL_SET_ERR_MSG_MOD(extack,
534 				   "Cannot change timestamping configuration while down");
535 		return -ENODEV;
536 	}
537 
538 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
539 		   __func__, config->flags, config->tx_type, config->rx_filter);
540 
541 	if (config->tx_type != HWTSTAMP_TX_OFF &&
542 	    config->tx_type != HWTSTAMP_TX_ON)
543 		return -ERANGE;
544 
545 	if (priv->adv_ts) {
546 		switch (config->rx_filter) {
547 		case HWTSTAMP_FILTER_NONE:
548 			/* do not time stamp any incoming packet */
549 			config->rx_filter = HWTSTAMP_FILTER_NONE;
550 			break;
551 
552 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
553 			/* PTP v1, UDP, any kind of event packet */
554 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
555 			/* 'xmac' hardware can support Sync, Pdelay_Req and
556 			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
557 			 * This leaves Delay_Req timestamps out.
558 			 * Enable all events *and* general purpose message
559 			 * timestamping
560 			 */
561 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
562 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
563 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
564 			break;
565 
566 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
567 			/* PTP v1, UDP, Sync packet */
568 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
569 			/* take time stamp for SYNC messages only */
570 			ts_event_en = PTP_TCR_TSEVNTENA;
571 
572 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
573 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
574 			break;
575 
576 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
577 			/* PTP v1, UDP, Delay_req packet */
578 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
579 			/* take time stamp for Delay_Req messages only */
580 			ts_master_en = PTP_TCR_TSMSTRENA;
581 			ts_event_en = PTP_TCR_TSEVNTENA;
582 
583 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
584 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
585 			break;
586 
587 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
588 			/* PTP v2, UDP, any kind of event packet */
589 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
590 			ptp_v2 = PTP_TCR_TSVER2ENA;
591 			/* take time stamp for all event messages */
592 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
593 
594 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
595 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
596 			break;
597 
598 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
599 			/* PTP v2, UDP, Sync packet */
600 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
601 			ptp_v2 = PTP_TCR_TSVER2ENA;
602 			/* take time stamp for SYNC messages only */
603 			ts_event_en = PTP_TCR_TSEVNTENA;
604 
605 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
606 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
607 			break;
608 
609 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
610 			/* PTP v2, UDP, Delay_req packet */
611 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
612 			ptp_v2 = PTP_TCR_TSVER2ENA;
613 			/* take time stamp for Delay_Req messages only */
614 			ts_master_en = PTP_TCR_TSMSTRENA;
615 			ts_event_en = PTP_TCR_TSEVNTENA;
616 
617 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
618 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
619 			break;
620 
621 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
622 			/* PTP v2/802.1AS, any layer, any kind of event packet */
623 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
624 			ptp_v2 = PTP_TCR_TSVER2ENA;
625 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
626 			if (priv->synopsys_id < DWMAC_CORE_4_10)
627 				ts_event_en = PTP_TCR_TSEVNTENA;
628 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
629 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
630 			ptp_over_ethernet = PTP_TCR_TSIPENA;
631 			break;
632 
633 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
634 			/* PTP v2/802.1AS, any layer, Sync packet */
635 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
636 			ptp_v2 = PTP_TCR_TSVER2ENA;
637 			/* take time stamp for SYNC messages only */
638 			ts_event_en = PTP_TCR_TSEVNTENA;
639 
640 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
641 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
642 			ptp_over_ethernet = PTP_TCR_TSIPENA;
643 			break;
644 
645 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
646 			/* PTP v2/802.1AS, any layer, Delay_req packet */
647 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
648 			ptp_v2 = PTP_TCR_TSVER2ENA;
649 			/* take time stamp for Delay_Req messages only */
650 			ts_master_en = PTP_TCR_TSMSTRENA;
651 			ts_event_en = PTP_TCR_TSEVNTENA;
652 
653 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
654 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
655 			ptp_over_ethernet = PTP_TCR_TSIPENA;
656 			break;
657 
658 		case HWTSTAMP_FILTER_NTP_ALL:
659 		case HWTSTAMP_FILTER_ALL:
660 			/* time stamp any incoming packet */
661 			config->rx_filter = HWTSTAMP_FILTER_ALL;
662 			tstamp_all = PTP_TCR_TSENALL;
663 			break;
664 
665 		default:
666 			return -ERANGE;
667 		}
668 	} else {
669 		switch (config->rx_filter) {
670 		case HWTSTAMP_FILTER_NONE:
671 			config->rx_filter = HWTSTAMP_FILTER_NONE;
672 			break;
673 		default:
674 			/* PTP v1, UDP, any kind of event packet */
675 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
676 			break;
677 		}
678 	}
679 	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
680 	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
681 
682 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
683 
684 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
685 		priv->systime_flags |= tstamp_all | ptp_v2 |
686 				       ptp_over_ethernet | ptp_over_ipv6_udp |
687 				       ptp_over_ipv4_udp | ts_event_en |
688 				       ts_master_en | snap_type_sel;
689 	}
690 
691 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
692 
693 	priv->tstamp_config = *config;
694 
695 	return 0;
696 }
697 
698 /**
699  *  stmmac_hwtstamp_get - read hardware timestamping.
700  *  @dev: device pointer.
701  *  @config: the timestamping configuration.
702  *  Description:
703  *  This function obtains the current hardware timestamping settings
704  *  as requested.
705  */
706 static int stmmac_hwtstamp_get(struct net_device *dev,
707 			       struct kernel_hwtstamp_config *config)
708 {
709 	struct stmmac_priv *priv = netdev_priv(dev);
710 
711 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
712 		return -EOPNOTSUPP;
713 
714 	*config = priv->tstamp_config;
715 
716 	return 0;
717 }
718 
719 /**
720  * stmmac_init_tstamp_counter - init hardware timestamping counter
721  * @priv: driver private structure
722  * @systime_flags: timestamping flags
723  * Description:
724  * Initialize hardware counter for packet timestamping.
725  * This is valid as long as the interface is open and not suspended.
726  * It will be rerun after resuming from suspend, in which case the timestamping
727  * flags updated by stmmac_hwtstamp_set() also need to be restored.
728  */
729 static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
730 				      u32 systime_flags)
731 {
732 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
733 	struct timespec64 now;
734 	u32 sec_inc = 0;
735 	u64 temp = 0;
736 
737 	if (!priv->plat->clk_ptp_rate) {
738 		netdev_err(priv->dev, "Invalid PTP clock rate\n");
739 		return -EINVAL;
740 	}
741 
742 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
743 	priv->systime_flags = systime_flags;
744 
745 	/* program Sub Second Increment reg */
746 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
747 					   priv->plat->clk_ptp_rate,
748 					   xmac, &sec_inc);
749 	temp = div_u64(1000000000ULL, sec_inc);
750 
751 	/* Store sub second increment for later use */
752 	priv->sub_second_inc = sec_inc;
753 
754 	/* Calculate the default addend value:
755 	 * addend = 2^32 / freq_div_ratio,
756 	 * where freq_div_ratio = clk_ptp_rate / (1e9ns / sec_inc),
757 	 * i.e. addend = ((1e9 / sec_inc) << 32) / clk_ptp_rate.
758 	 */
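	/* Worked example (illustrative numbers, assuming the helper above
	 * programs sec_inc = 40ns for a 50MHz clk_ptp_rate, i.e. twice the
	 * clock period as used with fine correction): temp = 1e9 / 40 =
	 * 25000000, so default_addend = (25000000 << 32) / 50000000 = 2^31.
	 */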
759 	temp = (u64)(temp << 32);
760 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
761 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
762 
763 	/* initialize system time */
764 	ktime_get_real_ts64(&now);
765 
766 	/* lower 32 bits of tv_sec are safe until y2106 */
767 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
768 
769 	return 0;
770 }
771 
772 /**
773  * stmmac_init_timestamping - initialise timestamping
774  * @priv: driver private structure
775  * Description: verify whether the HW supports PTPv1 or PTPv2 timestamping by
776  * looking at the HW capability register and initialise the timestamping
777  * counter. The PTP clock itself is registered later by stmmac_setup_ptp().
778  */
779 static int stmmac_init_timestamping(struct stmmac_priv *priv)
780 {
781 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
782 	int ret;
783 
784 	if (priv->plat->ptp_clk_freq_config)
785 		priv->plat->ptp_clk_freq_config(priv);
786 
787 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
788 		netdev_info(priv->dev, "PTP not supported by HW\n");
789 		return -EOPNOTSUPP;
790 	}
791 
792 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
793 	if (ret) {
794 		netdev_warn(priv->dev, "PTP init failed\n");
795 		return ret;
796 	}
797 
798 	priv->adv_ts = 0;
799 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
800 	if (xmac && priv->dma_cap.atime_stamp)
801 		priv->adv_ts = 1;
802 	/* Dwmac 3.x core with extend_desc can support adv_ts */
803 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
804 		priv->adv_ts = 1;
805 
806 	if (priv->dma_cap.time_stamp)
807 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
808 
809 	if (priv->adv_ts)
810 		netdev_info(priv->dev,
811 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
812 
813 	priv->hwts_tx_en = 0;
814 	priv->hwts_rx_en = 0;
815 
816 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
817 		stmmac_hwtstamp_correct_latency(priv, priv);
818 
819 	return 0;
820 }
821 
822 static void stmmac_setup_ptp(struct stmmac_priv *priv)
823 {
824 	int ret;
825 
826 	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
827 	if (ret < 0)
828 		netdev_warn(priv->dev,
829 			    "failed to enable PTP reference clock: %pe\n",
830 			    ERR_PTR(ret));
831 
832 	if (stmmac_init_timestamping(priv) == 0)
833 		stmmac_ptp_register(priv);
834 }
835 
836 static void stmmac_release_ptp(struct stmmac_priv *priv)
837 {
838 	stmmac_ptp_unregister(priv);
839 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
840 }
841 
842 /**
843  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
844  *  @priv: driver private structure
845  *  @duplex: duplex mode to configure
846  *  @flow_ctrl: desired flow control modes
847  *  Description: It is used for configuring the flow control in all queues
848  */
849 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
850 				 unsigned int flow_ctrl)
851 {
852 	u32 tx_cnt = priv->plat->tx_queues_to_use;
853 
854 	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
855 			 tx_cnt);
856 }
857 
858 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
859 					 phy_interface_t interface)
860 {
861 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
862 
863 	/* Refresh the MAC-specific capabilities */
864 	stmmac_mac_update_caps(priv);
865 
866 	config->mac_capabilities = priv->hw->link.caps;
867 
868 	if (priv->plat->max_speed)
869 		phylink_limit_mac_speed(config, priv->plat->max_speed);
870 
871 	return config->mac_capabilities;
872 }
873 
874 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
875 						 phy_interface_t interface)
876 {
877 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
878 	struct phylink_pcs *pcs;
879 
880 	if (priv->plat->select_pcs) {
881 		pcs = priv->plat->select_pcs(priv, interface);
882 		if (!IS_ERR(pcs))
883 			return pcs;
884 	}
885 
886 	return NULL;
887 }
888 
889 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
890 			      const struct phylink_link_state *state)
891 {
892 	/* Nothing to do, xpcs_config() handles everything */
893 }
894 
895 static void stmmac_mac_link_down(struct phylink_config *config,
896 				 unsigned int mode, phy_interface_t interface)
897 {
898 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
899 
900 	stmmac_mac_set(priv, priv->ioaddr, false);
901 	if (priv->dma_cap.eee)
902 		stmmac_set_eee_pls(priv, priv->hw, false);
903 
904 	if (stmmac_fpe_supported(priv))
905 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
906 }
907 
908 static void stmmac_mac_link_up(struct phylink_config *config,
909 			       struct phy_device *phy,
910 			       unsigned int mode, phy_interface_t interface,
911 			       int speed, int duplex,
912 			       bool tx_pause, bool rx_pause)
913 {
914 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
915 	unsigned int flow_ctrl;
916 	u32 old_ctrl, ctrl;
917 	int ret;
918 
919 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
920 	    priv->plat->serdes_powerup)
921 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
922 
923 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
924 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
925 
926 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
927 		switch (speed) {
928 		case SPEED_10000:
929 			ctrl |= priv->hw->link.xgmii.speed10000;
930 			break;
931 		case SPEED_5000:
932 			ctrl |= priv->hw->link.xgmii.speed5000;
933 			break;
934 		case SPEED_2500:
935 			ctrl |= priv->hw->link.xgmii.speed2500;
936 			break;
937 		default:
938 			return;
939 		}
940 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
941 		switch (speed) {
942 		case SPEED_100000:
943 			ctrl |= priv->hw->link.xlgmii.speed100000;
944 			break;
945 		case SPEED_50000:
946 			ctrl |= priv->hw->link.xlgmii.speed50000;
947 			break;
948 		case SPEED_40000:
949 			ctrl |= priv->hw->link.xlgmii.speed40000;
950 			break;
951 		case SPEED_25000:
952 			ctrl |= priv->hw->link.xlgmii.speed25000;
953 			break;
954 		case SPEED_10000:
955 			ctrl |= priv->hw->link.xgmii.speed10000;
956 			break;
957 		case SPEED_2500:
958 			ctrl |= priv->hw->link.speed2500;
959 			break;
960 		case SPEED_1000:
961 			ctrl |= priv->hw->link.speed1000;
962 			break;
963 		default:
964 			return;
965 		}
966 	} else {
967 		switch (speed) {
968 		case SPEED_2500:
969 			ctrl |= priv->hw->link.speed2500;
970 			break;
971 		case SPEED_1000:
972 			ctrl |= priv->hw->link.speed1000;
973 			break;
974 		case SPEED_100:
975 			ctrl |= priv->hw->link.speed100;
976 			break;
977 		case SPEED_10:
978 			ctrl |= priv->hw->link.speed10;
979 			break;
980 		default:
981 			return;
982 		}
983 	}
984 
985 	if (priv->plat->fix_mac_speed)
986 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
987 
988 	if (!duplex)
989 		ctrl &= ~priv->hw->link.duplex;
990 	else
991 		ctrl |= priv->hw->link.duplex;
992 
993 	/* Flow Control operation */
994 	if (rx_pause && tx_pause)
995 		flow_ctrl = FLOW_AUTO;
996 	else if (rx_pause && !tx_pause)
997 		flow_ctrl = FLOW_RX;
998 	else if (!rx_pause && tx_pause)
999 		flow_ctrl = FLOW_TX;
1000 	else
1001 		flow_ctrl = FLOW_OFF;
1002 
1003 	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
1004 
1005 	if (ctrl != old_ctrl)
1006 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1007 
1008 	if (priv->plat->set_clk_tx_rate) {
1009 		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
1010 						priv->plat->clk_tx_i,
1011 						interface, speed);
1012 		if (ret < 0)
1013 			netdev_err(priv->dev,
1014 				   "failed to configure %s transmit clock for %dMbps: %pe\n",
1015 				   phy_modes(interface), speed, ERR_PTR(ret));
1016 	}
1017 
1018 	stmmac_mac_set(priv, priv->ioaddr, true);
1019 	if (priv->dma_cap.eee)
1020 		stmmac_set_eee_pls(priv, priv->hw, true);
1021 
1022 	if (stmmac_fpe_supported(priv))
1023 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
1024 
1025 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1026 		stmmac_hwtstamp_correct_latency(priv, priv);
1027 }
1028 
1029 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1030 {
1031 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1032 
1033 	priv->eee_active = false;
1034 
1035 	mutex_lock(&priv->lock);
1036 
1037 	priv->eee_enabled = false;
1038 
1039 	netdev_dbg(priv->dev, "disable EEE\n");
1040 	priv->eee_sw_timer_en = false;
1041 	timer_delete_sync(&priv->eee_ctrl_timer);
1042 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1043 	priv->tx_path_in_lpi_mode = false;
1044 
1045 	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1046 	mutex_unlock(&priv->lock);
1047 }
1048 
1049 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1050 				    bool tx_clk_stop)
1051 {
1052 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1053 	int ret;
1054 
1055 	priv->tx_lpi_timer = timer;
1056 	priv->eee_active = true;
1057 
1058 	mutex_lock(&priv->lock);
1059 
1060 	priv->eee_enabled = true;
1061 
1062 	/* Update the transmit clock stop according to PHY capability if
1063 	 * the platform allows
1064 	 */
1065 	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1066 		priv->tx_lpi_clk_stop = tx_clk_stop;
1067 
1068 	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1069 			     STMMAC_DEFAULT_TWT_LS);
1070 
1071 	/* Try to configure the hardware timer. */
1072 	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1073 				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1074 
1075 	if (ret) {
1076 		/* Hardware timer mode not supported, or value out of range.
1077 		 * Fall back to using software LPI mode
1078 		 */
1079 		priv->eee_sw_timer_en = true;
1080 		stmmac_restart_sw_lpi_timer(priv);
1081 	}
1082 
1083 	mutex_unlock(&priv->lock);
1084 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1085 
1086 	return 0;
1087 }
1088 
1089 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1090 			     phy_interface_t interface)
1091 {
1092 	struct net_device *ndev = to_net_dev(config->dev);
1093 	struct stmmac_priv *priv = netdev_priv(ndev);
1094 
1095 	if (priv->plat->mac_finish)
1096 		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1097 
1098 	return 0;
1099 }
1100 
1101 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1102 	.mac_get_caps = stmmac_mac_get_caps,
1103 	.mac_select_pcs = stmmac_mac_select_pcs,
1104 	.mac_config = stmmac_mac_config,
1105 	.mac_link_down = stmmac_mac_link_down,
1106 	.mac_link_up = stmmac_mac_link_up,
1107 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1108 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1109 	.mac_finish = stmmac_mac_finish,
1110 };
1111 
1112 /**
1113  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1114  * @priv: driver private structure
1115  * Description: this is to verify if the HW supports the PCS, i.e. the
1116  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1117  * configured for the TBI, RTBI, or SGMII PHY interface.
1118  */
1119 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1120 {
1121 	int interface = priv->plat->mac_interface;
1122 
1123 	if (priv->dma_cap.pcs) {
1124 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1125 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1126 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1127 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1128 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1129 			priv->hw->pcs = STMMAC_PCS_RGMII;
1130 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1131 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1132 			priv->hw->pcs = STMMAC_PCS_SGMII;
1133 		}
1134 	}
1135 }
1136 
1137 /**
1138  * stmmac_init_phy - PHY initialization
1139  * @dev: net device structure
1140  * Description: it initializes the driver's PHY state, and attaches the PHY
1141  * to the MAC driver.
1142  *  Return value:
1143  *  0 on success
1144  */
1145 static int stmmac_init_phy(struct net_device *dev)
1146 {
1147 	struct stmmac_priv *priv = netdev_priv(dev);
1148 	struct fwnode_handle *phy_fwnode;
1149 	struct fwnode_handle *fwnode;
1150 	int ret;
1151 
1152 	if (!phylink_expects_phy(priv->phylink))
1153 		return 0;
1154 
1155 	fwnode = priv->plat->port_node;
1156 	if (!fwnode)
1157 		fwnode = dev_fwnode(priv->device);
1158 
1159 	if (fwnode)
1160 		phy_fwnode = fwnode_get_phy_node(fwnode);
1161 	else
1162 		phy_fwnode = NULL;
1163 
1164 	/* Some DT bindings do not set up the PHY handle. Let's try to
1165 	 * parse it manually.
1166 	 */
1167 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1168 		int addr = priv->plat->phy_addr;
1169 		struct phy_device *phydev;
1170 
1171 		if (addr < 0) {
1172 			netdev_err(priv->dev, "no phy found\n");
1173 			return -ENODEV;
1174 		}
1175 
1176 		phydev = mdiobus_get_phy(priv->mii, addr);
1177 		if (!phydev) {
1178 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1179 			return -ENODEV;
1180 		}
1181 
1182 		ret = phylink_connect_phy(priv->phylink, phydev);
1183 	} else {
1184 		fwnode_handle_put(phy_fwnode);
1185 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1186 	}
1187 
1188 	if (ret == 0) {
1189 		struct ethtool_keee eee;
1190 
1191 		/* Configure phylib's copy of the LPI timer. Normally,
1192 		 * phylink_config.lpi_timer_default would do this, but there is
1193 		 * a chance that userspace could change the eee_timer setting
1194 		 * via sysfs before the first open. Thus, preserve existing
1195 		 * behaviour.
1196 		 */
1197 		if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1198 			eee.tx_lpi_timer = priv->tx_lpi_timer;
1199 			phylink_ethtool_set_eee(priv->phylink, &eee);
1200 		}
1201 	}
1202 
1203 	if (!priv->plat->pmt) {
1204 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1205 
1206 		phylink_ethtool_get_wol(priv->phylink, &wol);
1207 		device_set_wakeup_capable(priv->device, !!wol.supported);
1208 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1209 	}
1210 
1211 	return ret;
1212 }
1213 
1214 static int stmmac_phy_setup(struct stmmac_priv *priv)
1215 {
1216 	struct stmmac_mdio_bus_data *mdio_bus_data;
1217 	struct phylink_config *config;
1218 	struct fwnode_handle *fwnode;
1219 	struct phylink_pcs *pcs;
1220 	struct phylink *phylink;
1221 
1222 	config = &priv->phylink_config;
1223 
1224 	config->dev = &priv->dev->dev;
1225 	config->type = PHYLINK_NETDEV;
1226 	config->mac_managed_pm = true;
1227 
1228 	/* Stmmac always requires an RX clock for hardware initialization */
1229 	config->mac_requires_rxc = true;
1230 
1231 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1232 		config->eee_rx_clk_stop_enable = true;
1233 
1234 	/* Set the default transmit clock stop bit based on the platform glue */
1235 	priv->tx_lpi_clk_stop = priv->plat->flags &
1236 				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1237 
1238 	mdio_bus_data = priv->plat->mdio_bus_data;
1239 	if (mdio_bus_data)
1240 		config->default_an_inband = mdio_bus_data->default_an_inband;
1241 
1242 	/* Get the PHY interface modes (at the PHY end of the link) that
1243 	 * are supported by the platform.
1244 	 */
1245 	if (priv->plat->get_interfaces)
1246 		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1247 					   config->supported_interfaces);
1248 
1249 	/* If the supported interfaces have not already been provided, fall
1250 	 * back to the platform/firmware specified interface mode
1251 	 * (phy_interface) as a last resort.
1252 	 */
1253 	if (phy_interface_empty(config->supported_interfaces))
1254 		__set_bit(priv->plat->phy_interface,
1255 			  config->supported_interfaces);
1256 
1257 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1258 	if (priv->hw->xpcs)
1259 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1260 	else
1261 		pcs = priv->hw->phylink_pcs;
1262 
1263 	if (pcs)
1264 		phy_interface_or(config->supported_interfaces,
1265 				 config->supported_interfaces,
1266 				 pcs->supported_interfaces);
1267 
1268 	if (priv->dma_cap.eee) {
1269 		/* Assume all supported interfaces also support LPI */
1270 		memcpy(config->lpi_interfaces, config->supported_interfaces,
1271 		       sizeof(config->lpi_interfaces));
1272 
1273 		/* All full duplex speeds of 100Mbps and above are supported */
1274 		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
1275 		config->lpi_timer_default = eee_timer * 1000;
1276 		config->eee_enabled_default = true;
1277 	}
1278 
1279 	fwnode = priv->plat->port_node;
1280 	if (!fwnode)
1281 		fwnode = dev_fwnode(priv->device);
1282 
1283 	phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1284 				 &stmmac_phylink_mac_ops);
1285 	if (IS_ERR(phylink))
1286 		return PTR_ERR(phylink);
1287 
1288 	priv->phylink = phylink;
1289 	return 0;
1290 }
1291 
1292 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1293 				    struct stmmac_dma_conf *dma_conf)
1294 {
1295 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1296 	unsigned int desc_size;
1297 	void *head_rx;
1298 	u32 queue;
1299 
1300 	/* Display RX rings */
1301 	for (queue = 0; queue < rx_cnt; queue++) {
1302 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1303 
1304 		pr_info("\tRX Queue %u rings\n", queue);
1305 
1306 		if (priv->extend_desc) {
1307 			head_rx = (void *)rx_q->dma_erx;
1308 			desc_size = sizeof(struct dma_extended_desc);
1309 		} else {
1310 			head_rx = (void *)rx_q->dma_rx;
1311 			desc_size = sizeof(struct dma_desc);
1312 		}
1313 
1314 		/* Display RX ring */
1315 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1316 				    rx_q->dma_rx_phy, desc_size);
1317 	}
1318 }
1319 
1320 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1321 				    struct stmmac_dma_conf *dma_conf)
1322 {
1323 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1324 	unsigned int desc_size;
1325 	void *head_tx;
1326 	u32 queue;
1327 
1328 	/* Display TX rings */
1329 	for (queue = 0; queue < tx_cnt; queue++) {
1330 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1331 
1332 		pr_info("\tTX Queue %d rings\n", queue);
1333 
1334 		if (priv->extend_desc) {
1335 			head_tx = (void *)tx_q->dma_etx;
1336 			desc_size = sizeof(struct dma_extended_desc);
1337 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1338 			head_tx = (void *)tx_q->dma_entx;
1339 			desc_size = sizeof(struct dma_edesc);
1340 		} else {
1341 			head_tx = (void *)tx_q->dma_tx;
1342 			desc_size = sizeof(struct dma_desc);
1343 		}
1344 
1345 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1346 				    tx_q->dma_tx_phy, desc_size);
1347 	}
1348 }
1349 
1350 static void stmmac_display_rings(struct stmmac_priv *priv,
1351 				 struct stmmac_dma_conf *dma_conf)
1352 {
1353 	/* Display RX ring */
1354 	stmmac_display_rx_rings(priv, dma_conf);
1355 
1356 	/* Display TX ring */
1357 	stmmac_display_tx_rings(priv, dma_conf);
1358 }
1359 
1360 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1361 {
1362 	if (stmmac_xdp_is_enabled(priv))
1363 		return XDP_PACKET_HEADROOM;
1364 
1365 	return NET_SKB_PAD;
1366 }
1367 
1368 static int stmmac_set_bfsize(int mtu, int bufsize)
1369 {
1370 	int ret = bufsize;
1371 
1372 	if (mtu >= BUF_SIZE_8KiB)
1373 		ret = BUF_SIZE_16KiB;
1374 	else if (mtu >= BUF_SIZE_4KiB)
1375 		ret = BUF_SIZE_8KiB;
1376 	else if (mtu >= BUF_SIZE_2KiB)
1377 		ret = BUF_SIZE_4KiB;
1378 	else if (mtu > DEFAULT_BUFSIZE)
1379 		ret = BUF_SIZE_2KiB;
1380 	else
1381 		ret = DEFAULT_BUFSIZE;
1382 
1383 	return ret;
1384 }
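
/* For example, an MTU of 3000 is >= BUF_SIZE_2KiB but smaller than
 * BUF_SIZE_4KiB, so stmmac_set_bfsize() above selects a 4KiB DMA buffer.
 */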
1385 
1386 /**
1387  * stmmac_clear_rx_descriptors - clear RX descriptors
1388  * @priv: driver private structure
1389  * @dma_conf: structure to take the dma data
1390  * @queue: RX queue index
1391  * Description: this function is called to clear the RX descriptors
1392  * whether basic or extended descriptors are used.
1393  */
1394 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1395 					struct stmmac_dma_conf *dma_conf,
1396 					u32 queue)
1397 {
1398 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1399 	int i;
1400 
1401 	/* Clear the RX descriptors */
1402 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1403 		if (priv->extend_desc)
1404 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1405 					priv->use_riwt, priv->mode,
1406 					(i == dma_conf->dma_rx_size - 1),
1407 					dma_conf->dma_buf_sz);
1408 		else
1409 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1410 					priv->use_riwt, priv->mode,
1411 					(i == dma_conf->dma_rx_size - 1),
1412 					dma_conf->dma_buf_sz);
1413 }
1414 
1415 /**
1416  * stmmac_clear_tx_descriptors - clear tx descriptors
1417  * @priv: driver private structure
1418  * @dma_conf: structure to take the dma data
1419  * @queue: TX queue index.
1420  * Description: this function is called to clear the TX descriptors
1421  * whether basic or extended descriptors are used.
1422  */
1423 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1424 					struct stmmac_dma_conf *dma_conf,
1425 					u32 queue)
1426 {
1427 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1428 	int i;
1429 
1430 	/* Clear the TX descriptors */
1431 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1432 		int last = (i == (dma_conf->dma_tx_size - 1));
1433 		struct dma_desc *p;
1434 
1435 		if (priv->extend_desc)
1436 			p = &tx_q->dma_etx[i].basic;
1437 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1438 			p = &tx_q->dma_entx[i].basic;
1439 		else
1440 			p = &tx_q->dma_tx[i];
1441 
1442 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1443 	}
1444 }
1445 
1446 /**
1447  * stmmac_clear_descriptors - clear descriptors
1448  * @priv: driver private structure
1449  * @dma_conf: structure to take the dma data
1450  * Description: this function is called to clear the TX and RX descriptors
1451  * whether basic or extended descriptors are used.
1452  */
1453 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1454 				     struct stmmac_dma_conf *dma_conf)
1455 {
1456 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1457 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1458 	u32 queue;
1459 
1460 	/* Clear the RX descriptors */
1461 	for (queue = 0; queue < rx_queue_cnt; queue++)
1462 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1463 
1464 	/* Clear the TX descriptors */
1465 	for (queue = 0; queue < tx_queue_cnt; queue++)
1466 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1467 }
1468 
1469 /**
1470  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1471  * @priv: driver private structure
1472  * @dma_conf: structure to take the dma data
1473  * @p: descriptor pointer
1474  * @i: descriptor index
1475  * @flags: gfp flag
1476  * @queue: RX queue index
1477  * Description: this function is called to allocate a receive buffer, perform
1478  * the DMA mapping and init the descriptor.
1479  */
1480 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1481 				  struct stmmac_dma_conf *dma_conf,
1482 				  struct dma_desc *p,
1483 				  int i, gfp_t flags, u32 queue)
1484 {
1485 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1486 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1487 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1488 
1489 	if (priv->dma_cap.host_dma_width <= 32)
1490 		gfp |= GFP_DMA32;
1491 
1492 	if (!buf->page) {
1493 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1494 		if (!buf->page)
1495 			return -ENOMEM;
1496 		buf->page_offset = stmmac_rx_offset(priv);
1497 	}
1498 
1499 	if (priv->sph && !buf->sec_page) {
1500 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1501 		if (!buf->sec_page)
1502 			return -ENOMEM;
1503 
1504 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1505 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1506 	} else {
1507 		buf->sec_page = NULL;
1508 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1509 	}
1510 
1511 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1512 
1513 	stmmac_set_desc_addr(priv, p, buf->addr);
1514 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1515 		stmmac_init_desc3(priv, p);
1516 
1517 	return 0;
1518 }
1519 
1520 /**
1521  * stmmac_free_rx_buffer - free RX dma buffers
1522  * @priv: private structure
1523  * @rx_q: RX queue
1524  * @i: buffer index.
1525  */
1526 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1527 				  struct stmmac_rx_queue *rx_q,
1528 				  int i)
1529 {
1530 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1531 
1532 	if (buf->page)
1533 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1534 	buf->page = NULL;
1535 
1536 	if (buf->sec_page)
1537 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1538 	buf->sec_page = NULL;
1539 }
1540 
1541 /**
1542  * stmmac_free_tx_buffer - free TX dma buffers
1543  * @priv: private structure
1544  * @dma_conf: structure to take the dma data
1545  * @queue: TX queue index
1546  * @i: buffer index.
1547  */
1548 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1549 				  struct stmmac_dma_conf *dma_conf,
1550 				  u32 queue, int i)
1551 {
1552 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1553 
1554 	if (tx_q->tx_skbuff_dma[i].buf &&
1555 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1556 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1557 			dma_unmap_page(priv->device,
1558 				       tx_q->tx_skbuff_dma[i].buf,
1559 				       tx_q->tx_skbuff_dma[i].len,
1560 				       DMA_TO_DEVICE);
1561 		else
1562 			dma_unmap_single(priv->device,
1563 					 tx_q->tx_skbuff_dma[i].buf,
1564 					 tx_q->tx_skbuff_dma[i].len,
1565 					 DMA_TO_DEVICE);
1566 	}
1567 
1568 	if (tx_q->xdpf[i] &&
1569 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1570 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1571 		xdp_return_frame(tx_q->xdpf[i]);
1572 		tx_q->xdpf[i] = NULL;
1573 	}
1574 
1575 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1576 		tx_q->xsk_frames_done++;
1577 
1578 	if (tx_q->tx_skbuff[i] &&
1579 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1580 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1581 		tx_q->tx_skbuff[i] = NULL;
1582 	}
1583 
1584 	tx_q->tx_skbuff_dma[i].buf = 0;
1585 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1586 }
1587 
1588 /**
1589  * dma_free_rx_skbufs - free RX dma buffers
1590  * @priv: private structure
1591  * @dma_conf: structure to take the dma data
1592  * @queue: RX queue index
1593  */
1594 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1595 			       struct stmmac_dma_conf *dma_conf,
1596 			       u32 queue)
1597 {
1598 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1599 	int i;
1600 
1601 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1602 		stmmac_free_rx_buffer(priv, rx_q, i);
1603 }
1604 
1605 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1606 				   struct stmmac_dma_conf *dma_conf,
1607 				   u32 queue, gfp_t flags)
1608 {
1609 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1610 	int i;
1611 
1612 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1613 		struct dma_desc *p;
1614 		int ret;
1615 
1616 		if (priv->extend_desc)
1617 			p = &((rx_q->dma_erx + i)->basic);
1618 		else
1619 			p = rx_q->dma_rx + i;
1620 
1621 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1622 					     queue);
1623 		if (ret)
1624 			return ret;
1625 
1626 		rx_q->buf_alloc_num++;
1627 	}
1628 
1629 	return 0;
1630 }
1631 
1632 /**
1633  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1634  * @priv: private structure
1635  * @dma_conf: structure to take the dma data
1636  * @queue: RX queue index
1637  */
1638 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1639 				struct stmmac_dma_conf *dma_conf,
1640 				u32 queue)
1641 {
1642 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1643 	int i;
1644 
1645 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1646 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1647 
1648 		if (!buf->xdp)
1649 			continue;
1650 
1651 		xsk_buff_free(buf->xdp);
1652 		buf->xdp = NULL;
1653 	}
1654 }
1655 
1656 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1657 				      struct stmmac_dma_conf *dma_conf,
1658 				      u32 queue)
1659 {
1660 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1661 	int i;
1662 
1663 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1664 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1665 	 * use this macro to make sure there are no size violations.
1666 	 */
1667 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1668 
1669 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1670 		struct stmmac_rx_buffer *buf;
1671 		dma_addr_t dma_addr;
1672 		struct dma_desc *p;
1673 
1674 		if (priv->extend_desc)
1675 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1676 		else
1677 			p = rx_q->dma_rx + i;
1678 
1679 		buf = &rx_q->buf_pool[i];
1680 
1681 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1682 		if (!buf->xdp)
1683 			return -ENOMEM;
1684 
1685 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1686 		stmmac_set_desc_addr(priv, p, dma_addr);
1687 		rx_q->buf_alloc_num++;
1688 	}
1689 
1690 	return 0;
1691 }
1692 
1693 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1694 {
1695 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1696 		return NULL;
1697 
1698 	return xsk_get_pool_from_qid(priv->dev, queue);
1699 }
1700 
1701 /**
1702  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1703  * @priv: driver private structure
1704  * @dma_conf: structure to take the dma data
1705  * @queue: RX queue index
1706  * @flags: gfp flag.
1707  * Description: this function initializes the DMA RX descriptors
1708  * and allocates the socket buffers. It supports the chained and ring
1709  * modes.
1710  */
1711 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1712 				    struct stmmac_dma_conf *dma_conf,
1713 				    u32 queue, gfp_t flags)
1714 {
1715 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1716 	int ret;
1717 
1718 	netif_dbg(priv, probe, priv->dev,
1719 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1720 		  (u32)rx_q->dma_rx_phy);
1721 
1722 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1723 
1724 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1725 
1726 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1727 
1728 	if (rx_q->xsk_pool) {
1729 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1730 						   MEM_TYPE_XSK_BUFF_POOL,
1731 						   NULL));
1732 		netdev_info(priv->dev,
1733 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1734 			    rx_q->queue_index);
1735 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1736 	} else {
1737 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1738 						   MEM_TYPE_PAGE_POOL,
1739 						   rx_q->page_pool));
1740 		netdev_info(priv->dev,
1741 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1742 			    rx_q->queue_index);
1743 	}
1744 
1745 	if (rx_q->xsk_pool) {
1746 		/* RX XDP ZC buffer pool may not be populated, e.g.
1747 		 * xdpsock TX-only.
1748 		 */
1749 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1750 	} else {
1751 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1752 		if (ret < 0)
1753 			return -ENOMEM;
1754 	}
1755 
1756 	/* Setup the chained descriptor addresses */
1757 	if (priv->mode == STMMAC_CHAIN_MODE) {
1758 		if (priv->extend_desc)
1759 			stmmac_mode_init(priv, rx_q->dma_erx,
1760 					 rx_q->dma_rx_phy,
1761 					 dma_conf->dma_rx_size, 1);
1762 		else
1763 			stmmac_mode_init(priv, rx_q->dma_rx,
1764 					 rx_q->dma_rx_phy,
1765 					 dma_conf->dma_rx_size, 0);
1766 	}
1767 
1768 	return 0;
1769 }
1770 
1771 static int init_dma_rx_desc_rings(struct net_device *dev,
1772 				  struct stmmac_dma_conf *dma_conf,
1773 				  gfp_t flags)
1774 {
1775 	struct stmmac_priv *priv = netdev_priv(dev);
1776 	u32 rx_count = priv->plat->rx_queues_to_use;
1777 	int queue;
1778 	int ret;
1779 
1780 	/* RX INITIALIZATION */
1781 	netif_dbg(priv, probe, priv->dev,
1782 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1783 
1784 	for (queue = 0; queue < rx_count; queue++) {
1785 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1786 		if (ret)
1787 			goto err_init_rx_buffers;
1788 	}
1789 
1790 	return 0;
1791 
1792 err_init_rx_buffers:
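	/* Unwind: release the buffers of every queue initialised so far,
	 * including the one that failed part-way through.
	 */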
1793 	while (queue >= 0) {
1794 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1795 
1796 		if (rx_q->xsk_pool)
1797 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1798 		else
1799 			dma_free_rx_skbufs(priv, dma_conf, queue);
1800 
1801 		rx_q->buf_alloc_num = 0;
1802 		rx_q->xsk_pool = NULL;
1803 
1804 		queue--;
1805 	}
1806 
1807 	return ret;
1808 }
1809 
1810 /**
1811  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1812  * @priv: driver private structure
1813  * @dma_conf: structure to take the dma data
1814  * @queue: TX queue index
1815  * Description: this function initializes the DMA TX descriptors
1816  * and allocates the socket buffers. It supports the chained and ring
1817  * modes.
1818  */
1819 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1820 				    struct stmmac_dma_conf *dma_conf,
1821 				    u32 queue)
1822 {
1823 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1824 	int i;
1825 
1826 	netif_dbg(priv, probe, priv->dev,
1827 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1828 		  (u32)tx_q->dma_tx_phy);
1829 
1830 	/* Setup the chained descriptor addresses */
1831 	if (priv->mode == STMMAC_CHAIN_MODE) {
1832 		if (priv->extend_desc)
1833 			stmmac_mode_init(priv, tx_q->dma_etx,
1834 					 tx_q->dma_tx_phy,
1835 					 dma_conf->dma_tx_size, 1);
1836 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1837 			stmmac_mode_init(priv, tx_q->dma_tx,
1838 					 tx_q->dma_tx_phy,
1839 					 dma_conf->dma_tx_size, 0);
1840 	}
1841 
1842 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1843 
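	/* Clear every TX descriptor and reset the per-entry bookkeeping so
	 * the ring starts out empty.
	 */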
1844 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1845 		struct dma_desc *p;
1846 
1847 		if (priv->extend_desc)
1848 			p = &((tx_q->dma_etx + i)->basic);
1849 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1850 			p = &((tx_q->dma_entx + i)->basic);
1851 		else
1852 			p = tx_q->dma_tx + i;
1853 
1854 		stmmac_clear_desc(priv, p);
1855 
1856 		tx_q->tx_skbuff_dma[i].buf = 0;
1857 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1858 		tx_q->tx_skbuff_dma[i].len = 0;
1859 		tx_q->tx_skbuff_dma[i].last_segment = false;
1860 		tx_q->tx_skbuff[i] = NULL;
1861 	}
1862 
1863 	return 0;
1864 }
1865 
1866 static int init_dma_tx_desc_rings(struct net_device *dev,
1867 				  struct stmmac_dma_conf *dma_conf)
1868 {
1869 	struct stmmac_priv *priv = netdev_priv(dev);
1870 	u32 tx_queue_cnt;
1871 	u32 queue;
1872 
1873 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1874 
1875 	for (queue = 0; queue < tx_queue_cnt; queue++)
1876 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1877 
1878 	return 0;
1879 }
1880 
1881 /**
1882  * init_dma_desc_rings - init the RX/TX descriptor rings
1883  * @dev: net device structure
1884  * @dma_conf: structure to take the dma data
1885  * @flags: gfp flag.
1886  * Description: this function initializes the DMA RX/TX descriptors
1887  * and allocates the socket buffers. It supports the chained and ring
1888  * modes.
1889  */
1890 static int init_dma_desc_rings(struct net_device *dev,
1891 			       struct stmmac_dma_conf *dma_conf,
1892 			       gfp_t flags)
1893 {
1894 	struct stmmac_priv *priv = netdev_priv(dev);
1895 	int ret;
1896 
1897 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1898 	if (ret)
1899 		return ret;
1900 
1901 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1902 
1903 	stmmac_clear_descriptors(priv, dma_conf);
1904 
1905 	if (netif_msg_hw(priv))
1906 		stmmac_display_rings(priv, dma_conf);
1907 
1908 	return ret;
1909 }
1910 
1911 /**
1912  * dma_free_tx_skbufs - free TX dma buffers
1913  * @priv: private structure
1914  * @dma_conf: structure to take the dma data
1915  * @queue: TX queue index
1916  */
1917 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1918 			       struct stmmac_dma_conf *dma_conf,
1919 			       u32 queue)
1920 {
1921 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1922 	int i;
1923 
1924 	tx_q->xsk_frames_done = 0;
1925 
1926 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1927 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1928 
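	/* Report any outstanding XSK frames back to the pool and detach it. */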
1929 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1930 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1931 		tx_q->xsk_frames_done = 0;
1932 		tx_q->xsk_pool = NULL;
1933 	}
1934 }
1935 
1936 /**
1937  * stmmac_free_tx_skbufs - free TX skb buffers
1938  * @priv: private structure
1939  */
1940 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1941 {
1942 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1943 	u32 queue;
1944 
1945 	for (queue = 0; queue < tx_queue_cnt; queue++)
1946 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1947 }
1948 
1949 /**
1950  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1951  * @priv: private structure
1952  * @dma_conf: structure to take the dma data
1953  * @queue: RX queue index
1954  */
1955 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1956 					 struct stmmac_dma_conf *dma_conf,
1957 					 u32 queue)
1958 {
1959 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1960 
1961 	/* Release the DMA RX socket buffers */
1962 	if (rx_q->xsk_pool)
1963 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1964 	else
1965 		dma_free_rx_skbufs(priv, dma_conf, queue);
1966 
1967 	rx_q->buf_alloc_num = 0;
1968 	rx_q->xsk_pool = NULL;
1969 
1970 	/* Free DMA regions of consistent memory previously allocated */
1971 	if (!priv->extend_desc)
1972 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1973 				  sizeof(struct dma_desc),
1974 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1975 	else
1976 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1977 				  sizeof(struct dma_extended_desc),
1978 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1979 
1980 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1981 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1982 
1983 	kfree(rx_q->buf_pool);
1984 	if (rx_q->page_pool)
1985 		page_pool_destroy(rx_q->page_pool);
1986 }
1987 
1988 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1989 				       struct stmmac_dma_conf *dma_conf)
1990 {
1991 	u32 rx_count = priv->plat->rx_queues_to_use;
1992 	u32 queue;
1993 
1994 	/* Free RX queue resources */
1995 	for (queue = 0; queue < rx_count; queue++)
1996 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1997 }
1998 
1999 /**
2000  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2001  * @priv: private structure
2002  * @dma_conf: structure to take the dma data
2003  * @queue: TX queue index
2004  */
2005 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2006 					 struct stmmac_dma_conf *dma_conf,
2007 					 u32 queue)
2008 {
2009 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2010 	size_t size;
2011 	void *addr;
2012 
2013 	/* Release the DMA TX socket buffers */
2014 	dma_free_tx_skbufs(priv, dma_conf, queue);
2015 
2016 	if (priv->extend_desc) {
2017 		size = sizeof(struct dma_extended_desc);
2018 		addr = tx_q->dma_etx;
2019 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2020 		size = sizeof(struct dma_edesc);
2021 		addr = tx_q->dma_entx;
2022 	} else {
2023 		size = sizeof(struct dma_desc);
2024 		addr = tx_q->dma_tx;
2025 	}
2026 
2027 	size *= dma_conf->dma_tx_size;
2028 
2029 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2030 
2031 	kfree(tx_q->tx_skbuff_dma);
2032 	kfree(tx_q->tx_skbuff);
2033 }
2034 
2035 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2036 				       struct stmmac_dma_conf *dma_conf)
2037 {
2038 	u32 tx_count = priv->plat->tx_queues_to_use;
2039 	u32 queue;
2040 
2041 	/* Free TX queue resources */
2042 	for (queue = 0; queue < tx_count; queue++)
2043 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2044 }
2045 
2046 /**
2047  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2048  * @priv: private structure
2049  * @dma_conf: structure to take the dma data
2050  * @queue: RX queue index
2051  * Description: according to which descriptor can be used (extended or basic)
2052  * this function allocates the resources for the TX and RX paths. In case of
2053  * reception, for example, it pre-allocates the RX socket buffers in order to
2054  * allow the zero-copy mechanism.
2055  */
2056 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2057 					 struct stmmac_dma_conf *dma_conf,
2058 					 u32 queue)
2059 {
2060 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2061 	struct stmmac_channel *ch = &priv->channel[queue];
2062 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2063 	struct page_pool_params pp_params = { 0 };
2064 	unsigned int dma_buf_sz_pad, num_pages;
2065 	unsigned int napi_id;
2066 	int ret;
2067 
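	/* Size each RX buffer to hold the headroom, the payload and the
	 * skb_shared_info tailroom, then round up to whole pages so the
	 * page pool order can be derived from it.
	 */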
2068 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2069 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2070 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2071 
2072 	rx_q->queue_index = queue;
2073 	rx_q->priv_data = priv;
2074 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2075 
2076 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2077 	pp_params.pool_size = dma_conf->dma_rx_size;
2078 	pp_params.order = order_base_2(num_pages);
2079 	pp_params.nid = dev_to_node(priv->device);
2080 	pp_params.dev = priv->device;
2081 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2082 	pp_params.offset = stmmac_rx_offset(priv);
2083 	pp_params.max_len = dma_conf->dma_buf_sz;
2084 
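	/* With Split Header enabled, drop the headroom offset and extend
	 * max_len so the whole buffer can be used for received data.
	 */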
2085 	if (priv->sph) {
2086 		pp_params.offset = 0;
2087 		pp_params.max_len += stmmac_rx_offset(priv);
2088 	}
2089 
2090 	rx_q->page_pool = page_pool_create(&pp_params);
2091 	if (IS_ERR(rx_q->page_pool)) {
2092 		ret = PTR_ERR(rx_q->page_pool);
2093 		rx_q->page_pool = NULL;
2094 		return ret;
2095 	}
2096 
2097 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2098 				 sizeof(*rx_q->buf_pool),
2099 				 GFP_KERNEL);
2100 	if (!rx_q->buf_pool)
2101 		return -ENOMEM;
2102 
2103 	if (priv->extend_desc) {
2104 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2105 						   dma_conf->dma_rx_size *
2106 						   sizeof(struct dma_extended_desc),
2107 						   &rx_q->dma_rx_phy,
2108 						   GFP_KERNEL);
2109 		if (!rx_q->dma_erx)
2110 			return -ENOMEM;
2111 
2112 	} else {
2113 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2114 						  dma_conf->dma_rx_size *
2115 						  sizeof(struct dma_desc),
2116 						  &rx_q->dma_rx_phy,
2117 						  GFP_KERNEL);
2118 		if (!rx_q->dma_rx)
2119 			return -ENOMEM;
2120 	}
2121 
2122 	if (stmmac_xdp_is_enabled(priv) &&
2123 	    test_bit(queue, priv->af_xdp_zc_qps))
2124 		napi_id = ch->rxtx_napi.napi_id;
2125 	else
2126 		napi_id = ch->rx_napi.napi_id;
2127 
2128 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2129 			       rx_q->queue_index,
2130 			       napi_id);
2131 	if (ret) {
2132 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2133 		return -EINVAL;
2134 	}
2135 
2136 	return 0;
2137 }
2138 
2139 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2140 				       struct stmmac_dma_conf *dma_conf)
2141 {
2142 	u32 rx_count = priv->plat->rx_queues_to_use;
2143 	u32 queue;
2144 	int ret;
2145 
2146 	/* RX queues buffers and DMA */
2147 	for (queue = 0; queue < rx_count; queue++) {
2148 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2149 		if (ret)
2150 			goto err_dma;
2151 	}
2152 
2153 	return 0;
2154 
2155 err_dma:
2156 	free_dma_rx_desc_resources(priv, dma_conf);
2157 
2158 	return ret;
2159 }
2160 
2161 /**
2162  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2163  * @priv: private structure
2164  * @dma_conf: structure to take the dma data
2165  * @queue: TX queue index
2166  * Description: according to which descriptor can be used (extended or basic)
2167  * this function allocates the resources for the TX and RX paths. In case of
2168  * reception, for example, it pre-allocates the RX socket buffers in order to
2169  * allow the zero-copy mechanism.
2170  */
2171 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2172 					 struct stmmac_dma_conf *dma_conf,
2173 					 u32 queue)
2174 {
2175 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2176 	size_t size;
2177 	void *addr;
2178 
2179 	tx_q->queue_index = queue;
2180 	tx_q->priv_data = priv;
2181 
2182 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2183 				      sizeof(*tx_q->tx_skbuff_dma),
2184 				      GFP_KERNEL);
2185 	if (!tx_q->tx_skbuff_dma)
2186 		return -ENOMEM;
2187 
2188 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2189 				  sizeof(struct sk_buff *),
2190 				  GFP_KERNEL);
2191 	if (!tx_q->tx_skbuff)
2192 		return -ENOMEM;
2193 
2194 	if (priv->extend_desc)
2195 		size = sizeof(struct dma_extended_desc);
2196 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2197 		size = sizeof(struct dma_edesc);
2198 	else
2199 		size = sizeof(struct dma_desc);
2200 
2201 	size *= dma_conf->dma_tx_size;
2202 
2203 	addr = dma_alloc_coherent(priv->device, size,
2204 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2205 	if (!addr)
2206 		return -ENOMEM;
2207 
2208 	if (priv->extend_desc)
2209 		tx_q->dma_etx = addr;
2210 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2211 		tx_q->dma_entx = addr;
2212 	else
2213 		tx_q->dma_tx = addr;
2214 
2215 	return 0;
2216 }
2217 
2218 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2219 				       struct stmmac_dma_conf *dma_conf)
2220 {
2221 	u32 tx_count = priv->plat->tx_queues_to_use;
2222 	u32 queue;
2223 	int ret;
2224 
2225 	/* TX queues buffers and DMA */
2226 	for (queue = 0; queue < tx_count; queue++) {
2227 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2228 		if (ret)
2229 			goto err_dma;
2230 	}
2231 
2232 	return 0;
2233 
2234 err_dma:
2235 	free_dma_tx_desc_resources(priv, dma_conf);
2236 	return ret;
2237 }
2238 
2239 /**
2240  * alloc_dma_desc_resources - alloc TX/RX resources.
2241  * @priv: private structure
2242  * @dma_conf: structure to take the dma data
2243  * Description: according to which descriptor can be used (extended or basic)
2244  * this function allocates the resources for the TX and RX paths. In case of
2245  * reception, for example, it pre-allocates the RX socket buffers in order to
2246  * allow the zero-copy mechanism.
2247  */
2248 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2249 				    struct stmmac_dma_conf *dma_conf)
2250 {
2251 	/* RX Allocation */
2252 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2253 
2254 	if (ret)
2255 		return ret;
2256 
2257 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2258 
2259 	return ret;
2260 }
2261 
2262 /**
2263  * free_dma_desc_resources - free dma desc resources
2264  * @priv: private structure
2265  * @dma_conf: structure to take the dma data
2266  */
2267 static void free_dma_desc_resources(struct stmmac_priv *priv,
2268 				    struct stmmac_dma_conf *dma_conf)
2269 {
2270 	/* Release the DMA TX socket buffers */
2271 	free_dma_tx_desc_resources(priv, dma_conf);
2272 
2273 	/* Release the DMA RX socket buffers later
2274 	 * to ensure all pending XDP_TX buffers are returned.
2275 	 */
2276 	free_dma_rx_desc_resources(priv, dma_conf);
2277 }
2278 
2279 /**
2280  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2281  *  @priv: driver private structure
2282  *  Description: It is used for enabling the rx queues in the MAC
2283  */
2284 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2285 {
2286 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2287 	int queue;
2288 	u8 mode;
2289 
2290 	for (queue = 0; queue < rx_queues_count; queue++) {
2291 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2292 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2293 	}
2294 }
2295 
2296 /**
2297  * stmmac_start_rx_dma - start RX DMA channel
2298  * @priv: driver private structure
2299  * @chan: RX channel index
2300  * Description:
2301  * This starts an RX DMA channel
2302  */
2303 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2304 {
2305 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2306 	stmmac_start_rx(priv, priv->ioaddr, chan);
2307 }
2308 
2309 /**
2310  * stmmac_start_tx_dma - start TX DMA channel
2311  * @priv: driver private structure
2312  * @chan: TX channel index
2313  * Description:
2314  * This starts a TX DMA channel
2315  */
2316 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2317 {
2318 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2319 	stmmac_start_tx(priv, priv->ioaddr, chan);
2320 }
2321 
2322 /**
2323  * stmmac_stop_rx_dma - stop RX DMA channel
2324  * @priv: driver private structure
2325  * @chan: RX channel index
2326  * Description:
2327  * This stops an RX DMA channel
2328  */
2329 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2330 {
2331 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2332 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2333 }
2334 
2335 /**
2336  * stmmac_stop_tx_dma - stop TX DMA channel
2337  * @priv: driver private structure
2338  * @chan: TX channel index
2339  * Description:
2340  * This stops a TX DMA channel
2341  */
2342 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2343 {
2344 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2345 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2346 }
2347 
2348 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2349 {
2350 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2351 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2352 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2353 	u32 chan;
2354 
2355 	for (chan = 0; chan < dma_csr_ch; chan++) {
2356 		struct stmmac_channel *ch = &priv->channel[chan];
2357 		unsigned long flags;
2358 
2359 		spin_lock_irqsave(&ch->lock, flags);
2360 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2361 		spin_unlock_irqrestore(&ch->lock, flags);
2362 	}
2363 }
2364 
2365 /**
2366  * stmmac_start_all_dma - start all RX and TX DMA channels
2367  * @priv: driver private structure
2368  * Description:
2369  * This starts all the RX and TX DMA channels
2370  */
2371 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2372 {
2373 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2374 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2375 	u32 chan = 0;
2376 
2377 	for (chan = 0; chan < rx_channels_count; chan++)
2378 		stmmac_start_rx_dma(priv, chan);
2379 
2380 	for (chan = 0; chan < tx_channels_count; chan++)
2381 		stmmac_start_tx_dma(priv, chan);
2382 }
2383 
2384 /**
2385  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2386  * @priv: driver private structure
2387  * Description:
2388  * This stops the RX and TX DMA channels
2389  */
2390 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2391 {
2392 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2393 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2394 	u32 chan = 0;
2395 
2396 	for (chan = 0; chan < rx_channels_count; chan++)
2397 		stmmac_stop_rx_dma(priv, chan);
2398 
2399 	for (chan = 0; chan < tx_channels_count; chan++)
2400 		stmmac_stop_tx_dma(priv, chan);
2401 }
2402 
2403 /**
2404  *  stmmac_dma_operation_mode - HW DMA operation mode
2405  *  @priv: driver private structure
2406  *  Description: it is used for configuring the DMA operation mode register in
2407  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2408  */
2409 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2410 {
2411 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2412 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2413 	int rxfifosz = priv->plat->rx_fifo_size;
2414 	int txfifosz = priv->plat->tx_fifo_size;
2415 	u32 txmode = 0;
2416 	u32 rxmode = 0;
2417 	u32 chan = 0;
2418 	u8 qmode = 0;
2419 
2420 	if (rxfifosz == 0)
2421 		rxfifosz = priv->dma_cap.rx_fifo_size;
2422 	if (txfifosz == 0)
2423 		txfifosz = priv->dma_cap.tx_fifo_size;
2424 
2425 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2426 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2427 		rxfifosz /= rx_channels_count;
2428 		txfifosz /= tx_channels_count;
2429 	}
2430 
2431 	if (priv->plat->force_thresh_dma_mode) {
2432 		txmode = tc;
2433 		rxmode = tc;
2434 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2435 		/*
2436 		 * In case of GMAC, SF mode can be enabled
2437 		 * to perform the TX COE in HW. This depends on:
2438 		 * 1) TX COE being actually supported
2439 		 * 2) There being no buggy Jumbo frame support
2440 		 *    that requires not inserting the csum in the TDES.
2441 		 */
2442 		txmode = SF_DMA_MODE;
2443 		rxmode = SF_DMA_MODE;
2444 		priv->xstats.threshold = SF_DMA_MODE;
2445 	} else {
2446 		txmode = tc;
2447 		rxmode = SF_DMA_MODE;
2448 	}
2449 
2450 	/* configure all channels */
2451 	for (chan = 0; chan < rx_channels_count; chan++) {
2452 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2453 		u32 buf_size;
2454 
2455 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2456 
2457 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2458 				rxfifosz, qmode);
2459 
2460 		if (rx_q->xsk_pool) {
2461 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2462 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2463 					      buf_size,
2464 					      chan);
2465 		} else {
2466 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2467 					      priv->dma_conf.dma_buf_sz,
2468 					      chan);
2469 		}
2470 	}
2471 
2472 	for (chan = 0; chan < tx_channels_count; chan++) {
2473 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2474 
2475 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2476 				txfifosz, qmode);
2477 	}
2478 }
2479 
2480 static void stmmac_xsk_request_timestamp(void *_priv)
2481 {
2482 	struct stmmac_metadata_request *meta_req = _priv;
2483 
2484 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2485 	*meta_req->set_ic = true;
2486 }
2487 
2488 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2489 {
2490 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2491 	struct stmmac_priv *priv = tx_compl->priv;
2492 	struct dma_desc *desc = tx_compl->desc;
2493 	bool found = false;
2494 	u64 ns = 0;
2495 
2496 	if (!priv->hwts_tx_en)
2497 		return 0;
2498 
2499 	/* check tx tstamp status */
2500 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2501 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2502 		found = true;
2503 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2504 		found = true;
2505 	}
2506 
2507 	if (found) {
2508 		ns -= priv->plat->cdc_error_adj;
2509 		return ns_to_ktime(ns);
2510 	}
2511 
2512 	return 0;
2513 }
2514 
2515 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2516 {
2517 	struct timespec64 ts = ns_to_timespec64(launch_time);
2518 	struct stmmac_metadata_request *meta_req = _priv;
2519 
2520 	if (meta_req->tbs & STMMAC_TBS_EN)
2521 		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2522 				    ts.tv_nsec);
2523 }
2524 
2525 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2526 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2527 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2528 	.tmo_request_launch_time	= stmmac_xsk_request_launch_time,
2529 };
2530 
2531 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2532 {
2533 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2534 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2535 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2536 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
2537 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2538 	unsigned int entry = tx_q->cur_tx;
2539 	struct dma_desc *tx_desc = NULL;
2540 	struct xdp_desc xdp_desc;
2541 	bool work_done = true;
2542 	u32 tx_set_ic_bit = 0;
2543 
2544 	/* Avoids TX time-out as we are sharing with slow path */
2545 	txq_trans_cond_update(nq);
2546 
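	/* Never submit more descriptors than the TX ring has room for. */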
2547 	budget = min(budget, stmmac_tx_avail(priv, queue));
2548 
2549 	for (; budget > 0; budget--) {
2550 		struct stmmac_metadata_request meta_req;
2551 		struct xsk_tx_metadata *meta = NULL;
2552 		dma_addr_t dma_addr;
2553 		bool set_ic;
2554 
2555 		/* We are sharing with the slow path and stop XSK TX desc submission
2556 		 * when the available TX ring space is less than the threshold.
2557 		 */
2558 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2559 		    !netif_carrier_ok(priv->dev)) {
2560 			work_done = false;
2561 			break;
2562 		}
2563 
2564 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2565 			break;
2566 
2567 		if (priv->est && priv->est->enable &&
2568 		    priv->est->max_sdu[queue] &&
2569 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2570 			priv->xstats.max_sdu_txq_drop[queue]++;
2571 			continue;
2572 		}
2573 
2574 		if (likely(priv->extend_desc))
2575 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2576 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2577 			tx_desc = &tx_q->dma_entx[entry].basic;
2578 		else
2579 			tx_desc = tx_q->dma_tx + entry;
2580 
2581 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2582 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2583 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2584 
2585 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2586 
2587 		/* To return the XDP buffer to the XSK pool, we simply call
2588 		 * xsk_tx_completed(), so we don't need to fill up
2589 		 * 'buf' and 'xdpf'.
2590 		 */
2591 		tx_q->tx_skbuff_dma[entry].buf = 0;
2592 		tx_q->xdpf[entry] = NULL;
2593 
2594 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2595 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2596 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2597 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2598 
2599 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2600 
2601 		tx_q->tx_count_frames++;
2602 
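		/* Request a completion interrupt only every tx_coal_frames
		 * descriptors to keep the IRQ rate down.
		 */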
2603 		if (!priv->tx_coal_frames[queue])
2604 			set_ic = false;
2605 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2606 			set_ic = true;
2607 		else
2608 			set_ic = false;
2609 
2610 		meta_req.priv = priv;
2611 		meta_req.tx_desc = tx_desc;
2612 		meta_req.set_ic = &set_ic;
2613 		meta_req.tbs = tx_q->tbs;
2614 		meta_req.edesc = &tx_q->dma_entx[entry];
2615 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2616 					&meta_req);
2617 		if (set_ic) {
2618 			tx_q->tx_count_frames = 0;
2619 			stmmac_set_tx_ic(priv, tx_desc);
2620 			tx_set_ic_bit++;
2621 		}
2622 
2623 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2624 				       csum, priv->mode, true, true,
2625 				       xdp_desc.len);
2626 
2627 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2628 
2629 		xsk_tx_metadata_to_compl(meta,
2630 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2631 
2632 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2633 		entry = tx_q->cur_tx;
2634 	}
2635 	u64_stats_update_begin(&txq_stats->napi_syncp);
2636 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2637 	u64_stats_update_end(&txq_stats->napi_syncp);
2638 
2639 	if (tx_desc) {
2640 		stmmac_flush_tx_descriptors(priv, queue);
2641 		xsk_tx_release(pool);
2642 	}
2643 
2644 	/* Return true if both of the following conditions are met
2645 	 *  a) TX budget is still available
2646 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2647 	 *     pending XSK TX for transmission)
2648 	 */
2649 	return !!budget && work_done;
2650 }
2651 
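/* On a threshold-related TX error, raise the DMA threshold in steps of 64
 * while it is still at or below 256, unless Store-and-Forward mode is
 * already in use.
 */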
2652 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2653 {
2654 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2655 		tc += 64;
2656 
2657 		if (priv->plat->force_thresh_dma_mode)
2658 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2659 		else
2660 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2661 						      chan);
2662 
2663 		priv->xstats.threshold = tc;
2664 	}
2665 }
2666 
2667 /**
2668  * stmmac_tx_clean - to manage the transmission completion
2669  * @priv: driver private structure
2670  * @budget: napi budget limiting this functions packet handling
2671  * @queue: TX queue index
2672  * @pending_packets: signal to arm the TX coal timer
2673  * Description: it reclaims the transmit resources after transmission completes.
2674  * If some packets still need to be handled, due to TX coalescing, set
2675  * pending_packets to true to make NAPI arm the TX coal timer.
2676  */
2677 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2678 			   bool *pending_packets)
2679 {
2680 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2681 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2682 	unsigned int bytes_compl = 0, pkts_compl = 0;
2683 	unsigned int entry, xmits = 0, count = 0;
2684 	u32 tx_packets = 0, tx_errors = 0;
2685 
2686 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2687 
2688 	tx_q->xsk_frames_done = 0;
2689 
2690 	entry = tx_q->dirty_tx;
2691 
2692 	/* Try to clean all completed TX frames in one shot */
2693 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2694 		struct xdp_frame *xdpf;
2695 		struct sk_buff *skb;
2696 		struct dma_desc *p;
2697 		int status;
2698 
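		/* Work out whether this entry carried an XDP frame or an skb
		 * so the matching completion path is used further down.
		 */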
2699 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2700 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2701 			xdpf = tx_q->xdpf[entry];
2702 			skb = NULL;
2703 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2704 			xdpf = NULL;
2705 			skb = tx_q->tx_skbuff[entry];
2706 		} else {
2707 			xdpf = NULL;
2708 			skb = NULL;
2709 		}
2710 
2711 		if (priv->extend_desc)
2712 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2713 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2714 			p = &tx_q->dma_entx[entry].basic;
2715 		else
2716 			p = tx_q->dma_tx + entry;
2717 
2718 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2719 		/* Check if the descriptor is owned by the DMA */
2720 		if (unlikely(status & tx_dma_own))
2721 			break;
2722 
2723 		count++;
2724 
2725 		/* Make sure descriptor fields are read after reading
2726 		 * the own bit.
2727 		 */
2728 		dma_rmb();
2729 
2730 		/* Just consider the last segment and ...*/
2731 		if (likely(!(status & tx_not_ls))) {
2732 			/* ... verify the status error condition */
2733 			if (unlikely(status & tx_err)) {
2734 				tx_errors++;
2735 				if (unlikely(status & tx_err_bump_tc))
2736 					stmmac_bump_dma_threshold(priv, queue);
2737 			} else {
2738 				tx_packets++;
2739 			}
2740 			if (skb) {
2741 				stmmac_get_tx_hwtstamp(priv, p, skb);
2742 			} else if (tx_q->xsk_pool &&
2743 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2744 				struct stmmac_xsk_tx_complete tx_compl = {
2745 					.priv = priv,
2746 					.desc = p,
2747 				};
2748 
2749 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2750 							 &stmmac_xsk_tx_metadata_ops,
2751 							 &tx_compl);
2752 			}
2753 		}
2754 
2755 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2756 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2757 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2758 				dma_unmap_page(priv->device,
2759 					       tx_q->tx_skbuff_dma[entry].buf,
2760 					       tx_q->tx_skbuff_dma[entry].len,
2761 					       DMA_TO_DEVICE);
2762 			else
2763 				dma_unmap_single(priv->device,
2764 						 tx_q->tx_skbuff_dma[entry].buf,
2765 						 tx_q->tx_skbuff_dma[entry].len,
2766 						 DMA_TO_DEVICE);
2767 			tx_q->tx_skbuff_dma[entry].buf = 0;
2768 			tx_q->tx_skbuff_dma[entry].len = 0;
2769 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2770 		}
2771 
2772 		stmmac_clean_desc3(priv, tx_q, p);
2773 
2774 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2775 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2776 
2777 		if (xdpf &&
2778 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2779 			xdp_return_frame_rx_napi(xdpf);
2780 			tx_q->xdpf[entry] = NULL;
2781 		}
2782 
2783 		if (xdpf &&
2784 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2785 			xdp_return_frame(xdpf);
2786 			tx_q->xdpf[entry] = NULL;
2787 		}
2788 
2789 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2790 			tx_q->xsk_frames_done++;
2791 
2792 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2793 			if (likely(skb)) {
2794 				pkts_compl++;
2795 				bytes_compl += skb->len;
2796 				dev_consume_skb_any(skb);
2797 				tx_q->tx_skbuff[entry] = NULL;
2798 			}
2799 		}
2800 
2801 		stmmac_release_tx_desc(priv, p, priv->mode);
2802 
2803 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2804 	}
2805 	tx_q->dirty_tx = entry;
2806 
2807 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2808 				  pkts_compl, bytes_compl);
2809 
2810 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2811 								queue))) &&
2812 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2813 
2814 		netif_dbg(priv, tx_done, priv->dev,
2815 			  "%s: restart transmit\n", __func__);
2816 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2817 	}
2818 
2819 	if (tx_q->xsk_pool) {
2820 		bool work_done;
2821 
2822 		if (tx_q->xsk_frames_done)
2823 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2824 
2825 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2826 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2827 
2828 		/* For XSK TX, we try to send as many as possible.
2829 		 * If XSK work done (XSK TX desc empty and budget still
2830 		 * available), return "budget - 1" to reenable TX IRQ.
2831 		 * Else, return "budget" to make NAPI continue polling.
2832 		 */
2833 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2834 					       STMMAC_XSK_TX_BUDGET_MAX);
2835 		if (work_done)
2836 			xmits = budget - 1;
2837 		else
2838 			xmits = budget;
2839 	}
2840 
2841 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2842 		stmmac_restart_sw_lpi_timer(priv);
2843 
2844 	/* We still have pending packets, let's call for a new scheduling */
2845 	if (tx_q->dirty_tx != tx_q->cur_tx)
2846 		*pending_packets = true;
2847 
2848 	u64_stats_update_begin(&txq_stats->napi_syncp);
2849 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2850 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2851 	u64_stats_inc(&txq_stats->napi.tx_clean);
2852 	u64_stats_update_end(&txq_stats->napi_syncp);
2853 
2854 	priv->xstats.tx_errors += tx_errors;
2855 
2856 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2857 
2858 	/* Combine decisions from TX clean and XSK TX */
2859 	return max(count, xmits);
2860 }
2861 
2862 /**
2863  * stmmac_tx_err - to manage the tx error
2864  * @priv: driver private structure
2865  * @chan: channel index
2866  * Description: it cleans the descriptors and restarts the transmission
2867  * in case of transmission errors.
2868  */
2869 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2870 {
2871 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2872 
2873 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2874 
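	/* Stop the channel, drop any queued buffers, re-initialise the ring
	 * and restart the DMA from a clean state.
	 */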
2875 	stmmac_stop_tx_dma(priv, chan);
2876 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2877 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2878 	stmmac_reset_tx_queue(priv, chan);
2879 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2880 			    tx_q->dma_tx_phy, chan);
2881 	stmmac_start_tx_dma(priv, chan);
2882 
2883 	priv->xstats.tx_errors++;
2884 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2885 }
2886 
2887 /**
2888  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2889  *  @priv: driver private structure
2890  *  @txmode: TX operating mode
2891  *  @rxmode: RX operating mode
2892  *  @chan: channel index
2893  *  Description: it is used for configuring the DMA operation mode at
2894  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2895  *  mode.
2896  */
2897 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2898 					  u32 rxmode, u32 chan)
2899 {
2900 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2901 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2902 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2903 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2904 	int rxfifosz = priv->plat->rx_fifo_size;
2905 	int txfifosz = priv->plat->tx_fifo_size;
2906 
2907 	if (rxfifosz == 0)
2908 		rxfifosz = priv->dma_cap.rx_fifo_size;
2909 	if (txfifosz == 0)
2910 		txfifosz = priv->dma_cap.tx_fifo_size;
2911 
2912 	/* Adjust for real per queue fifo size */
2913 	rxfifosz /= rx_channels_count;
2914 	txfifosz /= tx_channels_count;
2915 
2916 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2917 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2918 }
2919 
2920 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2921 {
2922 	int ret;
2923 
2924 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2925 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2926 	if (ret && (ret != -EINVAL)) {
2927 		stmmac_global_err(priv);
2928 		return true;
2929 	}
2930 
2931 	return false;
2932 }
2933 
2934 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2935 {
2936 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2937 						 &priv->xstats, chan, dir);
2938 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2939 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2940 	struct stmmac_channel *ch = &priv->channel[chan];
2941 	struct napi_struct *rx_napi;
2942 	struct napi_struct *tx_napi;
2943 	unsigned long flags;
2944 
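	/* XSK zero-copy queues are serviced by the combined rx/tx NAPI
	 * instance instead of the separate rx and tx ones.
	 */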
2945 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2946 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2947 
2948 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2949 		if (napi_schedule_prep(rx_napi)) {
2950 			spin_lock_irqsave(&ch->lock, flags);
2951 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2952 			spin_unlock_irqrestore(&ch->lock, flags);
2953 			__napi_schedule(rx_napi);
2954 		}
2955 	}
2956 
2957 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2958 		if (napi_schedule_prep(tx_napi)) {
2959 			spin_lock_irqsave(&ch->lock, flags);
2960 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2961 			spin_unlock_irqrestore(&ch->lock, flags);
2962 			__napi_schedule(tx_napi);
2963 		}
2964 	}
2965 
2966 	return status;
2967 }
2968 
2969 /**
2970  * stmmac_dma_interrupt - DMA ISR
2971  * @priv: driver private structure
2972  * Description: this is the DMA ISR. It is called by the main ISR.
2973  * It calls the dwmac dma routine and schedules the poll method in case some
2974  * work can be done.
2975  */
2976 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2977 {
2978 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2979 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2980 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2981 				tx_channel_count : rx_channel_count;
2982 	u32 chan;
2983 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2984 
2985 	/* Make sure we never check beyond our status buffer. */
2986 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2987 		channels_to_check = ARRAY_SIZE(status);
2988 
2989 	for (chan = 0; chan < channels_to_check; chan++)
2990 		status[chan] = stmmac_napi_check(priv, chan,
2991 						 DMA_DIR_RXTX);
2992 
2993 	for (chan = 0; chan < tx_channel_count; chan++) {
2994 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2995 			/* Try to bump up the dma threshold on this failure */
2996 			stmmac_bump_dma_threshold(priv, chan);
2997 		} else if (unlikely(status[chan] == tx_hard_error)) {
2998 			stmmac_tx_err(priv, chan);
2999 		}
3000 	}
3001 }
3002 
3003 /**
3004  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3005  * @priv: driver private structure
3006  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
3007  */
3008 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3009 {
3010 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3011 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3012 
3013 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3014 
3015 	if (priv->dma_cap.rmon) {
3016 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3017 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3018 	} else
3019 		netdev_info(priv->dev, "No MAC Management Counters available\n");
3020 }
3021 
3022 /**
3023  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3024  * @priv: driver private structure
3025  * Description:
3026  *  new GMAC chip generations have a new register to indicate the
3027  *  presence of the optional features/functions.
3028  *  This can also be used to override the values passed through the
3029  *  platform, which is necessary for old MAC10/100 and GMAC chips.
3030  */
3031 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3032 {
3033 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3034 }
3035 
3036 /**
3037  * stmmac_check_ether_addr - check if the MAC addr is valid
3038  * @priv: driver private structure
3039  * Description:
3040  * it verifies that the MAC address is valid; in case it is not, it
3041  * generates a random MAC address
3042  */
3043 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3044 {
3045 	u8 addr[ETH_ALEN];
3046 
3047 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3048 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3049 		if (is_valid_ether_addr(addr))
3050 			eth_hw_addr_set(priv->dev, addr);
3051 		else
3052 			eth_hw_addr_random(priv->dev);
3053 		dev_info(priv->device, "device MAC address %pM\n",
3054 			 priv->dev->dev_addr);
3055 	}
3056 }
3057 
3058 /**
3059  * stmmac_init_dma_engine - DMA init.
3060  * @priv: driver private structure
3061  * Description:
3062  * It inits the DMA by invoking the specific MAC/GMAC callback.
3063  * Some DMA parameters can be passed from the platform;
3064  * in case these are not passed, a default is kept for the MAC or GMAC.
3065  */
3066 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3067 {
3068 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3069 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3070 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3071 	struct stmmac_rx_queue *rx_q;
3072 	struct stmmac_tx_queue *tx_q;
3073 	u32 chan = 0;
3074 	int ret = 0;
3075 
3076 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3077 		netdev_err(priv->dev, "Invalid DMA configuration\n");
3078 		return -EINVAL;
3079 	}
3080 
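	/* Extended descriptors in ring mode require the alternate descriptor
	 * size (atds) flag in the DMA configuration.
	 */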
3081 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3082 		priv->plat->dma_cfg->atds = 1;
3083 
3084 	ret = stmmac_reset(priv, priv->ioaddr);
3085 	if (ret) {
3086 		netdev_err(priv->dev, "Failed to reset the dma\n");
3087 		return ret;
3088 	}
3089 
3090 	/* DMA Configuration */
3091 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3092 
3093 	if (priv->plat->axi)
3094 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3095 
3096 	/* DMA CSR Channel configuration */
3097 	for (chan = 0; chan < dma_csr_ch; chan++) {
3098 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3099 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3100 	}
3101 
3102 	/* DMA RX Channel Configuration */
3103 	for (chan = 0; chan < rx_channels_count; chan++) {
3104 		rx_q = &priv->dma_conf.rx_queue[chan];
3105 
3106 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3107 				    rx_q->dma_rx_phy, chan);
3108 
3109 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3110 				     (rx_q->buf_alloc_num *
3111 				      sizeof(struct dma_desc));
3112 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3113 				       rx_q->rx_tail_addr, chan);
3114 	}
3115 
3116 	/* DMA TX Channel Configuration */
3117 	for (chan = 0; chan < tx_channels_count; chan++) {
3118 		tx_q = &priv->dma_conf.tx_queue[chan];
3119 
3120 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3121 				    tx_q->dma_tx_phy, chan);
3122 
3123 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3124 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3125 				       tx_q->tx_tail_addr, chan);
3126 	}
3127 
3128 	return ret;
3129 }
3130 
3131 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3132 {
3133 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3134 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3135 	struct stmmac_channel *ch;
3136 	struct napi_struct *napi;
3137 
3138 	if (!tx_coal_timer)
3139 		return;
3140 
3141 	ch = &priv->channel[tx_q->queue_index];
3142 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3143 
3144 	/* Arm timer only if napi is not already scheduled.
3145 	 * Try to cancel any timer if napi is scheduled; the timer will be armed
3146 	 * again in the next scheduled napi.
3147 	 */
3148 	if (unlikely(!napi_is_scheduled(napi)))
3149 		hrtimer_start(&tx_q->txtimer,
3150 			      STMMAC_COAL_TIMER(tx_coal_timer),
3151 			      HRTIMER_MODE_REL);
3152 	else
3153 		hrtimer_try_to_cancel(&tx_q->txtimer);
3154 }
3155 
3156 /**
3157  * stmmac_tx_timer - mitigation sw timer for tx.
3158  * @t: data pointer
3159  * Description:
3160  * This is the timer handler that schedules the NAPI poll to run the TX clean.
3161  */
3162 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3163 {
3164 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3165 	struct stmmac_priv *priv = tx_q->priv_data;
3166 	struct stmmac_channel *ch;
3167 	struct napi_struct *napi;
3168 
3169 	ch = &priv->channel[tx_q->queue_index];
3170 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3171 
3172 	if (likely(napi_schedule_prep(napi))) {
3173 		unsigned long flags;
3174 
3175 		spin_lock_irqsave(&ch->lock, flags);
3176 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3177 		spin_unlock_irqrestore(&ch->lock, flags);
3178 		__napi_schedule(napi);
3179 	}
3180 
3181 	return HRTIMER_NORESTART;
3182 }
3183 
3184 /**
3185  * stmmac_init_coalesce - init mitigation options.
3186  * @priv: driver private structure
3187  * Description:
3188  * This inits the coalesce parameters: i.e. timer rate,
3189  * timer handler and default threshold used for enabling the
3190  * interrupt on completion bit.
3191  */
3192 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3193 {
3194 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3195 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3196 	u32 chan;
3197 
3198 	for (chan = 0; chan < tx_channel_count; chan++) {
3199 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3200 
3201 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3202 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3203 
3204 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3205 	}
3206 
3207 	for (chan = 0; chan < rx_channel_count; chan++)
3208 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3209 }
3210 
3211 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3212 {
3213 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3214 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3215 	u32 chan;
3216 
3217 	/* set TX ring length */
3218 	for (chan = 0; chan < tx_channels_count; chan++)
3219 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3220 				       (priv->dma_conf.dma_tx_size - 1), chan);
3221 
3222 	/* set RX ring length */
3223 	for (chan = 0; chan < rx_channels_count; chan++)
3224 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3225 				       (priv->dma_conf.dma_rx_size - 1), chan);
3226 }
3227 
3228 /**
3229  *  stmmac_set_tx_queue_weight - Set TX queue weight
3230  *  @priv: driver private structure
3231  *  Description: It is used for setting the TX queue weights
3232  */
3233 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3234 {
3235 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3236 	u32 weight;
3237 	u32 queue;
3238 
3239 	for (queue = 0; queue < tx_queues_count; queue++) {
3240 		weight = priv->plat->tx_queues_cfg[queue].weight;
3241 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3242 	}
3243 }
3244 
3245 /**
3246  *  stmmac_configure_cbs - Configure CBS in TX queue
3247  *  @priv: driver private structure
3248  *  Description: It is used for configuring CBS in AVB TX queues
3249  */
3250 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3251 {
3252 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3253 	u32 mode_to_use;
3254 	u32 queue;
3255 
3256 	/* queue 0 is reserved for legacy traffic */
3257 	for (queue = 1; queue < tx_queues_count; queue++) {
3258 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3259 		if (mode_to_use == MTL_QUEUE_DCB)
3260 			continue;
3261 
3262 		stmmac_config_cbs(priv, priv->hw,
3263 				priv->plat->tx_queues_cfg[queue].send_slope,
3264 				priv->plat->tx_queues_cfg[queue].idle_slope,
3265 				priv->plat->tx_queues_cfg[queue].high_credit,
3266 				priv->plat->tx_queues_cfg[queue].low_credit,
3267 				queue);
3268 	}
3269 }
3270 
3271 /**
3272  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3273  *  @priv: driver private structure
3274  *  Description: It is used for mapping RX queues to RX dma channels
3275  */
3276 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3277 {
3278 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3279 	u32 queue;
3280 	u32 chan;
3281 
3282 	for (queue = 0; queue < rx_queues_count; queue++) {
3283 		chan = priv->plat->rx_queues_cfg[queue].chan;
3284 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3285 	}
3286 }
3287 
3288 /**
3289  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3290  *  @priv: driver private structure
3291  *  Description: It is used for configuring the RX Queue Priority
3292  */
3293 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3294 {
3295 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3296 	u32 queue;
3297 	u32 prio;
3298 
3299 	for (queue = 0; queue < rx_queues_count; queue++) {
3300 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3301 			continue;
3302 
3303 		prio = priv->plat->rx_queues_cfg[queue].prio;
3304 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3305 	}
3306 }
3307 
3308 /**
3309  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3310  *  @priv: driver private structure
3311  *  Description: It is used for configuring the TX Queue Priority
3312  */
3313 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3314 {
3315 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3316 	u32 queue;
3317 	u32 prio;
3318 
3319 	for (queue = 0; queue < tx_queues_count; queue++) {
3320 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3321 			continue;
3322 
3323 		prio = priv->plat->tx_queues_cfg[queue].prio;
3324 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3325 	}
3326 }
3327 
3328 /**
3329  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3330  *  @priv: driver private structure
3331  *  Description: It is used for configuring the RX queue routing
3332  */
3333 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3334 {
3335 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3336 	u32 queue;
3337 	u8 packet;
3338 
3339 	for (queue = 0; queue < rx_queues_count; queue++) {
3340 		/* no specific packet type routing specified for the queue */
3341 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3342 			continue;
3343 
3344 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3345 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3346 	}
3347 }
3348 
3349 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3350 {
3351 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3352 		priv->rss.enable = false;
3353 		return;
3354 	}
3355 
3356 	if (priv->dev->features & NETIF_F_RXHASH)
3357 		priv->rss.enable = true;
3358 	else
3359 		priv->rss.enable = false;
3360 
3361 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3362 			     priv->plat->rx_queues_to_use);
3363 }
3364 
3365 /**
3366  *  stmmac_mtl_configuration - Configure MTL
3367  *  @priv: driver private structure
3368  *  Description: It is used for configuring MTL
3369  */
3370 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3371 {
3372 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3373 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3374 
3375 	if (tx_queues_count > 1)
3376 		stmmac_set_tx_queue_weight(priv);
3377 
3378 	/* Configure MTL RX algorithms */
3379 	if (rx_queues_count > 1)
3380 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3381 				priv->plat->rx_sched_algorithm);
3382 
3383 	/* Configure MTL TX algorithms */
3384 	if (tx_queues_count > 1)
3385 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3386 				priv->plat->tx_sched_algorithm);
3387 
3388 	/* Configure CBS in AVB TX queues */
3389 	if (tx_queues_count > 1)
3390 		stmmac_configure_cbs(priv);
3391 
3392 	/* Map RX MTL to DMA channels */
3393 	stmmac_rx_queue_dma_chan_map(priv);
3394 
3395 	/* Enable MAC RX Queues */
3396 	stmmac_mac_enable_rx_queues(priv);
3397 
3398 	/* Set RX priorities */
3399 	if (rx_queues_count > 1)
3400 		stmmac_mac_config_rx_queues_prio(priv);
3401 
3402 	/* Set TX priorities */
3403 	if (tx_queues_count > 1)
3404 		stmmac_mac_config_tx_queues_prio(priv);
3405 
3406 	/* Set RX routing */
3407 	if (rx_queues_count > 1)
3408 		stmmac_mac_config_rx_queues_routing(priv);
3409 
3410 	/* Receive Side Scaling */
3411 	if (rx_queues_count > 1)
3412 		stmmac_mac_config_rss(priv);
3413 }
3414 
3415 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3416 {
3417 	if (priv->dma_cap.asp) {
3418 		netdev_info(priv->dev, "Enabling Safety Features\n");
3419 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3420 					  priv->plat->safety_feat_cfg);
3421 	} else {
3422 		netdev_info(priv->dev, "No Safety Features support found\n");
3423 	}
3424 }
3425 
3426 /**
3427  * stmmac_hw_setup - setup mac in a usable state.
3428  *  @dev : pointer to the device structure.
3430  *  Description:
3431  *  this is the main function to set up the HW in a usable state: the
3432  *  dma engine is reset, the core registers are configured (e.g. AXI,
3433  *  Checksum features, timers) and the DMA is ready to start receiving and
3434  *  transmitting.
3435  *  Return value:
3436  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3437  *  file on failure.
3438  */
3439 static int stmmac_hw_setup(struct net_device *dev)
3440 {
3441 	struct stmmac_priv *priv = netdev_priv(dev);
3442 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3443 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3444 	bool sph_en;
3445 	u32 chan;
3446 	int ret;
3447 
3448 	/* Make sure RX clock is enabled */
3449 	if (priv->hw->phylink_pcs)
3450 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3451 
3452 	/* Note that clk_rx_i must be running for reset to complete. This
3453 	 * clock may also be required when setting the MAC address.
3454 	 *
3455 	 * Block the receive clock stop for LPI mode at the PHY in case
3456 	 * the link is established with EEE mode active.
3457 	 */
3458 	phylink_rx_clk_stop_block(priv->phylink);
3459 
3460 	/* DMA initialization and SW reset */
3461 	ret = stmmac_init_dma_engine(priv);
3462 	if (ret < 0) {
3463 		phylink_rx_clk_stop_unblock(priv->phylink);
3464 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3465 			   __func__);
3466 		return ret;
3467 	}
3468 
3469 	/* Copy the MAC addr into the HW  */
3470 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3471 	phylink_rx_clk_stop_unblock(priv->phylink);
3472 
3473 	/* PS and related bits will be programmed according to the speed */
3474 	if (priv->hw->pcs) {
3475 		int speed = priv->plat->mac_port_sel_speed;
3476 
3477 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3478 		    (speed == SPEED_1000)) {
3479 			priv->hw->ps = speed;
3480 		} else {
3481 			dev_warn(priv->device, "invalid port speed\n");
3482 			priv->hw->ps = 0;
3483 		}
3484 	}
3485 
3486 	/* Initialize the MAC Core */
3487 	stmmac_core_init(priv, priv->hw, dev);
3488 
3489 	/* Initialize MTL */
3490 	stmmac_mtl_configuration(priv);
3491 
3492 	/* Initialize Safety Features */
3493 	stmmac_safety_feat_configuration(priv);
3494 
3495 	ret = stmmac_rx_ipc(priv, priv->hw);
3496 	if (!ret) {
3497 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3498 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3499 		priv->hw->rx_csum = 0;
3500 	}
3501 
3502 	/* Enable the MAC Rx/Tx */
3503 	stmmac_mac_set(priv, priv->ioaddr, true);
3504 
3505 	/* Set the HW DMA mode and the COE */
3506 	stmmac_dma_operation_mode(priv);
3507 
3508 	stmmac_mmc_setup(priv);
3509 
3510 	if (priv->use_riwt) {
3511 		u32 queue;
3512 
3513 		for (queue = 0; queue < rx_cnt; queue++) {
3514 			if (!priv->rx_riwt[queue])
3515 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3516 
3517 			stmmac_rx_watchdog(priv, priv->ioaddr,
3518 					   priv->rx_riwt[queue], queue);
3519 		}
3520 	}
3521 
3522 	if (priv->hw->pcs)
3523 		stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
3524 
3525 	/* set TX and RX rings length */
3526 	stmmac_set_rings_length(priv);
3527 
3528 	/* Enable TSO */
3529 	if (priv->tso) {
3530 		for (chan = 0; chan < tx_cnt; chan++) {
3531 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3532 
3533 			/* TSO and TBS cannot co-exist */
3534 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3535 				continue;
3536 
3537 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3538 		}
3539 	}
3540 
3541 	/* Enable Split Header */
3542 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3543 	for (chan = 0; chan < rx_cnt; chan++)
3544 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3545 
3546 
3547 	/* VLAN Tag Insertion */
3548 	if (priv->dma_cap.vlins)
3549 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3550 
3551 	/* TBS */
3552 	for (chan = 0; chan < tx_cnt; chan++) {
3553 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3554 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3555 
3556 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3557 	}
3558 
3559 	/* Configure real RX and TX queues */
3560 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3561 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3562 
3563 	/* Start the ball rolling... */
3564 	stmmac_start_all_dma(priv);
3565 
3566 	phylink_rx_clk_stop_block(priv->phylink);
3567 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3568 	phylink_rx_clk_stop_unblock(priv->phylink);
3569 
3570 	return 0;
3571 }
3572 
3573 static void stmmac_free_irq(struct net_device *dev,
3574 			    enum request_irq_err irq_err, int irq_idx)
3575 {
3576 	struct stmmac_priv *priv = netdev_priv(dev);
3577 	int j;
3578 
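	/* The cases below intentionally fall through: starting from the stage
	 * where the request failed, every IRQ that was successfully requested
	 * before it is released in reverse order.
	 */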
3579 	switch (irq_err) {
3580 	case REQ_IRQ_ERR_ALL:
3581 		irq_idx = priv->plat->tx_queues_to_use;
3582 		fallthrough;
3583 	case REQ_IRQ_ERR_TX:
3584 		for (j = irq_idx - 1; j >= 0; j--) {
3585 			if (priv->tx_irq[j] > 0) {
3586 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3587 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3588 			}
3589 		}
3590 		irq_idx = priv->plat->rx_queues_to_use;
3591 		fallthrough;
3592 	case REQ_IRQ_ERR_RX:
3593 		for (j = irq_idx - 1; j >= 0; j--) {
3594 			if (priv->rx_irq[j] > 0) {
3595 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3596 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3597 			}
3598 		}
3599 
3600 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3601 			free_irq(priv->sfty_ue_irq, dev);
3602 		fallthrough;
3603 	case REQ_IRQ_ERR_SFTY_UE:
3604 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3605 			free_irq(priv->sfty_ce_irq, dev);
3606 		fallthrough;
3607 	case REQ_IRQ_ERR_SFTY_CE:
3608 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3609 			free_irq(priv->lpi_irq, dev);
3610 		fallthrough;
3611 	case REQ_IRQ_ERR_LPI:
3612 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3613 			free_irq(priv->wol_irq, dev);
3614 		fallthrough;
3615 	case REQ_IRQ_ERR_SFTY:
3616 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3617 			free_irq(priv->sfty_irq, dev);
3618 		fallthrough;
3619 	case REQ_IRQ_ERR_WOL:
3620 		free_irq(dev->irq, dev);
3621 		fallthrough;
3622 	case REQ_IRQ_ERR_MAC:
3623 	case REQ_IRQ_ERR_NO:
3624 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3625 		break;
3626 	}
3627 }
3628 
3629 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3630 {
3631 	struct stmmac_priv *priv = netdev_priv(dev);
3632 	enum request_irq_err irq_err;
3633 	int irq_idx = 0;
3634 	char *int_name;
3635 	int ret;
3636 	int i;
3637 
3638 	/* For common interrupt */
3639 	int_name = priv->int_name_mac;
3640 	sprintf(int_name, "%s:%s", dev->name, "mac");
3641 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3642 			  0, int_name, dev);
3643 	if (unlikely(ret < 0)) {
3644 		netdev_err(priv->dev,
3645 			   "%s: alloc mac MSI %d (error: %d)\n",
3646 			   __func__, dev->irq, ret);
3647 		irq_err = REQ_IRQ_ERR_MAC;
3648 		goto irq_error;
3649 	}
3650 
3651 	/* Request the Wake IRQ in case another line
3652 	 * is used for WoL
3653 	 */
3654 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3655 		int_name = priv->int_name_wol;
3656 		sprintf(int_name, "%s:%s", dev->name, "wol");
3657 		ret = request_irq(priv->wol_irq,
3658 				  stmmac_mac_interrupt,
3659 				  0, int_name, dev);
3660 		if (unlikely(ret < 0)) {
3661 			netdev_err(priv->dev,
3662 				   "%s: alloc wol MSI %d (error: %d)\n",
3663 				   __func__, priv->wol_irq, ret);
3664 			irq_err = REQ_IRQ_ERR_WOL;
3665 			goto irq_error;
3666 		}
3667 	}
3668 
3669 	/* Request the LPI IRQ in case another line
3670 	 * is used for LPI
3671 	 */
3672 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3673 		int_name = priv->int_name_lpi;
3674 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3675 		ret = request_irq(priv->lpi_irq,
3676 				  stmmac_mac_interrupt,
3677 				  0, int_name, dev);
3678 		if (unlikely(ret < 0)) {
3679 			netdev_err(priv->dev,
3680 				   "%s: alloc lpi MSI %d (error: %d)\n",
3681 				   __func__, priv->lpi_irq, ret);
3682 			irq_err = REQ_IRQ_ERR_LPI;
3683 			goto irq_error;
3684 		}
3685 	}
3686 
3687 	/* Request the common Safety Feature Correctable/Uncorrectable
3688 	 * Error line in case another line is used
3689 	 */
3690 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3691 		int_name = priv->int_name_sfty;
3692 		sprintf(int_name, "%s:%s", dev->name, "safety");
3693 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3694 				  0, int_name, dev);
3695 		if (unlikely(ret < 0)) {
3696 			netdev_err(priv->dev,
3697 				   "%s: alloc sfty MSI %d (error: %d)\n",
3698 				   __func__, priv->sfty_irq, ret);
3699 			irq_err = REQ_IRQ_ERR_SFTY;
3700 			goto irq_error;
3701 		}
3702 	}
3703 
3704 	/* Request the Safety Feature Correctable Error line in
3705 	 * case another line is used
3706 	 */
3707 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3708 		int_name = priv->int_name_sfty_ce;
3709 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3710 		ret = request_irq(priv->sfty_ce_irq,
3711 				  stmmac_safety_interrupt,
3712 				  0, int_name, dev);
3713 		if (unlikely(ret < 0)) {
3714 			netdev_err(priv->dev,
3715 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3716 				   __func__, priv->sfty_ce_irq, ret);
3717 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3718 			goto irq_error;
3719 		}
3720 	}
3721 
3722 	/* Request the Safety Feature Uncorrectable Error line in
3723 	 * case another line is used
3724 	 */
3725 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3726 		int_name = priv->int_name_sfty_ue;
3727 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3728 		ret = request_irq(priv->sfty_ue_irq,
3729 				  stmmac_safety_interrupt,
3730 				  0, int_name, dev);
3731 		if (unlikely(ret < 0)) {
3732 			netdev_err(priv->dev,
3733 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3734 				   __func__, priv->sfty_ue_irq, ret);
3735 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3736 			goto irq_error;
3737 		}
3738 	}
3739 
3740 	/* Request Rx MSI irq */
3741 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3742 		if (i >= MTL_MAX_RX_QUEUES)
3743 			break;
3744 		if (priv->rx_irq[i] == 0)
3745 			continue;
3746 
3747 		int_name = priv->int_name_rx_irq[i];
3748 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3749 		ret = request_irq(priv->rx_irq[i],
3750 				  stmmac_msi_intr_rx,
3751 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3752 		if (unlikely(ret < 0)) {
3753 			netdev_err(priv->dev,
3754 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3755 				   __func__, i, priv->rx_irq[i], ret);
3756 			irq_err = REQ_IRQ_ERR_RX;
3757 			irq_idx = i;
3758 			goto irq_error;
3759 		}
3760 		irq_set_affinity_hint(priv->rx_irq[i],
3761 				      cpumask_of(i % num_online_cpus()));
3762 	}
3763 
3764 	/* Request Tx MSI irq */
3765 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3766 		if (i >= MTL_MAX_TX_QUEUES)
3767 			break;
3768 		if (priv->tx_irq[i] == 0)
3769 			continue;
3770 
3771 		int_name = priv->int_name_tx_irq[i];
3772 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3773 		ret = request_irq(priv->tx_irq[i],
3774 				  stmmac_msi_intr_tx,
3775 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3776 		if (unlikely(ret < 0)) {
3777 			netdev_err(priv->dev,
3778 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3779 				   __func__, i, priv->tx_irq[i], ret);
3780 			irq_err = REQ_IRQ_ERR_TX;
3781 			irq_idx = i;
3782 			goto irq_error;
3783 		}
3784 		irq_set_affinity_hint(priv->tx_irq[i],
3785 				      cpumask_of(i % num_online_cpus()));
3786 	}
3787 
3788 	return 0;
3789 
3790 irq_error:
3791 	stmmac_free_irq(dev, irq_err, irq_idx);
3792 	return ret;
3793 }
3794 
3795 static int stmmac_request_irq_single(struct net_device *dev)
3796 {
3797 	struct stmmac_priv *priv = netdev_priv(dev);
3798 	enum request_irq_err irq_err;
3799 	int ret;
3800 
3801 	ret = request_irq(dev->irq, stmmac_interrupt,
3802 			  IRQF_SHARED, dev->name, dev);
3803 	if (unlikely(ret < 0)) {
3804 		netdev_err(priv->dev,
3805 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3806 			   __func__, dev->irq, ret);
3807 		irq_err = REQ_IRQ_ERR_MAC;
3808 		goto irq_error;
3809 	}
3810 
3811 	/* Request the Wake IRQ in case another line
3812 	 * is used for WoL
3813 	 */
3814 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3815 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3816 				  IRQF_SHARED, dev->name, dev);
3817 		if (unlikely(ret < 0)) {
3818 			netdev_err(priv->dev,
3819 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3820 				   __func__, priv->wol_irq, ret);
3821 			irq_err = REQ_IRQ_ERR_WOL;
3822 			goto irq_error;
3823 		}
3824 	}
3825 
3826 	/* Request the LPI IRQ in case another line is used for LPI */
3827 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3828 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3829 				  IRQF_SHARED, dev->name, dev);
3830 		if (unlikely(ret < 0)) {
3831 			netdev_err(priv->dev,
3832 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3833 				   __func__, priv->lpi_irq, ret);
3834 			irq_err = REQ_IRQ_ERR_LPI;
3835 			goto irq_error;
3836 		}
3837 	}
3838 
3839 	/* Request the common Safety Feature Correctable/Uncorrectable
3840 	 * Error line in case another line is used
3841 	 */
3842 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3843 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3844 				  IRQF_SHARED, dev->name, dev);
3845 		if (unlikely(ret < 0)) {
3846 			netdev_err(priv->dev,
3847 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3848 				   __func__, priv->sfty_irq, ret);
3849 			irq_err = REQ_IRQ_ERR_SFTY;
3850 			goto irq_error;
3851 		}
3852 	}
3853 
3854 	return 0;
3855 
3856 irq_error:
3857 	stmmac_free_irq(dev, irq_err, 0);
3858 	return ret;
3859 }
3860 
3861 static int stmmac_request_irq(struct net_device *dev)
3862 {
3863 	struct stmmac_priv *priv = netdev_priv(dev);
3864 	int ret;
3865 
3866 	/* Request the IRQ lines */
3867 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3868 		ret = stmmac_request_irq_multi_msi(dev);
3869 	else
3870 		ret = stmmac_request_irq_single(dev);
3871 
3872 	return ret;
3873 }
3874 
3875 /**
3876  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3877  *  @priv: driver private structure
3878  *  @mtu: MTU to set up the DMA queues and buffers with
3879  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3880  *  Allocate the Tx/Rx DMA queues and initialize them.
3881  *  Return value:
3882  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3883  */
3884 static struct stmmac_dma_conf *
3885 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3886 {
3887 	struct stmmac_dma_conf *dma_conf;
3888 	int chan, bfsize, ret;
3889 
3890 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3891 	if (!dma_conf) {
3892 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3893 			   __func__);
3894 		return ERR_PTR(-ENOMEM);
3895 	}
3896 
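	/* Select the RX buffer size: use 16KiB buffers when the MTU requires
	 * it and the hardware supports it, otherwise derive a standard size
	 * from the MTU.
	 */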
3897 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3898 	if (bfsize < 0)
3899 		bfsize = 0;
3900 
3901 	if (bfsize < BUF_SIZE_16KiB)
3902 		bfsize = stmmac_set_bfsize(mtu, 0);
3903 
3904 	dma_conf->dma_buf_sz = bfsize;
3905 	/* Choose the tx/rx ring size from the one already defined in the
3906 	 * priv struct, if any.
3907 	 */
3908 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3909 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3910 
3911 	if (!dma_conf->dma_tx_size)
3912 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3913 	if (!dma_conf->dma_rx_size)
3914 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3915 
3916 	/* Earlier check for TBS */
3917 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3918 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3919 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3920 
3921 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3922 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3923 	}
3924 
3925 	ret = alloc_dma_desc_resources(priv, dma_conf);
3926 	if (ret < 0) {
3927 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3928 			   __func__);
3929 		goto alloc_error;
3930 	}
3931 
3932 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3933 	if (ret < 0) {
3934 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3935 			   __func__);
3936 		goto init_error;
3937 	}
3938 
3939 	return dma_conf;
3940 
3941 init_error:
3942 	free_dma_desc_resources(priv, dma_conf);
3943 alloc_error:
3944 	kfree(dma_conf);
3945 	return ERR_PTR(ret);
3946 }
3947 
3948 /**
3949  *  __stmmac_open - open entry point of the driver
3950  *  @dev : pointer to the device structure.
3951  *  @dma_conf: structure holding the DMA configuration and queues to install
3952  *  Description:
3953  *  This function is the open entry point of the driver.
3954  *  Return value:
3955  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3956  *  file on failure.
3957  */
3958 static int __stmmac_open(struct net_device *dev,
3959 			 struct stmmac_dma_conf *dma_conf)
3960 {
3961 	struct stmmac_priv *priv = netdev_priv(dev);
3962 	int mode = priv->plat->phy_interface;
3963 	u32 chan;
3964 	int ret;
3965 
3966 	/* Initialise the tx lpi timer, converting from msec to usec */
3967 	if (!priv->tx_lpi_timer)
3968 		priv->tx_lpi_timer = eee_timer * 1000;
3969 
3970 	if ((!priv->hw->xpcs ||
3971 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3972 		ret = stmmac_init_phy(dev);
3973 		if (ret) {
3974 			netdev_err(priv->dev,
3975 				   "%s: Cannot attach to PHY (error: %d)\n",
3976 				   __func__, ret);
3977 			return ret;
3978 		}
3979 	}
3980 
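	/* Carry the per-queue TBS enable state over from the previous
	 * configuration before installing the new dma_conf.
	 */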
3981 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3982 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3983 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3984 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3985 
3986 	stmmac_reset_queues_param(priv);
3987 
3988 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3989 	    priv->plat->serdes_powerup) {
3990 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3991 		if (ret < 0) {
3992 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3993 				   __func__);
3994 			goto init_error;
3995 		}
3996 	}
3997 
3998 	ret = stmmac_hw_setup(dev);
3999 	if (ret < 0) {
4000 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4001 		goto init_error;
4002 	}
4003 
4004 	stmmac_setup_ptp(priv);
4005 
4006 	stmmac_init_coalesce(priv);
4007 
4008 	phylink_start(priv->phylink);
4009 	/* We may have called phylink_speed_down before */
4010 	phylink_speed_up(priv->phylink);
4011 
4012 	ret = stmmac_request_irq(dev);
4013 	if (ret)
4014 		goto irq_error;
4015 
4016 	stmmac_enable_all_queues(priv);
4017 	netif_tx_start_all_queues(priv->dev);
4018 	stmmac_enable_all_dma_irq(priv);
4019 
4020 	return 0;
4021 
4022 irq_error:
4023 	phylink_stop(priv->phylink);
4024 
4025 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4026 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4027 
4028 	stmmac_release_ptp(priv);
4029 init_error:
4030 	phylink_disconnect_phy(priv->phylink);
4031 	return ret;
4032 }
4033 
4034 static int stmmac_open(struct net_device *dev)
4035 {
4036 	struct stmmac_priv *priv = netdev_priv(dev);
4037 	struct stmmac_dma_conf *dma_conf;
4038 	int ret;
4039 
4040 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4041 	if (IS_ERR(dma_conf))
4042 		return PTR_ERR(dma_conf);
4043 
4044 	ret = pm_runtime_resume_and_get(priv->device);
4045 	if (ret < 0)
4046 		goto err;
4047 
4048 	ret = __stmmac_open(dev, dma_conf);
4049 	if (ret) {
4050 		pm_runtime_put(priv->device);
4051 err:
4052 		free_dma_desc_resources(priv, dma_conf);
4053 	}
4054 
4055 	kfree(dma_conf);
4056 
4057 	return ret;
4058 }
4059 
4060 static void __stmmac_release(struct net_device *dev)
4061 {
4062 	struct stmmac_priv *priv = netdev_priv(dev);
4063 	u32 chan;
4064 
4065 	/* If the PHY or MAC has WoL enabled, then the PHY will not be
4066 	 * suspended when phylink_stop() is called below. Set the PHY
4067 	 * to its slowest speed to save power.
4068 	 */
4069 	if (device_may_wakeup(priv->device))
4070 		phylink_speed_down(priv->phylink, false);
4071 
4072 	/* Stop and disconnect the PHY */
4073 	phylink_stop(priv->phylink);
4074 	phylink_disconnect_phy(priv->phylink);
4075 
4076 	stmmac_disable_all_queues(priv);
4077 
4078 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4079 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4080 
4081 	netif_tx_disable(dev);
4082 
4083 	/* Free the IRQ lines */
4084 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4085 
4086 	/* Stop TX/RX DMA and clear the descriptors */
4087 	stmmac_stop_all_dma(priv);
4088 
4089 	/* Release and free the Rx/Tx resources */
4090 	free_dma_desc_resources(priv, &priv->dma_conf);
4091 
4092 	/* Powerdown Serdes if there is */
4093 	if (priv->plat->serdes_powerdown)
4094 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4095 
4096 	stmmac_release_ptp(priv);
4097 
4098 	if (stmmac_fpe_supported(priv))
4099 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4100 }
4101 
4102 /**
4103  *  stmmac_release - close entry point of the driver
4104  *  @dev : device pointer.
4105  *  Description:
4106  *  This is the stop entry point of the driver.
4107  */
4108 static int stmmac_release(struct net_device *dev)
4109 {
4110 	struct stmmac_priv *priv = netdev_priv(dev);
4111 
4112 	__stmmac_release(dev);
4113 
4114 	pm_runtime_put(priv->device);
4115 
4116 	return 0;
4117 }
4118 
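/* Prepare a descriptor for hardware VLAN tag insertion when the skb carries a
 * VLAN tag and the MAC supports it. Returns true if hardware insertion has
 * been set up, false otherwise.
 */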
4119 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4120 			       struct stmmac_tx_queue *tx_q)
4121 {
4122 	u16 tag = 0x0, inner_tag = 0x0;
4123 	u32 inner_type = 0x0;
4124 	struct dma_desc *p;
4125 
4126 	if (!priv->dma_cap.vlins)
4127 		return false;
4128 	if (!skb_vlan_tag_present(skb))
4129 		return false;
4130 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4131 		inner_tag = skb_vlan_tag_get(skb);
4132 		inner_type = STMMAC_VLAN_INSERT;
4133 	}
4134 
4135 	tag = skb_vlan_tag_get(skb);
4136 
4137 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4138 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4139 	else
4140 		p = &tx_q->dma_tx[tx_q->cur_tx];
4141 
4142 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4143 		return false;
4144 
4145 	stmmac_set_tx_owner(priv, p);
4146 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4147 	return true;
4148 }
4149 
4150 /**
4151  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload buffer
4152  *  @priv: driver private structure
4153  *  @des: buffer start address
4154  *  @total_len: total length to fill in descriptors
4155  *  @last_segment: condition for the last descriptor
4156  *  @queue: TX queue index
4157  *  Description:
4158  *  This function fills descriptors and requests new descriptors according
4159  *  to the buffer length to fill
4160  */
4161 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4162 				 int total_len, bool last_segment, u32 queue)
4163 {
4164 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4165 	struct dma_desc *desc;
4166 	u32 buff_size;
4167 	int tmp_len;
4168 
4169 	tmp_len = total_len;
4170 
4171 	while (tmp_len > 0) {
4172 		dma_addr_t curr_addr;
4173 
4174 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4175 						priv->dma_conf.dma_tx_size);
4176 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4177 
4178 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4179 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4180 		else
4181 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4182 
4183 		curr_addr = des + (total_len - tmp_len);
4184 		stmmac_set_desc_addr(priv, desc, curr_addr);
4185 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4186 			    TSO_MAX_BUFF_SIZE : tmp_len;
4187 
4188 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4189 				0, 1,
4190 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4191 				0, 0);
4192 
4193 		tmp_len -= TSO_MAX_BUFF_SIZE;
4194 	}
4195 }
4196 
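/* Hand the freshly prepared TX descriptors over to the hardware by advancing
 * the queue's tail pointer. The descriptor size used for the tail address
 * depends on whether extended, enhanced (TBS) or basic descriptors are in use.
 */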
4197 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4198 {
4199 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4200 	int desc_size;
4201 
4202 	if (likely(priv->extend_desc))
4203 		desc_size = sizeof(struct dma_extended_desc);
4204 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4205 		desc_size = sizeof(struct dma_edesc);
4206 	else
4207 		desc_size = sizeof(struct dma_desc);
4208 
4209 	/* The OWN bit must be the last thing set when preparing the
4210 	 * descriptor, and a barrier is then needed to make sure that
4211 	 * everything is coherent before handing it over to the DMA engine.
4212 	 */
4213 	wmb();
4214 
4215 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4216 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4217 }
4218 
4219 /**
4220  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4221  *  @skb : the socket buffer
4222  *  @dev : device pointer
4223  *  Description: this is the transmit function that is called on TSO frames
4224  *  (support available on GMAC4 and newer chips).
4225  *  The diagram below shows the ring programming in case of TSO frames:
4226  *
4227  *  First Descriptor
4228  *   --------
4229  *   | DES0 |---> buffer1 = L2/L3/L4 header
4230  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4231  *   |      |     width is 32-bit, but we never use it.
4232  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4233  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4234  *   |      |     or 48-bit, and we always use it.
4235  *   | DES2 |---> buffer1 len
4236  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4237  *   --------
4238  *   --------
4239  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4240  *   | DES1 |---> same as the First Descriptor
4241  *   | DES2 |---> buffer1 len
4242  *   | DES3 |
4243  *   --------
4244  *	|
4245  *     ...
4246  *	|
4247  *   --------
4248  *   | DES0 |---> buffer1 = Split TCP Payload
4249  *   | DES1 |---> same as the First Descriptor
4250  *   | DES2 |---> buffer1 len
4251  *   | DES3 |
4252  *   --------
4253  *
4254  * mss is fixed while TSO is enabled, so the TDES3 ctx field does not need
 *      to be programmed for every frame.
4255  */
4256 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4257 {
4258 	struct dma_desc *desc, *first, *mss_desc = NULL;
4259 	struct stmmac_priv *priv = netdev_priv(dev);
4260 	unsigned int first_entry, tx_packets;
4261 	struct stmmac_txq_stats *txq_stats;
4262 	struct stmmac_tx_queue *tx_q;
4263 	u32 pay_len, mss, queue;
4264 	int i, first_tx, nfrags;
4265 	u8 proto_hdr_len, hdr;
4266 	dma_addr_t des;
4267 	bool set_ic;
4268 
4269 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4270 	 *
4271 	 * Never insert the VLAN tag by HW, since segments split by the
4272 	 * TSO engine would be un-tagged by mistake.
4273 	 */
4274 	if (skb_vlan_tag_present(skb)) {
4275 		skb = __vlan_hwaccel_push_inside(skb);
4276 		if (unlikely(!skb)) {
4277 			priv->xstats.tx_dropped++;
4278 			return NETDEV_TX_OK;
4279 		}
4280 	}
4281 
4282 	nfrags = skb_shinfo(skb)->nr_frags;
4283 	queue = skb_get_queue_mapping(skb);
4284 
4285 	tx_q = &priv->dma_conf.tx_queue[queue];
4286 	txq_stats = &priv->xstats.txq_stats[queue];
4287 	first_tx = tx_q->cur_tx;
4288 
4289 	/* Compute header lengths */
4290 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4291 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4292 		hdr = sizeof(struct udphdr);
4293 	} else {
4294 		proto_hdr_len = skb_tcp_all_headers(skb);
4295 		hdr = tcp_hdrlen(skb);
4296 	}
4297 
4298 	/* Desc availability based on threshold should be safe enough */
4299 	if (unlikely(stmmac_tx_avail(priv, queue) <
4300 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4301 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4302 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4303 								queue));
4304 			/* This is a hard error, log it. */
4305 			netdev_err(priv->dev,
4306 				   "%s: Tx Ring full when queue awake\n",
4307 				   __func__);
4308 		}
4309 		return NETDEV_TX_BUSY;
4310 	}
4311 
4312 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4313 
4314 	mss = skb_shinfo(skb)->gso_size;
4315 
4316 	/* set new MSS value if needed */
4317 	if (mss != tx_q->mss) {
4318 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4319 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4320 		else
4321 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4322 
4323 		stmmac_set_mss(priv, mss_desc, mss);
4324 		tx_q->mss = mss;
4325 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4326 						priv->dma_conf.dma_tx_size);
4327 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4328 	}
4329 
4330 	if (netif_msg_tx_queued(priv)) {
4331 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4332 			__func__, hdr, proto_hdr_len, pay_len, mss);
4333 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4334 			skb->data_len);
4335 	}
4336 
4337 	first_entry = tx_q->cur_tx;
4338 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4339 
4340 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4341 		desc = &tx_q->dma_entx[first_entry].basic;
4342 	else
4343 		desc = &tx_q->dma_tx[first_entry];
4344 	first = desc;
4345 
4346 	/* first descriptor: fill Headers on Buf1 */
4347 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4348 			     DMA_TO_DEVICE);
4349 	if (dma_mapping_error(priv->device, des))
4350 		goto dma_map_err;
4351 
4352 	stmmac_set_desc_addr(priv, first, des);
4353 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4354 			     (nfrags == 0), queue);
4355 
4356 	/* In case two or more DMA transmit descriptors are allocated for this
4357 	 * non-paged SKB data, the DMA buffer address should be saved to
4358 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4359 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4360 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4361 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4362 	 * sooner or later.
4363 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4364 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4365 	 * this DMA buffer right after the DMA engine completely finishes the
4366 	 * full buffer transmission.
4367 	 */
4368 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4369 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4370 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4371 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4372 
4373 	/* Prepare fragments */
4374 	for (i = 0; i < nfrags; i++) {
4375 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4376 
4377 		des = skb_frag_dma_map(priv->device, frag, 0,
4378 				       skb_frag_size(frag),
4379 				       DMA_TO_DEVICE);
4380 		if (dma_mapping_error(priv->device, des))
4381 			goto dma_map_err;
4382 
4383 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4384 				     (i == nfrags - 1), queue);
4385 
4386 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4387 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4388 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4389 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4390 	}
4391 
4392 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4393 
4394 	/* Only the last descriptor gets to point to the skb. */
4395 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4396 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4397 
4398 	/* Manage tx mitigation */
4399 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4400 	tx_q->tx_count_frames += tx_packets;
4401 
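	/* Decide whether this frame should raise a TX completion interrupt:
	 * always when a hardware timestamp is requested, otherwise only when
	 * the tx_coal_frames threshold for this queue has been reached.
	 */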
4402 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4403 		set_ic = true;
4404 	else if (!priv->tx_coal_frames[queue])
4405 		set_ic = false;
4406 	else if (tx_packets > priv->tx_coal_frames[queue])
4407 		set_ic = true;
4408 	else if ((tx_q->tx_count_frames %
4409 		  priv->tx_coal_frames[queue]) < tx_packets)
4410 		set_ic = true;
4411 	else
4412 		set_ic = false;
4413 
4414 	if (set_ic) {
4415 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4416 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4417 		else
4418 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4419 
4420 		tx_q->tx_count_frames = 0;
4421 		stmmac_set_tx_ic(priv, desc);
4422 	}
4423 
4424 	/* We've used all descriptors we need for this skb, however,
4425 	 * advance cur_tx so that it references a fresh descriptor.
4426 	 * ndo_start_xmit will fill this descriptor the next time it's
4427 	 * called and stmmac_tx_clean may clean up to this descriptor.
4428 	 */
4429 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4430 
4431 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4432 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4433 			  __func__);
4434 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4435 	}
4436 
4437 	u64_stats_update_begin(&txq_stats->q_syncp);
4438 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4439 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4440 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4441 	if (set_ic)
4442 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4443 	u64_stats_update_end(&txq_stats->q_syncp);
4444 
4445 	if (priv->sarc_type)
4446 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4447 
4448 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4449 		     priv->hwts_tx_en)) {
4450 		/* declare that device is doing timestamping */
4451 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4452 		stmmac_enable_tx_timestamp(priv, first);
4453 	}
4454 
4455 	/* Complete the first descriptor before granting the DMA */
4456 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4457 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4458 				   hdr / 4, (skb->len - proto_hdr_len));
4459 
4460 	/* If context desc is used to change MSS */
4461 	if (mss_desc) {
4462 		/* Make sure that the first descriptor has been completely
4463 		 * written, including its OWN bit. The MSS descriptor actually
4464 		 * comes before the first descriptor, so its OWN bit must be
4465 		 * the last thing written.
4466 		 */
4467 		dma_wmb();
4468 		stmmac_set_tx_owner(priv, mss_desc);
4469 	}
4470 
4471 	if (netif_msg_pktdata(priv)) {
4472 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4473 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4474 			tx_q->cur_tx, first, nfrags);
4475 		pr_info(">>> frame to be transmitted: ");
4476 		print_pkt(skb->data, skb_headlen(skb));
4477 	}
4478 
4479 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4480 	skb_tx_timestamp(skb);
4481 
4482 	stmmac_flush_tx_descriptors(priv, queue);
4483 	stmmac_tx_timer_arm(priv, queue);
4484 
4485 	return NETDEV_TX_OK;
4486 
4487 dma_map_err:
4488 	dev_err(priv->device, "Tx dma map failed\n");
4489 	dev_kfree_skb(skb);
4490 	priv->xstats.tx_dropped++;
4491 	return NETDEV_TX_OK;
4492 }
4493 
4494 /**
4495  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4496  * @skb: socket buffer to check
4497  *
4498  * Check if a packet has an ethertype that will trigger the IP header checks
4499  * and IP/TCP checksum engine of the stmmac core.
4500  *
4501  * Return: true if the ethertype can trigger the checksum engine, false
4502  * otherwise
4503  */
4504 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4505 {
4506 	int depth = 0;
4507 	__be16 proto;
4508 
4509 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4510 				    &depth);
4511 
4512 	return (depth <= ETH_HLEN) &&
4513 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4514 }
4515 
4516 /**
4517  *  stmmac_xmit - Tx entry point of the driver
4518  *  @skb : the socket buffer
4519  *  @dev : device pointer
4520  *  Description : this is the tx entry point of the driver.
4521  *  It programs the chain or the ring and supports oversized frames
4522  *  and the SG feature.
4523  */
4524 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4525 {
4526 	unsigned int first_entry, tx_packets, enh_desc;
4527 	struct stmmac_priv *priv = netdev_priv(dev);
4528 	unsigned int nopaged_len = skb_headlen(skb);
4529 	int i, csum_insertion = 0, is_jumbo = 0;
4530 	u32 queue = skb_get_queue_mapping(skb);
4531 	int nfrags = skb_shinfo(skb)->nr_frags;
4532 	int gso = skb_shinfo(skb)->gso_type;
4533 	struct stmmac_txq_stats *txq_stats;
4534 	struct dma_edesc *tbs_desc = NULL;
4535 	struct dma_desc *desc, *first;
4536 	struct stmmac_tx_queue *tx_q;
4537 	bool has_vlan, set_ic;
4538 	int entry, first_tx;
4539 	dma_addr_t des;
4540 
4541 	tx_q = &priv->dma_conf.tx_queue[queue];
4542 	txq_stats = &priv->xstats.txq_stats[queue];
4543 	first_tx = tx_q->cur_tx;
4544 
4545 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4546 		stmmac_stop_sw_lpi(priv);
4547 
4548 	/* Manage oversized TCP frames for GMAC4 device */
4549 	if (skb_is_gso(skb) && priv->tso) {
4550 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4551 			return stmmac_tso_xmit(skb, dev);
4552 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4553 			return stmmac_tso_xmit(skb, dev);
4554 	}
4555 
4556 	if (priv->est && priv->est->enable &&
4557 	    priv->est->max_sdu[queue] &&
4558 	    skb->len > priv->est->max_sdu[queue]){
4559 		priv->xstats.max_sdu_txq_drop[queue]++;
4560 		goto max_sdu_err;
4561 	}
4562 
4563 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4564 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4565 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4566 								queue));
4567 			/* This is a hard error, log it. */
4568 			netdev_err(priv->dev,
4569 				   "%s: Tx Ring full when queue awake\n",
4570 				   __func__);
4571 		}
4572 		return NETDEV_TX_BUSY;
4573 	}
4574 
4575 	/* Check if VLAN can be inserted by HW */
4576 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4577 
4578 	entry = tx_q->cur_tx;
4579 	first_entry = entry;
4580 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4581 
4582 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4583 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4584 	 * queues. In that case, checksum offloading for those queues that don't
4585 	 * support tx coe needs to fall back to software checksum calculation.
4586 	 *
4587 	 * Packets that won't trigger the COE, e.g. most DSA-tagged packets,
4588 	 * will also have to be checksummed in software.
4589 	 */
4590 	if (csum_insertion &&
4591 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4592 	     !stmmac_has_ip_ethertype(skb))) {
4593 		if (unlikely(skb_checksum_help(skb)))
4594 			goto dma_map_err;
4595 		csum_insertion = !csum_insertion;
4596 	}
4597 
4598 	if (likely(priv->extend_desc))
4599 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4600 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4601 		desc = &tx_q->dma_entx[entry].basic;
4602 	else
4603 		desc = tx_q->dma_tx + entry;
4604 
4605 	first = desc;
4606 
4607 	if (has_vlan)
4608 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4609 
4610 	enh_desc = priv->plat->enh_desc;
4611 	/* To program the descriptors according to the size of the frame */
4612 	if (enh_desc)
4613 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4614 
4615 	if (unlikely(is_jumbo)) {
4616 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4617 		if (unlikely(entry < 0) && (entry != -EINVAL))
4618 			goto dma_map_err;
4619 	}
4620 
4621 	for (i = 0; i < nfrags; i++) {
4622 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4623 		int len = skb_frag_size(frag);
4624 		bool last_segment = (i == (nfrags - 1));
4625 
4626 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4627 		WARN_ON(tx_q->tx_skbuff[entry]);
4628 
4629 		if (likely(priv->extend_desc))
4630 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4631 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4632 			desc = &tx_q->dma_entx[entry].basic;
4633 		else
4634 			desc = tx_q->dma_tx + entry;
4635 
4636 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4637 				       DMA_TO_DEVICE);
4638 		if (dma_mapping_error(priv->device, des))
4639 			goto dma_map_err; /* should reuse desc w/o issues */
4640 
4641 		tx_q->tx_skbuff_dma[entry].buf = des;
4642 
4643 		stmmac_set_desc_addr(priv, desc, des);
4644 
4645 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4646 		tx_q->tx_skbuff_dma[entry].len = len;
4647 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4648 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4649 
4650 		/* Prepare the descriptor and set the own bit too */
4651 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4652 				priv->mode, 1, last_segment, skb->len);
4653 	}
4654 
4655 	/* Only the last descriptor gets to point to the skb. */
4656 	tx_q->tx_skbuff[entry] = skb;
4657 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4658 
4659 	/* According to the coalesce parameter, the IC bit for the latest
4660 	 * segment is reset and the timer re-started to clean the tx status.
4661 	 * This approach takes care of the fragments: desc is the first
4662 	 * element in case of no SG.
4663 	 */
4664 	tx_packets = (entry + 1) - first_tx;
4665 	tx_q->tx_count_frames += tx_packets;
4666 
4667 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4668 		set_ic = true;
4669 	else if (!priv->tx_coal_frames[queue])
4670 		set_ic = false;
4671 	else if (tx_packets > priv->tx_coal_frames[queue])
4672 		set_ic = true;
4673 	else if ((tx_q->tx_count_frames %
4674 		  priv->tx_coal_frames[queue]) < tx_packets)
4675 		set_ic = true;
4676 	else
4677 		set_ic = false;
4678 
4679 	if (set_ic) {
4680 		if (likely(priv->extend_desc))
4681 			desc = &tx_q->dma_etx[entry].basic;
4682 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4683 			desc = &tx_q->dma_entx[entry].basic;
4684 		else
4685 			desc = &tx_q->dma_tx[entry];
4686 
4687 		tx_q->tx_count_frames = 0;
4688 		stmmac_set_tx_ic(priv, desc);
4689 	}
4690 
4691 	/* We've used all descriptors we need for this skb, however,
4692 	 * advance cur_tx so that it references a fresh descriptor.
4693 	 * ndo_start_xmit will fill this descriptor the next time it's
4694 	 * called and stmmac_tx_clean may clean up to this descriptor.
4695 	 */
4696 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4697 	tx_q->cur_tx = entry;
4698 
4699 	if (netif_msg_pktdata(priv)) {
4700 		netdev_dbg(priv->dev,
4701 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4702 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4703 			   entry, first, nfrags);
4704 
4705 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4706 		print_pkt(skb->data, skb->len);
4707 	}
4708 
4709 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4710 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4711 			  __func__);
4712 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4713 	}
4714 
4715 	u64_stats_update_begin(&txq_stats->q_syncp);
4716 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4717 	if (set_ic)
4718 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4719 	u64_stats_update_end(&txq_stats->q_syncp);
4720 
4721 	if (priv->sarc_type)
4722 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4723 
4724 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4725 	 * problems because all the descriptors are actually ready to be
4726 	 * passed to the DMA engine.
4727 	 */
4728 	if (likely(!is_jumbo)) {
4729 		bool last_segment = (nfrags == 0);
4730 
4731 		des = dma_map_single(priv->device, skb->data,
4732 				     nopaged_len, DMA_TO_DEVICE);
4733 		if (dma_mapping_error(priv->device, des))
4734 			goto dma_map_err;
4735 
4736 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4737 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4738 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4739 
4740 		stmmac_set_desc_addr(priv, first, des);
4741 
4742 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4743 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4744 
4745 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4746 			     priv->hwts_tx_en)) {
4747 			/* declare that device is doing timestamping */
4748 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4749 			stmmac_enable_tx_timestamp(priv, first);
4750 		}
4751 
4752 		/* Prepare the first descriptor setting the OWN bit too */
4753 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4754 				csum_insertion, priv->mode, 0, last_segment,
4755 				skb->len);
4756 	}
4757 
4758 	if (tx_q->tbs & STMMAC_TBS_EN) {
4759 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4760 
4761 		tbs_desc = &tx_q->dma_entx[first_entry];
4762 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4763 	}
4764 
4765 	stmmac_set_tx_owner(priv, first);
4766 
4767 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4768 
4769 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4770 	skb_tx_timestamp(skb);
4771 	stmmac_flush_tx_descriptors(priv, queue);
4772 	stmmac_tx_timer_arm(priv, queue);
4773 
4774 	return NETDEV_TX_OK;
4775 
4776 dma_map_err:
4777 	netdev_err(priv->dev, "Tx DMA map failed\n");
4778 max_sdu_err:
4779 	dev_kfree_skb(skb);
4780 	priv->xstats.tx_dropped++;
4781 	return NETDEV_TX_OK;
4782 }
4783 
4784 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4785 {
4786 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4787 	__be16 vlan_proto = veth->h_vlan_proto;
4788 	u16 vlanid;
4789 
4790 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4791 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4792 	    (vlan_proto == htons(ETH_P_8021AD) &&
4793 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4794 		/* pop the vlan tag */
4795 		vlanid = ntohs(veth->h_vlan_TCI);
4796 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4797 		skb_pull(skb, VLAN_HLEN);
4798 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4799 	}
4800 }
4801 
4802 /**
4803  * stmmac_rx_refill - refill used skb preallocated buffers
4804  * @priv: driver private structure
4805  * @queue: RX queue index
4806  * Description: this is to reallocate the RX buffers for the reception
4807  * process that is based on zero-copy.
4808  */
4809 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4810 {
4811 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4812 	int dirty = stmmac_rx_dirty(priv, queue);
4813 	unsigned int entry = rx_q->dirty_rx;
4814 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4815 
4816 	if (priv->dma_cap.host_dma_width <= 32)
4817 		gfp |= GFP_DMA32;
4818 
4819 	while (dirty-- > 0) {
4820 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4821 		struct dma_desc *p;
4822 		bool use_rx_wd;
4823 
4824 		if (priv->extend_desc)
4825 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4826 		else
4827 			p = rx_q->dma_rx + entry;
4828 
4829 		if (!buf->page) {
4830 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4831 			if (!buf->page)
4832 				break;
4833 		}
4834 
4835 		if (priv->sph && !buf->sec_page) {
4836 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4837 			if (!buf->sec_page)
4838 				break;
4839 
4840 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4841 		}
4842 
4843 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4844 
4845 		stmmac_set_desc_addr(priv, p, buf->addr);
4846 		if (priv->sph)
4847 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4848 		else
4849 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4850 		stmmac_refill_desc3(priv, rx_q, p);
4851 
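		/* Frame-based RX coalescing: the watchdog bit requested below
		 * is only honoured when RIWT coalescing is enabled
		 * (priv->use_riwt).
		 */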
4852 		rx_q->rx_count_frames++;
4853 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4854 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4855 			rx_q->rx_count_frames = 0;
4856 
4857 		use_rx_wd = !priv->rx_coal_frames[queue];
4858 		use_rx_wd |= rx_q->rx_count_frames > 0;
4859 		if (!priv->use_riwt)
4860 			use_rx_wd = false;
4861 
4862 		dma_wmb();
4863 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4864 
4865 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4866 	}
4867 	rx_q->dirty_rx = entry;
4868 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4869 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4870 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4871 }
4872 
4873 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4874 				       struct dma_desc *p,
4875 				       int status, unsigned int len)
4876 {
4877 	unsigned int plen = 0, hlen = 0;
4878 	int coe = priv->hw->rx_csum;
4879 
4880 	/* Not first descriptor, buffer is always zero */
4881 	if (priv->sph && len)
4882 		return 0;
4883 
4884 	/* First descriptor, get split header length */
4885 	stmmac_get_rx_header_len(priv, p, &hlen);
4886 	if (priv->sph && hlen) {
4887 		priv->xstats.rx_split_hdr_pkt_n++;
4888 		return hlen;
4889 	}
4890 
4891 	/* First descriptor, not last descriptor and not split header */
4892 	if (status & rx_not_ls)
4893 		return priv->dma_conf.dma_buf_sz;
4894 
4895 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4896 
4897 	/* First descriptor and last descriptor and not split header */
4898 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4899 }
4900 
4901 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4902 				       struct dma_desc *p,
4903 				       int status, unsigned int len)
4904 {
4905 	int coe = priv->hw->rx_csum;
4906 	unsigned int plen = 0;
4907 
4908 	/* Not split header, buffer is not available */
4909 	if (!priv->sph)
4910 		return 0;
4911 
4912 	/* Not last descriptor */
4913 	if (status & rx_not_ls)
4914 		return priv->dma_conf.dma_buf_sz;
4915 
4916 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4917 
4918 	/* Last descriptor */
4919 	return plen - len;
4920 }
4921 
4922 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4923 				struct xdp_frame *xdpf, bool dma_map)
4924 {
4925 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4926 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4927 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
4928 	unsigned int entry = tx_q->cur_tx;
4929 	struct dma_desc *tx_desc;
4930 	dma_addr_t dma_addr;
4931 	bool set_ic;
4932 
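	/* Drop the frame once fewer than a quarter of the TX descriptors are
	 * free, so XDP transmit cannot starve the regular transmit path.
	 */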
4933 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4934 		return STMMAC_XDP_CONSUMED;
4935 
4936 	if (priv->est && priv->est->enable &&
4937 	    priv->est->max_sdu[queue] &&
4938 	    xdpf->len > priv->est->max_sdu[queue]) {
4939 		priv->xstats.max_sdu_txq_drop[queue]++;
4940 		return STMMAC_XDP_CONSUMED;
4941 	}
4942 
4943 	if (likely(priv->extend_desc))
4944 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4945 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4946 		tx_desc = &tx_q->dma_entx[entry].basic;
4947 	else
4948 		tx_desc = tx_q->dma_tx + entry;
4949 
4950 	if (dma_map) {
4951 		dma_addr = dma_map_single(priv->device, xdpf->data,
4952 					  xdpf->len, DMA_TO_DEVICE);
4953 		if (dma_mapping_error(priv->device, dma_addr))
4954 			return STMMAC_XDP_CONSUMED;
4955 
4956 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4957 	} else {
4958 		struct page *page = virt_to_page(xdpf->data);
4959 
4960 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4961 			   xdpf->headroom;
4962 		dma_sync_single_for_device(priv->device, dma_addr,
4963 					   xdpf->len, DMA_BIDIRECTIONAL);
4964 
4965 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4966 	}
4967 
4968 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4969 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4970 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4971 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4972 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4973 
4974 	tx_q->xdpf[entry] = xdpf;
4975 
4976 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4977 
4978 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4979 			       csum, priv->mode, true, true,
4980 			       xdpf->len);
4981 
4982 	tx_q->tx_count_frames++;
4983 
4984 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4985 		set_ic = true;
4986 	else
4987 		set_ic = false;
4988 
4989 	if (set_ic) {
4990 		tx_q->tx_count_frames = 0;
4991 		stmmac_set_tx_ic(priv, tx_desc);
4992 		u64_stats_update_begin(&txq_stats->q_syncp);
4993 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4994 		u64_stats_update_end(&txq_stats->q_syncp);
4995 	}
4996 
4997 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4998 
4999 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5000 	tx_q->cur_tx = entry;
5001 
5002 	return STMMAC_XDP_TX;
5003 }
5004 
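/* Map the current CPU to a TX queue index, wrapping around the number of TX
 * queues in use (equivalent to cpu % tx_queues_to_use).
 */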
5005 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5006 				   int cpu)
5007 {
5008 	int index = cpu;
5009 
5010 	if (unlikely(index < 0))
5011 		index = 0;
5012 
5013 	while (index >= priv->plat->tx_queues_to_use)
5014 		index -= priv->plat->tx_queues_to_use;
5015 
5016 	return index;
5017 }
5018 
5019 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5020 				struct xdp_buff *xdp)
5021 {
5022 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5023 	int cpu = smp_processor_id();
5024 	struct netdev_queue *nq;
5025 	int queue;
5026 	int res;
5027 
5028 	if (unlikely(!xdpf))
5029 		return STMMAC_XDP_CONSUMED;
5030 
5031 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5032 	nq = netdev_get_tx_queue(priv->dev, queue);
5033 
5034 	__netif_tx_lock(nq, cpu);
5035 	/* Avoids TX time-out as we are sharing with slow path */
5036 	txq_trans_cond_update(nq);
5037 
5038 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5039 	if (res == STMMAC_XDP_TX)
5040 		stmmac_flush_tx_descriptors(priv, queue);
5041 
5042 	__netif_tx_unlock(nq);
5043 
5044 	return res;
5045 }
5046 
5047 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5048 				 struct bpf_prog *prog,
5049 				 struct xdp_buff *xdp)
5050 {
5051 	u32 act;
5052 	int res;
5053 
5054 	act = bpf_prog_run_xdp(prog, xdp);
5055 	switch (act) {
5056 	case XDP_PASS:
5057 		res = STMMAC_XDP_PASS;
5058 		break;
5059 	case XDP_TX:
5060 		res = stmmac_xdp_xmit_back(priv, xdp);
5061 		break;
5062 	case XDP_REDIRECT:
5063 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5064 			res = STMMAC_XDP_CONSUMED;
5065 		else
5066 			res = STMMAC_XDP_REDIRECT;
5067 		break;
5068 	default:
5069 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5070 		fallthrough;
5071 	case XDP_ABORTED:
5072 		trace_xdp_exception(priv->dev, prog, act);
5073 		fallthrough;
5074 	case XDP_DROP:
5075 		res = STMMAC_XDP_CONSUMED;
5076 		break;
5077 	}
5078 
5079 	return res;
5080 }
5081 
5082 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5083 					   struct xdp_buff *xdp)
5084 {
5085 	struct bpf_prog *prog;
5086 	int res;
5087 
5088 	prog = READ_ONCE(priv->xdp_prog);
5089 	if (!prog) {
5090 		res = STMMAC_XDP_PASS;
5091 		goto out;
5092 	}
5093 
5094 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5095 out:
5096 	return ERR_PTR(-res);
5097 }
5098 
5099 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5100 				   int xdp_status)
5101 {
5102 	int cpu = smp_processor_id();
5103 	int queue;
5104 
5105 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5106 
5107 	if (xdp_status & STMMAC_XDP_TX)
5108 		stmmac_tx_timer_arm(priv, queue);
5109 
5110 	if (xdp_status & STMMAC_XDP_REDIRECT)
5111 		xdp_do_flush();
5112 }
5113 
5114 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5115 					       struct xdp_buff *xdp)
5116 {
5117 	unsigned int metasize = xdp->data - xdp->data_meta;
5118 	unsigned int datasize = xdp->data_end - xdp->data;
5119 	struct sk_buff *skb;
5120 
5121 	skb = napi_alloc_skb(&ch->rxtx_napi,
5122 			     xdp->data_end - xdp->data_hard_start);
5123 	if (unlikely(!skb))
5124 		return NULL;
5125 
5126 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5127 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5128 	if (metasize)
5129 		skb_metadata_set(skb, metasize);
5130 
5131 	return skb;
5132 }
5133 
5134 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5135 				   struct dma_desc *p, struct dma_desc *np,
5136 				   struct xdp_buff *xdp)
5137 {
5138 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5139 	struct stmmac_channel *ch = &priv->channel[queue];
5140 	unsigned int len = xdp->data_end - xdp->data;
5141 	enum pkt_hash_types hash_type;
5142 	int coe = priv->hw->rx_csum;
5143 	struct sk_buff *skb;
5144 	u32 hash;
5145 
5146 	skb = stmmac_construct_skb_zc(ch, xdp);
5147 	if (!skb) {
5148 		priv->xstats.rx_dropped++;
5149 		return;
5150 	}
5151 
5152 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5153 	if (priv->hw->hw_vlan_en)
5154 		/* MAC level stripping. */
5155 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5156 	else
5157 		/* Driver level stripping. */
5158 		stmmac_rx_vlan(priv->dev, skb);
5159 	skb->protocol = eth_type_trans(skb, priv->dev);
5160 
5161 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5162 		skb_checksum_none_assert(skb);
5163 	else
5164 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5165 
5166 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5167 		skb_set_hash(skb, hash, hash_type);
5168 
5169 	skb_record_rx_queue(skb, queue);
5170 	napi_gro_receive(&ch->rxtx_napi, skb);
5171 
5172 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5173 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5174 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5175 	u64_stats_update_end(&rxq_stats->napi_syncp);
5176 }
5177 
5178 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5179 {
5180 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5181 	unsigned int entry = rx_q->dirty_rx;
5182 	struct dma_desc *rx_desc = NULL;
5183 	bool ret = true;
5184 
5185 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5186 
5187 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5188 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5189 		dma_addr_t dma_addr;
5190 		bool use_rx_wd;
5191 
5192 		if (!buf->xdp) {
5193 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5194 			if (!buf->xdp) {
5195 				ret = false;
5196 				break;
5197 			}
5198 		}
5199 
5200 		if (priv->extend_desc)
5201 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5202 		else
5203 			rx_desc = rx_q->dma_rx + entry;
5204 
5205 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5206 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5207 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5208 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5209 
5210 		rx_q->rx_count_frames++;
5211 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5212 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5213 			rx_q->rx_count_frames = 0;
5214 
5215 		use_rx_wd = !priv->rx_coal_frames[queue];
5216 		use_rx_wd |= rx_q->rx_count_frames > 0;
5217 		if (!priv->use_riwt)
5218 			use_rx_wd = false;
5219 
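		/* Make sure all descriptor fields are written before
		 * handing ownership back to the DMA.
		 */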
5220 		dma_wmb();
5221 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5222 
5223 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5224 	}
5225 
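	/* Advance the tail pointer only if at least one descriptor was
	 * refilled, so the DMA can start using the new buffers.
	 */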
5226 	if (rx_desc) {
5227 		rx_q->dirty_rx = entry;
5228 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5229 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5230 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5231 	}
5232 
5233 	return ret;
5234 }
5235 
5236 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5237 {
5238 	/* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
5239 	 * represents the incoming packet, whereas the cb field in the same
5240 	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
5241 	 * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5242 	 */
5243 	return (struct stmmac_xdp_buff *)xdp;
5244 }
5245 
5246 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5247 {
5248 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5249 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5250 	unsigned int count = 0, error = 0, len = 0;
5251 	int dirty = stmmac_rx_dirty(priv, queue);
5252 	unsigned int next_entry = rx_q->cur_rx;
5253 	u32 rx_errors = 0, rx_dropped = 0;
5254 	unsigned int desc_size;
5255 	struct bpf_prog *prog;
5256 	bool failure = false;
5257 	int xdp_status = 0;
5258 	int status = 0;
5259 
5260 	if (netif_msg_rx_status(priv)) {
5261 		void *rx_head;
5262 
5263 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5264 		if (priv->extend_desc) {
5265 			rx_head = (void *)rx_q->dma_erx;
5266 			desc_size = sizeof(struct dma_extended_desc);
5267 		} else {
5268 			rx_head = (void *)rx_q->dma_rx;
5269 			desc_size = sizeof(struct dma_desc);
5270 		}
5271 
5272 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5273 				    rx_q->dma_rx_phy, desc_size);
5274 	}
5275 	while (count < limit) {
5276 		struct stmmac_rx_buffer *buf;
5277 		struct stmmac_xdp_buff *ctx;
5278 		unsigned int buf1_len = 0;
5279 		struct dma_desc *np, *p;
5280 		int entry;
5281 		int res;
5282 
5283 		if (!count && rx_q->state_saved) {
5284 			error = rx_q->state.error;
5285 			len = rx_q->state.len;
5286 		} else {
5287 			rx_q->state_saved = false;
5288 			error = 0;
5289 			len = 0;
5290 		}
5291 
5292 		if (count >= limit)
5293 			break;
5294 
5295 read_again:
5296 		buf1_len = 0;
5297 		entry = next_entry;
5298 		buf = &rx_q->buf_pool[entry];
5299 
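		/* Refill in batches of at least STMMAC_RX_FILL_BATCH
		 * freed descriptors.
		 */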
5300 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5301 			failure = failure ||
5302 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5303 			dirty = 0;
5304 		}
5305 
5306 		if (priv->extend_desc)
5307 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5308 		else
5309 			p = rx_q->dma_rx + entry;
5310 
5311 		/* read the status of the incoming frame */
5312 		status = stmmac_rx_status(priv, &priv->xstats, p);
5313 		/* check if still owned by the DMA, otherwise go ahead */
5314 		if (unlikely(status & dma_own))
5315 			break;
5316 
5317 		/* Prefetch the next RX descriptor */
5318 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5319 						priv->dma_conf.dma_rx_size);
5320 		next_entry = rx_q->cur_rx;
5321 
5322 		if (priv->extend_desc)
5323 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5324 		else
5325 			np = rx_q->dma_rx + next_entry;
5326 
5327 		prefetch(np);
5328 
5329 		/* Ensure a valid XSK buffer before proceeding */
5330 		if (!buf->xdp)
5331 			break;
5332 
5333 		if (priv->extend_desc)
5334 			stmmac_rx_extended_status(priv, &priv->xstats,
5335 						  rx_q->dma_erx + entry);
5336 		if (unlikely(status == discard_frame)) {
5337 			xsk_buff_free(buf->xdp);
5338 			buf->xdp = NULL;
5339 			dirty++;
5340 			error = 1;
5341 			if (!priv->hwts_rx_en)
5342 				rx_errors++;
5343 		}
5344 
5345 		if (unlikely(error && (status & rx_not_ls)))
5346 			goto read_again;
5347 		if (unlikely(error)) {
5348 			count++;
5349 			continue;
5350 		}
5351 
5352 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5353 		if (likely(status & rx_not_ls)) {
5354 			xsk_buff_free(buf->xdp);
5355 			buf->xdp = NULL;
5356 			dirty++;
5357 			count++;
5358 			goto read_again;
5359 		}
5360 
5361 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5362 		ctx->priv = priv;
5363 		ctx->desc = p;
5364 		ctx->ndesc = np;
5365 
5366 		/* XDP ZC frames only support the primary buffer for now */
5367 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5368 		len += buf1_len;
5369 
5370 		/* ACS is disabled; strip manually. */
5371 		if (likely(!(status & rx_not_ls))) {
5372 			buf1_len -= ETH_FCS_LEN;
5373 			len -= ETH_FCS_LEN;
5374 		}
5375 
5376 		/* RX buffer is good and fits into an XSK pool buffer */
5377 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5378 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5379 
5380 		prog = READ_ONCE(priv->xdp_prog);
5381 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5382 
5383 		switch (res) {
5384 		case STMMAC_XDP_PASS:
5385 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5386 			xsk_buff_free(buf->xdp);
5387 			break;
5388 		case STMMAC_XDP_CONSUMED:
5389 			xsk_buff_free(buf->xdp);
5390 			rx_dropped++;
5391 			break;
5392 		case STMMAC_XDP_TX:
5393 		case STMMAC_XDP_REDIRECT:
5394 			xdp_status |= res;
5395 			break;
5396 		}
5397 
5398 		buf->xdp = NULL;
5399 		dirty++;
5400 		count++;
5401 	}
5402 
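	/* Save the state of a partially received frame so the next NAPI run
	 * can resume it.
	 */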
5403 	if (status & rx_not_ls) {
5404 		rx_q->state_saved = true;
5405 		rx_q->state.error = error;
5406 		rx_q->state.len = len;
5407 	}
5408 
5409 	stmmac_finalize_xdp_rx(priv, xdp_status);
5410 
5411 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5412 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5413 	u64_stats_update_end(&rxq_stats->napi_syncp);
5414 
5415 	priv->xstats.rx_dropped += rx_dropped;
5416 	priv->xstats.rx_errors += rx_errors;
5417 
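	/* Tell user space to kick the kernel (need_wakeup) while buffers are
	 * still missing, otherwise clear the flag.
	 */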
5418 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5419 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5420 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5421 		else
5422 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5423 
5424 		return (int)count;
5425 	}
5426 
5427 	return failure ? limit : (int)count;
5428 }
5429 
5430 /**
5431  * stmmac_rx - manage the receive process
5432  * @priv: driver private structure
5433  * @limit: napi budget
5434  * @queue: RX queue index.
5435  * Description : this is the function called by the napi poll method.
5436  * It gets all the frames inside the ring.
5437  */
5438 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5439 {
5440 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5441 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5442 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5443 	struct stmmac_channel *ch = &priv->channel[queue];
5444 	unsigned int count = 0, error = 0, len = 0;
5445 	int status = 0, coe = priv->hw->rx_csum;
5446 	unsigned int next_entry = rx_q->cur_rx;
5447 	enum dma_data_direction dma_dir;
5448 	unsigned int desc_size;
5449 	struct sk_buff *skb = NULL;
5450 	struct stmmac_xdp_buff ctx;
5451 	int xdp_status = 0;
5452 	int bufsz;
5453 
5454 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5455 	bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5456 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5457 
5458 	if (netif_msg_rx_status(priv)) {
5459 		void *rx_head;
5460 
5461 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5462 		if (priv->extend_desc) {
5463 			rx_head = (void *)rx_q->dma_erx;
5464 			desc_size = sizeof(struct dma_extended_desc);
5465 		} else {
5466 			rx_head = (void *)rx_q->dma_rx;
5467 			desc_size = sizeof(struct dma_desc);
5468 		}
5469 
5470 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5471 				    rx_q->dma_rx_phy, desc_size);
5472 	}
5473 	while (count < limit) {
5474 		unsigned int buf1_len = 0, buf2_len = 0;
5475 		enum pkt_hash_types hash_type;
5476 		struct stmmac_rx_buffer *buf;
5477 		struct dma_desc *np, *p;
5478 		int entry;
5479 		u32 hash;
5480 
5481 		if (!count && rx_q->state_saved) {
5482 			skb = rx_q->state.skb;
5483 			error = rx_q->state.error;
5484 			len = rx_q->state.len;
5485 		} else {
5486 			rx_q->state_saved = false;
5487 			skb = NULL;
5488 			error = 0;
5489 			len = 0;
5490 		}
5491 
5492 read_again:
5493 		if (count >= limit)
5494 			break;
5495 
5496 		buf1_len = 0;
5497 		buf2_len = 0;
5498 		entry = next_entry;
5499 		buf = &rx_q->buf_pool[entry];
5500 
5501 		if (priv->extend_desc)
5502 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5503 		else
5504 			p = rx_q->dma_rx + entry;
5505 
5506 		/* read the status of the incoming frame */
5507 		status = stmmac_rx_status(priv, &priv->xstats, p);
5508 		/* check if managed by the DMA otherwise go ahead */
5509 		if (unlikely(status & dma_own))
5510 			break;
5511 
5512 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5513 						priv->dma_conf.dma_rx_size);
5514 		next_entry = rx_q->cur_rx;
5515 
5516 		if (priv->extend_desc)
5517 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5518 		else
5519 			np = rx_q->dma_rx + next_entry;
5520 
5521 		prefetch(np);
5522 
5523 		if (priv->extend_desc)
5524 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5525 		if (unlikely(status == discard_frame)) {
5526 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5527 			buf->page = NULL;
5528 			error = 1;
5529 			if (!priv->hwts_rx_en)
5530 				rx_errors++;
5531 		}
5532 
5533 		if (unlikely(error && (status & rx_not_ls)))
5534 			goto read_again;
5535 		if (unlikely(error)) {
5536 			dev_kfree_skb(skb);
5537 			skb = NULL;
5538 			count++;
5539 			continue;
5540 		}
5541 
5542 		/* Buffer is good. Go on. */
5543 
5544 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5545 		len += buf1_len;
5546 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5547 		len += buf2_len;
5548 
5549 		/* ACS is disabled; strip manually. */
5550 		if (likely(!(status & rx_not_ls))) {
5551 			if (buf2_len) {
5552 				buf2_len -= ETH_FCS_LEN;
5553 				len -= ETH_FCS_LEN;
5554 			} else if (buf1_len) {
5555 				buf1_len -= ETH_FCS_LEN;
5556 				len -= ETH_FCS_LEN;
5557 			}
5558 		}
5559 
5560 		if (!skb) {
5561 			unsigned int pre_len, sync_len;
5562 
5563 			dma_sync_single_for_cpu(priv->device, buf->addr,
5564 						buf1_len, dma_dir);
5565 			net_prefetch(page_address(buf->page) +
5566 				     buf->page_offset);
5567 
5568 			xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5569 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5570 					 buf->page_offset, buf1_len, true);
5571 
5572 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5573 				  buf->page_offset;
5574 
5575 			ctx.priv = priv;
5576 			ctx.desc = p;
5577 			ctx.ndesc = np;
5578 
5579 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5580 			/* Due to xdp_adjust_tail: the DMA sync for_device
5581 			 * must cover the maximum length the CPU touched.
5582 			 */
5583 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5584 				   buf->page_offset;
5585 			sync_len = max(sync_len, pre_len);
5586 
5587 			/* For non-XDP_PASS verdicts */
5588 			if (IS_ERR(skb)) {
5589 				unsigned int xdp_res = -PTR_ERR(skb);
5590 
5591 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5592 					page_pool_put_page(rx_q->page_pool,
5593 							   virt_to_head_page(ctx.xdp.data),
5594 							   sync_len, true);
5595 					buf->page = NULL;
5596 					rx_dropped++;
5597 
5598 					/* Clear skb as it only carried the
5599 					 * verdict from the XDP program.
5600 					 */
5601 					skb = NULL;
5602 
5603 					if (unlikely((status & rx_not_ls)))
5604 						goto read_again;
5605 
5606 					count++;
5607 					continue;
5608 				} else if (xdp_res & (STMMAC_XDP_TX |
5609 						      STMMAC_XDP_REDIRECT)) {
5610 					xdp_status |= xdp_res;
5611 					buf->page = NULL;
5612 					skb = NULL;
5613 					count++;
5614 					continue;
5615 				}
5616 			}
5617 		}
5618 
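		/* Head of the frame (XDP_PASS or no program loaded): build the
		 * skb directly on top of the page pool buffer.
		 */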
5619 		if (!skb) {
5620 			unsigned int head_pad_len;
5621 
5622 			/* XDP program may expand or reduce tail */
5623 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5624 
5625 			skb = napi_build_skb(page_address(buf->page),
5626 					     rx_q->napi_skb_frag_size);
5627 			if (!skb) {
5628 				page_pool_recycle_direct(rx_q->page_pool,
5629 							 buf->page);
5630 				rx_dropped++;
5631 				count++;
5632 				goto drain_data;
5633 			}
5634 
5635 			/* XDP program may adjust header */
5636 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5637 			skb_reserve(skb, head_pad_len);
5638 			skb_put(skb, buf1_len);
5639 			skb_mark_for_recycle(skb);
5640 			buf->page = NULL;
5641 		} else if (buf1_len) {
5642 			dma_sync_single_for_cpu(priv->device, buf->addr,
5643 						buf1_len, dma_dir);
5644 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5645 					buf->page, buf->page_offset, buf1_len,
5646 					priv->dma_conf.dma_buf_sz);
5647 			buf->page = NULL;
5648 		}
5649 
5650 		if (buf2_len) {
5651 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5652 						buf2_len, dma_dir);
5653 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5654 					buf->sec_page, 0, buf2_len,
5655 					priv->dma_conf.dma_buf_sz);
5656 			buf->sec_page = NULL;
5657 		}
5658 
5659 drain_data:
5660 		if (likely(status & rx_not_ls))
5661 			goto read_again;
5662 		if (!skb)
5663 			continue;
5664 
5665 		/* Got entire packet into SKB. Finish it. */
5666 
5667 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5668 
5669 		if (priv->hw->hw_vlan_en)
5670 			/* MAC level stripping. */
5671 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5672 		else
5673 			/* Driver level stripping. */
5674 			stmmac_rx_vlan(priv->dev, skb);
5675 
5676 		skb->protocol = eth_type_trans(skb, priv->dev);
5677 
5678 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5679 		    (status & csum_none))
5680 			skb_checksum_none_assert(skb);
5681 		else
5682 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5683 
5684 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5685 			skb_set_hash(skb, hash, hash_type);
5686 
5687 		skb_record_rx_queue(skb, queue);
5688 		napi_gro_receive(&ch->rx_napi, skb);
5689 		skb = NULL;
5690 
5691 		rx_packets++;
5692 		rx_bytes += len;
5693 		count++;
5694 	}
5695 
5696 	if (status & rx_not_ls || skb) {
5697 		rx_q->state_saved = true;
5698 		rx_q->state.skb = skb;
5699 		rx_q->state.error = error;
5700 		rx_q->state.len = len;
5701 	}
5702 
5703 	stmmac_finalize_xdp_rx(priv, xdp_status);
5704 
5705 	stmmac_rx_refill(priv, queue);
5706 
5707 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5708 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5709 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5710 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5711 	u64_stats_update_end(&rxq_stats->napi_syncp);
5712 
5713 	priv->xstats.rx_dropped += rx_dropped;
5714 	priv->xstats.rx_errors += rx_errors;
5715 
5716 	return count;
5717 }
5718 
5719 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5720 {
5721 	struct stmmac_channel *ch =
5722 		container_of(napi, struct stmmac_channel, rx_napi);
5723 	struct stmmac_priv *priv = ch->priv_data;
5724 	struct stmmac_rxq_stats *rxq_stats;
5725 	u32 chan = ch->index;
5726 	int work_done;
5727 
5728 	rxq_stats = &priv->xstats.rxq_stats[chan];
5729 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5730 	u64_stats_inc(&rxq_stats->napi.poll);
5731 	u64_stats_update_end(&rxq_stats->napi_syncp);
5732 
5733 	work_done = stmmac_rx(priv, budget, chan);
5734 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5735 		unsigned long flags;
5736 
5737 		spin_lock_irqsave(&ch->lock, flags);
5738 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5739 		spin_unlock_irqrestore(&ch->lock, flags);
5740 	}
5741 
5742 	return work_done;
5743 }
5744 
5745 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5746 {
5747 	struct stmmac_channel *ch =
5748 		container_of(napi, struct stmmac_channel, tx_napi);
5749 	struct stmmac_priv *priv = ch->priv_data;
5750 	struct stmmac_txq_stats *txq_stats;
5751 	bool pending_packets = false;
5752 	u32 chan = ch->index;
5753 	int work_done;
5754 
5755 	txq_stats = &priv->xstats.txq_stats[chan];
5756 	u64_stats_update_begin(&txq_stats->napi_syncp);
5757 	u64_stats_inc(&txq_stats->napi.poll);
5758 	u64_stats_update_end(&txq_stats->napi_syncp);
5759 
5760 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5761 	work_done = min(work_done, budget);
5762 
5763 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5764 		unsigned long flags;
5765 
5766 		spin_lock_irqsave(&ch->lock, flags);
5767 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5768 		spin_unlock_irqrestore(&ch->lock, flags);
5769 	}
5770 
5771 	/* TX still has packets to handle; check if we need to arm the tx timer */
5772 	if (pending_packets)
5773 		stmmac_tx_timer_arm(priv, chan);
5774 
5775 	return work_done;
5776 }
5777 
5778 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5779 {
5780 	struct stmmac_channel *ch =
5781 		container_of(napi, struct stmmac_channel, rxtx_napi);
5782 	struct stmmac_priv *priv = ch->priv_data;
5783 	bool tx_pending_packets = false;
5784 	int rx_done, tx_done, rxtx_done;
5785 	struct stmmac_rxq_stats *rxq_stats;
5786 	struct stmmac_txq_stats *txq_stats;
5787 	u32 chan = ch->index;
5788 
5789 	rxq_stats = &priv->xstats.rxq_stats[chan];
5790 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5791 	u64_stats_inc(&rxq_stats->napi.poll);
5792 	u64_stats_update_end(&rxq_stats->napi_syncp);
5793 
5794 	txq_stats = &priv->xstats.txq_stats[chan];
5795 	u64_stats_update_begin(&txq_stats->napi_syncp);
5796 	u64_stats_inc(&txq_stats->napi.poll);
5797 	u64_stats_update_end(&txq_stats->napi_syncp);
5798 
5799 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5800 	tx_done = min(tx_done, budget);
5801 
5802 	rx_done = stmmac_rx_zc(priv, budget, chan);
5803 
5804 	rxtx_done = max(tx_done, rx_done);
5805 
5806 	/* If either TX or RX work is not complete, return budget
5807 	 * and keep polling.
5808 	 */
5809 	if (rxtx_done >= budget)
5810 		return budget;
5811 
5812 	/* all work done, exit the polling mode */
5813 	if (napi_complete_done(napi, rxtx_done)) {
5814 		unsigned long flags;
5815 
5816 		spin_lock_irqsave(&ch->lock, flags);
5817 		/* Both RX and TX work are complete,
5818 		 * so enable both RX & TX IRQs.
5819 		 */
5820 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5821 		spin_unlock_irqrestore(&ch->lock, flags);
5822 	}
5823 
5824 	/* TX still has packets to handle; check if we need to arm the tx timer */
5825 	if (tx_pending_packets)
5826 		stmmac_tx_timer_arm(priv, chan);
5827 
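	/* napi_complete_done() may already have been called, so clamp the
	 * return value and never report the full budget back here.
	 */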
5828 	return min(rxtx_done, budget - 1);
5829 }
5830 
5831 /**
5832  *  stmmac_tx_timeout
5833  *  @dev : Pointer to net device structure
5834  *  @txqueue: the index of the hanging transmit queue
5835  *  Description: this function is called when a packet transmission fails to
5836  *   complete within a reasonable time. The driver will mark the error in the
5837  *   netdev structure and arrange for the device to be reset to a sane state
5838  *   in order to transmit a new packet.
5839  */
5840 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5841 {
5842 	struct stmmac_priv *priv = netdev_priv(dev);
5843 
5844 	stmmac_global_err(priv);
5845 }
5846 
5847 /**
5848  *  stmmac_set_rx_mode - entry point for multicast addressing
5849  *  @dev : pointer to the device structure
5850  *  Description:
5851  *  This function is a driver entry point which gets called by the kernel
5852  *  whenever multicast addresses must be enabled/disabled.
5853  *  Return value:
5854  *  void.
5855  *
5856  *  FIXME: This may need RXC to be running, but it may be called with BH
5857  *  disabled, which means we can't call phylink_rx_clk_stop*().
5858  */
5859 static void stmmac_set_rx_mode(struct net_device *dev)
5860 {
5861 	struct stmmac_priv *priv = netdev_priv(dev);
5862 
5863 	stmmac_set_filter(priv, priv->hw, dev);
5864 }
5865 
5866 /**
5867  *  stmmac_change_mtu - entry point to change MTU size for the device.
5868  *  @dev : device pointer.
5869  *  @new_mtu : the new MTU size for the device.
5870  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5871  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5872  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5873  *  Return value:
5874  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5875  *  file on failure.
5876  */
5877 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5878 {
5879 	struct stmmac_priv *priv = netdev_priv(dev);
5880 	int txfifosz = priv->plat->tx_fifo_size;
5881 	struct stmmac_dma_conf *dma_conf;
5882 	const int mtu = new_mtu;
5883 	int ret;
5884 
5885 	if (txfifosz == 0)
5886 		txfifosz = priv->dma_cap.tx_fifo_size;
5887 
5888 	txfifosz /= priv->plat->tx_queues_to_use;
5889 
5890 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5891 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5892 		return -EINVAL;
5893 	}
5894 
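	/* The aligned value is only used for the FIFO check below; the device
	 * keeps the MTU that was actually requested (saved in 'mtu' above).
	 */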
5895 	new_mtu = STMMAC_ALIGN(new_mtu);
5896 
5897 	/* If this condition is true, the FIFO is too small or the MTU too large */
5898 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5899 		return -EINVAL;
5900 
5901 	if (netif_running(dev)) {
5902 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5903 		/* Try to allocate the new DMA conf with the new mtu */
5904 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5905 		if (IS_ERR(dma_conf)) {
5906 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5907 				   mtu);
5908 			return PTR_ERR(dma_conf);
5909 		}
5910 
5911 		__stmmac_release(dev);
5912 
5913 		ret = __stmmac_open(dev, dma_conf);
5914 		if (ret) {
5915 			free_dma_desc_resources(priv, dma_conf);
5916 			kfree(dma_conf);
5917 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5918 			return ret;
5919 		}
5920 
5921 		kfree(dma_conf);
5922 
5923 		stmmac_set_rx_mode(dev);
5924 	}
5925 
5926 	WRITE_ONCE(dev->mtu, mtu);
5927 	netdev_update_features(dev);
5928 
5929 	return 0;
5930 }
5931 
5932 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5933 					     netdev_features_t features)
5934 {
5935 	struct stmmac_priv *priv = netdev_priv(dev);
5936 
5937 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5938 		features &= ~NETIF_F_RXCSUM;
5939 
5940 	if (!priv->plat->tx_coe)
5941 		features &= ~NETIF_F_CSUM_MASK;
5942 
5943 	/* Some GMAC devices have bugged Jumbo frame support that
5944 	 * needs the Tx COE disabled for oversized frames (due to
5945 	 * limited buffer sizes). In this case we disable the TX csum
5946 	 * insertion in the TDES and do not use store-and-forward.
5947 	 */
5948 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5949 		features &= ~NETIF_F_CSUM_MASK;
5950 
5951 	/* Enable or disable TSO according to what ethtool requested */
5952 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5953 		if (features & NETIF_F_TSO)
5954 			priv->tso = true;
5955 		else
5956 			priv->tso = false;
5957 	}
5958 
5959 	return features;
5960 }
5961 
5962 static int stmmac_set_features(struct net_device *netdev,
5963 			       netdev_features_t features)
5964 {
5965 	struct stmmac_priv *priv = netdev_priv(netdev);
5966 
5967 	/* Keep the COE type only if RX checksum offload is requested */
5968 	if (features & NETIF_F_RXCSUM)
5969 		priv->hw->rx_csum = priv->plat->rx_coe;
5970 	else
5971 		priv->hw->rx_csum = 0;
5972 	/* No check needed because rx_coe has been set earlier and it will be
5973 	 * fixed up in case of issues.
5974 	 */
5975 	stmmac_rx_ipc(priv, priv->hw);
5976 
5977 	if (priv->sph_cap) {
5978 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5979 		u32 chan;
5980 
5981 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5982 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5983 	}
5984 
5985 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5986 		priv->hw->hw_vlan_en = true;
5987 	else
5988 		priv->hw->hw_vlan_en = false;
5989 
5990 	phylink_rx_clk_stop_block(priv->phylink);
5991 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5992 	phylink_rx_clk_stop_unblock(priv->phylink);
5993 
5994 	return 0;
5995 }
5996 
5997 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5998 {
5999 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6000 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6001 	u32 queues_count;
6002 	u32 queue;
6003 	bool xmac;
6004 
6005 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6006 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6007 
6008 	if (priv->irq_wake)
6009 		pm_wakeup_event(priv->device, 0);
6010 
6011 	if (priv->dma_cap.estsel)
6012 		stmmac_est_irq_status(priv, priv, priv->dev,
6013 				      &priv->xstats, tx_cnt);
6014 
6015 	if (stmmac_fpe_supported(priv))
6016 		stmmac_fpe_irq_status(priv);
6017 
6018 	/* To handle the GMAC's own interrupts */
6019 	if ((priv->plat->has_gmac) || xmac) {
6020 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6021 
6022 		if (unlikely(status)) {
6023 			/* For LPI we need to save the tx status */
6024 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6025 				priv->tx_path_in_lpi_mode = true;
6026 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6027 				priv->tx_path_in_lpi_mode = false;
6028 		}
6029 
6030 		for (queue = 0; queue < queues_count; queue++)
6031 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6032 
6033 		/* PCS link status */
6034 		if (priv->hw->pcs &&
6035 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6036 			if (priv->xstats.pcs_link)
6037 				netif_carrier_on(priv->dev);
6038 			else
6039 				netif_carrier_off(priv->dev);
6040 		}
6041 
6042 		stmmac_timestamp_interrupt(priv, priv);
6043 	}
6044 }
6045 
6046 /**
6047  *  stmmac_interrupt - main ISR
6048  *  @irq: interrupt number.
6049  *  @dev_id: to pass the net device pointer.
6050  *  Description: this is the main driver interrupt service routine.
6051  *  It can call:
6052  *  o DMA service routine (to manage incoming frame reception and transmission
6053  *    status)
6054  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6055  *    interrupts.
6056  */
6057 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6058 {
6059 	struct net_device *dev = (struct net_device *)dev_id;
6060 	struct stmmac_priv *priv = netdev_priv(dev);
6061 
6062 	/* Check if adapter is up */
6063 	if (test_bit(STMMAC_DOWN, &priv->state))
6064 		return IRQ_HANDLED;
6065 
6066 	/* Check ASP error if it isn't delivered via an individual IRQ */
6067 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6068 		return IRQ_HANDLED;
6069 
6070 	/* To handle Common interrupts */
6071 	stmmac_common_interrupt(priv);
6072 
6073 	/* To handle DMA interrupts */
6074 	stmmac_dma_interrupt(priv);
6075 
6076 	return IRQ_HANDLED;
6077 }
6078 
6079 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6080 {
6081 	struct net_device *dev = (struct net_device *)dev_id;
6082 	struct stmmac_priv *priv = netdev_priv(dev);
6083 
6084 	/* Check if adapter is up */
6085 	if (test_bit(STMMAC_DOWN, &priv->state))
6086 		return IRQ_HANDLED;
6087 
6088 	/* To handle Common interrupts */
6089 	stmmac_common_interrupt(priv);
6090 
6091 	return IRQ_HANDLED;
6092 }
6093 
6094 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6095 {
6096 	struct net_device *dev = (struct net_device *)dev_id;
6097 	struct stmmac_priv *priv = netdev_priv(dev);
6098 
6099 	/* Check if adapter is up */
6100 	if (test_bit(STMMAC_DOWN, &priv->state))
6101 		return IRQ_HANDLED;
6102 
6103 	/* Check if a fatal error happened */
6104 	stmmac_safety_feat_interrupt(priv);
6105 
6106 	return IRQ_HANDLED;
6107 }
6108 
6109 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6110 {
6111 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6112 	struct stmmac_dma_conf *dma_conf;
6113 	int chan = tx_q->queue_index;
6114 	struct stmmac_priv *priv;
6115 	int status;
6116 
6117 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6118 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6119 
6120 	/* Check if adapter is up */
6121 	if (test_bit(STMMAC_DOWN, &priv->state))
6122 		return IRQ_HANDLED;
6123 
6124 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6125 
6126 	if (unlikely(status & tx_hard_error_bump_tc)) {
6127 		/* Try to bump up the dma threshold on this failure */
6128 		stmmac_bump_dma_threshold(priv, chan);
6129 	} else if (unlikely(status == tx_hard_error)) {
6130 		stmmac_tx_err(priv, chan);
6131 	}
6132 
6133 	return IRQ_HANDLED;
6134 }
6135 
6136 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6137 {
6138 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6139 	struct stmmac_dma_conf *dma_conf;
6140 	int chan = rx_q->queue_index;
6141 	struct stmmac_priv *priv;
6142 
6143 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6144 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6145 
6146 	/* Check if adapter is up */
6147 	if (test_bit(STMMAC_DOWN, &priv->state))
6148 		return IRQ_HANDLED;
6149 
6150 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6151 
6152 	return IRQ_HANDLED;
6153 }
6154 
6155 /**
6156  *  stmmac_ioctl - Entry point for the Ioctl
6157  *  @dev: Device pointer.
6158  *  @rq: An IOCTL-specific structure that can contain a pointer to
6159  *  a proprietary structure used to pass information to the driver.
6160  *  @cmd: IOCTL command
6161  *  Description:
6162  *  Currently it only supports the phy_mii_ioctl(...) MII commands.
6163  */
6164 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6165 {
6166 	struct stmmac_priv *priv = netdev_priv(dev);
6167 	int ret = -EOPNOTSUPP;
6168 
6169 	if (!netif_running(dev))
6170 		return -EINVAL;
6171 
6172 	switch (cmd) {
6173 	case SIOCGMIIPHY:
6174 	case SIOCGMIIREG:
6175 	case SIOCSMIIREG:
6176 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6177 		break;
6178 	default:
6179 		break;
6180 	}
6181 
6182 	return ret;
6183 }
6184 
6185 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6186 				    void *cb_priv)
6187 {
6188 	struct stmmac_priv *priv = cb_priv;
6189 	int ret = -EOPNOTSUPP;
6190 
6191 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6192 		return ret;
6193 
6194 	__stmmac_disable_all_queues(priv);
6195 
6196 	switch (type) {
6197 	case TC_SETUP_CLSU32:
6198 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6199 		break;
6200 	case TC_SETUP_CLSFLOWER:
6201 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6202 		break;
6203 	default:
6204 		break;
6205 	}
6206 
6207 	stmmac_enable_all_queues(priv);
6208 	return ret;
6209 }
6210 
6211 static LIST_HEAD(stmmac_block_cb_list);
6212 
6213 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6214 			   void *type_data)
6215 {
6216 	struct stmmac_priv *priv = netdev_priv(ndev);
6217 
6218 	switch (type) {
6219 	case TC_QUERY_CAPS:
6220 		return stmmac_tc_query_caps(priv, priv, type_data);
6221 	case TC_SETUP_QDISC_MQPRIO:
6222 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6223 	case TC_SETUP_BLOCK:
6224 		return flow_block_cb_setup_simple(type_data,
6225 						  &stmmac_block_cb_list,
6226 						  stmmac_setup_tc_block_cb,
6227 						  priv, priv, true);
6228 	case TC_SETUP_QDISC_CBS:
6229 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6230 	case TC_SETUP_QDISC_TAPRIO:
6231 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6232 	case TC_SETUP_QDISC_ETF:
6233 		return stmmac_tc_setup_etf(priv, priv, type_data);
6234 	default:
6235 		return -EOPNOTSUPP;
6236 	}
6237 }
6238 
6239 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6240 			       struct net_device *sb_dev)
6241 {
6242 	int gso = skb_shinfo(skb)->gso_type;
6243 
6244 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6245 		/*
6246 		 * There is no way to determine the number of TSO/USO
6247 		 * capable Queues. Let's always use Queue 0 because if
6248 		 * TSO/USO is supported then at least this one will be
6249 		 * capable.
6250 		 */
6251 		return 0;
6252 	}
6253 
6254 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6255 }
6256 
6257 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6258 {
6259 	struct stmmac_priv *priv = netdev_priv(ndev);
6260 	int ret = 0;
6261 
6262 	ret = pm_runtime_resume_and_get(priv->device);
6263 	if (ret < 0)
6264 		return ret;
6265 
6266 	ret = eth_mac_addr(ndev, addr);
6267 	if (ret)
6268 		goto set_mac_error;
6269 
6270 	phylink_rx_clk_stop_block(priv->phylink);
6271 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6272 	phylink_rx_clk_stop_unblock(priv->phylink);
6273 
6274 set_mac_error:
6275 	pm_runtime_put(priv->device);
6276 
6277 	return ret;
6278 }
6279 
6280 #ifdef CONFIG_DEBUG_FS
6281 static struct dentry *stmmac_fs_dir;
6282 
6283 static void sysfs_display_ring(void *head, int size, int extend_desc,
6284 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6285 {
6286 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6287 	struct dma_desc *p = (struct dma_desc *)head;
6288 	unsigned int desc_size;
6289 	dma_addr_t dma_addr;
6290 	int i;
6291 
6292 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6293 	for (i = 0; i < size; i++) {
6294 		dma_addr = dma_phy_addr + i * desc_size;
6295 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6296 				i, &dma_addr,
6297 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6298 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6299 		if (extend_desc)
6300 			p = &(++ep)->basic;
6301 		else
6302 			p++;
6303 	}
6304 }
6305 
6306 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6307 {
6308 	struct net_device *dev = seq->private;
6309 	struct stmmac_priv *priv = netdev_priv(dev);
6310 	u32 rx_count = priv->plat->rx_queues_to_use;
6311 	u32 tx_count = priv->plat->tx_queues_to_use;
6312 	u32 queue;
6313 
6314 	if ((dev->flags & IFF_UP) == 0)
6315 		return 0;
6316 
6317 	for (queue = 0; queue < rx_count; queue++) {
6318 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6319 
6320 		seq_printf(seq, "RX Queue %d:\n", queue);
6321 
6322 		if (priv->extend_desc) {
6323 			seq_printf(seq, "Extended descriptor ring:\n");
6324 			sysfs_display_ring((void *)rx_q->dma_erx,
6325 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6326 		} else {
6327 			seq_printf(seq, "Descriptor ring:\n");
6328 			sysfs_display_ring((void *)rx_q->dma_rx,
6329 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6330 		}
6331 	}
6332 
6333 	for (queue = 0; queue < tx_count; queue++) {
6334 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6335 
6336 		seq_printf(seq, "TX Queue %d:\n", queue);
6337 
6338 		if (priv->extend_desc) {
6339 			seq_printf(seq, "Extended descriptor ring:\n");
6340 			sysfs_display_ring((void *)tx_q->dma_etx,
6341 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6342 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6343 			seq_printf(seq, "Descriptor ring:\n");
6344 			sysfs_display_ring((void *)tx_q->dma_tx,
6345 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6346 		}
6347 	}
6348 
6349 	return 0;
6350 }
6351 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6352 
6353 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6354 {
6355 	static const char * const dwxgmac_timestamp_source[] = {
6356 		"None",
6357 		"Internal",
6358 		"External",
6359 		"Both",
6360 	};
6361 	static const char * const dwxgmac_safety_feature_desc[] = {
6362 		"No",
6363 		"All Safety Features with ECC and Parity",
6364 		"All Safety Features without ECC or Parity",
6365 		"All Safety Features with Parity Only",
6366 		"ECC Only",
6367 		"UNDEFINED",
6368 		"UNDEFINED",
6369 		"UNDEFINED",
6370 	};
6371 	struct net_device *dev = seq->private;
6372 	struct stmmac_priv *priv = netdev_priv(dev);
6373 
6374 	if (!priv->hw_cap_support) {
6375 		seq_printf(seq, "DMA HW features not supported\n");
6376 		return 0;
6377 	}
6378 
6379 	seq_printf(seq, "==============================\n");
6380 	seq_printf(seq, "\tDMA HW features\n");
6381 	seq_printf(seq, "==============================\n");
6382 
6383 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6384 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6385 	seq_printf(seq, "\t1000 Mbps: %s\n",
6386 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6387 	seq_printf(seq, "\tHalf duplex: %s\n",
6388 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6389 	if (priv->plat->has_xgmac) {
6390 		seq_printf(seq,
6391 			   "\tNumber of Additional MAC address registers: %d\n",
6392 			   priv->dma_cap.multi_addr);
6393 	} else {
6394 		seq_printf(seq, "\tHash Filter: %s\n",
6395 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6396 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6397 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6398 	}
6399 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6400 		   (priv->dma_cap.pcs) ? "Y" : "N");
6401 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6402 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6403 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6404 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6405 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6406 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6407 	seq_printf(seq, "\tRMON module: %s\n",
6408 		   (priv->dma_cap.rmon) ? "Y" : "N");
6409 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6410 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6411 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6412 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6413 	if (priv->plat->has_xgmac)
6414 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6415 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6416 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6417 		   (priv->dma_cap.eee) ? "Y" : "N");
6418 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6419 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6420 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6421 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6422 	    priv->plat->has_xgmac) {
6423 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6424 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6425 	} else {
6426 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6427 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6428 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6429 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6430 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6431 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6432 	}
6433 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6434 		   priv->dma_cap.number_rx_channel);
6435 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6436 		   priv->dma_cap.number_tx_channel);
6437 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6438 		   priv->dma_cap.number_rx_queues);
6439 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6440 		   priv->dma_cap.number_tx_queues);
6441 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6442 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6443 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6444 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6445 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6446 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6447 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6448 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6449 		   priv->dma_cap.pps_out_num);
6450 	seq_printf(seq, "\tSafety Features: %s\n",
6451 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6452 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6453 		   priv->dma_cap.frpsel ? "Y" : "N");
6454 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6455 		   priv->dma_cap.host_dma_width);
6456 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6457 		   priv->dma_cap.rssen ? "Y" : "N");
6458 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6459 		   priv->dma_cap.vlhash ? "Y" : "N");
6460 	seq_printf(seq, "\tSplit Header: %s\n",
6461 		   priv->dma_cap.sphen ? "Y" : "N");
6462 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6463 		   priv->dma_cap.vlins ? "Y" : "N");
6464 	seq_printf(seq, "\tDouble VLAN: %s\n",
6465 		   priv->dma_cap.dvlan ? "Y" : "N");
6466 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6467 		   priv->dma_cap.l3l4fnum);
6468 	seq_printf(seq, "\tARP Offloading: %s\n",
6469 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6470 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6471 		   priv->dma_cap.estsel ? "Y" : "N");
6472 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6473 		   priv->dma_cap.fpesel ? "Y" : "N");
6474 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6475 		   priv->dma_cap.tbssel ? "Y" : "N");
6476 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6477 		   priv->dma_cap.tbs_ch_num);
6478 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6479 		   priv->dma_cap.sgfsel ? "Y" : "N");
6480 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6481 		   BIT(priv->dma_cap.ttsfd) >> 1);
6482 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6483 		   priv->dma_cap.numtc);
6484 	seq_printf(seq, "\tDCB Feature: %s\n",
6485 		   priv->dma_cap.dcben ? "Y" : "N");
6486 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6487 		   priv->dma_cap.advthword ? "Y" : "N");
6488 	seq_printf(seq, "\tPTP Offload: %s\n",
6489 		   priv->dma_cap.ptoen ? "Y" : "N");
6490 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6491 		   priv->dma_cap.osten ? "Y" : "N");
6492 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6493 		   priv->dma_cap.pfcen ? "Y" : "N");
6494 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6495 		   BIT(priv->dma_cap.frpes) << 6);
6496 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6497 		   BIT(priv->dma_cap.frpbs) << 6);
6498 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6499 		   priv->dma_cap.frppipe_num);
6500 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6501 		   priv->dma_cap.nrvf_num ?
6502 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6503 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6504 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6505 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6506 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6507 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6508 		   priv->dma_cap.cbtisel ? "Y" : "N");
6509 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6510 		   priv->dma_cap.aux_snapshot_n);
6511 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6512 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6513 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6514 		   priv->dma_cap.edma ? "Y" : "N");
6515 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6516 		   priv->dma_cap.ediffc ? "Y" : "N");
6517 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6518 		   priv->dma_cap.vxn ? "Y" : "N");
6519 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6520 		   priv->dma_cap.dbgmem ? "Y" : "N");
6521 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6522 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6523 	return 0;
6524 }
6525 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6526 
6527 /* Use network device events to rename debugfs file entries.
6528  */
6529 static int stmmac_device_event(struct notifier_block *unused,
6530 			       unsigned long event, void *ptr)
6531 {
6532 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6533 	struct stmmac_priv *priv = netdev_priv(dev);
6534 
6535 	if (dev->netdev_ops != &stmmac_netdev_ops)
6536 		goto done;
6537 
6538 	switch (event) {
6539 	case NETDEV_CHANGENAME:
6540 		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6541 		break;
6542 	}
6543 done:
6544 	return NOTIFY_DONE;
6545 }
6546 
6547 static struct notifier_block stmmac_notifier = {
6548 	.notifier_call = stmmac_device_event,
6549 };
6550 
6551 static void stmmac_init_fs(struct net_device *dev)
6552 {
6553 	struct stmmac_priv *priv = netdev_priv(dev);
6554 
6555 	rtnl_lock();
6556 
6557 	/* Create per netdev entries */
6558 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6559 
6560 	/* Entry to report DMA RX/TX rings */
6561 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6562 			    &stmmac_rings_status_fops);
6563 
6564 	/* Entry to report the DMA HW features */
6565 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6566 			    &stmmac_dma_cap_fops);
6567 
6568 	rtnl_unlock();
6569 }
6570 
6571 static void stmmac_exit_fs(struct net_device *dev)
6572 {
6573 	struct stmmac_priv *priv = netdev_priv(dev);
6574 
6575 	debugfs_remove_recursive(priv->dbgfs_dir);
6576 }
6577 #endif /* CONFIG_DEBUG_FS */
6578 
6579 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6580 {
6581 	unsigned char *data = (unsigned char *)&vid_le;
6582 	unsigned char data_byte = 0;
6583 	u32 crc = ~0x0;
6584 	u32 temp = 0;
6585 	int i, bits;
6586 
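	/* Bit-by-bit CRC-32 (Ethernet polynomial, reflected form 0xedb88320)
	 * over the 12 VID bits.
	 */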
6587 	bits = get_bitmask_order(VLAN_VID_MASK);
6588 	for (i = 0; i < bits; i++) {
6589 		if ((i % 8) == 0)
6590 			data_byte = data[i / 8];
6591 
6592 		temp = ((crc & 1) ^ data_byte) & 1;
6593 		crc >>= 1;
6594 		data_byte >>= 1;
6595 
6596 		if (temp)
6597 			crc ^= 0xedb88320;
6598 	}
6599 
6600 	return crc;
6601 }
6602 
6603 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6604 {
6605 	u32 crc, hash = 0;
6606 	u16 pmatch = 0;
6607 	int count = 0;
6608 	u16 vid = 0;
6609 
6610 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6611 		__le16 vid_le = cpu_to_le16(vid);
6612 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6613 		hash |= (1 << crc);
6614 		count++;
6615 	}
6616 
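	/* Without VLAN hash filtering in hardware, fall back to a single
	 * perfect-match entry.
	 */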
6617 	if (!priv->dma_cap.vlhash) {
6618 		if (count > 2) /* VID = 0 always passes filter */
6619 			return -EOPNOTSUPP;
6620 
6621 		pmatch = vid;
6622 		hash = 0;
6623 	}
6624 
6625 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6626 }
6627 
6628 /* FIXME: This may need RXC to be running, but it may be called with BH
6629  * disabled, which means we can't call phylink_rx_clk_stop*().
6630  */
6631 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6632 {
6633 	struct stmmac_priv *priv = netdev_priv(ndev);
6634 	bool is_double = false;
6635 	int ret;
6636 
6637 	ret = pm_runtime_resume_and_get(priv->device);
6638 	if (ret < 0)
6639 		return ret;
6640 
6641 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6642 		is_double = true;
6643 
6644 	set_bit(vid, priv->active_vlans);
6645 	ret = stmmac_vlan_update(priv, is_double);
6646 	if (ret) {
6647 		clear_bit(vid, priv->active_vlans);
6648 		goto err_pm_put;
6649 	}
6650 
6651 	if (priv->hw->num_vlan) {
6652 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6653 		if (ret)
6654 			goto err_pm_put;
6655 	}
6656 err_pm_put:
6657 	pm_runtime_put(priv->device);
6658 
6659 	return ret;
6660 }
6661 
6662 /* FIXME: This may need RXC to be running, but it may be called with BH
6663  * disabled, which means we can't call phylink_rx_clk_stop*().
6664  */
6665 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6666 {
6667 	struct stmmac_priv *priv = netdev_priv(ndev);
6668 	bool is_double = false;
6669 	int ret;
6670 
6671 	ret = pm_runtime_resume_and_get(priv->device);
6672 	if (ret < 0)
6673 		return ret;
6674 
6675 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6676 		is_double = true;
6677 
6678 	clear_bit(vid, priv->active_vlans);
6679 
6680 	if (priv->hw->num_vlan) {
6681 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6682 		if (ret)
6683 			goto del_vlan_error;
6684 	}
6685 
6686 	ret = stmmac_vlan_update(priv, is_double);
6687 
6688 del_vlan_error:
6689 	pm_runtime_put(priv->device);
6690 
6691 	return ret;
6692 }
6693 
6694 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6695 {
6696 	struct stmmac_priv *priv = netdev_priv(dev);
6697 
6698 	switch (bpf->command) {
6699 	case XDP_SETUP_PROG:
6700 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6701 	case XDP_SETUP_XSK_POOL:
6702 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6703 					     bpf->xsk.queue_id);
6704 	default:
6705 		return -EOPNOTSUPP;
6706 	}
6707 }
6708 
6709 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6710 			   struct xdp_frame **frames, u32 flags)
6711 {
6712 	struct stmmac_priv *priv = netdev_priv(dev);
6713 	int cpu = smp_processor_id();
6714 	struct netdev_queue *nq;
6715 	int i, nxmit = 0;
6716 	int queue;
6717 
6718 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6719 		return -ENETDOWN;
6720 
6721 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6722 		return -EINVAL;
6723 
6724 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6725 	nq = netdev_get_tx_queue(priv->dev, queue);
6726 
6727 	__netif_tx_lock(nq, cpu);
6728 	/* Avoids TX time-out as we are sharing with slow path */
6729 	txq_trans_cond_update(nq);
6730 
6731 	for (i = 0; i < num_frames; i++) {
6732 		int res;
6733 
6734 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6735 		if (res == STMMAC_XDP_CONSUMED)
6736 			break;
6737 
6738 		nxmit++;
6739 	}
6740 
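	/* Push the tail pointer once for the whole batch and arm the TX
	 * coalescing timer.
	 */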
6741 	if (flags & XDP_XMIT_FLUSH) {
6742 		stmmac_flush_tx_descriptors(priv, queue);
6743 		stmmac_tx_timer_arm(priv, queue);
6744 	}
6745 
6746 	__netif_tx_unlock(nq);
6747 
6748 	return nxmit;
6749 }
6750 
6751 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6752 {
6753 	struct stmmac_channel *ch = &priv->channel[queue];
6754 	unsigned long flags;
6755 
6756 	spin_lock_irqsave(&ch->lock, flags);
6757 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6758 	spin_unlock_irqrestore(&ch->lock, flags);
6759 
6760 	stmmac_stop_rx_dma(priv, queue);
6761 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6762 }
6763 
6764 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6765 {
6766 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6767 	struct stmmac_channel *ch = &priv->channel[queue];
6768 	unsigned long flags;
6769 	u32 buf_size;
6770 	int ret;
6771 
6772 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6773 	if (ret) {
6774 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6775 		return;
6776 	}
6777 
6778 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6779 	if (ret) {
6780 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6781 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6782 		return;
6783 	}
6784 
6785 	stmmac_reset_rx_queue(priv, queue);
6786 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6787 
6788 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6789 			    rx_q->dma_rx_phy, rx_q->queue_index);
6790 
6791 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6792 			     sizeof(struct dma_desc));
6793 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6794 			       rx_q->rx_tail_addr, rx_q->queue_index);
6795 
6796 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6797 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6798 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6799 				      buf_size,
6800 				      rx_q->queue_index);
6801 	} else {
6802 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6803 				      priv->dma_conf.dma_buf_sz,
6804 				      rx_q->queue_index);
6805 	}
6806 
6807 	stmmac_start_rx_dma(priv, queue);
6808 
6809 	spin_lock_irqsave(&ch->lock, flags);
6810 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6811 	spin_unlock_irqrestore(&ch->lock, flags);
6812 }
6813 
6814 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6815 {
6816 	struct stmmac_channel *ch = &priv->channel[queue];
6817 	unsigned long flags;
6818 
6819 	spin_lock_irqsave(&ch->lock, flags);
6820 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6821 	spin_unlock_irqrestore(&ch->lock, flags);
6822 
6823 	stmmac_stop_tx_dma(priv, queue);
6824 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6825 }
6826 
6827 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6828 {
6829 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6830 	struct stmmac_channel *ch = &priv->channel[queue];
6831 	unsigned long flags;
6832 	int ret;
6833 
6834 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6835 	if (ret) {
6836 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6837 		return;
6838 	}
6839 
6840 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6841 	if (ret) {
6842 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6843 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6844 		return;
6845 	}
6846 
6847 	stmmac_reset_tx_queue(priv, queue);
6848 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6849 
6850 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6851 			    tx_q->dma_tx_phy, tx_q->queue_index);
6852 
6853 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6854 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6855 
6856 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6857 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6858 			       tx_q->tx_tail_addr, tx_q->queue_index);
6859 
6860 	stmmac_start_tx_dma(priv, queue);
6861 
6862 	spin_lock_irqsave(&ch->lock, flags);
6863 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6864 	spin_unlock_irqrestore(&ch->lock, flags);
6865 }
6866 
6867 void stmmac_xdp_release(struct net_device *dev)
6868 {
6869 	struct stmmac_priv *priv = netdev_priv(dev);
6870 	u32 chan;
6871 
6872 	/* Ensure tx function is not running */
6873 	netif_tx_disable(dev);
6874 
6875 	/* Disable NAPI process */
6876 	stmmac_disable_all_queues(priv);
6877 
6878 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6879 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6880 
6881 	/* Free the IRQ lines */
6882 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6883 
6884 	/* Stop TX/RX DMA channels */
6885 	stmmac_stop_all_dma(priv);
6886 
6887 	/* Release and free the Rx/Tx resources */
6888 	free_dma_desc_resources(priv, &priv->dma_conf);
6889 
6890 	/* Disable the MAC Rx/Tx */
6891 	stmmac_mac_set(priv, priv->ioaddr, false);
6892 
6893 	/* set trans_start so we don't get spurious
6894 	 * watchdogs during reset
6895 	 */
6896 	netif_trans_update(dev);
6897 	netif_carrier_off(dev);
6898 }
6899 
6900 int stmmac_xdp_open(struct net_device *dev)
6901 {
6902 	struct stmmac_priv *priv = netdev_priv(dev);
6903 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6904 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6905 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6906 	struct stmmac_rx_queue *rx_q;
6907 	struct stmmac_tx_queue *tx_q;
6908 	u32 buf_size;
6909 	bool sph_en;
6910 	u32 chan;
6911 	int ret;
6912 
6913 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6914 	if (ret < 0) {
6915 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6916 			   __func__);
6917 		goto dma_desc_error;
6918 	}
6919 
6920 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6921 	if (ret < 0) {
6922 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6923 			   __func__);
6924 		goto init_error;
6925 	}
6926 
6927 	stmmac_reset_queues_param(priv);
6928 
6929 	/* DMA CSR Channel configuration */
6930 	for (chan = 0; chan < dma_csr_ch; chan++) {
6931 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6932 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6933 	}
6934 
6935 	/* Adjust Split header */
6936 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6937 
6938 	/* DMA RX Channel Configuration */
6939 	for (chan = 0; chan < rx_cnt; chan++) {
6940 		rx_q = &priv->dma_conf.rx_queue[chan];
6941 
6942 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6943 				    rx_q->dma_rx_phy, chan);
6944 
6945 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6946 				     (rx_q->buf_alloc_num *
6947 				      sizeof(struct dma_desc));
6948 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6949 				       rx_q->rx_tail_addr, chan);
6950 
6951 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6952 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6953 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6954 					      buf_size,
6955 					      rx_q->queue_index);
6956 		} else {
6957 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6958 					      priv->dma_conf.dma_buf_sz,
6959 					      rx_q->queue_index);
6960 		}
6961 
6962 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6963 	}
6964 
6965 	/* DMA TX Channel Configuration */
6966 	for (chan = 0; chan < tx_cnt; chan++) {
6967 		tx_q = &priv->dma_conf.tx_queue[chan];
6968 
6969 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6970 				    tx_q->dma_tx_phy, chan);
6971 
6972 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6973 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6974 				       tx_q->tx_tail_addr, chan);
6975 
6976 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6977 	}
6978 
6979 	/* Enable the MAC Rx/Tx */
6980 	stmmac_mac_set(priv, priv->ioaddr, true);
6981 
6982 	/* Start Rx & Tx DMA Channels */
6983 	stmmac_start_all_dma(priv);
6984 
6985 	ret = stmmac_request_irq(dev);
6986 	if (ret)
6987 		goto irq_error;
6988 
6989 	/* Enable NAPI process */
6990 	stmmac_enable_all_queues(priv);
6991 	netif_carrier_on(dev);
6992 	netif_tx_start_all_queues(dev);
6993 	stmmac_enable_all_dma_irq(priv);
6994 
6995 	return 0;
6996 
6997 irq_error:
6998 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6999 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7000 
7001 init_error:
7002 	free_dma_desc_resources(priv, &priv->dma_conf);
7003 dma_desc_error:
7004 	return ret;
7005 }
7006 
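/**
 * stmmac_xsk_wakeup - ndo_xsk_wakeup callback for AF_XDP sockets
 * @dev: network device pointer
 * @queue: queue index to wake up
 * @flags: XDP_WAKEUP_RX and/or XDP_WAKEUP_TX (currently unused)
 * Description: schedule the rx/tx NAPI of the channel so that pending
 * XSK descriptors are processed.
 * Return: 0 on success, -ENETDOWN if the interface is down, -EINVAL if
 * XDP is not enabled, the queue index is out of range or the queue has
 * no XSK pool.
 */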
7007 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7008 {
7009 	struct stmmac_priv *priv = netdev_priv(dev);
7010 	struct stmmac_rx_queue *rx_q;
7011 	struct stmmac_tx_queue *tx_q;
7012 	struct stmmac_channel *ch;
7013 
7014 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7015 	    !netif_carrier_ok(priv->dev))
7016 		return -ENETDOWN;
7017 
7018 	if (!stmmac_xdp_is_enabled(priv))
7019 		return -EINVAL;
7020 
7021 	if (queue >= priv->plat->rx_queues_to_use ||
7022 	    queue >= priv->plat->tx_queues_to_use)
7023 		return -EINVAL;
7024 
7025 	rx_q = &priv->dma_conf.rx_queue[queue];
7026 	tx_q = &priv->dma_conf.tx_queue[queue];
7027 	ch = &priv->channel[queue];
7028 
7029 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7030 		return -EINVAL;
7031 
7032 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7033 		/* EQoS does not have a per-DMA channel SW interrupt,
7034 		 * so we schedule the RX/TX NAPI straight away.
7035 		 */
7036 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7037 			__napi_schedule(&ch->rxtx_napi);
7038 	}
7039 
7040 	return 0;
7041 }
7042 
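/**
 * stmmac_get_stats64 - ndo_get_stats64 callback
 * @dev: network device pointer
 * @stats: structure to be filled with the aggregated statistics
 * Description: sum the per-queue TX/RX packet and byte counters
 * (protected by their u64_stats syncp) and copy the error counters
 * kept in priv->xstats.
 */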
7043 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7044 {
7045 	struct stmmac_priv *priv = netdev_priv(dev);
7046 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7047 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7048 	unsigned int start;
7049 	int q;
7050 
7051 	for (q = 0; q < tx_cnt; q++) {
7052 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7053 		u64 tx_packets;
7054 		u64 tx_bytes;
7055 
7056 		do {
7057 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7058 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7059 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7060 		do {
7061 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7062 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7063 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7064 
7065 		stats->tx_packets += tx_packets;
7066 		stats->tx_bytes += tx_bytes;
7067 	}
7068 
7069 	for (q = 0; q < rx_cnt; q++) {
7070 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7071 		u64 rx_packets;
7072 		u64 rx_bytes;
7073 
7074 		do {
7075 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7076 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7077 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7078 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7079 
7080 		stats->rx_packets += rx_packets;
7081 		stats->rx_bytes += rx_bytes;
7082 	}
7083 
7084 	stats->rx_dropped = priv->xstats.rx_dropped;
7085 	stats->rx_errors = priv->xstats.rx_errors;
7086 	stats->tx_dropped = priv->xstats.tx_dropped;
7087 	stats->tx_errors = priv->xstats.tx_errors;
7088 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7089 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7090 	stats->rx_length_errors = priv->xstats.rx_length;
7091 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7092 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7093 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7094 }
7095 
7096 static const struct net_device_ops stmmac_netdev_ops = {
7097 	.ndo_open = stmmac_open,
7098 	.ndo_start_xmit = stmmac_xmit,
7099 	.ndo_stop = stmmac_release,
7100 	.ndo_change_mtu = stmmac_change_mtu,
7101 	.ndo_fix_features = stmmac_fix_features,
7102 	.ndo_set_features = stmmac_set_features,
7103 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7104 	.ndo_tx_timeout = stmmac_tx_timeout,
7105 	.ndo_eth_ioctl = stmmac_ioctl,
7106 	.ndo_get_stats64 = stmmac_get_stats64,
7107 	.ndo_setup_tc = stmmac_setup_tc,
7108 	.ndo_select_queue = stmmac_select_queue,
7109 	.ndo_set_mac_address = stmmac_set_mac_address,
7110 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7111 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7112 	.ndo_bpf = stmmac_bpf,
7113 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7114 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7115 	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
7116 	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
7117 };
7118 
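/**
 * stmmac_reset_subtask - reset the device if a reset was requested
 * @priv: driver private structure
 * Description: if STMMAC_RESET_REQUESTED is set and the interface is
 * not going down, close and re-open the device under the rtnl lock to
 * recover from a fatal error.
 */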
7119 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7120 {
7121 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7122 		return;
7123 	if (test_bit(STMMAC_DOWN, &priv->state))
7124 		return;
7125 
7126 	netdev_err(priv->dev, "Reset adapter.\n");
7127 
7128 	rtnl_lock();
7129 	netif_trans_update(priv->dev);
7130 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7131 		usleep_range(1000, 2000);
7132 
7133 	set_bit(STMMAC_DOWN, &priv->state);
7134 	dev_close(priv->dev);
7135 	dev_open(priv->dev, NULL);
7136 	clear_bit(STMMAC_DOWN, &priv->state);
7137 	clear_bit(STMMAC_RESETING, &priv->state);
7138 	rtnl_unlock();
7139 }
7140 
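/**
 * stmmac_service_task - deferred work for the driver
 * @work: work_struct embedded in the driver private structure
 * Description: run the reset subtask and clear the service scheduled
 * flag.
 */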
7141 static void stmmac_service_task(struct work_struct *work)
7142 {
7143 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7144 			service_task);
7145 
7146 	stmmac_reset_subtask(priv);
7147 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7148 }
7149 
7150 /**
7151  *  stmmac_hw_init - Init the MAC device
7152  *  @priv: driver private structure
7153  *  Description: this function configures the MAC device according to
7154  *  platform parameters and the HW capability register. It prepares the
7155  *  driver to use either ring or chain mode and either enhanced or
7156  *  normal descriptors.
7157  */
7158 static int stmmac_hw_init(struct stmmac_priv *priv)
7159 {
7160 	int ret;
7161 
7162 	/* dwmac-sun8i only works in chain mode */
7163 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7164 		chain_mode = 1;
7165 	priv->chain_mode = chain_mode;
7166 
7167 	/* Initialize HW Interface */
7168 	ret = stmmac_hwif_init(priv);
7169 	if (ret)
7170 		return ret;
7171 
7172 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7173 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7174 	if (priv->hw_cap_support) {
7175 		dev_info(priv->device, "DMA HW capability register supported\n");
7176 
7177 		/* We can override some gmac/dma configuration fields that
7178 		 * are passed through the platform (e.g. enh_desc, tx_coe)
7179 		 * with the values from the HW capability register (if
7180 		 * supported).
7181 		 */
7182 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7183 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7184 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7185 		if (priv->dma_cap.hash_tb_sz) {
7186 			priv->hw->multicast_filter_bins =
7187 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7188 			priv->hw->mcast_bits_log2 =
7189 					ilog2(priv->hw->multicast_filter_bins);
7190 		}
7191 
7192 		/* TXCOE doesn't work in thresh DMA mode */
7193 		if (priv->plat->force_thresh_dma_mode)
7194 			priv->plat->tx_coe = 0;
7195 		else
7196 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7197 
7198 		/* In case of GMAC4 rx_coe is from HW cap register. */
7199 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7200 
7201 		if (priv->dma_cap.rx_coe_type2)
7202 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7203 		else if (priv->dma_cap.rx_coe_type1)
7204 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7205 
7206 	} else {
7207 		dev_info(priv->device, "No HW DMA feature register supported\n");
7208 	}
7209 
7210 	if (priv->plat->rx_coe) {
7211 		priv->hw->rx_csum = priv->plat->rx_coe;
7212 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7213 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7214 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7215 	}
7216 	if (priv->plat->tx_coe)
7217 		dev_info(priv->device, "TX Checksum insertion supported\n");
7218 
7219 	if (priv->plat->pmt) {
7220 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7221 		device_set_wakeup_capable(priv->device, 1);
7222 		devm_pm_set_wake_irq(priv->device, priv->wol_irq);
7223 	}
7224 
7225 	if (priv->dma_cap.tsoen)
7226 		dev_info(priv->device, "TSO supported\n");
7227 
7228 	if (priv->dma_cap.number_rx_queues &&
7229 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7230 		dev_warn(priv->device,
7231 			 "Number of Rx queues (%u) exceeds dma capability\n",
7232 			 priv->plat->rx_queues_to_use);
7233 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7234 	}
7235 	if (priv->dma_cap.number_tx_queues &&
7236 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7237 		dev_warn(priv->device,
7238 			 "Number of Tx queues (%u) exceeds dma capability\n",
7239 			 priv->plat->tx_queues_to_use);
7240 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7241 	}
7242 
7243 	if (priv->dma_cap.rx_fifo_size &&
7244 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7245 		dev_warn(priv->device,
7246 			 "Rx FIFO size (%u) exceeds dma capability\n",
7247 			 priv->plat->rx_fifo_size);
7248 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7249 	}
7250 	if (priv->dma_cap.tx_fifo_size &&
7251 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7252 		dev_warn(priv->device,
7253 			 "Tx FIFO size (%u) exceeds dma capability\n",
7254 			 priv->plat->tx_fifo_size);
7255 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7256 	}
7257 
7258 	priv->hw->vlan_fail_q_en =
7259 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7260 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7261 
7262 	/* Run HW quirks, if any */
7263 	if (priv->hwif_quirks) {
7264 		ret = priv->hwif_quirks(priv);
7265 		if (ret)
7266 			return ret;
7267 	}
7268 
7269 	/* Rx Watchdog is available in cores newer than 3.40. In some
7270 	 * cases, for example on buggy HW, this feature has to be
7271 	 * disabled; this can be done by setting the riwt_off field in
7272 	 * the platform data.
7273 	 */
7274 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7275 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7276 		priv->use_riwt = 1;
7277 		dev_info(priv->device,
7278 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7279 	}
7280 
7281 	return 0;
7282 }
7283 
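/**
 * stmmac_napi_add - register the NAPI instances of every channel
 * @dev: network device pointer
 * Description: for each channel, initialize the lock and register the
 * RX, TX and RX/TX NAPI handlers according to the number of RX and TX
 * queues in use.
 */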
7284 static void stmmac_napi_add(struct net_device *dev)
7285 {
7286 	struct stmmac_priv *priv = netdev_priv(dev);
7287 	u32 queue, maxq;
7288 
7289 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7290 
7291 	for (queue = 0; queue < maxq; queue++) {
7292 		struct stmmac_channel *ch = &priv->channel[queue];
7293 
7294 		ch->priv_data = priv;
7295 		ch->index = queue;
7296 		spin_lock_init(&ch->lock);
7297 
7298 		if (queue < priv->plat->rx_queues_to_use) {
7299 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7300 		}
7301 		if (queue < priv->plat->tx_queues_to_use) {
7302 			netif_napi_add_tx(dev, &ch->tx_napi,
7303 					  stmmac_napi_poll_tx);
7304 		}
7305 		if (queue < priv->plat->rx_queues_to_use &&
7306 		    queue < priv->plat->tx_queues_to_use) {
7307 			netif_napi_add(dev, &ch->rxtx_napi,
7308 				       stmmac_napi_poll_rxtx);
7309 		}
7310 	}
7311 }
7312 
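/**
 * stmmac_napi_del - unregister the NAPI instances of every channel
 * @dev: network device pointer
 */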
7313 static void stmmac_napi_del(struct net_device *dev)
7314 {
7315 	struct stmmac_priv *priv = netdev_priv(dev);
7316 	u32 queue, maxq;
7317 
7318 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7319 
7320 	for (queue = 0; queue < maxq; queue++) {
7321 		struct stmmac_channel *ch = &priv->channel[queue];
7322 
7323 		if (queue < priv->plat->rx_queues_to_use)
7324 			netif_napi_del(&ch->rx_napi);
7325 		if (queue < priv->plat->tx_queues_to_use)
7326 			netif_napi_del(&ch->tx_napi);
7327 		if (queue < priv->plat->rx_queues_to_use &&
7328 		    queue < priv->plat->tx_queues_to_use) {
7329 			netif_napi_del(&ch->rxtx_napi);
7330 		}
7331 	}
7332 }
7333 
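/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: network device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: close the interface if it is running, re-register the
 * NAPI instances for the new queue counts, refresh the default RSS
 * table (unless user-configured) and re-open the interface.
 * Return: 0 on success, otherwise the error returned by stmmac_open().
 */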
7334 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7335 {
7336 	struct stmmac_priv *priv = netdev_priv(dev);
7337 	int ret = 0, i;
7338 
7339 	if (netif_running(dev))
7340 		stmmac_release(dev);
7341 
7342 	stmmac_napi_del(dev);
7343 
7344 	priv->plat->rx_queues_to_use = rx_cnt;
7345 	priv->plat->tx_queues_to_use = tx_cnt;
7346 	if (!netif_is_rxfh_configured(dev))
7347 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7348 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7349 									rx_cnt);
7350 
7351 	stmmac_napi_add(dev);
7352 
7353 	if (netif_running(dev))
7354 		ret = stmmac_open(dev);
7355 
7356 	return ret;
7357 }
7358 
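/**
 * stmmac_reinit_ringparam - change the RX/TX ring sizes
 * @dev: network device pointer
 * @rx_size: new number of RX descriptors per ring
 * @tx_size: new number of TX descriptors per ring
 * Description: close the interface if it is running, update the DMA
 * ring sizes and re-open the interface.
 * Return: 0 on success, otherwise the error returned by stmmac_open().
 */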
7359 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7360 {
7361 	struct stmmac_priv *priv = netdev_priv(dev);
7362 	int ret = 0;
7363 
7364 	if (netif_running(dev))
7365 		stmmac_release(dev);
7366 
7367 	priv->dma_conf.dma_rx_size = rx_size;
7368 	priv->dma_conf.dma_tx_size = tx_size;
7369 
7370 	if (netif_running(dev))
7371 		ret = stmmac_open(dev);
7372 
7373 	return ret;
7374 }
7375 
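/**
 * stmmac_xdp_rx_timestamp - xmo_rx_timestamp metadata callback
 * @_ctx: XDP metadata context (struct stmmac_xdp_buff)
 * @timestamp: where to store the RX hardware timestamp
 * Description: read the RX timestamp from the descriptor (or from the
 * context descriptor on GMAC4/XGMAC), apply the CDC error adjustment
 * and return it to the XDP program.
 * Return: 0 on success, -ENODATA if RX timestamping is disabled or no
 * timestamp is available.
 */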
7376 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7377 {
7378 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7379 	struct dma_desc *desc_contains_ts = ctx->desc;
7380 	struct stmmac_priv *priv = ctx->priv;
7381 	struct dma_desc *ndesc = ctx->ndesc;
7382 	struct dma_desc *desc = ctx->desc;
7383 	u64 ns = 0;
7384 
7385 	if (!priv->hwts_rx_en)
7386 		return -ENODATA;
7387 
7388 	/* For GMAC4, the valid timestamp is held in the context (next) descriptor. */
7389 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7390 		desc_contains_ts = ndesc;
7391 
7392 	/* Check if timestamp is available */
7393 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7394 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7395 		ns -= priv->plat->cdc_error_adj;
7396 		*timestamp = ns_to_ktime(ns);
7397 		return 0;
7398 	}
7399 
7400 	return -ENODATA;
7401 }
7402 
7403 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7404 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7405 };
7406 
7407 /**
7408  * stmmac_dvr_probe
7409  * @device: device pointer
7410  * @plat_dat: platform data pointer
7411  * @res: stmmac resource pointer
7412  * Description: this is the main probe function; it allocates the
7413  * network device and the private structure, initializes the HW and
7414  * registers the network device.
7415  * Return: 0 on success, otherwise a negative errno.
7416  */
7417 int stmmac_dvr_probe(struct device *device,
7418 		     struct plat_stmmacenet_data *plat_dat,
7419 		     struct stmmac_resources *res)
7420 {
7421 	struct net_device *ndev = NULL;
7422 	struct stmmac_priv *priv;
7423 	u32 rxq;
7424 	int i, ret = 0;
7425 
7426 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7427 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7428 	if (!ndev)
7429 		return -ENOMEM;
7430 
7431 	SET_NETDEV_DEV(ndev, device);
7432 
7433 	priv = netdev_priv(ndev);
7434 	priv->device = device;
7435 	priv->dev = ndev;
7436 
7437 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7438 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7439 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7440 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7441 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7442 	}
7443 
7444 	priv->xstats.pcpu_stats =
7445 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7446 	if (!priv->xstats.pcpu_stats)
7447 		return -ENOMEM;
7448 
7449 	stmmac_set_ethtool_ops(ndev);
7450 	priv->pause_time = pause;
7451 	priv->plat = plat_dat;
7452 	priv->ioaddr = res->addr;
7453 	priv->dev->base_addr = (unsigned long)res->addr;
7454 	priv->plat->dma_cfg->multi_msi_en =
7455 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7456 
7457 	priv->dev->irq = res->irq;
7458 	priv->wol_irq = res->wol_irq;
7459 	priv->lpi_irq = res->lpi_irq;
7460 	priv->sfty_irq = res->sfty_irq;
7461 	priv->sfty_ce_irq = res->sfty_ce_irq;
7462 	priv->sfty_ue_irq = res->sfty_ue_irq;
7463 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7464 		priv->rx_irq[i] = res->rx_irq[i];
7465 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7466 		priv->tx_irq[i] = res->tx_irq[i];
7467 
7468 	if (!is_zero_ether_addr(res->mac))
7469 		eth_hw_addr_set(priv->dev, res->mac);
7470 
7471 	dev_set_drvdata(device, priv->dev);
7472 
7473 	/* Verify driver arguments */
7474 	stmmac_verify_args();
7475 
7476 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7477 	if (!priv->af_xdp_zc_qps)
7478 		return -ENOMEM;
7479 
7480 	/* Allocate workqueue */
7481 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7482 	if (!priv->wq) {
7483 		dev_err(priv->device, "failed to create workqueue\n");
7484 		ret = -ENOMEM;
7485 		goto error_wq_init;
7486 	}
7487 
7488 	INIT_WORK(&priv->service_task, stmmac_service_task);
7489 
7490 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7491 
7492 	/* Override with kernel parameters if supplied. XXX CRS XXX:
7493 	 * this needs to support multiple instances.
7494 	 */
7495 	if ((phyaddr >= 0) && (phyaddr <= 31))
7496 		priv->plat->phy_addr = phyaddr;
7497 
7498 	if (priv->plat->stmmac_rst) {
7499 		ret = reset_control_assert(priv->plat->stmmac_rst);
7500 		reset_control_deassert(priv->plat->stmmac_rst);
7501 		/* Some reset controllers only provide a reset callback
7502 		 * instead of the assert + deassert callback pair.
7503 		 */
7504 		if (ret == -ENOTSUPP)
7505 			reset_control_reset(priv->plat->stmmac_rst);
7506 	}
7507 
7508 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7509 	if (ret == -ENOTSUPP)
7510 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7511 			ERR_PTR(ret));
7512 
7513 	/* Wait a bit for the reset to take effect */
7514 	udelay(10);
7515 
7516 	/* Init MAC and get the capabilities */
7517 	ret = stmmac_hw_init(priv);
7518 	if (ret)
7519 		goto error_hw_init;
7520 
7521 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7522 	 */
7523 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7524 		priv->plat->dma_cfg->dche = false;
7525 
7526 	stmmac_check_ether_addr(priv);
7527 
7528 	ndev->netdev_ops = &stmmac_netdev_ops;
7529 
7530 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7531 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7532 
7533 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7534 			    NETIF_F_RXCSUM;
7535 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7536 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7537 
7538 	ret = stmmac_tc_init(priv, priv);
7539 	if (!ret)
7540 		ndev->hw_features |= NETIF_F_HW_TC;
7542 
7543 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7544 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7545 		if (priv->plat->has_gmac4)
7546 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7547 		priv->tso = true;
7548 		dev_info(priv->device, "TSO feature enabled\n");
7549 	}
7550 
7551 	if (priv->dma_cap.sphen &&
7552 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7553 		ndev->hw_features |= NETIF_F_GRO;
7554 		priv->sph_cap = true;
7555 		priv->sph = priv->sph_cap;
7556 		dev_info(priv->device, "SPH feature enabled\n");
7557 	}
7558 
7559 	/* Ideally our host DMA address width is the same as for the
7560 	 * device. However, it may differ and then we have to use our
7561 	 * host DMA width for allocation and the device DMA width for
7562 	 * register handling.
7563 	 */
7564 	if (priv->plat->host_dma_width)
7565 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7566 	else
7567 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7568 
7569 	if (priv->dma_cap.host_dma_width) {
7570 		ret = dma_set_mask_and_coherent(device,
7571 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7572 		if (!ret) {
7573 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7574 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7575 
7576 			/*
7577 			 * If more than 32 bits can be addressed, make sure to
7578 			 * enable enhanced addressing mode.
7579 			 */
7580 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7581 				priv->plat->dma_cfg->eame = true;
7582 		} else {
7583 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7584 			if (ret) {
7585 				dev_err(priv->device, "Failed to set DMA Mask\n");
7586 				goto error_hw_init;
7587 			}
7588 
7589 			priv->dma_cap.host_dma_width = 32;
7590 		}
7591 	}
7592 
7593 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7594 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7595 #ifdef STMMAC_VLAN_TAG_USED
7596 	/* Both mac100 and gmac support receive VLAN tag detection */
7597 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7598 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
7599 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7600 		priv->hw->hw_vlan_en = true;
7601 	}
7602 	if (priv->dma_cap.vlhash) {
7603 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7604 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7605 	}
7606 	if (priv->dma_cap.vlins) {
7607 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7608 		if (priv->dma_cap.dvlan)
7609 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7610 	}
7611 #endif
7612 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7613 
7614 	priv->xstats.threshold = tc;
7615 
7616 	/* Initialize RSS */
7617 	rxq = priv->plat->rx_queues_to_use;
7618 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7619 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7620 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7621 
7622 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7623 		ndev->features |= NETIF_F_RXHASH;
7624 
7625 	ndev->vlan_features |= ndev->features;
7626 
7627 	/* MTU range: 46 - hw-specific max */
7628 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7629 	if (priv->plat->has_xgmac)
7630 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7631 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7632 		ndev->max_mtu = JUMBO_LEN;
7633 	else
7634 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7635 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7636 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7637 	 */
7638 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7639 	    (priv->plat->maxmtu >= ndev->min_mtu))
7640 		ndev->max_mtu = priv->plat->maxmtu;
7641 	else if (priv->plat->maxmtu < ndev->min_mtu)
7642 		dev_warn(priv->device,
7643 			 "%s: warning: maxmtu having invalid value (%d)\n",
7644 			 __func__, priv->plat->maxmtu);
7645 
7646 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7647 
7648 	/* Setup channels NAPI */
7649 	stmmac_napi_add(ndev);
7650 
7651 	mutex_init(&priv->lock);
7652 
7653 	stmmac_fpe_init(priv);
7654 
7655 	stmmac_check_pcs_mode(priv);
7656 
7657 	pm_runtime_get_noresume(device);
7658 	pm_runtime_set_active(device);
7659 	if (!pm_runtime_enabled(device))
7660 		pm_runtime_enable(device);
7661 
7662 	ret = stmmac_mdio_register(ndev);
7663 	if (ret < 0) {
7664 		dev_err_probe(priv->device, ret,
7665 			      "MDIO bus (id: %d) registration failed\n",
7666 			      priv->plat->bus_id);
7667 		goto error_mdio_register;
7668 	}
7669 
7670 	ret = stmmac_pcs_setup(ndev);
7671 	if (ret)
7672 		goto error_pcs_setup;
7673 
7674 	ret = stmmac_phy_setup(priv);
7675 	if (ret) {
7676 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7677 		goto error_phy_setup;
7678 	}
7679 
7680 	ret = register_netdev(ndev);
7681 	if (ret) {
7682 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7683 			__func__, ret);
7684 		goto error_netdev_register;
7685 	}
7686 
7687 #ifdef CONFIG_DEBUG_FS
7688 	stmmac_init_fs(ndev);
7689 #endif
7690 
7691 	if (priv->plat->dump_debug_regs)
7692 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7693 
7694 	/* Let pm_runtime_put() disable the clocks.
7695 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7696 	 */
7697 	pm_runtime_put(device);
7698 
7699 	return ret;
7700 
7701 error_netdev_register:
7702 	phylink_destroy(priv->phylink);
7703 error_phy_setup:
7704 	stmmac_pcs_clean(ndev);
7705 error_pcs_setup:
7706 	stmmac_mdio_unregister(ndev);
7707 error_mdio_register:
7708 	stmmac_napi_del(ndev);
7709 error_hw_init:
7710 	destroy_workqueue(priv->wq);
7711 error_wq_init:
7712 	bitmap_free(priv->af_xdp_zc_qps);
7713 
7714 	return ret;
7715 }
7716 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7717 
7718 /**
7719  * stmmac_dvr_remove
7720  * @dev: device pointer
7721  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7722  * changes the link status and releases the DMA descriptor rings.
7723  */
7724 void stmmac_dvr_remove(struct device *dev)
7725 {
7726 	struct net_device *ndev = dev_get_drvdata(dev);
7727 	struct stmmac_priv *priv = netdev_priv(ndev);
7728 
7729 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7730 
7731 	pm_runtime_get_sync(dev);
7732 
7733 	unregister_netdev(ndev);
7734 
7735 #ifdef CONFIG_DEBUG_FS
7736 	stmmac_exit_fs(ndev);
7737 #endif
7738 	phylink_destroy(priv->phylink);
7739 	if (priv->plat->stmmac_rst)
7740 		reset_control_assert(priv->plat->stmmac_rst);
7741 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7742 
7743 	stmmac_pcs_clean(ndev);
7744 	stmmac_mdio_unregister(ndev);
7745 
7746 	destroy_workqueue(priv->wq);
7747 	mutex_destroy(&priv->lock);
7748 	bitmap_free(priv->af_xdp_zc_qps);
7749 
7750 	pm_runtime_disable(dev);
7751 	pm_runtime_put_noidle(dev);
7752 }
7753 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7754 
7755 /**
7756  * stmmac_suspend - suspend callback
7757  * @dev: device pointer
7758  * Description: this function suspends the device. It is called by the
7759  * platform driver to stop the network queues, program the PMT register
7760  * (for WoL) and release the driver resources.
7761  */
7762 int stmmac_suspend(struct device *dev)
7763 {
7764 	struct net_device *ndev = dev_get_drvdata(dev);
7765 	struct stmmac_priv *priv = netdev_priv(ndev);
7766 	u32 chan;
7767 
7768 	if (!ndev || !netif_running(ndev))
7769 		return 0;
7770 
7771 	mutex_lock(&priv->lock);
7772 
7773 	netif_device_detach(ndev);
7774 
7775 	stmmac_disable_all_queues(priv);
7776 
7777 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7778 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7779 
7780 	if (priv->eee_sw_timer_en) {
7781 		priv->tx_path_in_lpi_mode = false;
7782 		timer_delete_sync(&priv->eee_ctrl_timer);
7783 	}
7784 
7785 	/* Stop TX/RX DMA */
7786 	stmmac_stop_all_dma(priv);
7787 
7788 	if (priv->plat->serdes_powerdown)
7789 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7790 
7791 	/* Enable Power down mode by programming the PMT regs */
7792 	if (stmmac_wol_enabled_mac(priv)) {
7793 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7794 		priv->irq_wake = 1;
7795 	} else {
7796 		stmmac_mac_set(priv, priv->ioaddr, false);
7797 		pinctrl_pm_select_sleep_state(priv->device);
7798 	}
7799 
7800 	mutex_unlock(&priv->lock);
7801 
7802 	rtnl_lock();
7803 	if (stmmac_wol_enabled_phy(priv))
7804 		phylink_speed_down(priv->phylink, false);
7805 
7806 	phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv));
7807 	rtnl_unlock();
7808 
7809 	if (stmmac_fpe_supported(priv))
7810 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
7811 
7812 	if (priv->plat->suspend)
7813 		return priv->plat->suspend(dev, priv->plat->bsp_priv);
7814 
7815 	return 0;
7816 }
7817 EXPORT_SYMBOL_GPL(stmmac_suspend);
7818 
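/* Reset the software state (ring indices) of an RX queue */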
7819 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7820 {
7821 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7822 
7823 	rx_q->cur_rx = 0;
7824 	rx_q->dirty_rx = 0;
7825 }
7826 
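/* Reset the software state (ring indices, MSS) of a TX queue and its BQL queue */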
7827 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7828 {
7829 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7830 
7831 	tx_q->cur_tx = 0;
7832 	tx_q->dirty_tx = 0;
7833 	tx_q->mss = 0;
7834 
7835 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7836 }
7837 
7838 /**
7839  * stmmac_reset_queues_param - reset queue parameters
7840  * @priv: device pointer
7841  */
7842 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7843 {
7844 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7845 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7846 	u32 queue;
7847 
7848 	for (queue = 0; queue < rx_cnt; queue++)
7849 		stmmac_reset_rx_queue(priv, queue);
7850 
7851 	for (queue = 0; queue < tx_cnt; queue++)
7852 		stmmac_reset_tx_queue(priv, queue);
7853 }
7854 
7855 /**
7856  * stmmac_resume - resume callback
7857  * @dev: device pointer
7858  * Description: on resume, this function is invoked to set up the DMA and the
7859  * core in a usable state.
7860  */
7861 int stmmac_resume(struct device *dev)
7862 {
7863 	struct net_device *ndev = dev_get_drvdata(dev);
7864 	struct stmmac_priv *priv = netdev_priv(ndev);
7865 	int ret;
7866 
7867 	if (priv->plat->resume) {
7868 		ret = priv->plat->resume(dev, priv->plat->bsp_priv);
7869 		if (ret)
7870 			return ret;
7871 	}
7872 
7873 	if (!netif_running(ndev))
7874 		return 0;
7875 
7876 	/* The Power Down bit in the PMT register is cleared
7877 	 * automatically as soon as a magic packet or a Wake-up frame
7878 	 * is received. Anyway, it's better to clear this bit manually
7879 	 * because it can cause problems when resuming from another
7880 	 * device (e.g. serial console).
7881 	 */
7882 	if (stmmac_wol_enabled_mac(priv)) {
7883 		mutex_lock(&priv->lock);
7884 		stmmac_pmt(priv, priv->hw, 0);
7885 		mutex_unlock(&priv->lock);
7886 		priv->irq_wake = 0;
7887 	} else {
7888 		pinctrl_pm_select_default_state(priv->device);
7889 		/* reset the phy so that it's ready */
7890 		if (priv->mii)
7891 			stmmac_mdio_reset(priv->mii);
7892 	}
7893 
7894 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7895 	    priv->plat->serdes_powerup) {
7896 		ret = priv->plat->serdes_powerup(ndev,
7897 						 priv->plat->bsp_priv);
7898 
7899 		if (ret < 0)
7900 			return ret;
7901 	}
7902 
7903 	rtnl_lock();
7904 
7905 	/* Prepare the PHY to resume, ensuring that the clocks it needs
7906 	 * for the MAC DMA reset to complete are running.
7907 	 */
7908 	phylink_prepare_resume(priv->phylink);
7909 
7910 	mutex_lock(&priv->lock);
7911 
7912 	stmmac_reset_queues_param(priv);
7913 
7914 	stmmac_free_tx_skbufs(priv);
7915 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7916 
7917 	ret = stmmac_hw_setup(ndev);
7918 	if (ret < 0) {
7919 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
7920 		mutex_unlock(&priv->lock);
7921 		rtnl_unlock();
7922 		return ret;
7923 	}
7924 
7925 	stmmac_init_timestamping(priv);
7926 
7927 	stmmac_init_coalesce(priv);
7928 	phylink_rx_clk_stop_block(priv->phylink);
7929 	stmmac_set_rx_mode(ndev);
7930 
7931 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7932 	phylink_rx_clk_stop_unblock(priv->phylink);
7933 
7934 	stmmac_enable_all_queues(priv);
7935 	stmmac_enable_all_dma_irq(priv);
7936 
7937 	mutex_unlock(&priv->lock);
7938 
7939 	/* phylink_resume() must be called after the hardware has been
7940 	 * initialised because it may bring the link up immediately in a
7941 	 * workqueue thread, which will race with initialisation.
7942 	 */
7943 	phylink_resume(priv->phylink);
7944 	if (stmmac_wol_enabled_phy(priv))
7945 		phylink_speed_up(priv->phylink);
7946 
7947 	rtnl_unlock();
7948 
7949 	netif_device_attach(ndev);
7950 
7951 	return 0;
7952 }
7953 EXPORT_SYMBOL_GPL(stmmac_resume);
7954 
7955 /* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
7956 DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
7957 EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
7958 
7959 #ifndef MODULE
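/* Parse the "stmmaceth=" kernel command line option, e.g.
 * stmmaceth=debug:16,phyaddr:1,watchdog:5000
 * The recognized keys match the module parameters (debug, phyaddr, tc,
 * watchdog, flow_ctrl, pause, eee_timer, chain_mode).
 */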
7960 static int __init stmmac_cmdline_opt(char *str)
7961 {
7962 	char *opt;
7963 
7964 	if (!str || !*str)
7965 		return 1;
7966 	while ((opt = strsep(&str, ",")) != NULL) {
7967 		if (!strncmp(opt, "debug:", 6)) {
7968 			if (kstrtoint(opt + 6, 0, &debug))
7969 				goto err;
7970 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7971 			if (kstrtoint(opt + 8, 0, &phyaddr))
7972 				goto err;
7973 		} else if (!strncmp(opt, "tc:", 3)) {
7974 			if (kstrtoint(opt + 3, 0, &tc))
7975 				goto err;
7976 		} else if (!strncmp(opt, "watchdog:", 9)) {
7977 			if (kstrtoint(opt + 9, 0, &watchdog))
7978 				goto err;
7979 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7980 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7981 				goto err;
7982 		} else if (!strncmp(opt, "pause:", 6)) {
7983 			if (kstrtoint(opt + 6, 0, &pause))
7984 				goto err;
7985 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7986 			if (kstrtoint(opt + 10, 0, &eee_timer))
7987 				goto err;
7988 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7989 			if (kstrtoint(opt + 11, 0, &chain_mode))
7990 				goto err;
7991 		}
7992 	}
7993 	return 1;
7994 
7995 err:
7996 	pr_err("%s: ERROR: broken module parameter conversion\n", __func__);
7997 	return 1;
7998 }
7999 
8000 __setup("stmmaceth=", stmmac_cmdline_opt);
8001 #endif /* MODULE */
8002 
8003 static int __init stmmac_init(void)
8004 {
8005 #ifdef CONFIG_DEBUG_FS
8006 	/* Create debugfs main directory if it doesn't exist yet */
8007 	if (!stmmac_fs_dir)
8008 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8009 	register_netdevice_notifier(&stmmac_notifier);
8010 #endif
8011 
8012 	return 0;
8013 }
8014 
8015 static void __exit stmmac_exit(void)
8016 {
8017 #ifdef CONFIG_DEBUG_FS
8018 	unregister_netdevice_notifier(&stmmac_notifier);
8019 	debugfs_remove_recursive(stmmac_fs_dir);
8020 #endif
8021 }
8022 
8023 module_init(stmmac_init)
8024 module_exit(stmmac_exit)
8025 
8026 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8027 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8028 MODULE_LICENSE("GPL");
8029