xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision cc2f08129925b437bf28f7f7822f20dac083a87c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/pm_wakeirq.h>
33 #include <linux/prefetch.h>
34 #include <linux/pinctrl/consumer.h>
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38 #endif /* CONFIG_DEBUG_FS */
39 #include <linux/net_tstamp.h>
40 #include <linux/phylink.h>
41 #include <linux/udp.h>
42 #include <linux/bpf_trace.h>
43 #include <net/page_pool/helpers.h>
44 #include <net/pkt_cls.h>
45 #include <net/xdp_sock_drv.h>
46 #include "stmmac_ptp.h"
47 #include "stmmac_fpe.h"
48 #include "stmmac.h"
49 #include "stmmac_xdp.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 #include "dwxgmac2.h"
54 #include "hwif.h"
55 
56 /* As long as the interface is active, we keep the timestamping counter enabled
57  * with fine resolution and binary rollover. This avoids non-monotonic behavior
58  * (clock jumps) when changing timestamping settings at runtime.
59  */
60 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
61 				 PTP_TCR_TSCTRLSSR)
62 
63 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
64 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
65 
66 /* Module parameters */
67 #define TX_TIMEO	5000
68 static int watchdog = TX_TIMEO;
69 module_param(watchdog, int, 0644);
70 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
71 
72 static int debug = -1;
73 module_param(debug, int, 0644);
74 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
75 
76 static int phyaddr = -1;
77 module_param(phyaddr, int, 0444);
78 MODULE_PARM_DESC(phyaddr, "Physical device address");
79 
80 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
81 
82 /* Limit to make sure XDP TX and slow path can coexist */
83 #define STMMAC_XSK_TX_BUDGET_MAX	256
84 #define STMMAC_TX_XSK_AVAIL		16
85 #define STMMAC_RX_FILL_BATCH		16
86 
87 #define STMMAC_XDP_PASS		0
88 #define STMMAC_XDP_CONSUMED	BIT(0)
89 #define STMMAC_XDP_TX		BIT(1)
90 #define STMMAC_XDP_REDIRECT	BIT(2)
91 
92 static int flow_ctrl = 0xdead;
93 module_param(flow_ctrl, int, 0644);
94 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
95 
96 static int pause = PAUSE_TIME;
97 module_param(pause, int, 0644);
98 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
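/* Note: the pause time is programmed in units of 512 bit times, so the
 * wall-clock duration depends on the link speed. For example, the maximum
 * value of 0xffff corresponds to roughly 65535 * 512ns ~= 33.5ms at 1Gbps,
 * and about ten times that at 100Mbps.
 */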
99 
100 #define TC_DEFAULT 64
101 static int tc = TC_DEFAULT;
102 module_param(tc, int, 0644);
103 MODULE_PARM_DESC(tc, "DMA threshold control value");
104 
105 /* This is unused */
106 #define	DEFAULT_BUFSIZE	1536
107 static int buf_sz = DEFAULT_BUFSIZE;
108 module_param(buf_sz, int, 0644);
109 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, uint, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122  * but the user can force the use of chain mode instead of ring mode
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 /**
151  * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
152  * @bsp_priv: BSP private data structure (unused)
153  * @clk_tx_i: the transmit clock
154  * @interface: the selected interface mode
155  * @speed: the speed that the MAC will be operating at
156  *
157  * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
158  * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
159  * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
160  * the plat_data->set_clk_tx_rate method directly, call it via their own
161  * implementation, or implement their own method should they have more
162  * complex requirements. It is intended to only be used in this method.
163  *
164  * plat_data->clk_tx_i must be filled in.
165  */
166 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
167 			   phy_interface_t interface, int speed)
168 {
169 	long rate = rgmii_clock(speed);
170 
171 	/* Silently ignore unsupported speeds as rgmii_clock() only
172 	 * supports 10, 100 and 1000Mbps. We do not want to spit
173 	 * errors for 2500 and higher speeds here.
174 	 */
175 	if (rate < 0)
176 		return 0;
177 
178 	return clk_set_rate(clk_tx_i, rate);
179 }
180 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
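/* A minimal sketch of how platform glue might hook the generic helper above;
 * the "tx" clock name and the pdev/plat_dat variables are illustrative and
 * platform specific:
 *
 *	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
 *	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
 *
 * Platforms with more complex requirements can instead point
 * set_clk_tx_rate at their own implementation, as the kerneldoc above notes.
 */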
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((pause < 0) || (pause > 0xffff)))
192 		pause = PAUSE_TIME;
193 
194 	if (flow_ctrl != 0xdead)
195 		pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
196 }
197 
198 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
199 {
200 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
201 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
202 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
203 	u32 queue;
204 
205 	for (queue = 0; queue < maxq; queue++) {
206 		struct stmmac_channel *ch = &priv->channel[queue];
207 
208 		if (stmmac_xdp_is_enabled(priv) &&
209 		    test_bit(queue, priv->af_xdp_zc_qps)) {
210 			napi_disable(&ch->rxtx_napi);
211 			continue;
212 		}
213 
214 		if (queue < rx_queues_cnt)
215 			napi_disable(&ch->rx_napi);
216 		if (queue < tx_queues_cnt)
217 			napi_disable(&ch->tx_napi);
218 	}
219 }
220 
221 /**
222  * stmmac_disable_all_queues - Disable all queues
223  * @priv: driver private structure
224  */
225 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
226 {
227 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
228 	struct stmmac_rx_queue *rx_q;
229 	u32 queue;
230 
231 	/* synchronize_rcu() needed for pending XDP buffers to drain */
232 	for (queue = 0; queue < rx_queues_cnt; queue++) {
233 		rx_q = &priv->dma_conf.rx_queue[queue];
234 		if (rx_q->xsk_pool) {
235 			synchronize_rcu();
236 			break;
237 		}
238 	}
239 
240 	__stmmac_disable_all_queues(priv);
241 }
242 
243 /**
244  * stmmac_enable_all_queues - Enable all queues
245  * @priv: driver private structure
246  */
247 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
248 {
249 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
250 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
251 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
252 	u32 queue;
253 
254 	for (queue = 0; queue < maxq; queue++) {
255 		struct stmmac_channel *ch = &priv->channel[queue];
256 
257 		if (stmmac_xdp_is_enabled(priv) &&
258 		    test_bit(queue, priv->af_xdp_zc_qps)) {
259 			napi_enable(&ch->rxtx_napi);
260 			continue;
261 		}
262 
263 		if (queue < rx_queues_cnt)
264 			napi_enable(&ch->rx_napi);
265 		if (queue < tx_queues_cnt)
266 			napi_enable(&ch->tx_napi);
267 	}
268 }
269 
270 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
271 {
272 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
273 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
274 		queue_work(priv->wq, &priv->service_task);
275 }
276 
277 static void stmmac_global_err(struct stmmac_priv *priv)
278 {
279 	netif_carrier_off(priv->dev);
280 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
281 	stmmac_service_event_schedule(priv);
282 }
283 
284 static void print_pkt(unsigned char *buf, int len)
285 {
286 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
287 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
288 }
289 
290 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
291 {
292 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
293 	u32 avail;
294 
295 	if (tx_q->dirty_tx > tx_q->cur_tx)
296 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
297 	else
298 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
299 
300 	return avail;
301 }
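/* Worked example of the wrap-around arithmetic above, assuming a ring of
 * dma_tx_size = 512 entries: with cur_tx = 500 and dirty_tx = 10 the ring
 * has wrapped, so avail = 512 - 500 + 10 - 1 = 21 free descriptors. The
 * "- 1" keeps one descriptor unused so that cur_tx == dirty_tx always
 * means "ring empty" rather than "ring full".
 */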
302 
303 /**
304  * stmmac_rx_dirty - Get RX queue dirty
305  * @priv: driver private structure
306  * @queue: RX queue index
307  */
308 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
309 {
310 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
311 	u32 dirty;
312 
313 	if (rx_q->dirty_rx <= rx_q->cur_rx)
314 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
315 	else
316 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
317 
318 	return dirty;
319 }
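/* Worked example, again assuming dma_rx_size = 512: with cur_rx = 40 and
 * dirty_rx = 300 the ring has wrapped, so dirty = 512 - 300 + 40 = 252
 * descriptors are waiting to be refilled and handed back to the hardware.
 */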
320 
321 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
322 {
323 	u32 tx_cnt = priv->plat->tx_queues_to_use;
324 	u32 queue;
325 
326 	/* check whether all TX queues have finished their work */
327 	for (queue = 0; queue < tx_cnt; queue++) {
328 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
329 
330 		if (tx_q->dirty_tx != tx_q->cur_tx)
331 			return true; /* still unfinished work */
332 	}
333 
334 	return false;
335 }
336 
337 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
338 {
339 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
340 }
341 
342 /**
343  * stmmac_try_to_start_sw_lpi - check and enter LPI mode
344  * @priv: driver private structure
345  * Description: this function checks whether the TX path is idle and, if
346  * so, enters LPI mode when EEE is enabled.
347  */
348 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
349 {
350 	if (stmmac_eee_tx_busy(priv)) {
351 		stmmac_restart_sw_lpi_timer(priv);
352 		return;
353 	}
354 
355 	/* Check and enter LPI mode */
356 	if (!priv->tx_path_in_lpi_mode)
357 		stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
358 				    priv->tx_lpi_clk_stop, 0);
359 }
360 
361 /**
362  * stmmac_stop_sw_lpi - stop transmitting LPI
363  * @priv: driver private structure
364  * Description: When using software-controlled LPI, stop transmitting LPI state.
365  */
366 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
367 {
368 	timer_delete_sync(&priv->eee_ctrl_timer);
369 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
370 	priv->tx_path_in_lpi_mode = false;
371 }
372 
373 /**
374  * stmmac_eee_ctrl_timer - EEE TX SW timer.
375  * @t:  timer_list struct containing private info
376  * Description:
377  *  if there is no data transfer and we are not already in the LPI state,
378  *  then the MAC transmitter can be moved to the LPI state.
379  */
380 static void stmmac_eee_ctrl_timer(struct timer_list *t)
381 {
382 	struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
383 
384 	stmmac_try_to_start_sw_lpi(priv);
385 }
386 
387 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
388  * @priv: driver private structure
389  * @p : descriptor pointer
390  * @skb : the socket buffer
391  * Description :
392  * This function reads the timestamp from the descriptor, performs some
393  * sanity checks and passes it to the stack.
394  */
395 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
396 				   struct dma_desc *p, struct sk_buff *skb)
397 {
398 	struct skb_shared_hwtstamps shhwtstamp;
399 	bool found = false;
400 	u64 ns = 0;
401 
402 	if (!priv->hwts_tx_en)
403 		return;
404 
405 	/* exit if skb doesn't support hw tstamp */
406 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
407 		return;
408 
409 	/* check tx tstamp status */
410 	if (stmmac_get_tx_timestamp_status(priv, p)) {
411 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
412 		found = true;
413 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
414 		found = true;
415 	}
416 
417 	if (found) {
418 		ns -= priv->plat->cdc_error_adj;
419 
420 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
421 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
422 
423 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
424 		/* pass tstamp to stack */
425 		skb_tstamp_tx(skb, &shhwtstamp);
426 	}
427 }
428 
429 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
430  * @priv: driver private structure
431  * @p : descriptor pointer
432  * @np : next descriptor pointer
433  * @skb : the socket buffer
434  * Description :
435  * This function reads the received packet's timestamp from the descriptor
436  * and passes it to the stack. It also performs some sanity checks.
437  */
438 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
439 				   struct dma_desc *np, struct sk_buff *skb)
440 {
441 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
442 	struct dma_desc *desc = p;
443 	u64 ns = 0;
444 
445 	if (!priv->hwts_rx_en)
446 		return;
447 	/* For GMAC4, the valid timestamp is from CTX next desc. */
448 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
449 		desc = np;
450 
451 	/* Check if timestamp is available */
452 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
453 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
454 
455 		ns -= priv->plat->cdc_error_adj;
456 
457 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
458 		shhwtstamp = skb_hwtstamps(skb);
459 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
460 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
461 	} else {
462 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
463 	}
464 }
465 
466 /**
467  *  stmmac_hwtstamp_set - control hardware timestamping.
468  *  @dev: device pointer.
469  *  @config: the timestamping configuration.
470  *  @extack: netlink extended ack structure for error reporting.
471  *  Description:
472  *  This function configures the MAC to enable/disable both outgoing (TX)
473  *  and incoming (RX) packet timestamping based on user input.
474  *  Return Value:
475  *  0 on success and an appropriate negative error code on failure.
476  */
477 static int stmmac_hwtstamp_set(struct net_device *dev,
478 			       struct kernel_hwtstamp_config *config,
479 			       struct netlink_ext_ack *extack)
480 {
481 	struct stmmac_priv *priv = netdev_priv(dev);
482 	u32 ptp_v2 = 0;
483 	u32 tstamp_all = 0;
484 	u32 ptp_over_ipv4_udp = 0;
485 	u32 ptp_over_ipv6_udp = 0;
486 	u32 ptp_over_ethernet = 0;
487 	u32 snap_type_sel = 0;
488 	u32 ts_master_en = 0;
489 	u32 ts_event_en = 0;
490 
491 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
492 		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
493 		priv->hwts_tx_en = 0;
494 		priv->hwts_rx_en = 0;
495 
496 		return -EOPNOTSUPP;
497 	}
498 
499 	if (!netif_running(dev)) {
500 		NL_SET_ERR_MSG_MOD(extack,
501 				   "Cannot change timestamping configuration while down");
502 		return -ENODEV;
503 	}
504 
505 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
506 		   __func__, config->flags, config->tx_type, config->rx_filter);
507 
508 	if (config->tx_type != HWTSTAMP_TX_OFF &&
509 	    config->tx_type != HWTSTAMP_TX_ON)
510 		return -ERANGE;
511 
512 	if (priv->adv_ts) {
513 		switch (config->rx_filter) {
514 		case HWTSTAMP_FILTER_NONE:
515 			/* time stamp no incoming packet at all */
516 			config->rx_filter = HWTSTAMP_FILTER_NONE;
517 			break;
518 
519 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
520 			/* PTP v1, UDP, any kind of event packet */
521 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
522 			/* 'xmac' hardware can support Sync, Pdelay_Req and
523  * Pdelay_resp by setting bit14 and bits17/16 to 01.
524 			 * This leaves Delay_Req timestamps out.
525 			 * Enable all events *and* general purpose message
526 			 * timestamping
527 			 */
528 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
529 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
530 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
531 			break;
532 
533 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
534 			/* PTP v1, UDP, Sync packet */
535 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
536 			/* take time stamp for SYNC messages only */
537 			ts_event_en = PTP_TCR_TSEVNTENA;
538 
539 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
540 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
541 			break;
542 
543 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
544 			/* PTP v1, UDP, Delay_req packet */
545 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
546 			/* take time stamp for Delay_Req messages only */
547 			ts_master_en = PTP_TCR_TSMSTRENA;
548 			ts_event_en = PTP_TCR_TSEVNTENA;
549 
550 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
551 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
552 			break;
553 
554 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
555 			/* PTP v2, UDP, any kind of event packet */
556 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
557 			ptp_v2 = PTP_TCR_TSVER2ENA;
558 			/* take time stamp for all event messages */
559 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
560 
561 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
562 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
563 			break;
564 
565 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
566 			/* PTP v2, UDP, Sync packet */
567 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
568 			ptp_v2 = PTP_TCR_TSVER2ENA;
569 			/* take time stamp for SYNC messages only */
570 			ts_event_en = PTP_TCR_TSEVNTENA;
571 
572 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
573 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
574 			break;
575 
576 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
577 			/* PTP v2, UDP, Delay_req packet */
578 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
579 			ptp_v2 = PTP_TCR_TSVER2ENA;
580 			/* take time stamp for Delay_Req messages only */
581 			ts_master_en = PTP_TCR_TSMSTRENA;
582 			ts_event_en = PTP_TCR_TSEVNTENA;
583 
584 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
585 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
586 			break;
587 
588 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
589 		/* PTP v2/802.1AS, any layer, any kind of event packet */
590 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
591 			ptp_v2 = PTP_TCR_TSVER2ENA;
592 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
593 			if (priv->synopsys_id < DWMAC_CORE_4_10)
594 				ts_event_en = PTP_TCR_TSEVNTENA;
595 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
596 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
597 			ptp_over_ethernet = PTP_TCR_TSIPENA;
598 			break;
599 
600 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
601 		/* PTP v2/802.1AS, any layer, Sync packet */
602 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
603 			ptp_v2 = PTP_TCR_TSVER2ENA;
604 			/* take time stamp for SYNC messages only */
605 			ts_event_en = PTP_TCR_TSEVNTENA;
606 
607 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
608 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
609 			ptp_over_ethernet = PTP_TCR_TSIPENA;
610 			break;
611 
612 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
613 		/* PTP v2/802.1AS, any layer, Delay_req packet */
614 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
615 			ptp_v2 = PTP_TCR_TSVER2ENA;
616 			/* take time stamp for Delay_Req messages only */
617 			ts_master_en = PTP_TCR_TSMSTRENA;
618 			ts_event_en = PTP_TCR_TSEVNTENA;
619 
620 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
621 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
622 			ptp_over_ethernet = PTP_TCR_TSIPENA;
623 			break;
624 
625 		case HWTSTAMP_FILTER_NTP_ALL:
626 		case HWTSTAMP_FILTER_ALL:
627 			/* time stamp any incoming packet */
628 			config->rx_filter = HWTSTAMP_FILTER_ALL;
629 			tstamp_all = PTP_TCR_TSENALL;
630 			break;
631 
632 		default:
633 			return -ERANGE;
634 		}
635 	} else {
636 		switch (config->rx_filter) {
637 		case HWTSTAMP_FILTER_NONE:
638 			config->rx_filter = HWTSTAMP_FILTER_NONE;
639 			break;
640 		default:
641 			/* PTP v1, UDP, any kind of event packet */
642 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
643 			break;
644 		}
645 	}
646 	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
647 	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
648 
649 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
650 
651 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
652 		priv->systime_flags |= tstamp_all | ptp_v2 |
653 				       ptp_over_ethernet | ptp_over_ipv6_udp |
654 				       ptp_over_ipv4_udp | ts_event_en |
655 				       ts_master_en | snap_type_sel;
656 	}
657 
658 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
659 
660 	priv->tstamp_config = *config;
661 
662 	return 0;
663 }
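/* For reference, this handler is reached from user space through the
 * standard SIOCSHWTSTAMP path. A minimal, hypothetical user-space sketch
 * enabling TX timestamping and PTPv2 event RX filtering on "eth0":
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Tools such as ptp4l and hwstamp_ctl issue the same request.
 */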
664 
665 /**
666  *  stmmac_hwtstamp_get - read hardware timestamping.
667  *  @dev: device pointer.
668  *  @config: the timestamping configuration.
669  *  Description:
670  *  This function obtains the current hardware timestamping settings
671  *  as requested.
672  */
673 static int stmmac_hwtstamp_get(struct net_device *dev,
674 			       struct kernel_hwtstamp_config *config)
675 {
676 	struct stmmac_priv *priv = netdev_priv(dev);
677 
678 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
679 		return -EOPNOTSUPP;
680 
681 	*config = priv->tstamp_config;
682 
683 	return 0;
684 }
685 
686 /**
687  * stmmac_init_tstamp_counter - init hardware timestamping counter
688  * @priv: driver private structure
689  * @systime_flags: timestamping flags
690  * Description:
691  * Initialize hardware counter for packet timestamping.
692  * This is valid as long as the interface is open and not suspended.
693  * It will be rerun after resuming from suspend, in which case the
694  * timestamping flags updated by stmmac_hwtstamp_set() also need restoring.
695  */
696 static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
697 				      u32 systime_flags)
698 {
699 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
700 	struct timespec64 now;
701 	u32 sec_inc = 0;
702 	u64 temp = 0;
703 
704 	if (!priv->plat->clk_ptp_rate) {
705 		netdev_err(priv->dev, "Invalid PTP clock rate");
706 		return -EINVAL;
707 	}
708 
709 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
710 	priv->systime_flags = systime_flags;
711 
712 	/* program Sub Second Increment reg */
713 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
714 					   priv->plat->clk_ptp_rate,
715 					   xmac, &sec_inc);
716 	temp = div_u64(1000000000ULL, sec_inc);
717 
718 	/* Store sub second increment for later use */
719 	priv->sub_second_inc = sec_inc;
720 
721 	/* calculate the default addend value:
722 	 * formula is :
723 	 * addend = (2^32)/freq_div_ratio;
724 	 * where freq_div_ratio = 1e9 ns / sec_inc
725 	 */
726 	temp = (u64)(temp << 32);
727 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
728 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
729 
730 	/* initialize system time */
731 	ktime_get_real_ts64(&now);
732 
733 	/* lower 32 bits of tv_sec are safe until y2106 */
734 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
735 
736 	return 0;
737 }
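/* Worked example of the addend calculation above (values are illustrative;
 * the actual sec_inc is chosen by stmmac_config_sub_second_increment()):
 * with clk_ptp_rate = 50MHz and sec_inc = 40ns,
 * freq_div_ratio = 1e9 / 40 = 25000000, so
 * addend = (25000000 << 32) / 50000000 = 0x80000000.
 * The PTP adjfine path later scales this default addend up or down to
 * compensate for the measured clock drift.
 */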
738 
739 /**
740  * stmmac_init_timestamping - initialise timestamping
741  * @priv: driver private structure
742  * Description: this is to verify whether the HW supports PTPv1 or PTPv2.
743  * This is done by looking at the HW cap. register.
744  * This function also registers the ptp driver.
745  */
746 static int stmmac_init_timestamping(struct stmmac_priv *priv)
747 {
748 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
749 	int ret;
750 
751 	if (priv->plat->ptp_clk_freq_config)
752 		priv->plat->ptp_clk_freq_config(priv);
753 
754 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
755 		netdev_info(priv->dev, "PTP not supported by HW\n");
756 		return -EOPNOTSUPP;
757 	}
758 
759 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
760 	if (ret) {
761 		netdev_warn(priv->dev, "PTP init failed\n");
762 		return ret;
763 	}
764 
765 	priv->adv_ts = 0;
766 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
767 	if (xmac && priv->dma_cap.atime_stamp)
768 		priv->adv_ts = 1;
769 	/* Dwmac 3.x core with extend_desc can support adv_ts */
770 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
771 		priv->adv_ts = 1;
772 
773 	if (priv->dma_cap.time_stamp)
774 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
775 
776 	if (priv->adv_ts)
777 		netdev_info(priv->dev,
778 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
779 
780 	priv->hwts_tx_en = 0;
781 	priv->hwts_rx_en = 0;
782 
783 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
784 		stmmac_hwtstamp_correct_latency(priv, priv);
785 
786 	return 0;
787 }
788 
789 static void stmmac_setup_ptp(struct stmmac_priv *priv)
790 {
791 	int ret;
792 
793 	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
794 	if (ret < 0)
795 		netdev_warn(priv->dev,
796 			    "failed to enable PTP reference clock: %pe\n",
797 			    ERR_PTR(ret));
798 
799 	if (stmmac_init_timestamping(priv) == 0)
800 		stmmac_ptp_register(priv);
801 }
802 
803 static void stmmac_release_ptp(struct stmmac_priv *priv)
804 {
805 	stmmac_ptp_unregister(priv);
806 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
807 }
808 
809 /**
810  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
811  *  @priv: driver private structure
812  *  @duplex: duplex passed to the next function
813  *  @flow_ctrl: desired flow control modes
814  *  Description: It is used for configuring the flow control in all queues
815  */
816 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
817 				 unsigned int flow_ctrl)
818 {
819 	u32 tx_cnt = priv->plat->tx_queues_to_use;
820 
821 	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
822 			 tx_cnt);
823 }
824 
825 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
826 					 phy_interface_t interface)
827 {
828 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
829 
830 	/* Refresh the MAC-specific capabilities */
831 	stmmac_mac_update_caps(priv);
832 
833 	config->mac_capabilities = priv->hw->link.caps;
834 
835 	if (priv->plat->max_speed)
836 		phylink_limit_mac_speed(config, priv->plat->max_speed);
837 
838 	return config->mac_capabilities;
839 }
840 
841 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
842 						 phy_interface_t interface)
843 {
844 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
845 	struct phylink_pcs *pcs;
846 
847 	if (priv->plat->select_pcs) {
848 		pcs = priv->plat->select_pcs(priv, interface);
849 		if (!IS_ERR(pcs))
850 			return pcs;
851 	}
852 
853 	return NULL;
854 }
855 
856 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
857 			      const struct phylink_link_state *state)
858 {
859 	/* Nothing to do, xpcs_config() handles everything */
860 }
861 
862 static void stmmac_mac_link_down(struct phylink_config *config,
863 				 unsigned int mode, phy_interface_t interface)
864 {
865 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
866 
867 	stmmac_mac_set(priv, priv->ioaddr, false);
868 	if (priv->dma_cap.eee)
869 		stmmac_set_eee_pls(priv, priv->hw, false);
870 
871 	if (stmmac_fpe_supported(priv))
872 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
873 }
874 
875 static void stmmac_mac_link_up(struct phylink_config *config,
876 			       struct phy_device *phy,
877 			       unsigned int mode, phy_interface_t interface,
878 			       int speed, int duplex,
879 			       bool tx_pause, bool rx_pause)
880 {
881 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
882 	unsigned int flow_ctrl;
883 	u32 old_ctrl, ctrl;
884 	int ret;
885 
886 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
887 	    priv->plat->serdes_powerup)
888 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
889 
890 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
891 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
892 
893 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
894 		switch (speed) {
895 		case SPEED_10000:
896 			ctrl |= priv->hw->link.xgmii.speed10000;
897 			break;
898 		case SPEED_5000:
899 			ctrl |= priv->hw->link.xgmii.speed5000;
900 			break;
901 		case SPEED_2500:
902 			ctrl |= priv->hw->link.xgmii.speed2500;
903 			break;
904 		default:
905 			return;
906 		}
907 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
908 		switch (speed) {
909 		case SPEED_100000:
910 			ctrl |= priv->hw->link.xlgmii.speed100000;
911 			break;
912 		case SPEED_50000:
913 			ctrl |= priv->hw->link.xlgmii.speed50000;
914 			break;
915 		case SPEED_40000:
916 			ctrl |= priv->hw->link.xlgmii.speed40000;
917 			break;
918 		case SPEED_25000:
919 			ctrl |= priv->hw->link.xlgmii.speed25000;
920 			break;
921 		case SPEED_10000:
922 			ctrl |= priv->hw->link.xgmii.speed10000;
923 			break;
924 		case SPEED_2500:
925 			ctrl |= priv->hw->link.speed2500;
926 			break;
927 		case SPEED_1000:
928 			ctrl |= priv->hw->link.speed1000;
929 			break;
930 		default:
931 			return;
932 		}
933 	} else {
934 		switch (speed) {
935 		case SPEED_2500:
936 			ctrl |= priv->hw->link.speed2500;
937 			break;
938 		case SPEED_1000:
939 			ctrl |= priv->hw->link.speed1000;
940 			break;
941 		case SPEED_100:
942 			ctrl |= priv->hw->link.speed100;
943 			break;
944 		case SPEED_10:
945 			ctrl |= priv->hw->link.speed10;
946 			break;
947 		default:
948 			return;
949 		}
950 	}
951 
952 	if (priv->plat->fix_mac_speed)
953 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
954 
955 	if (!duplex)
956 		ctrl &= ~priv->hw->link.duplex;
957 	else
958 		ctrl |= priv->hw->link.duplex;
959 
960 	/* Flow Control operation */
961 	if (rx_pause && tx_pause)
962 		flow_ctrl = FLOW_AUTO;
963 	else if (rx_pause && !tx_pause)
964 		flow_ctrl = FLOW_RX;
965 	else if (!rx_pause && tx_pause)
966 		flow_ctrl = FLOW_TX;
967 	else
968 		flow_ctrl = FLOW_OFF;
969 
970 	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
971 
972 	if (ctrl != old_ctrl)
973 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
974 
975 	if (priv->plat->set_clk_tx_rate) {
976 		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
977 						priv->plat->clk_tx_i,
978 						interface, speed);
979 		if (ret < 0)
980 			netdev_err(priv->dev,
981 				   "failed to configure %s transmit clock for %dMbps: %pe\n",
982 				   phy_modes(interface), speed, ERR_PTR(ret));
983 	}
984 
985 	stmmac_mac_set(priv, priv->ioaddr, true);
986 	if (priv->dma_cap.eee)
987 		stmmac_set_eee_pls(priv, priv->hw, true);
988 
989 	if (stmmac_fpe_supported(priv))
990 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
991 
992 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
993 		stmmac_hwtstamp_correct_latency(priv, priv);
994 }
995 
996 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
997 {
998 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
999 
1000 	priv->eee_active = false;
1001 
1002 	mutex_lock(&priv->lock);
1003 
1004 	priv->eee_enabled = false;
1005 
1006 	netdev_dbg(priv->dev, "disable EEE\n");
1007 	priv->eee_sw_timer_en = false;
1008 	timer_delete_sync(&priv->eee_ctrl_timer);
1009 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1010 	priv->tx_path_in_lpi_mode = false;
1011 
1012 	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1013 	mutex_unlock(&priv->lock);
1014 }
1015 
1016 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1017 				    bool tx_clk_stop)
1018 {
1019 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1020 	int ret;
1021 
1022 	priv->tx_lpi_timer = timer;
1023 	priv->eee_active = true;
1024 
1025 	mutex_lock(&priv->lock);
1026 
1027 	priv->eee_enabled = true;
1028 
1029 	/* Update the transmit clock stop according to PHY capability if
1030 	 * the platform allows
1031 	 */
1032 	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1033 		priv->tx_lpi_clk_stop = tx_clk_stop;
1034 
1035 	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1036 			     STMMAC_DEFAULT_TWT_LS);
1037 
1038 	/* Try to configure the hardware timer. */
1039 	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1040 				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1041 
1042 	if (ret) {
1043 		/* Hardware timer mode not supported, or value out of range.
1044 		 * Fall back to using software LPI mode
1045 		 */
1046 		priv->eee_sw_timer_en = true;
1047 		stmmac_restart_sw_lpi_timer(priv);
1048 	}
1049 
1050 	mutex_unlock(&priv->lock);
1051 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1052 
1053 	return 0;
1054 }
1055 
1056 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1057 			     phy_interface_t interface)
1058 {
1059 	struct net_device *ndev = to_net_dev(config->dev);
1060 	struct stmmac_priv *priv = netdev_priv(ndev);
1061 
1062 	if (priv->plat->mac_finish)
1063 		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1064 
1065 	return 0;
1066 }
1067 
1068 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1069 	.mac_get_caps = stmmac_mac_get_caps,
1070 	.mac_select_pcs = stmmac_mac_select_pcs,
1071 	.mac_config = stmmac_mac_config,
1072 	.mac_link_down = stmmac_mac_link_down,
1073 	.mac_link_up = stmmac_mac_link_up,
1074 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1075 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1076 	.mac_finish = stmmac_mac_finish,
1077 };
1078 
1079 /**
1080  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1081  * @priv: driver private structure
1082  * Description: this is to verify if the HW supports the Physical Coding
1083  * Sublayer (PCS) interface, which can be used when the MAC is
1084  * configured for the TBI, RTBI, or SGMII PHY interface.
1085  */
1086 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1087 {
1088 	int interface = priv->plat->phy_interface;
1089 
1090 	if (priv->dma_cap.pcs) {
1091 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1092 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1093 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1094 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1095 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1096 			priv->hw->pcs = STMMAC_PCS_RGMII;
1097 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1098 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1099 			priv->hw->pcs = STMMAC_PCS_SGMII;
1100 		}
1101 	}
1102 }
1103 
1104 /**
1105  * stmmac_init_phy - PHY initialization
1106  * @dev: net device structure
1107  * Description: it initializes the driver's PHY state, and attaches the PHY
1108  * to the mac driver.
1109  *  Return value:
1110  *  0 on success
1111  */
1112 static int stmmac_init_phy(struct net_device *dev)
1113 {
1114 	struct stmmac_priv *priv = netdev_priv(dev);
1115 	int mode = priv->plat->phy_interface;
1116 	struct fwnode_handle *phy_fwnode;
1117 	struct fwnode_handle *fwnode;
1118 	struct ethtool_keee eee;
1119 	int ret;
1120 
1121 	if (!phylink_expects_phy(priv->phylink))
1122 		return 0;
1123 
1124 	if (priv->hw->xpcs &&
1125 	    xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
1126 		return 0;
1127 
1128 	fwnode = priv->plat->port_node;
1129 	if (!fwnode)
1130 		fwnode = dev_fwnode(priv->device);
1131 
1132 	if (fwnode)
1133 		phy_fwnode = fwnode_get_phy_node(fwnode);
1134 	else
1135 		phy_fwnode = NULL;
1136 
1137 	/* Some DT bindings do not set up the PHY handle. Let's try to
1138 	 * manually parse it
1139 	 */
1140 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1141 		int addr = priv->plat->phy_addr;
1142 		struct phy_device *phydev;
1143 
1144 		if (addr < 0) {
1145 			netdev_err(priv->dev, "no phy found\n");
1146 			return -ENODEV;
1147 		}
1148 
1149 		phydev = mdiobus_get_phy(priv->mii, addr);
1150 		if (!phydev) {
1151 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1152 			return -ENODEV;
1153 		}
1154 
1155 		ret = phylink_connect_phy(priv->phylink, phydev);
1156 	} else {
1157 		fwnode_handle_put(phy_fwnode);
1158 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1159 	}
1160 
1161 	if (ret) {
1162 		netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
1163 			   ERR_PTR(ret));
1164 		return ret;
1165 	}
1166 
1167 	/* Configure phylib's copy of the LPI timer. Normally,
1168 	 * phylink_config.lpi_timer_default would do this, but there is a
1169 	 * chance that userspace could change the eee_timer setting via sysfs
1170 	 * before the first open. Thus, preserve existing behaviour.
1171 	 */
1172 	if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1173 		eee.tx_lpi_timer = priv->tx_lpi_timer;
1174 		phylink_ethtool_set_eee(priv->phylink, &eee);
1175 	}
1176 
1177 	if (!priv->plat->pmt) {
1178 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1179 
1180 		phylink_ethtool_get_wol(priv->phylink, &wol);
1181 		device_set_wakeup_capable(priv->device, !!wol.supported);
1182 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1183 	}
1184 
1185 	return 0;
1186 }
1187 
1188 static int stmmac_phy_setup(struct stmmac_priv *priv)
1189 {
1190 	struct stmmac_mdio_bus_data *mdio_bus_data;
1191 	struct phylink_config *config;
1192 	struct fwnode_handle *fwnode;
1193 	struct phylink_pcs *pcs;
1194 	struct phylink *phylink;
1195 
1196 	config = &priv->phylink_config;
1197 
1198 	config->dev = &priv->dev->dev;
1199 	config->type = PHYLINK_NETDEV;
1200 	config->mac_managed_pm = true;
1201 
1202 	/* Stmmac always requires an RX clock for hardware initialization */
1203 	config->mac_requires_rxc = true;
1204 
1205 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1206 		config->eee_rx_clk_stop_enable = true;
1207 
1208 	/* Set the default transmit clock stop bit based on the platform glue */
1209 	priv->tx_lpi_clk_stop = priv->plat->flags &
1210 				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1211 
1212 	mdio_bus_data = priv->plat->mdio_bus_data;
1213 	if (mdio_bus_data)
1214 		config->default_an_inband = mdio_bus_data->default_an_inband;
1215 
1216 	/* Get the PHY interface modes (at the PHY end of the link) that
1217 	 * are supported by the platform.
1218 	 */
1219 	if (priv->plat->get_interfaces)
1220 		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1221 					   config->supported_interfaces);
1222 
1223 	/* As a last resort, if the supported interfaces have not already been
1224 	 * provided by the platform, fall back to the platform/firmware
1225 	 * specified phy_interface mode.
1226 	 */
1227 	if (phy_interface_empty(config->supported_interfaces))
1228 		__set_bit(priv->plat->phy_interface,
1229 			  config->supported_interfaces);
1230 
1231 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1232 	if (priv->hw->xpcs)
1233 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1234 	else
1235 		pcs = priv->hw->phylink_pcs;
1236 
1237 	if (pcs)
1238 		phy_interface_or(config->supported_interfaces,
1239 				 config->supported_interfaces,
1240 				 pcs->supported_interfaces);
1241 
1242 	if (priv->dma_cap.eee) {
1243 		/* Assume all supported interfaces also support LPI */
1244 		memcpy(config->lpi_interfaces, config->supported_interfaces,
1245 		       sizeof(config->lpi_interfaces));
1246 
1247 		/* Full duplex at 100Mbps and all speeds above it are supported */
1248 		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
1249 		config->lpi_timer_default = eee_timer * 1000;
1250 		config->eee_enabled_default = true;
1251 	}
1252 
1253 	fwnode = priv->plat->port_node;
1254 	if (!fwnode)
1255 		fwnode = dev_fwnode(priv->device);
1256 
1257 	phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1258 				 &stmmac_phylink_mac_ops);
1259 	if (IS_ERR(phylink))
1260 		return PTR_ERR(phylink);
1261 
1262 	priv->phylink = phylink;
1263 	return 0;
1264 }
1265 
1266 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1267 				    struct stmmac_dma_conf *dma_conf)
1268 {
1269 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1270 	unsigned int desc_size;
1271 	void *head_rx;
1272 	u32 queue;
1273 
1274 	/* Display RX rings */
1275 	for (queue = 0; queue < rx_cnt; queue++) {
1276 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1277 
1278 		pr_info("\tRX Queue %u rings\n", queue);
1279 
1280 		if (priv->extend_desc) {
1281 			head_rx = (void *)rx_q->dma_erx;
1282 			desc_size = sizeof(struct dma_extended_desc);
1283 		} else {
1284 			head_rx = (void *)rx_q->dma_rx;
1285 			desc_size = sizeof(struct dma_desc);
1286 		}
1287 
1288 		/* Display RX ring */
1289 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1290 				    rx_q->dma_rx_phy, desc_size);
1291 	}
1292 }
1293 
1294 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1295 				    struct stmmac_dma_conf *dma_conf)
1296 {
1297 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1298 	unsigned int desc_size;
1299 	void *head_tx;
1300 	u32 queue;
1301 
1302 	/* Display TX rings */
1303 	for (queue = 0; queue < tx_cnt; queue++) {
1304 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1305 
1306 		pr_info("\tTX Queue %d rings\n", queue);
1307 
1308 		if (priv->extend_desc) {
1309 			head_tx = (void *)tx_q->dma_etx;
1310 			desc_size = sizeof(struct dma_extended_desc);
1311 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1312 			head_tx = (void *)tx_q->dma_entx;
1313 			desc_size = sizeof(struct dma_edesc);
1314 		} else {
1315 			head_tx = (void *)tx_q->dma_tx;
1316 			desc_size = sizeof(struct dma_desc);
1317 		}
1318 
1319 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1320 				    tx_q->dma_tx_phy, desc_size);
1321 	}
1322 }
1323 
1324 static void stmmac_display_rings(struct stmmac_priv *priv,
1325 				 struct stmmac_dma_conf *dma_conf)
1326 {
1327 	/* Display RX ring */
1328 	stmmac_display_rx_rings(priv, dma_conf);
1329 
1330 	/* Display TX ring */
1331 	stmmac_display_tx_rings(priv, dma_conf);
1332 }
1333 
1334 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1335 {
1336 	if (stmmac_xdp_is_enabled(priv))
1337 		return XDP_PACKET_HEADROOM;
1338 
1339 	return NET_SKB_PAD;
1340 }
1341 
1342 static int stmmac_set_bfsize(int mtu, int bufsize)
1343 {
1344 	int ret = bufsize;
1345 
1346 	if (mtu >= BUF_SIZE_8KiB)
1347 		ret = BUF_SIZE_16KiB;
1348 	else if (mtu >= BUF_SIZE_4KiB)
1349 		ret = BUF_SIZE_8KiB;
1350 	else if (mtu >= BUF_SIZE_2KiB)
1351 		ret = BUF_SIZE_4KiB;
1352 	else if (mtu > DEFAULT_BUFSIZE)
1353 		ret = BUF_SIZE_2KiB;
1354 	else
1355 		ret = DEFAULT_BUFSIZE;
1356 
1357 	return ret;
1358 }
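/* The mapping above effectively rounds the MTU up to the next supported DMA
 * buffer size: for example, the standard 1500 byte MTU keeps the default
 * 1536 byte buffer, a 3000 byte MTU selects BUF_SIZE_4KiB and a 9000 byte
 * jumbo MTU selects BUF_SIZE_16KiB.
 */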
1359 
1360 /**
1361  * stmmac_clear_rx_descriptors - clear RX descriptors
1362  * @priv: driver private structure
1363  * @dma_conf: structure to take the dma data
1364  * @queue: RX queue index
1365  * Description: this function is called to clear the RX descriptors
1366  * whether basic or extended descriptors are in use.
1367  */
1368 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1369 					struct stmmac_dma_conf *dma_conf,
1370 					u32 queue)
1371 {
1372 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1373 	int i;
1374 
1375 	/* Clear the RX descriptors */
1376 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1377 		if (priv->extend_desc)
1378 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1379 					priv->use_riwt, priv->mode,
1380 					(i == dma_conf->dma_rx_size - 1),
1381 					dma_conf->dma_buf_sz);
1382 		else
1383 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1384 					priv->use_riwt, priv->mode,
1385 					(i == dma_conf->dma_rx_size - 1),
1386 					dma_conf->dma_buf_sz);
1387 }
1388 
1389 /**
1390  * stmmac_clear_tx_descriptors - clear tx descriptors
1391  * @priv: driver private structure
1392  * @dma_conf: structure to take the dma data
1393  * @queue: TX queue index.
1394  * Description: this function is called to clear the TX descriptors
1395  * whether basic or extended descriptors are in use.
1396  */
1397 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1398 					struct stmmac_dma_conf *dma_conf,
1399 					u32 queue)
1400 {
1401 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1402 	int i;
1403 
1404 	/* Clear the TX descriptors */
1405 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1406 		int last = (i == (dma_conf->dma_tx_size - 1));
1407 		struct dma_desc *p;
1408 
1409 		if (priv->extend_desc)
1410 			p = &tx_q->dma_etx[i].basic;
1411 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1412 			p = &tx_q->dma_entx[i].basic;
1413 		else
1414 			p = &tx_q->dma_tx[i];
1415 
1416 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1417 	}
1418 }
1419 
1420 /**
1421  * stmmac_clear_descriptors - clear descriptors
1422  * @priv: driver private structure
1423  * @dma_conf: structure to take the dma data
1424  * Description: this function is called to clear the TX and RX descriptors
1425  * whether basic or extended descriptors are in use.
1426  */
1427 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1428 				     struct stmmac_dma_conf *dma_conf)
1429 {
1430 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1431 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1432 	u32 queue;
1433 
1434 	/* Clear the RX descriptors */
1435 	for (queue = 0; queue < rx_queue_cnt; queue++)
1436 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1437 
1438 	/* Clear the TX descriptors */
1439 	for (queue = 0; queue < tx_queue_cnt; queue++)
1440 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1441 }
1442 
1443 /**
1444  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1445  * @priv: driver private structure
1446  * @dma_conf: structure to take the dma data
1447  * @p: descriptor pointer
1448  * @i: descriptor index
1449  * @flags: gfp flag
1450  * @queue: RX queue index
1451  * Description: this function is called to allocate a receive buffer, perform
1452  * the DMA mapping and init the descriptor.
1453  */
1454 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1455 				  struct stmmac_dma_conf *dma_conf,
1456 				  struct dma_desc *p,
1457 				  int i, gfp_t flags, u32 queue)
1458 {
1459 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1460 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1461 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1462 
1463 	if (priv->dma_cap.host_dma_width <= 32)
1464 		gfp |= GFP_DMA32;
1465 
1466 	if (!buf->page) {
1467 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1468 		if (!buf->page)
1469 			return -ENOMEM;
1470 		buf->page_offset = stmmac_rx_offset(priv);
1471 	}
1472 
1473 	if (priv->sph && !buf->sec_page) {
1474 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1475 		if (!buf->sec_page)
1476 			return -ENOMEM;
1477 
1478 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1479 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1480 	} else {
1481 		buf->sec_page = NULL;
1482 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1483 	}
1484 
1485 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1486 
1487 	stmmac_set_desc_addr(priv, p, buf->addr);
1488 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1489 		stmmac_init_desc3(priv, p);
1490 
1491 	return 0;
1492 }
1493 
1494 /**
1495  * stmmac_free_rx_buffer - free an RX DMA buffer
1496  * @priv: private structure
1497  * @rx_q: RX queue
1498  * @i: buffer index.
1499  */
1500 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1501 				  struct stmmac_rx_queue *rx_q,
1502 				  int i)
1503 {
1504 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1505 
1506 	if (buf->page)
1507 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1508 	buf->page = NULL;
1509 
1510 	if (buf->sec_page)
1511 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1512 	buf->sec_page = NULL;
1513 }
1514 
1515 /**
1516  * stmmac_free_tx_buffer - free a TX DMA buffer
1517  * @priv: private structure
1518  * @dma_conf: structure to take the dma data
1519  * @queue: TX queue index
1520  * @i: buffer index.
1521  */
1522 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1523 				  struct stmmac_dma_conf *dma_conf,
1524 				  u32 queue, int i)
1525 {
1526 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1527 
1528 	if (tx_q->tx_skbuff_dma[i].buf &&
1529 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1530 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1531 			dma_unmap_page(priv->device,
1532 				       tx_q->tx_skbuff_dma[i].buf,
1533 				       tx_q->tx_skbuff_dma[i].len,
1534 				       DMA_TO_DEVICE);
1535 		else
1536 			dma_unmap_single(priv->device,
1537 					 tx_q->tx_skbuff_dma[i].buf,
1538 					 tx_q->tx_skbuff_dma[i].len,
1539 					 DMA_TO_DEVICE);
1540 	}
1541 
1542 	if (tx_q->xdpf[i] &&
1543 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1544 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1545 		xdp_return_frame(tx_q->xdpf[i]);
1546 		tx_q->xdpf[i] = NULL;
1547 	}
1548 
1549 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1550 		tx_q->xsk_frames_done++;
1551 
1552 	if (tx_q->tx_skbuff[i] &&
1553 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1554 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1555 		tx_q->tx_skbuff[i] = NULL;
1556 	}
1557 
1558 	tx_q->tx_skbuff_dma[i].buf = 0;
1559 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1560 }
1561 
1562 /**
1563  * dma_free_rx_skbufs - free RX dma buffers
1564  * @priv: private structure
1565  * @dma_conf: structure to take the dma data
1566  * @queue: RX queue index
1567  */
1568 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1569 			       struct stmmac_dma_conf *dma_conf,
1570 			       u32 queue)
1571 {
1572 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1573 	int i;
1574 
1575 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1576 		stmmac_free_rx_buffer(priv, rx_q, i);
1577 }
1578 
1579 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1580 				   struct stmmac_dma_conf *dma_conf,
1581 				   u32 queue, gfp_t flags)
1582 {
1583 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1584 	int i;
1585 
1586 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1587 		struct dma_desc *p;
1588 		int ret;
1589 
1590 		if (priv->extend_desc)
1591 			p = &((rx_q->dma_erx + i)->basic);
1592 		else
1593 			p = rx_q->dma_rx + i;
1594 
1595 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1596 					     queue);
1597 		if (ret)
1598 			return ret;
1599 
1600 		rx_q->buf_alloc_num++;
1601 	}
1602 
1603 	return 0;
1604 }
1605 
1606 /**
1607  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1608  * @priv: private structure
1609  * @dma_conf: structure to take the dma data
1610  * @queue: RX queue index
1611  */
1612 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1613 				struct stmmac_dma_conf *dma_conf,
1614 				u32 queue)
1615 {
1616 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1617 	int i;
1618 
1619 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1620 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1621 
1622 		if (!buf->xdp)
1623 			continue;
1624 
1625 		xsk_buff_free(buf->xdp);
1626 		buf->xdp = NULL;
1627 	}
1628 }
1629 
1630 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1631 				      struct stmmac_dma_conf *dma_conf,
1632 				      u32 queue)
1633 {
1634 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1635 	int i;
1636 
1637 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1638 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1639 	 * use this macro to make sure no size violations.
1640 	 * use this macro to make sure there are no size violations.
1641 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1642 
1643 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1644 		struct stmmac_rx_buffer *buf;
1645 		dma_addr_t dma_addr;
1646 		struct dma_desc *p;
1647 
1648 		if (priv->extend_desc)
1649 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1650 		else
1651 			p = rx_q->dma_rx + i;
1652 
1653 		buf = &rx_q->buf_pool[i];
1654 
1655 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1656 		if (!buf->xdp)
1657 			return -ENOMEM;
1658 
1659 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1660 		stmmac_set_desc_addr(priv, p, dma_addr);
1661 		rx_q->buf_alloc_num++;
1662 	}
1663 
1664 	return 0;
1665 }
1666 
1667 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1668 {
1669 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1670 		return NULL;
1671 
1672 	return xsk_get_pool_from_qid(priv->dev, queue);
1673 }
1674 
1675 /**
1676  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1677  * @priv: driver private structure
1678  * @dma_conf: structure to take the dma data
1679  * @queue: RX queue index
1680  * @flags: gfp flag.
1681  * Description: this function initializes the DMA RX descriptors
1682  * and allocates the socket buffers. It supports the chained and ring
1683  * modes.
1684  */
1685 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1686 				    struct stmmac_dma_conf *dma_conf,
1687 				    u32 queue, gfp_t flags)
1688 {
1689 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1690 	int ret;
1691 
1692 	netif_dbg(priv, probe, priv->dev,
1693 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1694 		  (u32)rx_q->dma_rx_phy);
1695 
1696 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1697 
1698 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1699 
1700 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1701 
1702 	if (rx_q->xsk_pool) {
1703 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1704 						   MEM_TYPE_XSK_BUFF_POOL,
1705 						   NULL));
1706 		netdev_info(priv->dev,
1707 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1708 			    rx_q->queue_index);
1709 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1710 	} else {
1711 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1712 						   MEM_TYPE_PAGE_POOL,
1713 						   rx_q->page_pool));
1714 		netdev_info(priv->dev,
1715 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1716 			    rx_q->queue_index);
1717 	}
1718 
1719 	if (rx_q->xsk_pool) {
1720 		/* RX XDP ZC buffer pool may not be populated, e.g.
1721 		 * xdpsock TX-only.
1722 		 */
1723 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1724 	} else {
1725 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1726 		if (ret < 0)
1727 			return -ENOMEM;
1728 	}
1729 
1730 	/* Setup the chained descriptor addresses */
1731 	if (priv->mode == STMMAC_CHAIN_MODE) {
1732 		if (priv->extend_desc)
1733 			stmmac_mode_init(priv, rx_q->dma_erx,
1734 					 rx_q->dma_rx_phy,
1735 					 dma_conf->dma_rx_size, 1);
1736 		else
1737 			stmmac_mode_init(priv, rx_q->dma_rx,
1738 					 rx_q->dma_rx_phy,
1739 					 dma_conf->dma_rx_size, 0);
1740 	}
1741 
1742 	return 0;
1743 }
1744 
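/**
 * init_dma_rx_desc_rings - init the RX descriptor rings of all queues
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: initialize the RX ring of every queue in use; on failure the
 * buffers of the queues already initialized are released again.
 */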
1745 static int init_dma_rx_desc_rings(struct net_device *dev,
1746 				  struct stmmac_dma_conf *dma_conf,
1747 				  gfp_t flags)
1748 {
1749 	struct stmmac_priv *priv = netdev_priv(dev);
1750 	u32 rx_count = priv->plat->rx_queues_to_use;
1751 	int queue;
1752 	int ret;
1753 
1754 	/* RX INITIALIZATION */
1755 	netif_dbg(priv, probe, priv->dev,
1756 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1757 
1758 	for (queue = 0; queue < rx_count; queue++) {
1759 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1760 		if (ret)
1761 			goto err_init_rx_buffers;
1762 	}
1763 
1764 	return 0;
1765 
1766 err_init_rx_buffers:
1767 	while (queue >= 0) {
1768 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1769 
1770 		if (rx_q->xsk_pool)
1771 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1772 		else
1773 			dma_free_rx_skbufs(priv, dma_conf, queue);
1774 
1775 		rx_q->buf_alloc_num = 0;
1776 		rx_q->xsk_pool = NULL;
1777 
1778 		queue--;
1779 	}
1780 
1781 	return ret;
1782 }
1783 
1784 /**
1785  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1786  * @priv: driver private structure
1787  * @dma_conf: structure to take the dma data
1788  * @queue: TX queue index
1789  * Description: this function initializes the DMA TX descriptors
1790  * and clears the TX buffer bookkeeping. It supports the chained and
1791  * ring modes.
1792  */
1793 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1794 				    struct stmmac_dma_conf *dma_conf,
1795 				    u32 queue)
1796 {
1797 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1798 	int i;
1799 
1800 	netif_dbg(priv, probe, priv->dev,
1801 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1802 		  (u32)tx_q->dma_tx_phy);
1803 
1804 	/* Setup the chained descriptor addresses */
1805 	if (priv->mode == STMMAC_CHAIN_MODE) {
1806 		if (priv->extend_desc)
1807 			stmmac_mode_init(priv, tx_q->dma_etx,
1808 					 tx_q->dma_tx_phy,
1809 					 dma_conf->dma_tx_size, 1);
1810 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1811 			stmmac_mode_init(priv, tx_q->dma_tx,
1812 					 tx_q->dma_tx_phy,
1813 					 dma_conf->dma_tx_size, 0);
1814 	}
1815 
1816 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1817 
1818 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1819 		struct dma_desc *p;
1820 
1821 		if (priv->extend_desc)
1822 			p = &((tx_q->dma_etx + i)->basic);
1823 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1824 			p = &((tx_q->dma_entx + i)->basic);
1825 		else
1826 			p = tx_q->dma_tx + i;
1827 
1828 		stmmac_clear_desc(priv, p);
1829 
1830 		tx_q->tx_skbuff_dma[i].buf = 0;
1831 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1832 		tx_q->tx_skbuff_dma[i].len = 0;
1833 		tx_q->tx_skbuff_dma[i].last_segment = false;
1834 		tx_q->tx_skbuff[i] = NULL;
1835 	}
1836 
1837 	return 0;
1838 }
1839 
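/**
 * init_dma_tx_desc_rings - init the TX descriptor rings of all queues
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: initialize the TX descriptor ring of every queue in use.
 */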
1840 static int init_dma_tx_desc_rings(struct net_device *dev,
1841 				  struct stmmac_dma_conf *dma_conf)
1842 {
1843 	struct stmmac_priv *priv = netdev_priv(dev);
1844 	u32 tx_queue_cnt;
1845 	u32 queue;
1846 
1847 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1848 
1849 	for (queue = 0; queue < tx_queue_cnt; queue++)
1850 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1851 
1852 	return 0;
1853 }
1854 
1855 /**
1856  * init_dma_desc_rings - init the RX/TX descriptor rings
1857  * @dev: net device structure
1858  * @dma_conf: structure to take the dma data
1859  * @flags: gfp flag.
1860  * Description: this function initializes the DMA RX/TX descriptors
1861  * and allocates the socket buffers. It supports the chained and ring
1862  * modes.
1863  */
1864 static int init_dma_desc_rings(struct net_device *dev,
1865 			       struct stmmac_dma_conf *dma_conf,
1866 			       gfp_t flags)
1867 {
1868 	struct stmmac_priv *priv = netdev_priv(dev);
1869 	int ret;
1870 
1871 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1872 	if (ret)
1873 		return ret;
1874 
1875 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1876 
1877 	stmmac_clear_descriptors(priv, dma_conf);
1878 
1879 	if (netif_msg_hw(priv))
1880 		stmmac_display_rings(priv, dma_conf);
1881 
1882 	return ret;
1883 }
1884 
1885 /**
1886  * dma_free_tx_skbufs - free TX dma buffers
1887  * @priv: private structure
1888  * @dma_conf: structure to take the dma data
1889  * @queue: TX queue index
1890  */
1891 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1892 			       struct stmmac_dma_conf *dma_conf,
1893 			       u32 queue)
1894 {
1895 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1896 	int i;
1897 
1898 	tx_q->xsk_frames_done = 0;
1899 
1900 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1901 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1902 
1903 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1904 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1905 		tx_q->xsk_frames_done = 0;
1906 		tx_q->xsk_pool = NULL;
1907 	}
1908 }
1909 
1910 /**
1911  * stmmac_free_tx_skbufs - free TX skb buffers
1912  * @priv: private structure
1913  */
1914 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1915 {
1916 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1917 	u32 queue;
1918 
1919 	for (queue = 0; queue < tx_queue_cnt; queue++)
1920 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1921 }
1922 
1923 /**
1924  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1925  * @priv: private structure
1926  * @dma_conf: structure to take the dma data
1927  * @queue: RX queue index
1928  */
1929 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1930 					 struct stmmac_dma_conf *dma_conf,
1931 					 u32 queue)
1932 {
1933 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1934 
1935 	/* Release the DMA RX socket buffers */
1936 	if (rx_q->xsk_pool)
1937 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1938 	else
1939 		dma_free_rx_skbufs(priv, dma_conf, queue);
1940 
1941 	rx_q->buf_alloc_num = 0;
1942 	rx_q->xsk_pool = NULL;
1943 
1944 	/* Free DMA regions of consistent memory previously allocated */
1945 	if (!priv->extend_desc)
1946 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1947 				  sizeof(struct dma_desc),
1948 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1949 	else
1950 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1951 				  sizeof(struct dma_extended_desc),
1952 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1953 
1954 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1955 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1956 
1957 	kfree(rx_q->buf_pool);
1958 	if (rx_q->page_pool)
1959 		page_pool_destroy(rx_q->page_pool);
1960 }
1961 
1962 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1963 				       struct stmmac_dma_conf *dma_conf)
1964 {
1965 	u32 rx_count = priv->plat->rx_queues_to_use;
1966 	u32 queue;
1967 
1968 	/* Free RX queue resources */
1969 	for (queue = 0; queue < rx_count; queue++)
1970 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1971 }
1972 
1973 /**
1974  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1975  * @priv: private structure
1976  * @dma_conf: structure to take the dma data
1977  * @queue: TX queue index
1978  */
1979 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1980 					 struct stmmac_dma_conf *dma_conf,
1981 					 u32 queue)
1982 {
1983 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1984 	size_t size;
1985 	void *addr;
1986 
1987 	/* Release the DMA TX socket buffers */
1988 	dma_free_tx_skbufs(priv, dma_conf, queue);
1989 
1990 	if (priv->extend_desc) {
1991 		size = sizeof(struct dma_extended_desc);
1992 		addr = tx_q->dma_etx;
1993 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1994 		size = sizeof(struct dma_edesc);
1995 		addr = tx_q->dma_entx;
1996 	} else {
1997 		size = sizeof(struct dma_desc);
1998 		addr = tx_q->dma_tx;
1999 	}
2000 
2001 	size *= dma_conf->dma_tx_size;
2002 
2003 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2004 
2005 	kfree(tx_q->tx_skbuff_dma);
2006 	kfree(tx_q->tx_skbuff);
2007 }
2008 
2009 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2010 				       struct stmmac_dma_conf *dma_conf)
2011 {
2012 	u32 tx_count = priv->plat->tx_queues_to_use;
2013 	u32 queue;
2014 
2015 	/* Free TX queue resources */
2016 	for (queue = 0; queue < tx_count; queue++)
2017 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2018 }
2019 
2020 /**
2021  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2022  * @priv: private structure
2023  * @dma_conf: structure to take the dma data
2024  * @queue: RX queue index
2025  * Description: according to which descriptor can be used (extended or
2026  * basic) this function allocates the resources for the RX path of one
2027  * queue. It pre-allocates the RX buffers (page pool) in order to allow
2028  * the zero-copy mechanism.
2029  */
2030 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2031 					 struct stmmac_dma_conf *dma_conf,
2032 					 u32 queue)
2033 {
2034 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2035 	struct stmmac_channel *ch = &priv->channel[queue];
2036 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2037 	struct page_pool_params pp_params = { 0 };
2038 	unsigned int dma_buf_sz_pad, num_pages;
2039 	unsigned int napi_id;
2040 	int ret;
2041 
2042 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2043 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2044 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2045 
2046 	rx_q->queue_index = queue;
2047 	rx_q->priv_data = priv;
2048 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2049 
2050 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2051 	pp_params.pool_size = dma_conf->dma_rx_size;
2052 	pp_params.order = order_base_2(num_pages);
2053 	pp_params.nid = dev_to_node(priv->device);
2054 	pp_params.dev = priv->device;
2055 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2056 	pp_params.offset = stmmac_rx_offset(priv);
2057 	pp_params.max_len = dma_conf->dma_buf_sz;
2058 
2059 	if (priv->sph) {
2060 		pp_params.offset = 0;
2061 		pp_params.max_len += stmmac_rx_offset(priv);
2062 	}
2063 
2064 	rx_q->page_pool = page_pool_create(&pp_params);
2065 	if (IS_ERR(rx_q->page_pool)) {
2066 		ret = PTR_ERR(rx_q->page_pool);
2067 		rx_q->page_pool = NULL;
2068 		return ret;
2069 	}
2070 
2071 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2072 				 sizeof(*rx_q->buf_pool),
2073 				 GFP_KERNEL);
2074 	if (!rx_q->buf_pool)
2075 		return -ENOMEM;
2076 
2077 	if (priv->extend_desc) {
2078 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2079 						   dma_conf->dma_rx_size *
2080 						   sizeof(struct dma_extended_desc),
2081 						   &rx_q->dma_rx_phy,
2082 						   GFP_KERNEL);
2083 		if (!rx_q->dma_erx)
2084 			return -ENOMEM;
2085 
2086 	} else {
2087 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2088 						  dma_conf->dma_rx_size *
2089 						  sizeof(struct dma_desc),
2090 						  &rx_q->dma_rx_phy,
2091 						  GFP_KERNEL);
2092 		if (!rx_q->dma_rx)
2093 			return -ENOMEM;
2094 	}
2095 
2096 	if (stmmac_xdp_is_enabled(priv) &&
2097 	    test_bit(queue, priv->af_xdp_zc_qps))
2098 		napi_id = ch->rxtx_napi.napi_id;
2099 	else
2100 		napi_id = ch->rx_napi.napi_id;
2101 
2102 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2103 			       rx_q->queue_index,
2104 			       napi_id);
2105 	if (ret) {
2106 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2107 		return -EINVAL;
2108 	}
2109 
2110 	return 0;
2111 }
2112 
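/* Allocate the RX descriptor resources of every queue in use; on failure
 * everything allocated so far is released.
 */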
2113 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2114 				       struct stmmac_dma_conf *dma_conf)
2115 {
2116 	u32 rx_count = priv->plat->rx_queues_to_use;
2117 	u32 queue;
2118 	int ret;
2119 
2120 	/* RX queues buffers and DMA */
2121 	for (queue = 0; queue < rx_count; queue++) {
2122 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2123 		if (ret)
2124 			goto err_dma;
2125 	}
2126 
2127 	return 0;
2128 
2129 err_dma:
2130 	free_dma_rx_desc_resources(priv, dma_conf);
2131 
2132 	return ret;
2133 }
2134 
2135 /**
2136  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2137  * @priv: private structure
2138  * @dma_conf: structure to take the dma data
2139  * @queue: TX queue index
2140  * Description: according to which descriptor can be used (extended, TBS
2141  * enhanced or basic) this function allocates the descriptor memory and
2142  * the buffer bookkeeping needed for the TX path of one queue. The RX
2143  * path is handled by __alloc_dma_rx_desc_resources().
2144  */
2145 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2146 					 struct stmmac_dma_conf *dma_conf,
2147 					 u32 queue)
2148 {
2149 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2150 	size_t size;
2151 	void *addr;
2152 
2153 	tx_q->queue_index = queue;
2154 	tx_q->priv_data = priv;
2155 
2156 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2157 				      sizeof(*tx_q->tx_skbuff_dma),
2158 				      GFP_KERNEL);
2159 	if (!tx_q->tx_skbuff_dma)
2160 		return -ENOMEM;
2161 
2162 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2163 				  sizeof(struct sk_buff *),
2164 				  GFP_KERNEL);
2165 	if (!tx_q->tx_skbuff)
2166 		return -ENOMEM;
2167 
2168 	if (priv->extend_desc)
2169 		size = sizeof(struct dma_extended_desc);
2170 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2171 		size = sizeof(struct dma_edesc);
2172 	else
2173 		size = sizeof(struct dma_desc);
2174 
2175 	size *= dma_conf->dma_tx_size;
2176 
2177 	addr = dma_alloc_coherent(priv->device, size,
2178 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2179 	if (!addr)
2180 		return -ENOMEM;
2181 
2182 	if (priv->extend_desc)
2183 		tx_q->dma_etx = addr;
2184 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2185 		tx_q->dma_entx = addr;
2186 	else
2187 		tx_q->dma_tx = addr;
2188 
2189 	return 0;
2190 }
2191 
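/* Allocate the TX descriptor resources of every queue in use; on failure
 * everything allocated so far is released.
 */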
2192 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2193 				       struct stmmac_dma_conf *dma_conf)
2194 {
2195 	u32 tx_count = priv->plat->tx_queues_to_use;
2196 	u32 queue;
2197 	int ret;
2198 
2199 	/* TX queues buffers and DMA */
2200 	for (queue = 0; queue < tx_count; queue++) {
2201 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2202 		if (ret)
2203 			goto err_dma;
2204 	}
2205 
2206 	return 0;
2207 
2208 err_dma:
2209 	free_dma_tx_desc_resources(priv, dma_conf);
2210 	return ret;
2211 }
2212 
2213 /**
2214  * alloc_dma_desc_resources - alloc TX/RX resources.
2215  * @priv: private structure
2216  * @dma_conf: structure to take the dma data
2217  * Description: according to which descriptor can be used (extended or basic)
2218  * this function allocates the resources for TX and RX paths. In case of
2219  * reception, for example, it pre-allocates the RX socket buffers in order to
2220  * allow the zero-copy mechanism.
2221  */
2222 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2223 				    struct stmmac_dma_conf *dma_conf)
2224 {
2225 	/* RX Allocation */
2226 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2227 
2228 	if (ret)
2229 		return ret;
2230 
2231 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2232 
2233 	return ret;
2234 }
2235 
2236 /**
2237  * free_dma_desc_resources - free dma desc resources
2238  * @priv: private structure
2239  * @dma_conf: structure to take the dma data
2240  */
2241 static void free_dma_desc_resources(struct stmmac_priv *priv,
2242 				    struct stmmac_dma_conf *dma_conf)
2243 {
2244 	/* Release the DMA TX socket buffers */
2245 	free_dma_tx_desc_resources(priv, dma_conf);
2246 
2247 	/* Release the DMA RX socket buffers later
2248 	 * to ensure all pending XDP_TX buffers are returned.
2249 	 */
2250 	free_dma_rx_desc_resources(priv, dma_conf);
2251 }
2252 
2253 /**
2254  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2255  *  @priv: driver private structure
2256  *  Description: It is used for enabling the rx queues in the MAC
2257  */
2258 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2259 {
2260 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2261 	int queue;
2262 	u8 mode;
2263 
2264 	for (queue = 0; queue < rx_queues_count; queue++) {
2265 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2266 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2267 	}
2268 }
2269 
2270 /**
2271  * stmmac_start_rx_dma - start RX DMA channel
2272  * @priv: driver private structure
2273  * @chan: RX channel index
2274  * Description:
2275  * This starts a RX DMA channel
2276  */
2277 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2278 {
2279 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2280 	stmmac_start_rx(priv, priv->ioaddr, chan);
2281 }
2282 
2283 /**
2284  * stmmac_start_tx_dma - start TX DMA channel
2285  * @priv: driver private structure
2286  * @chan: TX channel index
2287  * Description:
2288  * This starts a TX DMA channel
2289  */
2290 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2291 {
2292 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2293 	stmmac_start_tx(priv, priv->ioaddr, chan);
2294 }
2295 
2296 /**
2297  * stmmac_stop_rx_dma - stop RX DMA channel
2298  * @priv: driver private structure
2299  * @chan: RX channel index
2300  * Description:
2301  * This stops a RX DMA channel
2302  */
2303 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2304 {
2305 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2306 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2307 }
2308 
2309 /**
2310  * stmmac_stop_tx_dma - stop TX DMA channel
2311  * @priv: driver private structure
2312  * @chan: TX channel index
2313  * Description:
2314  * This stops a TX DMA channel
2315  */
2316 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2317 {
2318 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2319 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2320 }
2321 
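/* Enable the RX and TX DMA interrupts of every channel in use */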
2322 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2323 {
2324 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2325 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2326 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2327 	u32 chan;
2328 
2329 	for (chan = 0; chan < dma_csr_ch; chan++) {
2330 		struct stmmac_channel *ch = &priv->channel[chan];
2331 		unsigned long flags;
2332 
2333 		spin_lock_irqsave(&ch->lock, flags);
2334 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2335 		spin_unlock_irqrestore(&ch->lock, flags);
2336 	}
2337 }
2338 
2339 /**
2340  * stmmac_start_all_dma - start all RX and TX DMA channels
2341  * @priv: driver private structure
2342  * Description:
2343  * This starts all the RX and TX DMA channels
2344  */
2345 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2346 {
2347 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2348 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2349 	u32 chan = 0;
2350 
2351 	for (chan = 0; chan < rx_channels_count; chan++)
2352 		stmmac_start_rx_dma(priv, chan);
2353 
2354 	for (chan = 0; chan < tx_channels_count; chan++)
2355 		stmmac_start_tx_dma(priv, chan);
2356 }
2357 
2358 /**
2359  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2360  * @priv: driver private structure
2361  * Description:
2362  * This stops the RX and TX DMA channels
2363  */
2364 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2365 {
2366 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2367 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2368 	u32 chan = 0;
2369 
2370 	for (chan = 0; chan < rx_channels_count; chan++)
2371 		stmmac_stop_rx_dma(priv, chan);
2372 
2373 	for (chan = 0; chan < tx_channels_count; chan++)
2374 		stmmac_stop_tx_dma(priv, chan);
2375 }
2376 
2377 /**
2378  *  stmmac_dma_operation_mode - HW DMA operation mode
2379  *  @priv: driver private structure
2380  *  Description: it is used for configuring the DMA operation mode register in
2381  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2382  */
2383 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2384 {
2385 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2386 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2387 	int rxfifosz = priv->plat->rx_fifo_size;
2388 	int txfifosz = priv->plat->tx_fifo_size;
2389 	u32 txmode = 0;
2390 	u32 rxmode = 0;
2391 	u32 chan = 0;
2392 	u8 qmode = 0;
2393 
2394 	if (rxfifosz == 0)
2395 		rxfifosz = priv->dma_cap.rx_fifo_size;
2396 	if (txfifosz == 0)
2397 		txfifosz = priv->dma_cap.tx_fifo_size;
2398 
2399 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2400 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2401 		rxfifosz /= rx_channels_count;
2402 		txfifosz /= tx_channels_count;
2403 	}
2404 
2405 	if (priv->plat->force_thresh_dma_mode) {
2406 		txmode = tc;
2407 		rxmode = tc;
2408 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2409 		/*
2410 		 * In case of GMAC, SF mode can be enabled
2411 		 * to perform the TX COE in HW. This depends on:
2412 		 * 1) TX COE being actually supported
2413 		 * 2) there being no buggy Jumbo frame support
2414 		 *    that requires not inserting the csum in the TDES.
2415 		 */
2416 		txmode = SF_DMA_MODE;
2417 		rxmode = SF_DMA_MODE;
2418 		priv->xstats.threshold = SF_DMA_MODE;
2419 	} else {
2420 		txmode = tc;
2421 		rxmode = SF_DMA_MODE;
2422 	}
2423 
2424 	/* configure all channels */
2425 	for (chan = 0; chan < rx_channels_count; chan++) {
2426 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2427 		u32 buf_size;
2428 
2429 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2430 
2431 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2432 				rxfifosz, qmode);
2433 
2434 		if (rx_q->xsk_pool) {
2435 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2436 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2437 					      buf_size,
2438 					      chan);
2439 		} else {
2440 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2441 					      priv->dma_conf.dma_buf_sz,
2442 					      chan);
2443 		}
2444 	}
2445 
2446 	for (chan = 0; chan < tx_channels_count; chan++) {
2447 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2448 
2449 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2450 				txfifosz, qmode);
2451 	}
2452 }
2453 
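/* AF_XDP TX metadata support: the callbacks below back the
 * xsk_tx_metadata_ops used to request/retrieve TX timestamps and to program
 * the launch time (TBS) for frames coming from an XSK pool.
 */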
2454 static void stmmac_xsk_request_timestamp(void *_priv)
2455 {
2456 	struct stmmac_metadata_request *meta_req = _priv;
2457 
2458 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2459 	*meta_req->set_ic = true;
2460 }
2461 
2462 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2463 {
2464 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2465 	struct stmmac_priv *priv = tx_compl->priv;
2466 	struct dma_desc *desc = tx_compl->desc;
2467 	bool found = false;
2468 	u64 ns = 0;
2469 
2470 	if (!priv->hwts_tx_en)
2471 		return 0;
2472 
2473 	/* check tx tstamp status */
2474 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2475 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2476 		found = true;
2477 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2478 		found = true;
2479 	}
2480 
2481 	if (found) {
2482 		ns -= priv->plat->cdc_error_adj;
2483 		return ns_to_ktime(ns);
2484 	}
2485 
2486 	return 0;
2487 }
2488 
2489 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2490 {
2491 	struct timespec64 ts = ns_to_timespec64(launch_time);
2492 	struct stmmac_metadata_request *meta_req = _priv;
2493 
2494 	if (meta_req->tbs & STMMAC_TBS_EN)
2495 		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2496 				    ts.tv_nsec);
2497 }
2498 
2499 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2500 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2501 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2502 	.tmo_request_launch_time	= stmmac_xsk_request_launch_time,
2503 };
2504 
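/**
 * stmmac_xdp_xmit_zc - transmit pending AF_XDP (zero-copy) descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: peek descriptors from the XSK pool and map them onto free TX
 * descriptors, sharing the ring with the slow path.
 * Return: true when the budget is not exhausted and no XSK descriptor is
 * left pending, false otherwise.
 */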
2505 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2506 {
2507 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2508 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2509 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2510 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
2511 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2512 	unsigned int entry = tx_q->cur_tx;
2513 	struct dma_desc *tx_desc = NULL;
2514 	struct xdp_desc xdp_desc;
2515 	bool work_done = true;
2516 	u32 tx_set_ic_bit = 0;
2517 
2518 	/* Avoids TX time-out as we are sharing with slow path */
2519 	txq_trans_cond_update(nq);
2520 
2521 	budget = min(budget, stmmac_tx_avail(priv, queue));
2522 
2523 	for (; budget > 0; budget--) {
2524 		struct stmmac_metadata_request meta_req;
2525 		struct xsk_tx_metadata *meta = NULL;
2526 		dma_addr_t dma_addr;
2527 		bool set_ic;
2528 
2529 		/* We share the ring with the slow path, so stop XSK TX desc
2530 		 * submission when the available TX ring space drops below the threshold.
2531 		 */
2532 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2533 		    !netif_carrier_ok(priv->dev)) {
2534 			work_done = false;
2535 			break;
2536 		}
2537 
2538 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2539 			break;
2540 
2541 		if (priv->est && priv->est->enable &&
2542 		    priv->est->max_sdu[queue] &&
2543 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2544 			priv->xstats.max_sdu_txq_drop[queue]++;
2545 			continue;
2546 		}
2547 
2548 		if (likely(priv->extend_desc))
2549 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2550 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2551 			tx_desc = &tx_q->dma_entx[entry].basic;
2552 		else
2553 			tx_desc = tx_q->dma_tx + entry;
2554 
2555 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2556 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2557 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2558 
2559 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2560 
2561 		/* To return the XDP buffer to the XSK pool, we simply call
2562 		 * xsk_tx_completed(), so we don't need to fill up
2563 		 * 'buf' and 'xdpf'.
2564 		 */
2565 		tx_q->tx_skbuff_dma[entry].buf = 0;
2566 		tx_q->xdpf[entry] = NULL;
2567 
2568 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2569 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2570 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2571 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2572 
2573 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2574 
2575 		tx_q->tx_count_frames++;
2576 
2577 		if (!priv->tx_coal_frames[queue])
2578 			set_ic = false;
2579 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2580 			set_ic = true;
2581 		else
2582 			set_ic = false;
2583 
2584 		meta_req.priv = priv;
2585 		meta_req.tx_desc = tx_desc;
2586 		meta_req.set_ic = &set_ic;
2587 		meta_req.tbs = tx_q->tbs;
2588 		meta_req.edesc = &tx_q->dma_entx[entry];
2589 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2590 					&meta_req);
2591 		if (set_ic) {
2592 			tx_q->tx_count_frames = 0;
2593 			stmmac_set_tx_ic(priv, tx_desc);
2594 			tx_set_ic_bit++;
2595 		}
2596 
2597 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2598 				       csum, priv->mode, true, true,
2599 				       xdp_desc.len);
2600 
2601 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2602 
2603 		xsk_tx_metadata_to_compl(meta,
2604 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2605 
2606 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2607 		entry = tx_q->cur_tx;
2608 	}
2609 	u64_stats_update_begin(&txq_stats->napi_syncp);
2610 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2611 	u64_stats_update_end(&txq_stats->napi_syncp);
2612 
2613 	if (tx_desc) {
2614 		stmmac_flush_tx_descriptors(priv, queue);
2615 		xsk_tx_release(pool);
2616 	}
2617 
2618 	/* Return true if both of the following conditions are met
2619 	 *  a) TX Budget is still available
2620 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2621 	 *     pending XSK TX for transmission)
2622 	 */
2623 	return !!budget && work_done;
2624 }
2625 
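/* Raise the DMA TX threshold in steps of 64 in response to a TX failure,
 * unless the channel already runs in Store-And-Forward mode.
 */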
2626 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2627 {
2628 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2629 		tc += 64;
2630 
2631 		if (priv->plat->force_thresh_dma_mode)
2632 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2633 		else
2634 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2635 						      chan);
2636 
2637 		priv->xstats.threshold = tc;
2638 	}
2639 }
2640 
2641 /**
2642  * stmmac_tx_clean - to manage the transmission completion
2643  * @priv: driver private structure
2644  * @budget: napi budget limiting this function's packet handling
2645  * @queue: TX queue index
2646  * @pending_packets: signal to arm the TX coal timer
2647  * Description: it reclaims the transmit resources after transmission completes.
2648  * If some packets still need to be handled, due to TX coalescing, set
2649  * pending_packets to true to make NAPI arm the TX coal timer.
2650  */
2651 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2652 			   bool *pending_packets)
2653 {
2654 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2655 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2656 	unsigned int bytes_compl = 0, pkts_compl = 0;
2657 	unsigned int entry, xmits = 0, count = 0;
2658 	u32 tx_packets = 0, tx_errors = 0;
2659 
2660 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2661 
2662 	tx_q->xsk_frames_done = 0;
2663 
2664 	entry = tx_q->dirty_tx;
2665 
2666 	/* Try to clean all TX complete frame in 1 shot */
2667 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2668 		struct xdp_frame *xdpf;
2669 		struct sk_buff *skb;
2670 		struct dma_desc *p;
2671 		int status;
2672 
2673 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2674 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2675 			xdpf = tx_q->xdpf[entry];
2676 			skb = NULL;
2677 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2678 			xdpf = NULL;
2679 			skb = tx_q->tx_skbuff[entry];
2680 		} else {
2681 			xdpf = NULL;
2682 			skb = NULL;
2683 		}
2684 
2685 		if (priv->extend_desc)
2686 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2687 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2688 			p = &tx_q->dma_entx[entry].basic;
2689 		else
2690 			p = tx_q->dma_tx + entry;
2691 
2692 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2693 		/* Check if the descriptor is owned by the DMA */
2694 		if (unlikely(status & tx_dma_own))
2695 			break;
2696 
2697 		count++;
2698 
2699 		/* Make sure descriptor fields are read after reading
2700 		 * the own bit.
2701 		 */
2702 		dma_rmb();
2703 
2704 		/* Just consider the last segment and ...*/
2705 		if (likely(!(status & tx_not_ls))) {
2706 			/* ... verify the status error condition */
2707 			if (unlikely(status & tx_err)) {
2708 				tx_errors++;
2709 				if (unlikely(status & tx_err_bump_tc))
2710 					stmmac_bump_dma_threshold(priv, queue);
2711 			} else {
2712 				tx_packets++;
2713 			}
2714 			if (skb) {
2715 				stmmac_get_tx_hwtstamp(priv, p, skb);
2716 			} else if (tx_q->xsk_pool &&
2717 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2718 				struct stmmac_xsk_tx_complete tx_compl = {
2719 					.priv = priv,
2720 					.desc = p,
2721 				};
2722 
2723 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2724 							 &stmmac_xsk_tx_metadata_ops,
2725 							 &tx_compl);
2726 			}
2727 		}
2728 
2729 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2730 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2731 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2732 				dma_unmap_page(priv->device,
2733 					       tx_q->tx_skbuff_dma[entry].buf,
2734 					       tx_q->tx_skbuff_dma[entry].len,
2735 					       DMA_TO_DEVICE);
2736 			else
2737 				dma_unmap_single(priv->device,
2738 						 tx_q->tx_skbuff_dma[entry].buf,
2739 						 tx_q->tx_skbuff_dma[entry].len,
2740 						 DMA_TO_DEVICE);
2741 			tx_q->tx_skbuff_dma[entry].buf = 0;
2742 			tx_q->tx_skbuff_dma[entry].len = 0;
2743 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2744 		}
2745 
2746 		stmmac_clean_desc3(priv, tx_q, p);
2747 
2748 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2749 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2750 
2751 		if (xdpf &&
2752 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2753 			xdp_return_frame_rx_napi(xdpf);
2754 			tx_q->xdpf[entry] = NULL;
2755 		}
2756 
2757 		if (xdpf &&
2758 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2759 			xdp_return_frame(xdpf);
2760 			tx_q->xdpf[entry] = NULL;
2761 		}
2762 
2763 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2764 			tx_q->xsk_frames_done++;
2765 
2766 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2767 			if (likely(skb)) {
2768 				pkts_compl++;
2769 				bytes_compl += skb->len;
2770 				dev_consume_skb_any(skb);
2771 				tx_q->tx_skbuff[entry] = NULL;
2772 			}
2773 		}
2774 
2775 		stmmac_release_tx_desc(priv, p, priv->mode);
2776 
2777 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2778 	}
2779 	tx_q->dirty_tx = entry;
2780 
2781 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2782 				  pkts_compl, bytes_compl);
2783 
2784 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2785 								queue))) &&
2786 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2787 
2788 		netif_dbg(priv, tx_done, priv->dev,
2789 			  "%s: restart transmit\n", __func__);
2790 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2791 	}
2792 
2793 	if (tx_q->xsk_pool) {
2794 		bool work_done;
2795 
2796 		if (tx_q->xsk_frames_done)
2797 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2798 
2799 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2800 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2801 
2802 		/* For XSK TX, we try to send as many frames as possible.
2803 		 * If the XSK work is done (XSK TX desc empty and budget still
2804 		 * available), return "budget - 1" to re-enable the TX IRQ.
2805 		 * Else, return "budget" to make NAPI continue polling.
2806 		 */
2807 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2808 					       STMMAC_XSK_TX_BUDGET_MAX);
2809 		if (work_done)
2810 			xmits = budget - 1;
2811 		else
2812 			xmits = budget;
2813 	}
2814 
2815 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2816 		stmmac_restart_sw_lpi_timer(priv);
2817 
2818 	/* We still have pending packets, let's call for a new scheduling */
2819 	if (tx_q->dirty_tx != tx_q->cur_tx)
2820 		*pending_packets = true;
2821 
2822 	u64_stats_update_begin(&txq_stats->napi_syncp);
2823 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2824 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2825 	u64_stats_inc(&txq_stats->napi.tx_clean);
2826 	u64_stats_update_end(&txq_stats->napi_syncp);
2827 
2828 	priv->xstats.tx_errors += tx_errors;
2829 
2830 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2831 
2832 	/* Combine decisions from TX clean and XSK TX */
2833 	return max(count, xmits);
2834 }
2835 
2836 /**
2837  * stmmac_tx_err - to manage the tx error
2838  * @priv: driver private structure
2839  * @chan: channel index
2840  * Description: it cleans the descriptors and restarts the transmission
2841  * in case of transmission errors.
2842  */
2843 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2844 {
2845 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2846 
2847 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2848 
2849 	stmmac_stop_tx_dma(priv, chan);
2850 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2851 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2852 	stmmac_reset_tx_queue(priv, chan);
2853 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2854 			    tx_q->dma_tx_phy, chan);
2855 	stmmac_start_tx_dma(priv, chan);
2856 
2857 	priv->xstats.tx_errors++;
2858 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2859 }
2860 
2861 /**
2862  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2863  *  @priv: driver private structure
2864  *  @txmode: TX operating mode
2865  *  @rxmode: RX operating mode
2866  *  @chan: channel index
2867  *  Description: it is used for configuring the DMA operation mode at
2868  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2869  *  mode.
2870  */
2871 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2872 					  u32 rxmode, u32 chan)
2873 {
2874 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2875 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2876 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2877 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2878 	int rxfifosz = priv->plat->rx_fifo_size;
2879 	int txfifosz = priv->plat->tx_fifo_size;
2880 
2881 	if (rxfifosz == 0)
2882 		rxfifosz = priv->dma_cap.rx_fifo_size;
2883 	if (txfifosz == 0)
2884 		txfifosz = priv->dma_cap.tx_fifo_size;
2885 
2886 	/* Adjust for real per queue fifo size */
2887 	rxfifosz /= rx_channels_count;
2888 	txfifosz /= tx_channels_count;
2889 
2890 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2891 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2892 }
2893 
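/* Check the safety feature interrupt status; any reported error triggers
 * the global error recovery and makes this return true.
 */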
2894 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2895 {
2896 	int ret;
2897 
2898 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2899 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2900 	if (ret && (ret != -EINVAL)) {
2901 		stmmac_global_err(priv);
2902 		return true;
2903 	}
2904 
2905 	return false;
2906 }
2907 
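/* Read the DMA interrupt status of a channel and schedule the RX and/or TX
 * NAPI instance, disabling the corresponding DMA interrupts until the poll
 * routine has run.
 */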
2908 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2909 {
2910 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2911 						 &priv->xstats, chan, dir);
2912 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2913 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2914 	struct stmmac_channel *ch = &priv->channel[chan];
2915 	struct napi_struct *rx_napi;
2916 	struct napi_struct *tx_napi;
2917 	unsigned long flags;
2918 
2919 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2920 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2921 
2922 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2923 		if (napi_schedule_prep(rx_napi)) {
2924 			spin_lock_irqsave(&ch->lock, flags);
2925 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2926 			spin_unlock_irqrestore(&ch->lock, flags);
2927 			__napi_schedule(rx_napi);
2928 		}
2929 	}
2930 
2931 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2932 		if (napi_schedule_prep(tx_napi)) {
2933 			spin_lock_irqsave(&ch->lock, flags);
2934 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2935 			spin_unlock_irqrestore(&ch->lock, flags);
2936 			__napi_schedule(tx_napi);
2937 		}
2938 	}
2939 
2940 	return status;
2941 }
2942 
2943 /**
2944  * stmmac_dma_interrupt - DMA ISR
2945  * @priv: driver private structure
2946  * Description: this is the DMA ISR. It is called by the main ISR.
2947  * It calls the dwmac dma routine and schedules the poll method in case
2948  * some work can be done.
2949  */
2950 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2951 {
2952 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2953 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2954 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2955 				tx_channel_count : rx_channel_count;
2956 	u32 chan;
2957 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2958 
2959 	/* Make sure we never check beyond our status buffer. */
2960 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2961 		channels_to_check = ARRAY_SIZE(status);
2962 
2963 	for (chan = 0; chan < channels_to_check; chan++)
2964 		status[chan] = stmmac_napi_check(priv, chan,
2965 						 DMA_DIR_RXTX);
2966 
2967 	for (chan = 0; chan < tx_channel_count; chan++) {
2968 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2969 			/* Try to bump up the dma threshold on this failure */
2970 			stmmac_bump_dma_threshold(priv, chan);
2971 		} else if (unlikely(status[chan] == tx_hard_error)) {
2972 			stmmac_tx_err(priv, chan);
2973 		}
2974 	}
2975 }
2976 
2977 /**
2978  * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2979  * @priv: driver private structure
2980  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2981  */
2982 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2983 {
2984 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2985 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2986 
2987 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2988 
2989 	if (priv->dma_cap.rmon) {
2990 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2991 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2992 	} else
2993 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2994 }
2995 
2996 /**
2997  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2998  * @priv: driver private structure
2999  * Description:
3000  *  new GMAC chip generations have a new register to indicate the
3001  *  presence of the optional features/functions.
3002  *  This can also be used to override the value passed through the
3003  *  platform and is necessary for old MAC10/100 and GMAC chips.
3004  */
3005 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3006 {
3007 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3008 }
3009 
3010 /**
3011  * stmmac_check_ether_addr - check if the MAC addr is valid
3012  * @priv: driver private structure
3013  * Description:
3014  * it verifies whether the MAC address is valid; in case of failure it
3015  * generates a random MAC address
3016  */
3017 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3018 {
3019 	u8 addr[ETH_ALEN];
3020 
3021 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3022 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3023 		if (is_valid_ether_addr(addr))
3024 			eth_hw_addr_set(priv->dev, addr);
3025 		else
3026 			eth_hw_addr_random(priv->dev);
3027 		dev_info(priv->device, "device MAC address %pM\n",
3028 			 priv->dev->dev_addr);
3029 	}
3030 }
3031 
3032 /**
3033  * stmmac_init_dma_engine - DMA init.
3034  * @priv: driver private structure
3035  * Description:
3036  * It inits the DMA by invoking the specific MAC/GMAC callback.
3037  * Some DMA parameters can be passed from the platform;
3038  * if these are not passed, a default is kept for the MAC or GMAC.
3039  */
3040 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3041 {
3042 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3043 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3044 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3045 	struct stmmac_rx_queue *rx_q;
3046 	struct stmmac_tx_queue *tx_q;
3047 	u32 chan = 0;
3048 	int ret = 0;
3049 
3050 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3051 		netdev_err(priv->dev, "Invalid DMA configuration\n");
3052 		return -EINVAL;
3053 	}
3054 
3055 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3056 		priv->plat->dma_cfg->atds = 1;
3057 
3058 	ret = stmmac_reset(priv, priv->ioaddr);
3059 	if (ret) {
3060 		netdev_err(priv->dev, "Failed to reset the dma\n");
3061 		return ret;
3062 	}
3063 
3064 	/* DMA Configuration */
3065 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3066 
3067 	if (priv->plat->axi)
3068 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3069 
3070 	/* DMA CSR Channel configuration */
3071 	for (chan = 0; chan < dma_csr_ch; chan++) {
3072 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3073 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3074 	}
3075 
3076 	/* DMA RX Channel Configuration */
3077 	for (chan = 0; chan < rx_channels_count; chan++) {
3078 		rx_q = &priv->dma_conf.rx_queue[chan];
3079 
3080 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3081 				    rx_q->dma_rx_phy, chan);
3082 
3083 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3084 				     (rx_q->buf_alloc_num *
3085 				      sizeof(struct dma_desc));
3086 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3087 				       rx_q->rx_tail_addr, chan);
3088 	}
3089 
3090 	/* DMA TX Channel Configuration */
3091 	for (chan = 0; chan < tx_channels_count; chan++) {
3092 		tx_q = &priv->dma_conf.tx_queue[chan];
3093 
3094 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3095 				    tx_q->dma_tx_phy, chan);
3096 
3097 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3098 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3099 				       tx_q->tx_tail_addr, chan);
3100 	}
3101 
3102 	return ret;
3103 }
3104 
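/* (Re)arm the TX coalescing timer of a queue, unless its NAPI instance is
 * already scheduled and will take care of the completions anyway.
 */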
3105 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3106 {
3107 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3108 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3109 	struct stmmac_channel *ch;
3110 	struct napi_struct *napi;
3111 
3112 	if (!tx_coal_timer)
3113 		return;
3114 
3115 	ch = &priv->channel[tx_q->queue_index];
3116 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3117 
3118 	/* Arm the timer only if napi is not already scheduled.
3119 	 * If napi is scheduled, try to cancel any pending timer; it will be
3120 	 * armed again in the next scheduled napi.
3121 	 */
3122 	if (unlikely(!napi_is_scheduled(napi)))
3123 		hrtimer_start(&tx_q->txtimer,
3124 			      STMMAC_COAL_TIMER(tx_coal_timer),
3125 			      HRTIMER_MODE_REL);
3126 	else
3127 		hrtimer_try_to_cancel(&tx_q->txtimer);
3128 }
3129 
3130 /**
3131  * stmmac_tx_timer - mitigation sw timer for tx.
3132  * @t: hrtimer pointer embedded in the TX queue
3133  * Description:
3134  * This is the timer handler to directly invoke the stmmac_tx_clean.
3135  */
3136 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3137 {
3138 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3139 	struct stmmac_priv *priv = tx_q->priv_data;
3140 	struct stmmac_channel *ch;
3141 	struct napi_struct *napi;
3142 
3143 	ch = &priv->channel[tx_q->queue_index];
3144 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3145 
3146 	if (likely(napi_schedule_prep(napi))) {
3147 		unsigned long flags;
3148 
3149 		spin_lock_irqsave(&ch->lock, flags);
3150 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3151 		spin_unlock_irqrestore(&ch->lock, flags);
3152 		__napi_schedule(napi);
3153 	}
3154 
3155 	return HRTIMER_NORESTART;
3156 }
3157 
3158 /**
3159  * stmmac_init_coalesce - init mitigation options.
3160  * @priv: driver private structure
3161  * Description:
3162  * This initializes the coalesce parameters: i.e. timer rate,
3163  * timer handler and default threshold used for enabling the
3164  * interrupt on completion bit.
3165  */
3166 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3167 {
3168 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3169 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3170 	u32 chan;
3171 
3172 	for (chan = 0; chan < tx_channel_count; chan++) {
3173 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3174 
3175 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3176 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3177 
3178 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3179 	}
3180 
3181 	for (chan = 0; chan < rx_channel_count; chan++)
3182 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3183 }
3184 
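/* Program the RX and TX descriptor ring lengths into every DMA channel */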
3185 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3186 {
3187 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3188 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3189 	u32 chan;
3190 
3191 	/* set TX ring length */
3192 	for (chan = 0; chan < tx_channels_count; chan++)
3193 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3194 				       (priv->dma_conf.dma_tx_size - 1), chan);
3195 
3196 	/* set RX ring length */
3197 	for (chan = 0; chan < rx_channels_count; chan++)
3198 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3199 				       (priv->dma_conf.dma_rx_size - 1), chan);
3200 }
3201 
3202 /**
3203  *  stmmac_set_tx_queue_weight - Set TX queue weight
3204  *  @priv: driver private structure
3205  *  Description: It is used for setting the TX queue weights
3206  */
3207 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3208 {
3209 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3210 	u32 weight;
3211 	u32 queue;
3212 
3213 	for (queue = 0; queue < tx_queues_count; queue++) {
3214 		weight = priv->plat->tx_queues_cfg[queue].weight;
3215 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3216 	}
3217 }
3218 
3219 /**
3220  *  stmmac_configure_cbs - Configure CBS in TX queue
3221  *  @priv: driver private structure
3222  *  Description: It is used for configuring CBS in AVB TX queues
3223  */
3224 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3225 {
3226 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3227 	u32 mode_to_use;
3228 	u32 queue;
3229 
3230 	/* queue 0 is reserved for legacy traffic */
3231 	for (queue = 1; queue < tx_queues_count; queue++) {
3232 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3233 		if (mode_to_use == MTL_QUEUE_DCB)
3234 			continue;
3235 
3236 		stmmac_config_cbs(priv, priv->hw,
3237 				priv->plat->tx_queues_cfg[queue].send_slope,
3238 				priv->plat->tx_queues_cfg[queue].idle_slope,
3239 				priv->plat->tx_queues_cfg[queue].high_credit,
3240 				priv->plat->tx_queues_cfg[queue].low_credit,
3241 				queue);
3242 	}
3243 }
3244 
3245 /**
3246  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3247  *  @priv: driver private structure
3248  *  Description: It is used for mapping RX queues to RX dma channels
3249  */
3250 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3251 {
3252 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3253 	u32 queue;
3254 	u32 chan;
3255 
3256 	for (queue = 0; queue < rx_queues_count; queue++) {
3257 		chan = priv->plat->rx_queues_cfg[queue].chan;
3258 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3259 	}
3260 }
3261 
3262 /**
3263  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3264  *  @priv: driver private structure
3265  *  Description: It is used for configuring the RX Queue Priority
3266  */
3267 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3268 {
3269 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3270 	u32 queue;
3271 	u32 prio;
3272 
3273 	for (queue = 0; queue < rx_queues_count; queue++) {
3274 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3275 			continue;
3276 
3277 		prio = priv->plat->rx_queues_cfg[queue].prio;
3278 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3279 	}
3280 }
3281 
3282 /**
3283  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3284  *  @priv: driver private structure
3285  *  Description: It is used for configuring the TX Queue Priority
3286  */
3287 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3288 {
3289 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3290 	u32 queue;
3291 	u32 prio;
3292 
3293 	for (queue = 0; queue < tx_queues_count; queue++) {
3294 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3295 			continue;
3296 
3297 		prio = priv->plat->tx_queues_cfg[queue].prio;
3298 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3299 	}
3300 }
3301 
3302 /**
3303  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3304  *  @priv: driver private structure
3305  *  Description: It is used for configuring the RX queue routing
3306  */
3307 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3308 {
3309 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3310 	u32 queue;
3311 	u8 packet;
3312 
3313 	for (queue = 0; queue < rx_queues_count; queue++) {
3314 		/* no specific packet type routing specified for the queue */
3315 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3316 			continue;
3317 
3318 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3319 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3320 	}
3321 }
3322 
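/* Decide whether RSS can be used (hardware capability, platform flag and
 * NETIF_F_RXHASH) and program the RSS configuration accordingly.
 */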
3323 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3324 {
3325 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3326 		priv->rss.enable = false;
3327 		return;
3328 	}
3329 
3330 	if (priv->dev->features & NETIF_F_RXHASH)
3331 		priv->rss.enable = true;
3332 	else
3333 		priv->rss.enable = false;
3334 
3335 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3336 			     priv->plat->rx_queues_to_use);
3337 }
3338 
3339 /**
3340  *  stmmac_mtl_configuration - Configure MTL
3341  *  @priv: driver private structure
3342  *  Description: It is used for configuring MTL
3343  */
3344 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3345 {
3346 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3347 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3348 
3349 	if (tx_queues_count > 1)
3350 		stmmac_set_tx_queue_weight(priv);
3351 
3352 	/* Configure MTL RX algorithms */
3353 	if (rx_queues_count > 1)
3354 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3355 				priv->plat->rx_sched_algorithm);
3356 
3357 	/* Configure MTL TX algorithms */
3358 	if (tx_queues_count > 1)
3359 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3360 				priv->plat->tx_sched_algorithm);
3361 
3362 	/* Configure CBS in AVB TX queues */
3363 	if (tx_queues_count > 1)
3364 		stmmac_configure_cbs(priv);
3365 
3366 	/* Map RX MTL to DMA channels */
3367 	stmmac_rx_queue_dma_chan_map(priv);
3368 
3369 	/* Enable MAC RX Queues */
3370 	stmmac_mac_enable_rx_queues(priv);
3371 
3372 	/* Set RX priorities */
3373 	if (rx_queues_count > 1)
3374 		stmmac_mac_config_rx_queues_prio(priv);
3375 
3376 	/* Set TX priorities */
3377 	if (tx_queues_count > 1)
3378 		stmmac_mac_config_tx_queues_prio(priv);
3379 
3380 	/* Set RX routing */
3381 	if (rx_queues_count > 1)
3382 		stmmac_mac_config_rx_queues_routing(priv);
3383 
3384 	/* Receive Side Scaling */
3385 	if (rx_queues_count > 1)
3386 		stmmac_mac_config_rss(priv);
3387 }
3388 
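/* Configure the HW safety features (Automotive Safety Package) when the
 * capability register advertises support for them.
 */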
3389 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3390 {
3391 	if (priv->dma_cap.asp) {
3392 		netdev_info(priv->dev, "Enabling Safety Features\n");
3393 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3394 					  priv->plat->safety_feat_cfg);
3395 	} else {
3396 		netdev_info(priv->dev, "No Safety Features support found\n");
3397 	}
3398 }
3399 
3400 /**
3401  * stmmac_hw_setup - setup mac in a usable state.
3402  *  @dev : pointer to the device structure.
3404  *  Description:
3405  *  this is the main function to setup the HW in a usable state: the
3406  *  dma engine is reset, the core registers are configured (e.g. AXI,
3407  *  Checksum features, timers) and the DMA is ready to start receiving
3408  *  and transmitting.
3409  *  Return value:
3410  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3411  *  file on failure.
3412  */
3413 static int stmmac_hw_setup(struct net_device *dev)
3414 {
3415 	struct stmmac_priv *priv = netdev_priv(dev);
3416 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3417 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3418 	bool sph_en;
3419 	u32 chan;
3420 	int ret;
3421 
3422 	/* Make sure RX clock is enabled */
3423 	if (priv->hw->phylink_pcs)
3424 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3425 
3426 	/* Note that clk_rx_i must be running for reset to complete. This
3427 	 * clock may also be required when setting the MAC address.
3428 	 *
3429 	 * Block the receive clock stop for LPI mode at the PHY in case
3430 	 * the link is established with EEE mode active.
3431 	 */
3432 	phylink_rx_clk_stop_block(priv->phylink);
3433 
3434 	/* DMA initialization and SW reset */
3435 	ret = stmmac_init_dma_engine(priv);
3436 	if (ret < 0) {
3437 		phylink_rx_clk_stop_unblock(priv->phylink);
3438 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3439 			   __func__);
3440 		return ret;
3441 	}
3442 
3443 	/* Copy the MAC addr into the HW  */
3444 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3445 	phylink_rx_clk_stop_unblock(priv->phylink);
3446 
3447 	/* PS and related bits will be programmed according to the speed */
3448 	if (priv->hw->pcs) {
3449 		int speed = priv->plat->mac_port_sel_speed;
3450 
3451 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3452 		    (speed == SPEED_1000)) {
3453 			priv->hw->ps = speed;
3454 		} else {
3455 			dev_warn(priv->device, "invalid port speed\n");
3456 			priv->hw->ps = 0;
3457 		}
3458 	}
3459 
3460 	/* Initialize the MAC Core */
3461 	stmmac_core_init(priv, priv->hw, dev);
3462 
3463 	/* Initialize MTL */
3464 	stmmac_mtl_configuration(priv);
3465 
3466 	/* Initialize Safety Features */
3467 	stmmac_safety_feat_configuration(priv);
3468 
3469 	ret = stmmac_rx_ipc(priv, priv->hw);
3470 	if (!ret) {
3471 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3472 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3473 		priv->hw->rx_csum = 0;
3474 	}
3475 
3476 	/* Enable the MAC Rx/Tx */
3477 	stmmac_mac_set(priv, priv->ioaddr, true);
3478 
3479 	/* Set the HW DMA mode and the COE */
3480 	stmmac_dma_operation_mode(priv);
3481 
3482 	stmmac_mmc_setup(priv);
3483 
3484 	if (priv->use_riwt) {
3485 		u32 queue;
3486 
3487 		for (queue = 0; queue < rx_cnt; queue++) {
3488 			if (!priv->rx_riwt[queue])
3489 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3490 
3491 			stmmac_rx_watchdog(priv, priv->ioaddr,
3492 					   priv->rx_riwt[queue], queue);
3493 		}
3494 	}
3495 
3496 	if (priv->hw->pcs)
3497 		stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
3498 
3499 	/* set TX and RX rings length */
3500 	stmmac_set_rings_length(priv);
3501 
3502 	/* Enable TSO */
3503 	if (priv->tso) {
3504 		for (chan = 0; chan < tx_cnt; chan++) {
3505 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3506 
3507 			/* TSO and TBS cannot co-exist */
3508 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3509 				continue;
3510 
3511 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3512 		}
3513 	}
3514 
3515 	/* Enable Split Header */
3516 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3517 	for (chan = 0; chan < rx_cnt; chan++)
3518 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3519 
3521 	/* VLAN Tag Insertion */
3522 	if (priv->dma_cap.vlins)
3523 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3524 
3525 	/* TBS */
3526 	for (chan = 0; chan < tx_cnt; chan++) {
3527 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3528 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3529 
3530 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3531 	}
3532 
3533 	/* Configure real RX and TX queues */
3534 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3535 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3536 
3537 	/* Start the ball rolling... */
3538 	stmmac_start_all_dma(priv);
3539 
3540 	phylink_rx_clk_stop_block(priv->phylink);
3541 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3542 	phylink_rx_clk_stop_unblock(priv->phylink);
3543 
3544 	return 0;
3545 }
3546 
3547 static void stmmac_free_irq(struct net_device *dev,
3548 			    enum request_irq_err irq_err, int irq_idx)
3549 {
3550 	struct stmmac_priv *priv = netdev_priv(dev);
3551 	int j;
3552 
3553 	switch (irq_err) {
3554 	case REQ_IRQ_ERR_ALL:
3555 		irq_idx = priv->plat->tx_queues_to_use;
3556 		fallthrough;
3557 	case REQ_IRQ_ERR_TX:
3558 		for (j = irq_idx - 1; j >= 0; j--) {
3559 			if (priv->tx_irq[j] > 0) {
3560 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3561 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3562 			}
3563 		}
3564 		irq_idx = priv->plat->rx_queues_to_use;
3565 		fallthrough;
3566 	case REQ_IRQ_ERR_RX:
3567 		for (j = irq_idx - 1; j >= 0; j--) {
3568 			if (priv->rx_irq[j] > 0) {
3569 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3570 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3571 			}
3572 		}
3573 
3574 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3575 			free_irq(priv->sfty_ue_irq, dev);
3576 		fallthrough;
3577 	case REQ_IRQ_ERR_SFTY_UE:
3578 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3579 			free_irq(priv->sfty_ce_irq, dev);
3580 		fallthrough;
3581 	case REQ_IRQ_ERR_SFTY_CE:
3582 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3583 			free_irq(priv->lpi_irq, dev);
3584 		fallthrough;
3585 	case REQ_IRQ_ERR_LPI:
3586 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3587 			free_irq(priv->wol_irq, dev);
3588 		fallthrough;
3589 	case REQ_IRQ_ERR_SFTY:
3590 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3591 			free_irq(priv->sfty_irq, dev);
3592 		fallthrough;
3593 	case REQ_IRQ_ERR_WOL:
3594 		free_irq(dev->irq, dev);
3595 		fallthrough;
3596 	case REQ_IRQ_ERR_MAC:
3597 	case REQ_IRQ_ERR_NO:
3598 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3599 		break;
3600 	}
3601 }
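
/* Illustrative sketch (not part of the driver): stmmac_free_irq() above uses
 * the "start at the failure point and fall through" idiom so that one
 * function can unwind both a full teardown (REQ_IRQ_ERR_ALL) and any partial
 * request_irq() failure. A minimal generic form, with hypothetical step
 * names, looks like this:
 */
enum example_unwind_point { EX_ERR_ALL, EX_ERR_STEP2, EX_ERR_STEP1, EX_ERR_NONE };

static inline void example_unwind(enum example_unwind_point err)
{
	switch (err) {
	case EX_ERR_ALL:
	case EX_ERR_STEP2:
		/* undo step 2 here */
		fallthrough;
	case EX_ERR_STEP1:
		/* undo step 1 here */
		fallthrough;
	case EX_ERR_NONE:
		break;
	}
}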
3602 
3603 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3604 {
3605 	struct stmmac_priv *priv = netdev_priv(dev);
3606 	enum request_irq_err irq_err;
3607 	int irq_idx = 0;
3608 	char *int_name;
3609 	int ret;
3610 	int i;
3611 
3612 	/* For common interrupt */
3613 	int_name = priv->int_name_mac;
3614 	sprintf(int_name, "%s:%s", dev->name, "mac");
3615 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3616 			  0, int_name, dev);
3617 	if (unlikely(ret < 0)) {
3618 		netdev_err(priv->dev,
3619 			   "%s: alloc mac MSI %d (error: %d)\n",
3620 			   __func__, dev->irq, ret);
3621 		irq_err = REQ_IRQ_ERR_MAC;
3622 		goto irq_error;
3623 	}
3624 
3625 	/* Request the Wake IRQ in case another line is
3626 	 * used for WoL
3627 	 */
3628 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3629 		int_name = priv->int_name_wol;
3630 		sprintf(int_name, "%s:%s", dev->name, "wol");
3631 		ret = request_irq(priv->wol_irq,
3632 				  stmmac_mac_interrupt,
3633 				  0, int_name, dev);
3634 		if (unlikely(ret < 0)) {
3635 			netdev_err(priv->dev,
3636 				   "%s: alloc wol MSI %d (error: %d)\n",
3637 				   __func__, priv->wol_irq, ret);
3638 			irq_err = REQ_IRQ_ERR_WOL;
3639 			goto irq_error;
3640 		}
3641 	}
3642 
3643 	/* Request the LPI IRQ in case another line is
3644 	 * used for LPI
3645 	 */
3646 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3647 		int_name = priv->int_name_lpi;
3648 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3649 		ret = request_irq(priv->lpi_irq,
3650 				  stmmac_mac_interrupt,
3651 				  0, int_name, dev);
3652 		if (unlikely(ret < 0)) {
3653 			netdev_err(priv->dev,
3654 				   "%s: alloc lpi MSI %d (error: %d)\n",
3655 				   __func__, priv->lpi_irq, ret);
3656 			irq_err = REQ_IRQ_ERR_LPI;
3657 			goto irq_error;
3658 		}
3659 	}
3660 
3661 	/* Request the common Safety Feature Correctable/Uncorrectable
3662 	 * Error line in case another line is used
3663 	 */
3664 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3665 		int_name = priv->int_name_sfty;
3666 		sprintf(int_name, "%s:%s", dev->name, "safety");
3667 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3668 				  0, int_name, dev);
3669 		if (unlikely(ret < 0)) {
3670 			netdev_err(priv->dev,
3671 				   "%s: alloc sfty MSI %d (error: %d)\n",
3672 				   __func__, priv->sfty_irq, ret);
3673 			irq_err = REQ_IRQ_ERR_SFTY;
3674 			goto irq_error;
3675 		}
3676 	}
3677 
3678 	/* Request the Safety Feature Correctable Error line in
3679 	 * case another line is used
3680 	 */
3681 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3682 		int_name = priv->int_name_sfty_ce;
3683 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3684 		ret = request_irq(priv->sfty_ce_irq,
3685 				  stmmac_safety_interrupt,
3686 				  0, int_name, dev);
3687 		if (unlikely(ret < 0)) {
3688 			netdev_err(priv->dev,
3689 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3690 				   __func__, priv->sfty_ce_irq, ret);
3691 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3692 			goto irq_error;
3693 		}
3694 	}
3695 
3696 	/* Request the Safety Feature Uncorrectable Error line in
3697 	 * case another line is used
3698 	 */
3699 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3700 		int_name = priv->int_name_sfty_ue;
3701 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3702 		ret = request_irq(priv->sfty_ue_irq,
3703 				  stmmac_safety_interrupt,
3704 				  0, int_name, dev);
3705 		if (unlikely(ret < 0)) {
3706 			netdev_err(priv->dev,
3707 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3708 				   __func__, priv->sfty_ue_irq, ret);
3709 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3710 			goto irq_error;
3711 		}
3712 	}
3713 
3714 	/* Request Rx MSI irq */
3715 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3716 		if (i >= MTL_MAX_RX_QUEUES)
3717 			break;
3718 		if (priv->rx_irq[i] == 0)
3719 			continue;
3720 
3721 		int_name = priv->int_name_rx_irq[i];
3722 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3723 		ret = request_irq(priv->rx_irq[i],
3724 				  stmmac_msi_intr_rx,
3725 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3726 		if (unlikely(ret < 0)) {
3727 			netdev_err(priv->dev,
3728 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3729 				   __func__, i, priv->rx_irq[i], ret);
3730 			irq_err = REQ_IRQ_ERR_RX;
3731 			irq_idx = i;
3732 			goto irq_error;
3733 		}
3734 		irq_set_affinity_hint(priv->rx_irq[i],
3735 				      cpumask_of(i % num_online_cpus()));
3736 	}
3737 
3738 	/* Request Tx MSI irq */
3739 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3740 		if (i >= MTL_MAX_TX_QUEUES)
3741 			break;
3742 		if (priv->tx_irq[i] == 0)
3743 			continue;
3744 
3745 		int_name = priv->int_name_tx_irq[i];
3746 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3747 		ret = request_irq(priv->tx_irq[i],
3748 				  stmmac_msi_intr_tx,
3749 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3750 		if (unlikely(ret < 0)) {
3751 			netdev_err(priv->dev,
3752 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3753 				   __func__, i, priv->tx_irq[i], ret);
3754 			irq_err = REQ_IRQ_ERR_TX;
3755 			irq_idx = i;
3756 			goto irq_error;
3757 		}
3758 		irq_set_affinity_hint(priv->tx_irq[i],
3759 				      cpumask_of(i % num_online_cpus()));
3760 	}
3761 
3762 	return 0;
3763 
3764 irq_error:
3765 	stmmac_free_irq(dev, irq_err, irq_idx);
3766 	return ret;
3767 }
3768 
3769 static int stmmac_request_irq_single(struct net_device *dev)
3770 {
3771 	struct stmmac_priv *priv = netdev_priv(dev);
3772 	enum request_irq_err irq_err;
3773 	int ret;
3774 
3775 	ret = request_irq(dev->irq, stmmac_interrupt,
3776 			  IRQF_SHARED, dev->name, dev);
3777 	if (unlikely(ret < 0)) {
3778 		netdev_err(priv->dev,
3779 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3780 			   __func__, dev->irq, ret);
3781 		irq_err = REQ_IRQ_ERR_MAC;
3782 		goto irq_error;
3783 	}
3784 
3785 	/* Request the Wake IRQ in case another line is
3786 	 * used for WoL
3787 	 */
3788 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3789 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3790 				  IRQF_SHARED, dev->name, dev);
3791 		if (unlikely(ret < 0)) {
3792 			netdev_err(priv->dev,
3793 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3794 				   __func__, priv->wol_irq, ret);
3795 			irq_err = REQ_IRQ_ERR_WOL;
3796 			goto irq_error;
3797 		}
3798 	}
3799 
3800 	/* Request the LPI IRQ in case another line is used for LPI */
3801 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3802 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3803 				  IRQF_SHARED, dev->name, dev);
3804 		if (unlikely(ret < 0)) {
3805 			netdev_err(priv->dev,
3806 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3807 				   __func__, priv->lpi_irq, ret);
3808 			irq_err = REQ_IRQ_ERR_LPI;
3809 			goto irq_error;
3810 		}
3811 	}
3812 
3813 	/* Request the common Safety Feature Correctable/Uncorrectable
3814 	 * Error line in case another line is used
3815 	 */
3816 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3817 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3818 				  IRQF_SHARED, dev->name, dev);
3819 		if (unlikely(ret < 0)) {
3820 			netdev_err(priv->dev,
3821 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3822 				   __func__, priv->sfty_irq, ret);
3823 			irq_err = REQ_IRQ_ERR_SFTY;
3824 			goto irq_error;
3825 		}
3826 	}
3827 
3828 	return 0;
3829 
3830 irq_error:
3831 	stmmac_free_irq(dev, irq_err, 0);
3832 	return ret;
3833 }
3834 
3835 static int stmmac_request_irq(struct net_device *dev)
3836 {
3837 	struct stmmac_priv *priv = netdev_priv(dev);
3838 	int ret;
3839 
3840 	/* Request the IRQ lines */
3841 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3842 		ret = stmmac_request_irq_multi_msi(dev);
3843 	else
3844 		ret = stmmac_request_irq_single(dev);
3845 
3846 	return ret;
3847 }
3848 
3849 /**
3850  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate the DMA queues
3851  *  @priv: driver private structure
3852  *  @mtu: MTU to size the DMA queues and buffers for
3853  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3854  *  Allocate the TX/RX DMA queues and initialize them.
3855  *  Return value:
3856  *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3857  */
3858 static struct stmmac_dma_conf *
3859 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3860 {
3861 	struct stmmac_dma_conf *dma_conf;
3862 	int chan, bfsize, ret;
3863 
3864 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3865 	if (!dma_conf) {
3866 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3867 			   __func__);
3868 		return ERR_PTR(-ENOMEM);
3869 	}
3870 
3871 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3872 	if (bfsize < 0)
3873 		bfsize = 0;
3874 
3875 	if (bfsize < BUF_SIZE_16KiB)
3876 		bfsize = stmmac_set_bfsize(mtu, 0);
3877 
3878 	dma_conf->dma_buf_sz = bfsize;
3879 	/* Choose the TX/RX ring sizes from the ones already defined in the
3880 	 * priv struct, if any.
3881 	 */
3882 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3883 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3884 
3885 	if (!dma_conf->dma_tx_size)
3886 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3887 	if (!dma_conf->dma_rx_size)
3888 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3889 
3890 	/* Earlier check for TBS */
3891 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3892 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3893 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3894 
3895 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3896 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3897 	}
3898 
3899 	ret = alloc_dma_desc_resources(priv, dma_conf);
3900 	if (ret < 0) {
3901 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3902 			   __func__);
3903 		goto alloc_error;
3904 	}
3905 
3906 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3907 	if (ret < 0) {
3908 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3909 			   __func__);
3910 		goto init_error;
3911 	}
3912 
3913 	return dma_conf;
3914 
3915 init_error:
3916 	free_dma_desc_resources(priv, dma_conf);
3917 alloc_error:
3918 	kfree(dma_conf);
3919 	return ERR_PTR(ret);
3920 }
3921 
3922 /**
3923  *  __stmmac_open - open entry point of the driver
3924  *  @dev : pointer to the device structure.
3925  *  @dma_conf : DMA configuration to install into the driver
3926  *  Description:
3927  *  This function does the real work of opening the device.
3928  *  Return value:
3929  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3930  *  file on failure.
3931  */
3932 static int __stmmac_open(struct net_device *dev,
3933 			 struct stmmac_dma_conf *dma_conf)
3934 {
3935 	struct stmmac_priv *priv = netdev_priv(dev);
3936 	u32 chan;
3937 	int ret;
3938 
3939 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3940 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3941 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3942 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3943 
3944 	stmmac_reset_queues_param(priv);
3945 
3946 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3947 	    priv->plat->serdes_powerup) {
3948 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3949 		if (ret < 0) {
3950 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3951 				   __func__);
3952 			goto init_error;
3953 		}
3954 	}
3955 
3956 	ret = stmmac_hw_setup(dev);
3957 	if (ret < 0) {
3958 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3959 		goto init_error;
3960 	}
3961 
3962 	stmmac_setup_ptp(priv);
3963 
3964 	stmmac_init_coalesce(priv);
3965 
3966 	phylink_start(priv->phylink);
3967 	/* We may have called phylink_speed_down before */
3968 	phylink_speed_up(priv->phylink);
3969 
3970 	ret = stmmac_request_irq(dev);
3971 	if (ret)
3972 		goto irq_error;
3973 
3974 	stmmac_enable_all_queues(priv);
3975 	netif_tx_start_all_queues(priv->dev);
3976 	stmmac_enable_all_dma_irq(priv);
3977 
3978 	return 0;
3979 
3980 irq_error:
3981 	phylink_stop(priv->phylink);
3982 
3983 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3984 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3985 
3986 	stmmac_release_ptp(priv);
3987 init_error:
3988 	return ret;
3989 }
3990 
3991 static int stmmac_open(struct net_device *dev)
3992 {
3993 	struct stmmac_priv *priv = netdev_priv(dev);
3994 	struct stmmac_dma_conf *dma_conf;
3995 	int ret;
3996 
3997 	/* Initialise the tx lpi timer, converting from msec to usec */
3998 	if (!priv->tx_lpi_timer)
3999 		priv->tx_lpi_timer = eee_timer * 1000;
4000 
4001 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4002 	if (IS_ERR(dma_conf))
4003 		return PTR_ERR(dma_conf);
4004 
4005 	ret = pm_runtime_resume_and_get(priv->device);
4006 	if (ret < 0)
4007 		goto err_dma_resources;
4008 
4009 	ret = stmmac_init_phy(dev);
4010 	if (ret)
4011 		goto err_runtime_pm;
4012 
4013 	ret = __stmmac_open(dev, dma_conf);
4014 	if (ret)
4015 		goto err_disconnect_phy;
4016 
4017 	kfree(dma_conf);
4018 
4019 	return ret;
4020 
4021 err_disconnect_phy:
4022 	phylink_disconnect_phy(priv->phylink);
4023 err_runtime_pm:
4024 	pm_runtime_put(priv->device);
4025 err_dma_resources:
4026 	free_dma_desc_resources(priv, dma_conf);
4027 	kfree(dma_conf);
4028 	return ret;
4029 }
4030 
4031 static void __stmmac_release(struct net_device *dev)
4032 {
4033 	struct stmmac_priv *priv = netdev_priv(dev);
4034 	u32 chan;
4035 
4036 	/* If the PHY or MAC has WoL enabled, then the PHY will not be
4037 	 * suspended when phylink_stop() is called below. Set the PHY
4038 	 * to its slowest speed to save power.
4039 	 */
4040 	if (device_may_wakeup(priv->device))
4041 		phylink_speed_down(priv->phylink, false);
4042 
4043 	/* Stop and disconnect the PHY */
4044 	phylink_stop(priv->phylink);
4045 
4046 	stmmac_disable_all_queues(priv);
4047 
4048 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4049 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4050 
4051 	netif_tx_disable(dev);
4052 
4053 	/* Free the IRQ lines */
4054 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4055 
4056 	/* Stop TX/RX DMA and clear the descriptors */
4057 	stmmac_stop_all_dma(priv);
4058 
4059 	/* Release and free the Rx/Tx resources */
4060 	free_dma_desc_resources(priv, &priv->dma_conf);
4061 
4062 	/* Power down the SerDes if present */
4063 	if (priv->plat->serdes_powerdown)
4064 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4065 
4066 	stmmac_release_ptp(priv);
4067 
4068 	if (stmmac_fpe_supported(priv))
4069 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4070 }
4071 
4072 /**
4073  *  stmmac_release - close entry point of the driver
4074  *  @dev : device pointer.
4075  *  Description:
4076  *  This is the stop entry point of the driver.
4077  */
4078 static int stmmac_release(struct net_device *dev)
4079 {
4080 	struct stmmac_priv *priv = netdev_priv(dev);
4081 
4082 	__stmmac_release(dev);
4083 
4084 	phylink_disconnect_phy(priv->phylink);
4085 	pm_runtime_put(priv->device);
4086 
4087 	return 0;
4088 }
4089 
4090 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4091 			       struct stmmac_tx_queue *tx_q)
4092 {
4093 	u16 tag = 0x0, inner_tag = 0x0;
4094 	u32 inner_type = 0x0;
4095 	struct dma_desc *p;
4096 
4097 	if (!priv->dma_cap.vlins)
4098 		return false;
4099 	if (!skb_vlan_tag_present(skb))
4100 		return false;
4101 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4102 		inner_tag = skb_vlan_tag_get(skb);
4103 		inner_type = STMMAC_VLAN_INSERT;
4104 	}
4105 
4106 	tag = skb_vlan_tag_get(skb);
4107 
4108 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4109 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4110 	else
4111 		p = &tx_q->dma_tx[tx_q->cur_tx];
4112 
4113 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4114 		return false;
4115 
4116 	stmmac_set_tx_owner(priv, p);
4117 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4118 	return true;
4119 }
4120 
4121 /**
4122  *  stmmac_tso_allocator - fill TX descriptors for a TSO payload
4123  *  @priv: driver private structure
4124  *  @des: buffer start address
4125  *  @total_len: total length to fill in descriptors
4126  *  @last_segment: condition for the last descriptor
4127  *  @queue: TX queue index
4128  *  Description:
4129  *  This function fills one descriptor per TSO_MAX_BUFF_SIZE chunk of the
4130  *  buffer, consuming as many new descriptors as needed.
4131  */
4132 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4133 				 int total_len, bool last_segment, u32 queue)
4134 {
4135 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4136 	struct dma_desc *desc;
4137 	u32 buff_size;
4138 	int tmp_len;
4139 
4140 	tmp_len = total_len;
4141 
4142 	while (tmp_len > 0) {
4143 		dma_addr_t curr_addr;
4144 
4145 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4146 						priv->dma_conf.dma_tx_size);
4147 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4148 
4149 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4150 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4151 		else
4152 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4153 
4154 		curr_addr = des + (total_len - tmp_len);
4155 		stmmac_set_desc_addr(priv, desc, curr_addr);
4156 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4157 			    TSO_MAX_BUFF_SIZE : tmp_len;
4158 
4159 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4160 				0, 1,
4161 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4162 				0, 0);
4163 
4164 		tmp_len -= TSO_MAX_BUFF_SIZE;
4165 	}
4166 }
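
/* Illustrative sketch (not part of the driver): the loop above consumes at
 * most TSO_MAX_BUFF_SIZE bytes of payload per descriptor, so the number of
 * descriptors used for a buffer is the following (e.g. a 64 KiB payload with
 * TSO_MAX_BUFF_SIZE = 16383 takes 5 descriptors). The helper name is made up.
 */
static inline int stmmac_tso_desc_count_example(int total_len)
{
	return DIV_ROUND_UP(total_len, TSO_MAX_BUFF_SIZE);
}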
4167 
4168 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4169 {
4170 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4171 	int desc_size;
4172 
4173 	if (likely(priv->extend_desc))
4174 		desc_size = sizeof(struct dma_extended_desc);
4175 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4176 		desc_size = sizeof(struct dma_edesc);
4177 	else
4178 		desc_size = sizeof(struct dma_desc);
4179 
4180 	/* The own bit must be the last thing written when preparing the
4181 	 * descriptor, and a barrier is needed to make sure everything is
4182 	 * coherent before handing the descriptor to the DMA engine.
4183 	 */
4184 	wmb();
4185 
4186 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4187 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4188 }
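
/* Illustrative sketch (not part of the driver): the tail pointer programmed
 * above is simply the bus address of the descriptor slot that follows the
 * last one queued, i.e. ring base + index * descriptor size. The helper name
 * is made up.
 */
static inline dma_addr_t stmmac_tx_tail_addr_example(dma_addr_t dma_tx_phy,
						     unsigned int cur_tx,
						     int desc_size)
{
	return dma_tx_phy + (dma_addr_t)cur_tx * desc_size;
}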
4189 
4190 /**
4191  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4192  *  @skb : the socket buffer
4193  *  @dev : device pointer
4194  *  Description: this is the transmit function that is called on TSO frames
4195  *  (support available on GMAC4 and newer chips).
4196  *  The diagram below shows the ring programming in the case of TSO frames:
4197  *
4198  *  First Descriptor
4199  *   --------
4200  *   | DES0 |---> buffer1 = L2/L3/L4 header
4201  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4202  *   |      |     width is 32-bit, but we never use it.
4203  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4204  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4205  *   |      |     or 48-bit, and we always use it.
4206  *   | DES2 |---> buffer1 len
4207  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4208  *   --------
4209  *   --------
4210  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4211  *   | DES1 |---> same as the First Descriptor
4212  *   | DES2 |---> buffer1 len
4213  *   | DES3 |
4214  *   --------
4215  *	|
4216  *     ...
4217  *	|
4218  *   --------
4219  *   | DES0 |---> buffer1 = Split TCP Payload
4220  *   | DES1 |---> same as the First Descriptor
4221  *   | DES2 |---> buffer1 len
4222  *   | DES3 |
4223  *   --------
4224  *
4225  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is only set when the MSS changes.
4226  */
4227 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4228 {
4229 	struct dma_desc *desc, *first, *mss_desc = NULL;
4230 	struct stmmac_priv *priv = netdev_priv(dev);
4231 	unsigned int first_entry, tx_packets;
4232 	struct stmmac_txq_stats *txq_stats;
4233 	struct stmmac_tx_queue *tx_q;
4234 	u32 pay_len, mss, queue;
4235 	int i, first_tx, nfrags;
4236 	u8 proto_hdr_len, hdr;
4237 	dma_addr_t des;
4238 	bool set_ic;
4239 
4240 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4241 	 *
4242 	 * Never let the HW insert the VLAN tag, since segments split by the
4243 	 * TSO engine would otherwise be sent untagged by mistake.
4244 	 */
4245 	if (skb_vlan_tag_present(skb)) {
4246 		skb = __vlan_hwaccel_push_inside(skb);
4247 		if (unlikely(!skb)) {
4248 			priv->xstats.tx_dropped++;
4249 			return NETDEV_TX_OK;
4250 		}
4251 	}
4252 
4253 	nfrags = skb_shinfo(skb)->nr_frags;
4254 	queue = skb_get_queue_mapping(skb);
4255 
4256 	tx_q = &priv->dma_conf.tx_queue[queue];
4257 	txq_stats = &priv->xstats.txq_stats[queue];
4258 	first_tx = tx_q->cur_tx;
4259 
4260 	/* Compute header lengths */
4261 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4262 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4263 		hdr = sizeof(struct udphdr);
4264 	} else {
4265 		proto_hdr_len = skb_tcp_all_headers(skb);
4266 		hdr = tcp_hdrlen(skb);
4267 	}
4268 
4269 	/* Descriptor availability based on this threshold should be safe enough */
4270 	if (unlikely(stmmac_tx_avail(priv, queue) <
4271 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4272 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4273 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4274 								queue));
4275 			/* This is a hard error, log it. */
4276 			netdev_err(priv->dev,
4277 				   "%s: Tx Ring full when queue awake\n",
4278 				   __func__);
4279 		}
4280 		return NETDEV_TX_BUSY;
4281 	}
4282 
4283 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4284 
4285 	mss = skb_shinfo(skb)->gso_size;
4286 
4287 	/* set new MSS value if needed */
4288 	if (mss != tx_q->mss) {
4289 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4290 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4291 		else
4292 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4293 
4294 		stmmac_set_mss(priv, mss_desc, mss);
4295 		tx_q->mss = mss;
4296 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4297 						priv->dma_conf.dma_tx_size);
4298 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4299 	}
4300 
4301 	if (netif_msg_tx_queued(priv)) {
4302 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4303 			__func__, hdr, proto_hdr_len, pay_len, mss);
4304 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4305 			skb->data_len);
4306 	}
4307 
4308 	first_entry = tx_q->cur_tx;
4309 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4310 
4311 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4312 		desc = &tx_q->dma_entx[first_entry].basic;
4313 	else
4314 		desc = &tx_q->dma_tx[first_entry];
4315 	first = desc;
4316 
4317 	/* first descriptor: fill Headers on Buf1 */
4318 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4319 			     DMA_TO_DEVICE);
4320 	if (dma_mapping_error(priv->device, des))
4321 		goto dma_map_err;
4322 
4323 	stmmac_set_desc_addr(priv, first, des);
4324 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4325 			     (nfrags == 0), queue);
4326 
4327 	/* In case two or more DMA transmit descriptors are allocated for this
4328 	 * non-paged SKB data, the DMA buffer address should be saved to
4329 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4330 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4331 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4332 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4333 	 * sooner or later.
4334 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4335 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4336 	 * this DMA buffer right after the DMA engine completely finishes the
4337 	 * full buffer transmission.
4338 	 */
4339 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4340 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4341 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4342 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4343 
4344 	/* Prepare fragments */
4345 	for (i = 0; i < nfrags; i++) {
4346 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4347 
4348 		des = skb_frag_dma_map(priv->device, frag, 0,
4349 				       skb_frag_size(frag),
4350 				       DMA_TO_DEVICE);
4351 		if (dma_mapping_error(priv->device, des))
4352 			goto dma_map_err;
4353 
4354 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4355 				     (i == nfrags - 1), queue);
4356 
4357 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4358 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4359 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4360 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4361 	}
4362 
4363 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4364 
4365 	/* Only the last descriptor gets to point to the skb. */
4366 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4367 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4368 
4369 	/* Manage tx mitigation */
4370 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4371 	tx_q->tx_count_frames += tx_packets;
4372 
4373 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4374 		set_ic = true;
4375 	else if (!priv->tx_coal_frames[queue])
4376 		set_ic = false;
4377 	else if (tx_packets > priv->tx_coal_frames[queue])
4378 		set_ic = true;
4379 	else if ((tx_q->tx_count_frames %
4380 		  priv->tx_coal_frames[queue]) < tx_packets)
4381 		set_ic = true;
4382 	else
4383 		set_ic = false;
4384 
4385 	if (set_ic) {
4386 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4387 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4388 		else
4389 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4390 
4391 		tx_q->tx_count_frames = 0;
4392 		stmmac_set_tx_ic(priv, desc);
4393 	}
4394 
4395 	/* We've used all descriptors we need for this skb, however,
4396 	 * advance cur_tx so that it references a fresh descriptor.
4397 	 * ndo_start_xmit will fill this descriptor the next time it's
4398 	 * called and stmmac_tx_clean may clean up to this descriptor.
4399 	 */
4400 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4401 
4402 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4403 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4404 			  __func__);
4405 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4406 	}
4407 
4408 	u64_stats_update_begin(&txq_stats->q_syncp);
4409 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4410 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4411 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4412 	if (set_ic)
4413 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4414 	u64_stats_update_end(&txq_stats->q_syncp);
4415 
4416 	if (priv->sarc_type)
4417 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4418 
4419 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4420 		     priv->hwts_tx_en)) {
4421 		/* declare that device is doing timestamping */
4422 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4423 		stmmac_enable_tx_timestamp(priv, first);
4424 	}
4425 
4426 	/* Complete the first descriptor before granting the DMA */
4427 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4428 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4429 				   hdr / 4, (skb->len - proto_hdr_len));
4430 
4431 	/* If context desc is used to change MSS */
4432 	if (mss_desc) {
4433 		/* Make sure that first descriptor has been completely
4434 		 * written, including its own bit. This is because MSS is
4435 		 * actually before first descriptor, so we need to make
4436 		 * sure that MSS's own bit is the last thing written.
4437 		 */
4438 		dma_wmb();
4439 		stmmac_set_tx_owner(priv, mss_desc);
4440 	}
4441 
4442 	if (netif_msg_pktdata(priv)) {
4443 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4444 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4445 			tx_q->cur_tx, first, nfrags);
4446 		pr_info(">>> frame to be transmitted: ");
4447 		print_pkt(skb->data, skb_headlen(skb));
4448 	}
4449 
4450 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4451 	skb_tx_timestamp(skb);
4452 
4453 	stmmac_flush_tx_descriptors(priv, queue);
4454 	stmmac_tx_timer_arm(priv, queue);
4455 
4456 	return NETDEV_TX_OK;
4457 
4458 dma_map_err:
4459 	dev_err(priv->device, "Tx dma map failed\n");
4460 	dev_kfree_skb(skb);
4461 	priv->xstats.tx_dropped++;
4462 	return NETDEV_TX_OK;
4463 }
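
/* Illustrative sketch (not part of the driver): the ring-space test at the
 * top of stmmac_tso_xmit() approximates the worst-case descriptor usage of
 * the linear part of the skb as shown below. The helper name is made up.
 */
static inline bool stmmac_tso_has_room_example(struct stmmac_priv *priv,
					       struct sk_buff *skb,
					       u32 queue, u8 proto_hdr_len)
{
	unsigned int needed = (skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1;

	return stmmac_tx_avail(priv, queue) >= needed;
}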
4464 
4465 /**
4466  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4467  * @skb: socket buffer to check
4468  *
4469  * Check if a packet has an ethertype that will trigger the IP header checks
4470  * and IP/TCP checksum engine of the stmmac core.
4471  *
4472  * Return: true if the ethertype can trigger the checksum engine, false
4473  * otherwise
4474  */
4475 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4476 {
4477 	int depth = 0;
4478 	__be16 proto;
4479 
4480 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4481 				    &depth);
4482 
4483 	return (depth <= ETH_HLEN) &&
4484 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4485 }
4486 
4487 /**
4488  *  stmmac_xmit - Tx entry point of the driver
4489  *  @skb : the socket buffer
4490  *  @dev : device pointer
4491  *  Description : this is the tx entry point of the driver.
4492  *  It programs the chain or the ring and supports oversized frames
4493  *  and SG feature.
4494  */
4495 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4496 {
4497 	unsigned int first_entry, tx_packets, enh_desc;
4498 	struct stmmac_priv *priv = netdev_priv(dev);
4499 	unsigned int nopaged_len = skb_headlen(skb);
4500 	int i, csum_insertion = 0, is_jumbo = 0;
4501 	u32 queue = skb_get_queue_mapping(skb);
4502 	int nfrags = skb_shinfo(skb)->nr_frags;
4503 	int gso = skb_shinfo(skb)->gso_type;
4504 	struct stmmac_txq_stats *txq_stats;
4505 	struct dma_edesc *tbs_desc = NULL;
4506 	struct dma_desc *desc, *first;
4507 	struct stmmac_tx_queue *tx_q;
4508 	bool has_vlan, set_ic;
4509 	int entry, first_tx;
4510 	dma_addr_t des;
4511 
4512 	tx_q = &priv->dma_conf.tx_queue[queue];
4513 	txq_stats = &priv->xstats.txq_stats[queue];
4514 	first_tx = tx_q->cur_tx;
4515 
4516 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4517 		stmmac_stop_sw_lpi(priv);
4518 
4519 	/* Manage oversized TCP/UDP frames for GMAC4 and newer devices */
4520 	if (skb_is_gso(skb) && priv->tso) {
4521 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4522 			return stmmac_tso_xmit(skb, dev);
4523 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4524 			return stmmac_tso_xmit(skb, dev);
4525 	}
4526 
4527 	if (priv->est && priv->est->enable &&
4528 	    priv->est->max_sdu[queue] &&
4529 	    skb->len > priv->est->max_sdu[queue]){
4530 		priv->xstats.max_sdu_txq_drop[queue]++;
4531 		goto max_sdu_err;
4532 	}
4533 
4534 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4535 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4536 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4537 								queue));
4538 			/* This is a hard error, log it. */
4539 			netdev_err(priv->dev,
4540 				   "%s: Tx Ring full when queue awake\n",
4541 				   __func__);
4542 		}
4543 		return NETDEV_TX_BUSY;
4544 	}
4545 
4546 	/* Check if VLAN can be inserted by HW */
4547 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4548 
4549 	entry = tx_q->cur_tx;
4550 	first_entry = entry;
4551 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4552 
4553 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4554 	/* DWMAC IPs can be synthesized to support TX COE only for a few TX
4555 	 * queues. In that case, checksum offloading for queues that don't
4556 	 * support TX COE needs to fall back to software checksum calculation.
4557 	 *
4558 	 * Packets that won't trigger the COE, e.g. most DSA-tagged packets,
4559 	 * also have to be checksummed in software.
4560 	 */
4561 	if (csum_insertion &&
4562 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4563 	     !stmmac_has_ip_ethertype(skb))) {
4564 		if (unlikely(skb_checksum_help(skb)))
4565 			goto dma_map_err;
4566 		csum_insertion = !csum_insertion;
4567 	}
4568 
4569 	if (likely(priv->extend_desc))
4570 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4571 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4572 		desc = &tx_q->dma_entx[entry].basic;
4573 	else
4574 		desc = tx_q->dma_tx + entry;
4575 
4576 	first = desc;
4577 
4578 	if (has_vlan)
4579 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4580 
4581 	enh_desc = priv->plat->enh_desc;
4582 	/* Program the descriptors according to the size of the frame */
4583 	if (enh_desc)
4584 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4585 
4586 	if (unlikely(is_jumbo)) {
4587 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4588 		if (unlikely(entry < 0) && (entry != -EINVAL))
4589 			goto dma_map_err;
4590 	}
4591 
4592 	for (i = 0; i < nfrags; i++) {
4593 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4594 		int len = skb_frag_size(frag);
4595 		bool last_segment = (i == (nfrags - 1));
4596 
4597 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4598 		WARN_ON(tx_q->tx_skbuff[entry]);
4599 
4600 		if (likely(priv->extend_desc))
4601 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4602 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4603 			desc = &tx_q->dma_entx[entry].basic;
4604 		else
4605 			desc = tx_q->dma_tx + entry;
4606 
4607 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4608 				       DMA_TO_DEVICE);
4609 		if (dma_mapping_error(priv->device, des))
4610 			goto dma_map_err; /* should reuse desc w/o issues */
4611 
4612 		tx_q->tx_skbuff_dma[entry].buf = des;
4613 
4614 		stmmac_set_desc_addr(priv, desc, des);
4615 
4616 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4617 		tx_q->tx_skbuff_dma[entry].len = len;
4618 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4619 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4620 
4621 		/* Prepare the descriptor and set the own bit too */
4622 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4623 				priv->mode, 1, last_segment, skb->len);
4624 	}
4625 
4626 	/* Only the last descriptor gets to point to the skb. */
4627 	tx_q->tx_skbuff[entry] = skb;
4628 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4629 
4630 	/* According to the coalesce parameter, the IC bit for the latest
4631 	 * segment is reset and the timer re-started to clean the TX status.
4632 	 * This approach takes care of the fragments: desc is the first
4633 	 * element in case of no SG.
4634 	 */
4635 	tx_packets = (entry + 1) - first_tx;
4636 	tx_q->tx_count_frames += tx_packets;
4637 
4638 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4639 		set_ic = true;
4640 	else if (!priv->tx_coal_frames[queue])
4641 		set_ic = false;
4642 	else if (tx_packets > priv->tx_coal_frames[queue])
4643 		set_ic = true;
4644 	else if ((tx_q->tx_count_frames %
4645 		  priv->tx_coal_frames[queue]) < tx_packets)
4646 		set_ic = true;
4647 	else
4648 		set_ic = false;
4649 
4650 	if (set_ic) {
4651 		if (likely(priv->extend_desc))
4652 			desc = &tx_q->dma_etx[entry].basic;
4653 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4654 			desc = &tx_q->dma_entx[entry].basic;
4655 		else
4656 			desc = &tx_q->dma_tx[entry];
4657 
4658 		tx_q->tx_count_frames = 0;
4659 		stmmac_set_tx_ic(priv, desc);
4660 	}
4661 
4662 	/* We've used all descriptors we need for this skb, however,
4663 	 * advance cur_tx so that it references a fresh descriptor.
4664 	 * ndo_start_xmit will fill this descriptor the next time it's
4665 	 * called and stmmac_tx_clean may clean up to this descriptor.
4666 	 */
4667 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4668 	tx_q->cur_tx = entry;
4669 
4670 	if (netif_msg_pktdata(priv)) {
4671 		netdev_dbg(priv->dev,
4672 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4673 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4674 			   entry, first, nfrags);
4675 
4676 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4677 		print_pkt(skb->data, skb->len);
4678 	}
4679 
4680 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4681 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4682 			  __func__);
4683 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4684 	}
4685 
4686 	u64_stats_update_begin(&txq_stats->q_syncp);
4687 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4688 	if (set_ic)
4689 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4690 	u64_stats_update_end(&txq_stats->q_syncp);
4691 
4692 	if (priv->sarc_type)
4693 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4694 
4695 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4696 	 * problems because all the descriptors are actually ready to be
4697 	 * passed to the DMA engine.
4698 	 */
4699 	if (likely(!is_jumbo)) {
4700 		bool last_segment = (nfrags == 0);
4701 
4702 		des = dma_map_single(priv->device, skb->data,
4703 				     nopaged_len, DMA_TO_DEVICE);
4704 		if (dma_mapping_error(priv->device, des))
4705 			goto dma_map_err;
4706 
4707 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4708 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4709 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4710 
4711 		stmmac_set_desc_addr(priv, first, des);
4712 
4713 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4714 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4715 
4716 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4717 			     priv->hwts_tx_en)) {
4718 			/* declare that device is doing timestamping */
4719 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4720 			stmmac_enable_tx_timestamp(priv, first);
4721 		}
4722 
4723 		/* Prepare the first descriptor setting the OWN bit too */
4724 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4725 				csum_insertion, priv->mode, 0, last_segment,
4726 				skb->len);
4727 	}
4728 
4729 	if (tx_q->tbs & STMMAC_TBS_EN) {
4730 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4731 
4732 		tbs_desc = &tx_q->dma_entx[first_entry];
4733 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4734 	}
4735 
4736 	stmmac_set_tx_owner(priv, first);
4737 
4738 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4739 
4740 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4741 	skb_tx_timestamp(skb);
4742 	stmmac_flush_tx_descriptors(priv, queue);
4743 	stmmac_tx_timer_arm(priv, queue);
4744 
4745 	return NETDEV_TX_OK;
4746 
4747 dma_map_err:
4748 	netdev_err(priv->dev, "Tx DMA map failed\n");
4749 max_sdu_err:
4750 	dev_kfree_skb(skb);
4751 	priv->xstats.tx_dropped++;
4752 	return NETDEV_TX_OK;
4753 }
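
/* Illustrative sketch (not part of the driver): ignoring the HW timestamp
 * case, the set_ic computation above requests a completion interrupt
 * whenever the running frame counter crosses a multiple of tx_coal_frames;
 * with tx_coal_frames = 25 and single-descriptor frames that is roughly one
 * "IC" descriptor every 25 frames. The helper name is made up.
 */
static inline bool stmmac_tx_coal_ic_example(u32 tx_count_frames,
					     u32 tx_packets, u32 coal_frames)
{
	if (!coal_frames)
		return false;
	if (tx_packets > coal_frames)
		return true;
	return (tx_count_frames % coal_frames) < tx_packets;
}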
4754 
4755 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4756 {
4757 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4758 	__be16 vlan_proto = veth->h_vlan_proto;
4759 	u16 vlanid;
4760 
4761 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4762 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4763 	    (vlan_proto == htons(ETH_P_8021AD) &&
4764 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4765 		/* pop the vlan tag */
4766 		vlanid = ntohs(veth->h_vlan_TCI);
4767 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4768 		skb_pull(skb, VLAN_HLEN);
4769 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4770 	}
4771 }
4772 
4773 /**
4774  * stmmac_rx_refill - refill the used RX buffers
4775  * @priv: driver private structure
4776  * @queue: RX queue index
4777  * Description : this reallocates the page pool buffers used by the
4778  * zero-copy reception process.
4779  */
4780 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4781 {
4782 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4783 	int dirty = stmmac_rx_dirty(priv, queue);
4784 	unsigned int entry = rx_q->dirty_rx;
4785 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4786 
4787 	if (priv->dma_cap.host_dma_width <= 32)
4788 		gfp |= GFP_DMA32;
4789 
4790 	while (dirty-- > 0) {
4791 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4792 		struct dma_desc *p;
4793 		bool use_rx_wd;
4794 
4795 		if (priv->extend_desc)
4796 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4797 		else
4798 			p = rx_q->dma_rx + entry;
4799 
4800 		if (!buf->page) {
4801 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4802 			if (!buf->page)
4803 				break;
4804 		}
4805 
4806 		if (priv->sph && !buf->sec_page) {
4807 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4808 			if (!buf->sec_page)
4809 				break;
4810 
4811 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4812 		}
4813 
4814 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4815 
4816 		stmmac_set_desc_addr(priv, p, buf->addr);
4817 		if (priv->sph)
4818 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4819 		else
4820 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4821 		stmmac_refill_desc3(priv, rx_q, p);
4822 
4823 		rx_q->rx_count_frames++;
4824 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4825 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4826 			rx_q->rx_count_frames = 0;
4827 
4828 		use_rx_wd = !priv->rx_coal_frames[queue];
4829 		use_rx_wd |= rx_q->rx_count_frames > 0;
4830 		if (!priv->use_riwt)
4831 			use_rx_wd = false;
4832 
4833 		dma_wmb();
4834 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4835 
4836 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4837 	}
4838 	rx_q->dirty_rx = entry;
4839 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4840 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4841 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4842 }
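
/* Illustrative sketch (not part of the driver): the refill loop above decides
 * per descriptor whether to request the RX watchdog; reduced to a single
 * expression the decision is roughly the following. The helper name is made
 * up.
 */
static inline bool stmmac_use_rx_wd_example(bool use_riwt, u32 coal_frames,
					    u32 rx_count_frames)
{
	return use_riwt && (!coal_frames || rx_count_frames > 0);
}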
4843 
4844 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4845 				       struct dma_desc *p,
4846 				       int status, unsigned int len)
4847 {
4848 	unsigned int plen = 0, hlen = 0;
4849 	int coe = priv->hw->rx_csum;
4850 
4851 	/* Not first descriptor, buffer is always zero */
4852 	if (priv->sph && len)
4853 		return 0;
4854 
4855 	/* First descriptor, get split header length */
4856 	stmmac_get_rx_header_len(priv, p, &hlen);
4857 	if (priv->sph && hlen) {
4858 		priv->xstats.rx_split_hdr_pkt_n++;
4859 		return hlen;
4860 	}
4861 
4862 	/* First descriptor, not last descriptor and not split header */
4863 	if (status & rx_not_ls)
4864 		return priv->dma_conf.dma_buf_sz;
4865 
4866 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4867 
4868 	/* First descriptor and last descriptor and not split header */
4869 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4870 }
4871 
4872 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4873 				       struct dma_desc *p,
4874 				       int status, unsigned int len)
4875 {
4876 	int coe = priv->hw->rx_csum;
4877 	unsigned int plen = 0;
4878 
4879 	/* Not split header, buffer is not available */
4880 	if (!priv->sph)
4881 		return 0;
4882 
4883 	/* Not last descriptor */
4884 	if (status & rx_not_ls)
4885 		return priv->dma_conf.dma_buf_sz;
4886 
4887 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4888 
4889 	/* Last descriptor */
4890 	return plen - len;
4891 }
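
/* Illustrative example (not part of the driver, hypothetical numbers): with
 * Split Header enabled and a frame that fits in a single descriptor, the two
 * helpers above divide the reported frame length between the header buffer
 * and the payload buffer.
 */
static inline unsigned int stmmac_rx_split_len_example(void)
{
	unsigned int hlen = 54;		/* header length reported by the HW */
	unsigned int plen = 1460;	/* total frame length from the descriptor */
	unsigned int buf1_len = hlen;	/* what stmmac_rx_buf1_len() returns */

	return plen - buf1_len;		/* what stmmac_rx_buf2_len() returns: 1406 */
}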
4892 
4893 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4894 				struct xdp_frame *xdpf, bool dma_map)
4895 {
4896 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4897 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4898 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
4899 	unsigned int entry = tx_q->cur_tx;
4900 	struct dma_desc *tx_desc;
4901 	dma_addr_t dma_addr;
4902 	bool set_ic;
4903 
4904 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4905 		return STMMAC_XDP_CONSUMED;
4906 
4907 	if (priv->est && priv->est->enable &&
4908 	    priv->est->max_sdu[queue] &&
4909 	    xdpf->len > priv->est->max_sdu[queue]) {
4910 		priv->xstats.max_sdu_txq_drop[queue]++;
4911 		return STMMAC_XDP_CONSUMED;
4912 	}
4913 
4914 	if (likely(priv->extend_desc))
4915 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4916 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4917 		tx_desc = &tx_q->dma_entx[entry].basic;
4918 	else
4919 		tx_desc = tx_q->dma_tx + entry;
4920 
4921 	if (dma_map) {
4922 		dma_addr = dma_map_single(priv->device, xdpf->data,
4923 					  xdpf->len, DMA_TO_DEVICE);
4924 		if (dma_mapping_error(priv->device, dma_addr))
4925 			return STMMAC_XDP_CONSUMED;
4926 
4927 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4928 	} else {
4929 		struct page *page = virt_to_page(xdpf->data);
4930 
4931 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4932 			   xdpf->headroom;
4933 		dma_sync_single_for_device(priv->device, dma_addr,
4934 					   xdpf->len, DMA_BIDIRECTIONAL);
4935 
4936 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4937 	}
4938 
4939 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4940 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4941 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4942 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4943 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4944 
4945 	tx_q->xdpf[entry] = xdpf;
4946 
4947 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4948 
4949 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4950 			       csum, priv->mode, true, true,
4951 			       xdpf->len);
4952 
4953 	tx_q->tx_count_frames++;
4954 
4955 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4956 		set_ic = true;
4957 	else
4958 		set_ic = false;
4959 
4960 	if (set_ic) {
4961 		tx_q->tx_count_frames = 0;
4962 		stmmac_set_tx_ic(priv, tx_desc);
4963 		u64_stats_update_begin(&txq_stats->q_syncp);
4964 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4965 		u64_stats_update_end(&txq_stats->q_syncp);
4966 	}
4967 
4968 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4969 
4970 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4971 	tx_q->cur_tx = entry;
4972 
4973 	return STMMAC_XDP_TX;
4974 }
4975 
4976 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4977 				   int cpu)
4978 {
4979 	int index = cpu;
4980 
4981 	if (unlikely(index < 0))
4982 		index = 0;
4983 
4984 	while (index >= priv->plat->tx_queues_to_use)
4985 		index -= priv->plat->tx_queues_to_use;
4986 
4987 	return index;
4988 }
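
/* Illustrative sketch (not part of the driver): the subtraction loop above is
 * just a modulo mapping of the current CPU onto the available TX queues, e.g.
 * with 4 TX queues CPU 6 ends up on queue 2. The helper name is made up.
 */
static inline int stmmac_xdp_queue_for_cpu_example(int cpu, int tx_queues)
{
	return (cpu < 0 ? 0 : cpu) % tx_queues;
}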
4989 
4990 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4991 				struct xdp_buff *xdp)
4992 {
4993 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4994 	int cpu = smp_processor_id();
4995 	struct netdev_queue *nq;
4996 	int queue;
4997 	int res;
4998 
4999 	if (unlikely(!xdpf))
5000 		return STMMAC_XDP_CONSUMED;
5001 
5002 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5003 	nq = netdev_get_tx_queue(priv->dev, queue);
5004 
5005 	__netif_tx_lock(nq, cpu);
5006 	/* Avoid a TX time-out as we are sharing the queue with the slow path */
5007 	txq_trans_cond_update(nq);
5008 
5009 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5010 	if (res == STMMAC_XDP_TX)
5011 		stmmac_flush_tx_descriptors(priv, queue);
5012 
5013 	__netif_tx_unlock(nq);
5014 
5015 	return res;
5016 }
5017 
5018 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5019 				 struct bpf_prog *prog,
5020 				 struct xdp_buff *xdp)
5021 {
5022 	u32 act;
5023 	int res;
5024 
5025 	act = bpf_prog_run_xdp(prog, xdp);
5026 	switch (act) {
5027 	case XDP_PASS:
5028 		res = STMMAC_XDP_PASS;
5029 		break;
5030 	case XDP_TX:
5031 		res = stmmac_xdp_xmit_back(priv, xdp);
5032 		break;
5033 	case XDP_REDIRECT:
5034 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5035 			res = STMMAC_XDP_CONSUMED;
5036 		else
5037 			res = STMMAC_XDP_REDIRECT;
5038 		break;
5039 	default:
5040 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5041 		fallthrough;
5042 	case XDP_ABORTED:
5043 		trace_xdp_exception(priv->dev, prog, act);
5044 		fallthrough;
5045 	case XDP_DROP:
5046 		res = STMMAC_XDP_CONSUMED;
5047 		break;
5048 	}
5049 
5050 	return res;
5051 }
5052 
5053 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5054 					   struct xdp_buff *xdp)
5055 {
5056 	struct bpf_prog *prog;
5057 	int res;
5058 
5059 	prog = READ_ONCE(priv->xdp_prog);
5060 	if (!prog) {
5061 		res = STMMAC_XDP_PASS;
5062 		goto out;
5063 	}
5064 
5065 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5066 out:
5067 	return ERR_PTR(-res);
5068 }
5069 
5070 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5071 				   int xdp_status)
5072 {
5073 	int cpu = smp_processor_id();
5074 	int queue;
5075 
5076 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5077 
5078 	if (xdp_status & STMMAC_XDP_TX)
5079 		stmmac_tx_timer_arm(priv, queue);
5080 
5081 	if (xdp_status & STMMAC_XDP_REDIRECT)
5082 		xdp_do_flush();
5083 }
5084 
5085 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5086 					       struct xdp_buff *xdp)
5087 {
5088 	unsigned int metasize = xdp->data - xdp->data_meta;
5089 	unsigned int datasize = xdp->data_end - xdp->data;
5090 	struct sk_buff *skb;
5091 
5092 	skb = napi_alloc_skb(&ch->rxtx_napi,
5093 			     xdp->data_end - xdp->data_hard_start);
5094 	if (unlikely(!skb))
5095 		return NULL;
5096 
5097 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5098 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5099 	if (metasize)
5100 		skb_metadata_set(skb, metasize);
5101 
5102 	return skb;
5103 }
5104 
5105 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5106 				   struct dma_desc *p, struct dma_desc *np,
5107 				   struct xdp_buff *xdp)
5108 {
5109 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5110 	struct stmmac_channel *ch = &priv->channel[queue];
5111 	unsigned int len = xdp->data_end - xdp->data;
5112 	enum pkt_hash_types hash_type;
5113 	int coe = priv->hw->rx_csum;
5114 	struct sk_buff *skb;
5115 	u32 hash;
5116 
5117 	skb = stmmac_construct_skb_zc(ch, xdp);
5118 	if (!skb) {
5119 		priv->xstats.rx_dropped++;
5120 		return;
5121 	}
5122 
5123 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5124 	if (priv->hw->hw_vlan_en)
5125 		/* MAC level stripping. */
5126 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5127 	else
5128 		/* Driver level stripping. */
5129 		stmmac_rx_vlan(priv->dev, skb);
5130 	skb->protocol = eth_type_trans(skb, priv->dev);
5131 
5132 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5133 		skb_checksum_none_assert(skb);
5134 	else
5135 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5136 
5137 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5138 		skb_set_hash(skb, hash, hash_type);
5139 
5140 	skb_record_rx_queue(skb, queue);
5141 	napi_gro_receive(&ch->rxtx_napi, skb);
5142 
5143 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5144 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5145 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5146 	u64_stats_update_end(&rxq_stats->napi_syncp);
5147 }
5148 
5149 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5150 {
5151 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5152 	unsigned int entry = rx_q->dirty_rx;
5153 	struct dma_desc *rx_desc = NULL;
5154 	bool ret = true;
5155 
5156 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5157 
5158 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5159 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5160 		dma_addr_t dma_addr;
5161 		bool use_rx_wd;
5162 
5163 		if (!buf->xdp) {
5164 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5165 			if (!buf->xdp) {
5166 				ret = false;
5167 				break;
5168 			}
5169 		}
5170 
5171 		if (priv->extend_desc)
5172 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5173 		else
5174 			rx_desc = rx_q->dma_rx + entry;
5175 
5176 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5177 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5178 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5179 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5180 
5181 		rx_q->rx_count_frames++;
5182 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5183 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5184 			rx_q->rx_count_frames = 0;
5185 
5186 		use_rx_wd = !priv->rx_coal_frames[queue];
5187 		use_rx_wd |= rx_q->rx_count_frames > 0;
5188 		if (!priv->use_riwt)
5189 			use_rx_wd = false;
5190 
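		/* Ensure all descriptor fields are written before handing
		 * ownership back to the DMA.
		 */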
5191 		dma_wmb();
5192 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5193 
5194 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5195 	}
5196 
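	/* At least one descriptor was refilled: publish the new dirty index
	 * and notify the DMA through the RX tail pointer.
	 */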
5197 	if (rx_desc) {
5198 		rx_q->dirty_rx = entry;
5199 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5200 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5201 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5202 	}
5203 
5204 	return ret;
5205 }
5206 
5207 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5208 {
5209 	/* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
5210 	 * represents the incoming packet, whereas the cb field in the same
5211 	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
5212 	 * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5213 	 */
5214 	return (struct stmmac_xdp_buff *)xdp;
5215 }
5216 
5217 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5218 {
5219 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5220 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5221 	unsigned int count = 0, error = 0, len = 0;
5222 	int dirty = stmmac_rx_dirty(priv, queue);
5223 	unsigned int next_entry = rx_q->cur_rx;
5224 	u32 rx_errors = 0, rx_dropped = 0;
5225 	unsigned int desc_size;
5226 	struct bpf_prog *prog;
5227 	bool failure = false;
5228 	int xdp_status = 0;
5229 	int status = 0;
5230 
5231 	if (netif_msg_rx_status(priv)) {
5232 		void *rx_head;
5233 
5234 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5235 		if (priv->extend_desc) {
5236 			rx_head = (void *)rx_q->dma_erx;
5237 			desc_size = sizeof(struct dma_extended_desc);
5238 		} else {
5239 			rx_head = (void *)rx_q->dma_rx;
5240 			desc_size = sizeof(struct dma_desc);
5241 		}
5242 
5243 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5244 				    rx_q->dma_rx_phy, desc_size);
5245 	}
5246 	while (count < limit) {
5247 		struct stmmac_rx_buffer *buf;
5248 		struct stmmac_xdp_buff *ctx;
5249 		unsigned int buf1_len = 0;
5250 		struct dma_desc *np, *p;
5251 		int entry;
5252 		int res;
5253 
5254 		if (!count && rx_q->state_saved) {
5255 			error = rx_q->state.error;
5256 			len = rx_q->state.len;
5257 		} else {
5258 			rx_q->state_saved = false;
5259 			error = 0;
5260 			len = 0;
5261 		}
5262 
5263 		if (count >= limit)
5264 			break;
5265 
5266 read_again:
5267 		buf1_len = 0;
5268 		entry = next_entry;
5269 		buf = &rx_q->buf_pool[entry];
5270 
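		/* Refill in batches so the RX tail pointer is not written for
		 * every single frame.
		 */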
5271 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5272 			failure = failure ||
5273 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5274 			dirty = 0;
5275 		}
5276 
5277 		if (priv->extend_desc)
5278 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5279 		else
5280 			p = rx_q->dma_rx + entry;
5281 
5282 		/* read the status of the incoming frame */
5283 		status = stmmac_rx_status(priv, &priv->xstats, p);
5284 		/* check if it is still owned by the DMA, otherwise go ahead */
5285 		if (unlikely(status & dma_own))
5286 			break;
5287 
5288 		/* Prefetch the next RX descriptor */
5289 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5290 						priv->dma_conf.dma_rx_size);
5291 		next_entry = rx_q->cur_rx;
5292 
5293 		if (priv->extend_desc)
5294 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5295 		else
5296 			np = rx_q->dma_rx + next_entry;
5297 
5298 		prefetch(np);
5299 
5300 		/* Ensure a valid XSK buffer before proceeding */
5301 		if (!buf->xdp)
5302 			break;
5303 
5304 		if (priv->extend_desc)
5305 			stmmac_rx_extended_status(priv, &priv->xstats,
5306 						  rx_q->dma_erx + entry);
5307 		if (unlikely(status == discard_frame)) {
5308 			xsk_buff_free(buf->xdp);
5309 			buf->xdp = NULL;
5310 			dirty++;
5311 			error = 1;
5312 			if (!priv->hwts_rx_en)
5313 				rx_errors++;
5314 		}
5315 
5316 		if (unlikely(error && (status & rx_not_ls)))
5317 			goto read_again;
5318 		if (unlikely(error)) {
5319 			count++;
5320 			continue;
5321 		}
5322 
5323 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5324 		if (likely(status & rx_not_ls)) {
5325 			xsk_buff_free(buf->xdp);
5326 			buf->xdp = NULL;
5327 			dirty++;
5328 			count++;
5329 			goto read_again;
5330 		}
5331 
5332 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5333 		ctx->priv = priv;
5334 		ctx->desc = p;
5335 		ctx->ndesc = np;
5336 
5337 		/* XDP ZC frames only support the primary buffer for now */
5338 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5339 		len += buf1_len;
5340 
5341 		/* ACS is disabled; strip manually. */
5342 		if (likely(!(status & rx_not_ls))) {
5343 			buf1_len -= ETH_FCS_LEN;
5344 			len -= ETH_FCS_LEN;
5345 		}
5346 
5347 		/* RX buffer is good and fits into an XSK pool buffer */
5348 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5349 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5350 
5351 		prog = READ_ONCE(priv->xdp_prog);
5352 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5353 
5354 		switch (res) {
5355 		case STMMAC_XDP_PASS:
5356 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5357 			xsk_buff_free(buf->xdp);
5358 			break;
5359 		case STMMAC_XDP_CONSUMED:
5360 			xsk_buff_free(buf->xdp);
5361 			rx_dropped++;
5362 			break;
5363 		case STMMAC_XDP_TX:
5364 		case STMMAC_XDP_REDIRECT:
5365 			xdp_status |= res;
5366 			break;
5367 		}
5368 
5369 		buf->xdp = NULL;
5370 		dirty++;
5371 		count++;
5372 	}
5373 
5374 	if (status & rx_not_ls) {
5375 		rx_q->state_saved = true;
5376 		rx_q->state.error = error;
5377 		rx_q->state.len = len;
5378 	}
5379 
5380 	stmmac_finalize_xdp_rx(priv, xdp_status);
5381 
5382 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5383 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5384 	u64_stats_update_end(&rxq_stats->napi_syncp);
5385 
5386 	priv->xstats.rx_dropped += rx_dropped;
5387 	priv->xstats.rx_errors += rx_errors;
5388 
5389 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5390 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5391 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5392 		else
5393 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5394 
5395 		return (int)count;
5396 	}
5397 
5398 	return failure ? limit : (int)count;
5399 }
5400 
5401 /**
5402  * stmmac_rx - manage the receive process
5403  * @priv: driver private structure
5404  * @limit: NAPI budget
5405  * @queue: RX queue index.
5406  * Description: this is the function called by the NAPI poll method.
5407  * It gets all the frames inside the ring.
5408  */
5409 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5410 {
5411 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5412 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5413 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5414 	struct stmmac_channel *ch = &priv->channel[queue];
5415 	unsigned int count = 0, error = 0, len = 0;
5416 	int status = 0, coe = priv->hw->rx_csum;
5417 	unsigned int next_entry = rx_q->cur_rx;
5418 	enum dma_data_direction dma_dir;
5419 	unsigned int desc_size;
5420 	struct sk_buff *skb = NULL;
5421 	struct stmmac_xdp_buff ctx;
5422 	int xdp_status = 0;
5423 	int bufsz;
5424 
5425 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
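	/* Frame size handed to xdp_init_buff() below, rounded up to a whole
	 * number of pages.
	 */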
5426 	bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5427 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5428 
5429 	if (netif_msg_rx_status(priv)) {
5430 		void *rx_head;
5431 
5432 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5433 		if (priv->extend_desc) {
5434 			rx_head = (void *)rx_q->dma_erx;
5435 			desc_size = sizeof(struct dma_extended_desc);
5436 		} else {
5437 			rx_head = (void *)rx_q->dma_rx;
5438 			desc_size = sizeof(struct dma_desc);
5439 		}
5440 
5441 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5442 				    rx_q->dma_rx_phy, desc_size);
5443 	}
5444 	while (count < limit) {
5445 		unsigned int buf1_len = 0, buf2_len = 0;
5446 		enum pkt_hash_types hash_type;
5447 		struct stmmac_rx_buffer *buf;
5448 		struct dma_desc *np, *p;
5449 		int entry;
5450 		u32 hash;
5451 
5452 		if (!count && rx_q->state_saved) {
5453 			skb = rx_q->state.skb;
5454 			error = rx_q->state.error;
5455 			len = rx_q->state.len;
5456 		} else {
5457 			rx_q->state_saved = false;
5458 			skb = NULL;
5459 			error = 0;
5460 			len = 0;
5461 		}
5462 
5463 read_again:
5464 		if (count >= limit)
5465 			break;
5466 
5467 		buf1_len = 0;
5468 		buf2_len = 0;
5469 		entry = next_entry;
5470 		buf = &rx_q->buf_pool[entry];
5471 
5472 		if (priv->extend_desc)
5473 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5474 		else
5475 			p = rx_q->dma_rx + entry;
5476 
5477 		/* read the status of the incoming frame */
5478 		status = stmmac_rx_status(priv, &priv->xstats, p);
5479 		/* check if it is still owned by the DMA, otherwise go ahead */
5480 		if (unlikely(status & dma_own))
5481 			break;
5482 
5483 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5484 						priv->dma_conf.dma_rx_size);
5485 		next_entry = rx_q->cur_rx;
5486 
5487 		if (priv->extend_desc)
5488 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5489 		else
5490 			np = rx_q->dma_rx + next_entry;
5491 
5492 		prefetch(np);
5493 
5494 		if (priv->extend_desc)
5495 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5496 		if (unlikely(status == discard_frame)) {
5497 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5498 			buf->page = NULL;
5499 			error = 1;
5500 			if (!priv->hwts_rx_en)
5501 				rx_errors++;
5502 		}
5503 
5504 		if (unlikely(error && (status & rx_not_ls)))
5505 			goto read_again;
5506 		if (unlikely(error)) {
5507 			dev_kfree_skb(skb);
5508 			skb = NULL;
5509 			count++;
5510 			continue;
5511 		}
5512 
5513 		/* Buffer is good. Go on. */
5514 
5515 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5516 		len += buf1_len;
5517 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5518 		len += buf2_len;
5519 
5520 		/* ACS is disabled; strip manually. */
5521 		if (likely(!(status & rx_not_ls))) {
5522 			if (buf2_len) {
5523 				buf2_len -= ETH_FCS_LEN;
5524 				len -= ETH_FCS_LEN;
5525 			} else if (buf1_len) {
5526 				buf1_len -= ETH_FCS_LEN;
5527 				len -= ETH_FCS_LEN;
5528 			}
5529 		}
5530 
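		/* No skb yet: this is the first buffer of the frame, so run
		 * the XDP program on it before building an skb.
		 */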
5531 		if (!skb) {
5532 			unsigned int pre_len, sync_len;
5533 
5534 			dma_sync_single_for_cpu(priv->device, buf->addr,
5535 						buf1_len, dma_dir);
5536 			net_prefetch(page_address(buf->page) +
5537 				     buf->page_offset);
5538 
5539 			xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5540 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5541 					 buf->page_offset, buf1_len, true);
5542 
5543 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5544 				  buf->page_offset;
5545 
5546 			ctx.priv = priv;
5547 			ctx.desc = p;
5548 			ctx.ndesc = np;
5549 
5550 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5551 			/* Due to xdp_adjust_tail: the DMA sync for_device must
5552 			 * cover the maximum length the CPU touched
5553 			 */
5554 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5555 				   buf->page_offset;
5556 			sync_len = max(sync_len, pre_len);
5557 
5558 			/* For any verdict other than XDP_PASS */
5559 			if (IS_ERR(skb)) {
5560 				unsigned int xdp_res = -PTR_ERR(skb);
5561 
5562 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5563 					page_pool_put_page(rx_q->page_pool,
5564 							   virt_to_head_page(ctx.xdp.data),
5565 							   sync_len, true);
5566 					buf->page = NULL;
5567 					rx_dropped++;
5568 
5569 					/* Clear skb as it currently holds the
5570 					 * XDP verdict, not a real buffer.
5571 					 */
5572 					skb = NULL;
5573 
5574 					if (unlikely((status & rx_not_ls)))
5575 						goto read_again;
5576 
5577 					count++;
5578 					continue;
5579 				} else if (xdp_res & (STMMAC_XDP_TX |
5580 						      STMMAC_XDP_REDIRECT)) {
5581 					xdp_status |= xdp_res;
5582 					buf->page = NULL;
5583 					skb = NULL;
5584 					count++;
5585 					continue;
5586 				}
5587 			}
5588 		}
5589 
5590 		if (!skb) {
5591 			unsigned int head_pad_len;
5592 
5593 			/* XDP program may expand or reduce tail */
5594 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5595 
5596 			skb = napi_build_skb(page_address(buf->page),
5597 					     rx_q->napi_skb_frag_size);
5598 			if (!skb) {
5599 				page_pool_recycle_direct(rx_q->page_pool,
5600 							 buf->page);
5601 				rx_dropped++;
5602 				count++;
5603 				goto drain_data;
5604 			}
5605 
5606 			/* XDP program may adjust header */
5607 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5608 			skb_reserve(skb, head_pad_len);
5609 			skb_put(skb, buf1_len);
5610 			skb_mark_for_recycle(skb);
5611 			buf->page = NULL;
5612 		} else if (buf1_len) {
5613 			dma_sync_single_for_cpu(priv->device, buf->addr,
5614 						buf1_len, dma_dir);
5615 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5616 					buf->page, buf->page_offset, buf1_len,
5617 					priv->dma_conf.dma_buf_sz);
5618 			buf->page = NULL;
5619 		}
5620 
5621 		if (buf2_len) {
5622 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5623 						buf2_len, dma_dir);
5624 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5625 					buf->sec_page, 0, buf2_len,
5626 					priv->dma_conf.dma_buf_sz);
5627 			buf->sec_page = NULL;
5628 		}
5629 
5630 drain_data:
5631 		if (likely(status & rx_not_ls))
5632 			goto read_again;
5633 		if (!skb)
5634 			continue;
5635 
5636 		/* Got entire packet into SKB. Finish it. */
5637 
5638 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5639 
5640 		if (priv->hw->hw_vlan_en)
5641 			/* MAC level stripping. */
5642 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5643 		else
5644 			/* Driver level stripping. */
5645 			stmmac_rx_vlan(priv->dev, skb);
5646 
5647 		skb->protocol = eth_type_trans(skb, priv->dev);
5648 
5649 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5650 		    (status & csum_none))
5651 			skb_checksum_none_assert(skb);
5652 		else
5653 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5654 
5655 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5656 			skb_set_hash(skb, hash, hash_type);
5657 
5658 		skb_record_rx_queue(skb, queue);
5659 		napi_gro_receive(&ch->rx_napi, skb);
5660 		skb = NULL;
5661 
5662 		rx_packets++;
5663 		rx_bytes += len;
5664 		count++;
5665 	}
5666 
5667 	if (status & rx_not_ls || skb) {
5668 		rx_q->state_saved = true;
5669 		rx_q->state.skb = skb;
5670 		rx_q->state.error = error;
5671 		rx_q->state.len = len;
5672 	}
5673 
5674 	stmmac_finalize_xdp_rx(priv, xdp_status);
5675 
5676 	stmmac_rx_refill(priv, queue);
5677 
5678 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5679 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5680 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5681 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5682 	u64_stats_update_end(&rxq_stats->napi_syncp);
5683 
5684 	priv->xstats.rx_dropped += rx_dropped;
5685 	priv->xstats.rx_errors += rx_errors;
5686 
5687 	return count;
5688 }
5689 
5690 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5691 {
5692 	struct stmmac_channel *ch =
5693 		container_of(napi, struct stmmac_channel, rx_napi);
5694 	struct stmmac_priv *priv = ch->priv_data;
5695 	struct stmmac_rxq_stats *rxq_stats;
5696 	u32 chan = ch->index;
5697 	int work_done;
5698 
5699 	rxq_stats = &priv->xstats.rxq_stats[chan];
5700 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5701 	u64_stats_inc(&rxq_stats->napi.poll);
5702 	u64_stats_update_end(&rxq_stats->napi_syncp);
5703 
5704 	work_done = stmmac_rx(priv, budget, chan);
5705 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5706 		unsigned long flags;
5707 
5708 		spin_lock_irqsave(&ch->lock, flags);
5709 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5710 		spin_unlock_irqrestore(&ch->lock, flags);
5711 	}
5712 
5713 	return work_done;
5714 }
5715 
5716 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5717 {
5718 	struct stmmac_channel *ch =
5719 		container_of(napi, struct stmmac_channel, tx_napi);
5720 	struct stmmac_priv *priv = ch->priv_data;
5721 	struct stmmac_txq_stats *txq_stats;
5722 	bool pending_packets = false;
5723 	u32 chan = ch->index;
5724 	int work_done;
5725 
5726 	txq_stats = &priv->xstats.txq_stats[chan];
5727 	u64_stats_update_begin(&txq_stats->napi_syncp);
5728 	u64_stats_inc(&txq_stats->napi.poll);
5729 	u64_stats_update_end(&txq_stats->napi_syncp);
5730 
5731 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5732 	work_done = min(work_done, budget);
5733 
5734 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5735 		unsigned long flags;
5736 
5737 		spin_lock_irqsave(&ch->lock, flags);
5738 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5739 		spin_unlock_irqrestore(&ch->lock, flags);
5740 	}
5741 
5742 	/* TX still has packets to handle; check if we need to arm the TX timer */
5743 	if (pending_packets)
5744 		stmmac_tx_timer_arm(priv, chan);
5745 
5746 	return work_done;
5747 }
5748 
5749 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5750 {
5751 	struct stmmac_channel *ch =
5752 		container_of(napi, struct stmmac_channel, rxtx_napi);
5753 	struct stmmac_priv *priv = ch->priv_data;
5754 	bool tx_pending_packets = false;
5755 	int rx_done, tx_done, rxtx_done;
5756 	struct stmmac_rxq_stats *rxq_stats;
5757 	struct stmmac_txq_stats *txq_stats;
5758 	u32 chan = ch->index;
5759 
5760 	rxq_stats = &priv->xstats.rxq_stats[chan];
5761 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5762 	u64_stats_inc(&rxq_stats->napi.poll);
5763 	u64_stats_update_end(&rxq_stats->napi_syncp);
5764 
5765 	txq_stats = &priv->xstats.txq_stats[chan];
5766 	u64_stats_update_begin(&txq_stats->napi_syncp);
5767 	u64_stats_inc(&txq_stats->napi.poll);
5768 	u64_stats_update_end(&txq_stats->napi_syncp);
5769 
5770 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5771 	tx_done = min(tx_done, budget);
5772 
5773 	rx_done = stmmac_rx_zc(priv, budget, chan);
5774 
5775 	rxtx_done = max(tx_done, rx_done);
5776 
5777 	/* If either TX or RX work is not complete, return budget
5778 	 * and keep polling
5779 	 */
5780 	if (rxtx_done >= budget)
5781 		return budget;
5782 
5783 	/* all work done, exit the polling mode */
5784 	if (napi_complete_done(napi, rxtx_done)) {
5785 		unsigned long flags;
5786 
5787 		spin_lock_irqsave(&ch->lock, flags);
5788 		/* Both RX and TX work are complete,
5789 		 * so enable both RX & TX IRQs.
5790 		 */
5791 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5792 		spin_unlock_irqrestore(&ch->lock, flags);
5793 	}
5794 
5795 	/* TX still has packets to handle; check if we need to arm the TX timer */
5796 	if (tx_pending_packets)
5797 		stmmac_tx_timer_arm(priv, chan);
5798 
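	/* napi_complete_done() was already attempted above, so never report
	 * the full budget from here.
	 */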
5799 	return min(rxtx_done, budget - 1);
5800 }
5801 
5802 /**
5803  *  stmmac_tx_timeout
5804  *  @dev : Pointer to net device structure
5805  *  @txqueue: the index of the hanging transmit queue
5806  *  Description: this function is called when a packet transmission fails to
5807  *   complete within a reasonable time. The driver will mark the error in the
5808  *   netdev structure and arrange for the device to be reset to a sane state
5809  *   in order to transmit a new packet.
5810  */
5811 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5812 {
5813 	struct stmmac_priv *priv = netdev_priv(dev);
5814 
5815 	stmmac_global_err(priv);
5816 }
5817 
5818 /**
5819  *  stmmac_set_rx_mode - entry point for multicast addressing
5820  *  @dev : pointer to the device structure
5821  *  Description:
5822  *  This function is a driver entry point which gets called by the kernel
5823  *  whenever multicast addresses must be enabled/disabled.
5824  *  Return value:
5825  *  void.
5826  *
5827  *  FIXME: This may need RXC to be running, but it may be called with BH
5828  *  disabled, which means we can't call phylink_rx_clk_stop*().
5829  */
5830 static void stmmac_set_rx_mode(struct net_device *dev)
5831 {
5832 	struct stmmac_priv *priv = netdev_priv(dev);
5833 
5834 	stmmac_set_filter(priv, priv->hw, dev);
5835 }
5836 
5837 /**
5838  *  stmmac_change_mtu - entry point to change MTU size for the device.
5839  *  @dev : device pointer.
5840  *  @new_mtu : the new MTU size for the device.
5841  *  Description: the Maximum Transmission Unit (MTU) is used by the network layer
5842  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5843  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5844  *  Return value:
5845  *  0 on success and an appropriate negative integer, as defined in
5846  *  errno.h, on failure.
5847  */
5848 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5849 {
5850 	struct stmmac_priv *priv = netdev_priv(dev);
5851 	int txfifosz = priv->plat->tx_fifo_size;
5852 	struct stmmac_dma_conf *dma_conf;
5853 	const int mtu = new_mtu;
5854 	int ret;
5855 
5856 	if (txfifosz == 0)
5857 		txfifosz = priv->dma_cap.tx_fifo_size;
5858 
5859 	txfifosz /= priv->plat->tx_queues_to_use;
5860 
5861 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5862 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5863 		return -EINVAL;
5864 	}
5865 
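	/* The aligned value is only used for the FIFO check below; the
	 * interface keeps the MTU requested by the caller.
	 */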
5866 	new_mtu = STMMAC_ALIGN(new_mtu);
5867 
5868 	/* The TX FIFO is too small for the new MTU, or the MTU is too large */
5869 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5870 		return -EINVAL;
5871 
5872 	if (netif_running(dev)) {
5873 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5874 		/* Try to allocate the new DMA conf with the new mtu */
5875 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5876 		if (IS_ERR(dma_conf)) {
5877 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5878 				   mtu);
5879 			return PTR_ERR(dma_conf);
5880 		}
5881 
5882 		__stmmac_release(dev);
5883 
5884 		ret = __stmmac_open(dev, dma_conf);
5885 		if (ret) {
5886 			free_dma_desc_resources(priv, dma_conf);
5887 			kfree(dma_conf);
5888 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5889 			return ret;
5890 		}
5891 
5892 		kfree(dma_conf);
5893 
5894 		stmmac_set_rx_mode(dev);
5895 	}
5896 
5897 	WRITE_ONCE(dev->mtu, mtu);
5898 	netdev_update_features(dev);
5899 
5900 	return 0;
5901 }
5902 
5903 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5904 					     netdev_features_t features)
5905 {
5906 	struct stmmac_priv *priv = netdev_priv(dev);
5907 
5908 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5909 		features &= ~NETIF_F_RXCSUM;
5910 
5911 	if (!priv->plat->tx_coe)
5912 		features &= ~NETIF_F_CSUM_MASK;
5913 
5914 	/* Some GMAC devices have buggy Jumbo frame support that
5915 	 * requires the Tx COE to be disabled for oversized frames
5916 	 * (due to limited buffer sizes). In this case we disable
5917 	 * the TX csum insertion in the TDES and do not use SF.
5918 	 */
5919 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5920 		features &= ~NETIF_F_CSUM_MASK;
5921 
5922 	/* Disable TSO if requested by ethtool */
5923 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5924 		if (features & NETIF_F_TSO)
5925 			priv->tso = true;
5926 		else
5927 			priv->tso = false;
5928 	}
5929 
5930 	return features;
5931 }
5932 
5933 static int stmmac_set_features(struct net_device *netdev,
5934 			       netdev_features_t features)
5935 {
5936 	struct stmmac_priv *priv = netdev_priv(netdev);
5937 
5938 	/* Keep the COE type if checksum offload is supported */
5939 	if (features & NETIF_F_RXCSUM)
5940 		priv->hw->rx_csum = priv->plat->rx_coe;
5941 	else
5942 		priv->hw->rx_csum = 0;
5943 	/* No check needed because rx_coe has been set earlier and it will be
5944 	 * fixed up if there is an issue.
5945 	 */
5946 	stmmac_rx_ipc(priv, priv->hw);
5947 
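	/* Split Header depends on RX checksum offload, so re-program it on
	 * every RX channel with the new rx_csum state.
	 */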
5948 	if (priv->sph_cap) {
5949 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5950 		u32 chan;
5951 
5952 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5953 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5954 	}
5955 
5956 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5957 		priv->hw->hw_vlan_en = true;
5958 	else
5959 		priv->hw->hw_vlan_en = false;
5960 
5961 	phylink_rx_clk_stop_block(priv->phylink);
5962 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5963 	phylink_rx_clk_stop_unblock(priv->phylink);
5964 
5965 	return 0;
5966 }
5967 
5968 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5969 {
5970 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5971 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5972 	u32 queues_count;
5973 	u32 queue;
5974 	bool xmac;
5975 
5976 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5977 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5978 
5979 	if (priv->irq_wake)
5980 		pm_wakeup_event(priv->device, 0);
5981 
5982 	if (priv->dma_cap.estsel)
5983 		stmmac_est_irq_status(priv, priv, priv->dev,
5984 				      &priv->xstats, tx_cnt);
5985 
5986 	if (stmmac_fpe_supported(priv))
5987 		stmmac_fpe_irq_status(priv);
5988 
5989 	/* To handle the GMAC's own interrupts */
5990 	if ((priv->plat->has_gmac) || xmac) {
5991 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5992 
5993 		if (unlikely(status)) {
5994 			/* For LPI we need to save the tx status */
5995 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5996 				priv->tx_path_in_lpi_mode = true;
5997 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5998 				priv->tx_path_in_lpi_mode = false;
5999 		}
6000 
6001 		for (queue = 0; queue < queues_count; queue++)
6002 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6003 
6004 		/* PCS link status */
6005 		if (priv->hw->pcs &&
6006 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6007 			if (priv->xstats.pcs_link)
6008 				netif_carrier_on(priv->dev);
6009 			else
6010 				netif_carrier_off(priv->dev);
6011 		}
6012 
6013 		stmmac_timestamp_interrupt(priv, priv);
6014 	}
6015 }
6016 
6017 /**
6018  *  stmmac_interrupt - main ISR
6019  *  @irq: interrupt number.
6020  *  @dev_id: to pass the net device pointer.
6021  *  Description: this is the main driver interrupt service routine.
6022  *  It can call:
6023  *  o DMA service routine (to manage incoming frame reception and transmission
6024  *    status)
6025  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6026  *    interrupts.
6027  */
6028 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6029 {
6030 	struct net_device *dev = (struct net_device *)dev_id;
6031 	struct stmmac_priv *priv = netdev_priv(dev);
6032 
6033 	/* Check if adapter is up */
6034 	if (test_bit(STMMAC_DOWN, &priv->state))
6035 		return IRQ_HANDLED;
6036 
6037 	/* Check ASP error if it isn't delivered via an individual IRQ */
6038 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6039 		return IRQ_HANDLED;
6040 
6041 	/* To handle Common interrupts */
6042 	stmmac_common_interrupt(priv);
6043 
6044 	/* To handle DMA interrupts */
6045 	stmmac_dma_interrupt(priv);
6046 
6047 	return IRQ_HANDLED;
6048 }
6049 
6050 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6051 {
6052 	struct net_device *dev = (struct net_device *)dev_id;
6053 	struct stmmac_priv *priv = netdev_priv(dev);
6054 
6055 	/* Check if adapter is up */
6056 	if (test_bit(STMMAC_DOWN, &priv->state))
6057 		return IRQ_HANDLED;
6058 
6059 	/* To handle Common interrupts */
6060 	stmmac_common_interrupt(priv);
6061 
6062 	return IRQ_HANDLED;
6063 }
6064 
6065 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6066 {
6067 	struct net_device *dev = (struct net_device *)dev_id;
6068 	struct stmmac_priv *priv = netdev_priv(dev);
6069 
6070 	/* Check if adapter is up */
6071 	if (test_bit(STMMAC_DOWN, &priv->state))
6072 		return IRQ_HANDLED;
6073 
6074 	/* Check if a fatal error happened */
6075 	stmmac_safety_feat_interrupt(priv);
6076 
6077 	return IRQ_HANDLED;
6078 }
6079 
6080 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6081 {
6082 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6083 	struct stmmac_dma_conf *dma_conf;
6084 	int chan = tx_q->queue_index;
6085 	struct stmmac_priv *priv;
6086 	int status;
6087 
6088 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6089 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6090 
6091 	/* Check if adapter is up */
6092 	if (test_bit(STMMAC_DOWN, &priv->state))
6093 		return IRQ_HANDLED;
6094 
6095 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6096 
6097 	if (unlikely(status & tx_hard_error_bump_tc)) {
6098 		/* Try to bump up the dma threshold on this failure */
6099 		stmmac_bump_dma_threshold(priv, chan);
6100 	} else if (unlikely(status == tx_hard_error)) {
6101 		stmmac_tx_err(priv, chan);
6102 	}
6103 
6104 	return IRQ_HANDLED;
6105 }
6106 
6107 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6108 {
6109 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6110 	struct stmmac_dma_conf *dma_conf;
6111 	int chan = rx_q->queue_index;
6112 	struct stmmac_priv *priv;
6113 
6114 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6115 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6116 
6117 	/* Check if adapter is up */
6118 	if (test_bit(STMMAC_DOWN, &priv->state))
6119 		return IRQ_HANDLED;
6120 
6121 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6122 
6123 	return IRQ_HANDLED;
6124 }
6125 
6126 /**
6127  *  stmmac_ioctl - Entry point for the Ioctl
6128  *  @dev: Device pointer.
6129  *  @rq: An IOCTL-specific structure that can contain a pointer to
6130  *  a proprietary structure used to pass information to the driver.
6131  *  @cmd: IOCTL command
6132  *  Description:
6133  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6134  */
6135 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6136 {
6137 	struct stmmac_priv *priv = netdev_priv(dev);
6138 	int ret = -EOPNOTSUPP;
6139 
6140 	if (!netif_running(dev))
6141 		return -EINVAL;
6142 
6143 	switch (cmd) {
6144 	case SIOCGMIIPHY:
6145 	case SIOCGMIIREG:
6146 	case SIOCSMIIREG:
6147 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6148 		break;
6149 	default:
6150 		break;
6151 	}
6152 
6153 	return ret;
6154 }
6155 
6156 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6157 				    void *cb_priv)
6158 {
6159 	struct stmmac_priv *priv = cb_priv;
6160 	int ret = -EOPNOTSUPP;
6161 
6162 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6163 		return ret;
6164 
6165 	__stmmac_disable_all_queues(priv);
6166 
6167 	switch (type) {
6168 	case TC_SETUP_CLSU32:
6169 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6170 		break;
6171 	case TC_SETUP_CLSFLOWER:
6172 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6173 		break;
6174 	default:
6175 		break;
6176 	}
6177 
6178 	stmmac_enable_all_queues(priv);
6179 	return ret;
6180 }
6181 
6182 static LIST_HEAD(stmmac_block_cb_list);
6183 
6184 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6185 			   void *type_data)
6186 {
6187 	struct stmmac_priv *priv = netdev_priv(ndev);
6188 
6189 	switch (type) {
6190 	case TC_QUERY_CAPS:
6191 		return stmmac_tc_query_caps(priv, priv, type_data);
6192 	case TC_SETUP_QDISC_MQPRIO:
6193 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6194 	case TC_SETUP_BLOCK:
6195 		return flow_block_cb_setup_simple(type_data,
6196 						  &stmmac_block_cb_list,
6197 						  stmmac_setup_tc_block_cb,
6198 						  priv, priv, true);
6199 	case TC_SETUP_QDISC_CBS:
6200 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6201 	case TC_SETUP_QDISC_TAPRIO:
6202 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6203 	case TC_SETUP_QDISC_ETF:
6204 		return stmmac_tc_setup_etf(priv, priv, type_data);
6205 	default:
6206 		return -EOPNOTSUPP;
6207 	}
6208 }
6209 
6210 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6211 			       struct net_device *sb_dev)
6212 {
6213 	int gso = skb_shinfo(skb)->gso_type;
6214 
6215 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6216 		/*
6217 		 * There is no way to determine the number of TSO/USO
6218 		 * capable Queues. Let's always use Queue 0
6219 		 * because if TSO/USO is supported then at least this
6220 		 * one will be capable.
6221 		 */
6222 		return 0;
6223 	}
6224 
6225 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6226 }
6227 
6228 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6229 {
6230 	struct stmmac_priv *priv = netdev_priv(ndev);
6231 	int ret = 0;
6232 
6233 	ret = pm_runtime_resume_and_get(priv->device);
6234 	if (ret < 0)
6235 		return ret;
6236 
6237 	ret = eth_mac_addr(ndev, addr);
6238 	if (ret)
6239 		goto set_mac_error;
6240 
6241 	phylink_rx_clk_stop_block(priv->phylink);
6242 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6243 	phylink_rx_clk_stop_unblock(priv->phylink);
6244 
6245 set_mac_error:
6246 	pm_runtime_put(priv->device);
6247 
6248 	return ret;
6249 }
6250 
6251 #ifdef CONFIG_DEBUG_FS
6252 static struct dentry *stmmac_fs_dir;
6253 
6254 static void sysfs_display_ring(void *head, int size, int extend_desc,
6255 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6256 {
6257 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6258 	struct dma_desc *p = (struct dma_desc *)head;
6259 	unsigned int desc_size;
6260 	dma_addr_t dma_addr;
6261 	int i;
6262 
6263 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6264 	for (i = 0; i < size; i++) {
6265 		dma_addr = dma_phy_addr + i * desc_size;
6266 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6267 				i, &dma_addr,
6268 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6269 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6270 		if (extend_desc)
6271 			p = &(++ep)->basic;
6272 		else
6273 			p++;
6274 	}
6275 }
6276 
6277 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6278 {
6279 	struct net_device *dev = seq->private;
6280 	struct stmmac_priv *priv = netdev_priv(dev);
6281 	u32 rx_count = priv->plat->rx_queues_to_use;
6282 	u32 tx_count = priv->plat->tx_queues_to_use;
6283 	u32 queue;
6284 
6285 	if ((dev->flags & IFF_UP) == 0)
6286 		return 0;
6287 
6288 	for (queue = 0; queue < rx_count; queue++) {
6289 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6290 
6291 		seq_printf(seq, "RX Queue %d:\n", queue);
6292 
6293 		if (priv->extend_desc) {
6294 			seq_printf(seq, "Extended descriptor ring:\n");
6295 			sysfs_display_ring((void *)rx_q->dma_erx,
6296 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6297 		} else {
6298 			seq_printf(seq, "Descriptor ring:\n");
6299 			sysfs_display_ring((void *)rx_q->dma_rx,
6300 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6301 		}
6302 	}
6303 
6304 	for (queue = 0; queue < tx_count; queue++) {
6305 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6306 
6307 		seq_printf(seq, "TX Queue %d:\n", queue);
6308 
6309 		if (priv->extend_desc) {
6310 			seq_printf(seq, "Extended descriptor ring:\n");
6311 			sysfs_display_ring((void *)tx_q->dma_etx,
6312 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6313 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6314 			seq_printf(seq, "Descriptor ring:\n");
6315 			sysfs_display_ring((void *)tx_q->dma_tx,
6316 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6317 		}
6318 	}
6319 
6320 	return 0;
6321 }
6322 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6323 
6324 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6325 {
6326 	static const char * const dwxgmac_timestamp_source[] = {
6327 		"None",
6328 		"Internal",
6329 		"External",
6330 		"Both",
6331 	};
6332 	static const char * const dwxgmac_safety_feature_desc[] = {
6333 		"No",
6334 		"All Safety Features with ECC and Parity",
6335 		"All Safety Features without ECC or Parity",
6336 		"All Safety Features with Parity Only",
6337 		"ECC Only",
6338 		"UNDEFINED",
6339 		"UNDEFINED",
6340 		"UNDEFINED",
6341 	};
6342 	struct net_device *dev = seq->private;
6343 	struct stmmac_priv *priv = netdev_priv(dev);
6344 
6345 	if (!priv->hw_cap_support) {
6346 		seq_printf(seq, "DMA HW features not supported\n");
6347 		return 0;
6348 	}
6349 
6350 	seq_printf(seq, "==============================\n");
6351 	seq_printf(seq, "\tDMA HW features\n");
6352 	seq_printf(seq, "==============================\n");
6353 
6354 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6355 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6356 	seq_printf(seq, "\t1000 Mbps: %s\n",
6357 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6358 	seq_printf(seq, "\tHalf duplex: %s\n",
6359 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6360 	if (priv->plat->has_xgmac) {
6361 		seq_printf(seq,
6362 			   "\tNumber of Additional MAC address registers: %d\n",
6363 			   priv->dma_cap.multi_addr);
6364 	} else {
6365 		seq_printf(seq, "\tHash Filter: %s\n",
6366 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6367 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6368 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6369 	}
6370 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6371 		   (priv->dma_cap.pcs) ? "Y" : "N");
6372 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6373 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6374 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6375 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6376 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6377 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6378 	seq_printf(seq, "\tRMON module: %s\n",
6379 		   (priv->dma_cap.rmon) ? "Y" : "N");
6380 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6381 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6382 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6383 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6384 	if (priv->plat->has_xgmac)
6385 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6386 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6387 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6388 		   (priv->dma_cap.eee) ? "Y" : "N");
6389 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6390 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6391 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6392 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6393 	    priv->plat->has_xgmac) {
6394 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6395 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6396 	} else {
6397 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6398 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6399 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6400 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6401 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6402 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6403 	}
6404 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6405 		   priv->dma_cap.number_rx_channel);
6406 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6407 		   priv->dma_cap.number_tx_channel);
6408 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6409 		   priv->dma_cap.number_rx_queues);
6410 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6411 		   priv->dma_cap.number_tx_queues);
6412 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6413 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6414 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6415 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6416 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6417 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6418 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6419 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6420 		   priv->dma_cap.pps_out_num);
6421 	seq_printf(seq, "\tSafety Features: %s\n",
6422 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6423 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6424 		   priv->dma_cap.frpsel ? "Y" : "N");
6425 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6426 		   priv->dma_cap.host_dma_width);
6427 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6428 		   priv->dma_cap.rssen ? "Y" : "N");
6429 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6430 		   priv->dma_cap.vlhash ? "Y" : "N");
6431 	seq_printf(seq, "\tSplit Header: %s\n",
6432 		   priv->dma_cap.sphen ? "Y" : "N");
6433 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6434 		   priv->dma_cap.vlins ? "Y" : "N");
6435 	seq_printf(seq, "\tDouble VLAN: %s\n",
6436 		   priv->dma_cap.dvlan ? "Y" : "N");
6437 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6438 		   priv->dma_cap.l3l4fnum);
6439 	seq_printf(seq, "\tARP Offloading: %s\n",
6440 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6441 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6442 		   priv->dma_cap.estsel ? "Y" : "N");
6443 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6444 		   priv->dma_cap.fpesel ? "Y" : "N");
6445 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6446 		   priv->dma_cap.tbssel ? "Y" : "N");
6447 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6448 		   priv->dma_cap.tbs_ch_num);
6449 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6450 		   priv->dma_cap.sgfsel ? "Y" : "N");
6451 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6452 		   BIT(priv->dma_cap.ttsfd) >> 1);
6453 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6454 		   priv->dma_cap.numtc);
6455 	seq_printf(seq, "\tDCB Feature: %s\n",
6456 		   priv->dma_cap.dcben ? "Y" : "N");
6457 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6458 		   priv->dma_cap.advthword ? "Y" : "N");
6459 	seq_printf(seq, "\tPTP Offload: %s\n",
6460 		   priv->dma_cap.ptoen ? "Y" : "N");
6461 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6462 		   priv->dma_cap.osten ? "Y" : "N");
6463 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6464 		   priv->dma_cap.pfcen ? "Y" : "N");
6465 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6466 		   BIT(priv->dma_cap.frpes) << 6);
6467 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6468 		   BIT(priv->dma_cap.frpbs) << 6);
6469 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6470 		   priv->dma_cap.frppipe_num);
6471 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6472 		   priv->dma_cap.nrvf_num ?
6473 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6474 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6475 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6476 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6477 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6478 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6479 		   priv->dma_cap.cbtisel ? "Y" : "N");
6480 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6481 		   priv->dma_cap.aux_snapshot_n);
6482 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6483 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6484 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6485 		   priv->dma_cap.edma ? "Y" : "N");
6486 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6487 		   priv->dma_cap.ediffc ? "Y" : "N");
6488 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6489 		   priv->dma_cap.vxn ? "Y" : "N");
6490 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6491 		   priv->dma_cap.dbgmem ? "Y" : "N");
6492 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6493 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6494 	return 0;
6495 }
6496 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6497 
6498 /* Use network device events to rename debugfs file entries.
6499  */
6500 static int stmmac_device_event(struct notifier_block *unused,
6501 			       unsigned long event, void *ptr)
6502 {
6503 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6504 	struct stmmac_priv *priv = netdev_priv(dev);
6505 
6506 	if (dev->netdev_ops != &stmmac_netdev_ops)
6507 		goto done;
6508 
6509 	switch (event) {
6510 	case NETDEV_CHANGENAME:
6511 		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6512 		break;
6513 	}
6514 done:
6515 	return NOTIFY_DONE;
6516 }
6517 
6518 static struct notifier_block stmmac_notifier = {
6519 	.notifier_call = stmmac_device_event,
6520 };
6521 
6522 static void stmmac_init_fs(struct net_device *dev)
6523 {
6524 	struct stmmac_priv *priv = netdev_priv(dev);
6525 
6526 	rtnl_lock();
6527 
6528 	/* Create per netdev entries */
6529 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6530 
6531 	/* Entry to report DMA RX/TX rings */
6532 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6533 			    &stmmac_rings_status_fops);
6534 
6535 	/* Entry to report the DMA HW features */
6536 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6537 			    &stmmac_dma_cap_fops);
6538 
6539 	rtnl_unlock();
6540 }
6541 
6542 static void stmmac_exit_fs(struct net_device *dev)
6543 {
6544 	struct stmmac_priv *priv = netdev_priv(dev);
6545 
6546 	debugfs_remove_recursive(priv->dbgfs_dir);
6547 }
6548 #endif /* CONFIG_DEBUG_FS */
6549 
6550 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6551 {
6552 	unsigned char *data = (unsigned char *)&vid_le;
6553 	unsigned char data_byte = 0;
6554 	u32 crc = ~0x0;
6555 	u32 temp = 0;
6556 	int i, bits;
6557 
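	/* Bit-serial, reflected CRC-32 (polynomial 0xedb88320) over the
	 * VID bits.
	 */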
6558 	bits = get_bitmask_order(VLAN_VID_MASK);
6559 	for (i = 0; i < bits; i++) {
6560 		if ((i % 8) == 0)
6561 			data_byte = data[i / 8];
6562 
6563 		temp = ((crc & 1) ^ data_byte) & 1;
6564 		crc >>= 1;
6565 		data_byte >>= 1;
6566 
6567 		if (temp)
6568 			crc ^= 0xedb88320;
6569 	}
6570 
6571 	return crc;
6572 }
6573 
6574 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6575 {
6576 	u32 crc, hash = 0;
6577 	u16 pmatch = 0;
6578 	int count = 0;
6579 	u16 vid = 0;
6580 
6581 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6582 		__le16 vid_le = cpu_to_le16(vid);
6583 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6584 		hash |= (1 << crc);
6585 		count++;
6586 	}
6587 
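	/* Without VLAN hash filtering fall back to a single perfect-match
	 * entry; VID 0 always passes, so at most one other VID can be
	 * programmed.
	 */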
6588 	if (!priv->dma_cap.vlhash) {
6589 		if (count > 2) /* VID = 0 always passes filter */
6590 			return -EOPNOTSUPP;
6591 
6592 		pmatch = vid;
6593 		hash = 0;
6594 	}
6595 
6596 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6597 }
6598 
6599 /* FIXME: This may need RXC to be running, but it may be called with BH
6600  * disabled, which means we can't call phylink_rx_clk_stop*().
6601  */
6602 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6603 {
6604 	struct stmmac_priv *priv = netdev_priv(ndev);
6605 	bool is_double = false;
6606 	int ret;
6607 
6608 	ret = pm_runtime_resume_and_get(priv->device);
6609 	if (ret < 0)
6610 		return ret;
6611 
6612 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6613 		is_double = true;
6614 
6615 	set_bit(vid, priv->active_vlans);
6616 	ret = stmmac_vlan_update(priv, is_double);
6617 	if (ret) {
6618 		clear_bit(vid, priv->active_vlans);
6619 		goto err_pm_put;
6620 	}
6621 
6622 	if (priv->hw->num_vlan) {
6623 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6624 		if (ret)
6625 			goto err_pm_put;
6626 	}
6627 err_pm_put:
6628 	pm_runtime_put(priv->device);
6629 
6630 	return ret;
6631 }
6632 
6633 /* FIXME: This may need RXC to be running, but it may be called with BH
6634  * disabled, which means we can't call phylink_rx_clk_stop*().
6635  */
6636 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6637 {
6638 	struct stmmac_priv *priv = netdev_priv(ndev);
6639 	bool is_double = false;
6640 	int ret;
6641 
6642 	ret = pm_runtime_resume_and_get(priv->device);
6643 	if (ret < 0)
6644 		return ret;
6645 
6646 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6647 		is_double = true;
6648 
6649 	clear_bit(vid, priv->active_vlans);
6650 
6651 	if (priv->hw->num_vlan) {
6652 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6653 		if (ret)
6654 			goto del_vlan_error;
6655 	}
6656 
6657 	ret = stmmac_vlan_update(priv, is_double);
6658 
6659 del_vlan_error:
6660 	pm_runtime_put(priv->device);
6661 
6662 	return ret;
6663 }
6664 
6665 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6666 {
6667 	struct stmmac_priv *priv = netdev_priv(dev);
6668 
6669 	switch (bpf->command) {
6670 	case XDP_SETUP_PROG:
6671 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6672 	case XDP_SETUP_XSK_POOL:
6673 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6674 					     bpf->xsk.queue_id);
6675 	default:
6676 		return -EOPNOTSUPP;
6677 	}
6678 }
6679 
6680 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6681 			   struct xdp_frame **frames, u32 flags)
6682 {
6683 	struct stmmac_priv *priv = netdev_priv(dev);
6684 	int cpu = smp_processor_id();
6685 	struct netdev_queue *nq;
6686 	int i, nxmit = 0;
6687 	int queue;
6688 
6689 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6690 		return -ENETDOWN;
6691 
6692 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6693 		return -EINVAL;
6694 
6695 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6696 	nq = netdev_get_tx_queue(priv->dev, queue);
6697 
6698 	__netif_tx_lock(nq, cpu);
6699 	/* Avoids TX time-out as we are sharing with slow path */
6700 	txq_trans_cond_update(nq);
6701 
6702 	for (i = 0; i < num_frames; i++) {
6703 		int res;
6704 
6705 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6706 		if (res == STMMAC_XDP_CONSUMED)
6707 			break;
6708 
6709 		nxmit++;
6710 	}
6711 
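	/* Hand the queued frames to the DMA and re-arm the TX coalescing
	 * timer.
	 */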
6712 	if (flags & XDP_XMIT_FLUSH) {
6713 		stmmac_flush_tx_descriptors(priv, queue);
6714 		stmmac_tx_timer_arm(priv, queue);
6715 	}
6716 
6717 	__netif_tx_unlock(nq);
6718 
6719 	return nxmit;
6720 }
6721 
6722 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6723 {
6724 	struct stmmac_channel *ch = &priv->channel[queue];
6725 	unsigned long flags;
6726 
6727 	spin_lock_irqsave(&ch->lock, flags);
6728 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6729 	spin_unlock_irqrestore(&ch->lock, flags);
6730 
6731 	stmmac_stop_rx_dma(priv, queue);
6732 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6733 }
6734 
6735 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6736 {
6737 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6738 	struct stmmac_channel *ch = &priv->channel[queue];
6739 	unsigned long flags;
6740 	u32 buf_size;
6741 	int ret;
6742 
6743 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6744 	if (ret) {
6745 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6746 		return;
6747 	}
6748 
6749 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6750 	if (ret) {
6751 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6752 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6753 		return;
6754 	}
6755 
6756 	stmmac_reset_rx_queue(priv, queue);
6757 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6758 
6759 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6760 			    rx_q->dma_rx_phy, rx_q->queue_index);
6761 
6762 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6763 			     sizeof(struct dma_desc));
6764 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6765 			       rx_q->rx_tail_addr, rx_q->queue_index);
6766 
6767 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6768 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6769 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6770 				      buf_size,
6771 				      rx_q->queue_index);
6772 	} else {
6773 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6774 				      priv->dma_conf.dma_buf_sz,
6775 				      rx_q->queue_index);
6776 	}
6777 
6778 	stmmac_start_rx_dma(priv, queue);
6779 
6780 	spin_lock_irqsave(&ch->lock, flags);
6781 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6782 	spin_unlock_irqrestore(&ch->lock, flags);
6783 }
6784 
6785 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6786 {
6787 	struct stmmac_channel *ch = &priv->channel[queue];
6788 	unsigned long flags;
6789 
6790 	spin_lock_irqsave(&ch->lock, flags);
6791 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6792 	spin_unlock_irqrestore(&ch->lock, flags);
6793 
6794 	stmmac_stop_tx_dma(priv, queue);
6795 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6796 }
6797 
6798 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6799 {
6800 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6801 	struct stmmac_channel *ch = &priv->channel[queue];
6802 	unsigned long flags;
6803 	int ret;
6804 
6805 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6806 	if (ret) {
6807 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6808 		return;
6809 	}
6810 
6811 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6812 	if (ret) {
6813 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6814 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6815 		return;
6816 	}
6817 
6818 	stmmac_reset_tx_queue(priv, queue);
6819 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6820 
6821 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6822 			    tx_q->dma_tx_phy, tx_q->queue_index);
6823 
6824 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6825 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6826 
6827 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6828 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6829 			       tx_q->tx_tail_addr, tx_q->queue_index);
6830 
6831 	stmmac_start_tx_dma(priv, queue);
6832 
6833 	spin_lock_irqsave(&ch->lock, flags);
6834 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6835 	spin_unlock_irqrestore(&ch->lock, flags);
6836 }
6837 
6838 void stmmac_xdp_release(struct net_device *dev)
6839 {
6840 	struct stmmac_priv *priv = netdev_priv(dev);
6841 	u32 chan;
6842 
6843 	/* Ensure tx function is not running */
6844 	netif_tx_disable(dev);
6845 
6846 	/* Disable NAPI process */
6847 	stmmac_disable_all_queues(priv);
6848 
6849 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6850 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6851 
6852 	/* Free the IRQ lines */
6853 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6854 
6855 	/* Stop TX/RX DMA channels */
6856 	stmmac_stop_all_dma(priv);
6857 
6858 	/* Release and free the Rx/Tx resources */
6859 	free_dma_desc_resources(priv, &priv->dma_conf);
6860 
6861 	/* Disable the MAC Rx/Tx */
6862 	stmmac_mac_set(priv, priv->ioaddr, false);
6863 
6864 	/* set trans_start so we don't get spurious
6865 	 * watchdogs during reset
6866 	 */
6867 	netif_trans_update(dev);
6868 	netif_carrier_off(dev);
6869 }
6870 
6871 int stmmac_xdp_open(struct net_device *dev)
6872 {
6873 	struct stmmac_priv *priv = netdev_priv(dev);
6874 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6875 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6876 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6877 	struct stmmac_rx_queue *rx_q;
6878 	struct stmmac_tx_queue *tx_q;
6879 	u32 buf_size;
6880 	bool sph_en;
6881 	u32 chan;
6882 	int ret;
6883 
6884 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6885 	if (ret < 0) {
6886 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6887 			   __func__);
6888 		goto dma_desc_error;
6889 	}
6890 
6891 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6892 	if (ret < 0) {
6893 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6894 			   __func__);
6895 		goto init_error;
6896 	}
6897 
6898 	stmmac_reset_queues_param(priv);
6899 
6900 	/* DMA CSR Channel configuration */
6901 	for (chan = 0; chan < dma_csr_ch; chan++) {
6902 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6903 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6904 	}
6905 
6906 	/* Adjust Split header */
6907 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6908 
6909 	/* DMA RX Channel Configuration */
6910 	for (chan = 0; chan < rx_cnt; chan++) {
6911 		rx_q = &priv->dma_conf.rx_queue[chan];
6912 
6913 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6914 				    rx_q->dma_rx_phy, chan);
6915 
6916 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6917 				     (rx_q->buf_alloc_num *
6918 				      sizeof(struct dma_desc));
6919 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6920 				       rx_q->rx_tail_addr, chan);
6921 
6922 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6923 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6924 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6925 					      buf_size,
6926 					      rx_q->queue_index);
6927 		} else {
6928 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6929 					      priv->dma_conf.dma_buf_sz,
6930 					      rx_q->queue_index);
6931 		}
6932 
6933 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6934 	}
6935 
6936 	/* DMA TX Channel Configuration */
6937 	for (chan = 0; chan < tx_cnt; chan++) {
6938 		tx_q = &priv->dma_conf.tx_queue[chan];
6939 
6940 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6941 				    tx_q->dma_tx_phy, chan);
6942 
6943 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6944 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6945 				       tx_q->tx_tail_addr, chan);
6946 
6947 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6948 	}
6949 
6950 	/* Enable the MAC Rx/Tx */
6951 	stmmac_mac_set(priv, priv->ioaddr, true);
6952 
6953 	/* Start Rx & Tx DMA Channels */
6954 	stmmac_start_all_dma(priv);
6955 
6956 	ret = stmmac_request_irq(dev);
6957 	if (ret)
6958 		goto irq_error;
6959 
6960 	/* Enable NAPI process */
6961 	stmmac_enable_all_queues(priv);
6962 	netif_carrier_on(dev);
6963 	netif_tx_start_all_queues(dev);
6964 	stmmac_enable_all_dma_irq(priv);
6965 
6966 	return 0;
6967 
6968 irq_error:
6969 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6970 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6971 
6972 init_error:
6973 	free_dma_desc_resources(priv, &priv->dma_conf);
6974 dma_desc_error:
6975 	return ret;
6976 }
6977 
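/**
 * stmmac_xsk_wakeup - ndo_xsk_wakeup callback for AF_XDP sockets
 * @dev: network device pointer
 * @queue: queue index
 * @flags: XDP_WAKEUP_RX and/or XDP_WAKEUP_TX (unused; both directions are
 *	   served by the same rxtx NAPI)
 * Description: validate the queue and XSK pool state, then schedule the
 * channel's rxtx NAPI unless it is already running.
 * Return: 0 on success, a negative errno otherwise.
 */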
6978 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6979 {
6980 	struct stmmac_priv *priv = netdev_priv(dev);
6981 	struct stmmac_rx_queue *rx_q;
6982 	struct stmmac_tx_queue *tx_q;
6983 	struct stmmac_channel *ch;
6984 
6985 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6986 	    !netif_carrier_ok(priv->dev))
6987 		return -ENETDOWN;
6988 
6989 	if (!stmmac_xdp_is_enabled(priv))
6990 		return -EINVAL;
6991 
6992 	if (queue >= priv->plat->rx_queues_to_use ||
6993 	    queue >= priv->plat->tx_queues_to_use)
6994 		return -EINVAL;
6995 
6996 	rx_q = &priv->dma_conf.rx_queue[queue];
6997 	tx_q = &priv->dma_conf.tx_queue[queue];
6998 	ch = &priv->channel[queue];
6999 
7000 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7001 		return -EINVAL;
7002 
7003 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7004 		/* EQoS does not have a per-DMA channel SW interrupt,
7005 		 * so schedule the channel's RX/TX NAPI straight away.
7006 		 */
7007 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7008 			__napi_schedule(&ch->rxtx_napi);
7009 	}
7010 
7011 	return 0;
7012 }
7013 
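/* ndo_get_stats64 callback: aggregate the per-queue software counters (read
 * under the u64_stats retry loops) and the error counters kept in
 * priv->xstats into the rtnl_link_stats64 structure.
 */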
7014 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7015 {
7016 	struct stmmac_priv *priv = netdev_priv(dev);
7017 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7018 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7019 	unsigned int start;
7020 	int q;
7021 
7022 	for (q = 0; q < tx_cnt; q++) {
7023 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7024 		u64 tx_packets;
7025 		u64 tx_bytes;
7026 
7027 		do {
7028 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7029 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7030 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7031 		do {
7032 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7033 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7034 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7035 
7036 		stats->tx_packets += tx_packets;
7037 		stats->tx_bytes += tx_bytes;
7038 	}
7039 
7040 	for (q = 0; q < rx_cnt; q++) {
7041 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7042 		u64 rx_packets;
7043 		u64 rx_bytes;
7044 
7045 		do {
7046 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7047 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7048 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7049 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7050 
7051 		stats->rx_packets += rx_packets;
7052 		stats->rx_bytes += rx_bytes;
7053 	}
7054 
7055 	stats->rx_dropped = priv->xstats.rx_dropped;
7056 	stats->rx_errors = priv->xstats.rx_errors;
7057 	stats->tx_dropped = priv->xstats.tx_dropped;
7058 	stats->tx_errors = priv->xstats.tx_errors;
7059 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7060 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7061 	stats->rx_length_errors = priv->xstats.rx_length;
7062 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7063 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7064 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7065 }
7066 
7067 static const struct net_device_ops stmmac_netdev_ops = {
7068 	.ndo_open = stmmac_open,
7069 	.ndo_start_xmit = stmmac_xmit,
7070 	.ndo_stop = stmmac_release,
7071 	.ndo_change_mtu = stmmac_change_mtu,
7072 	.ndo_fix_features = stmmac_fix_features,
7073 	.ndo_set_features = stmmac_set_features,
7074 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7075 	.ndo_tx_timeout = stmmac_tx_timeout,
7076 	.ndo_eth_ioctl = stmmac_ioctl,
7077 	.ndo_get_stats64 = stmmac_get_stats64,
7078 	.ndo_setup_tc = stmmac_setup_tc,
7079 	.ndo_select_queue = stmmac_select_queue,
7080 	.ndo_set_mac_address = stmmac_set_mac_address,
7081 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7082 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7083 	.ndo_bpf = stmmac_bpf,
7084 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7085 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7086 	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
7087 	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
7088 };
7089 
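/* If a reset has been requested and the interface is not already down, take
 * the rtnl lock and run a dev_close()/dev_open() cycle to reinitialize the
 * adapter.
 */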
7090 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7091 {
7092 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7093 		return;
7094 	if (test_bit(STMMAC_DOWN, &priv->state))
7095 		return;
7096 
7097 	netdev_err(priv->dev, "Reset adapter.\n");
7098 
7099 	rtnl_lock();
7100 	netif_trans_update(priv->dev);
7101 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7102 		usleep_range(1000, 2000);
7103 
7104 	set_bit(STMMAC_DOWN, &priv->state);
7105 	dev_close(priv->dev);
7106 	dev_open(priv->dev, NULL);
7107 	clear_bit(STMMAC_DOWN, &priv->state);
7108 	clear_bit(STMMAC_RESETING, &priv->state);
7109 	rtnl_unlock();
7110 }
7111 
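/* Workqueue handler behind priv->service_task: run the reset subtask and
 * clear the SERVICE_SCHED flag so that the task can be scheduled again.
 */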
7112 static void stmmac_service_task(struct work_struct *work)
7113 {
7114 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7115 			service_task);
7116 
7117 	stmmac_reset_subtask(priv);
7118 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7119 }
7120 
7121 /**
7122  *  stmmac_hw_init - Init the MAC device
7123  *  @priv: driver private structure
7124  *  Description: this function is to configure the MAC device according to
7125  *  some platform parameters or the HW capability register. It prepares the
7126  *  driver to use either ring or chain modes and to setup either enhanced or
7127  *  normal descriptors.
7128  */
7129 static int stmmac_hw_init(struct stmmac_priv *priv)
7130 {
7131 	int ret;
7132 
7133 	/* dwmac-sun8i only works in chain mode */
7134 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7135 		chain_mode = 1;
7136 	priv->chain_mode = chain_mode;
7137 
7138 	/* Initialize HW Interface */
7139 	ret = stmmac_hwif_init(priv);
7140 	if (ret)
7141 		return ret;
7142 
7143 	/* Get the HW capability register (available on GMAC cores newer than 3.50a) */
7144 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7145 	if (priv->hw_cap_support) {
7146 		dev_info(priv->device, "DMA HW capability register supported\n");
7147 
7148 		/* Some GMAC/DMA configuration fields passed in via the
7149 		 * platform data (e.g. enh_desc, tx_coe) can be overridden
7150 		 * with the values read from the HW capability register,
7151 		 * when supported.
7152 		 */
7153 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7154 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7155 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7156 		if (priv->dma_cap.hash_tb_sz) {
7157 			priv->hw->multicast_filter_bins =
7158 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7159 			priv->hw->mcast_bits_log2 =
7160 					ilog2(priv->hw->multicast_filter_bins);
7161 		}
7162 
7163 		/* TXCOE doesn't work in thresh DMA mode */
7164 		if (priv->plat->force_thresh_dma_mode)
7165 			priv->plat->tx_coe = 0;
7166 		else
7167 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7168 
7169 		/* For GMAC4, rx_coe comes from the HW capability register. */
7170 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7171 
7172 		if (priv->dma_cap.rx_coe_type2)
7173 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7174 		else if (priv->dma_cap.rx_coe_type1)
7175 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7176 
7177 	} else {
7178 		dev_info(priv->device, "No HW DMA feature register supported\n");
7179 	}
7180 
7181 	if (priv->plat->rx_coe) {
7182 		priv->hw->rx_csum = priv->plat->rx_coe;
7183 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7184 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7185 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7186 	}
7187 	if (priv->plat->tx_coe)
7188 		dev_info(priv->device, "TX Checksum insertion supported\n");
7189 
7190 	if (priv->plat->pmt) {
7191 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7192 		device_set_wakeup_capable(priv->device, 1);
7193 		devm_pm_set_wake_irq(priv->device, priv->wol_irq);
7194 	}
7195 
7196 	if (priv->dma_cap.tsoen)
7197 		dev_info(priv->device, "TSO supported\n");
7198 
7199 	if (priv->dma_cap.number_rx_queues &&
7200 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7201 		dev_warn(priv->device,
7202 			 "Number of Rx queues (%u) exceeds dma capability\n",
7203 			 priv->plat->rx_queues_to_use);
7204 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7205 	}
7206 	if (priv->dma_cap.number_tx_queues &&
7207 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7208 		dev_warn(priv->device,
7209 			 "Number of Tx queues (%u) exceeds dma capability\n",
7210 			 priv->plat->tx_queues_to_use);
7211 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7212 	}
7213 
7214 	if (priv->dma_cap.rx_fifo_size &&
7215 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7216 		dev_warn(priv->device,
7217 			 "Rx FIFO size (%u) exceeds dma capability\n",
7218 			 priv->plat->rx_fifo_size);
7219 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7220 	}
7221 	if (priv->dma_cap.tx_fifo_size &&
7222 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7223 		dev_warn(priv->device,
7224 			 "Tx FIFO size (%u) exceeds dma capability\n",
7225 			 priv->plat->tx_fifo_size);
7226 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7227 	}
7228 
7229 	priv->hw->vlan_fail_q_en =
7230 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7231 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7232 
7233 	/* Run HW quirks, if any */
7234 	if (priv->hwif_quirks) {
7235 		ret = priv->hwif_quirks(priv);
7236 		if (ret)
7237 			return ret;
7238 	}
7239 
7240 	/* The Rx Watchdog is available on cores newer than 3.40.
7241 	 * In some cases, for example on buggy HW, this feature has to be
7242 	 * disabled; this can be done by setting the riwt_off field in the
7243 	 * platform data.
7244 	 */
7245 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7246 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7247 		priv->use_riwt = 1;
7248 		dev_info(priv->device,
7249 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7250 	}
7251 
7252 	return 0;
7253 }
7254 
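/* Register the per-channel NAPI instances: rx_napi for RX-capable channels,
 * tx_napi for TX-capable channels, plus the combined rxtx_napi (used e.g. by
 * stmmac_xsk_wakeup()) when the channel handles both directions.
 */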
7255 static void stmmac_napi_add(struct net_device *dev)
7256 {
7257 	struct stmmac_priv *priv = netdev_priv(dev);
7258 	u32 queue, maxq;
7259 
7260 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7261 
7262 	for (queue = 0; queue < maxq; queue++) {
7263 		struct stmmac_channel *ch = &priv->channel[queue];
7264 
7265 		ch->priv_data = priv;
7266 		ch->index = queue;
7267 		spin_lock_init(&ch->lock);
7268 
7269 		if (queue < priv->plat->rx_queues_to_use) {
7270 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7271 		}
7272 		if (queue < priv->plat->tx_queues_to_use) {
7273 			netif_napi_add_tx(dev, &ch->tx_napi,
7274 					  stmmac_napi_poll_tx);
7275 		}
7276 		if (queue < priv->plat->rx_queues_to_use &&
7277 		    queue < priv->plat->tx_queues_to_use) {
7278 			netif_napi_add(dev, &ch->rxtx_napi,
7279 				       stmmac_napi_poll_rxtx);
7280 		}
7281 	}
7282 }
7283 
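/* Unregister the NAPI instances added by stmmac_napi_add(). */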
7284 static void stmmac_napi_del(struct net_device *dev)
7285 {
7286 	struct stmmac_priv *priv = netdev_priv(dev);
7287 	u32 queue, maxq;
7288 
7289 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7290 
7291 	for (queue = 0; queue < maxq; queue++) {
7292 		struct stmmac_channel *ch = &priv->channel[queue];
7293 
7294 		if (queue < priv->plat->rx_queues_to_use)
7295 			netif_napi_del(&ch->rx_napi);
7296 		if (queue < priv->plat->tx_queues_to_use)
7297 			netif_napi_del(&ch->tx_napi);
7298 		if (queue < priv->plat->rx_queues_to_use &&
7299 		    queue < priv->plat->tx_queues_to_use) {
7300 			netif_napi_del(&ch->rxtx_napi);
7301 		}
7302 	}
7303 }
7304 
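/* Change the number of RX/TX queues in use: release the interface if it is
 * running, re-register the NAPI instances for the new counts, refresh the
 * default RSS indirection table (unless the user configured one) and reopen
 * the interface.
 */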
7305 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7306 {
7307 	struct stmmac_priv *priv = netdev_priv(dev);
7308 	int ret = 0, i;
7309 
7310 	if (netif_running(dev))
7311 		stmmac_release(dev);
7312 
7313 	stmmac_napi_del(dev);
7314 
7315 	priv->plat->rx_queues_to_use = rx_cnt;
7316 	priv->plat->tx_queues_to_use = tx_cnt;
7317 	if (!netif_is_rxfh_configured(dev))
7318 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7319 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7320 									rx_cnt);
7321 
7322 	stmmac_napi_add(dev);
7323 
7324 	if (netif_running(dev))
7325 		ret = stmmac_open(dev);
7326 
7327 	return ret;
7328 }
7329 
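/* Change the RX/TX descriptor ring sizes. The new sizes take effect when the
 * rings are reallocated in stmmac_open(), so a running interface is closed
 * and reopened around the update.
 */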
7330 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7331 {
7332 	struct stmmac_priv *priv = netdev_priv(dev);
7333 	int ret = 0;
7334 
7335 	if (netif_running(dev))
7336 		stmmac_release(dev);
7337 
7338 	priv->dma_conf.dma_rx_size = rx_size;
7339 	priv->dma_conf.dma_tx_size = tx_size;
7340 
7341 	if (netif_running(dev))
7342 		ret = stmmac_open(dev);
7343 
7344 	return ret;
7345 }
7346 
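/* XDP RX metadata hook: return the hardware RX timestamp taken from the
 * descriptor (the context/next descriptor on GMAC4 and XGMAC), adjusted for
 * the CDC error, or -ENODATA if RX timestamping is disabled or no timestamp
 * is available.
 */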
7347 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7348 {
7349 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7350 	struct dma_desc *desc_contains_ts = ctx->desc;
7351 	struct stmmac_priv *priv = ctx->priv;
7352 	struct dma_desc *ndesc = ctx->ndesc;
7353 	struct dma_desc *desc = ctx->desc;
7354 	u64 ns = 0;
7355 
7356 	if (!priv->hwts_rx_en)
7357 		return -ENODATA;
7358 
7359 	/* For GMAC4, the valid timestamp is held in the context (next) descriptor. */
7360 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7361 		desc_contains_ts = ndesc;
7362 
7363 	/* Check if timestamp is available */
7364 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7365 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7366 		ns -= priv->plat->cdc_error_adj;
7367 		*timestamp = ns_to_ktime(ns);
7368 		return 0;
7369 	}
7370 
7371 	return -ENODATA;
7372 }
7373 
7374 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7375 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7376 };
7377 
7378 /**
7379  * stmmac_dvr_probe
7380  * @device: device pointer
7381  * @plat_dat: platform data pointer
7382  * @res: stmmac resource pointer
7383  * Description: this is the main probe function; it allocates the
7384  * net_device together with the private structure, initializes the HW
7385  * and registers the network interface.
7386  * Return: 0 on success, a negative errno otherwise.
7387  */
7388 int stmmac_dvr_probe(struct device *device,
7389 		     struct plat_stmmacenet_data *plat_dat,
7390 		     struct stmmac_resources *res)
7391 {
7392 	struct net_device *ndev = NULL;
7393 	struct stmmac_priv *priv;
7394 	u32 rxq;
7395 	int i, ret = 0;
7396 
7397 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7398 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7399 	if (!ndev)
7400 		return -ENOMEM;
7401 
7402 	SET_NETDEV_DEV(ndev, device);
7403 
7404 	priv = netdev_priv(ndev);
7405 	priv->device = device;
7406 	priv->dev = ndev;
7407 
7408 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7409 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7410 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7411 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7412 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7413 	}
7414 
7415 	priv->xstats.pcpu_stats =
7416 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7417 	if (!priv->xstats.pcpu_stats)
7418 		return -ENOMEM;
7419 
7420 	stmmac_set_ethtool_ops(ndev);
7421 	priv->pause_time = pause;
7422 	priv->plat = plat_dat;
7423 	priv->ioaddr = res->addr;
7424 	priv->dev->base_addr = (unsigned long)res->addr;
7425 	priv->plat->dma_cfg->multi_msi_en =
7426 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7427 
7428 	priv->dev->irq = res->irq;
7429 	priv->wol_irq = res->wol_irq;
7430 	priv->lpi_irq = res->lpi_irq;
7431 	priv->sfty_irq = res->sfty_irq;
7432 	priv->sfty_ce_irq = res->sfty_ce_irq;
7433 	priv->sfty_ue_irq = res->sfty_ue_irq;
7434 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7435 		priv->rx_irq[i] = res->rx_irq[i];
7436 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7437 		priv->tx_irq[i] = res->tx_irq[i];
7438 
7439 	if (!is_zero_ether_addr(res->mac))
7440 		eth_hw_addr_set(priv->dev, res->mac);
7441 
7442 	dev_set_drvdata(device, priv->dev);
7443 
7444 	/* Verify driver arguments */
7445 	stmmac_verify_args();
7446 
7447 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7448 	if (!priv->af_xdp_zc_qps)
7449 		return -ENOMEM;
7450 
7451 	/* Allocate workqueue */
7452 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7453 	if (!priv->wq) {
7454 		dev_err(priv->device, "failed to create workqueue\n");
7455 		ret = -ENOMEM;
7456 		goto error_wq_init;
7457 	}
7458 
7459 	INIT_WORK(&priv->service_task, stmmac_service_task);
7460 
7461 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7462 
7463 	/* Override with kernel parameters if supplied XXX CRS XXX
7464 	 * this needs to have multiple instances
7465 	 */
7466 	if ((phyaddr >= 0) && (phyaddr <= 31))
7467 		priv->plat->phy_addr = phyaddr;
7468 
7469 	if (priv->plat->stmmac_rst) {
7470 		ret = reset_control_assert(priv->plat->stmmac_rst);
7471 		reset_control_deassert(priv->plat->stmmac_rst);
7472 		/* Some reset controllers only provide a reset callback
7473 		 * instead of the assert + deassert callback pair.
7474 		 */
7475 		if (ret == -ENOTSUPP)
7476 			reset_control_reset(priv->plat->stmmac_rst);
7477 	}
7478 
7479 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7480 	if (ret == -ENOTSUPP)
7481 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7482 			ERR_PTR(ret));
7483 
7484 	/* Wait a bit for the reset to take effect */
7485 	udelay(10);
7486 
7487 	/* Init MAC and get the capabilities */
7488 	ret = stmmac_hw_init(priv);
7489 	if (ret)
7490 		goto error_hw_init;
7491 
7492 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7493 	 */
7494 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7495 		priv->plat->dma_cfg->dche = false;
7496 
7497 	stmmac_check_ether_addr(priv);
7498 
7499 	ndev->netdev_ops = &stmmac_netdev_ops;
7500 
7501 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7502 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7503 
7504 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7505 			    NETIF_F_RXCSUM;
7506 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7507 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7508 
7509 	ret = stmmac_tc_init(priv, priv);
7510 	if (!ret) {
7511 		ndev->hw_features |= NETIF_F_HW_TC;
7512 	}
7513 
7514 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7515 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7516 		if (priv->plat->has_gmac4)
7517 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7518 		priv->tso = true;
7519 		dev_info(priv->device, "TSO feature enabled\n");
7520 	}
7521 
7522 	if (priv->dma_cap.sphen &&
7523 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7524 		ndev->hw_features |= NETIF_F_GRO;
7525 		priv->sph_cap = true;
7526 		priv->sph = priv->sph_cap;
7527 		dev_info(priv->device, "SPH feature enabled\n");
7528 	}
7529 
7530 	/* Ideally our host DMA address width is the same as for the
7531 	 * device. However, it may differ and then we have to use our
7532 	 * host DMA width for allocation and the device DMA width for
7533 	 * register handling.
7534 	 */
7535 	if (priv->plat->host_dma_width)
7536 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7537 	else
7538 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7539 
7540 	if (priv->dma_cap.host_dma_width) {
7541 		ret = dma_set_mask_and_coherent(device,
7542 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7543 		if (!ret) {
7544 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7545 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7546 
7547 			/*
7548 			 * If more than 32 bits can be addressed, make sure to
7549 			 * enable enhanced addressing mode.
7550 			 */
7551 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7552 				priv->plat->dma_cfg->eame = true;
7553 		} else {
7554 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7555 			if (ret) {
7556 				dev_err(priv->device, "Failed to set DMA Mask\n");
7557 				goto error_hw_init;
7558 			}
7559 
7560 			priv->dma_cap.host_dma_width = 32;
7561 		}
7562 	}
7563 
7564 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7565 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7566 #ifdef STMMAC_VLAN_TAG_USED
7567 	/* Both mac100 and gmac support receive VLAN tag detection */
7568 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7569 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
7570 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7571 		priv->hw->hw_vlan_en = true;
7572 	}
7573 	if (priv->dma_cap.vlhash) {
7574 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7575 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7576 	}
7577 	if (priv->dma_cap.vlins) {
7578 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7579 		if (priv->dma_cap.dvlan)
7580 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7581 	}
7582 #endif
7583 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7584 
7585 	priv->xstats.threshold = tc;
7586 
7587 	/* Initialize RSS */
7588 	rxq = priv->plat->rx_queues_to_use;
7589 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7590 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7591 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7592 
7593 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7594 		ndev->features |= NETIF_F_RXHASH;
7595 
7596 	ndev->vlan_features |= ndev->features;
7597 
7598 	/* MTU range: 46 - hw-specific max */
7599 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7600 	if (priv->plat->has_xgmac)
7601 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7602 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7603 		ndev->max_mtu = JUMBO_LEN;
7604 	else
7605 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7606 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7607 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7608 	 */
7609 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7610 	    (priv->plat->maxmtu >= ndev->min_mtu))
7611 		ndev->max_mtu = priv->plat->maxmtu;
7612 	else if (priv->plat->maxmtu < ndev->min_mtu)
7613 		dev_warn(priv->device,
7614 			 "%s: warning: maxmtu having invalid value (%d)\n",
7615 			 __func__, priv->plat->maxmtu);
7616 
7617 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7618 
7619 	/* Setup channels NAPI */
7620 	stmmac_napi_add(ndev);
7621 
7622 	mutex_init(&priv->lock);
7623 
7624 	stmmac_fpe_init(priv);
7625 
7626 	stmmac_check_pcs_mode(priv);
7627 
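	/* Keep the device runtime-active while the MDIO bus, PCS and PHY are
	 * being set up; the pm_runtime_put() below drops this reference.
	 */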
7628 	pm_runtime_get_noresume(device);
7629 	pm_runtime_set_active(device);
7630 	if (!pm_runtime_enabled(device))
7631 		pm_runtime_enable(device);
7632 
7633 	ret = stmmac_mdio_register(ndev);
7634 	if (ret < 0) {
7635 		dev_err_probe(priv->device, ret,
7636 			      "MDIO bus (id: %d) registration failed\n",
7637 			      priv->plat->bus_id);
7638 		goto error_mdio_register;
7639 	}
7640 
7641 	ret = stmmac_pcs_setup(ndev);
7642 	if (ret)
7643 		goto error_pcs_setup;
7644 
7645 	ret = stmmac_phy_setup(priv);
7646 	if (ret) {
7647 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7648 		goto error_phy_setup;
7649 	}
7650 
7651 	ret = register_netdev(ndev);
7652 	if (ret) {
7653 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7654 			__func__, ret);
7655 		goto error_netdev_register;
7656 	}
7657 
7658 #ifdef CONFIG_DEBUG_FS
7659 	stmmac_init_fs(ndev);
7660 #endif
7661 
7662 	if (priv->plat->dump_debug_regs)
7663 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7664 
7665 	/* Let pm_runtime_put() disable the clocks.
7666 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7667 	 */
7668 	pm_runtime_put(device);
7669 
7670 	return ret;
7671 
7672 error_netdev_register:
7673 	phylink_destroy(priv->phylink);
7674 error_phy_setup:
7675 	stmmac_pcs_clean(ndev);
7676 error_pcs_setup:
7677 	stmmac_mdio_unregister(ndev);
7678 error_mdio_register:
7679 	stmmac_napi_del(ndev);
7680 error_hw_init:
7681 	destroy_workqueue(priv->wq);
7682 error_wq_init:
7683 	bitmap_free(priv->af_xdp_zc_qps);
7684 
7685 	return ret;
7686 }
7687 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7688 
7689 /**
7690  * stmmac_dvr_remove
7691  * @dev: device pointer
7692  * Description: this function resets the TX/RX processes, disables the MAC
7693  * RX/TX, changes the link status and releases the DMA descriptor rings.
7694  */
7695 void stmmac_dvr_remove(struct device *dev)
7696 {
7697 	struct net_device *ndev = dev_get_drvdata(dev);
7698 	struct stmmac_priv *priv = netdev_priv(ndev);
7699 
7700 	netdev_info(priv->dev, "%s: removing driver", __func__);
7701 
7702 	pm_runtime_get_sync(dev);
7703 
7704 	unregister_netdev(ndev);
7705 
7706 #ifdef CONFIG_DEBUG_FS
7707 	stmmac_exit_fs(ndev);
7708 #endif
7709 	phylink_destroy(priv->phylink);
7710 	if (priv->plat->stmmac_rst)
7711 		reset_control_assert(priv->plat->stmmac_rst);
7712 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7713 
7714 	stmmac_pcs_clean(ndev);
7715 	stmmac_mdio_unregister(ndev);
7716 
7717 	destroy_workqueue(priv->wq);
7718 	mutex_destroy(&priv->lock);
7719 	bitmap_free(priv->af_xdp_zc_qps);
7720 
7721 	pm_runtime_disable(dev);
7722 	pm_runtime_put_noidle(dev);
7723 }
7724 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7725 
7726 /**
7727  * stmmac_suspend - suspend callback
7728  * @dev: device pointer
7729  * Description: this function suspends the device. It is called by the
7730  * platform driver to stop the network queue, release the resources,
7731  * program the PMT register (for WoL) and clean up driver resources.
7732  */
7733 int stmmac_suspend(struct device *dev)
7734 {
7735 	struct net_device *ndev = dev_get_drvdata(dev);
7736 	struct stmmac_priv *priv = netdev_priv(ndev);
7737 	u32 chan;
7738 
7739 	if (!ndev || !netif_running(ndev))
7740 		return 0;
7741 
7742 	mutex_lock(&priv->lock);
7743 
7744 	netif_device_detach(ndev);
7745 
7746 	stmmac_disable_all_queues(priv);
7747 
7748 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7749 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7750 
7751 	if (priv->eee_sw_timer_en) {
7752 		priv->tx_path_in_lpi_mode = false;
7753 		timer_delete_sync(&priv->eee_ctrl_timer);
7754 	}
7755 
7756 	/* Stop TX/RX DMA */
7757 	stmmac_stop_all_dma(priv);
7758 
7759 	if (priv->plat->serdes_powerdown)
7760 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7761 
7762 	/* Enable Power down mode by programming the PMT regs */
7763 	if (stmmac_wol_enabled_mac(priv)) {
7764 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7765 		priv->irq_wake = 1;
7766 	} else {
7767 		stmmac_mac_set(priv, priv->ioaddr, false);
7768 		pinctrl_pm_select_sleep_state(priv->device);
7769 	}
7770 
7771 	mutex_unlock(&priv->lock);
7772 
7773 	rtnl_lock();
7774 	if (stmmac_wol_enabled_phy(priv))
7775 		phylink_speed_down(priv->phylink, false);
7776 
7777 	phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv));
7778 	rtnl_unlock();
7779 
7780 	if (stmmac_fpe_supported(priv))
7781 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
7782 
7783 	if (priv->plat->suspend)
7784 		return priv->plat->suspend(dev, priv->plat->bsp_priv);
7785 
7786 	return 0;
7787 }
7788 EXPORT_SYMBOL_GPL(stmmac_suspend);
7789 
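/* Reset the software ring indexes of a single RX queue. */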
7790 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7791 {
7792 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7793 
7794 	rx_q->cur_rx = 0;
7795 	rx_q->dirty_rx = 0;
7796 }
7797 
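/* Reset the software state of a single TX queue: ring indexes, current MSS
 * and the BQL counters of the backing netdev TX queue.
 */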
7798 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7799 {
7800 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7801 
7802 	tx_q->cur_tx = 0;
7803 	tx_q->dirty_tx = 0;
7804 	tx_q->mss = 0;
7805 
7806 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7807 }
7808 
7809 /**
7810  * stmmac_reset_queues_param - reset queue parameters
7811  * @priv: device pointer
7812  */
7813 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7814 {
7815 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7816 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7817 	u32 queue;
7818 
7819 	for (queue = 0; queue < rx_cnt; queue++)
7820 		stmmac_reset_rx_queue(priv, queue);
7821 
7822 	for (queue = 0; queue < tx_cnt; queue++)
7823 		stmmac_reset_tx_queue(priv, queue);
7824 }
7825 
7826 /**
7827  * stmmac_resume - resume callback
7828  * @dev: device pointer
7829  * Description: invoked on resume to bring the DMA and CORE back up
7830  * into a usable state.
7831  */
7832 int stmmac_resume(struct device *dev)
7833 {
7834 	struct net_device *ndev = dev_get_drvdata(dev);
7835 	struct stmmac_priv *priv = netdev_priv(ndev);
7836 	int ret;
7837 
7838 	if (priv->plat->resume) {
7839 		ret = priv->plat->resume(dev, priv->plat->bsp_priv);
7840 		if (ret)
7841 			return ret;
7842 	}
7843 
7844 	if (!netif_running(ndev))
7845 		return 0;
7846 
7847 	/* The Power Down bit in the PMT register is cleared
7848 	 * automatically as soon as a magic packet or a Wake-up frame
7849 	 * is received. Still, it is better to clear this bit manually
7850 	 * because it can cause problems when the resume is triggered
7851 	 * by another device (e.g. a serial console).
7852 	 */
7853 	if (stmmac_wol_enabled_mac(priv)) {
7854 		mutex_lock(&priv->lock);
7855 		stmmac_pmt(priv, priv->hw, 0);
7856 		mutex_unlock(&priv->lock);
7857 		priv->irq_wake = 0;
7858 	} else {
7859 		pinctrl_pm_select_default_state(priv->device);
7860 		/* reset the phy so that it's ready */
7861 		if (priv->mii)
7862 			stmmac_mdio_reset(priv->mii);
7863 	}
7864 
7865 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7866 	    priv->plat->serdes_powerup) {
7867 		ret = priv->plat->serdes_powerup(ndev,
7868 						 priv->plat->bsp_priv);
7869 
7870 		if (ret < 0)
7871 			return ret;
7872 	}
7873 
7874 	rtnl_lock();
7875 
7876 	/* Prepare the PHY to resume, ensuring that the clocks it provides,
7877 	 * which are necessary for the MAC DMA reset to complete, are running.
7878 	 */
7879 	phylink_prepare_resume(priv->phylink);
7880 
7881 	mutex_lock(&priv->lock);
7882 
7883 	stmmac_reset_queues_param(priv);
7884 
7885 	stmmac_free_tx_skbufs(priv);
7886 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7887 
7888 	ret = stmmac_hw_setup(ndev);
7889 	if (ret < 0) {
7890 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
7891 		mutex_unlock(&priv->lock);
7892 		rtnl_unlock();
7893 		return ret;
7894 	}
7895 
7896 	stmmac_init_timestamping(priv);
7897 
7898 	stmmac_init_coalesce(priv);
7899 	phylink_rx_clk_stop_block(priv->phylink);
7900 	stmmac_set_rx_mode(ndev);
7901 
7902 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7903 	phylink_rx_clk_stop_unblock(priv->phylink);
7904 
7905 	stmmac_enable_all_queues(priv);
7906 	stmmac_enable_all_dma_irq(priv);
7907 
7908 	mutex_unlock(&priv->lock);
7909 
7910 	/* phylink_resume() must be called after the hardware has been
7911 	 * initialised because it may bring the link up immediately in a
7912 	 * workqueue thread, which will race with initialisation.
7913 	 */
7914 	phylink_resume(priv->phylink);
7915 	if (stmmac_wol_enabled_phy(priv))
7916 		phylink_speed_up(priv->phylink);
7917 
7918 	rtnl_unlock();
7919 
7920 	netif_device_attach(ndev);
7921 
7922 	return 0;
7923 }
7924 EXPORT_SYMBOL_GPL(stmmac_resume);
7925 
7926 /* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
7927 DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
7928 EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
7929 
7930 #ifndef MODULE
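/**
 * stmmac_cmdline_opt - kernel command line parser for "stmmaceth="
 * @str: the option string, i.e. everything after "stmmaceth="
 * Description: parse the comma-separated "name:value" pairs (for example
 * "stmmaceth=debug:16,tc:256") and update the corresponding module
 * parameters. Always returns 1 so the option is marked as handled.
 */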
7931 static int __init stmmac_cmdline_opt(char *str)
7932 {
7933 	char *opt;
7934 
7935 	if (!str || !*str)
7936 		return 1;
7937 	while ((opt = strsep(&str, ",")) != NULL) {
7938 		if (!strncmp(opt, "debug:", 6)) {
7939 			if (kstrtoint(opt + 6, 0, &debug))
7940 				goto err;
7941 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7942 			if (kstrtoint(opt + 8, 0, &phyaddr))
7943 				goto err;
7944 		} else if (!strncmp(opt, "tc:", 3)) {
7945 			if (kstrtoint(opt + 3, 0, &tc))
7946 				goto err;
7947 		} else if (!strncmp(opt, "watchdog:", 9)) {
7948 			if (kstrtoint(opt + 9, 0, &watchdog))
7949 				goto err;
7950 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7951 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7952 				goto err;
7953 		} else if (!strncmp(opt, "pause:", 6)) {
7954 			if (kstrtoint(opt + 6, 0, &pause))
7955 				goto err;
7956 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7957 			if (kstrtoint(opt + 10, 0, &eee_timer))
7958 				goto err;
7959 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7960 			if (kstrtoint(opt + 11, 0, &chain_mode))
7961 				goto err;
7962 		}
7963 	}
7964 	return 1;
7965 
7966 err:
7967 	pr_err("%s: ERROR broken module parameter conversion", __func__);
7968 	return 1;
7969 }
7970 
7971 __setup("stmmaceth=", stmmac_cmdline_opt);
7972 #endif /* MODULE */
7973 
7974 static int __init stmmac_init(void)
7975 {
7976 #ifdef CONFIG_DEBUG_FS
7977 	/* Create debugfs main directory if it doesn't exist yet */
7978 	if (!stmmac_fs_dir)
7979 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7980 	register_netdevice_notifier(&stmmac_notifier);
7981 #endif
7982 
7983 	return 0;
7984 }
7985 
7986 static void __exit stmmac_exit(void)
7987 {
7988 #ifdef CONFIG_DEBUG_FS
7989 	unregister_netdevice_notifier(&stmmac_notifier);
7990 	debugfs_remove_recursive(stmmac_fs_dir);
7991 #endif
7992 }
7993 
7994 module_init(stmmac_init)
7995 module_exit(stmmac_exit)
7996 
7997 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7998 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7999 MODULE_LICENSE("GPL");
8000