xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 3f1c07fc21c68bd3bd2df9d2c9441f6485e934d9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/pm_wakeirq.h>
33 #include <linux/prefetch.h>
34 #include <linux/pinctrl/consumer.h>
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38 #endif /* CONFIG_DEBUG_FS */
39 #include <linux/net_tstamp.h>
40 #include <linux/phylink.h>
41 #include <linux/udp.h>
42 #include <linux/bpf_trace.h>
43 #include <net/devlink.h>
44 #include <net/page_pool/helpers.h>
45 #include <net/pkt_cls.h>
46 #include <net/xdp_sock_drv.h>
47 #include "stmmac_ptp.h"
48 #include "stmmac_fpe.h"
49 #include "stmmac.h"
50 #include "stmmac_pcs.h"
51 #include "stmmac_xdp.h"
52 #include <linux/reset.h>
53 #include <linux/of_mdio.h>
54 #include "dwmac1000.h"
55 #include "dwxgmac2.h"
56 #include "hwif.h"
57 
58 /* As long as the interface is active, we keep the timestamping counter enabled
59  * with fine resolution and binary rollover. This avoid non-monotonic behavior
60  * (clock jumps) when changing timestamping settings at runtime.
61  */
62 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR)
63 
64 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
65 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
66 
67 /* Module parameters */
68 #define TX_TIMEO	5000
69 static int watchdog = TX_TIMEO;
70 module_param(watchdog, int, 0644);
71 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
72 
73 static int debug = -1;
74 module_param(debug, int, 0644);
75 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
76 
77 static int phyaddr = -1;
78 module_param(phyaddr, int, 0444);
79 MODULE_PARM_DESC(phyaddr, "Physical device address");
80 
81 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
82 
83 /* Limit to make sure XDP TX and slow path can coexist */
84 #define STMMAC_XSK_TX_BUDGET_MAX	256
85 #define STMMAC_TX_XSK_AVAIL		16
86 #define STMMAC_RX_FILL_BATCH		16
87 
88 #define STMMAC_XDP_PASS		0
89 #define STMMAC_XDP_CONSUMED	BIT(0)
90 #define STMMAC_XDP_TX		BIT(1)
91 #define STMMAC_XDP_REDIRECT	BIT(2)
92 
93 static int flow_ctrl = 0xdead;
94 module_param(flow_ctrl, int, 0644);
95 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
96 
97 static int pause = PAUSE_TIME;
98 module_param(pause, int, 0644);
99 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
100 
101 #define TC_DEFAULT 64
102 static int tc = TC_DEFAULT;
103 module_param(tc, int, 0644);
104 MODULE_PARM_DESC(tc, "DMA threshold control value");
105 
106 /* This is unused */
107 #define	DEFAULT_BUFSIZE	1536
108 static int buf_sz = DEFAULT_BUFSIZE;
109 module_param(buf_sz, int, 0644);
110 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
111 
112 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
113 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
114 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
115 
116 #define STMMAC_DEFAULT_LPI_TIMER	1000
117 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
118 module_param(eee_timer, uint, 0644);
119 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
120 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
121 
122 /* By default the driver will use the ring mode to manage tx and rx descriptors,
123  * but allow user to force to use the chain instead of the ring
124  */
125 static unsigned int chain_mode;
126 module_param(chain_mode, int, 0444);
127 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
128 
129 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
130 /* For MSI interrupts handling */
131 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
133 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
134 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
135 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
138 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
139 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
140 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
141 					  u32 rxmode, u32 chan);
142 
143 #ifdef CONFIG_DEBUG_FS
144 static const struct net_device_ops stmmac_netdev_ops;
145 static void stmmac_init_fs(struct net_device *dev);
146 static void stmmac_exit_fs(struct net_device *dev);
147 #endif
148 
149 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
150 
151 struct stmmac_devlink_priv {
152 	struct stmmac_priv *stmmac_priv;
153 };
154 
155 enum stmmac_dl_param_id {
156 	STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
157 	STMMAC_DEVLINK_PARAM_ID_TS_COARSE,
158 };
159 
160 /**
161  * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
162  * @bsp_priv: BSP private data structure (unused)
163  * @clk_tx_i: the transmit clock
164  * @interface: the selected interface mode
165  * @speed: the speed that the MAC will be operating at
166  *
167  * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
168  * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
169  * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
170  * the plat_data->set_clk_tx_rate method directly, call it via their own
171  * implementation, or implement their own method should they have more
172  * complex requirements. It is intended to only be used in this method.
173  *
174  * plat_data->clk_tx_i must be filled in.
175  */
stmmac_set_clk_tx_rate(void * bsp_priv,struct clk * clk_tx_i,phy_interface_t interface,int speed)176 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
177 			   phy_interface_t interface, int speed)
178 {
179 	long rate = rgmii_clock(speed);
180 
181 	/* Silently ignore unsupported speeds as rgmii_clock() only
182 	 * supports 10, 100 and 1000Mbps. We do not want to spit
183 	 * errors for 2500 and higher speeds here.
184 	 */
185 	if (rate < 0)
186 		return 0;
187 
188 	return clk_set_rate(clk_tx_i, rate);
189 }
190 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
191 
192 /**
193  * stmmac_axi_blen_to_mask() - convert a burst length array to reg value
194  * @regval: pointer to a u32 for the resulting register value
195  * @blen: pointer to an array of u32 containing the burst length values in bytes
196  * @len: the number of entries in the @blen array
197  */
stmmac_axi_blen_to_mask(u32 * regval,const u32 * blen,size_t len)198 void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
199 {
200 	size_t i;
201 	u32 val;
202 
203 	for (val = i = 0; i < len; i++) {
204 		u32 burst = blen[i];
205 
206 		/* Burst values of zero must be skipped. */
207 		if (!burst)
208 			continue;
209 
210 		/* The valid range for the burst length is 4 to 256 inclusive,
211 		 * and it must be a power of two.
212 		 */
213 		if (burst < 4 || burst > 256 || !is_power_of_2(burst)) {
214 			pr_err("stmmac: invalid burst length %u at index %zu\n",
215 			       burst, i);
216 			continue;
217 		}
218 
219 		/* Since burst is a power of two, and the register field starts
220 		 * with burst = 4, shift right by two bits so bit 0 of the field
221 		 * corresponds with the minimum value.
222 		 */
223 		val |= burst >> 2;
224 	}
225 
226 	*regval = FIELD_PREP(DMA_AXI_BLEN_MASK, val);
227 }
228 EXPORT_SYMBOL_GPL(stmmac_axi_blen_to_mask);
229 
230 /**
231  * stmmac_verify_args - verify the driver parameters.
232  * Description: it checks the driver parameters and set a default in case of
233  * errors.
234  */
stmmac_verify_args(void)235 static void stmmac_verify_args(void)
236 {
237 	if (unlikely(watchdog < 0))
238 		watchdog = TX_TIMEO;
239 	if (unlikely((pause < 0) || (pause > 0xffff)))
240 		pause = PAUSE_TIME;
241 
242 	if (flow_ctrl != 0xdead)
243 		pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
244 }
245 
__stmmac_disable_all_queues(struct stmmac_priv * priv)246 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
247 {
248 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
249 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
250 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
251 	u32 queue;
252 
253 	for (queue = 0; queue < maxq; queue++) {
254 		struct stmmac_channel *ch = &priv->channel[queue];
255 
256 		if (stmmac_xdp_is_enabled(priv) &&
257 		    test_bit(queue, priv->af_xdp_zc_qps)) {
258 			napi_disable(&ch->rxtx_napi);
259 			continue;
260 		}
261 
262 		if (queue < rx_queues_cnt)
263 			napi_disable(&ch->rx_napi);
264 		if (queue < tx_queues_cnt)
265 			napi_disable(&ch->tx_napi);
266 	}
267 }
268 
269 /**
270  * stmmac_disable_all_queues - Disable all queues
271  * @priv: driver private structure
272  */
stmmac_disable_all_queues(struct stmmac_priv * priv)273 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
274 {
275 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
276 	struct stmmac_rx_queue *rx_q;
277 	u32 queue;
278 
279 	/* synchronize_rcu() needed for pending XDP buffers to drain */
280 	for (queue = 0; queue < rx_queues_cnt; queue++) {
281 		rx_q = &priv->dma_conf.rx_queue[queue];
282 		if (rx_q->xsk_pool) {
283 			synchronize_rcu();
284 			break;
285 		}
286 	}
287 
288 	__stmmac_disable_all_queues(priv);
289 }
290 
291 /**
292  * stmmac_enable_all_queues - Enable all queues
293  * @priv: driver private structure
294  */
stmmac_enable_all_queues(struct stmmac_priv * priv)295 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
296 {
297 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
298 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
299 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
300 	u32 queue;
301 
302 	for (queue = 0; queue < maxq; queue++) {
303 		struct stmmac_channel *ch = &priv->channel[queue];
304 
305 		if (stmmac_xdp_is_enabled(priv) &&
306 		    test_bit(queue, priv->af_xdp_zc_qps)) {
307 			napi_enable(&ch->rxtx_napi);
308 			continue;
309 		}
310 
311 		if (queue < rx_queues_cnt)
312 			napi_enable(&ch->rx_napi);
313 		if (queue < tx_queues_cnt)
314 			napi_enable(&ch->tx_napi);
315 	}
316 }
317 
stmmac_service_event_schedule(struct stmmac_priv * priv)318 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
319 {
320 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
321 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
322 		queue_work(priv->wq, &priv->service_task);
323 }
324 
stmmac_global_err(struct stmmac_priv * priv)325 static void stmmac_global_err(struct stmmac_priv *priv)
326 {
327 	netif_carrier_off(priv->dev);
328 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
329 	stmmac_service_event_schedule(priv);
330 }
331 
print_pkt(unsigned char * buf,int len)332 static void print_pkt(unsigned char *buf, int len)
333 {
334 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
335 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
336 }
337 
stmmac_tx_avail(struct stmmac_priv * priv,u32 queue)338 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
339 {
340 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
341 	u32 avail;
342 
343 	if (tx_q->dirty_tx > tx_q->cur_tx)
344 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
345 	else
346 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
347 
348 	return avail;
349 }
350 
351 /**
352  * stmmac_rx_dirty - Get RX queue dirty
353  * @priv: driver private structure
354  * @queue: RX queue index
355  */
stmmac_rx_dirty(struct stmmac_priv * priv,u32 queue)356 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
357 {
358 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
359 	u32 dirty;
360 
361 	if (rx_q->dirty_rx <= rx_q->cur_rx)
362 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
363 	else
364 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
365 
366 	return dirty;
367 }
368 
stmmac_eee_tx_busy(struct stmmac_priv * priv)369 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
370 {
371 	u32 tx_cnt = priv->plat->tx_queues_to_use;
372 	u32 queue;
373 
374 	/* check if all TX queues have the work finished */
375 	for (queue = 0; queue < tx_cnt; queue++) {
376 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
377 
378 		if (tx_q->dirty_tx != tx_q->cur_tx)
379 			return true; /* still unfinished work */
380 	}
381 
382 	return false;
383 }
384 
stmmac_restart_sw_lpi_timer(struct stmmac_priv * priv)385 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
386 {
387 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
388 }
389 
390 /**
391  * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
392  * @priv: driver private structure
393  * Description: this function is to verify and enter in LPI mode in case of
394  * EEE.
395  */
stmmac_try_to_start_sw_lpi(struct stmmac_priv * priv)396 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
397 {
398 	if (stmmac_eee_tx_busy(priv)) {
399 		stmmac_restart_sw_lpi_timer(priv);
400 		return;
401 	}
402 
403 	/* Check and enter in LPI mode */
404 	if (!priv->tx_path_in_lpi_mode)
405 		stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
406 				    priv->tx_lpi_clk_stop, 0);
407 }
408 
409 /**
410  * stmmac_stop_sw_lpi - stop transmitting LPI
411  * @priv: driver private structure
412  * Description: When using software-controlled LPI, stop transmitting LPI state.
413  */
stmmac_stop_sw_lpi(struct stmmac_priv * priv)414 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
415 {
416 	timer_delete_sync(&priv->eee_ctrl_timer);
417 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
418 	priv->tx_path_in_lpi_mode = false;
419 }
420 
421 /**
422  * stmmac_eee_ctrl_timer - EEE TX SW timer.
423  * @t:  timer_list struct containing private info
424  * Description:
425  *  if there is no data transfer and if we are not in LPI state,
426  *  then MAC Transmitter can be moved to LPI state.
427  */
stmmac_eee_ctrl_timer(struct timer_list * t)428 static void stmmac_eee_ctrl_timer(struct timer_list *t)
429 {
430 	struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
431 
432 	stmmac_try_to_start_sw_lpi(priv);
433 }
434 
435 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
436  * @priv: driver private structure
437  * @p : descriptor pointer
438  * @skb : the socket buffer
439  * Description :
440  * This function will read timestamp from the descriptor & pass it to stack.
441  * and also perform some sanity checks.
442  */
stmmac_get_tx_hwtstamp(struct stmmac_priv * priv,struct dma_desc * p,struct sk_buff * skb)443 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
444 				   struct dma_desc *p, struct sk_buff *skb)
445 {
446 	struct skb_shared_hwtstamps shhwtstamp;
447 	bool found = false;
448 	u64 ns = 0;
449 
450 	if (!priv->hwts_tx_en)
451 		return;
452 
453 	/* exit if skb doesn't support hw tstamp */
454 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
455 		return;
456 
457 	/* check tx tstamp status */
458 	if (stmmac_get_tx_timestamp_status(priv, p)) {
459 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
460 		found = true;
461 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
462 		found = true;
463 	}
464 
465 	if (found) {
466 		ns -= priv->plat->cdc_error_adj;
467 
468 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
469 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
470 
471 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
472 		/* pass tstamp to stack */
473 		skb_tstamp_tx(skb, &shhwtstamp);
474 	}
475 }
476 
477 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
478  * @priv: driver private structure
479  * @p : descriptor pointer
480  * @np : next descriptor pointer
481  * @skb : the socket buffer
482  * Description :
483  * This function will read received packet's timestamp from the descriptor
484  * and pass it to stack. It also perform some sanity checks.
485  */
stmmac_get_rx_hwtstamp(struct stmmac_priv * priv,struct dma_desc * p,struct dma_desc * np,struct sk_buff * skb)486 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
487 				   struct dma_desc *np, struct sk_buff *skb)
488 {
489 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
490 	struct dma_desc *desc = p;
491 	u64 ns = 0;
492 
493 	if (!priv->hwts_rx_en)
494 		return;
495 	/* For GMAC4, the valid timestamp is from CTX next desc. */
496 	if (dwmac_is_xmac(priv->plat->core_type))
497 		desc = np;
498 
499 	/* Check if timestamp is available */
500 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
501 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
502 
503 		ns -= priv->plat->cdc_error_adj;
504 
505 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
506 		shhwtstamp = skb_hwtstamps(skb);
507 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
508 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
509 	} else  {
510 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
511 	}
512 }
513 
stmmac_update_subsecond_increment(struct stmmac_priv * priv)514 static void stmmac_update_subsecond_increment(struct stmmac_priv *priv)
515 {
516 	bool xmac = dwmac_is_xmac(priv->plat->core_type);
517 	u32 sec_inc = 0;
518 	u64 temp = 0;
519 
520 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
521 
522 	/* program Sub Second Increment reg */
523 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
524 					   priv->plat->clk_ptp_rate,
525 					   xmac, &sec_inc);
526 	temp = div_u64(1000000000ULL, sec_inc);
527 
528 	/* Store sub second increment for later use */
529 	priv->sub_second_inc = sec_inc;
530 
531 	/* calculate default added value:
532 	 * formula is :
533 	 * addend = (2^32)/freq_div_ratio;
534 	 * where, freq_div_ratio = 1e9ns/sec_inc
535 	 */
536 	temp = (u64)(temp << 32);
537 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
538 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
539 }
540 
541 /**
542  *  stmmac_hwtstamp_set - control hardware timestamping.
543  *  @dev: device pointer.
544  *  @config: the timestamping configuration.
545  *  @extack: netlink extended ack structure for error reporting.
546  *  Description:
547  *  This function configures the MAC to enable/disable both outgoing(TX)
548  *  and incoming(RX) packets time stamping based on user input.
549  *  Return Value:
550  *  0 on success and an appropriate -ve integer on failure.
551  */
stmmac_hwtstamp_set(struct net_device * dev,struct kernel_hwtstamp_config * config,struct netlink_ext_ack * extack)552 static int stmmac_hwtstamp_set(struct net_device *dev,
553 			       struct kernel_hwtstamp_config *config,
554 			       struct netlink_ext_ack *extack)
555 {
556 	struct stmmac_priv *priv = netdev_priv(dev);
557 	u32 ptp_v2 = 0;
558 	u32 tstamp_all = 0;
559 	u32 ptp_over_ipv4_udp = 0;
560 	u32 ptp_over_ipv6_udp = 0;
561 	u32 ptp_over_ethernet = 0;
562 	u32 snap_type_sel = 0;
563 	u32 ts_master_en = 0;
564 	u32 ts_event_en = 0;
565 
566 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
567 		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
568 		priv->hwts_tx_en = 0;
569 		priv->hwts_rx_en = 0;
570 
571 		return -EOPNOTSUPP;
572 	}
573 
574 	if (!netif_running(dev)) {
575 		NL_SET_ERR_MSG_MOD(extack,
576 				   "Cannot change timestamping configuration while down");
577 		return -ENODEV;
578 	}
579 
580 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
581 		   __func__, config->flags, config->tx_type, config->rx_filter);
582 
583 	if (config->tx_type != HWTSTAMP_TX_OFF &&
584 	    config->tx_type != HWTSTAMP_TX_ON)
585 		return -ERANGE;
586 
587 	if (priv->adv_ts) {
588 		switch (config->rx_filter) {
589 		case HWTSTAMP_FILTER_NONE:
590 			/* time stamp no incoming packet at all */
591 			config->rx_filter = HWTSTAMP_FILTER_NONE;
592 			break;
593 
594 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
595 			/* PTP v1, UDP, any kind of event packet */
596 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
597 			/* 'xmac' hardware can support Sync, Pdelay_Req and
598 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
599 			 * This leaves Delay_Req timestamps out.
600 			 * Enable all events *and* general purpose message
601 			 * timestamping
602 			 */
603 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
604 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
605 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
606 			break;
607 
608 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
609 			/* PTP v1, UDP, Sync packet */
610 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
611 			/* take time stamp for SYNC messages only */
612 			ts_event_en = PTP_TCR_TSEVNTENA;
613 
614 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
615 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
616 			break;
617 
618 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
619 			/* PTP v1, UDP, Delay_req packet */
620 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
621 			/* take time stamp for Delay_Req messages only */
622 			ts_master_en = PTP_TCR_TSMSTRENA;
623 			ts_event_en = PTP_TCR_TSEVNTENA;
624 
625 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
626 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
627 			break;
628 
629 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
630 			/* PTP v2, UDP, any kind of event packet */
631 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
632 			ptp_v2 = PTP_TCR_TSVER2ENA;
633 			/* take time stamp for all event messages */
634 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
635 
636 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
637 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
638 			break;
639 
640 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
641 			/* PTP v2, UDP, Sync packet */
642 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
643 			ptp_v2 = PTP_TCR_TSVER2ENA;
644 			/* take time stamp for SYNC messages only */
645 			ts_event_en = PTP_TCR_TSEVNTENA;
646 
647 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
648 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
649 			break;
650 
651 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
652 			/* PTP v2, UDP, Delay_req packet */
653 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
654 			ptp_v2 = PTP_TCR_TSVER2ENA;
655 			/* take time stamp for Delay_Req messages only */
656 			ts_master_en = PTP_TCR_TSMSTRENA;
657 			ts_event_en = PTP_TCR_TSEVNTENA;
658 
659 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
660 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
661 			break;
662 
663 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
664 			/* PTP v2/802.AS1 any layer, any kind of event packet */
665 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
666 			ptp_v2 = PTP_TCR_TSVER2ENA;
667 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
668 			if (priv->synopsys_id < DWMAC_CORE_4_10)
669 				ts_event_en = PTP_TCR_TSEVNTENA;
670 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
671 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
672 			ptp_over_ethernet = PTP_TCR_TSIPENA;
673 			break;
674 
675 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
676 			/* PTP v2/802.AS1, any layer, Sync packet */
677 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
678 			ptp_v2 = PTP_TCR_TSVER2ENA;
679 			/* take time stamp for SYNC messages only */
680 			ts_event_en = PTP_TCR_TSEVNTENA;
681 
682 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
683 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
684 			ptp_over_ethernet = PTP_TCR_TSIPENA;
685 			break;
686 
687 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
688 			/* PTP v2/802.AS1, any layer, Delay_req packet */
689 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
690 			ptp_v2 = PTP_TCR_TSVER2ENA;
691 			/* take time stamp for Delay_Req messages only */
692 			ts_master_en = PTP_TCR_TSMSTRENA;
693 			ts_event_en = PTP_TCR_TSEVNTENA;
694 
695 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
696 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
697 			ptp_over_ethernet = PTP_TCR_TSIPENA;
698 			break;
699 
700 		case HWTSTAMP_FILTER_NTP_ALL:
701 		case HWTSTAMP_FILTER_ALL:
702 			/* time stamp any incoming packet */
703 			config->rx_filter = HWTSTAMP_FILTER_ALL;
704 			tstamp_all = PTP_TCR_TSENALL;
705 			break;
706 
707 		default:
708 			return -ERANGE;
709 		}
710 	} else {
711 		switch (config->rx_filter) {
712 		case HWTSTAMP_FILTER_NONE:
713 			config->rx_filter = HWTSTAMP_FILTER_NONE;
714 			break;
715 		default:
716 			/* PTP v1, UDP, any kind of event packet */
717 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
718 			break;
719 		}
720 	}
721 	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
722 	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
723 
724 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
725 	if (!priv->tsfupdt_coarse)
726 		priv->systime_flags |= PTP_TCR_TSCFUPDT;
727 
728 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
729 		priv->systime_flags |= tstamp_all | ptp_v2 |
730 				       ptp_over_ethernet | ptp_over_ipv6_udp |
731 				       ptp_over_ipv4_udp | ts_event_en |
732 				       ts_master_en | snap_type_sel;
733 	}
734 
735 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
736 
737 	priv->tstamp_config = *config;
738 
739 	return 0;
740 }
741 
742 /**
743  *  stmmac_hwtstamp_get - read hardware timestamping.
744  *  @dev: device pointer.
745  *  @config: the timestamping configuration.
746  *  Description:
747  *  This function obtain the current hardware timestamping settings
748  *  as requested.
749  */
stmmac_hwtstamp_get(struct net_device * dev,struct kernel_hwtstamp_config * config)750 static int stmmac_hwtstamp_get(struct net_device *dev,
751 			       struct kernel_hwtstamp_config *config)
752 {
753 	struct stmmac_priv *priv = netdev_priv(dev);
754 
755 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
756 		return -EOPNOTSUPP;
757 
758 	*config = priv->tstamp_config;
759 
760 	return 0;
761 }
762 
763 /**
764  * stmmac_init_tstamp_counter - init hardware timestamping counter
765  * @priv: driver private structure
766  * @systime_flags: timestamping flags
767  * Description:
768  * Initialize hardware counter for packet timestamping.
769  * This is valid as long as the interface is open and not suspended.
770  * Will be rerun after resuming from suspend, case in which the timestamping
771  * flags updated by stmmac_hwtstamp_set() also need to be restored.
772  */
stmmac_init_tstamp_counter(struct stmmac_priv * priv,u32 systime_flags)773 static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
774 				      u32 systime_flags)
775 {
776 	struct timespec64 now;
777 
778 	if (!priv->plat->clk_ptp_rate) {
779 		netdev_err(priv->dev, "Invalid PTP clock rate");
780 		return -EINVAL;
781 	}
782 
783 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
784 	priv->systime_flags = systime_flags;
785 
786 	stmmac_update_subsecond_increment(priv);
787 
788 	/* initialize system time */
789 	ktime_get_real_ts64(&now);
790 
791 	/* lower 32 bits of tv_sec are safe until y2106 */
792 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
793 
794 	return 0;
795 }
796 
797 /**
798  * stmmac_init_timestamping - initialise timestamping
799  * @priv: driver private structure
800  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
801  * This is done by looking at the HW cap. register.
802  * This function also registers the ptp driver.
803  */
stmmac_init_timestamping(struct stmmac_priv * priv)804 static int stmmac_init_timestamping(struct stmmac_priv *priv)
805 {
806 	bool xmac = dwmac_is_xmac(priv->plat->core_type);
807 	int ret;
808 
809 	if (priv->plat->ptp_clk_freq_config)
810 		priv->plat->ptp_clk_freq_config(priv);
811 
812 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
813 		netdev_info(priv->dev, "PTP not supported by HW\n");
814 		return -EOPNOTSUPP;
815 	}
816 
817 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE |
818 					       PTP_TCR_TSCFUPDT);
819 	if (ret) {
820 		netdev_warn(priv->dev, "PTP init failed\n");
821 		return ret;
822 	}
823 
824 	priv->adv_ts = 0;
825 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
826 	if (xmac && priv->dma_cap.atime_stamp)
827 		priv->adv_ts = 1;
828 	/* Dwmac 3.x core with extend_desc can support adv_ts */
829 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
830 		priv->adv_ts = 1;
831 
832 	if (priv->dma_cap.time_stamp)
833 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
834 
835 	if (priv->adv_ts)
836 		netdev_info(priv->dev,
837 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
838 
839 	priv->hwts_tx_en = 0;
840 	priv->hwts_rx_en = 0;
841 
842 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
843 		stmmac_hwtstamp_correct_latency(priv, priv);
844 
845 	return 0;
846 }
847 
stmmac_setup_ptp(struct stmmac_priv * priv)848 static void stmmac_setup_ptp(struct stmmac_priv *priv)
849 {
850 	int ret;
851 
852 	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
853 	if (ret < 0)
854 		netdev_warn(priv->dev,
855 			    "failed to enable PTP reference clock: %pe\n",
856 			    ERR_PTR(ret));
857 
858 	if (stmmac_init_timestamping(priv) == 0)
859 		stmmac_ptp_register(priv);
860 }
861 
stmmac_release_ptp(struct stmmac_priv * priv)862 static void stmmac_release_ptp(struct stmmac_priv *priv)
863 {
864 	stmmac_ptp_unregister(priv);
865 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
866 }
867 
868 /**
869  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
870  *  @priv: driver private structure
871  *  @duplex: duplex passed to the next function
872  *  @flow_ctrl: desired flow control modes
873  *  Description: It is used for configuring the flow control in all queues
874  */
stmmac_mac_flow_ctrl(struct stmmac_priv * priv,u32 duplex,unsigned int flow_ctrl)875 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
876 				 unsigned int flow_ctrl)
877 {
878 	u32 tx_cnt = priv->plat->tx_queues_to_use;
879 
880 	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
881 			 tx_cnt);
882 }
883 
stmmac_mac_get_caps(struct phylink_config * config,phy_interface_t interface)884 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
885 					 phy_interface_t interface)
886 {
887 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
888 
889 	/* Refresh the MAC-specific capabilities */
890 	stmmac_mac_update_caps(priv);
891 
892 	config->mac_capabilities = priv->hw->link.caps;
893 
894 	if (priv->plat->max_speed)
895 		phylink_limit_mac_speed(config, priv->plat->max_speed);
896 
897 	return config->mac_capabilities;
898 }
899 
stmmac_mac_select_pcs(struct phylink_config * config,phy_interface_t interface)900 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
901 						 phy_interface_t interface)
902 {
903 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
904 	struct phylink_pcs *pcs;
905 
906 	if (priv->plat->select_pcs) {
907 		pcs = priv->plat->select_pcs(priv, interface);
908 		if (!IS_ERR(pcs))
909 			return pcs;
910 	}
911 
912 	/* The PCS control register is only relevant for SGMII, TBI and RTBI
913 	 * modes. We no longer support TBI or RTBI, so only configure this
914 	 * register when operating in SGMII mode with the integrated PCS.
915 	 */
916 	if (priv->hw->pcs & STMMAC_PCS_SGMII && priv->integrated_pcs)
917 		return &priv->integrated_pcs->pcs;
918 
919 	return NULL;
920 }
921 
stmmac_mac_config(struct phylink_config * config,unsigned int mode,const struct phylink_link_state * state)922 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
923 			      const struct phylink_link_state *state)
924 {
925 	/* Nothing to do, xpcs_config() handles everything */
926 }
927 
stmmac_mac_finish(struct phylink_config * config,unsigned int mode,phy_interface_t interface)928 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
929 			     phy_interface_t interface)
930 {
931 	struct net_device *ndev = to_net_dev(config->dev);
932 	struct stmmac_priv *priv = netdev_priv(ndev);
933 
934 	if (priv->plat->mac_finish)
935 		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
936 
937 	return 0;
938 }
939 
stmmac_mac_link_down(struct phylink_config * config,unsigned int mode,phy_interface_t interface)940 static void stmmac_mac_link_down(struct phylink_config *config,
941 				 unsigned int mode, phy_interface_t interface)
942 {
943 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
944 
945 	stmmac_mac_set(priv, priv->ioaddr, false);
946 	if (priv->dma_cap.eee)
947 		stmmac_set_eee_pls(priv, priv->hw, false);
948 
949 	if (stmmac_fpe_supported(priv))
950 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
951 }
952 
stmmac_mac_link_up(struct phylink_config * config,struct phy_device * phy,unsigned int mode,phy_interface_t interface,int speed,int duplex,bool tx_pause,bool rx_pause)953 static void stmmac_mac_link_up(struct phylink_config *config,
954 			       struct phy_device *phy,
955 			       unsigned int mode, phy_interface_t interface,
956 			       int speed, int duplex,
957 			       bool tx_pause, bool rx_pause)
958 {
959 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
960 	unsigned int flow_ctrl;
961 	u32 old_ctrl, ctrl;
962 	int ret;
963 
964 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
965 	    priv->plat->serdes_powerup)
966 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
967 
968 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
969 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
970 
971 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
972 		switch (speed) {
973 		case SPEED_10000:
974 			ctrl |= priv->hw->link.xgmii.speed10000;
975 			break;
976 		case SPEED_5000:
977 			ctrl |= priv->hw->link.xgmii.speed5000;
978 			break;
979 		case SPEED_2500:
980 			ctrl |= priv->hw->link.xgmii.speed2500;
981 			break;
982 		default:
983 			return;
984 		}
985 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
986 		switch (speed) {
987 		case SPEED_100000:
988 			ctrl |= priv->hw->link.xlgmii.speed100000;
989 			break;
990 		case SPEED_50000:
991 			ctrl |= priv->hw->link.xlgmii.speed50000;
992 			break;
993 		case SPEED_40000:
994 			ctrl |= priv->hw->link.xlgmii.speed40000;
995 			break;
996 		case SPEED_25000:
997 			ctrl |= priv->hw->link.xlgmii.speed25000;
998 			break;
999 		case SPEED_10000:
1000 			ctrl |= priv->hw->link.xgmii.speed10000;
1001 			break;
1002 		case SPEED_2500:
1003 			ctrl |= priv->hw->link.speed2500;
1004 			break;
1005 		case SPEED_1000:
1006 			ctrl |= priv->hw->link.speed1000;
1007 			break;
1008 		default:
1009 			return;
1010 		}
1011 	} else {
1012 		switch (speed) {
1013 		case SPEED_2500:
1014 			ctrl |= priv->hw->link.speed2500;
1015 			break;
1016 		case SPEED_1000:
1017 			ctrl |= priv->hw->link.speed1000;
1018 			break;
1019 		case SPEED_100:
1020 			ctrl |= priv->hw->link.speed100;
1021 			break;
1022 		case SPEED_10:
1023 			ctrl |= priv->hw->link.speed10;
1024 			break;
1025 		default:
1026 			return;
1027 		}
1028 	}
1029 
1030 	if (priv->plat->fix_mac_speed)
1031 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1032 
1033 	if (!duplex)
1034 		ctrl &= ~priv->hw->link.duplex;
1035 	else
1036 		ctrl |= priv->hw->link.duplex;
1037 
1038 	/* Flow Control operation */
1039 	if (rx_pause && tx_pause)
1040 		flow_ctrl = FLOW_AUTO;
1041 	else if (rx_pause && !tx_pause)
1042 		flow_ctrl = FLOW_RX;
1043 	else if (!rx_pause && tx_pause)
1044 		flow_ctrl = FLOW_TX;
1045 	else
1046 		flow_ctrl = FLOW_OFF;
1047 
1048 	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
1049 
1050 	if (ctrl != old_ctrl)
1051 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1052 
1053 	if (priv->plat->set_clk_tx_rate) {
1054 		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
1055 						priv->plat->clk_tx_i,
1056 						interface, speed);
1057 		if (ret < 0)
1058 			netdev_err(priv->dev,
1059 				   "failed to configure %s transmit clock for %dMbps: %pe\n",
1060 				   phy_modes(interface), speed, ERR_PTR(ret));
1061 	}
1062 
1063 	stmmac_mac_set(priv, priv->ioaddr, true);
1064 	if (priv->dma_cap.eee)
1065 		stmmac_set_eee_pls(priv, priv->hw, true);
1066 
1067 	if (stmmac_fpe_supported(priv))
1068 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
1069 
1070 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1071 		stmmac_hwtstamp_correct_latency(priv, priv);
1072 }
1073 
stmmac_mac_disable_tx_lpi(struct phylink_config * config)1074 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1075 {
1076 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1077 
1078 	priv->eee_active = false;
1079 
1080 	mutex_lock(&priv->lock);
1081 
1082 	priv->eee_enabled = false;
1083 
1084 	netdev_dbg(priv->dev, "disable EEE\n");
1085 	priv->eee_sw_timer_en = false;
1086 	timer_delete_sync(&priv->eee_ctrl_timer);
1087 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1088 	priv->tx_path_in_lpi_mode = false;
1089 
1090 	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1091 	mutex_unlock(&priv->lock);
1092 }
1093 
stmmac_mac_enable_tx_lpi(struct phylink_config * config,u32 timer,bool tx_clk_stop)1094 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1095 				    bool tx_clk_stop)
1096 {
1097 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1098 	int ret;
1099 
1100 	priv->tx_lpi_timer = timer;
1101 	priv->eee_active = true;
1102 
1103 	mutex_lock(&priv->lock);
1104 
1105 	priv->eee_enabled = true;
1106 
1107 	/* Update the transmit clock stop according to PHY capability if
1108 	 * the platform allows
1109 	 */
1110 	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1111 		priv->tx_lpi_clk_stop = tx_clk_stop;
1112 
1113 	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1114 			     STMMAC_DEFAULT_TWT_LS);
1115 
1116 	/* Try to cnfigure the hardware timer. */
1117 	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1118 				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1119 
1120 	if (ret) {
1121 		/* Hardware timer mode not supported, or value out of range.
1122 		 * Fall back to using software LPI mode
1123 		 */
1124 		priv->eee_sw_timer_en = true;
1125 		stmmac_restart_sw_lpi_timer(priv);
1126 	}
1127 
1128 	mutex_unlock(&priv->lock);
1129 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1130 
1131 	return 0;
1132 }
1133 
stmmac_mac_wol_set(struct phylink_config * config,u32 wolopts,const u8 * sopass)1134 static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts,
1135 			      const u8 *sopass)
1136 {
1137 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1138 
1139 	device_set_wakeup_enable(priv->device, !!wolopts);
1140 
1141 	mutex_lock(&priv->lock);
1142 	priv->wolopts = wolopts;
1143 	mutex_unlock(&priv->lock);
1144 
1145 	return 0;
1146 }
1147 
1148 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1149 	.mac_get_caps = stmmac_mac_get_caps,
1150 	.mac_select_pcs = stmmac_mac_select_pcs,
1151 	.mac_config = stmmac_mac_config,
1152 	.mac_finish = stmmac_mac_finish,
1153 	.mac_link_down = stmmac_mac_link_down,
1154 	.mac_link_up = stmmac_mac_link_up,
1155 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1156 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1157 	.mac_wol_set = stmmac_mac_wol_set,
1158 };
1159 
1160 /**
1161  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1162  * @priv: driver private structure
1163  * Description: this is to verify if the HW supports the PCS.
1164  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1165  * configured for the TBI, RTBI, or SGMII PHY interface.
1166  */
stmmac_check_pcs_mode(struct stmmac_priv * priv)1167 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1168 {
1169 	int interface = priv->plat->phy_interface;
1170 	int speed = priv->plat->mac_port_sel_speed;
1171 
1172 	if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) {
1173 		netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1174 		priv->hw->pcs = STMMAC_PCS_SGMII;
1175 
1176 		switch (speed) {
1177 		case SPEED_10:
1178 		case SPEED_100:
1179 		case SPEED_1000:
1180 			priv->hw->reverse_sgmii_enable = true;
1181 			break;
1182 
1183 		default:
1184 			dev_warn(priv->device, "invalid port speed\n");
1185 			fallthrough;
1186 		case 0:
1187 			priv->hw->reverse_sgmii_enable = false;
1188 			break;
1189 		}
1190 	}
1191 }
1192 
1193 /**
1194  * stmmac_init_phy - PHY initialization
1195  * @dev: net device structure
1196  * Description: it initializes the driver's PHY state, and attaches the PHY
1197  * to the mac driver.
1198  *  Return value:
1199  *  0 on success
1200  */
stmmac_init_phy(struct net_device * dev)1201 static int stmmac_init_phy(struct net_device *dev)
1202 {
1203 	struct stmmac_priv *priv = netdev_priv(dev);
1204 	int mode = priv->plat->phy_interface;
1205 	struct fwnode_handle *phy_fwnode;
1206 	struct fwnode_handle *fwnode;
1207 	struct ethtool_keee eee;
1208 	int ret;
1209 
1210 	if (!phylink_expects_phy(priv->phylink))
1211 		return 0;
1212 
1213 	if (priv->hw->xpcs &&
1214 	    xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
1215 		return 0;
1216 
1217 	fwnode = priv->plat->port_node;
1218 	if (!fwnode)
1219 		fwnode = dev_fwnode(priv->device);
1220 
1221 	if (fwnode)
1222 		phy_fwnode = fwnode_get_phy_node(fwnode);
1223 	else
1224 		phy_fwnode = NULL;
1225 
1226 	/* Some DT bindings do not set-up the PHY handle. Let's try to
1227 	 * manually parse it
1228 	 */
1229 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1230 		int addr = priv->plat->phy_addr;
1231 		struct phy_device *phydev;
1232 
1233 		if (addr < 0) {
1234 			netdev_err(priv->dev, "no phy found\n");
1235 			return -ENODEV;
1236 		}
1237 
1238 		phydev = mdiobus_get_phy(priv->mii, addr);
1239 		if (!phydev) {
1240 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1241 			return -ENODEV;
1242 		}
1243 
1244 		ret = phylink_connect_phy(priv->phylink, phydev);
1245 	} else {
1246 		fwnode_handle_put(phy_fwnode);
1247 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1248 	}
1249 
1250 	if (ret) {
1251 		netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
1252 			   ERR_PTR(ret));
1253 		return ret;
1254 	}
1255 
1256 	/* Configure phylib's copy of the LPI timer. Normally,
1257 	 * phylink_config.lpi_timer_default would do this, but there is a
1258 	 * chance that userspace could change the eee_timer setting via sysfs
1259 	 * before the first open. Thus, preserve existing behaviour.
1260 	 */
1261 	if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1262 		eee.tx_lpi_timer = priv->tx_lpi_timer;
1263 		phylink_ethtool_set_eee(priv->phylink, &eee);
1264 	}
1265 
1266 	return 0;
1267 }
1268 
stmmac_phylink_setup(struct stmmac_priv * priv)1269 static int stmmac_phylink_setup(struct stmmac_priv *priv)
1270 {
1271 	struct stmmac_mdio_bus_data *mdio_bus_data;
1272 	struct phylink_config *config;
1273 	struct fwnode_handle *fwnode;
1274 	struct phylink_pcs *pcs;
1275 	struct phylink *phylink;
1276 
1277 	config = &priv->phylink_config;
1278 
1279 	config->dev = &priv->dev->dev;
1280 	config->type = PHYLINK_NETDEV;
1281 	config->mac_managed_pm = true;
1282 
1283 	/* Stmmac always requires an RX clock for hardware initialization */
1284 	config->mac_requires_rxc = true;
1285 
1286 	/* Disable EEE RX clock stop to ensure VLAN register access works
1287 	 * correctly.
1288 	 */
1289 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI) &&
1290 	    !(priv->dev->features & NETIF_F_VLAN_FEATURES))
1291 		config->eee_rx_clk_stop_enable = true;
1292 
1293 	/* Set the default transmit clock stop bit based on the platform glue */
1294 	priv->tx_lpi_clk_stop = priv->plat->flags &
1295 				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1296 
1297 	mdio_bus_data = priv->plat->mdio_bus_data;
1298 	if (mdio_bus_data)
1299 		config->default_an_inband = mdio_bus_data->default_an_inband;
1300 
1301 	/* Get the PHY interface modes (at the PHY end of the link) that
1302 	 * are supported by the platform.
1303 	 */
1304 	if (priv->plat->get_interfaces)
1305 		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1306 					   config->supported_interfaces);
1307 
1308 	/* Set the platform/firmware specified interface mode if the
1309 	 * supported interfaces have not already been provided using
1310 	 * phy_interface as a last resort.
1311 	 */
1312 	if (phy_interface_empty(config->supported_interfaces))
1313 		__set_bit(priv->plat->phy_interface,
1314 			  config->supported_interfaces);
1315 
1316 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1317 	if (priv->hw->xpcs)
1318 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1319 	else
1320 		pcs = priv->hw->phylink_pcs;
1321 
1322 	if (pcs)
1323 		phy_interface_or(config->supported_interfaces,
1324 				 config->supported_interfaces,
1325 				 pcs->supported_interfaces);
1326 
1327 	if (priv->dma_cap.eee) {
1328 		/* Assume all supported interfaces also support LPI */
1329 		memcpy(config->lpi_interfaces, config->supported_interfaces,
1330 		       sizeof(config->lpi_interfaces));
1331 
1332 		/* All full duplex speeds above 100Mbps are supported */
1333 		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
1334 		config->lpi_timer_default = eee_timer * 1000;
1335 		config->eee_enabled_default = true;
1336 	}
1337 
1338 	config->wol_phy_speed_ctrl = true;
1339 	if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) {
1340 		config->wol_phy_legacy = true;
1341 	} else {
1342 		if (priv->dma_cap.pmt_remote_wake_up)
1343 			config->wol_mac_support |= WAKE_UCAST;
1344 		if (priv->dma_cap.pmt_magic_frame)
1345 			config->wol_mac_support |= WAKE_MAGIC;
1346 	}
1347 
1348 	fwnode = priv->plat->port_node;
1349 	if (!fwnode)
1350 		fwnode = dev_fwnode(priv->device);
1351 
1352 	phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1353 				 &stmmac_phylink_mac_ops);
1354 	if (IS_ERR(phylink))
1355 		return PTR_ERR(phylink);
1356 
1357 	priv->phylink = phylink;
1358 	return 0;
1359 }
1360 
stmmac_display_rx_rings(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1361 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1362 				    struct stmmac_dma_conf *dma_conf)
1363 {
1364 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1365 	unsigned int desc_size;
1366 	void *head_rx;
1367 	u32 queue;
1368 
1369 	/* Display RX rings */
1370 	for (queue = 0; queue < rx_cnt; queue++) {
1371 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1372 
1373 		pr_info("\tRX Queue %u rings\n", queue);
1374 
1375 		if (priv->extend_desc) {
1376 			head_rx = (void *)rx_q->dma_erx;
1377 			desc_size = sizeof(struct dma_extended_desc);
1378 		} else {
1379 			head_rx = (void *)rx_q->dma_rx;
1380 			desc_size = sizeof(struct dma_desc);
1381 		}
1382 
1383 		/* Display RX ring */
1384 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1385 				    rx_q->dma_rx_phy, desc_size);
1386 	}
1387 }
1388 
stmmac_display_tx_rings(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1389 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1390 				    struct stmmac_dma_conf *dma_conf)
1391 {
1392 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1393 	unsigned int desc_size;
1394 	void *head_tx;
1395 	u32 queue;
1396 
1397 	/* Display TX rings */
1398 	for (queue = 0; queue < tx_cnt; queue++) {
1399 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1400 
1401 		pr_info("\tTX Queue %d rings\n", queue);
1402 
1403 		if (priv->extend_desc) {
1404 			head_tx = (void *)tx_q->dma_etx;
1405 			desc_size = sizeof(struct dma_extended_desc);
1406 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1407 			head_tx = (void *)tx_q->dma_entx;
1408 			desc_size = sizeof(struct dma_edesc);
1409 		} else {
1410 			head_tx = (void *)tx_q->dma_tx;
1411 			desc_size = sizeof(struct dma_desc);
1412 		}
1413 
1414 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1415 				    tx_q->dma_tx_phy, desc_size);
1416 	}
1417 }
1418 
stmmac_display_rings(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1419 static void stmmac_display_rings(struct stmmac_priv *priv,
1420 				 struct stmmac_dma_conf *dma_conf)
1421 {
1422 	/* Display RX ring */
1423 	stmmac_display_rx_rings(priv, dma_conf);
1424 
1425 	/* Display TX ring */
1426 	stmmac_display_tx_rings(priv, dma_conf);
1427 }
1428 
stmmac_rx_offset(struct stmmac_priv * priv)1429 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1430 {
1431 	if (stmmac_xdp_is_enabled(priv))
1432 		return XDP_PACKET_HEADROOM;
1433 
1434 	return NET_SKB_PAD;
1435 }
1436 
stmmac_set_bfsize(int mtu)1437 static int stmmac_set_bfsize(int mtu)
1438 {
1439 	int ret;
1440 
1441 	if (mtu >= BUF_SIZE_8KiB)
1442 		ret = BUF_SIZE_16KiB;
1443 	else if (mtu >= BUF_SIZE_4KiB)
1444 		ret = BUF_SIZE_8KiB;
1445 	else if (mtu >= BUF_SIZE_2KiB)
1446 		ret = BUF_SIZE_4KiB;
1447 	else if (mtu > DEFAULT_BUFSIZE)
1448 		ret = BUF_SIZE_2KiB;
1449 	else
1450 		ret = DEFAULT_BUFSIZE;
1451 
1452 	return ret;
1453 }
1454 
1455 /**
1456  * stmmac_clear_rx_descriptors - clear RX descriptors
1457  * @priv: driver private structure
1458  * @dma_conf: structure to take the dma data
1459  * @queue: RX queue index
1460  * Description: this function is called to clear the RX descriptors
1461  * in case of both basic and extended descriptors are used.
1462  */
stmmac_clear_rx_descriptors(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1463 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1464 					struct stmmac_dma_conf *dma_conf,
1465 					u32 queue)
1466 {
1467 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1468 	int i;
1469 
1470 	/* Clear the RX descriptors */
1471 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1472 		if (priv->extend_desc)
1473 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1474 					priv->use_riwt, priv->mode,
1475 					(i == dma_conf->dma_rx_size - 1),
1476 					dma_conf->dma_buf_sz);
1477 		else
1478 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1479 					priv->use_riwt, priv->mode,
1480 					(i == dma_conf->dma_rx_size - 1),
1481 					dma_conf->dma_buf_sz);
1482 }
1483 
1484 /**
1485  * stmmac_clear_tx_descriptors - clear tx descriptors
1486  * @priv: driver private structure
1487  * @dma_conf: structure to take the dma data
1488  * @queue: TX queue index.
1489  * Description: this function is called to clear the TX descriptors
1490  * in case of both basic and extended descriptors are used.
1491  */
stmmac_clear_tx_descriptors(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1492 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1493 					struct stmmac_dma_conf *dma_conf,
1494 					u32 queue)
1495 {
1496 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1497 	int i;
1498 
1499 	/* Clear the TX descriptors */
1500 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1501 		int last = (i == (dma_conf->dma_tx_size - 1));
1502 		struct dma_desc *p;
1503 
1504 		if (priv->extend_desc)
1505 			p = &tx_q->dma_etx[i].basic;
1506 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1507 			p = &tx_q->dma_entx[i].basic;
1508 		else
1509 			p = &tx_q->dma_tx[i];
1510 
1511 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1512 	}
1513 }
1514 
1515 /**
1516  * stmmac_clear_descriptors - clear descriptors
1517  * @priv: driver private structure
1518  * @dma_conf: structure to take the dma data
1519  * Description: this function is called to clear the TX and RX descriptors
1520  * in case of both basic and extended descriptors are used.
1521  */
stmmac_clear_descriptors(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1522 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1523 				     struct stmmac_dma_conf *dma_conf)
1524 {
1525 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1526 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1527 	u32 queue;
1528 
1529 	/* Clear the RX descriptors */
1530 	for (queue = 0; queue < rx_queue_cnt; queue++)
1531 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1532 
1533 	/* Clear the TX descriptors */
1534 	for (queue = 0; queue < tx_queue_cnt; queue++)
1535 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1536 }
1537 
1538 /**
1539  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1540  * @priv: driver private structure
1541  * @dma_conf: structure to take the dma data
1542  * @p: descriptor pointer
1543  * @i: descriptor index
1544  * @flags: gfp flag
1545  * @queue: RX queue index
1546  * Description: this function is called to allocate a receive buffer, perform
1547  * the DMA mapping and init the descriptor.
1548  */
stmmac_init_rx_buffers(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,struct dma_desc * p,int i,gfp_t flags,u32 queue)1549 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1550 				  struct stmmac_dma_conf *dma_conf,
1551 				  struct dma_desc *p,
1552 				  int i, gfp_t flags, u32 queue)
1553 {
1554 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1555 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1556 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1557 
1558 	if (priv->dma_cap.host_dma_width <= 32)
1559 		gfp |= GFP_DMA32;
1560 
1561 	if (!buf->page) {
1562 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1563 		if (!buf->page)
1564 			return -ENOMEM;
1565 		buf->page_offset = stmmac_rx_offset(priv);
1566 	}
1567 
1568 	if (priv->sph_active && !buf->sec_page) {
1569 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1570 		if (!buf->sec_page)
1571 			return -ENOMEM;
1572 
1573 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1574 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1575 	} else {
1576 		buf->sec_page = NULL;
1577 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1578 	}
1579 
1580 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1581 
1582 	stmmac_set_desc_addr(priv, p, buf->addr);
1583 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1584 		stmmac_init_desc3(priv, p);
1585 
1586 	return 0;
1587 }
1588 
1589 /**
1590  * stmmac_free_rx_buffer - free RX dma buffers
1591  * @priv: private structure
1592  * @rx_q: RX queue
1593  * @i: buffer index.
1594  */
stmmac_free_rx_buffer(struct stmmac_priv * priv,struct stmmac_rx_queue * rx_q,int i)1595 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1596 				  struct stmmac_rx_queue *rx_q,
1597 				  int i)
1598 {
1599 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1600 
1601 	if (buf->page)
1602 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1603 	buf->page = NULL;
1604 
1605 	if (buf->sec_page)
1606 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1607 	buf->sec_page = NULL;
1608 }
1609 
1610 /**
1611  * stmmac_free_tx_buffer - free RX dma buffers
1612  * @priv: private structure
1613  * @dma_conf: structure to take the dma data
1614  * @queue: RX queue index
1615  * @i: buffer index.
1616  */
stmmac_free_tx_buffer(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue,int i)1617 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1618 				  struct stmmac_dma_conf *dma_conf,
1619 				  u32 queue, int i)
1620 {
1621 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1622 
1623 	if (tx_q->tx_skbuff_dma[i].buf &&
1624 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1625 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1626 			dma_unmap_page(priv->device,
1627 				       tx_q->tx_skbuff_dma[i].buf,
1628 				       tx_q->tx_skbuff_dma[i].len,
1629 				       DMA_TO_DEVICE);
1630 		else
1631 			dma_unmap_single(priv->device,
1632 					 tx_q->tx_skbuff_dma[i].buf,
1633 					 tx_q->tx_skbuff_dma[i].len,
1634 					 DMA_TO_DEVICE);
1635 	}
1636 
1637 	if (tx_q->xdpf[i] &&
1638 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1639 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1640 		xdp_return_frame(tx_q->xdpf[i]);
1641 		tx_q->xdpf[i] = NULL;
1642 	}
1643 
1644 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1645 		tx_q->xsk_frames_done++;
1646 
1647 	if (tx_q->tx_skbuff[i] &&
1648 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1649 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1650 		tx_q->tx_skbuff[i] = NULL;
1651 	}
1652 
1653 	tx_q->tx_skbuff_dma[i].buf = 0;
1654 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1655 }
1656 
1657 /**
1658  * dma_free_rx_skbufs - free RX dma buffers
1659  * @priv: private structure
1660  * @dma_conf: structure to take the dma data
1661  * @queue: RX queue index
1662  */
dma_free_rx_skbufs(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1663 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1664 			       struct stmmac_dma_conf *dma_conf,
1665 			       u32 queue)
1666 {
1667 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1668 	int i;
1669 
1670 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1671 		stmmac_free_rx_buffer(priv, rx_q, i);
1672 }
1673 
stmmac_alloc_rx_buffers(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue,gfp_t flags)1674 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1675 				   struct stmmac_dma_conf *dma_conf,
1676 				   u32 queue, gfp_t flags)
1677 {
1678 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1679 	int i;
1680 
1681 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1682 		struct dma_desc *p;
1683 		int ret;
1684 
1685 		if (priv->extend_desc)
1686 			p = &((rx_q->dma_erx + i)->basic);
1687 		else
1688 			p = rx_q->dma_rx + i;
1689 
1690 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1691 					     queue);
1692 		if (ret)
1693 			return ret;
1694 
1695 		rx_q->buf_alloc_num++;
1696 	}
1697 
1698 	return 0;
1699 }
1700 
1701 /**
1702  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1703  * @priv: private structure
1704  * @dma_conf: structure to take the dma data
1705  * @queue: RX queue index
1706  */
dma_free_rx_xskbufs(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1707 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1708 				struct stmmac_dma_conf *dma_conf,
1709 				u32 queue)
1710 {
1711 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1712 	int i;
1713 
1714 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1715 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1716 
1717 		if (!buf->xdp)
1718 			continue;
1719 
1720 		xsk_buff_free(buf->xdp);
1721 		buf->xdp = NULL;
1722 	}
1723 }
1724 
stmmac_alloc_rx_buffers_zc(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1725 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1726 				      struct stmmac_dma_conf *dma_conf,
1727 				      u32 queue)
1728 {
1729 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1730 	int i;
1731 
1732 	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1733 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1734 	 * use this macro to make sure no size violations.
1735 	 */
1736 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1737 
1738 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1739 		struct stmmac_rx_buffer *buf;
1740 		dma_addr_t dma_addr;
1741 		struct dma_desc *p;
1742 
1743 		if (priv->extend_desc)
1744 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1745 		else
1746 			p = rx_q->dma_rx + i;
1747 
1748 		buf = &rx_q->buf_pool[i];
1749 
1750 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1751 		if (!buf->xdp)
1752 			return -ENOMEM;
1753 
1754 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1755 		stmmac_set_desc_addr(priv, p, dma_addr);
1756 		rx_q->buf_alloc_num++;
1757 	}
1758 
1759 	return 0;
1760 }
1761 
stmmac_get_xsk_pool(struct stmmac_priv * priv,u32 queue)1762 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1763 {
1764 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1765 		return NULL;
1766 
1767 	return xsk_get_pool_from_qid(priv->dev, queue);
1768 }
1769 
1770 /**
1771  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1772  * @priv: driver private structure
1773  * @dma_conf: structure to take the dma data
1774  * @queue: RX queue index
1775  * @flags: gfp flag.
1776  * Description: this function initializes the DMA RX descriptors
1777  * and allocates the socket buffers. It supports the chained and ring
1778  * modes.
1779  */
__init_dma_rx_desc_rings(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue,gfp_t flags)1780 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1781 				    struct stmmac_dma_conf *dma_conf,
1782 				    u32 queue, gfp_t flags)
1783 {
1784 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1785 	int ret;
1786 
1787 	netif_dbg(priv, probe, priv->dev,
1788 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1789 		  (u32)rx_q->dma_rx_phy);
1790 
1791 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1792 
1793 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1794 
1795 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1796 
1797 	if (rx_q->xsk_pool) {
1798 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1799 						   MEM_TYPE_XSK_BUFF_POOL,
1800 						   NULL));
1801 		netdev_info(priv->dev,
1802 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1803 			    rx_q->queue_index);
1804 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1805 	} else {
1806 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1807 						   MEM_TYPE_PAGE_POOL,
1808 						   rx_q->page_pool));
1809 		netdev_info(priv->dev,
1810 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1811 			    rx_q->queue_index);
1812 	}
1813 
1814 	if (rx_q->xsk_pool) {
1815 		/* RX XDP ZC buffer pool may not be populated, e.g.
1816 		 * xdpsock TX-only.
1817 		 */
1818 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1819 	} else {
1820 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1821 		if (ret < 0)
1822 			return -ENOMEM;
1823 	}
1824 
1825 	/* Setup the chained descriptor addresses */
1826 	if (priv->mode == STMMAC_CHAIN_MODE) {
1827 		if (priv->extend_desc)
1828 			stmmac_mode_init(priv, rx_q->dma_erx,
1829 					 rx_q->dma_rx_phy,
1830 					 dma_conf->dma_rx_size, 1);
1831 		else
1832 			stmmac_mode_init(priv, rx_q->dma_rx,
1833 					 rx_q->dma_rx_phy,
1834 					 dma_conf->dma_rx_size, 0);
1835 	}
1836 
1837 	return 0;
1838 }
1839 
init_dma_rx_desc_rings(struct net_device * dev,struct stmmac_dma_conf * dma_conf,gfp_t flags)1840 static int init_dma_rx_desc_rings(struct net_device *dev,
1841 				  struct stmmac_dma_conf *dma_conf,
1842 				  gfp_t flags)
1843 {
1844 	struct stmmac_priv *priv = netdev_priv(dev);
1845 	u32 rx_count = priv->plat->rx_queues_to_use;
1846 	int queue;
1847 	int ret;
1848 
1849 	/* RX INITIALIZATION */
1850 	netif_dbg(priv, probe, priv->dev,
1851 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1852 
1853 	for (queue = 0; queue < rx_count; queue++) {
1854 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1855 		if (ret)
1856 			goto err_init_rx_buffers;
1857 	}
1858 
1859 	return 0;
1860 
1861 err_init_rx_buffers:
1862 	while (queue >= 0) {
1863 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1864 
1865 		if (rx_q->xsk_pool)
1866 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1867 		else
1868 			dma_free_rx_skbufs(priv, dma_conf, queue);
1869 
1870 		rx_q->buf_alloc_num = 0;
1871 		rx_q->xsk_pool = NULL;
1872 
1873 		queue--;
1874 	}
1875 
1876 	return ret;
1877 }
1878 
1879 /**
1880  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1881  * @priv: driver private structure
1882  * @dma_conf: structure to take the dma data
1883  * @queue: TX queue index
1884  * Description: this function initializes the DMA TX descriptors
1885  * and allocates the socket buffers. It supports the chained and ring
1886  * modes.
1887  */
__init_dma_tx_desc_rings(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1888 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1889 				    struct stmmac_dma_conf *dma_conf,
1890 				    u32 queue)
1891 {
1892 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1893 	int i;
1894 
1895 	netif_dbg(priv, probe, priv->dev,
1896 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1897 		  (u32)tx_q->dma_tx_phy);
1898 
1899 	/* Setup the chained descriptor addresses */
1900 	if (priv->mode == STMMAC_CHAIN_MODE) {
1901 		if (priv->extend_desc)
1902 			stmmac_mode_init(priv, tx_q->dma_etx,
1903 					 tx_q->dma_tx_phy,
1904 					 dma_conf->dma_tx_size, 1);
1905 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1906 			stmmac_mode_init(priv, tx_q->dma_tx,
1907 					 tx_q->dma_tx_phy,
1908 					 dma_conf->dma_tx_size, 0);
1909 	}
1910 
1911 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1912 
1913 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1914 		struct dma_desc *p;
1915 
1916 		if (priv->extend_desc)
1917 			p = &((tx_q->dma_etx + i)->basic);
1918 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1919 			p = &((tx_q->dma_entx + i)->basic);
1920 		else
1921 			p = tx_q->dma_tx + i;
1922 
1923 		stmmac_clear_desc(priv, p);
1924 
1925 		tx_q->tx_skbuff_dma[i].buf = 0;
1926 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1927 		tx_q->tx_skbuff_dma[i].len = 0;
1928 		tx_q->tx_skbuff_dma[i].last_segment = false;
1929 		tx_q->tx_skbuff[i] = NULL;
1930 	}
1931 
1932 	return 0;
1933 }
1934 
init_dma_tx_desc_rings(struct net_device * dev,struct stmmac_dma_conf * dma_conf)1935 static int init_dma_tx_desc_rings(struct net_device *dev,
1936 				  struct stmmac_dma_conf *dma_conf)
1937 {
1938 	struct stmmac_priv *priv = netdev_priv(dev);
1939 	u32 tx_queue_cnt;
1940 	u32 queue;
1941 
1942 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1943 
1944 	for (queue = 0; queue < tx_queue_cnt; queue++)
1945 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1946 
1947 	return 0;
1948 }
1949 
1950 /**
1951  * init_dma_desc_rings - init the RX/TX descriptor rings
1952  * @dev: net device structure
1953  * @dma_conf: structure to take the dma data
1954  * @flags: gfp flag.
1955  * Description: this function initializes the DMA RX/TX descriptors
1956  * and allocates the socket buffers. It supports the chained and ring
1957  * modes.
1958  */
init_dma_desc_rings(struct net_device * dev,struct stmmac_dma_conf * dma_conf,gfp_t flags)1959 static int init_dma_desc_rings(struct net_device *dev,
1960 			       struct stmmac_dma_conf *dma_conf,
1961 			       gfp_t flags)
1962 {
1963 	struct stmmac_priv *priv = netdev_priv(dev);
1964 	int ret;
1965 
1966 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1967 	if (ret)
1968 		return ret;
1969 
1970 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1971 
1972 	stmmac_clear_descriptors(priv, dma_conf);
1973 
1974 	if (netif_msg_hw(priv))
1975 		stmmac_display_rings(priv, dma_conf);
1976 
1977 	return ret;
1978 }
1979 
1980 /**
1981  * dma_free_tx_skbufs - free TX dma buffers
1982  * @priv: private structure
1983  * @dma_conf: structure to take the dma data
1984  * @queue: TX queue index
1985  */
dma_free_tx_skbufs(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1986 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1987 			       struct stmmac_dma_conf *dma_conf,
1988 			       u32 queue)
1989 {
1990 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1991 	int i;
1992 
1993 	tx_q->xsk_frames_done = 0;
1994 
1995 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1996 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1997 
1998 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1999 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2000 		tx_q->xsk_frames_done = 0;
2001 		tx_q->xsk_pool = NULL;
2002 	}
2003 }
2004 
2005 /**
2006  * stmmac_free_tx_skbufs - free TX skb buffers
2007  * @priv: private structure
2008  */
stmmac_free_tx_skbufs(struct stmmac_priv * priv)2009 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
2010 {
2011 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
2012 	u32 queue;
2013 
2014 	for (queue = 0; queue < tx_queue_cnt; queue++)
2015 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
2016 }
2017 
2018 /**
2019  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
2020  * @priv: private structure
2021  * @dma_conf: structure to take the dma data
2022  * @queue: RX queue index
2023  */
__free_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)2024 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
2025 					 struct stmmac_dma_conf *dma_conf,
2026 					 u32 queue)
2027 {
2028 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2029 
2030 	/* Release the DMA RX socket buffers */
2031 	if (rx_q->xsk_pool)
2032 		dma_free_rx_xskbufs(priv, dma_conf, queue);
2033 	else
2034 		dma_free_rx_skbufs(priv, dma_conf, queue);
2035 
2036 	rx_q->buf_alloc_num = 0;
2037 	rx_q->xsk_pool = NULL;
2038 
2039 	/* Free DMA regions of consistent memory previously allocated */
2040 	if (!priv->extend_desc)
2041 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2042 				  sizeof(struct dma_desc),
2043 				  rx_q->dma_rx, rx_q->dma_rx_phy);
2044 	else
2045 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2046 				  sizeof(struct dma_extended_desc),
2047 				  rx_q->dma_erx, rx_q->dma_rx_phy);
2048 
2049 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
2050 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
2051 
2052 	kfree(rx_q->buf_pool);
2053 	if (rx_q->page_pool)
2054 		page_pool_destroy(rx_q->page_pool);
2055 }
2056 
free_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2057 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2058 				       struct stmmac_dma_conf *dma_conf)
2059 {
2060 	u32 rx_count = priv->plat->rx_queues_to_use;
2061 	u32 queue;
2062 
2063 	/* Free RX queue resources */
2064 	for (queue = 0; queue < rx_count; queue++)
2065 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
2066 }
2067 
2068 /**
2069  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2070  * @priv: private structure
2071  * @dma_conf: structure to take the dma data
2072  * @queue: TX queue index
2073  */
__free_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)2074 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2075 					 struct stmmac_dma_conf *dma_conf,
2076 					 u32 queue)
2077 {
2078 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2079 	size_t size;
2080 	void *addr;
2081 
2082 	/* Release the DMA TX socket buffers */
2083 	dma_free_tx_skbufs(priv, dma_conf, queue);
2084 
2085 	if (priv->extend_desc) {
2086 		size = sizeof(struct dma_extended_desc);
2087 		addr = tx_q->dma_etx;
2088 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2089 		size = sizeof(struct dma_edesc);
2090 		addr = tx_q->dma_entx;
2091 	} else {
2092 		size = sizeof(struct dma_desc);
2093 		addr = tx_q->dma_tx;
2094 	}
2095 
2096 	size *= dma_conf->dma_tx_size;
2097 
2098 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2099 
2100 	kfree(tx_q->tx_skbuff_dma);
2101 	kfree(tx_q->tx_skbuff);
2102 }
2103 
free_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2104 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2105 				       struct stmmac_dma_conf *dma_conf)
2106 {
2107 	u32 tx_count = priv->plat->tx_queues_to_use;
2108 	u32 queue;
2109 
2110 	/* Free TX queue resources */
2111 	for (queue = 0; queue < tx_count; queue++)
2112 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2113 }
2114 
2115 /**
2116  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2117  * @priv: private structure
2118  * @dma_conf: structure to take the dma data
2119  * @queue: RX queue index
2120  * Description: according to which descriptor can be used (extend or basic)
2121  * this function allocates the resources for TX and RX paths. In case of
2122  * reception, for example, it pre-allocated the RX socket buffer in order to
2123  * allow zero-copy mechanism.
2124  */
__alloc_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)2125 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2126 					 struct stmmac_dma_conf *dma_conf,
2127 					 u32 queue)
2128 {
2129 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2130 	struct stmmac_channel *ch = &priv->channel[queue];
2131 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2132 	struct page_pool_params pp_params = { 0 };
2133 	unsigned int dma_buf_sz_pad, num_pages;
2134 	unsigned int napi_id;
2135 	int ret;
2136 
2137 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2138 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2139 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2140 
2141 	rx_q->queue_index = queue;
2142 	rx_q->priv_data = priv;
2143 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2144 
2145 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2146 	pp_params.pool_size = dma_conf->dma_rx_size;
2147 	pp_params.order = order_base_2(num_pages);
2148 	pp_params.nid = dev_to_node(priv->device);
2149 	pp_params.dev = priv->device;
2150 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2151 	pp_params.offset = stmmac_rx_offset(priv);
2152 	pp_params.max_len = dma_conf->dma_buf_sz;
2153 
2154 	if (priv->sph_active) {
2155 		pp_params.offset = 0;
2156 		pp_params.max_len += stmmac_rx_offset(priv);
2157 	}
2158 
2159 	rx_q->page_pool = page_pool_create(&pp_params);
2160 	if (IS_ERR(rx_q->page_pool)) {
2161 		ret = PTR_ERR(rx_q->page_pool);
2162 		rx_q->page_pool = NULL;
2163 		return ret;
2164 	}
2165 
2166 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2167 				 sizeof(*rx_q->buf_pool),
2168 				 GFP_KERNEL);
2169 	if (!rx_q->buf_pool)
2170 		return -ENOMEM;
2171 
2172 	if (priv->extend_desc) {
2173 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2174 						   dma_conf->dma_rx_size *
2175 						   sizeof(struct dma_extended_desc),
2176 						   &rx_q->dma_rx_phy,
2177 						   GFP_KERNEL);
2178 		if (!rx_q->dma_erx)
2179 			return -ENOMEM;
2180 
2181 	} else {
2182 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2183 						  dma_conf->dma_rx_size *
2184 						  sizeof(struct dma_desc),
2185 						  &rx_q->dma_rx_phy,
2186 						  GFP_KERNEL);
2187 		if (!rx_q->dma_rx)
2188 			return -ENOMEM;
2189 	}
2190 
2191 	if (stmmac_xdp_is_enabled(priv) &&
2192 	    test_bit(queue, priv->af_xdp_zc_qps))
2193 		napi_id = ch->rxtx_napi.napi_id;
2194 	else
2195 		napi_id = ch->rx_napi.napi_id;
2196 
2197 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2198 			       rx_q->queue_index,
2199 			       napi_id);
2200 	if (ret) {
2201 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2202 		return -EINVAL;
2203 	}
2204 
2205 	return 0;
2206 }
2207 
alloc_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2208 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2209 				       struct stmmac_dma_conf *dma_conf)
2210 {
2211 	u32 rx_count = priv->plat->rx_queues_to_use;
2212 	u32 queue;
2213 	int ret;
2214 
2215 	/* RX queues buffers and DMA */
2216 	for (queue = 0; queue < rx_count; queue++) {
2217 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2218 		if (ret)
2219 			goto err_dma;
2220 	}
2221 
2222 	return 0;
2223 
2224 err_dma:
2225 	free_dma_rx_desc_resources(priv, dma_conf);
2226 
2227 	return ret;
2228 }
2229 
2230 /**
2231  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2232  * @priv: private structure
2233  * @dma_conf: structure to take the dma data
2234  * @queue: TX queue index
2235  * Description: according to which descriptor can be used (extend or basic)
2236  * this function allocates the resources for TX and RX paths. In case of
2237  * reception, for example, it pre-allocated the RX socket buffer in order to
2238  * allow zero-copy mechanism.
2239  */
__alloc_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)2240 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2241 					 struct stmmac_dma_conf *dma_conf,
2242 					 u32 queue)
2243 {
2244 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2245 	size_t size;
2246 	void *addr;
2247 
2248 	tx_q->queue_index = queue;
2249 	tx_q->priv_data = priv;
2250 
2251 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2252 				      sizeof(*tx_q->tx_skbuff_dma),
2253 				      GFP_KERNEL);
2254 	if (!tx_q->tx_skbuff_dma)
2255 		return -ENOMEM;
2256 
2257 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2258 				  sizeof(struct sk_buff *),
2259 				  GFP_KERNEL);
2260 	if (!tx_q->tx_skbuff)
2261 		return -ENOMEM;
2262 
2263 	if (priv->extend_desc)
2264 		size = sizeof(struct dma_extended_desc);
2265 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2266 		size = sizeof(struct dma_edesc);
2267 	else
2268 		size = sizeof(struct dma_desc);
2269 
2270 	size *= dma_conf->dma_tx_size;
2271 
2272 	addr = dma_alloc_coherent(priv->device, size,
2273 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2274 	if (!addr)
2275 		return -ENOMEM;
2276 
2277 	if (priv->extend_desc)
2278 		tx_q->dma_etx = addr;
2279 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2280 		tx_q->dma_entx = addr;
2281 	else
2282 		tx_q->dma_tx = addr;
2283 
2284 	return 0;
2285 }
2286 
alloc_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2287 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2288 				       struct stmmac_dma_conf *dma_conf)
2289 {
2290 	u32 tx_count = priv->plat->tx_queues_to_use;
2291 	u32 queue;
2292 	int ret;
2293 
2294 	/* TX queues buffers and DMA */
2295 	for (queue = 0; queue < tx_count; queue++) {
2296 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2297 		if (ret)
2298 			goto err_dma;
2299 	}
2300 
2301 	return 0;
2302 
2303 err_dma:
2304 	free_dma_tx_desc_resources(priv, dma_conf);
2305 	return ret;
2306 }
2307 
2308 /**
2309  * alloc_dma_desc_resources - alloc TX/RX resources.
2310  * @priv: private structure
2311  * @dma_conf: structure to take the dma data
2312  * Description: according to which descriptor can be used (extend or basic)
2313  * this function allocates the resources for TX and RX paths. In case of
2314  * reception, for example, it pre-allocated the RX socket buffer in order to
2315  * allow zero-copy mechanism.
2316  */
alloc_dma_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2317 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2318 				    struct stmmac_dma_conf *dma_conf)
2319 {
2320 	/* RX Allocation */
2321 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2322 
2323 	if (ret)
2324 		return ret;
2325 
2326 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2327 
2328 	return ret;
2329 }
2330 
2331 /**
2332  * free_dma_desc_resources - free dma desc resources
2333  * @priv: private structure
2334  * @dma_conf: structure to take the dma data
2335  */
free_dma_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2336 static void free_dma_desc_resources(struct stmmac_priv *priv,
2337 				    struct stmmac_dma_conf *dma_conf)
2338 {
2339 	/* Release the DMA TX socket buffers */
2340 	free_dma_tx_desc_resources(priv, dma_conf);
2341 
2342 	/* Release the DMA RX socket buffers later
2343 	 * to ensure all pending XDP_TX buffers are returned.
2344 	 */
2345 	free_dma_rx_desc_resources(priv, dma_conf);
2346 }
2347 
2348 /**
2349  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2350  *  @priv: driver private structure
2351  *  Description: It is used for enabling the rx queues in the MAC
2352  */
stmmac_mac_enable_rx_queues(struct stmmac_priv * priv)2353 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2354 {
2355 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2356 	int queue;
2357 	u8 mode;
2358 
2359 	for (queue = 0; queue < rx_queues_count; queue++) {
2360 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2361 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2362 	}
2363 }
2364 
2365 /**
2366  * stmmac_start_rx_dma - start RX DMA channel
2367  * @priv: driver private structure
2368  * @chan: RX channel index
2369  * Description:
2370  * This starts a RX DMA channel
2371  */
stmmac_start_rx_dma(struct stmmac_priv * priv,u32 chan)2372 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2373 {
2374 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2375 	stmmac_start_rx(priv, priv->ioaddr, chan);
2376 }
2377 
2378 /**
2379  * stmmac_start_tx_dma - start TX DMA channel
2380  * @priv: driver private structure
2381  * @chan: TX channel index
2382  * Description:
2383  * This starts a TX DMA channel
2384  */
stmmac_start_tx_dma(struct stmmac_priv * priv,u32 chan)2385 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2386 {
2387 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2388 	stmmac_start_tx(priv, priv->ioaddr, chan);
2389 }
2390 
2391 /**
2392  * stmmac_stop_rx_dma - stop RX DMA channel
2393  * @priv: driver private structure
2394  * @chan: RX channel index
2395  * Description:
2396  * This stops a RX DMA channel
2397  */
stmmac_stop_rx_dma(struct stmmac_priv * priv,u32 chan)2398 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2399 {
2400 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2401 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2402 }
2403 
2404 /**
2405  * stmmac_stop_tx_dma - stop TX DMA channel
2406  * @priv: driver private structure
2407  * @chan: TX channel index
2408  * Description:
2409  * This stops a TX DMA channel
2410  */
stmmac_stop_tx_dma(struct stmmac_priv * priv,u32 chan)2411 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2412 {
2413 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2414 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2415 }
2416 
stmmac_enable_all_dma_irq(struct stmmac_priv * priv)2417 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2418 {
2419 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2420 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2421 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2422 	u32 chan;
2423 
2424 	for (chan = 0; chan < dma_csr_ch; chan++) {
2425 		struct stmmac_channel *ch = &priv->channel[chan];
2426 		unsigned long flags;
2427 
2428 		spin_lock_irqsave(&ch->lock, flags);
2429 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2430 		spin_unlock_irqrestore(&ch->lock, flags);
2431 	}
2432 }
2433 
2434 /**
2435  * stmmac_start_all_dma - start all RX and TX DMA channels
2436  * @priv: driver private structure
2437  * Description:
2438  * This starts all the RX and TX DMA channels
2439  */
stmmac_start_all_dma(struct stmmac_priv * priv)2440 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2441 {
2442 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2443 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2444 	u32 chan = 0;
2445 
2446 	for (chan = 0; chan < rx_channels_count; chan++)
2447 		stmmac_start_rx_dma(priv, chan);
2448 
2449 	for (chan = 0; chan < tx_channels_count; chan++)
2450 		stmmac_start_tx_dma(priv, chan);
2451 }
2452 
2453 /**
2454  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2455  * @priv: driver private structure
2456  * Description:
2457  * This stops the RX and TX DMA channels
2458  */
stmmac_stop_all_dma(struct stmmac_priv * priv)2459 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2460 {
2461 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2462 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2463 	u32 chan = 0;
2464 
2465 	for (chan = 0; chan < rx_channels_count; chan++)
2466 		stmmac_stop_rx_dma(priv, chan);
2467 
2468 	for (chan = 0; chan < tx_channels_count; chan++)
2469 		stmmac_stop_tx_dma(priv, chan);
2470 }
2471 
2472 /**
2473  *  stmmac_dma_operation_mode - HW DMA operation mode
2474  *  @priv: driver private structure
2475  *  Description: it is used for configuring the DMA operation mode register in
2476  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2477  */
stmmac_dma_operation_mode(struct stmmac_priv * priv)2478 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2479 {
2480 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2481 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2482 	int rxfifosz = priv->plat->rx_fifo_size;
2483 	int txfifosz = priv->plat->tx_fifo_size;
2484 	u32 txmode = 0;
2485 	u32 rxmode = 0;
2486 	u32 chan = 0;
2487 	u8 qmode = 0;
2488 
2489 	if (rxfifosz == 0)
2490 		rxfifosz = priv->dma_cap.rx_fifo_size;
2491 	if (txfifosz == 0)
2492 		txfifosz = priv->dma_cap.tx_fifo_size;
2493 
2494 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2495 	if (dwmac_is_xmac(priv->plat->core_type)) {
2496 		rxfifosz /= rx_channels_count;
2497 		txfifosz /= tx_channels_count;
2498 	}
2499 
2500 	if (priv->plat->force_thresh_dma_mode) {
2501 		txmode = tc;
2502 		rxmode = tc;
2503 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2504 		/*
2505 		 * In case of GMAC, SF mode can be enabled
2506 		 * to perform the TX COE in HW. This depends on:
2507 		 * 1) TX COE if actually supported
2508 		 * 2) There is no bugged Jumbo frame support
2509 		 *    that needs to not insert csum in the TDES.
2510 		 */
2511 		txmode = SF_DMA_MODE;
2512 		rxmode = SF_DMA_MODE;
2513 		priv->xstats.threshold = SF_DMA_MODE;
2514 	} else {
2515 		txmode = tc;
2516 		rxmode = SF_DMA_MODE;
2517 	}
2518 
2519 	/* configure all channels */
2520 	for (chan = 0; chan < rx_channels_count; chan++) {
2521 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2522 		u32 buf_size;
2523 
2524 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2525 
2526 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2527 				rxfifosz, qmode);
2528 
2529 		if (rx_q->xsk_pool) {
2530 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2531 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2532 					      buf_size,
2533 					      chan);
2534 		} else {
2535 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2536 					      priv->dma_conf.dma_buf_sz,
2537 					      chan);
2538 		}
2539 	}
2540 
2541 	for (chan = 0; chan < tx_channels_count; chan++) {
2542 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2543 
2544 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2545 				txfifosz, qmode);
2546 	}
2547 }
2548 
stmmac_xsk_request_timestamp(void * _priv)2549 static void stmmac_xsk_request_timestamp(void *_priv)
2550 {
2551 	struct stmmac_metadata_request *meta_req = _priv;
2552 
2553 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2554 	*meta_req->set_ic = true;
2555 }
2556 
stmmac_xsk_fill_timestamp(void * _priv)2557 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2558 {
2559 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2560 	struct stmmac_priv *priv = tx_compl->priv;
2561 	struct dma_desc *desc = tx_compl->desc;
2562 	bool found = false;
2563 	u64 ns = 0;
2564 
2565 	if (!priv->hwts_tx_en)
2566 		return 0;
2567 
2568 	/* check tx tstamp status */
2569 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2570 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2571 		found = true;
2572 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2573 		found = true;
2574 	}
2575 
2576 	if (found) {
2577 		ns -= priv->plat->cdc_error_adj;
2578 		return ns_to_ktime(ns);
2579 	}
2580 
2581 	return 0;
2582 }
2583 
stmmac_xsk_request_launch_time(u64 launch_time,void * _priv)2584 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2585 {
2586 	struct timespec64 ts = ns_to_timespec64(launch_time);
2587 	struct stmmac_metadata_request *meta_req = _priv;
2588 
2589 	if (meta_req->tbs & STMMAC_TBS_EN)
2590 		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2591 				    ts.tv_nsec);
2592 }
2593 
2594 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2595 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2596 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2597 	.tmo_request_launch_time	= stmmac_xsk_request_launch_time,
2598 };
2599 
stmmac_xdp_xmit_zc(struct stmmac_priv * priv,u32 queue,u32 budget)2600 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2601 {
2602 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2603 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2604 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2605 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
2606 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2607 	unsigned int entry = tx_q->cur_tx;
2608 	struct dma_desc *tx_desc = NULL;
2609 	struct xdp_desc xdp_desc;
2610 	bool work_done = true;
2611 	u32 tx_set_ic_bit = 0;
2612 
2613 	/* Avoids TX time-out as we are sharing with slow path */
2614 	txq_trans_cond_update(nq);
2615 
2616 	budget = min(budget, stmmac_tx_avail(priv, queue));
2617 
2618 	for (; budget > 0; budget--) {
2619 		struct stmmac_metadata_request meta_req;
2620 		struct xsk_tx_metadata *meta = NULL;
2621 		dma_addr_t dma_addr;
2622 		bool set_ic;
2623 
2624 		/* We are sharing with slow path and stop XSK TX desc submission when
2625 		 * available TX ring is less than threshold.
2626 		 */
2627 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2628 		    !netif_carrier_ok(priv->dev)) {
2629 			work_done = false;
2630 			break;
2631 		}
2632 
2633 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2634 			break;
2635 
2636 		if (priv->est && priv->est->enable &&
2637 		    priv->est->max_sdu[queue] &&
2638 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2639 			priv->xstats.max_sdu_txq_drop[queue]++;
2640 			continue;
2641 		}
2642 
2643 		if (likely(priv->extend_desc))
2644 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2645 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2646 			tx_desc = &tx_q->dma_entx[entry].basic;
2647 		else
2648 			tx_desc = tx_q->dma_tx + entry;
2649 
2650 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2651 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2652 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2653 
2654 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2655 
2656 		/* To return XDP buffer to XSK pool, we simple call
2657 		 * xsk_tx_completed(), so we don't need to fill up
2658 		 * 'buf' and 'xdpf'.
2659 		 */
2660 		tx_q->tx_skbuff_dma[entry].buf = 0;
2661 		tx_q->xdpf[entry] = NULL;
2662 
2663 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2664 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2665 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2666 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2667 
2668 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2669 
2670 		tx_q->tx_count_frames++;
2671 
2672 		if (!priv->tx_coal_frames[queue])
2673 			set_ic = false;
2674 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2675 			set_ic = true;
2676 		else
2677 			set_ic = false;
2678 
2679 		meta_req.priv = priv;
2680 		meta_req.tx_desc = tx_desc;
2681 		meta_req.set_ic = &set_ic;
2682 		meta_req.tbs = tx_q->tbs;
2683 		meta_req.edesc = &tx_q->dma_entx[entry];
2684 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2685 					&meta_req);
2686 		if (set_ic) {
2687 			tx_q->tx_count_frames = 0;
2688 			stmmac_set_tx_ic(priv, tx_desc);
2689 			tx_set_ic_bit++;
2690 		}
2691 
2692 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2693 				       csum, priv->mode, true, true,
2694 				       xdp_desc.len);
2695 
2696 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2697 
2698 		xsk_tx_metadata_to_compl(meta,
2699 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2700 
2701 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2702 		entry = tx_q->cur_tx;
2703 	}
2704 	u64_stats_update_begin(&txq_stats->napi_syncp);
2705 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2706 	u64_stats_update_end(&txq_stats->napi_syncp);
2707 
2708 	if (tx_desc) {
2709 		stmmac_flush_tx_descriptors(priv, queue);
2710 		xsk_tx_release(pool);
2711 	}
2712 
2713 	/* Return true if all of the 3 conditions are met
2714 	 *  a) TX Budget is still available
2715 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2716 	 *     pending XSK TX for transmission)
2717 	 */
2718 	return !!budget && work_done;
2719 }
2720 
stmmac_bump_dma_threshold(struct stmmac_priv * priv,u32 chan)2721 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2722 {
2723 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2724 		tc += 64;
2725 
2726 		if (priv->plat->force_thresh_dma_mode)
2727 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2728 		else
2729 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2730 						      chan);
2731 
2732 		priv->xstats.threshold = tc;
2733 	}
2734 }
2735 
2736 /**
2737  * stmmac_tx_clean - to manage the transmission completion
2738  * @priv: driver private structure
2739  * @budget: napi budget limiting this functions packet handling
2740  * @queue: TX queue index
2741  * @pending_packets: signal to arm the TX coal timer
2742  * Description: it reclaims the transmit resources after transmission completes.
2743  * If some packets still needs to be handled, due to TX coalesce, set
2744  * pending_packets to true to make NAPI arm the TX coal timer.
2745  */
stmmac_tx_clean(struct stmmac_priv * priv,int budget,u32 queue,bool * pending_packets)2746 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2747 			   bool *pending_packets)
2748 {
2749 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2750 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2751 	unsigned int bytes_compl = 0, pkts_compl = 0;
2752 	unsigned int entry, xmits = 0, count = 0;
2753 	u32 tx_packets = 0, tx_errors = 0;
2754 
2755 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2756 
2757 	tx_q->xsk_frames_done = 0;
2758 
2759 	entry = tx_q->dirty_tx;
2760 
2761 	/* Try to clean all TX complete frame in 1 shot */
2762 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2763 		struct xdp_frame *xdpf;
2764 		struct sk_buff *skb;
2765 		struct dma_desc *p;
2766 		int status;
2767 
2768 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2769 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2770 			xdpf = tx_q->xdpf[entry];
2771 			skb = NULL;
2772 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2773 			xdpf = NULL;
2774 			skb = tx_q->tx_skbuff[entry];
2775 		} else {
2776 			xdpf = NULL;
2777 			skb = NULL;
2778 		}
2779 
2780 		if (priv->extend_desc)
2781 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2782 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2783 			p = &tx_q->dma_entx[entry].basic;
2784 		else
2785 			p = tx_q->dma_tx + entry;
2786 
2787 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2788 		/* Check if the descriptor is owned by the DMA */
2789 		if (unlikely(status & tx_dma_own))
2790 			break;
2791 
2792 		count++;
2793 
2794 		/* Make sure descriptor fields are read after reading
2795 		 * the own bit.
2796 		 */
2797 		dma_rmb();
2798 
2799 		/* Just consider the last segment and ...*/
2800 		if (likely(!(status & tx_not_ls))) {
2801 			/* ... verify the status error condition */
2802 			if (unlikely(status & tx_err)) {
2803 				tx_errors++;
2804 				if (unlikely(status & tx_err_bump_tc))
2805 					stmmac_bump_dma_threshold(priv, queue);
2806 			} else {
2807 				tx_packets++;
2808 			}
2809 			if (skb) {
2810 				stmmac_get_tx_hwtstamp(priv, p, skb);
2811 			} else if (tx_q->xsk_pool &&
2812 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2813 				struct stmmac_xsk_tx_complete tx_compl = {
2814 					.priv = priv,
2815 					.desc = p,
2816 				};
2817 
2818 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2819 							 &stmmac_xsk_tx_metadata_ops,
2820 							 &tx_compl);
2821 			}
2822 		}
2823 
2824 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2825 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2826 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2827 				dma_unmap_page(priv->device,
2828 					       tx_q->tx_skbuff_dma[entry].buf,
2829 					       tx_q->tx_skbuff_dma[entry].len,
2830 					       DMA_TO_DEVICE);
2831 			else
2832 				dma_unmap_single(priv->device,
2833 						 tx_q->tx_skbuff_dma[entry].buf,
2834 						 tx_q->tx_skbuff_dma[entry].len,
2835 						 DMA_TO_DEVICE);
2836 			tx_q->tx_skbuff_dma[entry].buf = 0;
2837 			tx_q->tx_skbuff_dma[entry].len = 0;
2838 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2839 		}
2840 
2841 		stmmac_clean_desc3(priv, tx_q, p);
2842 
2843 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2844 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2845 
2846 		if (xdpf &&
2847 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2848 			xdp_return_frame_rx_napi(xdpf);
2849 			tx_q->xdpf[entry] = NULL;
2850 		}
2851 
2852 		if (xdpf &&
2853 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2854 			xdp_return_frame(xdpf);
2855 			tx_q->xdpf[entry] = NULL;
2856 		}
2857 
2858 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2859 			tx_q->xsk_frames_done++;
2860 
2861 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2862 			if (likely(skb)) {
2863 				pkts_compl++;
2864 				bytes_compl += skb->len;
2865 				dev_consume_skb_any(skb);
2866 				tx_q->tx_skbuff[entry] = NULL;
2867 			}
2868 		}
2869 
2870 		stmmac_release_tx_desc(priv, p, priv->mode);
2871 
2872 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2873 	}
2874 	tx_q->dirty_tx = entry;
2875 
2876 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2877 				  pkts_compl, bytes_compl);
2878 
2879 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2880 								queue))) &&
2881 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2882 
2883 		netif_dbg(priv, tx_done, priv->dev,
2884 			  "%s: restart transmit\n", __func__);
2885 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2886 	}
2887 
2888 	if (tx_q->xsk_pool) {
2889 		bool work_done;
2890 
2891 		if (tx_q->xsk_frames_done)
2892 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2893 
2894 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2895 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2896 
2897 		/* For XSK TX, we try to send as many as possible.
2898 		 * If XSK work done (XSK TX desc empty and budget still
2899 		 * available), return "budget - 1" to reenable TX IRQ.
2900 		 * Else, return "budget" to make NAPI continue polling.
2901 		 */
2902 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2903 					       STMMAC_XSK_TX_BUDGET_MAX);
2904 		if (work_done)
2905 			xmits = budget - 1;
2906 		else
2907 			xmits = budget;
2908 	}
2909 
2910 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2911 		stmmac_restart_sw_lpi_timer(priv);
2912 
2913 	/* We still have pending packets, let's call for a new scheduling */
2914 	if (tx_q->dirty_tx != tx_q->cur_tx)
2915 		*pending_packets = true;
2916 
2917 	u64_stats_update_begin(&txq_stats->napi_syncp);
2918 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2919 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2920 	u64_stats_inc(&txq_stats->napi.tx_clean);
2921 	u64_stats_update_end(&txq_stats->napi_syncp);
2922 
2923 	priv->xstats.tx_errors += tx_errors;
2924 
2925 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2926 
2927 	/* Combine decisions from TX clean and XSK TX */
2928 	return max(count, xmits);
2929 }
2930 
2931 /**
2932  * stmmac_tx_err - to manage the tx error
2933  * @priv: driver private structure
2934  * @chan: channel index
2935  * Description: it cleans the descriptors and restarts the transmission
2936  * in case of transmission errors.
2937  */
stmmac_tx_err(struct stmmac_priv * priv,u32 chan)2938 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2939 {
2940 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2941 
2942 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2943 
2944 	stmmac_stop_tx_dma(priv, chan);
2945 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2946 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2947 	stmmac_reset_tx_queue(priv, chan);
2948 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2949 			    tx_q->dma_tx_phy, chan);
2950 	stmmac_start_tx_dma(priv, chan);
2951 
2952 	priv->xstats.tx_errors++;
2953 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2954 }
2955 
2956 /**
2957  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2958  *  @priv: driver private structure
2959  *  @txmode: TX operating mode
2960  *  @rxmode: RX operating mode
2961  *  @chan: channel index
2962  *  Description: it is used for configuring of the DMA operation mode in
2963  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2964  *  mode.
2965  */
stmmac_set_dma_operation_mode(struct stmmac_priv * priv,u32 txmode,u32 rxmode,u32 chan)2966 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2967 					  u32 rxmode, u32 chan)
2968 {
2969 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2970 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2971 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2972 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2973 	int rxfifosz = priv->plat->rx_fifo_size;
2974 	int txfifosz = priv->plat->tx_fifo_size;
2975 
2976 	if (rxfifosz == 0)
2977 		rxfifosz = priv->dma_cap.rx_fifo_size;
2978 	if (txfifosz == 0)
2979 		txfifosz = priv->dma_cap.tx_fifo_size;
2980 
2981 	/* Adjust for real per queue fifo size */
2982 	rxfifosz /= rx_channels_count;
2983 	txfifosz /= tx_channels_count;
2984 
2985 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2986 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2987 }
2988 
stmmac_safety_feat_interrupt(struct stmmac_priv * priv)2989 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2990 {
2991 	int ret;
2992 
2993 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2994 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2995 	if (ret && (ret != -EINVAL)) {
2996 		stmmac_global_err(priv);
2997 		return true;
2998 	}
2999 
3000 	return false;
3001 }
3002 
stmmac_napi_check(struct stmmac_priv * priv,u32 chan,u32 dir)3003 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
3004 {
3005 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3006 						 &priv->xstats, chan, dir);
3007 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
3008 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3009 	struct stmmac_channel *ch = &priv->channel[chan];
3010 	struct napi_struct *rx_napi;
3011 	struct napi_struct *tx_napi;
3012 	unsigned long flags;
3013 
3014 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
3015 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3016 
3017 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
3018 		if (napi_schedule_prep(rx_napi)) {
3019 			spin_lock_irqsave(&ch->lock, flags);
3020 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3021 			spin_unlock_irqrestore(&ch->lock, flags);
3022 			__napi_schedule(rx_napi);
3023 		}
3024 	}
3025 
3026 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
3027 		if (napi_schedule_prep(tx_napi)) {
3028 			spin_lock_irqsave(&ch->lock, flags);
3029 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3030 			spin_unlock_irqrestore(&ch->lock, flags);
3031 			__napi_schedule(tx_napi);
3032 		}
3033 	}
3034 
3035 	return status;
3036 }
3037 
3038 /**
3039  * stmmac_dma_interrupt - DMA ISR
3040  * @priv: driver private structure
3041  * Description: this is the DMA ISR. It is called by the main ISR.
3042  * It calls the dwmac dma routine and schedule poll method in case of some
3043  * work can be done.
3044  */
stmmac_dma_interrupt(struct stmmac_priv * priv)3045 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3046 {
3047 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3048 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3049 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
3050 				tx_channel_count : rx_channel_count;
3051 	u32 chan;
3052 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3053 
3054 	/* Make sure we never check beyond our status buffer. */
3055 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3056 		channels_to_check = ARRAY_SIZE(status);
3057 
3058 	for (chan = 0; chan < channels_to_check; chan++)
3059 		status[chan] = stmmac_napi_check(priv, chan,
3060 						 DMA_DIR_RXTX);
3061 
3062 	for (chan = 0; chan < tx_channel_count; chan++) {
3063 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3064 			/* Try to bump up the dma threshold on this failure */
3065 			stmmac_bump_dma_threshold(priv, chan);
3066 		} else if (unlikely(status[chan] == tx_hard_error)) {
3067 			stmmac_tx_err(priv, chan);
3068 		}
3069 	}
3070 }
3071 
3072 /**
3073  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3074  * @priv: driver private structure
3075  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
3076  */
stmmac_mmc_setup(struct stmmac_priv * priv)3077 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3078 {
3079 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3080 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3081 
3082 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3083 
3084 	if (priv->dma_cap.rmon) {
3085 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3086 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3087 	} else
3088 		netdev_info(priv->dev, "No MAC Management Counters available\n");
3089 }
3090 
3091 /**
3092  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3093  * @priv: driver private structure
3094  * Description:
3095  *  new GMAC chip generations have a new register to indicate the
3096  *  presence of the optional feature/functions.
3097  *  This can be also used to override the value passed through the
3098  *  platform and necessary for old MAC10/100 and GMAC chips.
3099  */
stmmac_get_hw_features(struct stmmac_priv * priv)3100 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3101 {
3102 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3103 }
3104 
3105 /**
3106  * stmmac_check_ether_addr - check if the MAC addr is valid
3107  * @priv: driver private structure
3108  * Description:
3109  * it is to verify if the MAC address is valid, in case of failures it
3110  * generates a random MAC address
3111  */
stmmac_check_ether_addr(struct stmmac_priv * priv)3112 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3113 {
3114 	u8 addr[ETH_ALEN];
3115 
3116 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3117 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3118 		if (is_valid_ether_addr(addr))
3119 			eth_hw_addr_set(priv->dev, addr);
3120 		else
3121 			eth_hw_addr_random(priv->dev);
3122 		dev_info(priv->device, "device MAC address %pM\n",
3123 			 priv->dev->dev_addr);
3124 	}
3125 }
3126 
stmmac_get_phy_intf_sel(phy_interface_t interface)3127 int stmmac_get_phy_intf_sel(phy_interface_t interface)
3128 {
3129 	int phy_intf_sel = -EINVAL;
3130 
3131 	if (interface == PHY_INTERFACE_MODE_MII ||
3132 	    interface == PHY_INTERFACE_MODE_GMII)
3133 		phy_intf_sel = PHY_INTF_SEL_GMII_MII;
3134 	else if (phy_interface_mode_is_rgmii(interface))
3135 		phy_intf_sel = PHY_INTF_SEL_RGMII;
3136 	else if (interface == PHY_INTERFACE_MODE_SGMII)
3137 		phy_intf_sel = PHY_INTF_SEL_SGMII;
3138 	else if (interface == PHY_INTERFACE_MODE_RMII)
3139 		phy_intf_sel = PHY_INTF_SEL_RMII;
3140 	else if (interface == PHY_INTERFACE_MODE_REVMII)
3141 		phy_intf_sel = PHY_INTF_SEL_REVMII;
3142 
3143 	return phy_intf_sel;
3144 }
3145 EXPORT_SYMBOL_GPL(stmmac_get_phy_intf_sel);
3146 
stmmac_prereset_configure(struct stmmac_priv * priv)3147 static int stmmac_prereset_configure(struct stmmac_priv *priv)
3148 {
3149 	struct plat_stmmacenet_data *plat_dat = priv->plat;
3150 	phy_interface_t interface;
3151 	int phy_intf_sel, ret;
3152 
3153 	if (!plat_dat->set_phy_intf_sel)
3154 		return 0;
3155 
3156 	interface = plat_dat->phy_interface;
3157 	phy_intf_sel = stmmac_get_phy_intf_sel(interface);
3158 	if (phy_intf_sel < 0) {
3159 		netdev_err(priv->dev,
3160 			   "failed to get phy_intf_sel for %s: %pe\n",
3161 			   phy_modes(interface), ERR_PTR(phy_intf_sel));
3162 		return phy_intf_sel;
3163 	}
3164 
3165 	ret = plat_dat->set_phy_intf_sel(plat_dat->bsp_priv, phy_intf_sel);
3166 	if (ret == -EINVAL)
3167 		netdev_err(priv->dev, "platform does not support %s\n",
3168 			   phy_modes(interface));
3169 	else if (ret < 0)
3170 		netdev_err(priv->dev,
3171 			   "platform failed to set interface %s: %pe\n",
3172 			   phy_modes(interface), ERR_PTR(ret));
3173 
3174 	return ret;
3175 }
3176 
3177 /**
3178  * stmmac_init_dma_engine - DMA init.
3179  * @priv: driver private structure
3180  * Description:
3181  * It inits the DMA invoking the specific MAC/GMAC callback.
3182  * Some DMA parameters can be passed from the platform;
3183  * in case of these are not passed a default is kept for the MAC or GMAC.
3184  */
stmmac_init_dma_engine(struct stmmac_priv * priv)3185 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3186 {
3187 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3188 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3189 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3190 	struct stmmac_rx_queue *rx_q;
3191 	struct stmmac_tx_queue *tx_q;
3192 	u32 chan = 0;
3193 	int ret = 0;
3194 
3195 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3196 		netdev_err(priv->dev, "Invalid DMA configuration\n");
3197 		return -EINVAL;
3198 	}
3199 
3200 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3201 		priv->plat->dma_cfg->atds = 1;
3202 
3203 	ret = stmmac_prereset_configure(priv);
3204 	if (ret)
3205 		return ret;
3206 
3207 	ret = stmmac_reset(priv);
3208 	if (ret) {
3209 		netdev_err(priv->dev, "Failed to reset the dma\n");
3210 		return ret;
3211 	}
3212 
3213 	/* DMA Configuration */
3214 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3215 
3216 	if (priv->plat->axi)
3217 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3218 
3219 	/* DMA CSR Channel configuration */
3220 	for (chan = 0; chan < dma_csr_ch; chan++) {
3221 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3222 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3223 	}
3224 
3225 	/* DMA RX Channel Configuration */
3226 	for (chan = 0; chan < rx_channels_count; chan++) {
3227 		rx_q = &priv->dma_conf.rx_queue[chan];
3228 
3229 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3230 				    rx_q->dma_rx_phy, chan);
3231 
3232 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3233 				     (rx_q->buf_alloc_num *
3234 				      sizeof(struct dma_desc));
3235 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3236 				       rx_q->rx_tail_addr, chan);
3237 	}
3238 
3239 	/* DMA TX Channel Configuration */
3240 	for (chan = 0; chan < tx_channels_count; chan++) {
3241 		tx_q = &priv->dma_conf.tx_queue[chan];
3242 
3243 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3244 				    tx_q->dma_tx_phy, chan);
3245 
3246 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3247 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3248 				       tx_q->tx_tail_addr, chan);
3249 	}
3250 
3251 	return ret;
3252 }
3253 
stmmac_tx_timer_arm(struct stmmac_priv * priv,u32 queue)3254 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3255 {
3256 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3257 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3258 	struct stmmac_channel *ch;
3259 	struct napi_struct *napi;
3260 
3261 	if (!tx_coal_timer)
3262 		return;
3263 
3264 	ch = &priv->channel[tx_q->queue_index];
3265 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3266 
3267 	/* Arm timer only if napi is not already scheduled.
3268 	 * Try to cancel any timer if napi is scheduled, timer will be armed
3269 	 * again in the next scheduled napi.
3270 	 */
3271 	if (unlikely(!napi_is_scheduled(napi)))
3272 		hrtimer_start(&tx_q->txtimer,
3273 			      STMMAC_COAL_TIMER(tx_coal_timer),
3274 			      HRTIMER_MODE_REL);
3275 	else
3276 		hrtimer_try_to_cancel(&tx_q->txtimer);
3277 }
3278 
3279 /**
3280  * stmmac_tx_timer - mitigation sw timer for tx.
3281  * @t: data pointer
3282  * Description:
3283  * This is the timer handler to directly invoke the stmmac_tx_clean.
3284  */
stmmac_tx_timer(struct hrtimer * t)3285 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3286 {
3287 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3288 	struct stmmac_priv *priv = tx_q->priv_data;
3289 	struct stmmac_channel *ch;
3290 	struct napi_struct *napi;
3291 
3292 	ch = &priv->channel[tx_q->queue_index];
3293 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3294 
3295 	if (likely(napi_schedule_prep(napi))) {
3296 		unsigned long flags;
3297 
3298 		spin_lock_irqsave(&ch->lock, flags);
3299 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3300 		spin_unlock_irqrestore(&ch->lock, flags);
3301 		__napi_schedule(napi);
3302 	}
3303 
3304 	return HRTIMER_NORESTART;
3305 }
3306 
3307 /**
3308  * stmmac_init_coalesce - init mitigation options.
3309  * @priv: driver private structure
3310  * Description:
3311  * This inits the coalesce parameters: i.e. timer rate,
3312  * timer handler and default threshold used for enabling the
3313  * interrupt on completion bit.
3314  */
stmmac_init_coalesce(struct stmmac_priv * priv)3315 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3316 {
3317 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3318 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3319 	u32 chan;
3320 
3321 	for (chan = 0; chan < tx_channel_count; chan++) {
3322 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3323 
3324 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3325 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3326 
3327 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3328 	}
3329 
3330 	for (chan = 0; chan < rx_channel_count; chan++)
3331 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3332 }
3333 
stmmac_set_rings_length(struct stmmac_priv * priv)3334 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3335 {
3336 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3337 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3338 	u32 chan;
3339 
3340 	/* set TX ring length */
3341 	for (chan = 0; chan < tx_channels_count; chan++)
3342 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3343 				       (priv->dma_conf.dma_tx_size - 1), chan);
3344 
3345 	/* set RX ring length */
3346 	for (chan = 0; chan < rx_channels_count; chan++)
3347 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3348 				       (priv->dma_conf.dma_rx_size - 1), chan);
3349 }
3350 
3351 /**
3352  *  stmmac_set_tx_queue_weight - Set TX queue weight
3353  *  @priv: driver private structure
3354  *  Description: It is used for setting TX queues weight
3355  */
stmmac_set_tx_queue_weight(struct stmmac_priv * priv)3356 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3357 {
3358 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3359 	u32 weight;
3360 	u32 queue;
3361 
3362 	for (queue = 0; queue < tx_queues_count; queue++) {
3363 		weight = priv->plat->tx_queues_cfg[queue].weight;
3364 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3365 	}
3366 }
3367 
3368 /**
3369  *  stmmac_configure_cbs - Configure CBS in TX queue
3370  *  @priv: driver private structure
3371  *  Description: It is used for configuring CBS in AVB TX queues
3372  */
stmmac_configure_cbs(struct stmmac_priv * priv)3373 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3374 {
3375 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3376 	u32 mode_to_use;
3377 	u32 queue;
3378 
3379 	/* queue 0 is reserved for legacy traffic */
3380 	for (queue = 1; queue < tx_queues_count; queue++) {
3381 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3382 		if (mode_to_use == MTL_QUEUE_DCB)
3383 			continue;
3384 
3385 		stmmac_config_cbs(priv, priv->hw,
3386 				priv->plat->tx_queues_cfg[queue].send_slope,
3387 				priv->plat->tx_queues_cfg[queue].idle_slope,
3388 				priv->plat->tx_queues_cfg[queue].high_credit,
3389 				priv->plat->tx_queues_cfg[queue].low_credit,
3390 				queue);
3391 	}
3392 }
3393 
3394 /**
3395  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3396  *  @priv: driver private structure
3397  *  Description: It is used for mapping RX queues to RX dma channels
3398  */
stmmac_rx_queue_dma_chan_map(struct stmmac_priv * priv)3399 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3400 {
3401 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3402 	u32 queue;
3403 	u32 chan;
3404 
3405 	for (queue = 0; queue < rx_queues_count; queue++) {
3406 		chan = priv->plat->rx_queues_cfg[queue].chan;
3407 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3408 	}
3409 }
3410 
3411 /**
3412  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3413  *  @priv: driver private structure
3414  *  Description: It is used for configuring the RX Queue Priority
3415  */
stmmac_mac_config_rx_queues_prio(struct stmmac_priv * priv)3416 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3417 {
3418 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3419 	u32 queue;
3420 	u32 prio;
3421 
3422 	for (queue = 0; queue < rx_queues_count; queue++) {
3423 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3424 			continue;
3425 
3426 		prio = priv->plat->rx_queues_cfg[queue].prio;
3427 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3428 	}
3429 }
3430 
3431 /**
3432  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3433  *  @priv: driver private structure
3434  *  Description: It is used for configuring the TX Queue Priority
3435  */
stmmac_mac_config_tx_queues_prio(struct stmmac_priv * priv)3436 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3437 {
3438 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3439 	u32 queue;
3440 	u32 prio;
3441 
3442 	for (queue = 0; queue < tx_queues_count; queue++) {
3443 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3444 			continue;
3445 
3446 		prio = priv->plat->tx_queues_cfg[queue].prio;
3447 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3448 	}
3449 }
3450 
3451 /**
3452  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3453  *  @priv: driver private structure
3454  *  Description: It is used for configuring the RX queue routing
3455  */
stmmac_mac_config_rx_queues_routing(struct stmmac_priv * priv)3456 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3457 {
3458 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3459 	u32 queue;
3460 	u8 packet;
3461 
3462 	for (queue = 0; queue < rx_queues_count; queue++) {
3463 		/* no specific packet type routing specified for the queue */
3464 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3465 			continue;
3466 
3467 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3468 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3469 	}
3470 }
3471 
stmmac_mac_config_rss(struct stmmac_priv * priv)3472 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3473 {
3474 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3475 		priv->rss.enable = false;
3476 		return;
3477 	}
3478 
3479 	if (priv->dev->features & NETIF_F_RXHASH)
3480 		priv->rss.enable = true;
3481 	else
3482 		priv->rss.enable = false;
3483 
3484 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3485 			     priv->plat->rx_queues_to_use);
3486 }
3487 
3488 /**
3489  *  stmmac_mtl_configuration - Configure MTL
3490  *  @priv: driver private structure
3491  *  Description: It is used for configurring MTL
3492  */
stmmac_mtl_configuration(struct stmmac_priv * priv)3493 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3494 {
3495 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3496 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3497 
3498 	if (tx_queues_count > 1)
3499 		stmmac_set_tx_queue_weight(priv);
3500 
3501 	/* Configure MTL RX algorithms */
3502 	if (rx_queues_count > 1)
3503 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3504 				priv->plat->rx_sched_algorithm);
3505 
3506 	/* Configure MTL TX algorithms */
3507 	if (tx_queues_count > 1)
3508 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3509 				priv->plat->tx_sched_algorithm);
3510 
3511 	/* Configure CBS in AVB TX queues */
3512 	if (tx_queues_count > 1)
3513 		stmmac_configure_cbs(priv);
3514 
3515 	/* Map RX MTL to DMA channels */
3516 	stmmac_rx_queue_dma_chan_map(priv);
3517 
3518 	/* Enable MAC RX Queues */
3519 	stmmac_mac_enable_rx_queues(priv);
3520 
3521 	/* Set RX priorities */
3522 	if (rx_queues_count > 1)
3523 		stmmac_mac_config_rx_queues_prio(priv);
3524 
3525 	/* Set TX priorities */
3526 	if (tx_queues_count > 1)
3527 		stmmac_mac_config_tx_queues_prio(priv);
3528 
3529 	/* Set RX routing */
3530 	if (rx_queues_count > 1)
3531 		stmmac_mac_config_rx_queues_routing(priv);
3532 
3533 	/* Receive Side Scaling */
3534 	if (rx_queues_count > 1)
3535 		stmmac_mac_config_rss(priv);
3536 }
3537 
stmmac_safety_feat_configuration(struct stmmac_priv * priv)3538 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3539 {
3540 	if (priv->dma_cap.asp) {
3541 		netdev_info(priv->dev, "Enabling Safety Features\n");
3542 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3543 					  priv->plat->safety_feat_cfg);
3544 	} else {
3545 		netdev_info(priv->dev, "No Safety Features support found\n");
3546 	}
3547 }
3548 
3549 /**
3550  * stmmac_hw_setup - setup mac in a usable state.
3551  *  @dev : pointer to the device structure.
3552  *  Description:
3553  *  this is the main function to setup the HW in a usable state because the
3554  *  dma engine is reset, the core registers are configured (e.g. AXI,
3555  *  Checksum features, timers). The DMA is ready to start receiving and
3556  *  transmitting.
3557  *  Return value:
3558  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3559  *  file on failure.
3560  */
stmmac_hw_setup(struct net_device * dev)3561 static int stmmac_hw_setup(struct net_device *dev)
3562 {
3563 	struct stmmac_priv *priv = netdev_priv(dev);
3564 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3565 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3566 	bool sph_en;
3567 	u32 chan;
3568 	int ret;
3569 
3570 	/* Make sure RX clock is enabled */
3571 	if (priv->hw->phylink_pcs)
3572 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3573 
3574 	/* Note that clk_rx_i must be running for reset to complete. This
3575 	 * clock may also be required when setting the MAC address.
3576 	 *
3577 	 * Block the receive clock stop for LPI mode at the PHY in case
3578 	 * the link is established with EEE mode active.
3579 	 */
3580 	phylink_rx_clk_stop_block(priv->phylink);
3581 
3582 	/* DMA initialization and SW reset */
3583 	ret = stmmac_init_dma_engine(priv);
3584 	if (ret < 0) {
3585 		phylink_rx_clk_stop_unblock(priv->phylink);
3586 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3587 			   __func__);
3588 		return ret;
3589 	}
3590 
3591 	/* Copy the MAC addr into the HW  */
3592 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3593 	phylink_rx_clk_stop_unblock(priv->phylink);
3594 
3595 	/* Initialize the MAC Core */
3596 	stmmac_core_init(priv, priv->hw, dev);
3597 
3598 	/* Initialize MTL*/
3599 	stmmac_mtl_configuration(priv);
3600 
3601 	/* Initialize Safety Features */
3602 	stmmac_safety_feat_configuration(priv);
3603 
3604 	ret = stmmac_rx_ipc(priv, priv->hw);
3605 	if (!ret) {
3606 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3607 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3608 		priv->hw->rx_csum = 0;
3609 	}
3610 
3611 	/* Enable the MAC Rx/Tx */
3612 	stmmac_mac_set(priv, priv->ioaddr, true);
3613 
3614 	/* Set the HW DMA mode and the COE */
3615 	stmmac_dma_operation_mode(priv);
3616 
3617 	stmmac_mmc_setup(priv);
3618 
3619 	if (priv->use_riwt) {
3620 		u32 queue;
3621 
3622 		for (queue = 0; queue < rx_cnt; queue++) {
3623 			if (!priv->rx_riwt[queue])
3624 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3625 
3626 			stmmac_rx_watchdog(priv, priv->ioaddr,
3627 					   priv->rx_riwt[queue], queue);
3628 		}
3629 	}
3630 
3631 	/* set TX and RX rings length */
3632 	stmmac_set_rings_length(priv);
3633 
3634 	/* Enable TSO */
3635 	if (priv->tso) {
3636 		for (chan = 0; chan < tx_cnt; chan++) {
3637 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3638 
3639 			/* TSO and TBS cannot co-exist */
3640 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3641 				continue;
3642 
3643 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3644 		}
3645 	}
3646 
3647 	/* Enable Split Header */
3648 	sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
3649 	for (chan = 0; chan < rx_cnt; chan++)
3650 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3651 
3652 
3653 	/* VLAN Tag Insertion */
3654 	if (priv->dma_cap.vlins)
3655 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3656 
3657 	/* TBS */
3658 	for (chan = 0; chan < tx_cnt; chan++) {
3659 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3660 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3661 
3662 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3663 	}
3664 
3665 	/* Configure real RX and TX queues */
3666 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3667 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3668 
3669 	/* Start the ball rolling... */
3670 	stmmac_start_all_dma(priv);
3671 
3672 	phylink_rx_clk_stop_block(priv->phylink);
3673 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3674 	phylink_rx_clk_stop_unblock(priv->phylink);
3675 
3676 	return 0;
3677 }
3678 
stmmac_free_irq(struct net_device * dev,enum request_irq_err irq_err,int irq_idx)3679 static void stmmac_free_irq(struct net_device *dev,
3680 			    enum request_irq_err irq_err, int irq_idx)
3681 {
3682 	struct stmmac_priv *priv = netdev_priv(dev);
3683 	int j;
3684 
3685 	switch (irq_err) {
3686 	case REQ_IRQ_ERR_ALL:
3687 		irq_idx = priv->plat->tx_queues_to_use;
3688 		fallthrough;
3689 	case REQ_IRQ_ERR_TX:
3690 		for (j = irq_idx - 1; j >= 0; j--) {
3691 			if (priv->tx_irq[j] > 0) {
3692 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3693 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3694 			}
3695 		}
3696 		irq_idx = priv->plat->rx_queues_to_use;
3697 		fallthrough;
3698 	case REQ_IRQ_ERR_RX:
3699 		for (j = irq_idx - 1; j >= 0; j--) {
3700 			if (priv->rx_irq[j] > 0) {
3701 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3702 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3703 			}
3704 		}
3705 
3706 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3707 			free_irq(priv->sfty_ue_irq, dev);
3708 		fallthrough;
3709 	case REQ_IRQ_ERR_SFTY_UE:
3710 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3711 			free_irq(priv->sfty_ce_irq, dev);
3712 		fallthrough;
3713 	case REQ_IRQ_ERR_SFTY_CE:
3714 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3715 			free_irq(priv->lpi_irq, dev);
3716 		fallthrough;
3717 	case REQ_IRQ_ERR_LPI:
3718 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3719 			free_irq(priv->wol_irq, dev);
3720 		fallthrough;
3721 	case REQ_IRQ_ERR_SFTY:
3722 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3723 			free_irq(priv->sfty_irq, dev);
3724 		fallthrough;
3725 	case REQ_IRQ_ERR_WOL:
3726 		free_irq(dev->irq, dev);
3727 		fallthrough;
3728 	case REQ_IRQ_ERR_MAC:
3729 	case REQ_IRQ_ERR_NO:
3730 		/* If MAC IRQ request error, no more IRQ to free */
3731 		break;
3732 	}
3733 }
3734 
stmmac_request_irq_multi_msi(struct net_device * dev)3735 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3736 {
3737 	struct stmmac_priv *priv = netdev_priv(dev);
3738 	enum request_irq_err irq_err;
3739 	int irq_idx = 0;
3740 	char *int_name;
3741 	int ret;
3742 	int i;
3743 
3744 	/* For common interrupt */
3745 	int_name = priv->int_name_mac;
3746 	sprintf(int_name, "%s:%s", dev->name, "mac");
3747 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3748 			  0, int_name, dev);
3749 	if (unlikely(ret < 0)) {
3750 		netdev_err(priv->dev,
3751 			   "%s: alloc mac MSI %d (error: %d)\n",
3752 			   __func__, dev->irq, ret);
3753 		irq_err = REQ_IRQ_ERR_MAC;
3754 		goto irq_error;
3755 	}
3756 
3757 	/* Request the Wake IRQ in case of another line
3758 	 * is used for WoL
3759 	 */
3760 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3761 		int_name = priv->int_name_wol;
3762 		sprintf(int_name, "%s:%s", dev->name, "wol");
3763 		ret = request_irq(priv->wol_irq,
3764 				  stmmac_mac_interrupt,
3765 				  0, int_name, dev);
3766 		if (unlikely(ret < 0)) {
3767 			netdev_err(priv->dev,
3768 				   "%s: alloc wol MSI %d (error: %d)\n",
3769 				   __func__, priv->wol_irq, ret);
3770 			irq_err = REQ_IRQ_ERR_WOL;
3771 			goto irq_error;
3772 		}
3773 	}
3774 
3775 	/* Request the LPI IRQ in case of another line
3776 	 * is used for LPI
3777 	 */
3778 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3779 		int_name = priv->int_name_lpi;
3780 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3781 		ret = request_irq(priv->lpi_irq,
3782 				  stmmac_mac_interrupt,
3783 				  0, int_name, dev);
3784 		if (unlikely(ret < 0)) {
3785 			netdev_err(priv->dev,
3786 				   "%s: alloc lpi MSI %d (error: %d)\n",
3787 				   __func__, priv->lpi_irq, ret);
3788 			irq_err = REQ_IRQ_ERR_LPI;
3789 			goto irq_error;
3790 		}
3791 	}
3792 
3793 	/* Request the common Safety Feature Correctible/Uncorrectible
3794 	 * Error line in case of another line is used
3795 	 */
3796 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3797 		int_name = priv->int_name_sfty;
3798 		sprintf(int_name, "%s:%s", dev->name, "safety");
3799 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3800 				  0, int_name, dev);
3801 		if (unlikely(ret < 0)) {
3802 			netdev_err(priv->dev,
3803 				   "%s: alloc sfty MSI %d (error: %d)\n",
3804 				   __func__, priv->sfty_irq, ret);
3805 			irq_err = REQ_IRQ_ERR_SFTY;
3806 			goto irq_error;
3807 		}
3808 	}
3809 
3810 	/* Request the Safety Feature Correctible Error line in
3811 	 * case of another line is used
3812 	 */
3813 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3814 		int_name = priv->int_name_sfty_ce;
3815 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3816 		ret = request_irq(priv->sfty_ce_irq,
3817 				  stmmac_safety_interrupt,
3818 				  0, int_name, dev);
3819 		if (unlikely(ret < 0)) {
3820 			netdev_err(priv->dev,
3821 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3822 				   __func__, priv->sfty_ce_irq, ret);
3823 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3824 			goto irq_error;
3825 		}
3826 	}
3827 
3828 	/* Request the Safety Feature Uncorrectible Error line in
3829 	 * case of another line is used
3830 	 */
3831 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3832 		int_name = priv->int_name_sfty_ue;
3833 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3834 		ret = request_irq(priv->sfty_ue_irq,
3835 				  stmmac_safety_interrupt,
3836 				  0, int_name, dev);
3837 		if (unlikely(ret < 0)) {
3838 			netdev_err(priv->dev,
3839 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3840 				   __func__, priv->sfty_ue_irq, ret);
3841 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3842 			goto irq_error;
3843 		}
3844 	}
3845 
3846 	/* Request Rx MSI irq */
3847 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3848 		if (i >= MTL_MAX_RX_QUEUES)
3849 			break;
3850 		if (priv->rx_irq[i] == 0)
3851 			continue;
3852 
3853 		int_name = priv->int_name_rx_irq[i];
3854 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3855 		ret = request_irq(priv->rx_irq[i],
3856 				  stmmac_msi_intr_rx,
3857 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3858 		if (unlikely(ret < 0)) {
3859 			netdev_err(priv->dev,
3860 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3861 				   __func__, i, priv->rx_irq[i], ret);
3862 			irq_err = REQ_IRQ_ERR_RX;
3863 			irq_idx = i;
3864 			goto irq_error;
3865 		}
3866 		irq_set_affinity_hint(priv->rx_irq[i],
3867 				      cpumask_of(i % num_online_cpus()));
3868 	}
3869 
3870 	/* Request Tx MSI irq */
3871 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3872 		if (i >= MTL_MAX_TX_QUEUES)
3873 			break;
3874 		if (priv->tx_irq[i] == 0)
3875 			continue;
3876 
3877 		int_name = priv->int_name_tx_irq[i];
3878 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3879 		ret = request_irq(priv->tx_irq[i],
3880 				  stmmac_msi_intr_tx,
3881 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3882 		if (unlikely(ret < 0)) {
3883 			netdev_err(priv->dev,
3884 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3885 				   __func__, i, priv->tx_irq[i], ret);
3886 			irq_err = REQ_IRQ_ERR_TX;
3887 			irq_idx = i;
3888 			goto irq_error;
3889 		}
3890 		irq_set_affinity_hint(priv->tx_irq[i],
3891 				      cpumask_of(i % num_online_cpus()));
3892 	}
3893 
3894 	return 0;
3895 
3896 irq_error:
3897 	stmmac_free_irq(dev, irq_err, irq_idx);
3898 	return ret;
3899 }
3900 
stmmac_request_irq_single(struct net_device * dev)3901 static int stmmac_request_irq_single(struct net_device *dev)
3902 {
3903 	struct stmmac_priv *priv = netdev_priv(dev);
3904 	enum request_irq_err irq_err;
3905 	int ret;
3906 
3907 	ret = request_irq(dev->irq, stmmac_interrupt,
3908 			  IRQF_SHARED, dev->name, dev);
3909 	if (unlikely(ret < 0)) {
3910 		netdev_err(priv->dev,
3911 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3912 			   __func__, dev->irq, ret);
3913 		irq_err = REQ_IRQ_ERR_MAC;
3914 		goto irq_error;
3915 	}
3916 
3917 	/* Request the Wake IRQ in case of another line
3918 	 * is used for WoL
3919 	 */
3920 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3921 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3922 				  IRQF_SHARED, dev->name, dev);
3923 		if (unlikely(ret < 0)) {
3924 			netdev_err(priv->dev,
3925 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3926 				   __func__, priv->wol_irq, ret);
3927 			irq_err = REQ_IRQ_ERR_WOL;
3928 			goto irq_error;
3929 		}
3930 	}
3931 
3932 	/* Request the IRQ lines */
3933 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3934 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3935 				  IRQF_SHARED, dev->name, dev);
3936 		if (unlikely(ret < 0)) {
3937 			netdev_err(priv->dev,
3938 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3939 				   __func__, priv->lpi_irq, ret);
3940 			irq_err = REQ_IRQ_ERR_LPI;
3941 			goto irq_error;
3942 		}
3943 	}
3944 
3945 	/* Request the common Safety Feature Correctible/Uncorrectible
3946 	 * Error line in case of another line is used
3947 	 */
3948 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3949 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3950 				  IRQF_SHARED, dev->name, dev);
3951 		if (unlikely(ret < 0)) {
3952 			netdev_err(priv->dev,
3953 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3954 				   __func__, priv->sfty_irq, ret);
3955 			irq_err = REQ_IRQ_ERR_SFTY;
3956 			goto irq_error;
3957 		}
3958 	}
3959 
3960 	return 0;
3961 
3962 irq_error:
3963 	stmmac_free_irq(dev, irq_err, 0);
3964 	return ret;
3965 }
3966 
stmmac_request_irq(struct net_device * dev)3967 static int stmmac_request_irq(struct net_device *dev)
3968 {
3969 	struct stmmac_priv *priv = netdev_priv(dev);
3970 	int ret;
3971 
3972 	/* Request the IRQ lines */
3973 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3974 		ret = stmmac_request_irq_multi_msi(dev);
3975 	else
3976 		ret = stmmac_request_irq_single(dev);
3977 
3978 	return ret;
3979 }
3980 
3981 /**
3982  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3983  *  @priv: driver private structure
3984  *  @mtu: MTU to setup the dma queue and buf with
3985  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3986  *  Allocate the Tx/Rx DMA queue and init them.
3987  *  Return value:
3988  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3989  */
3990 static struct stmmac_dma_conf *
stmmac_setup_dma_desc(struct stmmac_priv * priv,unsigned int mtu)3991 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3992 {
3993 	struct stmmac_dma_conf *dma_conf;
3994 	int chan, bfsize, ret;
3995 
3996 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3997 	if (!dma_conf) {
3998 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3999 			   __func__);
4000 		return ERR_PTR(-ENOMEM);
4001 	}
4002 
4003 	/* Returns 0 or BUF_SIZE_16KiB if mtu > 8KiB and dwmac4 or ring mode */
4004 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
4005 	if (bfsize < 0)
4006 		bfsize = 0;
4007 
4008 	if (bfsize < BUF_SIZE_16KiB)
4009 		bfsize = stmmac_set_bfsize(mtu);
4010 
4011 	dma_conf->dma_buf_sz = bfsize;
4012 	/* Chose the tx/rx size from the already defined one in the
4013 	 * priv struct. (if defined)
4014 	 */
4015 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
4016 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
4017 
4018 	if (!dma_conf->dma_tx_size)
4019 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
4020 	if (!dma_conf->dma_rx_size)
4021 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
4022 
4023 	/* Earlier check for TBS */
4024 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
4025 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
4026 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
4027 
4028 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
4029 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
4030 	}
4031 
4032 	ret = alloc_dma_desc_resources(priv, dma_conf);
4033 	if (ret < 0) {
4034 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
4035 			   __func__);
4036 		goto alloc_error;
4037 	}
4038 
4039 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
4040 	if (ret < 0) {
4041 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4042 			   __func__);
4043 		goto init_error;
4044 	}
4045 
4046 	return dma_conf;
4047 
4048 init_error:
4049 	free_dma_desc_resources(priv, dma_conf);
4050 alloc_error:
4051 	kfree(dma_conf);
4052 	return ERR_PTR(ret);
4053 }
4054 
4055 /**
4056  *  __stmmac_open - open entry point of the driver
4057  *  @dev : pointer to the device structure.
4058  *  @dma_conf :  structure to take the dma data
4059  *  Description:
4060  *  This function is the open entry point of the driver.
4061  *  Return value:
4062  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4063  *  file on failure.
4064  */
__stmmac_open(struct net_device * dev,struct stmmac_dma_conf * dma_conf)4065 static int __stmmac_open(struct net_device *dev,
4066 			 struct stmmac_dma_conf *dma_conf)
4067 {
4068 	struct stmmac_priv *priv = netdev_priv(dev);
4069 	u32 chan;
4070 	int ret;
4071 
4072 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
4073 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4074 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4075 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4076 
4077 	stmmac_reset_queues_param(priv);
4078 
4079 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4080 	    priv->plat->serdes_powerup) {
4081 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4082 		if (ret < 0) {
4083 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4084 				   __func__);
4085 			goto init_error;
4086 		}
4087 	}
4088 
4089 	ret = stmmac_hw_setup(dev);
4090 	if (ret < 0) {
4091 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4092 		goto init_error;
4093 	}
4094 
4095 	stmmac_setup_ptp(priv);
4096 
4097 	stmmac_init_coalesce(priv);
4098 
4099 	phylink_start(priv->phylink);
4100 
4101 	ret = stmmac_request_irq(dev);
4102 	if (ret)
4103 		goto irq_error;
4104 
4105 	stmmac_enable_all_queues(priv);
4106 	netif_tx_start_all_queues(priv->dev);
4107 	stmmac_enable_all_dma_irq(priv);
4108 
4109 	return 0;
4110 
4111 irq_error:
4112 	phylink_stop(priv->phylink);
4113 
4114 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4115 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4116 
4117 	stmmac_release_ptp(priv);
4118 init_error:
4119 	return ret;
4120 }
4121 
stmmac_open(struct net_device * dev)4122 static int stmmac_open(struct net_device *dev)
4123 {
4124 	struct stmmac_priv *priv = netdev_priv(dev);
4125 	struct stmmac_dma_conf *dma_conf;
4126 	int ret;
4127 
4128 	/* Initialise the tx lpi timer, converting from msec to usec */
4129 	if (!priv->tx_lpi_timer)
4130 		priv->tx_lpi_timer = eee_timer * 1000;
4131 
4132 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4133 	if (IS_ERR(dma_conf))
4134 		return PTR_ERR(dma_conf);
4135 
4136 	ret = pm_runtime_resume_and_get(priv->device);
4137 	if (ret < 0)
4138 		goto err_dma_resources;
4139 
4140 	ret = stmmac_init_phy(dev);
4141 	if (ret)
4142 		goto err_runtime_pm;
4143 
4144 	ret = __stmmac_open(dev, dma_conf);
4145 	if (ret)
4146 		goto err_disconnect_phy;
4147 
4148 	kfree(dma_conf);
4149 
4150 	/* We may have called phylink_speed_down before */
4151 	phylink_speed_up(priv->phylink);
4152 
4153 	return ret;
4154 
4155 err_disconnect_phy:
4156 	phylink_disconnect_phy(priv->phylink);
4157 err_runtime_pm:
4158 	pm_runtime_put(priv->device);
4159 err_dma_resources:
4160 	free_dma_desc_resources(priv, dma_conf);
4161 	kfree(dma_conf);
4162 	return ret;
4163 }
4164 
__stmmac_release(struct net_device * dev)4165 static void __stmmac_release(struct net_device *dev)
4166 {
4167 	struct stmmac_priv *priv = netdev_priv(dev);
4168 	u32 chan;
4169 
4170 	/* Stop and disconnect the PHY */
4171 	phylink_stop(priv->phylink);
4172 
4173 	stmmac_disable_all_queues(priv);
4174 
4175 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4176 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4177 
4178 	netif_tx_disable(dev);
4179 
4180 	/* Free the IRQ lines */
4181 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4182 
4183 	/* Stop TX/RX DMA and clear the descriptors */
4184 	stmmac_stop_all_dma(priv);
4185 
4186 	/* Release and free the Rx/Tx resources */
4187 	free_dma_desc_resources(priv, &priv->dma_conf);
4188 
4189 	/* Powerdown Serdes if there is */
4190 	if (priv->plat->serdes_powerdown)
4191 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4192 
4193 	stmmac_release_ptp(priv);
4194 
4195 	if (stmmac_fpe_supported(priv))
4196 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4197 }
4198 
4199 /**
4200  *  stmmac_release - close entry point of the driver
4201  *  @dev : device pointer.
4202  *  Description:
4203  *  This is the stop entry point of the driver.
4204  */
stmmac_release(struct net_device * dev)4205 static int stmmac_release(struct net_device *dev)
4206 {
4207 	struct stmmac_priv *priv = netdev_priv(dev);
4208 
4209 	/* If the PHY or MAC has WoL enabled, then the PHY will not be
4210 	 * suspended when phylink_stop() is called below. Set the PHY
4211 	 * to its slowest speed to save power.
4212 	 */
4213 	if (device_may_wakeup(priv->device))
4214 		phylink_speed_down(priv->phylink, false);
4215 
4216 	__stmmac_release(dev);
4217 
4218 	phylink_disconnect_phy(priv->phylink);
4219 	pm_runtime_put(priv->device);
4220 
4221 	return 0;
4222 }
4223 
stmmac_vlan_insert(struct stmmac_priv * priv,struct sk_buff * skb,struct stmmac_tx_queue * tx_q)4224 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4225 			       struct stmmac_tx_queue *tx_q)
4226 {
4227 	struct dma_desc *p;
4228 	u16 tag = 0x0;
4229 
4230 	if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
4231 		return false;
4232 
4233 	tag = skb_vlan_tag_get(skb);
4234 
4235 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4236 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4237 	else
4238 		p = &tx_q->dma_tx[tx_q->cur_tx];
4239 
4240 	if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
4241 		return false;
4242 
4243 	stmmac_set_tx_owner(priv, p);
4244 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4245 	return true;
4246 }
4247 
4248 /**
4249  *  stmmac_tso_allocator - close entry point of the driver
4250  *  @priv: driver private structure
4251  *  @des: buffer start address
4252  *  @total_len: total length to fill in descriptors
4253  *  @last_segment: condition for the last descriptor
4254  *  @queue: TX queue index
4255  *  Description:
4256  *  This function fills descriptor and request new descriptors according to
4257  *  buffer length to fill
4258  */
stmmac_tso_allocator(struct stmmac_priv * priv,dma_addr_t des,int total_len,bool last_segment,u32 queue)4259 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4260 				 int total_len, bool last_segment, u32 queue)
4261 {
4262 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4263 	struct dma_desc *desc;
4264 	u32 buff_size;
4265 	int tmp_len;
4266 
4267 	tmp_len = total_len;
4268 
4269 	while (tmp_len > 0) {
4270 		dma_addr_t curr_addr;
4271 
4272 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4273 						priv->dma_conf.dma_tx_size);
4274 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4275 
4276 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4277 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4278 		else
4279 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4280 
4281 		curr_addr = des + (total_len - tmp_len);
4282 		stmmac_set_desc_addr(priv, desc, curr_addr);
4283 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4284 			    TSO_MAX_BUFF_SIZE : tmp_len;
4285 
4286 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4287 				0, 1,
4288 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4289 				0, 0);
4290 
4291 		tmp_len -= TSO_MAX_BUFF_SIZE;
4292 	}
4293 }
4294 
stmmac_flush_tx_descriptors(struct stmmac_priv * priv,int queue)4295 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4296 {
4297 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4298 	int desc_size;
4299 
4300 	if (likely(priv->extend_desc))
4301 		desc_size = sizeof(struct dma_extended_desc);
4302 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4303 		desc_size = sizeof(struct dma_edesc);
4304 	else
4305 		desc_size = sizeof(struct dma_desc);
4306 
4307 	/* The own bit must be the latest setting done when prepare the
4308 	 * descriptor and then barrier is needed to make sure that
4309 	 * all is coherent before granting the DMA engine.
4310 	 */
4311 	wmb();
4312 
4313 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4314 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4315 }
4316 
4317 /**
4318  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4319  *  @skb : the socket buffer
4320  *  @dev : device pointer
4321  *  Description: this is the transmit function that is called on TSO frames
4322  *  (support available on GMAC4 and newer chips).
4323  *  Diagram below show the ring programming in case of TSO frames:
4324  *
4325  *  First Descriptor
4326  *   --------
4327  *   | DES0 |---> buffer1 = L2/L3/L4 header
4328  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4329  *   |      |     width is 32-bit, but we never use it.
4330  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4331  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4332  *   |      |     or 48-bit, and we always use it.
4333  *   | DES2 |---> buffer1 len
4334  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4335  *   --------
4336  *   --------
4337  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4338  *   | DES1 |---> same as the First Descriptor
4339  *   | DES2 |---> buffer1 len
4340  *   | DES3 |
4341  *   --------
4342  *	|
4343  *     ...
4344  *	|
4345  *   --------
4346  *   | DES0 |---> buffer1 = Split TCP Payload
4347  *   | DES1 |---> same as the First Descriptor
4348  *   | DES2 |---> buffer1 len
4349  *   | DES3 |
4350  *   --------
4351  *
4352  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4353  */
stmmac_tso_xmit(struct sk_buff * skb,struct net_device * dev)4354 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4355 {
4356 	struct dma_desc *desc, *first, *mss_desc = NULL;
4357 	struct stmmac_priv *priv = netdev_priv(dev);
4358 	unsigned int first_entry, tx_packets;
4359 	struct stmmac_txq_stats *txq_stats;
4360 	struct stmmac_tx_queue *tx_q;
4361 	u32 pay_len, mss, queue;
4362 	int i, first_tx, nfrags;
4363 	u8 proto_hdr_len, hdr;
4364 	dma_addr_t des;
4365 	bool set_ic;
4366 
4367 	/* Always insert VLAN tag to SKB payload for TSO frames.
4368 	 *
4369 	 * Never insert VLAN tag by HW, since segments splited by
4370 	 * TSO engine will be un-tagged by mistake.
4371 	 */
4372 	if (skb_vlan_tag_present(skb)) {
4373 		skb = __vlan_hwaccel_push_inside(skb);
4374 		if (unlikely(!skb)) {
4375 			priv->xstats.tx_dropped++;
4376 			return NETDEV_TX_OK;
4377 		}
4378 	}
4379 
4380 	nfrags = skb_shinfo(skb)->nr_frags;
4381 	queue = skb_get_queue_mapping(skb);
4382 
4383 	tx_q = &priv->dma_conf.tx_queue[queue];
4384 	txq_stats = &priv->xstats.txq_stats[queue];
4385 	first_tx = tx_q->cur_tx;
4386 
4387 	/* Compute header lengths */
4388 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4389 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4390 		hdr = sizeof(struct udphdr);
4391 	} else {
4392 		proto_hdr_len = skb_tcp_all_headers(skb);
4393 		hdr = tcp_hdrlen(skb);
4394 	}
4395 
4396 	/* Desc availability based on threshold should be enough safe */
4397 	if (unlikely(stmmac_tx_avail(priv, queue) <
4398 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4399 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4400 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4401 								queue));
4402 			/* This is a hard error, log it. */
4403 			netdev_err(priv->dev,
4404 				   "%s: Tx Ring full when queue awake\n",
4405 				   __func__);
4406 		}
4407 		return NETDEV_TX_BUSY;
4408 	}
4409 
4410 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4411 
4412 	mss = skb_shinfo(skb)->gso_size;
4413 
4414 	/* set new MSS value if needed */
4415 	if (mss != tx_q->mss) {
4416 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4417 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4418 		else
4419 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4420 
4421 		stmmac_set_mss(priv, mss_desc, mss);
4422 		tx_q->mss = mss;
4423 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4424 						priv->dma_conf.dma_tx_size);
4425 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4426 	}
4427 
4428 	if (netif_msg_tx_queued(priv)) {
4429 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4430 			__func__, hdr, proto_hdr_len, pay_len, mss);
4431 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4432 			skb->data_len);
4433 	}
4434 
4435 	first_entry = tx_q->cur_tx;
4436 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4437 
4438 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4439 		desc = &tx_q->dma_entx[first_entry].basic;
4440 	else
4441 		desc = &tx_q->dma_tx[first_entry];
4442 	first = desc;
4443 
4444 	/* first descriptor: fill Headers on Buf1 */
4445 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4446 			     DMA_TO_DEVICE);
4447 	if (dma_mapping_error(priv->device, des))
4448 		goto dma_map_err;
4449 
4450 	stmmac_set_desc_addr(priv, first, des);
4451 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4452 			     (nfrags == 0), queue);
4453 
4454 	/* In case two or more DMA transmit descriptors are allocated for this
4455 	 * non-paged SKB data, the DMA buffer address should be saved to
4456 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4457 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4458 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4459 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4460 	 * sooner or later.
4461 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4462 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4463 	 * this DMA buffer right after the DMA engine completely finishes the
4464 	 * full buffer transmission.
4465 	 */
4466 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4467 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4468 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4469 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4470 
4471 	/* Prepare fragments */
4472 	for (i = 0; i < nfrags; i++) {
4473 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4474 
4475 		des = skb_frag_dma_map(priv->device, frag, 0,
4476 				       skb_frag_size(frag),
4477 				       DMA_TO_DEVICE);
4478 		if (dma_mapping_error(priv->device, des))
4479 			goto dma_map_err;
4480 
4481 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4482 				     (i == nfrags - 1), queue);
4483 
4484 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4485 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4486 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4487 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4488 	}
4489 
4490 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4491 
4492 	/* Only the last descriptor gets to point to the skb. */
4493 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4494 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4495 
4496 	/* Manage tx mitigation */
4497 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4498 	tx_q->tx_count_frames += tx_packets;
4499 
4500 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4501 		set_ic = true;
4502 	else if (!priv->tx_coal_frames[queue])
4503 		set_ic = false;
4504 	else if (tx_packets > priv->tx_coal_frames[queue])
4505 		set_ic = true;
4506 	else if ((tx_q->tx_count_frames %
4507 		  priv->tx_coal_frames[queue]) < tx_packets)
4508 		set_ic = true;
4509 	else
4510 		set_ic = false;
4511 
4512 	if (set_ic) {
4513 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4514 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4515 		else
4516 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4517 
4518 		tx_q->tx_count_frames = 0;
4519 		stmmac_set_tx_ic(priv, desc);
4520 	}
4521 
4522 	/* We've used all descriptors we need for this skb, however,
4523 	 * advance cur_tx so that it references a fresh descriptor.
4524 	 * ndo_start_xmit will fill this descriptor the next time it's
4525 	 * called and stmmac_tx_clean may clean up to this descriptor.
4526 	 */
4527 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4528 
4529 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4530 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4531 			  __func__);
4532 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4533 	}
4534 
4535 	u64_stats_update_begin(&txq_stats->q_syncp);
4536 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4537 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4538 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4539 	if (set_ic)
4540 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4541 	u64_stats_update_end(&txq_stats->q_syncp);
4542 
4543 	if (priv->sarc_type)
4544 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4545 
4546 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4547 		     priv->hwts_tx_en)) {
4548 		/* declare that device is doing timestamping */
4549 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4550 		stmmac_enable_tx_timestamp(priv, first);
4551 	}
4552 
4553 	/* Complete the first descriptor before granting the DMA */
4554 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4555 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4556 				   hdr / 4, (skb->len - proto_hdr_len));
4557 
4558 	/* If context desc is used to change MSS */
4559 	if (mss_desc) {
4560 		/* Make sure that first descriptor has been completely
4561 		 * written, including its own bit. This is because MSS is
4562 		 * actually before first descriptor, so we need to make
4563 		 * sure that MSS's own bit is the last thing written.
4564 		 */
4565 		dma_wmb();
4566 		stmmac_set_tx_owner(priv, mss_desc);
4567 	}
4568 
4569 	if (netif_msg_pktdata(priv)) {
4570 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4571 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4572 			tx_q->cur_tx, first, nfrags);
4573 		pr_info(">>> frame to be transmitted: ");
4574 		print_pkt(skb->data, skb_headlen(skb));
4575 	}
4576 
4577 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4578 	skb_tx_timestamp(skb);
4579 
4580 	stmmac_flush_tx_descriptors(priv, queue);
4581 	stmmac_tx_timer_arm(priv, queue);
4582 
4583 	return NETDEV_TX_OK;
4584 
4585 dma_map_err:
4586 	dev_err(priv->device, "Tx dma map failed\n");
4587 	dev_kfree_skb(skb);
4588 	priv->xstats.tx_dropped++;
4589 	return NETDEV_TX_OK;
4590 }
4591 
4592 /**
4593  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4594  * @skb: socket buffer to check
4595  *
4596  * Check if a packet has an ethertype that will trigger the IP header checks
4597  * and IP/TCP checksum engine of the stmmac core.
4598  *
4599  * Return: true if the ethertype can trigger the checksum engine, false
4600  * otherwise
4601  */
stmmac_has_ip_ethertype(struct sk_buff * skb)4602 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4603 {
4604 	int depth = 0;
4605 	__be16 proto;
4606 
4607 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4608 				    &depth);
4609 
4610 	return (depth <= ETH_HLEN) &&
4611 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4612 }
4613 
4614 /**
4615  *  stmmac_xmit - Tx entry point of the driver
4616  *  @skb : the socket buffer
4617  *  @dev : device pointer
4618  *  Description : this is the tx entry point of the driver.
4619  *  It programs the chain or the ring and supports oversized frames
4620  *  and SG feature.
4621  */
stmmac_xmit(struct sk_buff * skb,struct net_device * dev)4622 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4623 {
4624 	bool enh_desc, has_vlan, set_ic, is_jumbo = false;
4625 	struct stmmac_priv *priv = netdev_priv(dev);
4626 	unsigned int nopaged_len = skb_headlen(skb);
4627 	u32 queue = skb_get_queue_mapping(skb);
4628 	int nfrags = skb_shinfo(skb)->nr_frags;
4629 	unsigned int first_entry, tx_packets;
4630 	int gso = skb_shinfo(skb)->gso_type;
4631 	struct stmmac_txq_stats *txq_stats;
4632 	struct dma_edesc *tbs_desc = NULL;
4633 	struct dma_desc *desc, *first;
4634 	struct stmmac_tx_queue *tx_q;
4635 	int i, csum_insertion = 0;
4636 	int entry, first_tx;
4637 	dma_addr_t des;
4638 	u32 sdu_len;
4639 
4640 	tx_q = &priv->dma_conf.tx_queue[queue];
4641 	txq_stats = &priv->xstats.txq_stats[queue];
4642 	first_tx = tx_q->cur_tx;
4643 
4644 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4645 		stmmac_stop_sw_lpi(priv);
4646 
4647 	/* Manage oversized TCP frames for GMAC4 device */
4648 	if (skb_is_gso(skb) && priv->tso) {
4649 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4650 			return stmmac_tso_xmit(skb, dev);
4651 		if (priv->plat->core_type == DWMAC_CORE_GMAC4 &&
4652 		    (gso & SKB_GSO_UDP_L4))
4653 			return stmmac_tso_xmit(skb, dev);
4654 	}
4655 
4656 	if (priv->est && priv->est->enable &&
4657 	    priv->est->max_sdu[queue]) {
4658 		sdu_len = skb->len;
4659 		/* Add VLAN tag length if VLAN tag insertion offload is requested */
4660 		if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
4661 			sdu_len += VLAN_HLEN;
4662 		if (sdu_len > priv->est->max_sdu[queue]) {
4663 			priv->xstats.max_sdu_txq_drop[queue]++;
4664 			goto max_sdu_err;
4665 		}
4666 	}
4667 
4668 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4669 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4670 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4671 								queue));
4672 			/* This is a hard error, log it. */
4673 			netdev_err(priv->dev,
4674 				   "%s: Tx Ring full when queue awake\n",
4675 				   __func__);
4676 		}
4677 		return NETDEV_TX_BUSY;
4678 	}
4679 
4680 	/* Check if VLAN can be inserted by HW */
4681 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4682 
4683 	entry = tx_q->cur_tx;
4684 	first_entry = entry;
4685 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4686 
4687 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4688 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4689 	 * queues. In that case, checksum offloading for those queues that don't
4690 	 * support tx coe needs to fallback to software checksum calculation.
4691 	 *
4692 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4693 	 * also have to be checksummed in software.
4694 	 */
4695 	if (csum_insertion &&
4696 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4697 	     !stmmac_has_ip_ethertype(skb))) {
4698 		if (unlikely(skb_checksum_help(skb)))
4699 			goto dma_map_err;
4700 		csum_insertion = !csum_insertion;
4701 	}
4702 
4703 	if (likely(priv->extend_desc))
4704 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4705 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4706 		desc = &tx_q->dma_entx[entry].basic;
4707 	else
4708 		desc = tx_q->dma_tx + entry;
4709 
4710 	first = desc;
4711 
4712 	if (has_vlan)
4713 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4714 
4715 	enh_desc = priv->plat->enh_desc;
4716 	/* To program the descriptors according to the size of the frame */
4717 	if (enh_desc)
4718 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4719 
4720 	if (unlikely(is_jumbo)) {
4721 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4722 		if (unlikely(entry < 0) && (entry != -EINVAL))
4723 			goto dma_map_err;
4724 	}
4725 
4726 	for (i = 0; i < nfrags; i++) {
4727 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4728 		int len = skb_frag_size(frag);
4729 		bool last_segment = (i == (nfrags - 1));
4730 
4731 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4732 		WARN_ON(tx_q->tx_skbuff[entry]);
4733 
4734 		if (likely(priv->extend_desc))
4735 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4736 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4737 			desc = &tx_q->dma_entx[entry].basic;
4738 		else
4739 			desc = tx_q->dma_tx + entry;
4740 
4741 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4742 				       DMA_TO_DEVICE);
4743 		if (dma_mapping_error(priv->device, des))
4744 			goto dma_map_err; /* should reuse desc w/o issues */
4745 
4746 		tx_q->tx_skbuff_dma[entry].buf = des;
4747 
4748 		stmmac_set_desc_addr(priv, desc, des);
4749 
4750 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4751 		tx_q->tx_skbuff_dma[entry].len = len;
4752 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4753 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4754 
4755 		/* Prepare the descriptor and set the own bit too */
4756 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4757 				priv->mode, 1, last_segment, skb->len);
4758 	}
4759 
4760 	/* Only the last descriptor gets to point to the skb. */
4761 	tx_q->tx_skbuff[entry] = skb;
4762 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4763 
4764 	/* According to the coalesce parameter the IC bit for the latest
4765 	 * segment is reset and the timer re-started to clean the tx status.
4766 	 * This approach takes care about the fragments: desc is the first
4767 	 * element in case of no SG.
4768 	 */
4769 	tx_packets = (entry + 1) - first_tx;
4770 	tx_q->tx_count_frames += tx_packets;
4771 
4772 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4773 		set_ic = true;
4774 	else if (!priv->tx_coal_frames[queue])
4775 		set_ic = false;
4776 	else if (tx_packets > priv->tx_coal_frames[queue])
4777 		set_ic = true;
4778 	else if ((tx_q->tx_count_frames %
4779 		  priv->tx_coal_frames[queue]) < tx_packets)
4780 		set_ic = true;
4781 	else
4782 		set_ic = false;
4783 
4784 	if (set_ic) {
4785 		if (likely(priv->extend_desc))
4786 			desc = &tx_q->dma_etx[entry].basic;
4787 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4788 			desc = &tx_q->dma_entx[entry].basic;
4789 		else
4790 			desc = &tx_q->dma_tx[entry];
4791 
4792 		tx_q->tx_count_frames = 0;
4793 		stmmac_set_tx_ic(priv, desc);
4794 	}
4795 
4796 	/* We've used all descriptors we need for this skb, however,
4797 	 * advance cur_tx so that it references a fresh descriptor.
4798 	 * ndo_start_xmit will fill this descriptor the next time it's
4799 	 * called and stmmac_tx_clean may clean up to this descriptor.
4800 	 */
4801 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4802 	tx_q->cur_tx = entry;
4803 
4804 	if (netif_msg_pktdata(priv)) {
4805 		netdev_dbg(priv->dev,
4806 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4807 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4808 			   entry, first, nfrags);
4809 
4810 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4811 		print_pkt(skb->data, skb->len);
4812 	}
4813 
4814 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4815 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4816 			  __func__);
4817 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4818 	}
4819 
4820 	u64_stats_update_begin(&txq_stats->q_syncp);
4821 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4822 	if (set_ic)
4823 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4824 	u64_stats_update_end(&txq_stats->q_syncp);
4825 
4826 	if (priv->sarc_type)
4827 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4828 
4829 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4830 	 * problems because all the descriptors are actually ready to be
4831 	 * passed to the DMA engine.
4832 	 */
4833 	if (likely(!is_jumbo)) {
4834 		bool last_segment = (nfrags == 0);
4835 
4836 		des = dma_map_single(priv->device, skb->data,
4837 				     nopaged_len, DMA_TO_DEVICE);
4838 		if (dma_mapping_error(priv->device, des))
4839 			goto dma_map_err;
4840 
4841 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4842 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4843 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4844 
4845 		stmmac_set_desc_addr(priv, first, des);
4846 
4847 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4848 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4849 
4850 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4851 			     priv->hwts_tx_en)) {
4852 			/* declare that device is doing timestamping */
4853 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4854 			stmmac_enable_tx_timestamp(priv, first);
4855 		}
4856 
4857 		/* Prepare the first descriptor setting the OWN bit too */
4858 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4859 				csum_insertion, priv->mode, 0, last_segment,
4860 				skb->len);
4861 	}
4862 
4863 	if (tx_q->tbs & STMMAC_TBS_EN) {
4864 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4865 
4866 		tbs_desc = &tx_q->dma_entx[first_entry];
4867 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4868 	}
4869 
4870 	stmmac_set_tx_owner(priv, first);
4871 
4872 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4873 
4874 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4875 	skb_tx_timestamp(skb);
4876 	stmmac_flush_tx_descriptors(priv, queue);
4877 	stmmac_tx_timer_arm(priv, queue);
4878 
4879 	return NETDEV_TX_OK;
4880 
4881 dma_map_err:
4882 	netdev_err(priv->dev, "Tx DMA map failed\n");
4883 max_sdu_err:
4884 	dev_kfree_skb(skb);
4885 	priv->xstats.tx_dropped++;
4886 	return NETDEV_TX_OK;
4887 }
4888 
stmmac_rx_vlan(struct net_device * dev,struct sk_buff * skb)4889 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4890 {
4891 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4892 	__be16 vlan_proto = veth->h_vlan_proto;
4893 	u16 vlanid;
4894 
4895 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4896 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4897 	    (vlan_proto == htons(ETH_P_8021AD) &&
4898 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4899 		/* pop the vlan tag */
4900 		vlanid = ntohs(veth->h_vlan_TCI);
4901 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4902 		skb_pull(skb, VLAN_HLEN);
4903 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4904 	}
4905 }
4906 
4907 /**
4908  * stmmac_rx_refill - refill used skb preallocated buffers
4909  * @priv: driver private structure
4910  * @queue: RX queue index
4911  * Description : this is to reallocate the skb for the reception process
4912  * that is based on zero-copy.
4913  */
stmmac_rx_refill(struct stmmac_priv * priv,u32 queue)4914 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4915 {
4916 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4917 	int dirty = stmmac_rx_dirty(priv, queue);
4918 	unsigned int entry = rx_q->dirty_rx;
4919 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4920 
4921 	if (priv->dma_cap.host_dma_width <= 32)
4922 		gfp |= GFP_DMA32;
4923 
4924 	while (dirty-- > 0) {
4925 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4926 		struct dma_desc *p;
4927 		bool use_rx_wd;
4928 
4929 		if (priv->extend_desc)
4930 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4931 		else
4932 			p = rx_q->dma_rx + entry;
4933 
4934 		if (!buf->page) {
4935 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4936 			if (!buf->page)
4937 				break;
4938 		}
4939 
4940 		if (priv->sph_active && !buf->sec_page) {
4941 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4942 			if (!buf->sec_page)
4943 				break;
4944 
4945 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4946 		}
4947 
4948 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4949 
4950 		stmmac_set_desc_addr(priv, p, buf->addr);
4951 		if (priv->sph_active)
4952 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4953 		else
4954 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4955 		stmmac_refill_desc3(priv, rx_q, p);
4956 
4957 		rx_q->rx_count_frames++;
4958 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4959 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4960 			rx_q->rx_count_frames = 0;
4961 
4962 		use_rx_wd = !priv->rx_coal_frames[queue];
4963 		use_rx_wd |= rx_q->rx_count_frames > 0;
4964 		if (!priv->use_riwt)
4965 			use_rx_wd = false;
4966 
4967 		dma_wmb();
4968 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4969 
4970 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4971 	}
4972 	rx_q->dirty_rx = entry;
4973 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4974 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4975 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4976 	/* Wake up Rx DMA from the suspend state if required */
4977 	stmmac_enable_dma_reception(priv, priv->ioaddr, queue);
4978 }
4979 
stmmac_rx_buf1_len(struct stmmac_priv * priv,struct dma_desc * p,int status,unsigned int len)4980 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4981 				       struct dma_desc *p,
4982 				       int status, unsigned int len)
4983 {
4984 	unsigned int plen = 0, hlen = 0;
4985 	int coe = priv->hw->rx_csum;
4986 
4987 	/* Not first descriptor, buffer is always zero */
4988 	if (priv->sph_active && len)
4989 		return 0;
4990 
4991 	/* First descriptor, get split header length */
4992 	stmmac_get_rx_header_len(priv, p, &hlen);
4993 	if (priv->sph_active && hlen) {
4994 		priv->xstats.rx_split_hdr_pkt_n++;
4995 		return hlen;
4996 	}
4997 
4998 	/* First descriptor, not last descriptor and not split header */
4999 	if (status & rx_not_ls)
5000 		return priv->dma_conf.dma_buf_sz;
5001 
5002 	plen = stmmac_get_rx_frame_len(priv, p, coe);
5003 
5004 	/* First descriptor and last descriptor and not split header */
5005 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
5006 }
5007 
stmmac_rx_buf2_len(struct stmmac_priv * priv,struct dma_desc * p,int status,unsigned int len)5008 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
5009 				       struct dma_desc *p,
5010 				       int status, unsigned int len)
5011 {
5012 	int coe = priv->hw->rx_csum;
5013 	unsigned int plen = 0;
5014 
5015 	/* Not split header, buffer is not available */
5016 	if (!priv->sph_active)
5017 		return 0;
5018 
5019 	/* Not last descriptor */
5020 	if (status & rx_not_ls)
5021 		return priv->dma_conf.dma_buf_sz;
5022 
5023 	plen = stmmac_get_rx_frame_len(priv, p, coe);
5024 
5025 	/* Last descriptor */
5026 	return plen - len;
5027 }
5028 
stmmac_xdp_xmit_xdpf(struct stmmac_priv * priv,int queue,struct xdp_frame * xdpf,bool dma_map)5029 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
5030 				struct xdp_frame *xdpf, bool dma_map)
5031 {
5032 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
5033 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
5034 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
5035 	unsigned int entry = tx_q->cur_tx;
5036 	struct dma_desc *tx_desc;
5037 	dma_addr_t dma_addr;
5038 	bool set_ic;
5039 
5040 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
5041 		return STMMAC_XDP_CONSUMED;
5042 
5043 	if (priv->est && priv->est->enable &&
5044 	    priv->est->max_sdu[queue] &&
5045 	    xdpf->len > priv->est->max_sdu[queue]) {
5046 		priv->xstats.max_sdu_txq_drop[queue]++;
5047 		return STMMAC_XDP_CONSUMED;
5048 	}
5049 
5050 	if (likely(priv->extend_desc))
5051 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
5052 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
5053 		tx_desc = &tx_q->dma_entx[entry].basic;
5054 	else
5055 		tx_desc = tx_q->dma_tx + entry;
5056 
5057 	if (dma_map) {
5058 		dma_addr = dma_map_single(priv->device, xdpf->data,
5059 					  xdpf->len, DMA_TO_DEVICE);
5060 		if (dma_mapping_error(priv->device, dma_addr))
5061 			return STMMAC_XDP_CONSUMED;
5062 
5063 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
5064 	} else {
5065 		struct page *page = virt_to_page(xdpf->data);
5066 
5067 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
5068 			   xdpf->headroom;
5069 		dma_sync_single_for_device(priv->device, dma_addr,
5070 					   xdpf->len, DMA_BIDIRECTIONAL);
5071 
5072 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
5073 	}
5074 
5075 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
5076 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
5077 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
5078 	tx_q->tx_skbuff_dma[entry].last_segment = true;
5079 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
5080 
5081 	tx_q->xdpf[entry] = xdpf;
5082 
5083 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
5084 
5085 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
5086 			       csum, priv->mode, true, true,
5087 			       xdpf->len);
5088 
5089 	tx_q->tx_count_frames++;
5090 
5091 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5092 		set_ic = true;
5093 	else
5094 		set_ic = false;
5095 
5096 	if (set_ic) {
5097 		tx_q->tx_count_frames = 0;
5098 		stmmac_set_tx_ic(priv, tx_desc);
5099 		u64_stats_update_begin(&txq_stats->q_syncp);
5100 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
5101 		u64_stats_update_end(&txq_stats->q_syncp);
5102 	}
5103 
5104 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5105 
5106 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5107 	tx_q->cur_tx = entry;
5108 
5109 	return STMMAC_XDP_TX;
5110 }
5111 
stmmac_xdp_get_tx_queue(struct stmmac_priv * priv,int cpu)5112 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5113 				   int cpu)
5114 {
5115 	int index = cpu;
5116 
5117 	if (unlikely(index < 0))
5118 		index = 0;
5119 
5120 	while (index >= priv->plat->tx_queues_to_use)
5121 		index -= priv->plat->tx_queues_to_use;
5122 
5123 	return index;
5124 }
5125 
stmmac_xdp_xmit_back(struct stmmac_priv * priv,struct xdp_buff * xdp)5126 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5127 				struct xdp_buff *xdp)
5128 {
5129 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5130 	int cpu = smp_processor_id();
5131 	struct netdev_queue *nq;
5132 	int queue;
5133 	int res;
5134 
5135 	if (unlikely(!xdpf))
5136 		return STMMAC_XDP_CONSUMED;
5137 
5138 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5139 	nq = netdev_get_tx_queue(priv->dev, queue);
5140 
5141 	__netif_tx_lock(nq, cpu);
5142 	/* Avoids TX time-out as we are sharing with slow path */
5143 	txq_trans_cond_update(nq);
5144 
5145 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5146 	if (res == STMMAC_XDP_TX)
5147 		stmmac_flush_tx_descriptors(priv, queue);
5148 
5149 	__netif_tx_unlock(nq);
5150 
5151 	return res;
5152 }
5153 
__stmmac_xdp_run_prog(struct stmmac_priv * priv,struct bpf_prog * prog,struct xdp_buff * xdp)5154 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5155 				 struct bpf_prog *prog,
5156 				 struct xdp_buff *xdp)
5157 {
5158 	u32 act;
5159 	int res;
5160 
5161 	act = bpf_prog_run_xdp(prog, xdp);
5162 	switch (act) {
5163 	case XDP_PASS:
5164 		res = STMMAC_XDP_PASS;
5165 		break;
5166 	case XDP_TX:
5167 		res = stmmac_xdp_xmit_back(priv, xdp);
5168 		break;
5169 	case XDP_REDIRECT:
5170 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5171 			res = STMMAC_XDP_CONSUMED;
5172 		else
5173 			res = STMMAC_XDP_REDIRECT;
5174 		break;
5175 	default:
5176 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5177 		fallthrough;
5178 	case XDP_ABORTED:
5179 		trace_xdp_exception(priv->dev, prog, act);
5180 		fallthrough;
5181 	case XDP_DROP:
5182 		res = STMMAC_XDP_CONSUMED;
5183 		break;
5184 	}
5185 
5186 	return res;
5187 }
5188 
stmmac_xdp_run_prog(struct stmmac_priv * priv,struct xdp_buff * xdp)5189 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5190 					   struct xdp_buff *xdp)
5191 {
5192 	struct bpf_prog *prog;
5193 	int res;
5194 
5195 	prog = READ_ONCE(priv->xdp_prog);
5196 	if (!prog) {
5197 		res = STMMAC_XDP_PASS;
5198 		goto out;
5199 	}
5200 
5201 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5202 out:
5203 	return ERR_PTR(-res);
5204 }
5205 
stmmac_finalize_xdp_rx(struct stmmac_priv * priv,int xdp_status)5206 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5207 				   int xdp_status)
5208 {
5209 	int cpu = smp_processor_id();
5210 	int queue;
5211 
5212 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5213 
5214 	if (xdp_status & STMMAC_XDP_TX)
5215 		stmmac_tx_timer_arm(priv, queue);
5216 
5217 	if (xdp_status & STMMAC_XDP_REDIRECT)
5218 		xdp_do_flush();
5219 }
5220 
stmmac_construct_skb_zc(struct stmmac_channel * ch,struct xdp_buff * xdp)5221 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5222 					       struct xdp_buff *xdp)
5223 {
5224 	unsigned int metasize = xdp->data - xdp->data_meta;
5225 	unsigned int datasize = xdp->data_end - xdp->data;
5226 	struct sk_buff *skb;
5227 
5228 	skb = napi_alloc_skb(&ch->rxtx_napi,
5229 			     xdp->data_end - xdp->data_hard_start);
5230 	if (unlikely(!skb))
5231 		return NULL;
5232 
5233 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5234 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5235 	if (metasize)
5236 		skb_metadata_set(skb, metasize);
5237 
5238 	return skb;
5239 }
5240 
stmmac_dispatch_skb_zc(struct stmmac_priv * priv,u32 queue,struct dma_desc * p,struct dma_desc * np,struct xdp_buff * xdp)5241 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5242 				   struct dma_desc *p, struct dma_desc *np,
5243 				   struct xdp_buff *xdp)
5244 {
5245 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5246 	struct stmmac_channel *ch = &priv->channel[queue];
5247 	unsigned int len = xdp->data_end - xdp->data;
5248 	enum pkt_hash_types hash_type;
5249 	int coe = priv->hw->rx_csum;
5250 	struct sk_buff *skb;
5251 	u32 hash;
5252 
5253 	skb = stmmac_construct_skb_zc(ch, xdp);
5254 	if (!skb) {
5255 		priv->xstats.rx_dropped++;
5256 		return;
5257 	}
5258 
5259 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5260 	if (priv->hw->hw_vlan_en)
5261 		/* MAC level stripping. */
5262 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5263 	else
5264 		/* Driver level stripping. */
5265 		stmmac_rx_vlan(priv->dev, skb);
5266 	skb->protocol = eth_type_trans(skb, priv->dev);
5267 
5268 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5269 		skb_checksum_none_assert(skb);
5270 	else
5271 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5272 
5273 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5274 		skb_set_hash(skb, hash, hash_type);
5275 
5276 	skb_record_rx_queue(skb, queue);
5277 	napi_gro_receive(&ch->rxtx_napi, skb);
5278 
5279 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5280 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5281 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5282 	u64_stats_update_end(&rxq_stats->napi_syncp);
5283 }
5284 
stmmac_rx_refill_zc(struct stmmac_priv * priv,u32 queue,u32 budget)5285 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5286 {
5287 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5288 	unsigned int entry = rx_q->dirty_rx;
5289 	struct dma_desc *rx_desc = NULL;
5290 	bool ret = true;
5291 
5292 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5293 
5294 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5295 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5296 		dma_addr_t dma_addr;
5297 		bool use_rx_wd;
5298 
5299 		if (!buf->xdp) {
5300 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5301 			if (!buf->xdp) {
5302 				ret = false;
5303 				break;
5304 			}
5305 		}
5306 
5307 		if (priv->extend_desc)
5308 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5309 		else
5310 			rx_desc = rx_q->dma_rx + entry;
5311 
5312 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5313 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5314 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5315 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5316 
5317 		rx_q->rx_count_frames++;
5318 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5319 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5320 			rx_q->rx_count_frames = 0;
5321 
5322 		use_rx_wd = !priv->rx_coal_frames[queue];
5323 		use_rx_wd |= rx_q->rx_count_frames > 0;
5324 		if (!priv->use_riwt)
5325 			use_rx_wd = false;
5326 
5327 		dma_wmb();
5328 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5329 
5330 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5331 	}
5332 
5333 	if (rx_desc) {
5334 		rx_q->dirty_rx = entry;
5335 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5336 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5337 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5338 	}
5339 
5340 	return ret;
5341 }
5342 
xsk_buff_to_stmmac_ctx(struct xdp_buff * xdp)5343 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5344 {
5345 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5346 	 * to represent incoming packet, whereas cb field in the same structure
5347 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5348 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5349 	 */
5350 	return (struct stmmac_xdp_buff *)xdp;
5351 }
5352 
stmmac_rx_zc(struct stmmac_priv * priv,int limit,u32 queue)5353 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5354 {
5355 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5356 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5357 	unsigned int count = 0, error = 0, len = 0;
5358 	int dirty = stmmac_rx_dirty(priv, queue);
5359 	unsigned int next_entry = rx_q->cur_rx;
5360 	u32 rx_errors = 0, rx_dropped = 0;
5361 	unsigned int desc_size;
5362 	struct bpf_prog *prog;
5363 	bool failure = false;
5364 	int xdp_status = 0;
5365 	int status = 0;
5366 
5367 	if (netif_msg_rx_status(priv)) {
5368 		void *rx_head;
5369 
5370 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5371 		if (priv->extend_desc) {
5372 			rx_head = (void *)rx_q->dma_erx;
5373 			desc_size = sizeof(struct dma_extended_desc);
5374 		} else {
5375 			rx_head = (void *)rx_q->dma_rx;
5376 			desc_size = sizeof(struct dma_desc);
5377 		}
5378 
5379 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5380 				    rx_q->dma_rx_phy, desc_size);
5381 	}
5382 	while (count < limit) {
5383 		struct stmmac_rx_buffer *buf;
5384 		struct stmmac_xdp_buff *ctx;
5385 		unsigned int buf1_len = 0;
5386 		struct dma_desc *np, *p;
5387 		int entry;
5388 		int res;
5389 
5390 		if (!count && rx_q->state_saved) {
5391 			error = rx_q->state.error;
5392 			len = rx_q->state.len;
5393 		} else {
5394 			rx_q->state_saved = false;
5395 			error = 0;
5396 			len = 0;
5397 		}
5398 
5399 read_again:
5400 		if (count >= limit)
5401 			break;
5402 
5403 		buf1_len = 0;
5404 		entry = next_entry;
5405 		buf = &rx_q->buf_pool[entry];
5406 
5407 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5408 			failure = failure ||
5409 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5410 			dirty = 0;
5411 		}
5412 
5413 		if (priv->extend_desc)
5414 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5415 		else
5416 			p = rx_q->dma_rx + entry;
5417 
5418 		/* read the status of the incoming frame */
5419 		status = stmmac_rx_status(priv, &priv->xstats, p);
5420 		/* check if managed by the DMA otherwise go ahead */
5421 		if (unlikely(status & dma_own))
5422 			break;
5423 
5424 		/* Prefetch the next RX descriptor */
5425 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5426 						priv->dma_conf.dma_rx_size);
5427 		next_entry = rx_q->cur_rx;
5428 
5429 		if (priv->extend_desc)
5430 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5431 		else
5432 			np = rx_q->dma_rx + next_entry;
5433 
5434 		prefetch(np);
5435 
5436 		/* Ensure a valid XSK buffer before proceed */
5437 		if (!buf->xdp)
5438 			break;
5439 
5440 		if (priv->extend_desc)
5441 			stmmac_rx_extended_status(priv, &priv->xstats,
5442 						  rx_q->dma_erx + entry);
5443 		if (unlikely(status == discard_frame)) {
5444 			xsk_buff_free(buf->xdp);
5445 			buf->xdp = NULL;
5446 			dirty++;
5447 			error = 1;
5448 			if (!priv->hwts_rx_en)
5449 				rx_errors++;
5450 		}
5451 
5452 		if (unlikely(error && (status & rx_not_ls)))
5453 			goto read_again;
5454 		if (unlikely(error)) {
5455 			count++;
5456 			continue;
5457 		}
5458 
5459 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5460 		if (likely(status & rx_not_ls)) {
5461 			xsk_buff_free(buf->xdp);
5462 			buf->xdp = NULL;
5463 			dirty++;
5464 			count++;
5465 			goto read_again;
5466 		}
5467 
5468 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5469 		ctx->priv = priv;
5470 		ctx->desc = p;
5471 		ctx->ndesc = np;
5472 
5473 		/* XDP ZC Frame only support primary buffers for now */
5474 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5475 		len += buf1_len;
5476 
5477 		/* ACS is disabled; strip manually. */
5478 		if (likely(!(status & rx_not_ls))) {
5479 			buf1_len -= ETH_FCS_LEN;
5480 			len -= ETH_FCS_LEN;
5481 		}
5482 
5483 		/* RX buffer is good and fit into a XSK pool buffer */
5484 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5485 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5486 
5487 		prog = READ_ONCE(priv->xdp_prog);
5488 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5489 
5490 		switch (res) {
5491 		case STMMAC_XDP_PASS:
5492 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5493 			xsk_buff_free(buf->xdp);
5494 			break;
5495 		case STMMAC_XDP_CONSUMED:
5496 			xsk_buff_free(buf->xdp);
5497 			rx_dropped++;
5498 			break;
5499 		case STMMAC_XDP_TX:
5500 		case STMMAC_XDP_REDIRECT:
5501 			xdp_status |= res;
5502 			break;
5503 		}
5504 
5505 		buf->xdp = NULL;
5506 		dirty++;
5507 		count++;
5508 	}
5509 
5510 	if (status & rx_not_ls) {
5511 		rx_q->state_saved = true;
5512 		rx_q->state.error = error;
5513 		rx_q->state.len = len;
5514 	}
5515 
5516 	stmmac_finalize_xdp_rx(priv, xdp_status);
5517 
5518 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5519 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5520 	u64_stats_update_end(&rxq_stats->napi_syncp);
5521 
5522 	priv->xstats.rx_dropped += rx_dropped;
5523 	priv->xstats.rx_errors += rx_errors;
5524 
5525 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5526 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5527 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5528 		else
5529 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5530 
5531 		return (int)count;
5532 	}
5533 
5534 	return failure ? limit : (int)count;
5535 }
5536 
5537 /**
5538  * stmmac_rx - manage the receive process
5539  * @priv: driver private structure
5540  * @limit: napi bugget
5541  * @queue: RX queue index.
5542  * Description :  this the function called by the napi poll method.
5543  * It gets all the frames inside the ring.
5544  */
stmmac_rx(struct stmmac_priv * priv,int limit,u32 queue)5545 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5546 {
5547 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5548 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5549 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5550 	struct stmmac_channel *ch = &priv->channel[queue];
5551 	unsigned int count = 0, error = 0, len = 0;
5552 	int status = 0, coe = priv->hw->rx_csum;
5553 	unsigned int next_entry = rx_q->cur_rx;
5554 	enum dma_data_direction dma_dir;
5555 	unsigned int desc_size;
5556 	struct sk_buff *skb = NULL;
5557 	struct stmmac_xdp_buff ctx;
5558 	int xdp_status = 0;
5559 	int bufsz;
5560 
5561 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5562 	bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5563 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5564 
5565 	if (netif_msg_rx_status(priv)) {
5566 		void *rx_head;
5567 
5568 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5569 		if (priv->extend_desc) {
5570 			rx_head = (void *)rx_q->dma_erx;
5571 			desc_size = sizeof(struct dma_extended_desc);
5572 		} else {
5573 			rx_head = (void *)rx_q->dma_rx;
5574 			desc_size = sizeof(struct dma_desc);
5575 		}
5576 
5577 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5578 				    rx_q->dma_rx_phy, desc_size);
5579 	}
5580 	while (count < limit) {
5581 		unsigned int buf1_len = 0, buf2_len = 0;
5582 		enum pkt_hash_types hash_type;
5583 		struct stmmac_rx_buffer *buf;
5584 		struct dma_desc *np, *p;
5585 		int entry;
5586 		u32 hash;
5587 
5588 		if (!count && rx_q->state_saved) {
5589 			skb = rx_q->state.skb;
5590 			error = rx_q->state.error;
5591 			len = rx_q->state.len;
5592 		} else {
5593 			rx_q->state_saved = false;
5594 			skb = NULL;
5595 			error = 0;
5596 			len = 0;
5597 		}
5598 
5599 read_again:
5600 		if (count >= limit)
5601 			break;
5602 
5603 		buf1_len = 0;
5604 		buf2_len = 0;
5605 		entry = next_entry;
5606 		buf = &rx_q->buf_pool[entry];
5607 
5608 		if (priv->extend_desc)
5609 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5610 		else
5611 			p = rx_q->dma_rx + entry;
5612 
5613 		/* read the status of the incoming frame */
5614 		status = stmmac_rx_status(priv, &priv->xstats, p);
5615 		/* check if managed by the DMA otherwise go ahead */
5616 		if (unlikely(status & dma_own))
5617 			break;
5618 
5619 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5620 						priv->dma_conf.dma_rx_size);
5621 		next_entry = rx_q->cur_rx;
5622 
5623 		if (priv->extend_desc)
5624 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5625 		else
5626 			np = rx_q->dma_rx + next_entry;
5627 
5628 		prefetch(np);
5629 
5630 		if (priv->extend_desc)
5631 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5632 		if (unlikely(status == discard_frame)) {
5633 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5634 			buf->page = NULL;
5635 			error = 1;
5636 			if (!priv->hwts_rx_en)
5637 				rx_errors++;
5638 		}
5639 
5640 		if (unlikely(error && (status & rx_not_ls)))
5641 			goto read_again;
5642 		if (unlikely(error)) {
5643 			dev_kfree_skb(skb);
5644 			skb = NULL;
5645 			count++;
5646 			continue;
5647 		}
5648 
5649 		/* Buffer is good. Go on. */
5650 
5651 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5652 		len += buf1_len;
5653 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5654 		len += buf2_len;
5655 
5656 		/* ACS is disabled; strip manually. */
5657 		if (likely(!(status & rx_not_ls))) {
5658 			if (buf2_len) {
5659 				buf2_len -= ETH_FCS_LEN;
5660 				len -= ETH_FCS_LEN;
5661 			} else if (buf1_len) {
5662 				buf1_len -= ETH_FCS_LEN;
5663 				len -= ETH_FCS_LEN;
5664 			}
5665 		}
5666 
5667 		if (!skb) {
5668 			unsigned int pre_len, sync_len;
5669 
5670 			dma_sync_single_for_cpu(priv->device, buf->addr,
5671 						buf1_len, dma_dir);
5672 			net_prefetch(page_address(buf->page) +
5673 				     buf->page_offset);
5674 
5675 			xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5676 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5677 					 buf->page_offset, buf1_len, true);
5678 
5679 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5680 				  buf->page_offset;
5681 
5682 			ctx.priv = priv;
5683 			ctx.desc = p;
5684 			ctx.ndesc = np;
5685 
5686 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5687 			/* Due xdp_adjust_tail: DMA sync for_device
5688 			 * cover max len CPU touch
5689 			 */
5690 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5691 				   buf->page_offset;
5692 			sync_len = max(sync_len, pre_len);
5693 
5694 			/* For Not XDP_PASS verdict */
5695 			if (IS_ERR(skb)) {
5696 				unsigned int xdp_res = -PTR_ERR(skb);
5697 
5698 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5699 					page_pool_put_page(rx_q->page_pool,
5700 							   virt_to_head_page(ctx.xdp.data),
5701 							   sync_len, true);
5702 					buf->page = NULL;
5703 					rx_dropped++;
5704 
5705 					/* Clear skb as it was set as
5706 					 * status by XDP program.
5707 					 */
5708 					skb = NULL;
5709 
5710 					if (unlikely((status & rx_not_ls)))
5711 						goto read_again;
5712 
5713 					count++;
5714 					continue;
5715 				} else if (xdp_res & (STMMAC_XDP_TX |
5716 						      STMMAC_XDP_REDIRECT)) {
5717 					xdp_status |= xdp_res;
5718 					buf->page = NULL;
5719 					skb = NULL;
5720 					count++;
5721 					continue;
5722 				}
5723 			}
5724 		}
5725 
5726 		if (!skb) {
5727 			unsigned int head_pad_len;
5728 
5729 			/* XDP program may expand or reduce tail */
5730 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5731 
5732 			skb = napi_build_skb(page_address(buf->page),
5733 					     rx_q->napi_skb_frag_size);
5734 			if (!skb) {
5735 				page_pool_recycle_direct(rx_q->page_pool,
5736 							 buf->page);
5737 				rx_dropped++;
5738 				count++;
5739 				goto drain_data;
5740 			}
5741 
5742 			/* XDP program may adjust header */
5743 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5744 			skb_reserve(skb, head_pad_len);
5745 			skb_put(skb, buf1_len);
5746 			skb_mark_for_recycle(skb);
5747 			buf->page = NULL;
5748 		} else if (buf1_len) {
5749 			dma_sync_single_for_cpu(priv->device, buf->addr,
5750 						buf1_len, dma_dir);
5751 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5752 					buf->page, buf->page_offset, buf1_len,
5753 					priv->dma_conf.dma_buf_sz);
5754 			buf->page = NULL;
5755 		}
5756 
5757 		if (buf2_len) {
5758 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5759 						buf2_len, dma_dir);
5760 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5761 					buf->sec_page, 0, buf2_len,
5762 					priv->dma_conf.dma_buf_sz);
5763 			buf->sec_page = NULL;
5764 		}
5765 
5766 drain_data:
5767 		if (likely(status & rx_not_ls))
5768 			goto read_again;
5769 		if (!skb)
5770 			continue;
5771 
5772 		/* Got entire packet into SKB. Finish it. */
5773 
5774 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5775 
5776 		if (priv->hw->hw_vlan_en)
5777 			/* MAC level stripping. */
5778 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5779 		else
5780 			/* Driver level stripping. */
5781 			stmmac_rx_vlan(priv->dev, skb);
5782 
5783 		skb->protocol = eth_type_trans(skb, priv->dev);
5784 
5785 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5786 		    (status & csum_none))
5787 			skb_checksum_none_assert(skb);
5788 		else
5789 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5790 
5791 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5792 			skb_set_hash(skb, hash, hash_type);
5793 
5794 		skb_record_rx_queue(skb, queue);
5795 		napi_gro_receive(&ch->rx_napi, skb);
5796 		skb = NULL;
5797 
5798 		rx_packets++;
5799 		rx_bytes += len;
5800 		count++;
5801 	}
5802 
5803 	if (status & rx_not_ls || skb) {
5804 		rx_q->state_saved = true;
5805 		rx_q->state.skb = skb;
5806 		rx_q->state.error = error;
5807 		rx_q->state.len = len;
5808 	}
5809 
5810 	stmmac_finalize_xdp_rx(priv, xdp_status);
5811 
5812 	stmmac_rx_refill(priv, queue);
5813 
5814 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5815 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5816 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5817 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5818 	u64_stats_update_end(&rxq_stats->napi_syncp);
5819 
5820 	priv->xstats.rx_dropped += rx_dropped;
5821 	priv->xstats.rx_errors += rx_errors;
5822 
5823 	return count;
5824 }
5825 
stmmac_napi_poll_rx(struct napi_struct * napi,int budget)5826 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5827 {
5828 	struct stmmac_channel *ch =
5829 		container_of(napi, struct stmmac_channel, rx_napi);
5830 	struct stmmac_priv *priv = ch->priv_data;
5831 	struct stmmac_rxq_stats *rxq_stats;
5832 	u32 chan = ch->index;
5833 	int work_done;
5834 
5835 	rxq_stats = &priv->xstats.rxq_stats[chan];
5836 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5837 	u64_stats_inc(&rxq_stats->napi.poll);
5838 	u64_stats_update_end(&rxq_stats->napi_syncp);
5839 
5840 	work_done = stmmac_rx(priv, budget, chan);
5841 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5842 		unsigned long flags;
5843 
5844 		spin_lock_irqsave(&ch->lock, flags);
5845 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5846 		spin_unlock_irqrestore(&ch->lock, flags);
5847 	}
5848 
5849 	return work_done;
5850 }
5851 
stmmac_napi_poll_tx(struct napi_struct * napi,int budget)5852 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5853 {
5854 	struct stmmac_channel *ch =
5855 		container_of(napi, struct stmmac_channel, tx_napi);
5856 	struct stmmac_priv *priv = ch->priv_data;
5857 	struct stmmac_txq_stats *txq_stats;
5858 	bool pending_packets = false;
5859 	u32 chan = ch->index;
5860 	int work_done;
5861 
5862 	txq_stats = &priv->xstats.txq_stats[chan];
5863 	u64_stats_update_begin(&txq_stats->napi_syncp);
5864 	u64_stats_inc(&txq_stats->napi.poll);
5865 	u64_stats_update_end(&txq_stats->napi_syncp);
5866 
5867 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5868 	work_done = min(work_done, budget);
5869 
5870 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5871 		unsigned long flags;
5872 
5873 		spin_lock_irqsave(&ch->lock, flags);
5874 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5875 		spin_unlock_irqrestore(&ch->lock, flags);
5876 	}
5877 
5878 	/* TX still have packet to handle, check if we need to arm tx timer */
5879 	if (pending_packets)
5880 		stmmac_tx_timer_arm(priv, chan);
5881 
5882 	return work_done;
5883 }
5884 
stmmac_napi_poll_rxtx(struct napi_struct * napi,int budget)5885 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5886 {
5887 	struct stmmac_channel *ch =
5888 		container_of(napi, struct stmmac_channel, rxtx_napi);
5889 	struct stmmac_priv *priv = ch->priv_data;
5890 	bool tx_pending_packets = false;
5891 	int rx_done, tx_done, rxtx_done;
5892 	struct stmmac_rxq_stats *rxq_stats;
5893 	struct stmmac_txq_stats *txq_stats;
5894 	u32 chan = ch->index;
5895 
5896 	rxq_stats = &priv->xstats.rxq_stats[chan];
5897 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5898 	u64_stats_inc(&rxq_stats->napi.poll);
5899 	u64_stats_update_end(&rxq_stats->napi_syncp);
5900 
5901 	txq_stats = &priv->xstats.txq_stats[chan];
5902 	u64_stats_update_begin(&txq_stats->napi_syncp);
5903 	u64_stats_inc(&txq_stats->napi.poll);
5904 	u64_stats_update_end(&txq_stats->napi_syncp);
5905 
5906 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5907 	tx_done = min(tx_done, budget);
5908 
5909 	rx_done = stmmac_rx_zc(priv, budget, chan);
5910 
5911 	rxtx_done = max(tx_done, rx_done);
5912 
5913 	/* If either TX or RX work is not complete, return budget
5914 	 * and keep pooling
5915 	 */
5916 	if (rxtx_done >= budget)
5917 		return budget;
5918 
5919 	/* all work done, exit the polling mode */
5920 	if (napi_complete_done(napi, rxtx_done)) {
5921 		unsigned long flags;
5922 
5923 		spin_lock_irqsave(&ch->lock, flags);
5924 		/* Both RX and TX work done are compelte,
5925 		 * so enable both RX & TX IRQs.
5926 		 */
5927 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5928 		spin_unlock_irqrestore(&ch->lock, flags);
5929 	}
5930 
5931 	/* TX still have packet to handle, check if we need to arm tx timer */
5932 	if (tx_pending_packets)
5933 		stmmac_tx_timer_arm(priv, chan);
5934 
5935 	return min(rxtx_done, budget - 1);
5936 }
5937 
5938 /**
5939  *  stmmac_tx_timeout
5940  *  @dev : Pointer to net device structure
5941  *  @txqueue: the index of the hanging transmit queue
5942  *  Description: this function is called when a packet transmission fails to
5943  *   complete within a reasonable time. The driver will mark the error in the
5944  *   netdev structure and arrange for the device to be reset to a sane state
5945  *   in order to transmit a new packet.
5946  */
stmmac_tx_timeout(struct net_device * dev,unsigned int txqueue)5947 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5948 {
5949 	struct stmmac_priv *priv = netdev_priv(dev);
5950 
5951 	stmmac_global_err(priv);
5952 }
5953 
5954 /**
5955  *  stmmac_set_rx_mode - entry point for multicast addressing
5956  *  @dev : pointer to the device structure
5957  *  Description:
5958  *  This function is a driver entry point which gets called by the kernel
5959  *  whenever multicast addresses must be enabled/disabled.
5960  *  Return value:
5961  *  void.
5962  *
5963  *  FIXME: This may need RXC to be running, but it may be called with BH
5964  *  disabled, which means we can't call phylink_rx_clk_stop*().
5965  */
stmmac_set_rx_mode(struct net_device * dev)5966 static void stmmac_set_rx_mode(struct net_device *dev)
5967 {
5968 	struct stmmac_priv *priv = netdev_priv(dev);
5969 
5970 	stmmac_set_filter(priv, priv->hw, dev);
5971 }
5972 
5973 /**
5974  *  stmmac_change_mtu - entry point to change MTU size for the device.
5975  *  @dev : device pointer.
5976  *  @new_mtu : the new MTU size for the device.
5977  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5978  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5979  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5980  *  Return value:
5981  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5982  *  file on failure.
5983  */
stmmac_change_mtu(struct net_device * dev,int new_mtu)5984 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5985 {
5986 	struct stmmac_priv *priv = netdev_priv(dev);
5987 	int txfifosz = priv->plat->tx_fifo_size;
5988 	struct stmmac_dma_conf *dma_conf;
5989 	const int mtu = new_mtu;
5990 	int ret;
5991 
5992 	if (txfifosz == 0)
5993 		txfifosz = priv->dma_cap.tx_fifo_size;
5994 
5995 	txfifosz /= priv->plat->tx_queues_to_use;
5996 
5997 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5998 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5999 		return -EINVAL;
6000 	}
6001 
6002 	new_mtu = STMMAC_ALIGN(new_mtu);
6003 
6004 	/* If condition true, FIFO is too small or MTU too large */
6005 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
6006 		return -EINVAL;
6007 
6008 	if (netif_running(dev)) {
6009 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
6010 		/* Try to allocate the new DMA conf with the new mtu */
6011 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
6012 		if (IS_ERR(dma_conf)) {
6013 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
6014 				   mtu);
6015 			return PTR_ERR(dma_conf);
6016 		}
6017 
6018 		__stmmac_release(dev);
6019 
6020 		ret = __stmmac_open(dev, dma_conf);
6021 		if (ret) {
6022 			free_dma_desc_resources(priv, dma_conf);
6023 			kfree(dma_conf);
6024 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
6025 			return ret;
6026 		}
6027 
6028 		kfree(dma_conf);
6029 
6030 		stmmac_set_rx_mode(dev);
6031 	}
6032 
6033 	WRITE_ONCE(dev->mtu, mtu);
6034 	netdev_update_features(dev);
6035 
6036 	return 0;
6037 }
6038 
stmmac_fix_features(struct net_device * dev,netdev_features_t features)6039 static netdev_features_t stmmac_fix_features(struct net_device *dev,
6040 					     netdev_features_t features)
6041 {
6042 	struct stmmac_priv *priv = netdev_priv(dev);
6043 
6044 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
6045 		features &= ~NETIF_F_RXCSUM;
6046 
6047 	if (!priv->plat->tx_coe)
6048 		features &= ~NETIF_F_CSUM_MASK;
6049 
6050 	/* Some GMAC devices have a bugged Jumbo frame support that
6051 	 * needs to have the Tx COE disabled for oversized frames
6052 	 * (due to limited buffer sizes). In this case we disable
6053 	 * the TX csum insertion in the TDES and not use SF.
6054 	 */
6055 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
6056 		features &= ~NETIF_F_CSUM_MASK;
6057 
6058 	/* Disable tso if asked by ethtool */
6059 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
6060 		if (features & NETIF_F_TSO)
6061 			priv->tso = true;
6062 		else
6063 			priv->tso = false;
6064 	}
6065 
6066 	return features;
6067 }
6068 
stmmac_set_features(struct net_device * netdev,netdev_features_t features)6069 static int stmmac_set_features(struct net_device *netdev,
6070 			       netdev_features_t features)
6071 {
6072 	struct stmmac_priv *priv = netdev_priv(netdev);
6073 
6074 	/* Keep the COE Type in case of csum is supporting */
6075 	if (features & NETIF_F_RXCSUM)
6076 		priv->hw->rx_csum = priv->plat->rx_coe;
6077 	else
6078 		priv->hw->rx_csum = 0;
6079 	/* No check needed because rx_coe has been set before and it will be
6080 	 * fixed in case of issue.
6081 	 */
6082 	stmmac_rx_ipc(priv, priv->hw);
6083 
6084 	if (priv->sph_capable) {
6085 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
6086 		u32 chan;
6087 
6088 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
6089 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6090 	}
6091 
6092 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
6093 		priv->hw->hw_vlan_en = true;
6094 	else
6095 		priv->hw->hw_vlan_en = false;
6096 
6097 	phylink_rx_clk_stop_block(priv->phylink);
6098 	stmmac_set_hw_vlan_mode(priv, priv->hw);
6099 	phylink_rx_clk_stop_unblock(priv->phylink);
6100 
6101 	return 0;
6102 }
6103 
stmmac_common_interrupt(struct stmmac_priv * priv)6104 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6105 {
6106 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6107 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6108 	u32 queues_count;
6109 	u32 queue;
6110 	bool xmac;
6111 
6112 	xmac = dwmac_is_xmac(priv->plat->core_type);
6113 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6114 
6115 	if (priv->irq_wake)
6116 		pm_wakeup_event(priv->device, 0);
6117 
6118 	if (priv->dma_cap.estsel)
6119 		stmmac_est_irq_status(priv, priv, priv->dev,
6120 				      &priv->xstats, tx_cnt);
6121 
6122 	if (stmmac_fpe_supported(priv))
6123 		stmmac_fpe_irq_status(priv);
6124 
6125 	/* To handle GMAC own interrupts */
6126 	if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
6127 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6128 
6129 		if (unlikely(status)) {
6130 			/* For LPI we need to save the tx status */
6131 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6132 				priv->tx_path_in_lpi_mode = true;
6133 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6134 				priv->tx_path_in_lpi_mode = false;
6135 		}
6136 
6137 		for (queue = 0; queue < queues_count; queue++)
6138 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6139 
6140 		stmmac_timestamp_interrupt(priv, priv);
6141 	}
6142 }
6143 
6144 /**
6145  *  stmmac_interrupt - main ISR
6146  *  @irq: interrupt number.
6147  *  @dev_id: to pass the net device pointer.
6148  *  Description: this is the main driver interrupt service routine.
6149  *  It can call:
6150  *  o DMA service routine (to manage incoming frame reception and transmission
6151  *    status)
6152  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6153  *    interrupts.
6154  */
stmmac_interrupt(int irq,void * dev_id)6155 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6156 {
6157 	struct net_device *dev = (struct net_device *)dev_id;
6158 	struct stmmac_priv *priv = netdev_priv(dev);
6159 
6160 	/* Check if adapter is up */
6161 	if (test_bit(STMMAC_DOWN, &priv->state))
6162 		return IRQ_HANDLED;
6163 
6164 	/* Check ASP error if it isn't delivered via an individual IRQ */
6165 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6166 		return IRQ_HANDLED;
6167 
6168 	/* To handle Common interrupts */
6169 	stmmac_common_interrupt(priv);
6170 
6171 	/* To handle DMA interrupts */
6172 	stmmac_dma_interrupt(priv);
6173 
6174 	return IRQ_HANDLED;
6175 }
6176 
stmmac_mac_interrupt(int irq,void * dev_id)6177 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6178 {
6179 	struct net_device *dev = (struct net_device *)dev_id;
6180 	struct stmmac_priv *priv = netdev_priv(dev);
6181 
6182 	/* Check if adapter is up */
6183 	if (test_bit(STMMAC_DOWN, &priv->state))
6184 		return IRQ_HANDLED;
6185 
6186 	/* To handle Common interrupts */
6187 	stmmac_common_interrupt(priv);
6188 
6189 	return IRQ_HANDLED;
6190 }
6191 
stmmac_safety_interrupt(int irq,void * dev_id)6192 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6193 {
6194 	struct net_device *dev = (struct net_device *)dev_id;
6195 	struct stmmac_priv *priv = netdev_priv(dev);
6196 
6197 	/* Check if adapter is up */
6198 	if (test_bit(STMMAC_DOWN, &priv->state))
6199 		return IRQ_HANDLED;
6200 
6201 	/* Check if a fatal error happened */
6202 	stmmac_safety_feat_interrupt(priv);
6203 
6204 	return IRQ_HANDLED;
6205 }
6206 
stmmac_msi_intr_tx(int irq,void * data)6207 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6208 {
6209 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6210 	struct stmmac_dma_conf *dma_conf;
6211 	int chan = tx_q->queue_index;
6212 	struct stmmac_priv *priv;
6213 	int status;
6214 
6215 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6216 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6217 
6218 	/* Check if adapter is up */
6219 	if (test_bit(STMMAC_DOWN, &priv->state))
6220 		return IRQ_HANDLED;
6221 
6222 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6223 
6224 	if (unlikely(status & tx_hard_error_bump_tc)) {
6225 		/* Try to bump up the dma threshold on this failure */
6226 		stmmac_bump_dma_threshold(priv, chan);
6227 	} else if (unlikely(status == tx_hard_error)) {
6228 		stmmac_tx_err(priv, chan);
6229 	}
6230 
6231 	return IRQ_HANDLED;
6232 }
6233 
stmmac_msi_intr_rx(int irq,void * data)6234 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6235 {
6236 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6237 	struct stmmac_dma_conf *dma_conf;
6238 	int chan = rx_q->queue_index;
6239 	struct stmmac_priv *priv;
6240 
6241 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6242 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6243 
6244 	/* Check if adapter is up */
6245 	if (test_bit(STMMAC_DOWN, &priv->state))
6246 		return IRQ_HANDLED;
6247 
6248 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6249 
6250 	return IRQ_HANDLED;
6251 }
6252 
6253 /**
6254  *  stmmac_ioctl - Entry point for the Ioctl
6255  *  @dev: Device pointer.
6256  *  @rq: An IOCTL specefic structure, that can contain a pointer to
6257  *  a proprietary structure used to pass information to the driver.
6258  *  @cmd: IOCTL command
6259  *  Description:
6260  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6261  */
stmmac_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)6262 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6263 {
6264 	struct stmmac_priv *priv = netdev_priv (dev);
6265 	int ret = -EOPNOTSUPP;
6266 
6267 	if (!netif_running(dev))
6268 		return -EINVAL;
6269 
6270 	switch (cmd) {
6271 	case SIOCGMIIPHY:
6272 	case SIOCGMIIREG:
6273 	case SIOCSMIIREG:
6274 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6275 		break;
6276 	default:
6277 		break;
6278 	}
6279 
6280 	return ret;
6281 }
6282 
stmmac_setup_tc_block_cb(enum tc_setup_type type,void * type_data,void * cb_priv)6283 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6284 				    void *cb_priv)
6285 {
6286 	struct stmmac_priv *priv = cb_priv;
6287 	int ret = -EOPNOTSUPP;
6288 
6289 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6290 		return ret;
6291 
6292 	__stmmac_disable_all_queues(priv);
6293 
6294 	switch (type) {
6295 	case TC_SETUP_CLSU32:
6296 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6297 		break;
6298 	case TC_SETUP_CLSFLOWER:
6299 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6300 		break;
6301 	default:
6302 		break;
6303 	}
6304 
6305 	stmmac_enable_all_queues(priv);
6306 	return ret;
6307 }
6308 
6309 static LIST_HEAD(stmmac_block_cb_list);
6310 
stmmac_setup_tc(struct net_device * ndev,enum tc_setup_type type,void * type_data)6311 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6312 			   void *type_data)
6313 {
6314 	struct stmmac_priv *priv = netdev_priv(ndev);
6315 
6316 	switch (type) {
6317 	case TC_QUERY_CAPS:
6318 		return stmmac_tc_query_caps(priv, priv, type_data);
6319 	case TC_SETUP_QDISC_MQPRIO:
6320 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6321 	case TC_SETUP_BLOCK:
6322 		return flow_block_cb_setup_simple(type_data,
6323 						  &stmmac_block_cb_list,
6324 						  stmmac_setup_tc_block_cb,
6325 						  priv, priv, true);
6326 	case TC_SETUP_QDISC_CBS:
6327 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6328 	case TC_SETUP_QDISC_TAPRIO:
6329 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6330 	case TC_SETUP_QDISC_ETF:
6331 		return stmmac_tc_setup_etf(priv, priv, type_data);
6332 	default:
6333 		return -EOPNOTSUPP;
6334 	}
6335 }
6336 
stmmac_select_queue(struct net_device * dev,struct sk_buff * skb,struct net_device * sb_dev)6337 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6338 			       struct net_device *sb_dev)
6339 {
6340 	int gso = skb_shinfo(skb)->gso_type;
6341 
6342 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6343 		/*
6344 		 * There is no way to determine the number of TSO/USO
6345 		 * capable Queues. Let's use always the Queue 0
6346 		 * because if TSO/USO is supported then at least this
6347 		 * one will be capable.
6348 		 */
6349 		return 0;
6350 	}
6351 
6352 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6353 }
6354 
stmmac_set_mac_address(struct net_device * ndev,void * addr)6355 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6356 {
6357 	struct stmmac_priv *priv = netdev_priv(ndev);
6358 	int ret = 0;
6359 
6360 	ret = pm_runtime_resume_and_get(priv->device);
6361 	if (ret < 0)
6362 		return ret;
6363 
6364 	ret = eth_mac_addr(ndev, addr);
6365 	if (ret)
6366 		goto set_mac_error;
6367 
6368 	phylink_rx_clk_stop_block(priv->phylink);
6369 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6370 	phylink_rx_clk_stop_unblock(priv->phylink);
6371 
6372 set_mac_error:
6373 	pm_runtime_put(priv->device);
6374 
6375 	return ret;
6376 }
6377 
6378 #ifdef CONFIG_DEBUG_FS
6379 static struct dentry *stmmac_fs_dir;
6380 
sysfs_display_ring(void * head,int size,int extend_desc,struct seq_file * seq,dma_addr_t dma_phy_addr)6381 static void sysfs_display_ring(void *head, int size, int extend_desc,
6382 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6383 {
6384 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6385 	struct dma_desc *p = (struct dma_desc *)head;
6386 	unsigned int desc_size;
6387 	dma_addr_t dma_addr;
6388 	int i;
6389 
6390 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6391 	for (i = 0; i < size; i++) {
6392 		dma_addr = dma_phy_addr + i * desc_size;
6393 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6394 				i, &dma_addr,
6395 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6396 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6397 		if (extend_desc)
6398 			p = &(++ep)->basic;
6399 		else
6400 			p++;
6401 	}
6402 }
6403 
stmmac_rings_status_show(struct seq_file * seq,void * v)6404 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6405 {
6406 	struct net_device *dev = seq->private;
6407 	struct stmmac_priv *priv = netdev_priv(dev);
6408 	u32 rx_count = priv->plat->rx_queues_to_use;
6409 	u32 tx_count = priv->plat->tx_queues_to_use;
6410 	u32 queue;
6411 
6412 	if ((dev->flags & IFF_UP) == 0)
6413 		return 0;
6414 
6415 	for (queue = 0; queue < rx_count; queue++) {
6416 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6417 
6418 		seq_printf(seq, "RX Queue %d:\n", queue);
6419 
6420 		if (priv->extend_desc) {
6421 			seq_printf(seq, "Extended descriptor ring:\n");
6422 			sysfs_display_ring((void *)rx_q->dma_erx,
6423 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6424 		} else {
6425 			seq_printf(seq, "Descriptor ring:\n");
6426 			sysfs_display_ring((void *)rx_q->dma_rx,
6427 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6428 		}
6429 	}
6430 
6431 	for (queue = 0; queue < tx_count; queue++) {
6432 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6433 
6434 		seq_printf(seq, "TX Queue %d:\n", queue);
6435 
6436 		if (priv->extend_desc) {
6437 			seq_printf(seq, "Extended descriptor ring:\n");
6438 			sysfs_display_ring((void *)tx_q->dma_etx,
6439 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6440 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6441 			seq_printf(seq, "Descriptor ring:\n");
6442 			sysfs_display_ring((void *)tx_q->dma_tx,
6443 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6444 		}
6445 	}
6446 
6447 	return 0;
6448 }
6449 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6450 
stmmac_dma_cap_show(struct seq_file * seq,void * v)6451 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6452 {
6453 	static const char * const dwxgmac_timestamp_source[] = {
6454 		"None",
6455 		"Internal",
6456 		"External",
6457 		"Both",
6458 	};
6459 	static const char * const dwxgmac_safety_feature_desc[] = {
6460 		"No",
6461 		"All Safety Features with ECC and Parity",
6462 		"All Safety Features without ECC or Parity",
6463 		"All Safety Features with Parity Only",
6464 		"ECC Only",
6465 		"UNDEFINED",
6466 		"UNDEFINED",
6467 		"UNDEFINED",
6468 	};
6469 	struct net_device *dev = seq->private;
6470 	struct stmmac_priv *priv = netdev_priv(dev);
6471 
6472 	if (!priv->hw_cap_support) {
6473 		seq_printf(seq, "DMA HW features not supported\n");
6474 		return 0;
6475 	}
6476 
6477 	seq_printf(seq, "==============================\n");
6478 	seq_printf(seq, "\tDMA HW features\n");
6479 	seq_printf(seq, "==============================\n");
6480 
6481 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6482 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6483 	seq_printf(seq, "\t1000 Mbps: %s\n",
6484 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6485 	seq_printf(seq, "\tHalf duplex: %s\n",
6486 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6487 	if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
6488 		seq_printf(seq,
6489 			   "\tNumber of Additional MAC address registers: %d\n",
6490 			   priv->dma_cap.multi_addr);
6491 	} else {
6492 		seq_printf(seq, "\tHash Filter: %s\n",
6493 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6494 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6495 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6496 	}
6497 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6498 		   (priv->dma_cap.pcs) ? "Y" : "N");
6499 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6500 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6501 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6502 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6503 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6504 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6505 	seq_printf(seq, "\tRMON module: %s\n",
6506 		   (priv->dma_cap.rmon) ? "Y" : "N");
6507 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6508 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6509 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6510 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6511 	if (priv->plat->core_type == DWMAC_CORE_XGMAC)
6512 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6513 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6514 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6515 		   (priv->dma_cap.eee) ? "Y" : "N");
6516 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6517 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6518 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6519 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6520 	    priv->plat->core_type == DWMAC_CORE_XGMAC) {
6521 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6522 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6523 	} else {
6524 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6525 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6526 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6527 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6528 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6529 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6530 	}
6531 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6532 		   priv->dma_cap.number_rx_channel);
6533 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6534 		   priv->dma_cap.number_tx_channel);
6535 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6536 		   priv->dma_cap.number_rx_queues);
6537 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6538 		   priv->dma_cap.number_tx_queues);
6539 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6540 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6541 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6542 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6543 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6544 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6545 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6546 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6547 		   priv->dma_cap.pps_out_num);
6548 	seq_printf(seq, "\tSafety Features: %s\n",
6549 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6550 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6551 		   priv->dma_cap.frpsel ? "Y" : "N");
6552 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6553 		   priv->dma_cap.host_dma_width);
6554 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6555 		   priv->dma_cap.rssen ? "Y" : "N");
6556 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6557 		   priv->dma_cap.vlhash ? "Y" : "N");
6558 	seq_printf(seq, "\tSplit Header: %s\n",
6559 		   priv->dma_cap.sphen ? "Y" : "N");
6560 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6561 		   priv->dma_cap.vlins ? "Y" : "N");
6562 	seq_printf(seq, "\tDouble VLAN: %s\n",
6563 		   priv->dma_cap.dvlan ? "Y" : "N");
6564 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6565 		   priv->dma_cap.l3l4fnum);
6566 	seq_printf(seq, "\tARP Offloading: %s\n",
6567 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6568 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6569 		   priv->dma_cap.estsel ? "Y" : "N");
6570 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6571 		   priv->dma_cap.fpesel ? "Y" : "N");
6572 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6573 		   priv->dma_cap.tbssel ? "Y" : "N");
6574 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6575 		   priv->dma_cap.tbs_ch_num);
6576 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6577 		   priv->dma_cap.sgfsel ? "Y" : "N");
6578 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6579 		   BIT(priv->dma_cap.ttsfd) >> 1);
6580 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6581 		   priv->dma_cap.numtc);
6582 	seq_printf(seq, "\tDCB Feature: %s\n",
6583 		   priv->dma_cap.dcben ? "Y" : "N");
6584 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6585 		   priv->dma_cap.advthword ? "Y" : "N");
6586 	seq_printf(seq, "\tPTP Offload: %s\n",
6587 		   priv->dma_cap.ptoen ? "Y" : "N");
6588 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6589 		   priv->dma_cap.osten ? "Y" : "N");
6590 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6591 		   priv->dma_cap.pfcen ? "Y" : "N");
6592 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6593 		   BIT(priv->dma_cap.frpes) << 6);
6594 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6595 		   BIT(priv->dma_cap.frpbs) << 6);
6596 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6597 		   priv->dma_cap.frppipe_num);
6598 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6599 		   priv->dma_cap.nrvf_num ?
6600 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6601 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6602 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6603 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6604 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6605 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6606 		   priv->dma_cap.cbtisel ? "Y" : "N");
6607 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6608 		   priv->dma_cap.aux_snapshot_n);
6609 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6610 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6611 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6612 		   priv->dma_cap.edma ? "Y" : "N");
6613 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6614 		   priv->dma_cap.ediffc ? "Y" : "N");
6615 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6616 		   priv->dma_cap.vxn ? "Y" : "N");
6617 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6618 		   priv->dma_cap.dbgmem ? "Y" : "N");
6619 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6620 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6621 	return 0;
6622 }
6623 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6624 
6625 /* Use network device events to rename debugfs file entries.
6626  */
stmmac_device_event(struct notifier_block * unused,unsigned long event,void * ptr)6627 static int stmmac_device_event(struct notifier_block *unused,
6628 			       unsigned long event, void *ptr)
6629 {
6630 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6631 	struct stmmac_priv *priv = netdev_priv(dev);
6632 
6633 	if (dev->netdev_ops != &stmmac_netdev_ops)
6634 		goto done;
6635 
6636 	switch (event) {
6637 	case NETDEV_CHANGENAME:
6638 		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6639 		break;
6640 	}
6641 done:
6642 	return NOTIFY_DONE;
6643 }
6644 
6645 static struct notifier_block stmmac_notifier = {
6646 	.notifier_call = stmmac_device_event,
6647 };
6648 
stmmac_init_fs(struct net_device * dev)6649 static void stmmac_init_fs(struct net_device *dev)
6650 {
6651 	struct stmmac_priv *priv = netdev_priv(dev);
6652 
6653 	rtnl_lock();
6654 
6655 	/* Create per netdev entries */
6656 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6657 
6658 	/* Entry to report DMA RX/TX rings */
6659 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6660 			    &stmmac_rings_status_fops);
6661 
6662 	/* Entry to report the DMA HW features */
6663 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6664 			    &stmmac_dma_cap_fops);
6665 
6666 	rtnl_unlock();
6667 }
6668 
stmmac_exit_fs(struct net_device * dev)6669 static void stmmac_exit_fs(struct net_device *dev)
6670 {
6671 	struct stmmac_priv *priv = netdev_priv(dev);
6672 
6673 	debugfs_remove_recursive(priv->dbgfs_dir);
6674 }
6675 #endif /* CONFIG_DEBUG_FS */
6676 
stmmac_vid_crc32_le(__le16 vid_le)6677 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6678 {
6679 	unsigned char *data = (unsigned char *)&vid_le;
6680 	unsigned char data_byte = 0;
6681 	u32 crc = ~0x0;
6682 	u32 temp = 0;
6683 	int i, bits;
6684 
6685 	bits = get_bitmask_order(VLAN_VID_MASK);
6686 	for (i = 0; i < bits; i++) {
6687 		if ((i % 8) == 0)
6688 			data_byte = data[i / 8];
6689 
6690 		temp = ((crc & 1) ^ data_byte) & 1;
6691 		crc >>= 1;
6692 		data_byte >>= 1;
6693 
6694 		if (temp)
6695 			crc ^= 0xedb88320;
6696 	}
6697 
6698 	return crc;
6699 }
6700 
stmmac_vlan_update(struct stmmac_priv * priv,bool is_double)6701 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6702 {
6703 	u32 crc, hash = 0;
6704 	u16 pmatch = 0;
6705 	int count = 0;
6706 	u16 vid = 0;
6707 
6708 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6709 		__le16 vid_le = cpu_to_le16(vid);
6710 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6711 		hash |= (1 << crc);
6712 		count++;
6713 	}
6714 
6715 	if (!priv->dma_cap.vlhash) {
6716 		if (count > 2) /* VID = 0 always passes filter */
6717 			return -EOPNOTSUPP;
6718 
6719 		pmatch = vid;
6720 		hash = 0;
6721 	}
6722 
6723 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6724 }
6725 
6726 /* FIXME: This may need RXC to be running, but it may be called with BH
6727  * disabled, which means we can't call phylink_rx_clk_stop*().
6728  */
stmmac_vlan_rx_add_vid(struct net_device * ndev,__be16 proto,u16 vid)6729 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6730 {
6731 	struct stmmac_priv *priv = netdev_priv(ndev);
6732 	bool is_double = false;
6733 	int ret;
6734 
6735 	ret = pm_runtime_resume_and_get(priv->device);
6736 	if (ret < 0)
6737 		return ret;
6738 
6739 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6740 		is_double = true;
6741 
6742 	set_bit(vid, priv->active_vlans);
6743 	ret = stmmac_vlan_update(priv, is_double);
6744 	if (ret) {
6745 		clear_bit(vid, priv->active_vlans);
6746 		goto err_pm_put;
6747 	}
6748 
6749 	if (priv->hw->num_vlan) {
6750 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6751 		if (ret)
6752 			goto err_pm_put;
6753 	}
6754 err_pm_put:
6755 	pm_runtime_put(priv->device);
6756 
6757 	return ret;
6758 }
6759 
6760 /* FIXME: This may need RXC to be running, but it may be called with BH
6761  * disabled, which means we can't call phylink_rx_clk_stop*().
6762  */
stmmac_vlan_rx_kill_vid(struct net_device * ndev,__be16 proto,u16 vid)6763 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6764 {
6765 	struct stmmac_priv *priv = netdev_priv(ndev);
6766 	bool is_double = false;
6767 	int ret;
6768 
6769 	ret = pm_runtime_resume_and_get(priv->device);
6770 	if (ret < 0)
6771 		return ret;
6772 
6773 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6774 		is_double = true;
6775 
6776 	clear_bit(vid, priv->active_vlans);
6777 
6778 	if (priv->hw->num_vlan) {
6779 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6780 		if (ret)
6781 			goto del_vlan_error;
6782 	}
6783 
6784 	ret = stmmac_vlan_update(priv, is_double);
6785 
6786 del_vlan_error:
6787 	pm_runtime_put(priv->device);
6788 
6789 	return ret;
6790 }
6791 
stmmac_bpf(struct net_device * dev,struct netdev_bpf * bpf)6792 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6793 {
6794 	struct stmmac_priv *priv = netdev_priv(dev);
6795 
6796 	switch (bpf->command) {
6797 	case XDP_SETUP_PROG:
6798 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6799 	case XDP_SETUP_XSK_POOL:
6800 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6801 					     bpf->xsk.queue_id);
6802 	default:
6803 		return -EOPNOTSUPP;
6804 	}
6805 }
6806 
stmmac_xdp_xmit(struct net_device * dev,int num_frames,struct xdp_frame ** frames,u32 flags)6807 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6808 			   struct xdp_frame **frames, u32 flags)
6809 {
6810 	struct stmmac_priv *priv = netdev_priv(dev);
6811 	int cpu = smp_processor_id();
6812 	struct netdev_queue *nq;
6813 	int i, nxmit = 0;
6814 	int queue;
6815 
6816 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6817 		return -ENETDOWN;
6818 
6819 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6820 		return -EINVAL;
6821 
6822 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6823 	nq = netdev_get_tx_queue(priv->dev, queue);
6824 
6825 	__netif_tx_lock(nq, cpu);
6826 	/* Avoids TX time-out as we are sharing with slow path */
6827 	txq_trans_cond_update(nq);
6828 
6829 	for (i = 0; i < num_frames; i++) {
6830 		int res;
6831 
6832 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6833 		if (res == STMMAC_XDP_CONSUMED)
6834 			break;
6835 
6836 		nxmit++;
6837 	}
6838 
6839 	if (flags & XDP_XMIT_FLUSH) {
6840 		stmmac_flush_tx_descriptors(priv, queue);
6841 		stmmac_tx_timer_arm(priv, queue);
6842 	}
6843 
6844 	__netif_tx_unlock(nq);
6845 
6846 	return nxmit;
6847 }
6848 
stmmac_disable_rx_queue(struct stmmac_priv * priv,u32 queue)6849 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6850 {
6851 	struct stmmac_channel *ch = &priv->channel[queue];
6852 	unsigned long flags;
6853 
6854 	spin_lock_irqsave(&ch->lock, flags);
6855 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6856 	spin_unlock_irqrestore(&ch->lock, flags);
6857 
6858 	stmmac_stop_rx_dma(priv, queue);
6859 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6860 }
6861 
stmmac_enable_rx_queue(struct stmmac_priv * priv,u32 queue)6862 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6863 {
6864 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6865 	struct stmmac_channel *ch = &priv->channel[queue];
6866 	unsigned long flags;
6867 	u32 buf_size;
6868 	int ret;
6869 
6870 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6871 	if (ret) {
6872 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6873 		return;
6874 	}
6875 
6876 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6877 	if (ret) {
6878 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6879 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6880 		return;
6881 	}
6882 
6883 	stmmac_reset_rx_queue(priv, queue);
6884 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6885 
6886 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6887 			    rx_q->dma_rx_phy, rx_q->queue_index);
6888 
6889 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6890 			     sizeof(struct dma_desc));
6891 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6892 			       rx_q->rx_tail_addr, rx_q->queue_index);
6893 
6894 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6895 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6896 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6897 				      buf_size,
6898 				      rx_q->queue_index);
6899 	} else {
6900 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6901 				      priv->dma_conf.dma_buf_sz,
6902 				      rx_q->queue_index);
6903 	}
6904 
6905 	stmmac_start_rx_dma(priv, queue);
6906 
6907 	spin_lock_irqsave(&ch->lock, flags);
6908 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6909 	spin_unlock_irqrestore(&ch->lock, flags);
6910 }
6911 
stmmac_disable_tx_queue(struct stmmac_priv * priv,u32 queue)6912 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6913 {
6914 	struct stmmac_channel *ch = &priv->channel[queue];
6915 	unsigned long flags;
6916 
6917 	spin_lock_irqsave(&ch->lock, flags);
6918 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6919 	spin_unlock_irqrestore(&ch->lock, flags);
6920 
6921 	stmmac_stop_tx_dma(priv, queue);
6922 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6923 }
6924 
stmmac_enable_tx_queue(struct stmmac_priv * priv,u32 queue)6925 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6926 {
6927 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6928 	struct stmmac_channel *ch = &priv->channel[queue];
6929 	unsigned long flags;
6930 	int ret;
6931 
6932 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6933 	if (ret) {
6934 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6935 		return;
6936 	}
6937 
6938 	ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
6939 	if (ret) {
6940 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6941 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6942 		return;
6943 	}
6944 
6945 	stmmac_reset_tx_queue(priv, queue);
6946 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6947 
6948 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6949 			    tx_q->dma_tx_phy, tx_q->queue_index);
6950 
6951 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6952 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6953 
6954 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6955 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6956 			       tx_q->tx_tail_addr, tx_q->queue_index);
6957 
6958 	stmmac_start_tx_dma(priv, queue);
6959 
6960 	spin_lock_irqsave(&ch->lock, flags);
6961 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6962 	spin_unlock_irqrestore(&ch->lock, flags);
6963 }
6964 
stmmac_xdp_release(struct net_device * dev)6965 void stmmac_xdp_release(struct net_device *dev)
6966 {
6967 	struct stmmac_priv *priv = netdev_priv(dev);
6968 	u32 chan;
6969 
6970 	/* Ensure tx function is not running */
6971 	netif_tx_disable(dev);
6972 
6973 	/* Disable NAPI process */
6974 	stmmac_disable_all_queues(priv);
6975 
6976 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6977 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6978 
6979 	/* Free the IRQ lines */
6980 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6981 
6982 	/* Stop TX/RX DMA channels */
6983 	stmmac_stop_all_dma(priv);
6984 
6985 	/* Release and free the Rx/Tx resources */
6986 	free_dma_desc_resources(priv, &priv->dma_conf);
6987 
6988 	/* Disable the MAC Rx/Tx */
6989 	stmmac_mac_set(priv, priv->ioaddr, false);
6990 
6991 	/* set trans_start so we don't get spurious
6992 	 * watchdogs during reset
6993 	 */
6994 	netif_trans_update(dev);
6995 	netif_carrier_off(dev);
6996 }
6997 
stmmac_xdp_open(struct net_device * dev)6998 int stmmac_xdp_open(struct net_device *dev)
6999 {
7000 	struct stmmac_priv *priv = netdev_priv(dev);
7001 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7002 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7003 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
7004 	struct stmmac_rx_queue *rx_q;
7005 	struct stmmac_tx_queue *tx_q;
7006 	u32 buf_size;
7007 	bool sph_en;
7008 	u32 chan;
7009 	int ret;
7010 
7011 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
7012 	if (ret < 0) {
7013 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
7014 			   __func__);
7015 		goto dma_desc_error;
7016 	}
7017 
7018 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
7019 	if (ret < 0) {
7020 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
7021 			   __func__);
7022 		goto init_error;
7023 	}
7024 
7025 	stmmac_reset_queues_param(priv);
7026 
7027 	/* DMA CSR Channel configuration */
7028 	for (chan = 0; chan < dma_csr_ch; chan++) {
7029 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
7030 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
7031 	}
7032 
7033 	/* Adjust Split header */
7034 	sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
7035 
7036 	/* DMA RX Channel Configuration */
7037 	for (chan = 0; chan < rx_cnt; chan++) {
7038 		rx_q = &priv->dma_conf.rx_queue[chan];
7039 
7040 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7041 				    rx_q->dma_rx_phy, chan);
7042 
7043 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
7044 				     (rx_q->buf_alloc_num *
7045 				      sizeof(struct dma_desc));
7046 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
7047 				       rx_q->rx_tail_addr, chan);
7048 
7049 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
7050 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
7051 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7052 					      buf_size,
7053 					      rx_q->queue_index);
7054 		} else {
7055 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7056 					      priv->dma_conf.dma_buf_sz,
7057 					      rx_q->queue_index);
7058 		}
7059 
7060 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
7061 	}
7062 
7063 	/* DMA TX Channel Configuration */
7064 	for (chan = 0; chan < tx_cnt; chan++) {
7065 		tx_q = &priv->dma_conf.tx_queue[chan];
7066 
7067 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7068 				    tx_q->dma_tx_phy, chan);
7069 
7070 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7071 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7072 				       tx_q->tx_tail_addr, chan);
7073 
7074 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7075 	}
7076 
7077 	/* Enable the MAC Rx/Tx */
7078 	stmmac_mac_set(priv, priv->ioaddr, true);
7079 
7080 	/* Start Rx & Tx DMA Channels */
7081 	stmmac_start_all_dma(priv);
7082 
7083 	ret = stmmac_request_irq(dev);
7084 	if (ret)
7085 		goto irq_error;
7086 
7087 	/* Enable NAPI process*/
7088 	stmmac_enable_all_queues(priv);
7089 	netif_carrier_on(dev);
7090 	netif_tx_start_all_queues(dev);
7091 	stmmac_enable_all_dma_irq(priv);
7092 
7093 	return 0;
7094 
7095 irq_error:
7096 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7097 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7098 
7099 init_error:
7100 	free_dma_desc_resources(priv, &priv->dma_conf);
7101 dma_desc_error:
7102 	return ret;
7103 }
7104 
stmmac_xsk_wakeup(struct net_device * dev,u32 queue,u32 flags)7105 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7106 {
7107 	struct stmmac_priv *priv = netdev_priv(dev);
7108 	struct stmmac_rx_queue *rx_q;
7109 	struct stmmac_tx_queue *tx_q;
7110 	struct stmmac_channel *ch;
7111 
7112 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7113 	    !netif_carrier_ok(priv->dev))
7114 		return -ENETDOWN;
7115 
7116 	if (!stmmac_xdp_is_enabled(priv))
7117 		return -EINVAL;
7118 
7119 	if (queue >= priv->plat->rx_queues_to_use ||
7120 	    queue >= priv->plat->tx_queues_to_use)
7121 		return -EINVAL;
7122 
7123 	rx_q = &priv->dma_conf.rx_queue[queue];
7124 	tx_q = &priv->dma_conf.tx_queue[queue];
7125 	ch = &priv->channel[queue];
7126 
7127 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7128 		return -EINVAL;
7129 
7130 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7131 		/* EQoS does not have per-DMA channel SW interrupt,
7132 		 * so we schedule RX Napi straight-away.
7133 		 */
7134 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7135 			__napi_schedule(&ch->rxtx_napi);
7136 	}
7137 
7138 	return 0;
7139 }
7140 
stmmac_get_stats64(struct net_device * dev,struct rtnl_link_stats64 * stats)7141 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7142 {
7143 	struct stmmac_priv *priv = netdev_priv(dev);
7144 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7145 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7146 	unsigned int start;
7147 	int q;
7148 
7149 	for (q = 0; q < tx_cnt; q++) {
7150 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7151 		u64 tx_packets;
7152 		u64 tx_bytes;
7153 
7154 		do {
7155 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7156 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7157 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7158 		do {
7159 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7160 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7161 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7162 
7163 		stats->tx_packets += tx_packets;
7164 		stats->tx_bytes += tx_bytes;
7165 	}
7166 
7167 	for (q = 0; q < rx_cnt; q++) {
7168 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7169 		u64 rx_packets;
7170 		u64 rx_bytes;
7171 
7172 		do {
7173 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7174 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7175 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7176 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7177 
7178 		stats->rx_packets += rx_packets;
7179 		stats->rx_bytes += rx_bytes;
7180 	}
7181 
7182 	stats->rx_dropped = priv->xstats.rx_dropped;
7183 	stats->rx_errors = priv->xstats.rx_errors;
7184 	stats->tx_dropped = priv->xstats.tx_dropped;
7185 	stats->tx_errors = priv->xstats.tx_errors;
7186 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7187 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7188 	stats->rx_length_errors = priv->xstats.rx_length;
7189 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7190 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7191 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7192 }
7193 
7194 static const struct net_device_ops stmmac_netdev_ops = {
7195 	.ndo_open = stmmac_open,
7196 	.ndo_start_xmit = stmmac_xmit,
7197 	.ndo_stop = stmmac_release,
7198 	.ndo_change_mtu = stmmac_change_mtu,
7199 	.ndo_fix_features = stmmac_fix_features,
7200 	.ndo_set_features = stmmac_set_features,
7201 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7202 	.ndo_tx_timeout = stmmac_tx_timeout,
7203 	.ndo_eth_ioctl = stmmac_ioctl,
7204 	.ndo_get_stats64 = stmmac_get_stats64,
7205 	.ndo_setup_tc = stmmac_setup_tc,
7206 	.ndo_select_queue = stmmac_select_queue,
7207 	.ndo_set_mac_address = stmmac_set_mac_address,
7208 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7209 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7210 	.ndo_bpf = stmmac_bpf,
7211 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7212 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7213 	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
7214 	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
7215 };
7216 
stmmac_reset_subtask(struct stmmac_priv * priv)7217 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7218 {
7219 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7220 		return;
7221 	if (test_bit(STMMAC_DOWN, &priv->state))
7222 		return;
7223 
7224 	netdev_err(priv->dev, "Reset adapter.\n");
7225 
7226 	rtnl_lock();
7227 	netif_trans_update(priv->dev);
7228 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7229 		usleep_range(1000, 2000);
7230 
7231 	set_bit(STMMAC_DOWN, &priv->state);
7232 	dev_close(priv->dev);
7233 	dev_open(priv->dev, NULL);
7234 	clear_bit(STMMAC_DOWN, &priv->state);
7235 	clear_bit(STMMAC_RESETING, &priv->state);
7236 	rtnl_unlock();
7237 }
7238 
stmmac_service_task(struct work_struct * work)7239 static void stmmac_service_task(struct work_struct *work)
7240 {
7241 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7242 			service_task);
7243 
7244 	stmmac_reset_subtask(priv);
7245 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7246 }
7247 
7248 /**
7249  *  stmmac_hw_init - Init the MAC device
7250  *  @priv: driver private structure
7251  *  Description: this function is to configure the MAC device according to
7252  *  some platform parameters or the HW capability register. It prepares the
7253  *  driver to use either ring or chain modes and to setup either enhanced or
7254  *  normal descriptors.
7255  */
stmmac_hw_init(struct stmmac_priv * priv)7256 static int stmmac_hw_init(struct stmmac_priv *priv)
7257 {
7258 	int ret;
7259 
7260 	/* dwmac-sun8i only work in chain mode */
7261 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7262 		chain_mode = 1;
7263 	priv->chain_mode = chain_mode;
7264 
7265 	/* Initialize HW Interface */
7266 	ret = stmmac_hwif_init(priv);
7267 	if (ret)
7268 		return ret;
7269 
7270 	/* Get the HW capability (new GMAC newer than 3.50a) */
7271 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7272 	if (priv->hw_cap_support) {
7273 		dev_info(priv->device, "DMA HW capability register supported\n");
7274 
7275 		/* We can override some gmac/dma configuration fields: e.g.
7276 		 * enh_desc, tx_coe (e.g. that are passed through the
7277 		 * platform) with the values from the HW capability
7278 		 * register (if supported).
7279 		 */
7280 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7281 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7282 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7283 		if (priv->dma_cap.hash_tb_sz) {
7284 			priv->hw->multicast_filter_bins =
7285 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7286 			priv->hw->mcast_bits_log2 =
7287 					ilog2(priv->hw->multicast_filter_bins);
7288 		}
7289 
7290 		/* TXCOE doesn't work in thresh DMA mode */
7291 		if (priv->plat->force_thresh_dma_mode)
7292 			priv->plat->tx_coe = 0;
7293 		else
7294 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7295 
7296 		/* In case of GMAC4 rx_coe is from HW cap register. */
7297 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7298 
7299 		if (priv->dma_cap.rx_coe_type2)
7300 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7301 		else if (priv->dma_cap.rx_coe_type1)
7302 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7303 
7304 	} else {
7305 		dev_info(priv->device, "No HW DMA feature register supported\n");
7306 	}
7307 
7308 	if (priv->plat->rx_coe) {
7309 		priv->hw->rx_csum = priv->plat->rx_coe;
7310 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7311 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7312 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7313 	}
7314 	if (priv->plat->tx_coe)
7315 		dev_info(priv->device, "TX Checksum insertion supported\n");
7316 
7317 	if (priv->plat->pmt) {
7318 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7319 		device_set_wakeup_capable(priv->device, 1);
7320 		devm_pm_set_wake_irq(priv->device, priv->wol_irq);
7321 	}
7322 
7323 	if (priv->dma_cap.tsoen)
7324 		dev_info(priv->device, "TSO supported\n");
7325 
7326 	if (priv->dma_cap.number_rx_queues &&
7327 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7328 		dev_warn(priv->device,
7329 			 "Number of Rx queues (%u) exceeds dma capability\n",
7330 			 priv->plat->rx_queues_to_use);
7331 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7332 	}
7333 	if (priv->dma_cap.number_tx_queues &&
7334 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7335 		dev_warn(priv->device,
7336 			 "Number of Tx queues (%u) exceeds dma capability\n",
7337 			 priv->plat->tx_queues_to_use);
7338 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7339 	}
7340 
7341 	if (priv->dma_cap.rx_fifo_size &&
7342 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7343 		dev_warn(priv->device,
7344 			 "Rx FIFO size (%u) exceeds dma capability\n",
7345 			 priv->plat->rx_fifo_size);
7346 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7347 	}
7348 	if (priv->dma_cap.tx_fifo_size &&
7349 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7350 		dev_warn(priv->device,
7351 			 "Tx FIFO size (%u) exceeds dma capability\n",
7352 			 priv->plat->tx_fifo_size);
7353 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7354 	}
7355 
7356 	priv->hw->vlan_fail_q_en =
7357 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7358 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7359 
7360 	/* Run HW quirks, if any */
7361 	if (priv->hwif_quirks) {
7362 		ret = priv->hwif_quirks(priv);
7363 		if (ret)
7364 			return ret;
7365 	}
7366 
7367 	/* Rx Watchdog is available in the COREs newer than the 3.40.
7368 	 * In some case, for example on bugged HW this feature
7369 	 * has to be disable and this can be done by passing the
7370 	 * riwt_off field from the platform.
7371 	 */
7372 	if ((priv->synopsys_id >= DWMAC_CORE_3_50 ||
7373 	     priv->plat->core_type == DWMAC_CORE_XGMAC) &&
7374 	    !priv->plat->riwt_off) {
7375 		priv->use_riwt = 1;
7376 		dev_info(priv->device,
7377 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7378 	}
7379 
7380 	/* Unimplemented PCS init (as indicated by stmmac_do_callback()
7381 	 * perversely returning -EINVAL) is non-fatal.
7382 	 */
7383 	ret = stmmac_mac_pcs_init(priv);
7384 	if (ret != -EINVAL)
7385 		return ret;
7386 
7387 	return 0;
7388 }
7389 
stmmac_napi_add(struct net_device * dev)7390 static void stmmac_napi_add(struct net_device *dev)
7391 {
7392 	struct stmmac_priv *priv = netdev_priv(dev);
7393 	u32 queue, maxq;
7394 
7395 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7396 
7397 	for (queue = 0; queue < maxq; queue++) {
7398 		struct stmmac_channel *ch = &priv->channel[queue];
7399 
7400 		ch->priv_data = priv;
7401 		ch->index = queue;
7402 		spin_lock_init(&ch->lock);
7403 
7404 		if (queue < priv->plat->rx_queues_to_use) {
7405 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7406 		}
7407 		if (queue < priv->plat->tx_queues_to_use) {
7408 			netif_napi_add_tx(dev, &ch->tx_napi,
7409 					  stmmac_napi_poll_tx);
7410 		}
7411 		if (queue < priv->plat->rx_queues_to_use &&
7412 		    queue < priv->plat->tx_queues_to_use) {
7413 			netif_napi_add(dev, &ch->rxtx_napi,
7414 				       stmmac_napi_poll_rxtx);
7415 		}
7416 	}
7417 }
7418 
stmmac_napi_del(struct net_device * dev)7419 static void stmmac_napi_del(struct net_device *dev)
7420 {
7421 	struct stmmac_priv *priv = netdev_priv(dev);
7422 	u32 queue, maxq;
7423 
7424 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7425 
7426 	for (queue = 0; queue < maxq; queue++) {
7427 		struct stmmac_channel *ch = &priv->channel[queue];
7428 
7429 		if (queue < priv->plat->rx_queues_to_use)
7430 			netif_napi_del(&ch->rx_napi);
7431 		if (queue < priv->plat->tx_queues_to_use)
7432 			netif_napi_del(&ch->tx_napi);
7433 		if (queue < priv->plat->rx_queues_to_use &&
7434 		    queue < priv->plat->tx_queues_to_use) {
7435 			netif_napi_del(&ch->rxtx_napi);
7436 		}
7437 	}
7438 }
7439 
stmmac_reinit_queues(struct net_device * dev,u32 rx_cnt,u32 tx_cnt)7440 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7441 {
7442 	struct stmmac_priv *priv = netdev_priv(dev);
7443 	int ret = 0, i;
7444 
7445 	if (netif_running(dev))
7446 		stmmac_release(dev);
7447 
7448 	stmmac_napi_del(dev);
7449 
7450 	priv->plat->rx_queues_to_use = rx_cnt;
7451 	priv->plat->tx_queues_to_use = tx_cnt;
7452 	if (!netif_is_rxfh_configured(dev))
7453 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7454 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7455 									rx_cnt);
7456 
7457 	stmmac_napi_add(dev);
7458 
7459 	if (netif_running(dev))
7460 		ret = stmmac_open(dev);
7461 
7462 	return ret;
7463 }
7464 
stmmac_reinit_ringparam(struct net_device * dev,u32 rx_size,u32 tx_size)7465 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7466 {
7467 	struct stmmac_priv *priv = netdev_priv(dev);
7468 	int ret = 0;
7469 
7470 	if (netif_running(dev))
7471 		stmmac_release(dev);
7472 
7473 	priv->dma_conf.dma_rx_size = rx_size;
7474 	priv->dma_conf.dma_tx_size = tx_size;
7475 
7476 	if (netif_running(dev))
7477 		ret = stmmac_open(dev);
7478 
7479 	return ret;
7480 }
7481 
stmmac_xdp_rx_timestamp(const struct xdp_md * _ctx,u64 * timestamp)7482 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7483 {
7484 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7485 	struct dma_desc *desc_contains_ts = ctx->desc;
7486 	struct stmmac_priv *priv = ctx->priv;
7487 	struct dma_desc *ndesc = ctx->ndesc;
7488 	struct dma_desc *desc = ctx->desc;
7489 	u64 ns = 0;
7490 
7491 	if (!priv->hwts_rx_en)
7492 		return -ENODATA;
7493 
7494 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7495 	if (dwmac_is_xmac(priv->plat->core_type))
7496 		desc_contains_ts = ndesc;
7497 
7498 	/* Check if timestamp is available */
7499 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7500 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7501 		ns -= priv->plat->cdc_error_adj;
7502 		*timestamp = ns_to_ktime(ns);
7503 		return 0;
7504 	}
7505 
7506 	return -ENODATA;
7507 }
7508 
7509 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7510 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7511 };
7512 
stmmac_dl_ts_coarse_set(struct devlink * dl,u32 id,struct devlink_param_gset_ctx * ctx,struct netlink_ext_ack * extack)7513 static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
7514 				   struct devlink_param_gset_ctx *ctx,
7515 				   struct netlink_ext_ack *extack)
7516 {
7517 	struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
7518 	struct stmmac_priv *priv = dl_priv->stmmac_priv;
7519 
7520 	priv->tsfupdt_coarse = ctx->val.vbool;
7521 
7522 	if (priv->tsfupdt_coarse)
7523 		priv->systime_flags &= ~PTP_TCR_TSCFUPDT;
7524 	else
7525 		priv->systime_flags |= PTP_TCR_TSCFUPDT;
7526 
7527 	/* In Coarse mode, we can use a smaller subsecond increment, let's
7528 	 * reconfigure the systime, subsecond increment and addend.
7529 	 */
7530 	stmmac_update_subsecond_increment(priv);
7531 
7532 	return 0;
7533 }
7534 
stmmac_dl_ts_coarse_get(struct devlink * dl,u32 id,struct devlink_param_gset_ctx * ctx,struct netlink_ext_ack * extack)7535 static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
7536 				   struct devlink_param_gset_ctx *ctx,
7537 				   struct netlink_ext_ack *extack)
7538 {
7539 	struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
7540 	struct stmmac_priv *priv = dl_priv->stmmac_priv;
7541 
7542 	ctx->val.vbool = priv->tsfupdt_coarse;
7543 
7544 	return 0;
7545 }
7546 
7547 static const struct devlink_param stmmac_devlink_params[] = {
7548 	DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "phc_coarse_adj",
7549 			     DEVLINK_PARAM_TYPE_BOOL,
7550 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
7551 			     stmmac_dl_ts_coarse_get,
7552 			     stmmac_dl_ts_coarse_set, NULL),
7553 };
7554 
7555 /* None of the generic devlink parameters are implemented */
7556 static const struct devlink_ops stmmac_devlink_ops = {};
7557 
stmmac_register_devlink(struct stmmac_priv * priv)7558 static int stmmac_register_devlink(struct stmmac_priv *priv)
7559 {
7560 	struct stmmac_devlink_priv *dl_priv;
7561 	int ret;
7562 
7563 	/* For now, what is exposed over devlink is only relevant when
7564 	 * timestamping is available and we have a valid ptp clock rate
7565 	 */
7566 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) ||
7567 	    !priv->plat->clk_ptp_rate)
7568 		return 0;
7569 
7570 	priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv),
7571 				      priv->device);
7572 	if (!priv->devlink)
7573 		return -ENOMEM;
7574 
7575 	dl_priv = devlink_priv(priv->devlink);
7576 	dl_priv->stmmac_priv = priv;
7577 
7578 	ret = devlink_params_register(priv->devlink, stmmac_devlink_params,
7579 				      ARRAY_SIZE(stmmac_devlink_params));
7580 	if (ret)
7581 		goto dl_free;
7582 
7583 	devlink_register(priv->devlink);
7584 	return 0;
7585 
7586 dl_free:
7587 	devlink_free(priv->devlink);
7588 
7589 	return ret;
7590 }
7591 
stmmac_unregister_devlink(struct stmmac_priv * priv)7592 static void stmmac_unregister_devlink(struct stmmac_priv *priv)
7593 {
7594 	if (!priv->devlink)
7595 		return;
7596 
7597 	devlink_unregister(priv->devlink);
7598 	devlink_params_unregister(priv->devlink, stmmac_devlink_params,
7599 				  ARRAY_SIZE(stmmac_devlink_params));
7600 	devlink_free(priv->devlink);
7601 }
7602 
stmmac_plat_dat_alloc(struct device * dev)7603 struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev)
7604 {
7605 	struct plat_stmmacenet_data *plat_dat;
7606 	int i;
7607 
7608 	plat_dat = devm_kzalloc(dev, sizeof(*plat_dat), GFP_KERNEL);
7609 	if (!plat_dat)
7610 		return NULL;
7611 
7612 	/* Set the defaults:
7613 	 * - phy autodetection
7614 	 * - determine GMII_Address CR field from CSR clock
7615 	 * - allow MTU up to JUMBO_LEN
7616 	 * - hash table size
7617 	 * - one unicast filter entry
7618 	 */
7619 	plat_dat->phy_addr = -1;
7620 	plat_dat->clk_csr = -1;
7621 	plat_dat->maxmtu = JUMBO_LEN;
7622 	plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
7623 	plat_dat->unicast_filter_entries = 1;
7624 
7625 	/* Set the mtl defaults */
7626 	plat_dat->tx_queues_to_use = 1;
7627 	plat_dat->rx_queues_to_use = 1;
7628 
7629 	/* Setup the default RX queue channel map */
7630 	for (i = 0; i < ARRAY_SIZE(plat_dat->rx_queues_cfg); i++)
7631 		plat_dat->rx_queues_cfg[i].chan = i;
7632 
7633 	return plat_dat;
7634 }
7635 EXPORT_SYMBOL_GPL(stmmac_plat_dat_alloc);
7636 
__stmmac_dvr_probe(struct device * device,struct plat_stmmacenet_data * plat_dat,struct stmmac_resources * res)7637 static int __stmmac_dvr_probe(struct device *device,
7638 			      struct plat_stmmacenet_data *plat_dat,
7639 			      struct stmmac_resources *res)
7640 {
7641 	struct net_device *ndev = NULL;
7642 	struct stmmac_priv *priv;
7643 	u32 rxq;
7644 	int i, ret = 0;
7645 
7646 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7647 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7648 	if (!ndev)
7649 		return -ENOMEM;
7650 
7651 	SET_NETDEV_DEV(ndev, device);
7652 
7653 	priv = netdev_priv(ndev);
7654 	priv->device = device;
7655 	priv->dev = ndev;
7656 
7657 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7658 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7659 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7660 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7661 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7662 	}
7663 
7664 	priv->xstats.pcpu_stats =
7665 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7666 	if (!priv->xstats.pcpu_stats)
7667 		return -ENOMEM;
7668 
7669 	stmmac_set_ethtool_ops(ndev);
7670 	priv->pause_time = pause;
7671 	priv->plat = plat_dat;
7672 	priv->ioaddr = res->addr;
7673 	priv->dev->base_addr = (unsigned long)res->addr;
7674 	priv->plat->dma_cfg->multi_msi_en =
7675 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7676 
7677 	priv->dev->irq = res->irq;
7678 	priv->wol_irq = res->wol_irq;
7679 	priv->lpi_irq = res->lpi_irq;
7680 	priv->sfty_irq = res->sfty_irq;
7681 	priv->sfty_ce_irq = res->sfty_ce_irq;
7682 	priv->sfty_ue_irq = res->sfty_ue_irq;
7683 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7684 		priv->rx_irq[i] = res->rx_irq[i];
7685 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7686 		priv->tx_irq[i] = res->tx_irq[i];
7687 
7688 	if (!is_zero_ether_addr(res->mac))
7689 		eth_hw_addr_set(priv->dev, res->mac);
7690 
7691 	dev_set_drvdata(device, priv->dev);
7692 
7693 	/* Verify driver arguments */
7694 	stmmac_verify_args();
7695 
7696 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7697 	if (!priv->af_xdp_zc_qps)
7698 		return -ENOMEM;
7699 
7700 	/* Allocate workqueue */
7701 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7702 	if (!priv->wq) {
7703 		dev_err(priv->device, "failed to create workqueue\n");
7704 		ret = -ENOMEM;
7705 		goto error_wq_init;
7706 	}
7707 
7708 	INIT_WORK(&priv->service_task, stmmac_service_task);
7709 
7710 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7711 
7712 	/* Override with kernel parameters if supplied XXX CRS XXX
7713 	 * this needs to have multiple instances
7714 	 */
7715 	if ((phyaddr >= 0) && (phyaddr <= 31))
7716 		priv->plat->phy_addr = phyaddr;
7717 
7718 	if (priv->plat->stmmac_rst) {
7719 		ret = reset_control_assert(priv->plat->stmmac_rst);
7720 		reset_control_deassert(priv->plat->stmmac_rst);
7721 		/* Some reset controllers have only reset callback instead of
7722 		 * assert + deassert callbacks pair.
7723 		 */
7724 		if (ret == -ENOTSUPP)
7725 			reset_control_reset(priv->plat->stmmac_rst);
7726 	}
7727 
7728 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7729 	if (ret == -ENOTSUPP)
7730 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7731 			ERR_PTR(ret));
7732 
7733 	/* Wait a bit for the reset to take effect */
7734 	udelay(10);
7735 
7736 	/* Init MAC and get the capabilities */
7737 	ret = stmmac_hw_init(priv);
7738 	if (ret)
7739 		goto error_hw_init;
7740 
7741 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7742 	 */
7743 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7744 		priv->plat->dma_cfg->dche = false;
7745 
7746 	stmmac_check_ether_addr(priv);
7747 
7748 	ndev->netdev_ops = &stmmac_netdev_ops;
7749 
7750 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7751 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7752 
7753 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7754 			    NETIF_F_RXCSUM;
7755 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7756 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7757 
7758 	ret = stmmac_tc_init(priv, priv);
7759 	if (!ret) {
7760 		ndev->hw_features |= NETIF_F_HW_TC;
7761 	}
7762 
7763 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7764 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7765 		if (priv->plat->core_type == DWMAC_CORE_GMAC4)
7766 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7767 		priv->tso = true;
7768 		dev_info(priv->device, "TSO feature enabled\n");
7769 	}
7770 
7771 	if (priv->dma_cap.sphen &&
7772 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7773 		ndev->hw_features |= NETIF_F_GRO;
7774 		priv->sph_capable = true;
7775 		priv->sph_active = priv->sph_capable;
7776 		dev_info(priv->device, "SPH feature enabled\n");
7777 	}
7778 
7779 	/* Ideally our host DMA address width is the same as for the
7780 	 * device. However, it may differ and then we have to use our
7781 	 * host DMA width for allocation and the device DMA width for
7782 	 * register handling.
7783 	 */
7784 	if (priv->plat->host_dma_width)
7785 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7786 	else
7787 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7788 
7789 	if (priv->dma_cap.host_dma_width) {
7790 		ret = dma_set_mask_and_coherent(device,
7791 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7792 		if (!ret) {
7793 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7794 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7795 
7796 			/*
7797 			 * If more than 32 bits can be addressed, make sure to
7798 			 * enable enhanced addressing mode.
7799 			 */
7800 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7801 				priv->plat->dma_cfg->eame = true;
7802 		} else {
7803 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7804 			if (ret) {
7805 				dev_err(priv->device, "Failed to set DMA Mask\n");
7806 				goto error_hw_init;
7807 			}
7808 
7809 			priv->dma_cap.host_dma_width = 32;
7810 		}
7811 	}
7812 
7813 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7814 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7815 #ifdef STMMAC_VLAN_TAG_USED
7816 	/* Both mac100 and gmac support receive VLAN tag detection */
7817 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7818 	if (dwmac_is_xmac(priv->plat->core_type)) {
7819 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7820 		priv->hw->hw_vlan_en = true;
7821 	}
7822 	if (priv->dma_cap.vlhash) {
7823 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7824 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7825 	}
7826 	if (priv->dma_cap.vlins)
7827 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7828 #endif
7829 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7830 
7831 	priv->xstats.threshold = tc;
7832 
7833 	/* Initialize RSS */
7834 	rxq = priv->plat->rx_queues_to_use;
7835 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7836 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7837 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7838 
7839 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7840 		ndev->features |= NETIF_F_RXHASH;
7841 
7842 	ndev->vlan_features |= ndev->features;
7843 
7844 	/* MTU range: 46 - hw-specific max */
7845 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7846 
7847 	if (priv->plat->core_type == DWMAC_CORE_XGMAC)
7848 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7849 	else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00)
7850 		ndev->max_mtu = JUMBO_LEN;
7851 	else
7852 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7853 
7854 	/* Warn if the platform's maxmtu is smaller than the minimum MTU,
7855 	 * otherwise clamp the maximum MTU above to the platform's maxmtu.
7856 	 */
7857 	if (priv->plat->maxmtu < ndev->min_mtu)
7858 		dev_warn(priv->device,
7859 			 "%s: warning: maxmtu having invalid value (%d)\n",
7860 			 __func__, priv->plat->maxmtu);
7861 	else if (priv->plat->maxmtu < ndev->max_mtu)
7862 		ndev->max_mtu = priv->plat->maxmtu;
7863 
7864 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7865 
7866 	/* Setup channels NAPI */
7867 	stmmac_napi_add(ndev);
7868 
7869 	mutex_init(&priv->lock);
7870 
7871 	stmmac_fpe_init(priv);
7872 
7873 	stmmac_check_pcs_mode(priv);
7874 
7875 	pm_runtime_get_noresume(device);
7876 	pm_runtime_set_active(device);
7877 	if (!pm_runtime_enabled(device))
7878 		pm_runtime_enable(device);
7879 
7880 	ret = stmmac_mdio_register(ndev);
7881 	if (ret < 0) {
7882 		dev_err_probe(priv->device, ret,
7883 			      "MDIO bus (id: %d) registration failed\n",
7884 			      priv->plat->bus_id);
7885 		goto error_mdio_register;
7886 	}
7887 
7888 	ret = stmmac_pcs_setup(ndev);
7889 	if (ret)
7890 		goto error_pcs_setup;
7891 
7892 	ret = stmmac_phylink_setup(priv);
7893 	if (ret) {
7894 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7895 		goto error_phy_setup;
7896 	}
7897 
7898 	ret = stmmac_register_devlink(priv);
7899 	if (ret)
7900 		goto error_devlink_setup;
7901 
7902 	ret = register_netdev(ndev);
7903 	if (ret) {
7904 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7905 			__func__, ret);
7906 		goto error_netdev_register;
7907 	}
7908 
7909 #ifdef CONFIG_DEBUG_FS
7910 	stmmac_init_fs(ndev);
7911 #endif
7912 
7913 	if (priv->plat->dump_debug_regs)
7914 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7915 
7916 	/* Let pm_runtime_put() disable the clocks.
7917 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7918 	 */
7919 	pm_runtime_put(device);
7920 
7921 	return ret;
7922 
7923 error_netdev_register:
7924 	stmmac_unregister_devlink(priv);
7925 error_devlink_setup:
7926 	phylink_destroy(priv->phylink);
7927 error_phy_setup:
7928 	stmmac_pcs_clean(ndev);
7929 error_pcs_setup:
7930 	stmmac_mdio_unregister(ndev);
7931 error_mdio_register:
7932 	stmmac_napi_del(ndev);
7933 error_hw_init:
7934 	destroy_workqueue(priv->wq);
7935 error_wq_init:
7936 	bitmap_free(priv->af_xdp_zc_qps);
7937 
7938 	return ret;
7939 }
7940 
7941 /**
7942  * stmmac_dvr_probe
7943  * @dev: device pointer
7944  * @plat_dat: platform data pointer
7945  * @res: stmmac resource pointer
7946  * Description: this is the main probe function used to
7947  * call the alloc_etherdev, allocate the priv structure.
7948  * Return:
7949  * returns 0 on success, otherwise errno.
7950  */
stmmac_dvr_probe(struct device * dev,struct plat_stmmacenet_data * plat_dat,struct stmmac_resources * res)7951 int stmmac_dvr_probe(struct device *dev, struct plat_stmmacenet_data *plat_dat,
7952 		     struct stmmac_resources *res)
7953 {
7954 	int ret;
7955 
7956 	if (plat_dat->init) {
7957 		ret = plat_dat->init(dev, plat_dat->bsp_priv);
7958 		if (ret)
7959 			return ret;
7960 	}
7961 
7962 	ret = __stmmac_dvr_probe(dev, plat_dat, res);
7963 	if (ret && plat_dat->exit)
7964 		plat_dat->exit(dev, plat_dat->bsp_priv);
7965 
7966 	return ret;
7967 }
7968 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7969 
7970 /**
7971  * stmmac_dvr_remove
7972  * @dev: device pointer
7973  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7974  * changes the link status, releases the DMA descriptor rings.
7975  */
stmmac_dvr_remove(struct device * dev)7976 void stmmac_dvr_remove(struct device *dev)
7977 {
7978 	struct net_device *ndev = dev_get_drvdata(dev);
7979 	struct stmmac_priv *priv = netdev_priv(ndev);
7980 
7981 	netdev_info(priv->dev, "%s: removing driver", __func__);
7982 
7983 	pm_runtime_get_sync(dev);
7984 
7985 	unregister_netdev(ndev);
7986 
7987 #ifdef CONFIG_DEBUG_FS
7988 	stmmac_exit_fs(ndev);
7989 #endif
7990 	stmmac_unregister_devlink(priv);
7991 
7992 	phylink_destroy(priv->phylink);
7993 	if (priv->plat->stmmac_rst)
7994 		reset_control_assert(priv->plat->stmmac_rst);
7995 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7996 
7997 	stmmac_pcs_clean(ndev);
7998 	stmmac_mdio_unregister(ndev);
7999 
8000 	destroy_workqueue(priv->wq);
8001 	mutex_destroy(&priv->lock);
8002 	bitmap_free(priv->af_xdp_zc_qps);
8003 
8004 	pm_runtime_disable(dev);
8005 	pm_runtime_put_noidle(dev);
8006 
8007 	if (priv->plat->exit)
8008 		priv->plat->exit(dev, priv->plat->bsp_priv);
8009 }
8010 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
8011 
8012 /**
8013  * stmmac_suspend - suspend callback
8014  * @dev: device pointer
8015  * Description: this is the function to suspend the device and it is called
8016  * by the platform driver to stop the network queue, release the resources,
8017  * program the PMT register (for WoL), clean and release driver resources.
8018  */
stmmac_suspend(struct device * dev)8019 int stmmac_suspend(struct device *dev)
8020 {
8021 	struct net_device *ndev = dev_get_drvdata(dev);
8022 	struct stmmac_priv *priv = netdev_priv(ndev);
8023 	u32 chan;
8024 
8025 	if (!ndev || !netif_running(ndev))
8026 		return 0;
8027 
8028 	mutex_lock(&priv->lock);
8029 
8030 	netif_device_detach(ndev);
8031 
8032 	stmmac_disable_all_queues(priv);
8033 
8034 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
8035 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
8036 
8037 	if (priv->eee_sw_timer_en) {
8038 		priv->tx_path_in_lpi_mode = false;
8039 		timer_delete_sync(&priv->eee_ctrl_timer);
8040 	}
8041 
8042 	/* Stop TX/RX DMA */
8043 	stmmac_stop_all_dma(priv);
8044 
8045 	if (priv->plat->serdes_powerdown)
8046 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
8047 
8048 	/* Enable Power down mode by programming the PMT regs */
8049 	if (priv->wolopts) {
8050 		stmmac_pmt(priv, priv->hw, priv->wolopts);
8051 		priv->irq_wake = 1;
8052 	} else {
8053 		stmmac_mac_set(priv, priv->ioaddr, false);
8054 		pinctrl_pm_select_sleep_state(priv->device);
8055 	}
8056 
8057 	mutex_unlock(&priv->lock);
8058 
8059 	rtnl_lock();
8060 	phylink_suspend(priv->phylink, !!priv->wolopts);
8061 	rtnl_unlock();
8062 
8063 	if (stmmac_fpe_supported(priv))
8064 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
8065 
8066 	if (priv->plat->suspend)
8067 		return priv->plat->suspend(dev, priv->plat->bsp_priv);
8068 
8069 	return 0;
8070 }
8071 EXPORT_SYMBOL_GPL(stmmac_suspend);
8072 
stmmac_reset_rx_queue(struct stmmac_priv * priv,u32 queue)8073 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
8074 {
8075 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
8076 
8077 	rx_q->cur_rx = 0;
8078 	rx_q->dirty_rx = 0;
8079 }
8080 
stmmac_reset_tx_queue(struct stmmac_priv * priv,u32 queue)8081 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
8082 {
8083 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
8084 
8085 	tx_q->cur_tx = 0;
8086 	tx_q->dirty_tx = 0;
8087 	tx_q->mss = 0;
8088 
8089 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
8090 }
8091 
8092 /**
8093  * stmmac_reset_queues_param - reset queue parameters
8094  * @priv: device pointer
8095  */
stmmac_reset_queues_param(struct stmmac_priv * priv)8096 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
8097 {
8098 	u32 rx_cnt = priv->plat->rx_queues_to_use;
8099 	u32 tx_cnt = priv->plat->tx_queues_to_use;
8100 	u32 queue;
8101 
8102 	for (queue = 0; queue < rx_cnt; queue++)
8103 		stmmac_reset_rx_queue(priv, queue);
8104 
8105 	for (queue = 0; queue < tx_cnt; queue++)
8106 		stmmac_reset_tx_queue(priv, queue);
8107 }
8108 
8109 /**
8110  * stmmac_resume - resume callback
8111  * @dev: device pointer
8112  * Description: when resume this function is invoked to setup the DMA and CORE
8113  * in a usable state.
8114  */
stmmac_resume(struct device * dev)8115 int stmmac_resume(struct device *dev)
8116 {
8117 	struct net_device *ndev = dev_get_drvdata(dev);
8118 	struct stmmac_priv *priv = netdev_priv(ndev);
8119 	int ret;
8120 
8121 	if (priv->plat->resume) {
8122 		ret = priv->plat->resume(dev, priv->plat->bsp_priv);
8123 		if (ret)
8124 			return ret;
8125 	}
8126 
8127 	if (!netif_running(ndev))
8128 		return 0;
8129 
8130 	/* Power Down bit, into the PM register, is cleared
8131 	 * automatically as soon as a magic packet or a Wake-up frame
8132 	 * is received. Anyway, it's better to manually clear
8133 	 * this bit because it can generate problems while resuming
8134 	 * from another devices (e.g. serial console).
8135 	 */
8136 	if (priv->wolopts) {
8137 		mutex_lock(&priv->lock);
8138 		stmmac_pmt(priv, priv->hw, 0);
8139 		mutex_unlock(&priv->lock);
8140 		priv->irq_wake = 0;
8141 	} else {
8142 		pinctrl_pm_select_default_state(priv->device);
8143 		/* reset the phy so that it's ready */
8144 		if (priv->mii)
8145 			stmmac_mdio_reset(priv->mii);
8146 	}
8147 
8148 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
8149 	    priv->plat->serdes_powerup) {
8150 		ret = priv->plat->serdes_powerup(ndev,
8151 						 priv->plat->bsp_priv);
8152 
8153 		if (ret < 0)
8154 			return ret;
8155 	}
8156 
8157 	rtnl_lock();
8158 
8159 	/* Prepare the PHY to resume, ensuring that its clocks which are
8160 	 * necessary for the MAC DMA reset to complete are running
8161 	 */
8162 	phylink_prepare_resume(priv->phylink);
8163 
8164 	mutex_lock(&priv->lock);
8165 
8166 	stmmac_reset_queues_param(priv);
8167 
8168 	stmmac_free_tx_skbufs(priv);
8169 	stmmac_clear_descriptors(priv, &priv->dma_conf);
8170 
8171 	ret = stmmac_hw_setup(ndev);
8172 	if (ret < 0) {
8173 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
8174 		mutex_unlock(&priv->lock);
8175 		rtnl_unlock();
8176 		return ret;
8177 	}
8178 
8179 	stmmac_init_timestamping(priv);
8180 
8181 	stmmac_init_coalesce(priv);
8182 	phylink_rx_clk_stop_block(priv->phylink);
8183 	stmmac_set_rx_mode(ndev);
8184 
8185 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8186 	phylink_rx_clk_stop_unblock(priv->phylink);
8187 
8188 	stmmac_enable_all_queues(priv);
8189 	stmmac_enable_all_dma_irq(priv);
8190 
8191 	mutex_unlock(&priv->lock);
8192 
8193 	/* phylink_resume() must be called after the hardware has been
8194 	 * initialised because it may bring the link up immediately in a
8195 	 * workqueue thread, which will race with initialisation.
8196 	 */
8197 	phylink_resume(priv->phylink);
8198 	rtnl_unlock();
8199 
8200 	netif_device_attach(ndev);
8201 
8202 	return 0;
8203 }
8204 EXPORT_SYMBOL_GPL(stmmac_resume);
8205 
8206 /* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
8207 DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
8208 EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
8209 
8210 #ifndef MODULE
stmmac_cmdline_opt(char * str)8211 static int __init stmmac_cmdline_opt(char *str)
8212 {
8213 	char *opt;
8214 
8215 	if (!str || !*str)
8216 		return 1;
8217 	while ((opt = strsep(&str, ",")) != NULL) {
8218 		if (!strncmp(opt, "debug:", 6)) {
8219 			if (kstrtoint(opt + 6, 0, &debug))
8220 				goto err;
8221 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8222 			if (kstrtoint(opt + 8, 0, &phyaddr))
8223 				goto err;
8224 		} else if (!strncmp(opt, "tc:", 3)) {
8225 			if (kstrtoint(opt + 3, 0, &tc))
8226 				goto err;
8227 		} else if (!strncmp(opt, "watchdog:", 9)) {
8228 			if (kstrtoint(opt + 9, 0, &watchdog))
8229 				goto err;
8230 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8231 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8232 				goto err;
8233 		} else if (!strncmp(opt, "pause:", 6)) {
8234 			if (kstrtoint(opt + 6, 0, &pause))
8235 				goto err;
8236 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8237 			if (kstrtoint(opt + 10, 0, &eee_timer))
8238 				goto err;
8239 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8240 			if (kstrtoint(opt + 11, 0, &chain_mode))
8241 				goto err;
8242 		}
8243 	}
8244 	return 1;
8245 
8246 err:
8247 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8248 	return 1;
8249 }
8250 
8251 __setup("stmmaceth=", stmmac_cmdline_opt);
8252 #endif /* MODULE */
8253 
stmmac_init(void)8254 static int __init stmmac_init(void)
8255 {
8256 #ifdef CONFIG_DEBUG_FS
8257 	/* Create debugfs main directory if it doesn't exist yet */
8258 	if (!stmmac_fs_dir)
8259 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8260 	register_netdevice_notifier(&stmmac_notifier);
8261 #endif
8262 
8263 	return 0;
8264 }
8265 
stmmac_exit(void)8266 static void __exit stmmac_exit(void)
8267 {
8268 #ifdef CONFIG_DEBUG_FS
8269 	unregister_netdevice_notifier(&stmmac_notifier);
8270 	debugfs_remove_recursive(stmmac_fs_dir);
8271 #endif
8272 }
8273 
8274 module_init(stmmac_init)
8275 module_exit(stmmac_exit)
8276 
8277 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8278 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8279 MODULE_LICENSE("GPL");
8280