xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 0a80e38d0fe1fe7b59c1e93ad908c4148a15926a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/pm_wakeirq.h>
33 #include <linux/prefetch.h>
34 #include <linux/pinctrl/consumer.h>
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38 #endif /* CONFIG_DEBUG_FS */
39 #include <linux/net_tstamp.h>
40 #include <linux/phylink.h>
41 #include <linux/udp.h>
42 #include <linux/bpf_trace.h>
43 #include <net/devlink.h>
44 #include <net/page_pool/helpers.h>
45 #include <net/pkt_cls.h>
46 #include <net/xdp_sock_drv.h>
47 #include "stmmac_ptp.h"
48 #include "stmmac_fpe.h"
49 #include "stmmac.h"
50 #include "stmmac_pcs.h"
51 #include "stmmac_xdp.h"
52 #include <linux/reset.h>
53 #include <linux/of_mdio.h>
54 #include "dwmac1000.h"
55 #include "dwxgmac2.h"
56 #include "hwif.h"
57 
58 /* As long as the interface is active, we keep the timestamping counter enabled
59  * with fine resolution and binary rollover. This avoids non-monotonic behavior
60  * (clock jumps) when changing timestamping settings at runtime.
61  */
62 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR)
63 
64 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
65 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
66 
67 /* Module parameters */
68 #define TX_TIMEO	5000
69 static int watchdog = TX_TIMEO;
70 module_param(watchdog, int, 0644);
71 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
72 
73 static int debug = -1;
74 module_param(debug, int, 0644);
75 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
76 
77 static int phyaddr = -1;
78 module_param(phyaddr, int, 0444);
79 MODULE_PARM_DESC(phyaddr, "Physical device address");
80 
81 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
82 
83 /* Limit to make sure XDP TX and slow path can coexist */
84 #define STMMAC_XSK_TX_BUDGET_MAX	256
85 #define STMMAC_TX_XSK_AVAIL		16
86 #define STMMAC_RX_FILL_BATCH		16
87 
88 #define STMMAC_XDP_PASS		0
89 #define STMMAC_XDP_CONSUMED	BIT(0)
90 #define STMMAC_XDP_TX		BIT(1)
91 #define STMMAC_XDP_REDIRECT	BIT(2)
92 #define STMMAC_XSK_CONSUMED	BIT(3)
93 
94 static int flow_ctrl = 0xdead;
95 module_param(flow_ctrl, int, 0644);
96 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
97 
98 static int pause = PAUSE_TIME;
99 module_param(pause, int, 0644);
100 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
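
/* Worked example for the pause units (illustrative values): the pause value
 * counts 512-bit-time quanta, so at 1Gbps one quantum is 512ns and a value
 * of 1000 requests roughly 512us of pause; at 100Mbps the same value
 * requests about 5.12ms.
 */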
101 
102 #define TC_DEFAULT 64
103 static int tc = TC_DEFAULT;
104 module_param(tc, int, 0644);
105 MODULE_PARM_DESC(tc, "DMA threshold control value");
106 
107 /* This is unused */
108 #define	DEFAULT_BUFSIZE	1536
109 static int buf_sz = DEFAULT_BUFSIZE;
110 module_param(buf_sz, int, 0644);
111 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
112 
113 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
114 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
115 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
116 
117 #define STMMAC_DEFAULT_LPI_TIMER	1000
118 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
119 module_param(eee_timer, uint, 0644);
120 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
121 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
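
/* Note on units: eee_timer above is given in milliseconds, while phylink's
 * lpi_timer_default and priv->tx_lpi_timer are kept in microseconds; hence
 * the "eee_timer * 1000" in stmmac_phylink_setup() and the
 * usecs_to_jiffies() conversion in STMMAC_LPI_T().
 */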
122 
123 /* By default the driver will use the ring mode to manage tx and rx descriptors,
124  * but allow user to force to use the chain instead of the ring
125  */
126 static unsigned int chain_mode;
127 module_param(chain_mode, int, 0444);
128 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
129 
130 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
131 /* For MSI interrupts handling */
132 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
133 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
134 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
135 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
136 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
139 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
140 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
141 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
142 					  u32 rxmode, u32 chan);
143 
144 #ifdef CONFIG_DEBUG_FS
145 static const struct net_device_ops stmmac_netdev_ops;
146 static void stmmac_init_fs(struct net_device *dev);
147 static void stmmac_exit_fs(struct net_device *dev);
148 #endif
149 
150 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
151 
152 struct stmmac_devlink_priv {
153 	struct stmmac_priv *stmmac_priv;
154 };
155 
156 enum stmmac_dl_param_id {
157 	STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
158 	STMMAC_DEVLINK_PARAM_ID_TS_COARSE,
159 };
160 
161 /**
162  * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
163  * @bsp_priv: BSP private data structure (unused)
164  * @clk_tx_i: the transmit clock
165  * @interface: the selected interface mode
166  * @speed: the speed that the MAC will be operating at
167  *
168  * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
169  * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
170  * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
171  * the plat_data->set_clk_tx_rate method directly, call it via their own
172  * implementation, or implement their own method should they have more
173  * complex requirements. It is intended to only be used in this method.
174  *
175  * plat_data->clk_tx_i must be filled in.
176  */
177 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
178 			   phy_interface_t interface, int speed)
179 {
180 	long rate = rgmii_clock(speed);
181 
182 	/* Silently ignore unsupported speeds as rgmii_clock() only
183 	 * supports 10, 100 and 1000Mbps. We do not want to spit
184 	 * errors for 2500 and higher speeds here.
185 	 */
186 	if (rate < 0)
187 		return 0;
188 
189 	return clk_set_rate(clk_tx_i, rate);
190 }
191 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
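
/* A minimal sketch of how platform glue might hook the helper above. This is
 * illustrative only: the "tx" clock name, the probe context and the plat_dat
 * pointer are assumptions, not taken from this file.
 *
 *	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
 *	if (IS_ERR(plat_dat->clk_tx_i))
 *		return PTR_ERR(plat_dat->clk_tx_i);
 *
 *	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
 */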
192 
193 /**
194  * stmmac_axi_blen_to_mask() - convert a burst length array to reg value
195  * @regval: pointer to a u32 for the resulting register value
196  * @blen: pointer to an array of u32 containing the burst length values in bytes
197  * @len: the number of entries in the @blen array
198  */
199 void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
200 {
201 	size_t i;
202 	u32 val;
203 
204 	for (val = i = 0; i < len; i++) {
205 		u32 burst = blen[i];
206 
207 		/* Burst values of zero must be skipped. */
208 		if (!burst)
209 			continue;
210 
211 		/* The valid range for the burst length is 4 to 256 inclusive,
212 		 * and it must be a power of two.
213 		 */
214 		if (burst < 4 || burst > 256 || !is_power_of_2(burst)) {
215 			pr_err("stmmac: invalid burst length %u at index %zu\n",
216 			       burst, i);
217 			continue;
218 		}
219 
220 		/* Since burst is a power of two, and the register field starts
221 		 * with burst = 4, shift right by two bits so bit 0 of the field
222 		 * corresponds with the minimum value.
223 		 */
224 		val |= burst >> 2;
225 	}
226 
227 	*regval = FIELD_PREP(DMA_AXI_BLEN_MASK, val);
228 }
229 EXPORT_SYMBOL_GPL(stmmac_axi_blen_to_mask);
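
/* Worked example for the conversion above (values are illustrative): with
 * blen = { 4, 8, 16 } each valid power-of-two burst contributes the single
 * bit (burst >> 2), so val = 0x1 | 0x2 | 0x4 = 0x7 and the caller receives
 * FIELD_PREP(DMA_AXI_BLEN_MASK, 0x7):
 *
 *	static const u32 blen[] = { 4, 8, 16 };
 *	u32 regval;
 *
 *	stmmac_axi_blen_to_mask(&regval, blen, ARRAY_SIZE(blen));
 */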
230 
231 /**
232  * stmmac_verify_args - verify the driver parameters.
233  * Description: it checks the driver parameters and set a default in case of
234  * errors.
235  */
236 static void stmmac_verify_args(void)
237 {
238 	if (unlikely(watchdog < 0))
239 		watchdog = TX_TIMEO;
240 	if (unlikely((pause < 0) || (pause > 0xffff)))
241 		pause = PAUSE_TIME;
242 
243 	if (flow_ctrl != 0xdead)
244 		pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
245 }
246 
247 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
248 {
249 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
250 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
251 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
252 	u32 queue;
253 
254 	for (queue = 0; queue < maxq; queue++) {
255 		struct stmmac_channel *ch = &priv->channel[queue];
256 
257 		if (stmmac_xdp_is_enabled(priv) &&
258 		    test_bit(queue, priv->af_xdp_zc_qps)) {
259 			napi_disable(&ch->rxtx_napi);
260 			continue;
261 		}
262 
263 		if (queue < rx_queues_cnt)
264 			napi_disable(&ch->rx_napi);
265 		if (queue < tx_queues_cnt)
266 			napi_disable(&ch->tx_napi);
267 	}
268 }
269 
270 /**
271  * stmmac_disable_all_queues - Disable all queues
272  * @priv: driver private structure
273  */
274 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
275 {
276 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
277 	struct stmmac_rx_queue *rx_q;
278 	u32 queue;
279 
280 	/* synchronize_rcu() needed for pending XDP buffers to drain */
281 	for (queue = 0; queue < rx_queues_cnt; queue++) {
282 		rx_q = &priv->dma_conf.rx_queue[queue];
283 		if (rx_q->xsk_pool) {
284 			synchronize_rcu();
285 			break;
286 		}
287 	}
288 
289 	__stmmac_disable_all_queues(priv);
290 }
291 
292 /**
293  * stmmac_enable_all_queues - Enable all queues
294  * @priv: driver private structure
295  */
296 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
297 {
298 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
299 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
300 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
301 	u32 queue;
302 
303 	for (queue = 0; queue < maxq; queue++) {
304 		struct stmmac_channel *ch = &priv->channel[queue];
305 
306 		if (stmmac_xdp_is_enabled(priv) &&
307 		    test_bit(queue, priv->af_xdp_zc_qps)) {
308 			napi_enable(&ch->rxtx_napi);
309 			continue;
310 		}
311 
312 		if (queue < rx_queues_cnt)
313 			napi_enable(&ch->rx_napi);
314 		if (queue < tx_queues_cnt)
315 			napi_enable(&ch->tx_napi);
316 	}
317 }
318 
319 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
320 {
321 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
322 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
323 		queue_work(priv->wq, &priv->service_task);
324 }
325 
326 static void stmmac_global_err(struct stmmac_priv *priv)
327 {
328 	netif_carrier_off(priv->dev);
329 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
330 	stmmac_service_event_schedule(priv);
331 }
332 
333 static void print_pkt(unsigned char *buf, int len)
334 {
335 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
336 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
337 }
338 
339 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
340 {
341 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
342 	u32 avail;
343 
344 	if (tx_q->dirty_tx > tx_q->cur_tx)
345 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
346 	else
347 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
348 
349 	return avail;
350 }
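
/* Ring arithmetic example for stmmac_tx_avail() (illustrative numbers): with
 * dma_tx_size = 512, cur_tx = 10 and dirty_tx = 5 the else branch gives
 * avail = 512 - 10 + 5 - 1 = 506; the "- 1" keeps one descriptor free so
 * that cur_tx can never catch up with dirty_tx.
 */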
351 
352 /**
353  * stmmac_rx_dirty - Get RX queue dirty
354  * @priv: driver private structure
355  * @queue: RX queue index
356  */
357 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
358 {
359 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
360 	u32 dirty;
361 
362 	if (rx_q->dirty_rx <= rx_q->cur_rx)
363 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
364 	else
365 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
366 
367 	return dirty;
368 }
369 
370 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
371 {
372 	u32 tx_cnt = priv->plat->tx_queues_to_use;
373 	u32 queue;
374 
375 	/* check if all TX queues have the work finished */
376 	for (queue = 0; queue < tx_cnt; queue++) {
377 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
378 
379 		if (tx_q->dirty_tx != tx_q->cur_tx)
380 			return true; /* still unfinished work */
381 	}
382 
383 	return false;
384 }
385 
386 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
387 {
388 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
389 }
390 
391 /**
392  * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
393  * @priv: driver private structure
394  * Description: check that all TX queues are idle and, if so, enter LPI
395  * mode when EEE is enabled.
396  */
397 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
398 {
399 	if (stmmac_eee_tx_busy(priv)) {
400 		stmmac_restart_sw_lpi_timer(priv);
401 		return;
402 	}
403 
404 	/* Check and enter in LPI mode */
405 	if (!priv->tx_path_in_lpi_mode)
406 		stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
407 				    priv->tx_lpi_clk_stop, 0);
408 }
409 
410 /**
411  * stmmac_stop_sw_lpi - stop transmitting LPI
412  * @priv: driver private structure
413  * Description: When using software-controlled LPI, stop transmitting LPI state.
414  */
415 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
416 {
417 	timer_delete_sync(&priv->eee_ctrl_timer);
418 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
419 	priv->tx_path_in_lpi_mode = false;
420 }
421 
422 /**
423  * stmmac_eee_ctrl_timer - EEE TX SW timer.
424  * @t:  timer_list struct containing private info
425  * Description:
426  *  if there is no data transfer and if we are not in LPI state,
427  *  then the MAC transmitter can be moved to the LPI state.
428  */
429 static void stmmac_eee_ctrl_timer(struct timer_list *t)
430 {
431 	struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
432 
433 	stmmac_try_to_start_sw_lpi(priv);
434 }
435 
436 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
437  * @priv: driver private structure
438  * @p : descriptor pointer
439  * @skb : the socket buffer
440  * Description :
441  * This function reads the timestamp from the descriptor, performs some
442  * sanity checks and passes it to the stack.
443  */
444 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
445 				   struct dma_desc *p, struct sk_buff *skb)
446 {
447 	struct skb_shared_hwtstamps shhwtstamp;
448 	bool found = false;
449 	u64 ns = 0;
450 
451 	if (!priv->hwts_tx_en)
452 		return;
453 
454 	/* exit if skb doesn't support hw tstamp */
455 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
456 		return;
457 
458 	/* check tx tstamp status */
459 	if (stmmac_get_tx_timestamp_status(priv, p)) {
460 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
461 		found = true;
462 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
463 		found = true;
464 	}
465 
466 	if (found) {
467 		ns -= priv->plat->cdc_error_adj;
468 
469 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
470 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
471 
472 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
473 		/* pass tstamp to stack */
474 		skb_tstamp_tx(skb, &shhwtstamp);
475 	}
476 }
477 
478 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
479  * @priv: driver private structure
480  * @p : descriptor pointer
481  * @np : next descriptor pointer
482  * @skb : the socket buffer
483  * Description :
484  * This function reads the received packet's timestamp from the descriptor
485  * and passes it to the stack. It also performs some sanity checks.
486  */
487 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
488 				   struct dma_desc *np, struct sk_buff *skb)
489 {
490 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
491 	struct dma_desc *desc = p;
492 	u64 ns = 0;
493 
494 	if (!priv->hwts_rx_en)
495 		return;
496 	/* For GMAC4, the valid timestamp is from CTX next desc. */
497 	if (dwmac_is_xmac(priv->plat->core_type))
498 		desc = np;
499 
500 	/* Check if timestamp is available */
501 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
502 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
503 
504 		ns -= priv->plat->cdc_error_adj;
505 
506 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
507 		shhwtstamp = skb_hwtstamps(skb);
508 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
509 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
510 	} else  {
511 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
512 	}
513 }
514 
515 static void stmmac_update_subsecond_increment(struct stmmac_priv *priv)
516 {
517 	bool xmac = dwmac_is_xmac(priv->plat->core_type);
518 	u32 sec_inc = 0;
519 	u64 temp = 0;
520 
521 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
522 
523 	/* program Sub Second Increment reg */
524 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
525 					   priv->plat->clk_ptp_rate,
526 					   xmac, &sec_inc);
527 	temp = div_u64(1000000000ULL, sec_inc);
528 
529 	/* Store sub second increment for later use */
530 	priv->sub_second_inc = sec_inc;
531 
532 	/* Calculate the default addend value. The formula is:
533 	 * addend = (2^32) / freq_div_ratio
534 	 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc), i.e.
535 	 * addend = ((1e9 / sec_inc) << 32) / clk_ptp_rate as computed below.
536 	 */
537 	temp = (u64)(temp << 32);
538 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
539 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
540 }
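
/* Worked example for the addend formula above (numbers are illustrative):
 * with clk_ptp_rate = 250MHz and sec_inc = 5ns, the target counter rate is
 * 1e9 / 5 = 200MHz, freq_div_ratio = 250 / 200 = 1.25, and
 * default_addend = 2^32 / 1.25 = 0xCCCCCCCC (i.e. 0.8 * 2^32).
 */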
541 
542 /**
543  *  stmmac_hwtstamp_set - control hardware timestamping.
544  *  @dev: device pointer.
545  *  @config: the timestamping configuration.
546  *  @extack: netlink extended ack structure for error reporting.
547  *  Description:
548  *  This function configures the MAC to enable/disable both outgoing (TX)
549  *  and incoming (RX) packet timestamping based on user input.
550  *  Return Value:
551  *  0 on success and an appropriate -ve integer on failure.
552  */
553 static int stmmac_hwtstamp_set(struct net_device *dev,
554 			       struct kernel_hwtstamp_config *config,
555 			       struct netlink_ext_ack *extack)
556 {
557 	struct stmmac_priv *priv = netdev_priv(dev);
558 	u32 ptp_v2 = 0;
559 	u32 tstamp_all = 0;
560 	u32 ptp_over_ipv4_udp = 0;
561 	u32 ptp_over_ipv6_udp = 0;
562 	u32 ptp_over_ethernet = 0;
563 	u32 snap_type_sel = 0;
564 	u32 ts_master_en = 0;
565 	u32 ts_event_en = 0;
566 
567 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
568 		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
569 		priv->hwts_tx_en = 0;
570 		priv->hwts_rx_en = 0;
571 
572 		return -EOPNOTSUPP;
573 	}
574 
575 	if (!netif_running(dev)) {
576 		NL_SET_ERR_MSG_MOD(extack,
577 				   "Cannot change timestamping configuration while down");
578 		return -ENODEV;
579 	}
580 
581 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
582 		   __func__, config->flags, config->tx_type, config->rx_filter);
583 
584 	if (config->tx_type != HWTSTAMP_TX_OFF &&
585 	    config->tx_type != HWTSTAMP_TX_ON)
586 		return -ERANGE;
587 
588 	if (priv->adv_ts) {
589 		switch (config->rx_filter) {
590 		case HWTSTAMP_FILTER_NONE:
591 			/* time stamp no incoming packet at all */
592 			config->rx_filter = HWTSTAMP_FILTER_NONE;
593 			break;
594 
595 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
596 			/* PTP v1, UDP, any kind of event packet */
597 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
598 			/* 'xmac' hardware can support Sync, Pdelay_Req and
599  * Pdelay_resp by setting bit14 and bits17/16 to 01.
600 			 * This leaves Delay_Req timestamps out.
601 			 * Enable all events *and* general purpose message
602 			 * timestamping
603 			 */
604 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
605 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
606 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
607 			break;
608 
609 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
610 			/* PTP v1, UDP, Sync packet */
611 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
612 			/* take time stamp for SYNC messages only */
613 			ts_event_en = PTP_TCR_TSEVNTENA;
614 
615 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
616 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
617 			break;
618 
619 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
620 			/* PTP v1, UDP, Delay_req packet */
621 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
622 			/* take time stamp for Delay_Req messages only */
623 			ts_master_en = PTP_TCR_TSMSTRENA;
624 			ts_event_en = PTP_TCR_TSEVNTENA;
625 
626 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
627 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
628 			break;
629 
630 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
631 			/* PTP v2, UDP, any kind of event packet */
632 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
633 			ptp_v2 = PTP_TCR_TSVER2ENA;
634 			/* take time stamp for all event messages */
635 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
636 
637 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
638 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
639 			break;
640 
641 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
642 			/* PTP v2, UDP, Sync packet */
643 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
644 			ptp_v2 = PTP_TCR_TSVER2ENA;
645 			/* take time stamp for SYNC messages only */
646 			ts_event_en = PTP_TCR_TSEVNTENA;
647 
648 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
649 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
650 			break;
651 
652 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
653 			/* PTP v2, UDP, Delay_req packet */
654 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
655 			ptp_v2 = PTP_TCR_TSVER2ENA;
656 			/* take time stamp for Delay_Req messages only */
657 			ts_master_en = PTP_TCR_TSMSTRENA;
658 			ts_event_en = PTP_TCR_TSEVNTENA;
659 
660 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 			break;
663 
664 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
665 			/* PTP v2/802.AS1 any layer, any kind of event packet */
666 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
667 			ptp_v2 = PTP_TCR_TSVER2ENA;
668 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
669 			if (priv->synopsys_id < DWMAC_CORE_4_10)
670 				ts_event_en = PTP_TCR_TSEVNTENA;
671 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
672 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
673 			ptp_over_ethernet = PTP_TCR_TSIPENA;
674 			break;
675 
676 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
677 			/* PTP v2/802.AS1, any layer, Sync packet */
678 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
679 			ptp_v2 = PTP_TCR_TSVER2ENA;
680 			/* take time stamp for SYNC messages only */
681 			ts_event_en = PTP_TCR_TSEVNTENA;
682 
683 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
684 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
685 			ptp_over_ethernet = PTP_TCR_TSIPENA;
686 			break;
687 
688 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
689 			/* PTP v2/802.AS1, any layer, Delay_req packet */
690 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
691 			ptp_v2 = PTP_TCR_TSVER2ENA;
692 			/* take time stamp for Delay_Req messages only */
693 			ts_master_en = PTP_TCR_TSMSTRENA;
694 			ts_event_en = PTP_TCR_TSEVNTENA;
695 
696 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
697 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
698 			ptp_over_ethernet = PTP_TCR_TSIPENA;
699 			break;
700 
701 		case HWTSTAMP_FILTER_NTP_ALL:
702 		case HWTSTAMP_FILTER_ALL:
703 			/* time stamp any incoming packet */
704 			config->rx_filter = HWTSTAMP_FILTER_ALL;
705 			tstamp_all = PTP_TCR_TSENALL;
706 			break;
707 
708 		default:
709 			return -ERANGE;
710 		}
711 	} else {
712 		switch (config->rx_filter) {
713 		case HWTSTAMP_FILTER_NONE:
714 			config->rx_filter = HWTSTAMP_FILTER_NONE;
715 			break;
716 		default:
717 			/* PTP v1, UDP, any kind of event packet */
718 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
719 			break;
720 		}
721 	}
722 	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
723 	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
724 
725 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
726 	if (!priv->tsfupdt_coarse)
727 		priv->systime_flags |= PTP_TCR_TSCFUPDT;
728 
729 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
730 		priv->systime_flags |= tstamp_all | ptp_v2 |
731 				       ptp_over_ethernet | ptp_over_ipv6_udp |
732 				       ptp_over_ipv4_udp | ts_event_en |
733 				       ts_master_en | snap_type_sel;
734 	}
735 
736 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
737 
738 	priv->tstamp_config = *config;
739 
740 	return 0;
741 }
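
/* For reference, a hedged sketch of the userspace request that ends up in
 * this handler. Assumptions not taken from this file: a socket fd and the
 * interface name "eth0"; needs <linux/net_tstamp.h>, <linux/sockios.h>,
 * <net/if.h>, <sys/ioctl.h> and <string.h>.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */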
742 
743 /**
744  *  stmmac_hwtstamp_get - read hardware timestamping.
745  *  @dev: device pointer.
746  *  @config: the timestamping configuration.
747  *  Description:
748  *  This function obtains the current hardware timestamping settings
749  *  as requested.
750  */
751 static int stmmac_hwtstamp_get(struct net_device *dev,
752 			       struct kernel_hwtstamp_config *config)
753 {
754 	struct stmmac_priv *priv = netdev_priv(dev);
755 
756 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
757 		return -EOPNOTSUPP;
758 
759 	*config = priv->tstamp_config;
760 
761 	return 0;
762 }
763 
764 /**
765  * stmmac_init_tstamp_counter - init hardware timestamping counter
766  * @priv: driver private structure
767  * @systime_flags: timestamping flags
768  * Description:
769  * Initialize hardware counter for packet timestamping.
770  * This is valid as long as the interface is open and not suspended.
771  * Will be rerun after resuming from suspend, in which case the timestamping
772  * flags updated by stmmac_hwtstamp_set() also need to be restored.
773  */
774 static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
775 				      u32 systime_flags)
776 {
777 	struct timespec64 now;
778 
779 	if (!priv->plat->clk_ptp_rate) {
780 		netdev_err(priv->dev, "Invalid PTP clock rate");
781 		return -EINVAL;
782 	}
783 
784 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
785 	priv->systime_flags = systime_flags;
786 
787 	stmmac_update_subsecond_increment(priv);
788 
789 	/* initialize system time */
790 	ktime_get_real_ts64(&now);
791 
792 	/* lower 32 bits of tv_sec are safe until y2106 */
793 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
794 
795 	return 0;
796 }
797 
798 /**
799  * stmmac_init_timestamping - initialise timestamping
800  * @priv: driver private structure
801  * Description: verify whether the HW supports PTPv1 or PTPv2 by looking
802  * at the HW capability register, then initialise the timestamping counter.
803  * The PTP clock driver is registered afterwards by stmmac_setup_ptp().
804  */
805 static int stmmac_init_timestamping(struct stmmac_priv *priv)
806 {
807 	bool xmac = dwmac_is_xmac(priv->plat->core_type);
808 	int ret;
809 
810 	if (priv->plat->ptp_clk_freq_config)
811 		priv->plat->ptp_clk_freq_config(priv);
812 
813 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
814 		netdev_info(priv->dev, "PTP not supported by HW\n");
815 		return -EOPNOTSUPP;
816 	}
817 
818 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE |
819 					       PTP_TCR_TSCFUPDT);
820 	if (ret) {
821 		netdev_warn(priv->dev, "PTP init failed\n");
822 		return ret;
823 	}
824 
825 	priv->adv_ts = 0;
826 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
827 	if (xmac && priv->dma_cap.atime_stamp)
828 		priv->adv_ts = 1;
829 	/* Dwmac 3.x core with extend_desc can support adv_ts */
830 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
831 		priv->adv_ts = 1;
832 
833 	if (priv->dma_cap.time_stamp)
834 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
835 
836 	if (priv->adv_ts)
837 		netdev_info(priv->dev,
838 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
839 
840 	priv->hwts_tx_en = 0;
841 	priv->hwts_rx_en = 0;
842 
843 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
844 		stmmac_hwtstamp_correct_latency(priv, priv);
845 
846 	return 0;
847 }
848 
849 static void stmmac_setup_ptp(struct stmmac_priv *priv)
850 {
851 	int ret;
852 
853 	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
854 	if (ret < 0)
855 		netdev_warn(priv->dev,
856 			    "failed to enable PTP reference clock: %pe\n",
857 			    ERR_PTR(ret));
858 
859 	if (stmmac_init_timestamping(priv) == 0)
860 		stmmac_ptp_register(priv);
861 }
862 
863 static void stmmac_release_ptp(struct stmmac_priv *priv)
864 {
865 	stmmac_ptp_unregister(priv);
866 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
867 }
868 
869 /**
870  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
871  *  @priv: driver private structure
872  *  @duplex: duplex passed to the next function
873  *  @flow_ctrl: desired flow control modes
874  *  Description: It is used for configuring the flow control in all queues
875  */
876 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
877 				 unsigned int flow_ctrl)
878 {
879 	u32 tx_cnt = priv->plat->tx_queues_to_use;
880 
881 	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
882 			 tx_cnt);
883 }
884 
885 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
886 					 phy_interface_t interface)
887 {
888 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
889 
890 	/* Refresh the MAC-specific capabilities */
891 	stmmac_mac_update_caps(priv);
892 
893 	config->mac_capabilities = priv->hw->link.caps;
894 
895 	if (priv->plat->max_speed)
896 		phylink_limit_mac_speed(config, priv->plat->max_speed);
897 
898 	return config->mac_capabilities;
899 }
900 
901 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
902 						 phy_interface_t interface)
903 {
904 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
905 	struct phylink_pcs *pcs;
906 
907 	if (priv->plat->select_pcs) {
908 		pcs = priv->plat->select_pcs(priv, interface);
909 		if (!IS_ERR(pcs))
910 			return pcs;
911 	}
912 
913 	/* The PCS control register is only relevant for SGMII, TBI and RTBI
914 	 * modes. We no longer support TBI or RTBI, so only configure this
915 	 * register when operating in SGMII mode with the integrated PCS.
916 	 */
917 	if (priv->hw->pcs & STMMAC_PCS_SGMII && priv->integrated_pcs)
918 		return &priv->integrated_pcs->pcs;
919 
920 	return NULL;
921 }
922 
923 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
924 			      const struct phylink_link_state *state)
925 {
926 	/* Nothing to do, xpcs_config() handles everything */
927 }
928 
929 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
930 			     phy_interface_t interface)
931 {
932 	struct net_device *ndev = to_net_dev(config->dev);
933 	struct stmmac_priv *priv = netdev_priv(ndev);
934 
935 	if (priv->plat->mac_finish)
936 		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
937 
938 	return 0;
939 }
940 
941 static void stmmac_mac_link_down(struct phylink_config *config,
942 				 unsigned int mode, phy_interface_t interface)
943 {
944 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
945 
946 	stmmac_mac_set(priv, priv->ioaddr, false);
947 	if (priv->dma_cap.eee)
948 		stmmac_set_eee_pls(priv, priv->hw, false);
949 
950 	if (stmmac_fpe_supported(priv))
951 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
952 }
953 
954 static void stmmac_mac_link_up(struct phylink_config *config,
955 			       struct phy_device *phy,
956 			       unsigned int mode, phy_interface_t interface,
957 			       int speed, int duplex,
958 			       bool tx_pause, bool rx_pause)
959 {
960 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
961 	unsigned int flow_ctrl;
962 	u32 old_ctrl, ctrl;
963 	int ret;
964 
965 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
966 	    priv->plat->serdes_powerup)
967 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
968 
969 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
970 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
971 
972 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
973 		switch (speed) {
974 		case SPEED_10000:
975 			ctrl |= priv->hw->link.xgmii.speed10000;
976 			break;
977 		case SPEED_5000:
978 			ctrl |= priv->hw->link.xgmii.speed5000;
979 			break;
980 		case SPEED_2500:
981 			ctrl |= priv->hw->link.xgmii.speed2500;
982 			break;
983 		default:
984 			return;
985 		}
986 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
987 		switch (speed) {
988 		case SPEED_100000:
989 			ctrl |= priv->hw->link.xlgmii.speed100000;
990 			break;
991 		case SPEED_50000:
992 			ctrl |= priv->hw->link.xlgmii.speed50000;
993 			break;
994 		case SPEED_40000:
995 			ctrl |= priv->hw->link.xlgmii.speed40000;
996 			break;
997 		case SPEED_25000:
998 			ctrl |= priv->hw->link.xlgmii.speed25000;
999 			break;
1000 		case SPEED_10000:
1001 			ctrl |= priv->hw->link.xgmii.speed10000;
1002 			break;
1003 		case SPEED_2500:
1004 			ctrl |= priv->hw->link.speed2500;
1005 			break;
1006 		case SPEED_1000:
1007 			ctrl |= priv->hw->link.speed1000;
1008 			break;
1009 		default:
1010 			return;
1011 		}
1012 	} else {
1013 		switch (speed) {
1014 		case SPEED_2500:
1015 			ctrl |= priv->hw->link.speed2500;
1016 			break;
1017 		case SPEED_1000:
1018 			ctrl |= priv->hw->link.speed1000;
1019 			break;
1020 		case SPEED_100:
1021 			ctrl |= priv->hw->link.speed100;
1022 			break;
1023 		case SPEED_10:
1024 			ctrl |= priv->hw->link.speed10;
1025 			break;
1026 		default:
1027 			return;
1028 		}
1029 	}
1030 
1031 	if (priv->plat->fix_mac_speed)
1032 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1033 
1034 	if (!duplex)
1035 		ctrl &= ~priv->hw->link.duplex;
1036 	else
1037 		ctrl |= priv->hw->link.duplex;
1038 
1039 	/* Flow Control operation */
1040 	if (rx_pause && tx_pause)
1041 		flow_ctrl = FLOW_AUTO;
1042 	else if (rx_pause && !tx_pause)
1043 		flow_ctrl = FLOW_RX;
1044 	else if (!rx_pause && tx_pause)
1045 		flow_ctrl = FLOW_TX;
1046 	else
1047 		flow_ctrl = FLOW_OFF;
1048 
1049 	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
1050 
1051 	if (ctrl != old_ctrl)
1052 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1053 
1054 	if (priv->plat->set_clk_tx_rate) {
1055 		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
1056 						priv->plat->clk_tx_i,
1057 						interface, speed);
1058 		if (ret < 0)
1059 			netdev_err(priv->dev,
1060 				   "failed to configure %s transmit clock for %dMbps: %pe\n",
1061 				   phy_modes(interface), speed, ERR_PTR(ret));
1062 	}
1063 
1064 	stmmac_mac_set(priv, priv->ioaddr, true);
1065 	if (priv->dma_cap.eee)
1066 		stmmac_set_eee_pls(priv, priv->hw, true);
1067 
1068 	if (stmmac_fpe_supported(priv))
1069 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
1070 
1071 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1072 		stmmac_hwtstamp_correct_latency(priv, priv);
1073 }
1074 
1075 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1076 {
1077 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1078 
1079 	priv->eee_active = false;
1080 
1081 	mutex_lock(&priv->lock);
1082 
1083 	priv->eee_enabled = false;
1084 
1085 	netdev_dbg(priv->dev, "disable EEE\n");
1086 	priv->eee_sw_timer_en = false;
1087 	timer_delete_sync(&priv->eee_ctrl_timer);
1088 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1089 	priv->tx_path_in_lpi_mode = false;
1090 
1091 	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1092 	mutex_unlock(&priv->lock);
1093 }
1094 
1095 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1096 				    bool tx_clk_stop)
1097 {
1098 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1099 	int ret;
1100 
1101 	priv->tx_lpi_timer = timer;
1102 	priv->eee_active = true;
1103 
1104 	mutex_lock(&priv->lock);
1105 
1106 	priv->eee_enabled = true;
1107 
1108 	/* Update the transmit clock stop according to PHY capability if
1109 	 * the platform allows
1110 	 */
1111 	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1112 		priv->tx_lpi_clk_stop = tx_clk_stop;
1113 
1114 	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1115 			     STMMAC_DEFAULT_TWT_LS);
1116 
1117 	/* Try to configure the hardware timer. */
1118 	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1119 				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1120 
1121 	if (ret) {
1122 		/* Hardware timer mode not supported, or value out of range.
1123 		 * Fall back to using software LPI mode
1124 		 */
1125 		priv->eee_sw_timer_en = true;
1126 		stmmac_restart_sw_lpi_timer(priv);
1127 	}
1128 
1129 	mutex_unlock(&priv->lock);
1130 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1131 
1132 	return 0;
1133 }
1134 
1135 static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts,
1136 			      const u8 *sopass)
1137 {
1138 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1139 
1140 	device_set_wakeup_enable(priv->device, !!wolopts);
1141 
1142 	mutex_lock(&priv->lock);
1143 	priv->wolopts = wolopts;
1144 	mutex_unlock(&priv->lock);
1145 
1146 	return 0;
1147 }
1148 
1149 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1150 	.mac_get_caps = stmmac_mac_get_caps,
1151 	.mac_select_pcs = stmmac_mac_select_pcs,
1152 	.mac_config = stmmac_mac_config,
1153 	.mac_finish = stmmac_mac_finish,
1154 	.mac_link_down = stmmac_mac_link_down,
1155 	.mac_link_up = stmmac_mac_link_up,
1156 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1157 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1158 	.mac_wol_set = stmmac_mac_wol_set,
1159 };
1160 
1161 /**
1162  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1163  * @priv: driver private structure
1164  * Description: verify whether the HW supports the Physical Coding
1165  * Sublayer (PCS), which can be used when the MAC is configured for the
1166  * TBI, RTBI, or SGMII PHY interface.
1167  */
1168 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1169 {
1170 	int interface = priv->plat->phy_interface;
1171 	int speed = priv->plat->mac_port_sel_speed;
1172 
1173 	if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) {
1174 		netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1175 		priv->hw->pcs = STMMAC_PCS_SGMII;
1176 
1177 		switch (speed) {
1178 		case SPEED_10:
1179 		case SPEED_100:
1180 		case SPEED_1000:
1181 			priv->hw->reverse_sgmii_enable = true;
1182 			break;
1183 
1184 		default:
1185 			dev_warn(priv->device, "invalid port speed\n");
1186 			fallthrough;
1187 		case 0:
1188 			priv->hw->reverse_sgmii_enable = false;
1189 			break;
1190 		}
1191 	}
1192 }
1193 
1194 /**
1195  * stmmac_init_phy - PHY initialization
1196  * @dev: net device structure
1197  * Description: it initializes the driver's PHY state, and attaches the PHY
1198  * to the MAC driver.
1199  *  Return value:
1200  *  0 on success
1201  */
1202 static int stmmac_init_phy(struct net_device *dev)
1203 {
1204 	struct stmmac_priv *priv = netdev_priv(dev);
1205 	int mode = priv->plat->phy_interface;
1206 	struct fwnode_handle *phy_fwnode;
1207 	struct fwnode_handle *fwnode;
1208 	struct ethtool_keee eee;
1209 	int ret;
1210 
1211 	if (!phylink_expects_phy(priv->phylink))
1212 		return 0;
1213 
1214 	if (priv->hw->xpcs &&
1215 	    xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
1216 		return 0;
1217 
1218 	fwnode = priv->plat->port_node;
1219 	if (!fwnode)
1220 		fwnode = dev_fwnode(priv->device);
1221 
1222 	if (fwnode)
1223 		phy_fwnode = fwnode_get_phy_node(fwnode);
1224 	else
1225 		phy_fwnode = NULL;
1226 
1227 	/* Some DT bindings do not set up the PHY handle. Let's try to
1228 	 * parse it manually
1229 	 */
1230 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1231 		int addr = priv->plat->phy_addr;
1232 		struct phy_device *phydev;
1233 
1234 		if (addr < 0) {
1235 			netdev_err(priv->dev, "no phy found\n");
1236 			return -ENODEV;
1237 		}
1238 
1239 		phydev = mdiobus_get_phy(priv->mii, addr);
1240 		if (!phydev) {
1241 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1242 			return -ENODEV;
1243 		}
1244 
1245 		ret = phylink_connect_phy(priv->phylink, phydev);
1246 	} else {
1247 		fwnode_handle_put(phy_fwnode);
1248 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1249 	}
1250 
1251 	if (ret) {
1252 		netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
1253 			   ERR_PTR(ret));
1254 		return ret;
1255 	}
1256 
1257 	/* Configure phylib's copy of the LPI timer. Normally,
1258 	 * phylink_config.lpi_timer_default would do this, but there is a
1259 	 * chance that userspace could change the eee_timer setting via sysfs
1260 	 * before the first open. Thus, preserve existing behaviour.
1261 	 */
1262 	if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1263 		eee.tx_lpi_timer = priv->tx_lpi_timer;
1264 		phylink_ethtool_set_eee(priv->phylink, &eee);
1265 	}
1266 
1267 	return 0;
1268 }
1269 
1270 static int stmmac_phylink_setup(struct stmmac_priv *priv)
1271 {
1272 	struct stmmac_mdio_bus_data *mdio_bus_data;
1273 	struct phylink_config *config;
1274 	struct fwnode_handle *fwnode;
1275 	struct phylink_pcs *pcs;
1276 	struct phylink *phylink;
1277 
1278 	config = &priv->phylink_config;
1279 
1280 	config->dev = &priv->dev->dev;
1281 	config->type = PHYLINK_NETDEV;
1282 	config->mac_managed_pm = true;
1283 
1284 	/* Stmmac always requires an RX clock for hardware initialization */
1285 	config->mac_requires_rxc = true;
1286 
1287 	/* Only allow stopping the EEE RX clock in LPI when VLAN features are
1288 	 * not in use, to keep VLAN register access working correctly.
1289 	 */
1290 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI) &&
1291 	    !(priv->dev->features & NETIF_F_VLAN_FEATURES))
1292 		config->eee_rx_clk_stop_enable = true;
1293 
1294 	/* Set the default transmit clock stop bit based on the platform glue */
1295 	priv->tx_lpi_clk_stop = priv->plat->flags &
1296 				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1297 
1298 	mdio_bus_data = priv->plat->mdio_bus_data;
1299 	if (mdio_bus_data)
1300 		config->default_an_inband = mdio_bus_data->default_an_inband;
1301 
1302 	/* Get the PHY interface modes (at the PHY end of the link) that
1303 	 * are supported by the platform.
1304 	 */
1305 	if (priv->plat->get_interfaces)
1306 		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1307 					   config->supported_interfaces);
1308 
1309 	/* Set the platform/firmware specified interface mode if the
1310 	 * supported interfaces have not already been provided using
1311 	 * phy_interface as a last resort.
1312 	 */
1313 	if (phy_interface_empty(config->supported_interfaces))
1314 		__set_bit(priv->plat->phy_interface,
1315 			  config->supported_interfaces);
1316 
1317 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1318 	if (priv->hw->xpcs)
1319 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1320 	else
1321 		pcs = priv->hw->phylink_pcs;
1322 
1323 	if (pcs)
1324 		phy_interface_or(config->supported_interfaces,
1325 				 config->supported_interfaces,
1326 				 pcs->supported_interfaces);
1327 
1328 	if (priv->dma_cap.eee) {
1329 		/* Assume all supported interfaces also support LPI */
1330 		memcpy(config->lpi_interfaces, config->supported_interfaces,
1331 		       sizeof(config->lpi_interfaces));
1332 
1333 		/* All full duplex speeds of 100Mbps and above are supported */
1334 		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
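		/* The MAC_* capability flags are ascending single-bit values, so
		 * ~(MAC_1000FD - 1) selects 1000FD plus every higher (full duplex
		 * only) speed; OR-ing in MAC_100FD then adds 100Mbps full duplex.
		 */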
1335 		config->lpi_timer_default = eee_timer * 1000;
1336 		config->eee_enabled_default = true;
1337 	}
1338 
1339 	config->wol_phy_speed_ctrl = true;
1340 	if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) {
1341 		config->wol_phy_legacy = true;
1342 	} else {
1343 		if (priv->dma_cap.pmt_remote_wake_up)
1344 			config->wol_mac_support |= WAKE_UCAST;
1345 		if (priv->dma_cap.pmt_magic_frame)
1346 			config->wol_mac_support |= WAKE_MAGIC;
1347 	}
1348 
1349 	fwnode = priv->plat->port_node;
1350 	if (!fwnode)
1351 		fwnode = dev_fwnode(priv->device);
1352 
1353 	phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1354 				 &stmmac_phylink_mac_ops);
1355 	if (IS_ERR(phylink))
1356 		return PTR_ERR(phylink);
1357 
1358 	priv->phylink = phylink;
1359 	return 0;
1360 }
1361 
1362 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1363 				    struct stmmac_dma_conf *dma_conf)
1364 {
1365 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1366 	unsigned int desc_size;
1367 	void *head_rx;
1368 	u32 queue;
1369 
1370 	/* Display RX rings */
1371 	for (queue = 0; queue < rx_cnt; queue++) {
1372 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1373 
1374 		pr_info("\tRX Queue %u rings\n", queue);
1375 
1376 		if (priv->extend_desc) {
1377 			head_rx = (void *)rx_q->dma_erx;
1378 			desc_size = sizeof(struct dma_extended_desc);
1379 		} else {
1380 			head_rx = (void *)rx_q->dma_rx;
1381 			desc_size = sizeof(struct dma_desc);
1382 		}
1383 
1384 		/* Display RX ring */
1385 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1386 				    rx_q->dma_rx_phy, desc_size);
1387 	}
1388 }
1389 
1390 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1391 				    struct stmmac_dma_conf *dma_conf)
1392 {
1393 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1394 	unsigned int desc_size;
1395 	void *head_tx;
1396 	u32 queue;
1397 
1398 	/* Display TX rings */
1399 	for (queue = 0; queue < tx_cnt; queue++) {
1400 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1401 
1402 		pr_info("\tTX Queue %d rings\n", queue);
1403 
1404 		if (priv->extend_desc) {
1405 			head_tx = (void *)tx_q->dma_etx;
1406 			desc_size = sizeof(struct dma_extended_desc);
1407 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1408 			head_tx = (void *)tx_q->dma_entx;
1409 			desc_size = sizeof(struct dma_edesc);
1410 		} else {
1411 			head_tx = (void *)tx_q->dma_tx;
1412 			desc_size = sizeof(struct dma_desc);
1413 		}
1414 
1415 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1416 				    tx_q->dma_tx_phy, desc_size);
1417 	}
1418 }
1419 
1420 static void stmmac_display_rings(struct stmmac_priv *priv,
1421 				 struct stmmac_dma_conf *dma_conf)
1422 {
1423 	/* Display RX ring */
1424 	stmmac_display_rx_rings(priv, dma_conf);
1425 
1426 	/* Display TX ring */
1427 	stmmac_display_tx_rings(priv, dma_conf);
1428 }
1429 
1430 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1431 {
1432 	if (stmmac_xdp_is_enabled(priv))
1433 		return XDP_PACKET_HEADROOM;
1434 
1435 	return NET_SKB_PAD;
1436 }
1437 
1438 static int stmmac_set_bfsize(int mtu)
1439 {
1440 	int ret;
1441 
1442 	if (mtu >= BUF_SIZE_8KiB)
1443 		ret = BUF_SIZE_16KiB;
1444 	else if (mtu >= BUF_SIZE_4KiB)
1445 		ret = BUF_SIZE_8KiB;
1446 	else if (mtu >= BUF_SIZE_2KiB)
1447 		ret = BUF_SIZE_4KiB;
1448 	else if (mtu > DEFAULT_BUFSIZE)
1449 		ret = BUF_SIZE_2KiB;
1450 	else
1451 		ret = DEFAULT_BUFSIZE;
1452 
1453 	return ret;
1454 }
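
/* Example (illustrative MTUs): 1500 keeps DEFAULT_BUFSIZE (1536 bytes), 3000
 * selects BUF_SIZE_4KiB, and any MTU of 8KiB or more gets the maximum
 * BUF_SIZE_16KiB DMA buffer.
 */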
1455 
1456 /**
1457  * stmmac_clear_rx_descriptors - clear RX descriptors
1458  * @priv: driver private structure
1459  * @dma_conf: structure to take the dma data
1460  * @queue: RX queue index
1461  * Description: this function is called to clear the RX descriptors
1462  * whether basic or extended descriptors are used.
1463  */
1464 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1465 					struct stmmac_dma_conf *dma_conf,
1466 					u32 queue)
1467 {
1468 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1469 	int i;
1470 
1471 	/* Clear the RX descriptors */
1472 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1473 		if (priv->extend_desc)
1474 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1475 					priv->use_riwt, priv->mode,
1476 					(i == dma_conf->dma_rx_size - 1),
1477 					dma_conf->dma_buf_sz);
1478 		else
1479 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1480 					priv->use_riwt, priv->mode,
1481 					(i == dma_conf->dma_rx_size - 1),
1482 					dma_conf->dma_buf_sz);
1483 }
1484 
1485 /**
1486  * stmmac_clear_tx_descriptors - clear tx descriptors
1487  * @priv: driver private structure
1488  * @dma_conf: structure to take the dma data
1489  * @queue: TX queue index.
1490  * Description: this function is called to clear the TX descriptors
1491  * whether basic or extended descriptors are used.
1492  */
1493 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1494 					struct stmmac_dma_conf *dma_conf,
1495 					u32 queue)
1496 {
1497 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1498 	int i;
1499 
1500 	/* Clear the TX descriptors */
1501 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1502 		int last = (i == (dma_conf->dma_tx_size - 1));
1503 		struct dma_desc *p;
1504 
1505 		if (priv->extend_desc)
1506 			p = &tx_q->dma_etx[i].basic;
1507 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1508 			p = &tx_q->dma_entx[i].basic;
1509 		else
1510 			p = &tx_q->dma_tx[i];
1511 
1512 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1513 	}
1514 }
1515 
1516 /**
1517  * stmmac_clear_descriptors - clear descriptors
1518  * @priv: driver private structure
1519  * @dma_conf: structure to take the dma data
1520  * Description: this function is called to clear the TX and RX descriptors
1521  * whether basic or extended descriptors are used.
1522  */
1523 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1524 				     struct stmmac_dma_conf *dma_conf)
1525 {
1526 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1527 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1528 	u32 queue;
1529 
1530 	/* Clear the RX descriptors */
1531 	for (queue = 0; queue < rx_queue_cnt; queue++)
1532 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1533 
1534 	/* Clear the TX descriptors */
1535 	for (queue = 0; queue < tx_queue_cnt; queue++)
1536 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1537 }
1538 
1539 /**
1540  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1541  * @priv: driver private structure
1542  * @dma_conf: structure to take the dma data
1543  * @p: descriptor pointer
1544  * @i: descriptor index
1545  * @flags: gfp flag
1546  * @queue: RX queue index
1547  * Description: this function is called to allocate a receive buffer, perform
1548  * the DMA mapping and init the descriptor.
1549  */
1550 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1551 				  struct stmmac_dma_conf *dma_conf,
1552 				  struct dma_desc *p,
1553 				  int i, gfp_t flags, u32 queue)
1554 {
1555 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1556 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1557 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1558 
1559 	if (priv->dma_cap.host_dma_width <= 32)
1560 		gfp |= GFP_DMA32;
1561 
1562 	if (!buf->page) {
1563 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1564 		if (!buf->page)
1565 			return -ENOMEM;
1566 		buf->page_offset = stmmac_rx_offset(priv);
1567 	}
1568 
1569 	if (priv->sph_active && !buf->sec_page) {
1570 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1571 		if (!buf->sec_page)
1572 			return -ENOMEM;
1573 
1574 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1575 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1576 	} else {
1577 		buf->sec_page = NULL;
1578 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1579 	}
1580 
1581 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1582 
1583 	stmmac_set_desc_addr(priv, p, buf->addr);
1584 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1585 		stmmac_init_desc3(priv, p);
1586 
1587 	return 0;
1588 }
1589 
1590 /**
1591  * stmmac_free_rx_buffer - free RX dma buffers
1592  * @priv: private structure
1593  * @rx_q: RX queue
1594  * @i: buffer index.
1595  */
1596 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1597 				  struct stmmac_rx_queue *rx_q,
1598 				  int i)
1599 {
1600 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1601 
1602 	if (buf->page)
1603 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1604 	buf->page = NULL;
1605 
1606 	if (buf->sec_page)
1607 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1608 	buf->sec_page = NULL;
1609 }
1610 
1611 /**
1612  * stmmac_free_tx_buffer - free a TX dma buffer
1613  * @priv: private structure
1614  * @dma_conf: structure to take the dma data
1615  * @queue: TX queue index
1616  * @i: buffer index.
1617  */
1618 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1619 				  struct stmmac_dma_conf *dma_conf,
1620 				  u32 queue, int i)
1621 {
1622 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1623 
1624 	if (tx_q->tx_skbuff_dma[i].buf &&
1625 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1626 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1627 			dma_unmap_page(priv->device,
1628 				       tx_q->tx_skbuff_dma[i].buf,
1629 				       tx_q->tx_skbuff_dma[i].len,
1630 				       DMA_TO_DEVICE);
1631 		else
1632 			dma_unmap_single(priv->device,
1633 					 tx_q->tx_skbuff_dma[i].buf,
1634 					 tx_q->tx_skbuff_dma[i].len,
1635 					 DMA_TO_DEVICE);
1636 	}
1637 
1638 	if (tx_q->xdpf[i] &&
1639 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1640 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1641 		xdp_return_frame(tx_q->xdpf[i]);
1642 		tx_q->xdpf[i] = NULL;
1643 	}
1644 
1645 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1646 		tx_q->xsk_frames_done++;
1647 
1648 	if (tx_q->tx_skbuff[i] &&
1649 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1650 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1651 		tx_q->tx_skbuff[i] = NULL;
1652 	}
1653 
1654 	tx_q->tx_skbuff_dma[i].buf = 0;
1655 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1656 }
1657 
1658 /**
1659  * dma_free_rx_skbufs - free RX dma buffers
1660  * @priv: private structure
1661  * @dma_conf: structure to take the dma data
1662  * @queue: RX queue index
1663  */
1664 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1665 			       struct stmmac_dma_conf *dma_conf,
1666 			       u32 queue)
1667 {
1668 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1669 	int i;
1670 
1671 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1672 		stmmac_free_rx_buffer(priv, rx_q, i);
1673 }
1674 
1675 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1676 				   struct stmmac_dma_conf *dma_conf,
1677 				   u32 queue, gfp_t flags)
1678 {
1679 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1680 	int i;
1681 
1682 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1683 		struct dma_desc *p;
1684 		int ret;
1685 
1686 		if (priv->extend_desc)
1687 			p = &((rx_q->dma_erx + i)->basic);
1688 		else
1689 			p = rx_q->dma_rx + i;
1690 
1691 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1692 					     queue);
1693 		if (ret)
1694 			return ret;
1695 
1696 		rx_q->buf_alloc_num++;
1697 	}
1698 
1699 	return 0;
1700 }
1701 
1702 /**
1703  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1704  * @priv: private structure
1705  * @dma_conf: structure to take the dma data
1706  * @queue: RX queue index
1707  */
1708 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1709 				struct stmmac_dma_conf *dma_conf,
1710 				u32 queue)
1711 {
1712 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1713 	int i;
1714 
1715 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1716 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1717 
1718 		if (!buf->xdp)
1719 			continue;
1720 
1721 		xsk_buff_free(buf->xdp);
1722 		buf->xdp = NULL;
1723 	}
1724 }
1725 
1726 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1727 				      struct stmmac_dma_conf *dma_conf,
1728 				      u32 queue)
1729 {
1730 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1731 	int i;
1732 
1733 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1734 	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1735 	 * use this macro to make sure there are no size violations.
1736 	 */
1737 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1738 
1739 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1740 		struct stmmac_rx_buffer *buf;
1741 		dma_addr_t dma_addr;
1742 		struct dma_desc *p;
1743 
1744 		if (priv->extend_desc)
1745 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1746 		else
1747 			p = rx_q->dma_rx + i;
1748 
1749 		buf = &rx_q->buf_pool[i];
1750 
1751 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1752 		if (!buf->xdp)
1753 			return -ENOMEM;
1754 
1755 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1756 		stmmac_set_desc_addr(priv, p, dma_addr);
1757 		rx_q->buf_alloc_num++;
1758 	}
1759 
1760 	return 0;
1761 }
1762 
1763 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1764 {
1765 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1766 		return NULL;
1767 
1768 	return xsk_get_pool_from_qid(priv->dev, queue);
1769 }
1770 
1771 /**
1772  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1773  * @priv: driver private structure
1774  * @dma_conf: structure to take the dma data
1775  * @queue: RX queue index
1776  * @flags: gfp flag.
1777  * Description: this function initializes the DMA RX descriptors
1778  * and allocates the socket buffers. It supports the chained and ring
1779  * modes.
1780  */
1781 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1782 				    struct stmmac_dma_conf *dma_conf,
1783 				    u32 queue, gfp_t flags)
1784 {
1785 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1786 	int ret;
1787 
1788 	netif_dbg(priv, probe, priv->dev,
1789 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1790 		  (u32)rx_q->dma_rx_phy);
1791 
1792 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1793 
1794 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
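	/* Re-register the XDP memory model for this queue: an XSK buffer
	 * pool when zero-copy AF_XDP is bound to the queue, the page_pool
	 * otherwise.
	 */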
1795 
1796 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1797 
1798 	if (rx_q->xsk_pool) {
1799 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1800 						   MEM_TYPE_XSK_BUFF_POOL,
1801 						   NULL));
1802 		netdev_info(priv->dev,
1803 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1804 			    rx_q->queue_index);
1805 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1806 	} else {
1807 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1808 						   MEM_TYPE_PAGE_POOL,
1809 						   rx_q->page_pool));
1810 		netdev_info(priv->dev,
1811 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1812 			    rx_q->queue_index);
1813 	}
1814 
1815 	if (rx_q->xsk_pool) {
1816 		/* RX XDP ZC buffer pool may not be populated, e.g.
1817 		 * xdpsock TX-only.
1818 		 */
1819 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1820 	} else {
1821 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1822 		if (ret < 0)
1823 			return -ENOMEM;
1824 	}
1825 
1826 	/* Setup the chained descriptor addresses */
1827 	if (priv->mode == STMMAC_CHAIN_MODE) {
1828 		if (priv->extend_desc)
1829 			stmmac_mode_init(priv, rx_q->dma_erx,
1830 					 rx_q->dma_rx_phy,
1831 					 dma_conf->dma_rx_size, 1);
1832 		else
1833 			stmmac_mode_init(priv, rx_q->dma_rx,
1834 					 rx_q->dma_rx_phy,
1835 					 dma_conf->dma_rx_size, 0);
1836 	}
1837 
1838 	return 0;
1839 }
1840 
1841 static int init_dma_rx_desc_rings(struct net_device *dev,
1842 				  struct stmmac_dma_conf *dma_conf,
1843 				  gfp_t flags)
1844 {
1845 	struct stmmac_priv *priv = netdev_priv(dev);
1846 	u32 rx_count = priv->plat->rx_queues_to_use;
1847 	int queue;
1848 	int ret;
1849 
1850 	/* RX INITIALIZATION */
1851 	netif_dbg(priv, probe, priv->dev,
1852 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1853 
1854 	for (queue = 0; queue < rx_count; queue++) {
1855 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1856 		if (ret)
1857 			goto err_init_rx_buffers;
1858 	}
1859 
1860 	return 0;
1861 
1862 err_init_rx_buffers:
1863 	while (queue >= 0) {
1864 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1865 
1866 		if (rx_q->xsk_pool)
1867 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1868 		else
1869 			dma_free_rx_skbufs(priv, dma_conf, queue);
1870 
1871 		rx_q->buf_alloc_num = 0;
1872 		rx_q->xsk_pool = NULL;
1873 
1874 		queue--;
1875 	}
1876 
1877 	return ret;
1878 }
1879 
1880 /**
1881  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1882  * @priv: driver private structure
1883  * @dma_conf: structure to take the dma data
1884  * @queue: TX queue index
1885  * Description: this function initializes the DMA TX descriptors
1886  * and allocates the socket buffers. It supports the chained and ring
1887  * modes.
1888  */
1889 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1890 				    struct stmmac_dma_conf *dma_conf,
1891 				    u32 queue)
1892 {
1893 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1894 	int i;
1895 
1896 	netif_dbg(priv, probe, priv->dev,
1897 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1898 		  (u32)tx_q->dma_tx_phy);
1899 
1900 	/* Setup the chained descriptor addresses */
1901 	if (priv->mode == STMMAC_CHAIN_MODE) {
1902 		if (priv->extend_desc)
1903 			stmmac_mode_init(priv, tx_q->dma_etx,
1904 					 tx_q->dma_tx_phy,
1905 					 dma_conf->dma_tx_size, 1);
1906 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1907 			stmmac_mode_init(priv, tx_q->dma_tx,
1908 					 tx_q->dma_tx_phy,
1909 					 dma_conf->dma_tx_size, 0);
1910 	}
1911 
1912 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1913 
1914 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1915 		struct dma_desc *p;
1916 
1917 		if (priv->extend_desc)
1918 			p = &((tx_q->dma_etx + i)->basic);
1919 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1920 			p = &((tx_q->dma_entx + i)->basic);
1921 		else
1922 			p = tx_q->dma_tx + i;
1923 
1924 		stmmac_clear_desc(priv, p);
1925 
1926 		tx_q->tx_skbuff_dma[i].buf = 0;
1927 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1928 		tx_q->tx_skbuff_dma[i].len = 0;
1929 		tx_q->tx_skbuff_dma[i].last_segment = false;
1930 		tx_q->tx_skbuff[i] = NULL;
1931 	}
1932 
1933 	return 0;
1934 }
1935 
1936 static int init_dma_tx_desc_rings(struct net_device *dev,
1937 				  struct stmmac_dma_conf *dma_conf)
1938 {
1939 	struct stmmac_priv *priv = netdev_priv(dev);
1940 	u32 tx_queue_cnt;
1941 	u32 queue;
1942 
1943 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1944 
1945 	for (queue = 0; queue < tx_queue_cnt; queue++)
1946 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1947 
1948 	return 0;
1949 }
1950 
1951 /**
1952  * init_dma_desc_rings - init the RX/TX descriptor rings
1953  * @dev: net device structure
1954  * @dma_conf: structure to take the dma data
1955  * @flags: gfp flag.
1956  * Description: this function initializes the DMA RX/TX descriptors
1957  * and allocates the socket buffers. It supports the chained and ring
1958  * modes.
1959  */
1960 static int init_dma_desc_rings(struct net_device *dev,
1961 			       struct stmmac_dma_conf *dma_conf,
1962 			       gfp_t flags)
1963 {
1964 	struct stmmac_priv *priv = netdev_priv(dev);
1965 	int ret;
1966 
1967 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1968 	if (ret)
1969 		return ret;
1970 
1971 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1972 
1973 	stmmac_clear_descriptors(priv, dma_conf);
1974 
1975 	if (netif_msg_hw(priv))
1976 		stmmac_display_rings(priv, dma_conf);
1977 
1978 	return ret;
1979 }
1980 
1981 /**
1982  * dma_free_tx_skbufs - free TX dma buffers
1983  * @priv: private structure
1984  * @dma_conf: structure to take the dma data
1985  * @queue: TX queue index
1986  */
1987 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1988 			       struct stmmac_dma_conf *dma_conf,
1989 			       u32 queue)
1990 {
1991 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1992 	int i;
1993 
1994 	tx_q->xsk_frames_done = 0;
1995 
1996 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1997 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1998 
1999 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
2000 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2001 		tx_q->xsk_frames_done = 0;
2002 		tx_q->xsk_pool = NULL;
2003 	}
2004 }
2005 
2006 /**
2007  * stmmac_free_tx_skbufs - free TX skb buffers
2008  * @priv: private structure
2009  */
2010 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
2011 {
2012 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
2013 	u32 queue;
2014 
2015 	for (queue = 0; queue < tx_queue_cnt; queue++)
2016 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
2017 }
2018 
2019 /**
2020  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
2021  * @priv: private structure
2022  * @dma_conf: structure to take the dma data
2023  * @queue: RX queue index
2024  */
2025 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
2026 					 struct stmmac_dma_conf *dma_conf,
2027 					 u32 queue)
2028 {
2029 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2030 
2031 	/* Release the DMA RX socket buffers */
2032 	if (rx_q->xsk_pool)
2033 		dma_free_rx_xskbufs(priv, dma_conf, queue);
2034 	else
2035 		dma_free_rx_skbufs(priv, dma_conf, queue);
2036 
2037 	rx_q->buf_alloc_num = 0;
2038 	rx_q->xsk_pool = NULL;
2039 
2040 	/* Free DMA regions of consistent memory previously allocated */
2041 	if (!priv->extend_desc)
2042 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2043 				  sizeof(struct dma_desc),
2044 				  rx_q->dma_rx, rx_q->dma_rx_phy);
2045 	else
2046 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2047 				  sizeof(struct dma_extended_desc),
2048 				  rx_q->dma_erx, rx_q->dma_rx_phy);
2049 
2050 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
2051 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
2052 
2053 	kfree(rx_q->buf_pool);
2054 	if (rx_q->page_pool)
2055 		page_pool_destroy(rx_q->page_pool);
2056 }
2057 
2058 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2059 				       struct stmmac_dma_conf *dma_conf)
2060 {
2061 	u32 rx_count = priv->plat->rx_queues_to_use;
2062 	u32 queue;
2063 
2064 	/* Free RX queue resources */
2065 	for (queue = 0; queue < rx_count; queue++)
2066 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
2067 }
2068 
2069 /**
2070  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2071  * @priv: private structure
2072  * @dma_conf: structure to take the dma data
2073  * @queue: TX queue index
2074  */
2075 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2076 					 struct stmmac_dma_conf *dma_conf,
2077 					 u32 queue)
2078 {
2079 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2080 	size_t size;
2081 	void *addr;
2082 
2083 	/* Release the DMA TX socket buffers */
2084 	dma_free_tx_skbufs(priv, dma_conf, queue);
2085 
2086 	if (priv->extend_desc) {
2087 		size = sizeof(struct dma_extended_desc);
2088 		addr = tx_q->dma_etx;
2089 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2090 		size = sizeof(struct dma_edesc);
2091 		addr = tx_q->dma_entx;
2092 	} else {
2093 		size = sizeof(struct dma_desc);
2094 		addr = tx_q->dma_tx;
2095 	}
2096 
2097 	size *= dma_conf->dma_tx_size;
2098 
2099 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2100 
2101 	kfree(tx_q->tx_skbuff_dma);
2102 	kfree(tx_q->tx_skbuff);
2103 }
2104 
2105 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2106 				       struct stmmac_dma_conf *dma_conf)
2107 {
2108 	u32 tx_count = priv->plat->tx_queues_to_use;
2109 	u32 queue;
2110 
2111 	/* Free TX queue resources */
2112 	for (queue = 0; queue < tx_count; queue++)
2113 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2114 }
2115 
2116 /**
2117  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2118  * @priv: private structure
2119  * @dma_conf: structure to take the dma data
2120  * @queue: RX queue index
2121  * Description: according to which descriptor can be used (extend or basic)
2122  * Description: according to which descriptor can be used (extended or basic)
2123  * this function allocates the resources for the TX and RX paths. In case of
2124  * reception, for example, it pre-allocates the RX socket buffer in order to
2125  * allow the zero-copy mechanism.
2126 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2127 					 struct stmmac_dma_conf *dma_conf,
2128 					 u32 queue)
2129 {
2130 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2131 	struct stmmac_channel *ch = &priv->channel[queue];
2132 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2133 	struct page_pool_params pp_params = { 0 };
2134 	unsigned int dma_buf_sz_pad, num_pages;
2135 	unsigned int napi_id;
2136 	int ret;
2137 
2138 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2139 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2140 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
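	/* Each receive buffer must hold the RX headroom, the DMA buffer
	 * itself and the trailing skb_shared_info, rounded up to a whole
	 * number of pages so the page_pool can serve it in one allocation.
	 */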
2141 
2142 	rx_q->queue_index = queue;
2143 	rx_q->priv_data = priv;
2144 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2145 
2146 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2147 	pp_params.pool_size = dma_conf->dma_rx_size;
2148 	pp_params.order = order_base_2(num_pages);
2149 	pp_params.nid = dev_to_node(priv->device);
2150 	pp_params.dev = priv->device;
2151 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2152 	pp_params.offset = stmmac_rx_offset(priv);
2153 	pp_params.max_len = dma_conf->dma_buf_sz;
2154 
2155 	if (priv->sph_active) {
2156 		pp_params.offset = 0;
2157 		pp_params.max_len += stmmac_rx_offset(priv);
2158 	}
2159 
2160 	rx_q->page_pool = page_pool_create(&pp_params);
2161 	if (IS_ERR(rx_q->page_pool)) {
2162 		ret = PTR_ERR(rx_q->page_pool);
2163 		rx_q->page_pool = NULL;
2164 		return ret;
2165 	}
2166 
2167 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2168 				 sizeof(*rx_q->buf_pool),
2169 				 GFP_KERNEL);
2170 	if (!rx_q->buf_pool)
2171 		return -ENOMEM;
2172 
2173 	if (priv->extend_desc) {
2174 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2175 						   dma_conf->dma_rx_size *
2176 						   sizeof(struct dma_extended_desc),
2177 						   &rx_q->dma_rx_phy,
2178 						   GFP_KERNEL);
2179 		if (!rx_q->dma_erx)
2180 			return -ENOMEM;
2181 
2182 	} else {
2183 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2184 						  dma_conf->dma_rx_size *
2185 						  sizeof(struct dma_desc),
2186 						  &rx_q->dma_rx_phy,
2187 						  GFP_KERNEL);
2188 		if (!rx_q->dma_rx)
2189 			return -ENOMEM;
2190 	}
2191 
2192 	if (stmmac_xdp_is_enabled(priv) &&
2193 	    test_bit(queue, priv->af_xdp_zc_qps))
2194 		napi_id = ch->rxtx_napi.napi_id;
2195 	else
2196 		napi_id = ch->rx_napi.napi_id;
2197 
2198 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2199 			       rx_q->queue_index,
2200 			       napi_id);
2201 	if (ret) {
2202 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2203 		return -EINVAL;
2204 	}
2205 
2206 	return 0;
2207 }
2208 
2209 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2210 				       struct stmmac_dma_conf *dma_conf)
2211 {
2212 	u32 rx_count = priv->plat->rx_queues_to_use;
2213 	u32 queue;
2214 	int ret;
2215 
2216 	/* RX queues buffers and DMA */
2217 	for (queue = 0; queue < rx_count; queue++) {
2218 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2219 		if (ret)
2220 			goto err_dma;
2221 	}
2222 
2223 	return 0;
2224 
2225 err_dma:
2226 	free_dma_rx_desc_resources(priv, dma_conf);
2227 
2228 	return ret;
2229 }
2230 
2231 /**
2232  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2233  * @priv: private structure
2234  * @dma_conf: structure to take the dma data
2235  * @queue: TX queue index
2236  * Description: according to which descriptor can be used (extended or basic)
2237  * this function allocates the resources for the TX and RX paths. In case of
2238  * reception, for example, it pre-allocates the RX socket buffer in order to
2239  * allow the zero-copy mechanism.
2240  */
2241 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2242 					 struct stmmac_dma_conf *dma_conf,
2243 					 u32 queue)
2244 {
2245 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2246 	size_t size;
2247 	void *addr;
2248 
2249 	tx_q->queue_index = queue;
2250 	tx_q->priv_data = priv;
2251 
2252 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2253 				      sizeof(*tx_q->tx_skbuff_dma),
2254 				      GFP_KERNEL);
2255 	if (!tx_q->tx_skbuff_dma)
2256 		return -ENOMEM;
2257 
2258 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2259 				  sizeof(struct sk_buff *),
2260 				  GFP_KERNEL);
2261 	if (!tx_q->tx_skbuff)
2262 		return -ENOMEM;
2263 
2264 	if (priv->extend_desc)
2265 		size = sizeof(struct dma_extended_desc);
2266 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2267 		size = sizeof(struct dma_edesc);
2268 	else
2269 		size = sizeof(struct dma_desc);
2270 
2271 	size *= dma_conf->dma_tx_size;
2272 
2273 	addr = dma_alloc_coherent(priv->device, size,
2274 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2275 	if (!addr)
2276 		return -ENOMEM;
2277 
2278 	if (priv->extend_desc)
2279 		tx_q->dma_etx = addr;
2280 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2281 		tx_q->dma_entx = addr;
2282 	else
2283 		tx_q->dma_tx = addr;
2284 
2285 	return 0;
2286 }
2287 
2288 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2289 				       struct stmmac_dma_conf *dma_conf)
2290 {
2291 	u32 tx_count = priv->plat->tx_queues_to_use;
2292 	u32 queue;
2293 	int ret;
2294 
2295 	/* TX queues buffers and DMA */
2296 	for (queue = 0; queue < tx_count; queue++) {
2297 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2298 		if (ret)
2299 			goto err_dma;
2300 	}
2301 
2302 	return 0;
2303 
2304 err_dma:
2305 	free_dma_tx_desc_resources(priv, dma_conf);
2306 	return ret;
2307 }
2308 
2309 /**
2310  * alloc_dma_desc_resources - alloc TX/RX resources.
2311  * @priv: private structure
2312  * @dma_conf: structure to take the dma data
2313  * Description: according to which descriptor can be used (extended or basic)
2314  * this function allocates the resources for the TX and RX paths. In case of
2315  * reception, for example, it pre-allocates the RX socket buffer in order to
2316  * allow the zero-copy mechanism.
2317  */
2318 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2319 				    struct stmmac_dma_conf *dma_conf)
2320 {
2321 	/* RX Allocation */
2322 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2323 
2324 	if (ret)
2325 		return ret;
2326 
2327 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2328 
2329 	return ret;
2330 }
2331 
2332 /**
2333  * free_dma_desc_resources - free dma desc resources
2334  * @priv: private structure
2335  * @dma_conf: structure to take the dma data
2336  */
2337 static void free_dma_desc_resources(struct stmmac_priv *priv,
2338 				    struct stmmac_dma_conf *dma_conf)
2339 {
2340 	/* Release the DMA TX socket buffers */
2341 	free_dma_tx_desc_resources(priv, dma_conf);
2342 
2343 	/* Release the DMA RX socket buffers later
2344 	 * to ensure all pending XDP_TX buffers are returned.
2345 	 */
2346 	free_dma_rx_desc_resources(priv, dma_conf);
2347 }
2348 
2349 /**
2350  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2351  *  @priv: driver private structure
2352  *  Description: It is used for enabling the rx queues in the MAC
2353  */
2354 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2355 {
2356 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2357 	int queue;
2358 	u8 mode;
2359 
2360 	for (queue = 0; queue < rx_queues_count; queue++) {
2361 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2362 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2363 	}
2364 }
2365 
2366 /**
2367  * stmmac_start_rx_dma - start RX DMA channel
2368  * @priv: driver private structure
2369  * @chan: RX channel index
2370  * Description:
2371  * This starts a RX DMA channel
2372  */
2373 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2374 {
2375 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2376 	stmmac_start_rx(priv, priv->ioaddr, chan);
2377 }
2378 
2379 /**
2380  * stmmac_start_tx_dma - start TX DMA channel
2381  * @priv: driver private structure
2382  * @chan: TX channel index
2383  * Description:
2384  * This starts a TX DMA channel
2385  */
2386 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2387 {
2388 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2389 	stmmac_start_tx(priv, priv->ioaddr, chan);
2390 }
2391 
2392 /**
2393  * stmmac_stop_rx_dma - stop RX DMA channel
2394  * @priv: driver private structure
2395  * @chan: RX channel index
2396  * Description:
2397  * This stops a RX DMA channel
2398  */
2399 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2400 {
2401 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2402 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2403 }
2404 
2405 /**
2406  * stmmac_stop_tx_dma - stop TX DMA channel
2407  * @priv: driver private structure
2408  * @chan: TX channel index
2409  * Description:
2410  * This stops a TX DMA channel
2411  */
2412 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2413 {
2414 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2415 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2416 }
2417 
2418 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2419 {
2420 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2421 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2422 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2423 	u32 chan;
2424 
2425 	for (chan = 0; chan < dma_csr_ch; chan++) {
2426 		struct stmmac_channel *ch = &priv->channel[chan];
2427 		unsigned long flags;
2428 
2429 		spin_lock_irqsave(&ch->lock, flags);
2430 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2431 		spin_unlock_irqrestore(&ch->lock, flags);
2432 	}
2433 }
2434 
2435 /**
2436  * stmmac_start_all_dma - start all RX and TX DMA channels
2437  * @priv: driver private structure
2438  * Description:
2439  * This starts all the RX and TX DMA channels
2440  */
2441 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2442 {
2443 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2444 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2445 	u32 chan = 0;
2446 
2447 	for (chan = 0; chan < rx_channels_count; chan++)
2448 		stmmac_start_rx_dma(priv, chan);
2449 
2450 	for (chan = 0; chan < tx_channels_count; chan++)
2451 		stmmac_start_tx_dma(priv, chan);
2452 }
2453 
2454 /**
2455  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2456  * @priv: driver private structure
2457  * Description:
2458  * This stops the RX and TX DMA channels
2459  */
2460 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2461 {
2462 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2463 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2464 	u32 chan = 0;
2465 
2466 	for (chan = 0; chan < rx_channels_count; chan++)
2467 		stmmac_stop_rx_dma(priv, chan);
2468 
2469 	for (chan = 0; chan < tx_channels_count; chan++)
2470 		stmmac_stop_tx_dma(priv, chan);
2471 }
2472 
2473 /**
2474  *  stmmac_dma_operation_mode - HW DMA operation mode
2475  *  @priv: driver private structure
2476  *  Description: it is used for configuring the DMA operation mode register in
2477  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2478  */
2479 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2480 {
2481 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2482 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2483 	int rxfifosz = priv->plat->rx_fifo_size;
2484 	int txfifosz = priv->plat->tx_fifo_size;
2485 	u32 txmode = 0;
2486 	u32 rxmode = 0;
2487 	u32 chan = 0;
2488 	u8 qmode = 0;
2489 
2490 	if (rxfifosz == 0)
2491 		rxfifosz = priv->dma_cap.rx_fifo_size;
2492 	if (txfifosz == 0)
2493 		txfifosz = priv->dma_cap.tx_fifo_size;
2494 
2495 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2496 	if (dwmac_is_xmac(priv->plat->core_type)) {
2497 		rxfifosz /= rx_channels_count;
2498 		txfifosz /= tx_channels_count;
2499 	}
2500 
2501 	if (priv->plat->force_thresh_dma_mode) {
2502 		txmode = tc;
2503 		rxmode = tc;
2504 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2505 		/*
2506 		 * In case of GMAC, SF mode can be enabled
2507 		 * to perform the TX COE in HW. This depends on:
2508 		 * 1) TX COE is actually supported
2509 		 * 2) there is no bugged Jumbo frame support
2510 		 *    that requires not inserting the csum in the TDES.
2511 		 */
2512 		txmode = SF_DMA_MODE;
2513 		rxmode = SF_DMA_MODE;
2514 		priv->xstats.threshold = SF_DMA_MODE;
2515 	} else {
2516 		txmode = tc;
2517 		rxmode = SF_DMA_MODE;
2518 	}
2519 
2520 	/* configure all channels */
2521 	for (chan = 0; chan < rx_channels_count; chan++) {
2522 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2523 		u32 buf_size;
2524 
2525 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2526 
2527 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2528 				rxfifosz, qmode);
2529 
2530 		if (rx_q->xsk_pool) {
2531 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2532 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2533 					      buf_size,
2534 					      chan);
2535 		} else {
2536 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2537 					      priv->dma_conf.dma_buf_sz,
2538 					      chan);
2539 		}
2540 	}
2541 
2542 	for (chan = 0; chan < tx_channels_count; chan++) {
2543 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2544 
2545 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2546 				txfifosz, qmode);
2547 	}
2548 }
2549 
2550 static void stmmac_xsk_request_timestamp(void *_priv)
2551 {
2552 	struct stmmac_metadata_request *meta_req = _priv;
2553 
2554 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2555 	*meta_req->set_ic = true;
2556 }
2557 
2558 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2559 {
2560 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2561 	struct stmmac_priv *priv = tx_compl->priv;
2562 	struct dma_desc *desc = tx_compl->desc;
2563 	bool found = false;
2564 	u64 ns = 0;
2565 
2566 	if (!priv->hwts_tx_en)
2567 		return 0;
2568 
2569 	/* check tx tstamp status */
2570 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2571 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2572 		found = true;
2573 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2574 		found = true;
2575 	}
2576 
2577 	if (found) {
2578 		ns -= priv->plat->cdc_error_adj;
2579 		return ns_to_ktime(ns);
2580 	}
2581 
2582 	return 0;
2583 }
2584 
2585 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2586 {
2587 	struct timespec64 ts = ns_to_timespec64(launch_time);
2588 	struct stmmac_metadata_request *meta_req = _priv;
2589 
2590 	if (meta_req->tbs & STMMAC_TBS_EN)
2591 		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2592 				    ts.tv_nsec);
2593 }
2594 
2595 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2596 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2597 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2598 	.tmo_request_launch_time	= stmmac_xsk_request_launch_time,
2599 };
2600 
2601 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2602 {
2603 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2604 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2605 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2606 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
2607 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2608 	unsigned int entry = tx_q->cur_tx;
2609 	struct dma_desc *tx_desc = NULL;
2610 	struct xdp_desc xdp_desc;
2611 	bool work_done = true;
2612 	u32 tx_set_ic_bit = 0;
2613 
2614 	/* Avoids TX time-out as we are sharing with slow path */
2615 	txq_trans_cond_update(nq);
2616 
2617 	budget = min(budget, stmmac_tx_avail(priv, queue));
2618 
2619 	for (; budget > 0; budget--) {
2620 		struct stmmac_metadata_request meta_req;
2621 		struct xsk_tx_metadata *meta = NULL;
2622 		dma_addr_t dma_addr;
2623 		bool set_ic;
2624 
2625 		/* We are sharing with the slow path and stop XSK TX desc submission
2626 		 * when the available TX ring space is less than the threshold.
2627 		 */
2628 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2629 		    !netif_carrier_ok(priv->dev)) {
2630 			work_done = false;
2631 			break;
2632 		}
2633 
2634 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2635 			break;
2636 
2637 		if (priv->est && priv->est->enable &&
2638 		    priv->est->max_sdu[queue] &&
2639 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2640 			priv->xstats.max_sdu_txq_drop[queue]++;
2641 			continue;
2642 		}
2643 
2644 		if (likely(priv->extend_desc))
2645 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2646 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2647 			tx_desc = &tx_q->dma_entx[entry].basic;
2648 		else
2649 			tx_desc = tx_q->dma_tx + entry;
2650 
2651 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2652 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2653 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2654 
2655 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2656 
2657 		/* To return XDP buffer to XSK pool, we simply call
2658 		 * xsk_tx_completed(), so we don't need to fill up
2659 		 * 'buf' and 'xdpf'.
2660 		 */
2661 		tx_q->tx_skbuff_dma[entry].buf = 0;
2662 		tx_q->xdpf[entry] = NULL;
2663 
2664 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2665 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2666 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2667 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2668 
2669 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2670 
2671 		tx_q->tx_count_frames++;
2672 
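		/* Interrupt-on-completion is requested only once every
		 * tx_coal_frames[queue] descriptors (and never when frame
		 * coalescing is disabled) to limit TX completion interrupts.
		 */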
2673 		if (!priv->tx_coal_frames[queue])
2674 			set_ic = false;
2675 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2676 			set_ic = true;
2677 		else
2678 			set_ic = false;
2679 
2680 		meta_req.priv = priv;
2681 		meta_req.tx_desc = tx_desc;
2682 		meta_req.set_ic = &set_ic;
2683 		meta_req.tbs = tx_q->tbs;
2684 		meta_req.edesc = &tx_q->dma_entx[entry];
2685 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2686 					&meta_req);
2687 		if (set_ic) {
2688 			tx_q->tx_count_frames = 0;
2689 			stmmac_set_tx_ic(priv, tx_desc);
2690 			tx_set_ic_bit++;
2691 		}
2692 
2693 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2694 				       csum, priv->mode, true, true,
2695 				       xdp_desc.len);
2696 
2697 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2698 
2699 		xsk_tx_metadata_to_compl(meta,
2700 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2701 
2702 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2703 		entry = tx_q->cur_tx;
2704 	}
2705 	u64_stats_update_begin(&txq_stats->napi_syncp);
2706 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2707 	u64_stats_update_end(&txq_stats->napi_syncp);
2708 
2709 	if (tx_desc) {
2710 		stmmac_flush_tx_descriptors(priv, queue);
2711 		xsk_tx_release(pool);
2712 	}
2713 
2714 	/* Return true if both of the following conditions are met
2715 	 *  a) TX Budget is still available
2716 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2717 	 *     pending XSK TX for transmission)
2718 	 */
2719 	return !!budget && work_done;
2720 }
2721 
2722 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2723 {
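	/* When running in threshold mode (not Store-and-Forward), raise the
	 * TX threshold in steps of 64 bytes while it is still at or below
	 * 256, then reprogram the DMA operation mode for this channel.
	 */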
2724 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2725 		tc += 64;
2726 
2727 		if (priv->plat->force_thresh_dma_mode)
2728 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2729 		else
2730 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2731 						      chan);
2732 
2733 		priv->xstats.threshold = tc;
2734 	}
2735 }
2736 
2737 /**
2738  * stmmac_tx_clean - to manage the transmission completion
2739  * @priv: driver private structure
2740  * @budget: napi budget limiting this functions packet handling
2741  * @queue: TX queue index
2742  * @pending_packets: signal to arm the TX coal timer
2743  * Description: it reclaims the transmit resources after transmission completes.
2744  * If some packets still need to be handled, due to TX coalescing, set
2745  * pending_packets to true to make NAPI arm the TX coal timer.
2746  */
2747 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2748 			   bool *pending_packets)
2749 {
2750 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2751 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2752 	unsigned int bytes_compl = 0, pkts_compl = 0;
2753 	unsigned int entry, xmits = 0, count = 0;
2754 	u32 tx_packets = 0, tx_errors = 0;
2755 
2756 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2757 
2758 	tx_q->xsk_frames_done = 0;
2759 
2760 	entry = tx_q->dirty_tx;
2761 
2762 	/* Try to clean all TX complete frame in 1 shot */
2763 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2764 		struct xdp_frame *xdpf;
2765 		struct sk_buff *skb;
2766 		struct dma_desc *p;
2767 		int status;
2768 
2769 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2770 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2771 			xdpf = tx_q->xdpf[entry];
2772 			skb = NULL;
2773 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2774 			xdpf = NULL;
2775 			skb = tx_q->tx_skbuff[entry];
2776 		} else {
2777 			xdpf = NULL;
2778 			skb = NULL;
2779 		}
2780 
2781 		if (priv->extend_desc)
2782 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2783 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2784 			p = &tx_q->dma_entx[entry].basic;
2785 		else
2786 			p = tx_q->dma_tx + entry;
2787 
2788 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2789 		/* Check if the descriptor is owned by the DMA */
2790 		if (unlikely(status & tx_dma_own))
2791 			break;
2792 
2793 		count++;
2794 
2795 		/* Make sure descriptor fields are read after reading
2796 		 * the own bit.
2797 		 */
2798 		dma_rmb();
2799 
2800 		/* Just consider the last segment and ...*/
2801 		if (likely(!(status & tx_not_ls))) {
2802 			/* ... verify the status error condition */
2803 			if (unlikely(status & tx_err)) {
2804 				tx_errors++;
2805 				if (unlikely(status & tx_err_bump_tc))
2806 					stmmac_bump_dma_threshold(priv, queue);
2807 			} else {
2808 				tx_packets++;
2809 			}
2810 			if (skb) {
2811 				stmmac_get_tx_hwtstamp(priv, p, skb);
2812 			} else if (tx_q->xsk_pool &&
2813 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2814 				struct stmmac_xsk_tx_complete tx_compl = {
2815 					.priv = priv,
2816 					.desc = p,
2817 				};
2818 
2819 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2820 							 &stmmac_xsk_tx_metadata_ops,
2821 							 &tx_compl);
2822 			}
2823 		}
2824 
2825 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2826 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2827 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2828 				dma_unmap_page(priv->device,
2829 					       tx_q->tx_skbuff_dma[entry].buf,
2830 					       tx_q->tx_skbuff_dma[entry].len,
2831 					       DMA_TO_DEVICE);
2832 			else
2833 				dma_unmap_single(priv->device,
2834 						 tx_q->tx_skbuff_dma[entry].buf,
2835 						 tx_q->tx_skbuff_dma[entry].len,
2836 						 DMA_TO_DEVICE);
2837 			tx_q->tx_skbuff_dma[entry].buf = 0;
2838 			tx_q->tx_skbuff_dma[entry].len = 0;
2839 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2840 		}
2841 
2842 		stmmac_clean_desc3(priv, tx_q, p);
2843 
2844 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2845 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2846 
2847 		if (xdpf &&
2848 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2849 			xdp_return_frame_rx_napi(xdpf);
2850 			tx_q->xdpf[entry] = NULL;
2851 		}
2852 
2853 		if (xdpf &&
2854 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2855 			xdp_return_frame(xdpf);
2856 			tx_q->xdpf[entry] = NULL;
2857 		}
2858 
2859 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2860 			tx_q->xsk_frames_done++;
2861 
2862 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2863 			if (likely(skb)) {
2864 				pkts_compl++;
2865 				bytes_compl += skb->len;
2866 				dev_consume_skb_any(skb);
2867 				tx_q->tx_skbuff[entry] = NULL;
2868 			}
2869 		}
2870 
2871 		stmmac_release_tx_desc(priv, p, priv->mode);
2872 
2873 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2874 	}
2875 	tx_q->dirty_tx = entry;
2876 
2877 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2878 				  pkts_compl, bytes_compl);
2879 
2880 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2881 								queue))) &&
2882 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2883 
2884 		netif_dbg(priv, tx_done, priv->dev,
2885 			  "%s: restart transmit\n", __func__);
2886 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2887 	}
2888 
2889 	if (tx_q->xsk_pool) {
2890 		bool work_done;
2891 
2892 		if (tx_q->xsk_frames_done)
2893 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2894 
2895 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2896 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2897 
2898 		/* For XSK TX, we try to send as many as possible.
2899 		 * If XSK work done (XSK TX desc empty and budget still
2900 		 * available), return "budget - 1" to reenable TX IRQ.
2901 		 * Else, return "budget" to make NAPI continue polling.
2902 		 */
2903 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2904 					       STMMAC_XSK_TX_BUDGET_MAX);
2905 		if (work_done)
2906 			xmits = budget - 1;
2907 		else
2908 			xmits = budget;
2909 	}
2910 
2911 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2912 		stmmac_restart_sw_lpi_timer(priv);
2913 
2914 	/* We still have pending packets, let's call for a new scheduling */
2915 	if (tx_q->dirty_tx != tx_q->cur_tx)
2916 		*pending_packets = true;
2917 
2918 	u64_stats_update_begin(&txq_stats->napi_syncp);
2919 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2920 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2921 	u64_stats_inc(&txq_stats->napi.tx_clean);
2922 	u64_stats_update_end(&txq_stats->napi_syncp);
2923 
2924 	priv->xstats.tx_errors += tx_errors;
2925 
2926 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2927 
2928 	/* Combine decisions from TX clean and XSK TX */
2929 	return max(count, xmits);
2930 }
2931 
2932 /**
2933  * stmmac_tx_err - to manage the tx error
2934  * @priv: driver private structure
2935  * @chan: channel index
2936  * Description: it cleans the descriptors and restarts the transmission
2937  * in case of transmission errors.
2938  */
2939 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2940 {
2941 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2942 
2943 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2944 
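	/* Recovery sequence: stop the DMA channel, drop all queued buffers,
	 * reset the descriptor ring and queue state, then reprogram and
	 * restart the channel.
	 */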
2945 	stmmac_stop_tx_dma(priv, chan);
2946 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2947 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2948 	stmmac_reset_tx_queue(priv, chan);
2949 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2950 			    tx_q->dma_tx_phy, chan);
2951 	stmmac_start_tx_dma(priv, chan);
2952 
2953 	priv->xstats.tx_errors++;
2954 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2955 }
2956 
2957 /**
2958  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2959  *  @priv: driver private structure
2960  *  @txmode: TX operating mode
2961  *  @rxmode: RX operating mode
2962  *  @chan: channel index
2963  *  Description: it is used for configuring the DMA operation mode at
2964  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2965  *  mode.
2966  */
2967 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2968 					  u32 rxmode, u32 chan)
2969 {
2970 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2971 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2972 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2973 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2974 	int rxfifosz = priv->plat->rx_fifo_size;
2975 	int txfifosz = priv->plat->tx_fifo_size;
2976 
2977 	if (rxfifosz == 0)
2978 		rxfifosz = priv->dma_cap.rx_fifo_size;
2979 	if (txfifosz == 0)
2980 		txfifosz = priv->dma_cap.tx_fifo_size;
2981 
2982 	/* Adjust for real per queue fifo size */
2983 	rxfifosz /= rx_channels_count;
2984 	txfifosz /= tx_channels_count;
2985 
2986 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2987 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2988 }
2989 
2990 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2991 {
2992 	int ret;
2993 
2994 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2995 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2996 	if (ret && (ret != -EINVAL)) {
2997 		stmmac_global_err(priv);
2998 		return true;
2999 	}
3000 
3001 	return false;
3002 }
3003 
3004 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
3005 {
3006 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
3007 						 &priv->xstats, chan, dir);
3008 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
3009 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3010 	struct stmmac_channel *ch = &priv->channel[chan];
3011 	struct napi_struct *rx_napi;
3012 	struct napi_struct *tx_napi;
3013 	unsigned long flags;
3014 
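	/* When an XSK pool is attached to the queue, both RX and TX are
	 * serviced by the combined rxtx NAPI instance.
	 */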
3015 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
3016 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3017 
3018 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
3019 		if (napi_schedule_prep(rx_napi)) {
3020 			spin_lock_irqsave(&ch->lock, flags);
3021 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3022 			spin_unlock_irqrestore(&ch->lock, flags);
3023 			__napi_schedule(rx_napi);
3024 		}
3025 	}
3026 
3027 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
3028 		if (napi_schedule_prep(tx_napi)) {
3029 			spin_lock_irqsave(&ch->lock, flags);
3030 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3031 			spin_unlock_irqrestore(&ch->lock, flags);
3032 			__napi_schedule(tx_napi);
3033 		}
3034 	}
3035 
3036 	return status;
3037 }
3038 
3039 /**
3040  * stmmac_dma_interrupt - DMA ISR
3041  * @priv: driver private structure
3042  * Description: this is the DMA ISR. It is called by the main ISR.
3043  * It calls the dwmac dma routine and schedules the poll method in case
3044  * some work can be done.
3045  */
3046 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3047 {
3048 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3049 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3050 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
3051 				tx_channel_count : rx_channel_count;
3052 	u32 chan;
3053 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3054 
3055 	/* Make sure we never check beyond our status buffer. */
3056 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3057 		channels_to_check = ARRAY_SIZE(status);
3058 
3059 	for (chan = 0; chan < channels_to_check; chan++)
3060 		status[chan] = stmmac_napi_check(priv, chan,
3061 						 DMA_DIR_RXTX);
3062 
3063 	for (chan = 0; chan < tx_channel_count; chan++) {
3064 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3065 			/* Try to bump up the dma threshold on this failure */
3066 			stmmac_bump_dma_threshold(priv, chan);
3067 		} else if (unlikely(status[chan] == tx_hard_error)) {
3068 			stmmac_tx_err(priv, chan);
3069 		}
3070 	}
3071 }
3072 
3073 /**
3074  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3075  * @priv: driver private structure
3076  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
3077  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
3078 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3079 {
3080 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3081 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3082 
3083 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3084 
3085 	if (priv->dma_cap.rmon) {
3086 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3087 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3088 	} else
3089 		netdev_info(priv->dev, "No MAC Management Counters available\n");
3090 }
3091 
3092 /**
3093  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3094  * @priv: driver private structure
3095  * Description:
3096  *  new GMAC chip generations have a new register to indicate the
3097  *  presence of the optional feature/functions.
3098  *  This can also be used to override the value passed through the
3099  *  platform, which is necessary for old MAC10/100 and GMAC chips.
3100  */
3101 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3102 {
3103 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3104 }
3105 
3106 /**
3107  * stmmac_check_ether_addr - check if the MAC addr is valid
3108  * @priv: driver private structure
3109  * Description:
3110  * it verifies that the MAC address is valid; in case of failure it
3111  * generates a random MAC address
3112  */
3113 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3114 {
3115 	u8 addr[ETH_ALEN];
3116 
3117 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3118 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3119 		if (is_valid_ether_addr(addr))
3120 			eth_hw_addr_set(priv->dev, addr);
3121 		else
3122 			eth_hw_addr_random(priv->dev);
3123 		dev_info(priv->device, "device MAC address %pM\n",
3124 			 priv->dev->dev_addr);
3125 	}
3126 }
3127 
3128 int stmmac_get_phy_intf_sel(phy_interface_t interface)
3129 {
3130 	int phy_intf_sel = -EINVAL;
3131 
3132 	if (interface == PHY_INTERFACE_MODE_MII ||
3133 	    interface == PHY_INTERFACE_MODE_GMII)
3134 		phy_intf_sel = PHY_INTF_SEL_GMII_MII;
3135 	else if (phy_interface_mode_is_rgmii(interface))
3136 		phy_intf_sel = PHY_INTF_SEL_RGMII;
3137 	else if (interface == PHY_INTERFACE_MODE_SGMII)
3138 		phy_intf_sel = PHY_INTF_SEL_SGMII;
3139 	else if (interface == PHY_INTERFACE_MODE_RMII)
3140 		phy_intf_sel = PHY_INTF_SEL_RMII;
3141 	else if (interface == PHY_INTERFACE_MODE_REVMII)
3142 		phy_intf_sel = PHY_INTF_SEL_REVMII;
3143 
3144 	return phy_intf_sel;
3145 }
3146 EXPORT_SYMBOL_GPL(stmmac_get_phy_intf_sel);
3147 
3148 static int stmmac_prereset_configure(struct stmmac_priv *priv)
3149 {
3150 	struct plat_stmmacenet_data *plat_dat = priv->plat;
3151 	phy_interface_t interface;
3152 	int phy_intf_sel, ret;
3153 
3154 	if (!plat_dat->set_phy_intf_sel)
3155 		return 0;
3156 
3157 	interface = plat_dat->phy_interface;
3158 	phy_intf_sel = stmmac_get_phy_intf_sel(interface);
3159 	if (phy_intf_sel < 0) {
3160 		netdev_err(priv->dev,
3161 			   "failed to get phy_intf_sel for %s: %pe\n",
3162 			   phy_modes(interface), ERR_PTR(phy_intf_sel));
3163 		return phy_intf_sel;
3164 	}
3165 
3166 	ret = plat_dat->set_phy_intf_sel(plat_dat->bsp_priv, phy_intf_sel);
3167 	if (ret == -EINVAL)
3168 		netdev_err(priv->dev, "platform does not support %s\n",
3169 			   phy_modes(interface));
3170 	else if (ret < 0)
3171 		netdev_err(priv->dev,
3172 			   "platform failed to set interface %s: %pe\n",
3173 			   phy_modes(interface), ERR_PTR(ret));
3174 
3175 	return ret;
3176 }
3177 
3178 /**
3179  * stmmac_init_dma_engine - DMA init.
3180  * @priv: driver private structure
3181  * Description:
3182  * It inits the DMA by invoking the specific MAC/GMAC callback.
3183  * Some DMA parameters can be passed from the platform;
3184  * in case these are not passed, a default is kept for the MAC or GMAC.
3185  */
3186 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3187 {
3188 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3189 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3190 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3191 	struct stmmac_rx_queue *rx_q;
3192 	struct stmmac_tx_queue *tx_q;
3193 	u32 chan = 0;
3194 	int ret = 0;
3195 
3196 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3197 		netdev_err(priv->dev, "Invalid DMA configuration\n");
3198 		return -EINVAL;
3199 	}
3200 
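	/* Extended descriptors in ring mode need the Alternate Descriptor
	 * Size (ATDS) bit set so the DMA uses the larger descriptor layout.
	 */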
3201 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3202 		priv->plat->dma_cfg->atds = 1;
3203 
3204 	ret = stmmac_prereset_configure(priv);
3205 	if (ret)
3206 		return ret;
3207 
3208 	ret = stmmac_reset(priv);
3209 	if (ret) {
3210 		netdev_err(priv->dev, "Failed to reset the dma\n");
3211 		return ret;
3212 	}
3213 
3214 	/* DMA Configuration */
3215 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3216 
3217 	if (priv->plat->axi)
3218 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3219 
3220 	/* DMA CSR Channel configuration */
3221 	for (chan = 0; chan < dma_csr_ch; chan++) {
3222 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3223 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3224 	}
3225 
3226 	/* DMA RX Channel Configuration */
3227 	for (chan = 0; chan < rx_channels_count; chan++) {
3228 		rx_q = &priv->dma_conf.rx_queue[chan];
3229 
3230 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3231 				    rx_q->dma_rx_phy, chan);
3232 
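		/* The RX tail pointer is set just past the last descriptor
		 * that actually has a buffer, telling the DMA how many
		 * descriptors it may use.
		 */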
3233 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3234 				     (rx_q->buf_alloc_num *
3235 				      sizeof(struct dma_desc));
3236 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3237 				       rx_q->rx_tail_addr, chan);
3238 	}
3239 
3240 	/* DMA TX Channel Configuration */
3241 	for (chan = 0; chan < tx_channels_count; chan++) {
3242 		tx_q = &priv->dma_conf.tx_queue[chan];
3243 
3244 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3245 				    tx_q->dma_tx_phy, chan);
3246 
3247 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3248 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3249 				       tx_q->tx_tail_addr, chan);
3250 	}
3251 
3252 	return ret;
3253 }
3254 
3255 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3256 {
3257 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3258 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3259 	struct stmmac_channel *ch;
3260 	struct napi_struct *napi;
3261 
3262 	if (!tx_coal_timer)
3263 		return;
3264 
3265 	ch = &priv->channel[tx_q->queue_index];
3266 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3267 
3268 	/* Arm the timer only if napi is not already scheduled.
3269 	 * If napi is scheduled, try to cancel the timer; it will be armed
3270 	 * again by the next scheduled napi.
3271 	 */
3272 	if (unlikely(!napi_is_scheduled(napi)))
3273 		hrtimer_start(&tx_q->txtimer,
3274 			      STMMAC_COAL_TIMER(tx_coal_timer),
3275 			      HRTIMER_MODE_REL);
3276 	else
3277 		hrtimer_try_to_cancel(&tx_q->txtimer);
3278 }
3279 
3280 /**
3281  * stmmac_tx_timer - mitigation sw timer for tx.
3282  * @t: data pointer
3283  * Description:
3284  * This is the timer handler to directly invoke the stmmac_tx_clean.
3285  */
3286 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3287 {
3288 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3289 	struct stmmac_priv *priv = tx_q->priv_data;
3290 	struct stmmac_channel *ch;
3291 	struct napi_struct *napi;
3292 
3293 	ch = &priv->channel[tx_q->queue_index];
3294 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3295 
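	/* Mask further TX interrupts for this channel before scheduling
	 * NAPI, so the timer-triggered clean does not race the IRQ path.
	 */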
3296 	if (likely(napi_schedule_prep(napi))) {
3297 		unsigned long flags;
3298 
3299 		spin_lock_irqsave(&ch->lock, flags);
3300 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3301 		spin_unlock_irqrestore(&ch->lock, flags);
3302 		__napi_schedule(napi);
3303 	}
3304 
3305 	return HRTIMER_NORESTART;
3306 }
3307 
3308 /**
3309  * stmmac_init_coalesce - init mitigation options.
3310  * @priv: driver private structure
3311  * Description:
3312  * This inits the coalesce parameters: i.e. timer rate,
3313  * timer handler and default threshold used for enabling the
3314  * interrupt on completion bit.
3315  */
3316 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3317 {
3318 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3319 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3320 	u32 chan;
3321 
3322 	for (chan = 0; chan < tx_channel_count; chan++) {
3323 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3324 
3325 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3326 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3327 
3328 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3329 	}
3330 
3331 	for (chan = 0; chan < rx_channel_count; chan++)
3332 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3333 }
3334 
3335 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3336 {
3337 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3338 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3339 	u32 chan;
3340 
3341 	/* set TX ring length */
3342 	for (chan = 0; chan < tx_channels_count; chan++)
3343 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3344 				       (priv->dma_conf.dma_tx_size - 1), chan);
3345 
3346 	/* set RX ring length */
3347 	for (chan = 0; chan < rx_channels_count; chan++)
3348 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3349 				       (priv->dma_conf.dma_rx_size - 1), chan);
3350 }
3351 
3352 /**
3353  *  stmmac_set_tx_queue_weight - Set TX queue weight
3354  *  @priv: driver private structure
3355  *  Description: It is used for setting the TX queue weights
3356  */
3357 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3358 {
3359 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3360 	u32 weight;
3361 	u32 queue;
3362 
3363 	for (queue = 0; queue < tx_queues_count; queue++) {
3364 		weight = priv->plat->tx_queues_cfg[queue].weight;
3365 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3366 	}
3367 }
3368 
3369 /**
3370  *  stmmac_configure_cbs - Configure CBS in TX queue
3371  *  @priv: driver private structure
3372  *  Description: It is used for configuring CBS in AVB TX queues
3373  */
3374 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3375 {
3376 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3377 	u32 mode_to_use;
3378 	u32 queue;
3379 
3380 	/* queue 0 is reserved for legacy traffic */
3381 	for (queue = 1; queue < tx_queues_count; queue++) {
3382 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3383 		if (mode_to_use == MTL_QUEUE_DCB)
3384 			continue;
3385 
3386 		stmmac_config_cbs(priv, priv->hw,
3387 				priv->plat->tx_queues_cfg[queue].send_slope,
3388 				priv->plat->tx_queues_cfg[queue].idle_slope,
3389 				priv->plat->tx_queues_cfg[queue].high_credit,
3390 				priv->plat->tx_queues_cfg[queue].low_credit,
3391 				queue);
3392 	}
3393 }
3394 
3395 /**
3396  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3397  *  @priv: driver private structure
3398  *  Description: It is used for mapping RX queues to RX dma channels
3399  */
3400 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3401 {
3402 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3403 	u32 queue;
3404 	u32 chan;
3405 
3406 	for (queue = 0; queue < rx_queues_count; queue++) {
3407 		chan = priv->plat->rx_queues_cfg[queue].chan;
3408 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3409 	}
3410 }
3411 
3412 /**
3413  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3414  *  @priv: driver private structure
3415  *  Description: It is used for configuring the RX Queue Priority
3416  */
3417 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3418 {
3419 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3420 	u32 queue;
3421 	u32 prio;
3422 
3423 	for (queue = 0; queue < rx_queues_count; queue++) {
3424 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3425 			continue;
3426 
3427 		prio = priv->plat->rx_queues_cfg[queue].prio;
3428 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3429 	}
3430 }
3431 
3432 /**
3433  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3434  *  @priv: driver private structure
3435  *  Description: It is used for configuring the TX Queue Priority
3436  */
3437 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3438 {
3439 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3440 	u32 queue;
3441 	u32 prio;
3442 
3443 	for (queue = 0; queue < tx_queues_count; queue++) {
3444 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3445 			continue;
3446 
3447 		prio = priv->plat->tx_queues_cfg[queue].prio;
3448 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3449 	}
3450 }
3451 
3452 /**
3453  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3454  *  @priv: driver private structure
3455  *  Description: It is used for configuring the RX queue routing
3456  */
3457 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3458 {
3459 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3460 	u32 queue;
3461 	u8 packet;
3462 
3463 	for (queue = 0; queue < rx_queues_count; queue++) {
3464 		/* no specific packet type routing specified for the queue */
3465 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3466 			continue;
3467 
3468 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3469 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3470 	}
3471 }
3472 
3473 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3474 {
3475 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3476 		priv->rss.enable = false;
3477 		return;
3478 	}
3479 
3480 	if (priv->dev->features & NETIF_F_RXHASH)
3481 		priv->rss.enable = true;
3482 	else
3483 		priv->rss.enable = false;
3484 
3485 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3486 			     priv->plat->rx_queues_to_use);
3487 }
3488 
3489 /**
3490  *  stmmac_mtl_configuration - Configure MTL
3491  *  @priv: driver private structure
3492  *  Description: It is used for configuring MTL
3493  */
3494 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3495 {
3496 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3497 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3498 
3499 	if (tx_queues_count > 1)
3500 		stmmac_set_tx_queue_weight(priv);
3501 
3502 	/* Configure MTL RX algorithms */
3503 	if (rx_queues_count > 1)
3504 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3505 				priv->plat->rx_sched_algorithm);
3506 
3507 	/* Configure MTL TX algorithms */
3508 	if (tx_queues_count > 1)
3509 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3510 				priv->plat->tx_sched_algorithm);
3511 
3512 	/* Configure CBS in AVB TX queues */
3513 	if (tx_queues_count > 1)
3514 		stmmac_configure_cbs(priv);
3515 
3516 	/* Map RX MTL to DMA channels */
3517 	stmmac_rx_queue_dma_chan_map(priv);
3518 
3519 	/* Enable MAC RX Queues */
3520 	stmmac_mac_enable_rx_queues(priv);
3521 
3522 	/* Set RX priorities */
3523 	if (rx_queues_count > 1)
3524 		stmmac_mac_config_rx_queues_prio(priv);
3525 
3526 	/* Set TX priorities */
3527 	if (tx_queues_count > 1)
3528 		stmmac_mac_config_tx_queues_prio(priv);
3529 
3530 	/* Set RX routing */
3531 	if (rx_queues_count > 1)
3532 		stmmac_mac_config_rx_queues_routing(priv);
3533 
3534 	/* Receive Side Scaling */
3535 	if (rx_queues_count > 1)
3536 		stmmac_mac_config_rss(priv);
3537 }
3538 
3539 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3540 {
3541 	if (priv->dma_cap.asp) {
3542 		netdev_info(priv->dev, "Enabling Safety Features\n");
3543 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3544 					  priv->plat->safety_feat_cfg);
3545 	} else {
3546 		netdev_info(priv->dev, "No Safety Features support found\n");
3547 	}
3548 }
3549 
3550 /**
3551  * stmmac_hw_setup - setup mac in a usable state.
3552  *  @dev : pointer to the device structure.
3553  *  Description:
3554  *  this is the main function to set up the HW in a usable state: the
3555  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3556  *  checksum features, timers) and the DMA is ready to start receiving
3557  *  and transmitting.
3558  *  Return value:
3559  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3560  *  file on failure.
3561  */
3562 static int stmmac_hw_setup(struct net_device *dev)
3563 {
3564 	struct stmmac_priv *priv = netdev_priv(dev);
3565 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3566 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3567 	bool sph_en;
3568 	u32 chan;
3569 	int ret;
3570 
3571 	/* Make sure RX clock is enabled */
3572 	if (priv->hw->phylink_pcs)
3573 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3574 
3575 	/* Note that clk_rx_i must be running for reset to complete. This
3576 	 * clock may also be required when setting the MAC address.
3577 	 *
3578 	 * Block the receive clock stop for LPI mode at the PHY in case
3579 	 * the link is established with EEE mode active.
3580 	 */
3581 	phylink_rx_clk_stop_block(priv->phylink);
3582 
3583 	/* DMA initialization and SW reset */
3584 	ret = stmmac_init_dma_engine(priv);
3585 	if (ret < 0) {
3586 		phylink_rx_clk_stop_unblock(priv->phylink);
3587 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3588 			   __func__);
3589 		return ret;
3590 	}
3591 
3592 	/* Copy the MAC addr into the HW  */
3593 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3594 	phylink_rx_clk_stop_unblock(priv->phylink);
3595 
3596 	/* Initialize the MAC Core */
3597 	stmmac_core_init(priv, priv->hw, dev);
3598 
3599 	/* Initialize MTL */
3600 	stmmac_mtl_configuration(priv);
3601 
3602 	/* Initialize Safety Features */
3603 	stmmac_safety_feat_configuration(priv);
3604 
3605 	ret = stmmac_rx_ipc(priv, priv->hw);
3606 	if (!ret) {
3607 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3608 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3609 		priv->hw->rx_csum = 0;
3610 	}
3611 
3612 	/* Enable the MAC Rx/Tx */
3613 	stmmac_mac_set(priv, priv->ioaddr, true);
3614 
3615 	/* Set the HW DMA mode and the COE */
3616 	stmmac_dma_operation_mode(priv);
3617 
3618 	stmmac_mmc_setup(priv);
3619 
3620 	if (priv->use_riwt) {
3621 		u32 queue;
3622 
3623 		for (queue = 0; queue < rx_cnt; queue++) {
3624 			if (!priv->rx_riwt[queue])
3625 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3626 
3627 			stmmac_rx_watchdog(priv, priv->ioaddr,
3628 					   priv->rx_riwt[queue], queue);
3629 		}
3630 	}
3631 
3632 	/* set TX and RX rings length */
3633 	stmmac_set_rings_length(priv);
3634 
3635 	/* Enable TSO */
3636 	if (priv->tso) {
3637 		for (chan = 0; chan < tx_cnt; chan++) {
3638 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3639 
3640 			/* TSO and TBS cannot co-exist */
3641 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3642 				continue;
3643 
3644 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3645 		}
3646 	}
3647 
3648 	/* Enable Split Header */
3649 	sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
3650 	for (chan = 0; chan < rx_cnt; chan++)
3651 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3652 
3653 
3654 	/* VLAN Tag Insertion */
3655 	if (priv->dma_cap.vlins)
3656 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3657 
3658 	/* TBS */
3659 	for (chan = 0; chan < tx_cnt; chan++) {
3660 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3661 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3662 
3663 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3664 	}
3665 
3666 	/* Configure real RX and TX queues */
3667 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3668 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3669 
3670 	/* Start the ball rolling... */
3671 	stmmac_start_all_dma(priv);
3672 
3673 	phylink_rx_clk_stop_block(priv->phylink);
3674 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3675 	phylink_rx_clk_stop_unblock(priv->phylink);
3676 
3677 	return 0;
3678 }
3679 
3680 static void stmmac_free_irq(struct net_device *dev,
3681 			    enum request_irq_err irq_err, int irq_idx)
3682 {
3683 	struct stmmac_priv *priv = netdev_priv(dev);
3684 	int j;
3685 
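	/* The cases below intentionally fall through: starting from the
	 * stage where IRQ allocation failed, every IRQ requested before
	 * that stage is released in reverse order.
	 */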
3686 	switch (irq_err) {
3687 	case REQ_IRQ_ERR_ALL:
3688 		irq_idx = priv->plat->tx_queues_to_use;
3689 		fallthrough;
3690 	case REQ_IRQ_ERR_TX:
3691 		for (j = irq_idx - 1; j >= 0; j--) {
3692 			if (priv->tx_irq[j] > 0) {
3693 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3694 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3695 			}
3696 		}
3697 		irq_idx = priv->plat->rx_queues_to_use;
3698 		fallthrough;
3699 	case REQ_IRQ_ERR_RX:
3700 		for (j = irq_idx - 1; j >= 0; j--) {
3701 			if (priv->rx_irq[j] > 0) {
3702 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3703 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3704 			}
3705 		}
3706 
3707 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3708 			free_irq(priv->sfty_ue_irq, dev);
3709 		fallthrough;
3710 	case REQ_IRQ_ERR_SFTY_UE:
3711 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3712 			free_irq(priv->sfty_ce_irq, dev);
3713 		fallthrough;
3714 	case REQ_IRQ_ERR_SFTY_CE:
3715 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3716 			free_irq(priv->lpi_irq, dev);
3717 		fallthrough;
3718 	case REQ_IRQ_ERR_LPI:
3719 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3720 			free_irq(priv->wol_irq, dev);
3721 		fallthrough;
3722 	case REQ_IRQ_ERR_SFTY:
3723 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3724 			free_irq(priv->sfty_irq, dev);
3725 		fallthrough;
3726 	case REQ_IRQ_ERR_WOL:
3727 		free_irq(dev->irq, dev);
3728 		fallthrough;
3729 	case REQ_IRQ_ERR_MAC:
3730 	case REQ_IRQ_ERR_NO:
3731 		/* If the MAC IRQ request failed, there is no more IRQ to free */
3732 		break;
3733 	}
3734 }
3735 
3736 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3737 {
3738 	struct stmmac_priv *priv = netdev_priv(dev);
3739 	enum request_irq_err irq_err;
3740 	int irq_idx = 0;
3741 	char *int_name;
3742 	int ret;
3743 	int i;
3744 
3745 	/* For common interrupt */
3746 	int_name = priv->int_name_mac;
3747 	sprintf(int_name, "%s:%s", dev->name, "mac");
3748 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3749 			  0, int_name, dev);
3750 	if (unlikely(ret < 0)) {
3751 		netdev_err(priv->dev,
3752 			   "%s: alloc mac MSI %d (error: %d)\n",
3753 			   __func__, dev->irq, ret);
3754 		irq_err = REQ_IRQ_ERR_MAC;
3755 		goto irq_error;
3756 	}
3757 
3758 	/* Request the Wake IRQ in case another line
3759 	 * is used for WoL
3760 	 */
3761 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3762 		int_name = priv->int_name_wol;
3763 		sprintf(int_name, "%s:%s", dev->name, "wol");
3764 		ret = request_irq(priv->wol_irq,
3765 				  stmmac_mac_interrupt,
3766 				  0, int_name, dev);
3767 		if (unlikely(ret < 0)) {
3768 			netdev_err(priv->dev,
3769 				   "%s: alloc wol MSI %d (error: %d)\n",
3770 				   __func__, priv->wol_irq, ret);
3771 			irq_err = REQ_IRQ_ERR_WOL;
3772 			goto irq_error;
3773 		}
3774 	}
3775 
3776 	/* Request the LPI IRQ in case another line
3777 	 * is used for LPI
3778 	 */
3779 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3780 		int_name = priv->int_name_lpi;
3781 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3782 		ret = request_irq(priv->lpi_irq,
3783 				  stmmac_mac_interrupt,
3784 				  0, int_name, dev);
3785 		if (unlikely(ret < 0)) {
3786 			netdev_err(priv->dev,
3787 				   "%s: alloc lpi MSI %d (error: %d)\n",
3788 				   __func__, priv->lpi_irq, ret);
3789 			irq_err = REQ_IRQ_ERR_LPI;
3790 			goto irq_error;
3791 		}
3792 	}
3793 
3794 	/* Request the common Safety Feature Correctable/Uncorrectable
3795 	 * Error line in case another line is used
3796 	 */
3797 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3798 		int_name = priv->int_name_sfty;
3799 		sprintf(int_name, "%s:%s", dev->name, "safety");
3800 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3801 				  0, int_name, dev);
3802 		if (unlikely(ret < 0)) {
3803 			netdev_err(priv->dev,
3804 				   "%s: alloc sfty MSI %d (error: %d)\n",
3805 				   __func__, priv->sfty_irq, ret);
3806 			irq_err = REQ_IRQ_ERR_SFTY;
3807 			goto irq_error;
3808 		}
3809 	}
3810 
3811 	/* Request the Safety Feature Correctable Error line in
3812 	 * case another line is used
3813 	 */
3814 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3815 		int_name = priv->int_name_sfty_ce;
3816 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3817 		ret = request_irq(priv->sfty_ce_irq,
3818 				  stmmac_safety_interrupt,
3819 				  0, int_name, dev);
3820 		if (unlikely(ret < 0)) {
3821 			netdev_err(priv->dev,
3822 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3823 				   __func__, priv->sfty_ce_irq, ret);
3824 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3825 			goto irq_error;
3826 		}
3827 	}
3828 
3829 	/* Request the Safety Feature Uncorrectable Error line in
3830 	 * case another line is used
3831 	 */
3832 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3833 		int_name = priv->int_name_sfty_ue;
3834 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3835 		ret = request_irq(priv->sfty_ue_irq,
3836 				  stmmac_safety_interrupt,
3837 				  0, int_name, dev);
3838 		if (unlikely(ret < 0)) {
3839 			netdev_err(priv->dev,
3840 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3841 				   __func__, priv->sfty_ue_irq, ret);
3842 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3843 			goto irq_error;
3844 		}
3845 	}
3846 
3847 	/* Request Rx MSI irq */
3848 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3849 		if (i >= MTL_MAX_RX_QUEUES)
3850 			break;
3851 		if (priv->rx_irq[i] == 0)
3852 			continue;
3853 
3854 		int_name = priv->int_name_rx_irq[i];
3855 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3856 		ret = request_irq(priv->rx_irq[i],
3857 				  stmmac_msi_intr_rx,
3858 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3859 		if (unlikely(ret < 0)) {
3860 			netdev_err(priv->dev,
3861 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3862 				   __func__, i, priv->rx_irq[i], ret);
3863 			irq_err = REQ_IRQ_ERR_RX;
3864 			irq_idx = i;
3865 			goto irq_error;
3866 		}
3867 		irq_set_affinity_hint(priv->rx_irq[i],
3868 				      cpumask_of(i % num_online_cpus()));
3869 	}
3870 
3871 	/* Request Tx MSI irq */
3872 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3873 		if (i >= MTL_MAX_TX_QUEUES)
3874 			break;
3875 		if (priv->tx_irq[i] == 0)
3876 			continue;
3877 
3878 		int_name = priv->int_name_tx_irq[i];
3879 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3880 		ret = request_irq(priv->tx_irq[i],
3881 				  stmmac_msi_intr_tx,
3882 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3883 		if (unlikely(ret < 0)) {
3884 			netdev_err(priv->dev,
3885 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3886 				   __func__, i, priv->tx_irq[i], ret);
3887 			irq_err = REQ_IRQ_ERR_TX;
3888 			irq_idx = i;
3889 			goto irq_error;
3890 		}
3891 		irq_set_affinity_hint(priv->tx_irq[i],
3892 				      cpumask_of(i % num_online_cpus()));
3893 	}
3894 
3895 	return 0;
3896 
3897 irq_error:
3898 	stmmac_free_irq(dev, irq_err, irq_idx);
3899 	return ret;
3900 }
3901 
3902 static int stmmac_request_irq_single(struct net_device *dev)
3903 {
3904 	struct stmmac_priv *priv = netdev_priv(dev);
3905 	enum request_irq_err irq_err;
3906 	int ret;
3907 
3908 	ret = request_irq(dev->irq, stmmac_interrupt,
3909 			  IRQF_SHARED, dev->name, dev);
3910 	if (unlikely(ret < 0)) {
3911 		netdev_err(priv->dev,
3912 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3913 			   __func__, dev->irq, ret);
3914 		irq_err = REQ_IRQ_ERR_MAC;
3915 		goto irq_error;
3916 	}
3917 
3918 	/* Request the Wake IRQ in case another line
3919 	 * is used for WoL
3920 	 */
3921 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3922 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3923 				  IRQF_SHARED, dev->name, dev);
3924 		if (unlikely(ret < 0)) {
3925 			netdev_err(priv->dev,
3926 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3927 				   __func__, priv->wol_irq, ret);
3928 			irq_err = REQ_IRQ_ERR_WOL;
3929 			goto irq_error;
3930 		}
3931 	}
3932 
3933 	/* Request the LPI IRQ in case another line is used for LPI */
3934 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3935 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3936 				  IRQF_SHARED, dev->name, dev);
3937 		if (unlikely(ret < 0)) {
3938 			netdev_err(priv->dev,
3939 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3940 				   __func__, priv->lpi_irq, ret);
3941 			irq_err = REQ_IRQ_ERR_LPI;
3942 			goto irq_error;
3943 		}
3944 	}
3945 
3946 	/* Request the common Safety Feature Correctable/Uncorrectable
3947 	 * Error line in case another line is used
3948 	 */
3949 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3950 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3951 				  IRQF_SHARED, dev->name, dev);
3952 		if (unlikely(ret < 0)) {
3953 			netdev_err(priv->dev,
3954 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3955 				   __func__, priv->sfty_irq, ret);
3956 			irq_err = REQ_IRQ_ERR_SFTY;
3957 			goto irq_error;
3958 		}
3959 	}
3960 
3961 	return 0;
3962 
3963 irq_error:
3964 	stmmac_free_irq(dev, irq_err, 0);
3965 	return ret;
3966 }
3967 
3968 static int stmmac_request_irq(struct net_device *dev)
3969 {
3970 	struct stmmac_priv *priv = netdev_priv(dev);
3971 	int ret;
3972 
3973 	/* Request the IRQ lines */
3974 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3975 		ret = stmmac_request_irq_multi_msi(dev);
3976 	else
3977 		ret = stmmac_request_irq_single(dev);
3978 
3979 	return ret;
3980 }
3981 
3982 /**
3983  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3984  *  @priv: driver private structure
3985  *  @mtu: MTU to setup the dma queue and buf with
3986  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3987  *  Allocate the Tx/Rx DMA queues and initialize them.
3988  *  Return value:
3989  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3990  */
3991 static struct stmmac_dma_conf *
3992 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3993 {
3994 	struct stmmac_dma_conf *dma_conf;
3995 	int chan, bfsize, ret;
3996 
3997 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3998 	if (!dma_conf) {
3999 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
4000 			   __func__);
4001 		return ERR_PTR(-ENOMEM);
4002 	}
4003 
4004 	/* Returns 0 or BUF_SIZE_16KiB if mtu > 8KiB and dwmac4 or ring mode */
4005 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
4006 	if (bfsize < 0)
4007 		bfsize = 0;
4008 
4009 	if (bfsize < BUF_SIZE_16KiB)
4010 		bfsize = stmmac_set_bfsize(mtu);
4011 
4012 	dma_conf->dma_buf_sz = bfsize;
4013 	/* Choose the tx/rx size from the one already defined in the
4014 	 * priv struct, if defined.
4015 	 */
4016 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
4017 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
4018 
4019 	if (!dma_conf->dma_tx_size)
4020 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
4021 	if (!dma_conf->dma_rx_size)
4022 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
4023 
4024 	/* Earlier check for TBS */
4025 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
4026 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
4027 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
4028 
4029 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
4030 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
4031 	}
4032 
4033 	ret = alloc_dma_desc_resources(priv, dma_conf);
4034 	if (ret < 0) {
4035 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
4036 			   __func__);
4037 		goto alloc_error;
4038 	}
4039 
4040 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
4041 	if (ret < 0) {
4042 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4043 			   __func__);
4044 		goto init_error;
4045 	}
4046 
4047 	return dma_conf;
4048 
4049 init_error:
4050 	free_dma_desc_resources(priv, dma_conf);
4051 alloc_error:
4052 	kfree(dma_conf);
4053 	return ERR_PTR(ret);
4054 }
4055 
4056 /**
4057  *  __stmmac_open - open entry point of the driver
4058  *  @dev : pointer to the device structure.
4059  * @dma_conf : structure holding the DMA configuration to install
4060  *  Description:
4061  *  This function is the open entry point of the driver.
4062  *  Return value:
4063  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4064  *  file on failure.
4065  */
4066 static int __stmmac_open(struct net_device *dev,
4067 			 struct stmmac_dma_conf *dma_conf)
4068 {
4069 	struct stmmac_priv *priv = netdev_priv(dev);
4070 	u32 chan;
4071 	int ret;
4072 
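	/* Preserve the TBS enable state of each TX queue from the current
	 * configuration before it is overwritten by the new dma_conf.
	 */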
4073 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
4074 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4075 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4076 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4077 
4078 	stmmac_reset_queues_param(priv);
4079 
4080 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4081 	    priv->plat->serdes_powerup) {
4082 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4083 		if (ret < 0) {
4084 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4085 				   __func__);
4086 			goto init_error;
4087 		}
4088 	}
4089 
4090 	ret = stmmac_hw_setup(dev);
4091 	if (ret < 0) {
4092 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4093 		goto init_error;
4094 	}
4095 
4096 	stmmac_setup_ptp(priv);
4097 
4098 	stmmac_init_coalesce(priv);
4099 
4100 	phylink_start(priv->phylink);
4101 
4102 	ret = stmmac_request_irq(dev);
4103 	if (ret)
4104 		goto irq_error;
4105 
4106 	stmmac_enable_all_queues(priv);
4107 	netif_tx_start_all_queues(priv->dev);
4108 	stmmac_enable_all_dma_irq(priv);
4109 
4110 	return 0;
4111 
4112 irq_error:
4113 	phylink_stop(priv->phylink);
4114 
4115 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4116 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4117 
4118 	stmmac_release_ptp(priv);
4119 init_error:
4120 	return ret;
4121 }
4122 
4123 static int stmmac_open(struct net_device *dev)
4124 {
4125 	struct stmmac_priv *priv = netdev_priv(dev);
4126 	struct stmmac_dma_conf *dma_conf;
4127 	int ret;
4128 
4129 	/* Initialise the tx lpi timer, converting from msec to usec */
4130 	if (!priv->tx_lpi_timer)
4131 		priv->tx_lpi_timer = eee_timer * 1000;
4132 
4133 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4134 	if (IS_ERR(dma_conf))
4135 		return PTR_ERR(dma_conf);
4136 
4137 	ret = pm_runtime_resume_and_get(priv->device);
4138 	if (ret < 0)
4139 		goto err_dma_resources;
4140 
4141 	ret = stmmac_init_phy(dev);
4142 	if (ret)
4143 		goto err_runtime_pm;
4144 
4145 	ret = __stmmac_open(dev, dma_conf);
4146 	if (ret)
4147 		goto err_disconnect_phy;
4148 
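	/* __stmmac_open() copied the new configuration into priv->dma_conf,
	 * so the temporary dma_conf allocation can be freed here.
	 */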
4149 	kfree(dma_conf);
4150 
4151 	/* We may have called phylink_speed_down before */
4152 	phylink_speed_up(priv->phylink);
4153 
4154 	return ret;
4155 
4156 err_disconnect_phy:
4157 	phylink_disconnect_phy(priv->phylink);
4158 err_runtime_pm:
4159 	pm_runtime_put(priv->device);
4160 err_dma_resources:
4161 	free_dma_desc_resources(priv, dma_conf);
4162 	kfree(dma_conf);
4163 	return ret;
4164 }
4165 
4166 static void __stmmac_release(struct net_device *dev)
4167 {
4168 	struct stmmac_priv *priv = netdev_priv(dev);
4169 	u32 chan;
4170 
4171 	/* Stop and disconnect the PHY */
4172 	phylink_stop(priv->phylink);
4173 
4174 	stmmac_disable_all_queues(priv);
4175 
4176 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4177 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4178 
4179 	netif_tx_disable(dev);
4180 
4181 	/* Free the IRQ lines */
4182 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4183 
4184 	/* Stop TX/RX DMA and clear the descriptors */
4185 	stmmac_stop_all_dma(priv);
4186 
4187 	/* Release and free the Rx/Tx resources */
4188 	free_dma_desc_resources(priv, &priv->dma_conf);
4189 
4190 	/* Power down the SerDes if present */
4191 	if (priv->plat->serdes_powerdown)
4192 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4193 
4194 	stmmac_release_ptp(priv);
4195 
4196 	if (stmmac_fpe_supported(priv))
4197 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4198 }
4199 
4200 /**
4201  *  stmmac_release - close entry point of the driver
4202  *  @dev : device pointer.
4203  *  Description:
4204  *  This is the stop entry point of the driver.
4205  */
4206 static int stmmac_release(struct net_device *dev)
4207 {
4208 	struct stmmac_priv *priv = netdev_priv(dev);
4209 
4210 	/* If the PHY or MAC has WoL enabled, then the PHY will not be
4211 	 * suspended when phylink_stop() is called below. Set the PHY
4212 	 * to its slowest speed to save power.
4213 	 */
4214 	if (device_may_wakeup(priv->device))
4215 		phylink_speed_down(priv->phylink, false);
4216 
4217 	__stmmac_release(dev);
4218 
4219 	phylink_disconnect_phy(priv->phylink);
4220 	pm_runtime_put(priv->device);
4221 
4222 	return 0;
4223 }
4224 
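/* Insert the VLAN tag through a dedicated TX descriptor when the HW
 * supports VLAN insertion and the skb carries a tag. Returns true when
 * a descriptor has been consumed for the tag.
 */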
4225 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4226 			       struct stmmac_tx_queue *tx_q)
4227 {
4228 	struct dma_desc *p;
4229 	u16 tag = 0x0;
4230 
4231 	if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
4232 		return false;
4233 
4234 	tag = skb_vlan_tag_get(skb);
4235 
4236 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4237 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4238 	else
4239 		p = &tx_q->dma_tx[tx_q->cur_tx];
4240 
4241 	if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
4242 		return false;
4243 
4244 	stmmac_set_tx_owner(priv, p);
4245 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4246 	return true;
4247 }
4248 
4249 /**
4250  *  stmmac_tso_allocator - fill TX descriptors for a TSO buffer
4251  *  @priv: driver private structure
4252  *  @des: buffer start address
4253  *  @total_len: total length to fill in descriptors
4254  *  @last_segment: condition for the last descriptor
4255  *  @queue: TX queue index
4256  *  Description:
4257  *  This function fills descriptors, taking new ones from the ring as needed,
4258  *  according to the buffer length to fill.
4259  */
4260 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4261 				 int total_len, bool last_segment, u32 queue)
4262 {
4263 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4264 	struct dma_desc *desc;
4265 	u32 buff_size;
4266 	int tmp_len;
4267 
4268 	tmp_len = total_len;
4269 
4270 	while (tmp_len > 0) {
4271 		dma_addr_t curr_addr;
4272 
4273 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4274 						priv->dma_conf.dma_tx_size);
4275 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4276 
4277 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4278 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4279 		else
4280 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4281 
4282 		curr_addr = des + (total_len - tmp_len);
4283 		stmmac_set_desc_addr(priv, desc, curr_addr);
4284 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4285 			    TSO_MAX_BUFF_SIZE : tmp_len;
4286 
4287 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4288 				0, 1,
4289 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4290 				0, 0);
4291 
4292 		tmp_len -= TSO_MAX_BUFF_SIZE;
4293 	}
4294 }
4295 
4296 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4297 {
4298 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4299 	int desc_size;
4300 
4301 	if (likely(priv->extend_desc))
4302 		desc_size = sizeof(struct dma_extended_desc);
4303 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4304 		desc_size = sizeof(struct dma_edesc);
4305 	else
4306 		desc_size = sizeof(struct dma_desc);
4307 
4308 	/* The own bit must be the latest setting done when preparing the
4309 	 * descriptor, and then a barrier is needed to make sure that
4310 	 * all is coherent before handing the descriptor to the DMA engine.
4311 	 */
4312 	wmb();
4313 
4314 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4315 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4316 }
4317 
4318 /**
4319  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4320  *  @skb : the socket buffer
4321  *  @dev : device pointer
4322  *  Description: this is the transmit function that is called on TSO frames
4323  *  (support available on GMAC4 and newer chips).
4324  *  The diagram below shows the ring programming in the case of TSO frames:
4325  *
4326  *  First Descriptor
4327  *   --------
4328  *   | DES0 |---> buffer1 = L2/L3/L4 header
4329  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4330  *   |      |     width is 32-bit, but we never use it.
4331  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4332  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4333  *   |      |     or 48-bit, and we always use it.
4334  *   | DES2 |---> buffer1 len
4335  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4336  *   --------
4337  *   --------
4338  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4339  *   | DES1 |---> same as the First Descriptor
4340  *   | DES2 |---> buffer1 len
4341  *   | DES3 |
4342  *   --------
4343  *	|
4344  *     ...
4345  *	|
4346  *   --------
4347  *   | DES0 |---> buffer1 = Split TCP Payload
4348  *   | DES1 |---> same as the First Descriptor
4349  *   | DES2 |---> buffer1 len
4350  *   | DES3 |
4351  *   --------
4352  *
4353  * mss is fixed when TSO is enabled, so there is no need to program the TDES3 ctx field.
4354  */
4355 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4356 {
4357 	struct dma_desc *desc, *first, *mss_desc = NULL;
4358 	struct stmmac_priv *priv = netdev_priv(dev);
4359 	unsigned int first_entry, tx_packets;
4360 	struct stmmac_txq_stats *txq_stats;
4361 	struct stmmac_tx_queue *tx_q;
4362 	bool set_ic, is_last_segment;
4363 	u32 pay_len, mss, queue;
4364 	int i, first_tx, nfrags;
4365 	u8 proto_hdr_len, hdr;
4366 	dma_addr_t des;
4367 
4368 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4369 	 *
4370 	 * Never insert the VLAN tag by HW, since segments split by the
4371 	 * TSO engine would be un-tagged by mistake.
4372 	 */
4373 	if (skb_vlan_tag_present(skb)) {
4374 		skb = __vlan_hwaccel_push_inside(skb);
4375 		if (unlikely(!skb)) {
4376 			priv->xstats.tx_dropped++;
4377 			return NETDEV_TX_OK;
4378 		}
4379 	}
4380 
4381 	nfrags = skb_shinfo(skb)->nr_frags;
4382 	queue = skb_get_queue_mapping(skb);
4383 
4384 	tx_q = &priv->dma_conf.tx_queue[queue];
4385 	txq_stats = &priv->xstats.txq_stats[queue];
4386 	first_tx = tx_q->cur_tx;
4387 
4388 	/* Compute header lengths */
4389 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4390 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4391 		hdr = sizeof(struct udphdr);
4392 	} else {
4393 		proto_hdr_len = skb_tcp_all_headers(skb);
4394 		hdr = tcp_hdrlen(skb);
4395 	}
4396 
4397 	/* Desc availability based on the threshold should be safe enough */
4398 	if (unlikely(stmmac_tx_avail(priv, queue) <
4399 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4400 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4401 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4402 								queue));
4403 			/* This is a hard error, log it. */
4404 			netdev_err(priv->dev,
4405 				   "%s: Tx Ring full when queue awake\n",
4406 				   __func__);
4407 		}
4408 		return NETDEV_TX_BUSY;
4409 	}
4410 
4411 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4412 
4413 	mss = skb_shinfo(skb)->gso_size;
4414 
4415 	/* set new MSS value if needed */
4416 	if (mss != tx_q->mss) {
4417 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4418 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4419 		else
4420 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4421 
4422 		stmmac_set_mss(priv, mss_desc, mss);
4423 		tx_q->mss = mss;
4424 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4425 						priv->dma_conf.dma_tx_size);
4426 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4427 	}
4428 
4429 	if (netif_msg_tx_queued(priv)) {
4430 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4431 			__func__, hdr, proto_hdr_len, pay_len, mss);
4432 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4433 			skb->data_len);
4434 	}
4435 
4436 	first_entry = tx_q->cur_tx;
4437 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4438 
4439 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4440 		desc = &tx_q->dma_entx[first_entry].basic;
4441 	else
4442 		desc = &tx_q->dma_tx[first_entry];
4443 	first = desc;
4444 
4445 	/* first descriptor: fill Headers on Buf1 */
4446 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4447 			     DMA_TO_DEVICE);
4448 	if (dma_mapping_error(priv->device, des))
4449 		goto dma_map_err;
4450 
4451 	stmmac_set_desc_addr(priv, first, des);
4452 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4453 			     (nfrags == 0), queue);
4454 
4455 	/* In case two or more DMA transmit descriptors are allocated for this
4456 	 * non-paged SKB data, the DMA buffer address should be saved to
4457 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4458 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4459 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4460 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4461 	 * sooner or later.
4462 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4463 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4464 	 * this DMA buffer right after the DMA engine completely finishes the
4465 	 * full buffer transmission.
4466 	 */
4467 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4468 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4469 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4470 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4471 
4472 	/* Prepare fragments */
4473 	for (i = 0; i < nfrags; i++) {
4474 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4475 
4476 		des = skb_frag_dma_map(priv->device, frag, 0,
4477 				       skb_frag_size(frag),
4478 				       DMA_TO_DEVICE);
4479 		if (dma_mapping_error(priv->device, des))
4480 			goto dma_map_err;
4481 
4482 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4483 				     (i == nfrags - 1), queue);
4484 
4485 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4486 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4487 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4488 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4489 	}
4490 
4491 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4492 
4493 	/* Only the last descriptor gets to point to the skb. */
4494 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4495 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4496 
4497 	/* Manage tx mitigation */
4498 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4499 	tx_q->tx_count_frames += tx_packets;
4500 
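	/* Decide whether this frame requests a TX completion interrupt:
	 * always when HW timestamping is enabled for it, otherwise only
	 * once every tx_coal_frames packets.
	 */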
4501 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4502 		set_ic = true;
4503 	else if (!priv->tx_coal_frames[queue])
4504 		set_ic = false;
4505 	else if (tx_packets > priv->tx_coal_frames[queue])
4506 		set_ic = true;
4507 	else if ((tx_q->tx_count_frames %
4508 		  priv->tx_coal_frames[queue]) < tx_packets)
4509 		set_ic = true;
4510 	else
4511 		set_ic = false;
4512 
4513 	if (set_ic) {
4514 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4515 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4516 		else
4517 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4518 
4519 		tx_q->tx_count_frames = 0;
4520 		stmmac_set_tx_ic(priv, desc);
4521 	}
4522 
4523 	/* We've used all descriptors we need for this skb, however,
4524 	 * advance cur_tx so that it references a fresh descriptor.
4525 	 * ndo_start_xmit will fill this descriptor the next time it's
4526 	 * called and stmmac_tx_clean may clean up to this descriptor.
4527 	 */
4528 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4529 
4530 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4531 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4532 			  __func__);
4533 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4534 	}
4535 
4536 	u64_stats_update_begin(&txq_stats->q_syncp);
4537 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4538 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4539 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4540 	if (set_ic)
4541 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4542 	u64_stats_update_end(&txq_stats->q_syncp);
4543 
4544 	if (priv->sarc_type)
4545 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4546 
4547 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4548 		     priv->hwts_tx_en)) {
4549 		/* declare that device is doing timestamping */
4550 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4551 		stmmac_enable_tx_timestamp(priv, first);
4552 	}
4553 
4554 	/* If we only have one entry used, then the first entry is the last
4555 	 * segment.
4556 	 */
4557 	is_last_segment = ((tx_q->cur_tx - first_entry) &
4558 			   (priv->dma_conf.dma_tx_size - 1)) == 1;
4559 
4560 	/* Complete the first descriptor before granting the DMA */
4561 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4562 				   is_last_segment, hdr / 4,
4563 				   skb->len - proto_hdr_len);
4564 
4565 	/* If context desc is used to change MSS */
4566 	if (mss_desc) {
4567 		/* Make sure that first descriptor has been completely
4568 		 * written, including its own bit. This is because MSS is
4569 		 * actually before first descriptor, so we need to make
4570 		 * sure that MSS's own bit is the last thing written.
4571 		 */
4572 		dma_wmb();
4573 		stmmac_set_tx_owner(priv, mss_desc);
4574 	}
4575 
4576 	if (netif_msg_pktdata(priv)) {
4577 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4578 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4579 			tx_q->cur_tx, first, nfrags);
4580 		pr_info(">>> frame to be transmitted: ");
4581 		print_pkt(skb->data, skb_headlen(skb));
4582 	}
4583 
4584 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4585 	skb_tx_timestamp(skb);
4586 
4587 	stmmac_flush_tx_descriptors(priv, queue);
4588 	stmmac_tx_timer_arm(priv, queue);
4589 
4590 	return NETDEV_TX_OK;
4591 
4592 dma_map_err:
4593 	dev_err(priv->device, "Tx dma map failed\n");
4594 	dev_kfree_skb(skb);
4595 	priv->xstats.tx_dropped++;
4596 	return NETDEV_TX_OK;
4597 }
4598 
4599 /**
4600  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4601  * @skb: socket buffer to check
4602  *
4603  * Check if a packet has an ethertype that will trigger the IP header checks
4604  * and IP/TCP checksum engine of the stmmac core.
4605  *
4606  * Return: true if the ethertype can trigger the checksum engine, false
4607  * otherwise
4608  */
4609 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4610 {
4611 	int depth = 0;
4612 	__be16 proto;
4613 
4614 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4615 				    &depth);
4616 
4617 	return (depth <= ETH_HLEN) &&
4618 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4619 }
4620 
4621 /**
4622  *  stmmac_xmit - Tx entry point of the driver
4623  *  @skb : the socket buffer
4624  *  @dev : device pointer
4625  *  Description : this is the tx entry point of the driver.
4626  *  It programs the chain or the ring and supports oversized frames
4627  *  and SG feature.
4628  */
4629 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4630 {
4631 	bool enh_desc, has_vlan, set_ic, is_jumbo = false;
4632 	struct stmmac_priv *priv = netdev_priv(dev);
4633 	unsigned int nopaged_len = skb_headlen(skb);
4634 	u32 queue = skb_get_queue_mapping(skb);
4635 	int nfrags = skb_shinfo(skb)->nr_frags;
4636 	unsigned int first_entry, tx_packets;
4637 	int gso = skb_shinfo(skb)->gso_type;
4638 	struct stmmac_txq_stats *txq_stats;
4639 	struct dma_edesc *tbs_desc = NULL;
4640 	struct dma_desc *desc, *first;
4641 	struct stmmac_tx_queue *tx_q;
4642 	int i, csum_insertion = 0;
4643 	int entry, first_tx;
4644 	dma_addr_t des;
4645 	u32 sdu_len;
4646 
4647 	tx_q = &priv->dma_conf.tx_queue[queue];
4648 	txq_stats = &priv->xstats.txq_stats[queue];
4649 	first_tx = tx_q->cur_tx;
4650 
4651 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4652 		stmmac_stop_sw_lpi(priv);
4653 
4654 	/* Manage oversized TCP frames for GMAC4 device */
4655 	if (skb_is_gso(skb) && priv->tso) {
4656 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4657 			return stmmac_tso_xmit(skb, dev);
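		/* UDP GSO is only handed to the TSO path on GMAC4 cores */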
4658 		if (priv->plat->core_type == DWMAC_CORE_GMAC4 &&
4659 		    (gso & SKB_GSO_UDP_L4))
4660 			return stmmac_tso_xmit(skb, dev);
4661 	}
4662 
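	/* Drop frames exceeding the per-queue EST maximum SDU, accounting
	 * for the VLAN tag that HW insertion offload would add.
	 */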
4663 	if (priv->est && priv->est->enable &&
4664 	    priv->est->max_sdu[queue]) {
4665 		sdu_len = skb->len;
4666 		/* Add VLAN tag length if VLAN tag insertion offload is requested */
4667 		if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
4668 			sdu_len += VLAN_HLEN;
4669 		if (sdu_len > priv->est->max_sdu[queue]) {
4670 			priv->xstats.max_sdu_txq_drop[queue]++;
4671 			goto max_sdu_err;
4672 		}
4673 	}
4674 
4675 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4676 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4677 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4678 								queue));
4679 			/* This is a hard error, log it. */
4680 			netdev_err(priv->dev,
4681 				   "%s: Tx Ring full when queue awake\n",
4682 				   __func__);
4683 		}
4684 		return NETDEV_TX_BUSY;
4685 	}
4686 
4687 	/* Check if VLAN can be inserted by HW */
4688 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4689 
4690 	entry = tx_q->cur_tx;
4691 	first_entry = entry;
4692 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4693 
4694 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4695 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4696 	 * queues. In that case, checksum offloading for those queues that don't
4697 	 * support tx coe needs to fallback to software checksum calculation.
4698 	 *
4699 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4700 	 * also have to be checksummed in software.
4701 	 */
4702 	if (csum_insertion &&
4703 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4704 	     !stmmac_has_ip_ethertype(skb))) {
4705 		if (unlikely(skb_checksum_help(skb)))
4706 			goto dma_map_err;
4707 		csum_insertion = !csum_insertion;
4708 	}
4709 
4710 	if (likely(priv->extend_desc))
4711 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4712 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4713 		desc = &tx_q->dma_entx[entry].basic;
4714 	else
4715 		desc = tx_q->dma_tx + entry;
4716 
4717 	first = desc;
4718 
4719 	if (has_vlan)
4720 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4721 
4722 	enh_desc = priv->plat->enh_desc;
4723 	/* To program the descriptors according to the size of the frame */
4724 	if (enh_desc)
4725 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4726 
4727 	if (unlikely(is_jumbo)) {
4728 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4729 		if (unlikely(entry < 0) && (entry != -EINVAL))
4730 			goto dma_map_err;
4731 	}
4732 
4733 	for (i = 0; i < nfrags; i++) {
4734 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4735 		int len = skb_frag_size(frag);
4736 		bool last_segment = (i == (nfrags - 1));
4737 
4738 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4739 		WARN_ON(tx_q->tx_skbuff[entry]);
4740 
4741 		if (likely(priv->extend_desc))
4742 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4743 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4744 			desc = &tx_q->dma_entx[entry].basic;
4745 		else
4746 			desc = tx_q->dma_tx + entry;
4747 
4748 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4749 				       DMA_TO_DEVICE);
4750 		if (dma_mapping_error(priv->device, des))
4751 			goto dma_map_err; /* should reuse desc w/o issues */
4752 
4753 		tx_q->tx_skbuff_dma[entry].buf = des;
4754 
4755 		stmmac_set_desc_addr(priv, desc, des);
4756 
4757 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4758 		tx_q->tx_skbuff_dma[entry].len = len;
4759 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4760 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4761 
4762 		/* Prepare the descriptor and set the own bit too */
4763 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4764 				priv->mode, 1, last_segment, skb->len);
4765 	}
4766 
4767 	/* Only the last descriptor gets to point to the skb. */
4768 	tx_q->tx_skbuff[entry] = skb;
4769 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4770 
4771 	/* According to the coalesce parameter the IC bit for the latest
4772 	 * segment is reset and the timer re-started to clean the tx status.
4773 	 * This approach takes care of the fragments: desc is the first
4774 	 * element in case of no SG.
4775 	 */
4776 	tx_packets = (entry + 1) - first_tx;
4777 	tx_q->tx_count_frames += tx_packets;
4778 
4779 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4780 		set_ic = true;
4781 	else if (!priv->tx_coal_frames[queue])
4782 		set_ic = false;
4783 	else if (tx_packets > priv->tx_coal_frames[queue])
4784 		set_ic = true;
4785 	else if ((tx_q->tx_count_frames %
4786 		  priv->tx_coal_frames[queue]) < tx_packets)
4787 		set_ic = true;
4788 	else
4789 		set_ic = false;
4790 
4791 	if (set_ic) {
4792 		if (likely(priv->extend_desc))
4793 			desc = &tx_q->dma_etx[entry].basic;
4794 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4795 			desc = &tx_q->dma_entx[entry].basic;
4796 		else
4797 			desc = &tx_q->dma_tx[entry];
4798 
4799 		tx_q->tx_count_frames = 0;
4800 		stmmac_set_tx_ic(priv, desc);
4801 	}
4802 
4803 	/* We've used all descriptors we need for this skb, however,
4804 	 * advance cur_tx so that it references a fresh descriptor.
4805 	 * ndo_start_xmit will fill this descriptor the next time it's
4806 	 * called and stmmac_tx_clean may clean up to this descriptor.
4807 	 */
4808 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4809 	tx_q->cur_tx = entry;
4810 
4811 	if (netif_msg_pktdata(priv)) {
4812 		netdev_dbg(priv->dev,
4813 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4814 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4815 			   entry, first, nfrags);
4816 
4817 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4818 		print_pkt(skb->data, skb->len);
4819 	}
4820 
4821 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4822 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4823 			  __func__);
4824 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4825 	}
4826 
4827 	u64_stats_update_begin(&txq_stats->q_syncp);
4828 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4829 	if (set_ic)
4830 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4831 	u64_stats_update_end(&txq_stats->q_syncp);
4832 
4833 	if (priv->sarc_type)
4834 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4835 
4836 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4837 	 * problems because all the descriptors are actually ready to be
4838 	 * passed to the DMA engine.
4839 	 */
4840 	if (likely(!is_jumbo)) {
4841 		bool last_segment = (nfrags == 0);
4842 
4843 		des = dma_map_single(priv->device, skb->data,
4844 				     nopaged_len, DMA_TO_DEVICE);
4845 		if (dma_mapping_error(priv->device, des))
4846 			goto dma_map_err;
4847 
4848 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4849 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4850 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4851 
4852 		stmmac_set_desc_addr(priv, first, des);
4853 
4854 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4855 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4856 
4857 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4858 			     priv->hwts_tx_en)) {
4859 			/* declare that device is doing timestamping */
4860 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4861 			stmmac_enable_tx_timestamp(priv, first);
4862 		}
4863 
4864 		/* Prepare the first descriptor setting the OWN bit too */
4865 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4866 				csum_insertion, priv->mode, 0, last_segment,
4867 				skb->len);
4868 	}
4869 
4870 	if (tx_q->tbs & STMMAC_TBS_EN) {
4871 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4872 
4873 		tbs_desc = &tx_q->dma_entx[first_entry];
4874 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4875 	}
4876 
4877 	stmmac_set_tx_owner(priv, first);
4878 
4879 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4880 
4881 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4882 	skb_tx_timestamp(skb);
4883 	stmmac_flush_tx_descriptors(priv, queue);
4884 	stmmac_tx_timer_arm(priv, queue);
4885 
4886 	return NETDEV_TX_OK;
4887 
4888 dma_map_err:
4889 	netdev_err(priv->dev, "Tx DMA map failed\n");
4890 max_sdu_err:
4891 	dev_kfree_skb(skb);
4892 	priv->xstats.tx_dropped++;
4893 	return NETDEV_TX_OK;
4894 }
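/*
 * Note on ownership ordering in the transmit path above: all fragment
 * descriptors are fully prepared first, and only then is the OWN bit of
 * the first descriptor set via stmmac_set_tx_owner().  This matches the
 * in-code remark about being "ready to fill the first descriptor": the
 * DMA engine can never observe the head of a chain whose tail is still
 * being built.
 */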
4895 
4896 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4897 {
4898 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4899 	__be16 vlan_proto = veth->h_vlan_proto;
4900 	u16 vlanid;
4901 
4902 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4903 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4904 	    (vlan_proto == htons(ETH_P_8021AD) &&
4905 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4906 		/* pop the vlan tag */
4907 		vlanid = ntohs(veth->h_vlan_TCI);
4908 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4909 		skb_pull(skb, VLAN_HLEN);
4910 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4911 	}
4912 }
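/*
 * The driver-level strip above rewrites the header in place: the 12 bytes
 * of destination + source MAC are moved forward by VLAN_HLEN so that they
 * overwrite the 802.1Q/802.1AD tag, skb_pull() then drops the now-unused
 * first four bytes, and the extracted TCI is handed to the stack out of
 * band with __vlan_hwaccel_put_tag().
 */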
4913 
4914 /**
4915  * stmmac_rx_refill - refill used skb preallocated buffers
4916  * @priv: driver private structure
4917  * @queue: RX queue index
4918  * Description : this is to reallocate the skb for the reception process
4919  * that is based on zero-copy.
4920  */
4921 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4922 {
4923 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4924 	int dirty = stmmac_rx_dirty(priv, queue);
4925 	unsigned int entry = rx_q->dirty_rx;
4926 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4927 
4928 	if (priv->dma_cap.host_dma_width <= 32)
4929 		gfp |= GFP_DMA32;
4930 
4931 	while (dirty-- > 0) {
4932 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4933 		struct dma_desc *p;
4934 		bool use_rx_wd;
4935 
4936 		if (priv->extend_desc)
4937 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4938 		else
4939 			p = rx_q->dma_rx + entry;
4940 
4941 		if (!buf->page) {
4942 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4943 			if (!buf->page)
4944 				break;
4945 		}
4946 
4947 		if (priv->sph_active && !buf->sec_page) {
4948 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4949 			if (!buf->sec_page)
4950 				break;
4951 
4952 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4953 		}
4954 
4955 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4956 
4957 		stmmac_set_desc_addr(priv, p, buf->addr);
4958 		if (priv->sph_active)
4959 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4960 		else
4961 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4962 		stmmac_refill_desc3(priv, rx_q, p);
4963 
4964 		rx_q->rx_count_frames++;
4965 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4966 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4967 			rx_q->rx_count_frames = 0;
4968 
4969 		use_rx_wd = !priv->rx_coal_frames[queue];
4970 		use_rx_wd |= rx_q->rx_count_frames > 0;
4971 		if (!priv->use_riwt)
4972 			use_rx_wd = false;
4973 
4974 		dma_wmb();
4975 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4976 
4977 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4978 	}
4979 	rx_q->dirty_rx = entry;
4980 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4981 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4982 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4983 	/* Wake up Rx DMA from the suspend state if required */
4984 	stmmac_enable_dma_reception(priv, priv->ioaddr, queue);
4985 }
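/*
 * Roughly, the refill loop above republishes buffers to the hardware: each
 * refilled descriptor gets fresh page addresses and its OWN bit set (after
 * a dma_wmb() so the address writes are visible first), and once the loop
 * ends the new dirty_rx index is converted into a descriptor bus address
 * and written to the RX tail pointer, telling the DMA engine how far it
 * may now advance.  stmmac_enable_dma_reception() additionally kicks RX
 * DMA out of the suspend state in case the ring had run completely dry.
 */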
4986 
4987 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4988 				       struct dma_desc *p,
4989 				       int status, unsigned int len)
4990 {
4991 	unsigned int plen = 0, hlen = 0;
4992 	int coe = priv->hw->rx_csum;
4993 
4994 	/* Not first descriptor, buffer is always zero */
4995 	if (priv->sph_active && len)
4996 		return 0;
4997 
4998 	/* First descriptor, get split header length */
4999 	stmmac_get_rx_header_len(priv, p, &hlen);
5000 	if (priv->sph_active && hlen) {
5001 		priv->xstats.rx_split_hdr_pkt_n++;
5002 		return hlen;
5003 	}
5004 
5005 	/* First descriptor, not last descriptor and not split header */
5006 	if (status & rx_not_ls)
5007 		return priv->dma_conf.dma_buf_sz;
5008 
5009 	plen = stmmac_get_rx_frame_len(priv, p, coe);
5010 
5011 	/* First descriptor and last descriptor and not split header */
5012 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
5013 }
5014 
5015 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
5016 				       struct dma_desc *p,
5017 				       int status, unsigned int len)
5018 {
5019 	int coe = priv->hw->rx_csum;
5020 	unsigned int plen = 0;
5021 
5022 	/* Not split header, buffer is not available */
5023 	if (!priv->sph_active)
5024 		return 0;
5025 
5026 	/* Not last descriptor */
5027 	if (status & rx_not_ls)
5028 		return priv->dma_conf.dma_buf_sz;
5029 
5030 	plen = stmmac_get_rx_frame_len(priv, p, coe);
5031 
5032 	/* Last descriptor */
5033 	return plen - len;
5034 }
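/*
 * Example of how the two helpers above split a frame when Split Header is
 * active (hypothetical sizes): for a 1400-byte frame whose headers occupy
 * 54 bytes and that fits in one descriptor, stmmac_rx_buf1_len() returns
 * the reported header length (54) for the first buffer, and
 * stmmac_rx_buf2_len(), called with len == 54, returns plen - len = 1346
 * for the payload buffer.  Without Split Header, buf2 is never used and
 * buf1 carries up to dma_buf_sz bytes per descriptor.
 */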
5035 
5036 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
5037 				struct xdp_frame *xdpf, bool dma_map)
5038 {
5039 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
5040 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
5041 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
5042 	unsigned int entry = tx_q->cur_tx;
5043 	struct dma_desc *tx_desc;
5044 	dma_addr_t dma_addr;
5045 	bool set_ic;
5046 
5047 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
5048 		return STMMAC_XDP_CONSUMED;
5049 
5050 	if (priv->est && priv->est->enable &&
5051 	    priv->est->max_sdu[queue] &&
5052 	    xdpf->len > priv->est->max_sdu[queue]) {
5053 		priv->xstats.max_sdu_txq_drop[queue]++;
5054 		return STMMAC_XDP_CONSUMED;
5055 	}
5056 
5057 	if (likely(priv->extend_desc))
5058 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
5059 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
5060 		tx_desc = &tx_q->dma_entx[entry].basic;
5061 	else
5062 		tx_desc = tx_q->dma_tx + entry;
5063 
5064 	if (dma_map) {
5065 		dma_addr = dma_map_single(priv->device, xdpf->data,
5066 					  xdpf->len, DMA_TO_DEVICE);
5067 		if (dma_mapping_error(priv->device, dma_addr))
5068 			return STMMAC_XDP_CONSUMED;
5069 
5070 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
5071 	} else {
5072 		struct page *page = virt_to_page(xdpf->data);
5073 
5074 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
5075 			   xdpf->headroom;
5076 		dma_sync_single_for_device(priv->device, dma_addr,
5077 					   xdpf->len, DMA_BIDIRECTIONAL);
5078 
5079 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
5080 	}
5081 
5082 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
5083 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
5084 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
5085 	tx_q->tx_skbuff_dma[entry].last_segment = true;
5086 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
5087 
5088 	tx_q->xdpf[entry] = xdpf;
5089 
5090 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
5091 
5092 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
5093 			       csum, priv->mode, true, true,
5094 			       xdpf->len);
5095 
5096 	tx_q->tx_count_frames++;
5097 
5098 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5099 		set_ic = true;
5100 	else
5101 		set_ic = false;
5102 
5103 	if (set_ic) {
5104 		tx_q->tx_count_frames = 0;
5105 		stmmac_set_tx_ic(priv, tx_desc);
5106 		u64_stats_update_begin(&txq_stats->q_syncp);
5107 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
5108 		u64_stats_update_end(&txq_stats->q_syncp);
5109 	}
5110 
5111 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5112 
5113 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5114 	tx_q->cur_tx = entry;
5115 
5116 	return STMMAC_XDP_TX;
5117 }
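/*
 * Interrupt coalescing on the XDP TX path above: the IOC bit is only
 * requested when tx_count_frames reaches a multiple of
 * tx_coal_frames[queue].  With, say, tx_coal_frames = 32 (a hypothetical
 * setting), only every 32nd queued frame asks for a completion interrupt;
 * frames queued in between rely on the TX coalescing timer armed from
 * stmmac_finalize_xdp_rx() to get their descriptors cleaned.
 */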
5118 
5119 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5120 				   int cpu)
5121 {
5122 	int index = cpu;
5123 
5124 	if (unlikely(index < 0))
5125 		index = 0;
5126 
5127 	while (index >= priv->plat->tx_queues_to_use)
5128 		index -= priv->plat->tx_queues_to_use;
5129 
5130 	return index;
5131 }
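/*
 * The loop above is simply cpu % tx_queues_to_use, folding the current CPU
 * id into the valid TX queue range: with four TX queues in use
 * (hypothetically), CPU 6 always lands on queue 2.  Each CPU therefore
 * keeps hitting the same queue, which bounds contention on the
 * netdev_queue lock shared with the slow transmit path.
 */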
5132 
5133 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5134 				struct xdp_buff *xdp)
5135 {
5136 	bool zc = !!(xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL);
5137 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5138 	int cpu = smp_processor_id();
5139 	struct netdev_queue *nq;
5140 	int queue;
5141 	int res;
5142 
5143 	if (unlikely(!xdpf))
5144 		return STMMAC_XDP_CONSUMED;
5145 
5146 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5147 	nq = netdev_get_tx_queue(priv->dev, queue);
5148 
5149 	__netif_tx_lock(nq, cpu);
5150 	/* Avoids TX time-out as we are sharing with slow path */
5151 	txq_trans_cond_update(nq);
5152 
5153 	/* For zero copy XDP_TX action, dma_map is true */
5154 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, zc);
5155 	if (res == STMMAC_XDP_TX) {
5156 		stmmac_flush_tx_descriptors(priv, queue);
5157 	} else if (res == STMMAC_XDP_CONSUMED && zc) {
5158 		/* xdp has been freed by xdp_convert_buff_to_frame(),
5159 		 * no need to call xsk_buff_free() again, so return
5160 		 * STMMAC_XSK_CONSUMED.
5161 		 */
5162 		res = STMMAC_XSK_CONSUMED;
5163 		xdp_return_frame(xdpf);
5164 	}
5165 
5166 	__netif_tx_unlock(nq);
5167 
5168 	return res;
5169 }
5170 
5171 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5172 				 struct bpf_prog *prog,
5173 				 struct xdp_buff *xdp)
5174 {
5175 	u32 act;
5176 	int res;
5177 
5178 	act = bpf_prog_run_xdp(prog, xdp);
5179 	switch (act) {
5180 	case XDP_PASS:
5181 		res = STMMAC_XDP_PASS;
5182 		break;
5183 	case XDP_TX:
5184 		res = stmmac_xdp_xmit_back(priv, xdp);
5185 		break;
5186 	case XDP_REDIRECT:
5187 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5188 			res = STMMAC_XDP_CONSUMED;
5189 		else
5190 			res = STMMAC_XDP_REDIRECT;
5191 		break;
5192 	default:
5193 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5194 		fallthrough;
5195 	case XDP_ABORTED:
5196 		trace_xdp_exception(priv->dev, prog, act);
5197 		fallthrough;
5198 	case XDP_DROP:
5199 		res = STMMAC_XDP_CONSUMED;
5200 		break;
5201 	}
5202 
5203 	return res;
5204 }
5205 
5206 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5207 					   struct xdp_buff *xdp)
5208 {
5209 	struct bpf_prog *prog;
5210 	int res;
5211 
5212 	prog = READ_ONCE(priv->xdp_prog);
5213 	if (!prog) {
5214 		res = STMMAC_XDP_PASS;
5215 		goto out;
5216 	}
5217 
5218 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5219 out:
5220 	return ERR_PTR(-res);
5221 }
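/*
 * The XDP verdict is passed back to stmmac_rx() inside an ERR_PTR: since
 * STMMAC_XDP_PASS is 0, ERR_PTR(-0) is NULL and the caller simply
 * continues building an skb, while any other verdict makes IS_ERR() true
 * and is recovered with -PTR_ERR() (see the xdp_res handling in
 * stmmac_rx()).
 */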
5222 
5223 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5224 				   int xdp_status)
5225 {
5226 	int cpu = smp_processor_id();
5227 	int queue;
5228 
5229 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5230 
5231 	if (xdp_status & STMMAC_XDP_TX)
5232 		stmmac_tx_timer_arm(priv, queue);
5233 
5234 	if (xdp_status & STMMAC_XDP_REDIRECT)
5235 		xdp_do_flush();
5236 }
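/*
 * Per-NAPI-poll batching: XDP_TX frames were already flushed to the DMA
 * ring as they were queued, so here only the TX coalescing timer is armed
 * to guarantee their descriptors get cleaned; redirected frames are
 * drained once per poll via xdp_do_flush(), which is what actually pushes
 * everything queued by xdp_do_redirect() out to the target devices.
 */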
5237 
5238 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5239 					       struct xdp_buff *xdp)
5240 {
5241 	unsigned int metasize = xdp->data - xdp->data_meta;
5242 	unsigned int datasize = xdp->data_end - xdp->data;
5243 	struct sk_buff *skb;
5244 
5245 	skb = napi_alloc_skb(&ch->rxtx_napi,
5246 			     xdp->data_end - xdp->data_hard_start);
5247 	if (unlikely(!skb))
5248 		return NULL;
5249 
5250 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5251 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5252 	if (metasize)
5253 		skb_metadata_set(skb, metasize);
5254 
5255 	return skb;
5256 }
5257 
5258 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5259 				   struct dma_desc *p, struct dma_desc *np,
5260 				   struct xdp_buff *xdp)
5261 {
5262 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5263 	struct stmmac_channel *ch = &priv->channel[queue];
5264 	unsigned int len = xdp->data_end - xdp->data;
5265 	enum pkt_hash_types hash_type;
5266 	int coe = priv->hw->rx_csum;
5267 	struct sk_buff *skb;
5268 	u32 hash;
5269 
5270 	skb = stmmac_construct_skb_zc(ch, xdp);
5271 	if (!skb) {
5272 		priv->xstats.rx_dropped++;
5273 		return;
5274 	}
5275 
5276 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5277 	if (priv->hw->hw_vlan_en)
5278 		/* MAC level stripping. */
5279 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5280 	else
5281 		/* Driver level stripping. */
5282 		stmmac_rx_vlan(priv->dev, skb);
5283 	skb->protocol = eth_type_trans(skb, priv->dev);
5284 
5285 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5286 		skb_checksum_none_assert(skb);
5287 	else
5288 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5289 
5290 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5291 		skb_set_hash(skb, hash, hash_type);
5292 
5293 	skb_record_rx_queue(skb, queue);
5294 	napi_gro_receive(&ch->rxtx_napi, skb);
5295 
5296 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5297 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5298 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5299 	u64_stats_update_end(&rxq_stats->napi_syncp);
5300 }
5301 
5302 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5303 {
5304 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5305 	unsigned int entry = rx_q->dirty_rx;
5306 	struct dma_desc *rx_desc = NULL;
5307 	bool ret = true;
5308 
5309 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5310 
5311 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5312 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5313 		dma_addr_t dma_addr;
5314 		bool use_rx_wd;
5315 
5316 		if (!buf->xdp) {
5317 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5318 			if (!buf->xdp) {
5319 				ret = false;
5320 				break;
5321 			}
5322 		}
5323 
5324 		if (priv->extend_desc)
5325 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5326 		else
5327 			rx_desc = rx_q->dma_rx + entry;
5328 
5329 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5330 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5331 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5332 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5333 
5334 		rx_q->rx_count_frames++;
5335 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5336 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5337 			rx_q->rx_count_frames = 0;
5338 
5339 		use_rx_wd = !priv->rx_coal_frames[queue];
5340 		use_rx_wd |= rx_q->rx_count_frames > 0;
5341 		if (!priv->use_riwt)
5342 			use_rx_wd = false;
5343 
5344 		dma_wmb();
5345 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5346 
5347 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5348 	}
5349 
5350 	if (rx_desc) {
5351 		rx_q->dirty_rx = entry;
5352 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5353 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5354 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5355 	}
5356 
5357 	return ret;
5358 }
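/*
 * Unlike the page_pool refill, the zero-copy refill above must never
 * advance past cur_rx (the consumer side of the same ring), and it can
 * also fail outright when xsk_buff_alloc() finds the userspace fill queue
 * empty.  The "false" return feeds the failure flag in stmmac_rx_zc(),
 * which in turn sets the XSK need_wakeup hint (when the pool uses it) so
 * userspace knows to replenish the fill ring.
 */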
5359 
5360 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5361 {
5362 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5363 	 * to represent incoming packet, whereas cb field in the same structure
5364 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5365 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5366 	 */
5367 	return (struct stmmac_xdp_buff *)xdp;
5368 }
5369 
5370 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5371 {
5372 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5373 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5374 	unsigned int count = 0, error = 0, len = 0;
5375 	int dirty = stmmac_rx_dirty(priv, queue);
5376 	unsigned int next_entry = rx_q->cur_rx;
5377 	u32 rx_errors = 0, rx_dropped = 0;
5378 	unsigned int desc_size;
5379 	struct bpf_prog *prog;
5380 	bool failure = false;
5381 	int xdp_status = 0;
5382 	int status = 0;
5383 
5384 	if (netif_msg_rx_status(priv)) {
5385 		void *rx_head;
5386 
5387 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5388 		if (priv->extend_desc) {
5389 			rx_head = (void *)rx_q->dma_erx;
5390 			desc_size = sizeof(struct dma_extended_desc);
5391 		} else {
5392 			rx_head = (void *)rx_q->dma_rx;
5393 			desc_size = sizeof(struct dma_desc);
5394 		}
5395 
5396 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5397 				    rx_q->dma_rx_phy, desc_size);
5398 	}
5399 	while (count < limit) {
5400 		struct stmmac_rx_buffer *buf;
5401 		struct stmmac_xdp_buff *ctx;
5402 		unsigned int buf1_len = 0;
5403 		struct dma_desc *np, *p;
5404 		int entry;
5405 		int res;
5406 
5407 		if (!count && rx_q->state_saved) {
5408 			error = rx_q->state.error;
5409 			len = rx_q->state.len;
5410 		} else {
5411 			rx_q->state_saved = false;
5412 			error = 0;
5413 			len = 0;
5414 		}
5415 
5416 read_again:
5417 		if (count >= limit)
5418 			break;
5419 
5420 		buf1_len = 0;
5421 		entry = next_entry;
5422 		buf = &rx_q->buf_pool[entry];
5423 
5424 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5425 			failure = failure ||
5426 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5427 			dirty = 0;
5428 		}
5429 
5430 		if (priv->extend_desc)
5431 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5432 		else
5433 			p = rx_q->dma_rx + entry;
5434 
5435 		/* read the status of the incoming frame */
5436 		status = stmmac_rx_status(priv, &priv->xstats, p);
5437 		/* check if managed by the DMA otherwise go ahead */
5438 		if (unlikely(status & dma_own))
5439 			break;
5440 
5441 		/* Prefetch the next RX descriptor */
5442 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5443 						priv->dma_conf.dma_rx_size);
5444 		next_entry = rx_q->cur_rx;
5445 
5446 		if (priv->extend_desc)
5447 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5448 		else
5449 			np = rx_q->dma_rx + next_entry;
5450 
5451 		prefetch(np);
5452 
5453 		/* Ensure a valid XSK buffer before proceeding */
5454 		if (!buf->xdp)
5455 			break;
5456 
5457 		if (priv->extend_desc)
5458 			stmmac_rx_extended_status(priv, &priv->xstats,
5459 						  rx_q->dma_erx + entry);
5460 		if (unlikely(status == discard_frame)) {
5461 			xsk_buff_free(buf->xdp);
5462 			buf->xdp = NULL;
5463 			dirty++;
5464 			error = 1;
5465 			if (!priv->hwts_rx_en)
5466 				rx_errors++;
5467 		}
5468 
5469 		if (unlikely(error && (status & rx_not_ls)))
5470 			goto read_again;
5471 		if (unlikely(error)) {
5472 			count++;
5473 			continue;
5474 		}
5475 
5476 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5477 		if (likely(status & rx_not_ls)) {
5478 			xsk_buff_free(buf->xdp);
5479 			buf->xdp = NULL;
5480 			dirty++;
5481 			count++;
5482 			goto read_again;
5483 		}
5484 
5485 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5486 		ctx->priv = priv;
5487 		ctx->desc = p;
5488 		ctx->ndesc = np;
5489 
5490 		/* XDP ZC frames only support primary buffers for now */
5491 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5492 		len += buf1_len;
5493 
5494 		/* ACS is disabled; strip manually. */
5495 		if (likely(!(status & rx_not_ls))) {
5496 			buf1_len -= ETH_FCS_LEN;
5497 			len -= ETH_FCS_LEN;
5498 		}
5499 
5500 		/* RX buffer is good and fit into a XSK pool buffer */
5501 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5502 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5503 
5504 		prog = READ_ONCE(priv->xdp_prog);
5505 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5506 
5507 		switch (res) {
5508 		case STMMAC_XDP_PASS:
5509 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5510 			xsk_buff_free(buf->xdp);
5511 			break;
5512 		case STMMAC_XDP_CONSUMED:
5513 			xsk_buff_free(buf->xdp);
5514 			fallthrough;
5515 		case STMMAC_XSK_CONSUMED:
5516 			rx_dropped++;
5517 			break;
5518 		case STMMAC_XDP_TX:
5519 		case STMMAC_XDP_REDIRECT:
5520 			xdp_status |= res;
5521 			break;
5522 		}
5523 
5524 		buf->xdp = NULL;
5525 		dirty++;
5526 		count++;
5527 	}
5528 
5529 	if (status & rx_not_ls) {
5530 		rx_q->state_saved = true;
5531 		rx_q->state.error = error;
5532 		rx_q->state.len = len;
5533 	}
5534 
5535 	stmmac_finalize_xdp_rx(priv, xdp_status);
5536 
5537 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5538 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5539 	u64_stats_update_end(&rxq_stats->napi_syncp);
5540 
5541 	priv->xstats.rx_dropped += rx_dropped;
5542 	priv->xstats.rx_errors += rx_errors;
5543 
5544 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5545 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5546 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5547 		else
5548 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5549 
5550 		return (int)count;
5551 	}
5552 
5553 	return failure ? limit : (int)count;
5554 }
5555 
5556 /**
5557  * stmmac_rx - manage the receive process
5558  * @priv: driver private structure
5559  * @limit: napi budget
5560  * @queue: RX queue index.
5561  * Description : this is the function called by the napi poll method.
5562  * It gets all the frames inside the ring.
5563  */
5564 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5565 {
5566 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5567 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5568 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5569 	struct stmmac_channel *ch = &priv->channel[queue];
5570 	unsigned int count = 0, error = 0, len = 0;
5571 	int status = 0, coe = priv->hw->rx_csum;
5572 	unsigned int next_entry = rx_q->cur_rx;
5573 	enum dma_data_direction dma_dir;
5574 	unsigned int desc_size;
5575 	struct sk_buff *skb = NULL;
5576 	struct stmmac_xdp_buff ctx;
5577 	int xdp_status = 0;
5578 	int bufsz;
5579 
5580 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5581 	bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5582 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5583 
5584 	if (netif_msg_rx_status(priv)) {
5585 		void *rx_head;
5586 
5587 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5588 		if (priv->extend_desc) {
5589 			rx_head = (void *)rx_q->dma_erx;
5590 			desc_size = sizeof(struct dma_extended_desc);
5591 		} else {
5592 			rx_head = (void *)rx_q->dma_rx;
5593 			desc_size = sizeof(struct dma_desc);
5594 		}
5595 
5596 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5597 				    rx_q->dma_rx_phy, desc_size);
5598 	}
5599 	while (count < limit) {
5600 		unsigned int buf1_len = 0, buf2_len = 0;
5601 		enum pkt_hash_types hash_type;
5602 		struct stmmac_rx_buffer *buf;
5603 		struct dma_desc *np, *p;
5604 		int entry;
5605 		u32 hash;
5606 
5607 		if (!count && rx_q->state_saved) {
5608 			skb = rx_q->state.skb;
5609 			error = rx_q->state.error;
5610 			len = rx_q->state.len;
5611 		} else {
5612 			rx_q->state_saved = false;
5613 			skb = NULL;
5614 			error = 0;
5615 			len = 0;
5616 		}
5617 
5618 read_again:
5619 		if (count >= limit)
5620 			break;
5621 
5622 		buf1_len = 0;
5623 		buf2_len = 0;
5624 		entry = next_entry;
5625 		buf = &rx_q->buf_pool[entry];
5626 
5627 		if (priv->extend_desc)
5628 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5629 		else
5630 			p = rx_q->dma_rx + entry;
5631 
5632 		/* read the status of the incoming frame */
5633 		status = stmmac_rx_status(priv, &priv->xstats, p);
5634 		/* check if managed by the DMA otherwise go ahead */
5635 		if (unlikely(status & dma_own))
5636 			break;
5637 
5638 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5639 						priv->dma_conf.dma_rx_size);
5640 		next_entry = rx_q->cur_rx;
5641 
5642 		if (priv->extend_desc)
5643 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5644 		else
5645 			np = rx_q->dma_rx + next_entry;
5646 
5647 		prefetch(np);
5648 
5649 		if (priv->extend_desc)
5650 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5651 		if (unlikely(status == discard_frame)) {
5652 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5653 			buf->page = NULL;
5654 			error = 1;
5655 			if (!priv->hwts_rx_en)
5656 				rx_errors++;
5657 		}
5658 
5659 		if (unlikely(error && (status & rx_not_ls)))
5660 			goto read_again;
5661 		if (unlikely(error)) {
5662 			dev_kfree_skb(skb);
5663 			skb = NULL;
5664 			count++;
5665 			continue;
5666 		}
5667 
5668 		/* Buffer is good. Go on. */
5669 
5670 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5671 		len += buf1_len;
5672 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5673 		len += buf2_len;
5674 
5675 		/* ACS is disabled; strip manually. */
5676 		if (likely(!(status & rx_not_ls))) {
5677 			if (buf2_len) {
5678 				buf2_len -= ETH_FCS_LEN;
5679 				len -= ETH_FCS_LEN;
5680 			} else if (buf1_len) {
5681 				buf1_len -= ETH_FCS_LEN;
5682 				len -= ETH_FCS_LEN;
5683 			}
5684 		}
5685 
5686 		if (!skb) {
5687 			unsigned int pre_len, sync_len;
5688 
5689 			dma_sync_single_for_cpu(priv->device, buf->addr,
5690 						buf1_len, dma_dir);
5691 			net_prefetch(page_address(buf->page) +
5692 				     buf->page_offset);
5693 
5694 			xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5695 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5696 					 buf->page_offset, buf1_len, true);
5697 
5698 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5699 				  buf->page_offset;
5700 
5701 			ctx.priv = priv;
5702 			ctx.desc = p;
5703 			ctx.ndesc = np;
5704 
5705 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5706 			/* Due to xdp_adjust_tail: the DMA sync for_device
5707 			 * must cover the maximum length the CPU touched
5708 			 */
5709 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5710 				   buf->page_offset;
5711 			sync_len = max(sync_len, pre_len);
5712 
5713 			/* For Not XDP_PASS verdict */
5714 			if (IS_ERR(skb)) {
5715 				unsigned int xdp_res = -PTR_ERR(skb);
5716 
5717 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5718 					page_pool_put_page(rx_q->page_pool,
5719 							   virt_to_head_page(ctx.xdp.data),
5720 							   sync_len, true);
5721 					buf->page = NULL;
5722 					rx_dropped++;
5723 
5724 					/* Clear skb, as it was set to the
5725 					 * verdict status by the XDP program.
5726 					 */
5727 					skb = NULL;
5728 
5729 					if (unlikely((status & rx_not_ls)))
5730 						goto read_again;
5731 
5732 					count++;
5733 					continue;
5734 				} else if (xdp_res & (STMMAC_XDP_TX |
5735 						      STMMAC_XDP_REDIRECT)) {
5736 					xdp_status |= xdp_res;
5737 					buf->page = NULL;
5738 					skb = NULL;
5739 					count++;
5740 					continue;
5741 				}
5742 			}
5743 		}
5744 
5745 		if (!skb) {
5746 			unsigned int head_pad_len;
5747 
5748 			/* XDP program may expand or reduce tail */
5749 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5750 
5751 			skb = napi_build_skb(page_address(buf->page),
5752 					     rx_q->napi_skb_frag_size);
5753 			if (!skb) {
5754 				page_pool_recycle_direct(rx_q->page_pool,
5755 							 buf->page);
5756 				rx_dropped++;
5757 				count++;
5758 				goto drain_data;
5759 			}
5760 
5761 			/* XDP program may adjust header */
5762 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5763 			skb_reserve(skb, head_pad_len);
5764 			skb_put(skb, buf1_len);
5765 			skb_mark_for_recycle(skb);
5766 			buf->page = NULL;
5767 		} else if (buf1_len) {
5768 			dma_sync_single_for_cpu(priv->device, buf->addr,
5769 						buf1_len, dma_dir);
5770 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5771 					buf->page, buf->page_offset, buf1_len,
5772 					priv->dma_conf.dma_buf_sz);
5773 			buf->page = NULL;
5774 		}
5775 
5776 		if (buf2_len) {
5777 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5778 						buf2_len, dma_dir);
5779 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5780 					buf->sec_page, 0, buf2_len,
5781 					priv->dma_conf.dma_buf_sz);
5782 			buf->sec_page = NULL;
5783 		}
5784 
5785 drain_data:
5786 		if (likely(status & rx_not_ls))
5787 			goto read_again;
5788 		if (!skb)
5789 			continue;
5790 
5791 		/* Got entire packet into SKB. Finish it. */
5792 
5793 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5794 
5795 		if (priv->hw->hw_vlan_en)
5796 			/* MAC level stripping. */
5797 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5798 		else
5799 			/* Driver level stripping. */
5800 			stmmac_rx_vlan(priv->dev, skb);
5801 
5802 		skb->protocol = eth_type_trans(skb, priv->dev);
5803 
5804 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5805 		    (status & csum_none))
5806 			skb_checksum_none_assert(skb);
5807 		else
5808 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5809 
5810 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5811 			skb_set_hash(skb, hash, hash_type);
5812 
5813 		skb_record_rx_queue(skb, queue);
5814 		napi_gro_receive(&ch->rx_napi, skb);
5815 		skb = NULL;
5816 
5817 		rx_packets++;
5818 		rx_bytes += len;
5819 		count++;
5820 	}
5821 
5822 	if (status & rx_not_ls || skb) {
5823 		rx_q->state_saved = true;
5824 		rx_q->state.skb = skb;
5825 		rx_q->state.error = error;
5826 		rx_q->state.len = len;
5827 	}
5828 
5829 	stmmac_finalize_xdp_rx(priv, xdp_status);
5830 
5831 	stmmac_rx_refill(priv, queue);
5832 
5833 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5834 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5835 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5836 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5837 	u64_stats_update_end(&rxq_stats->napi_syncp);
5838 
5839 	priv->xstats.rx_dropped += rx_dropped;
5840 	priv->xstats.rx_errors += rx_errors;
5841 
5842 	return count;
5843 }
5844 
5845 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5846 {
5847 	struct stmmac_channel *ch =
5848 		container_of(napi, struct stmmac_channel, rx_napi);
5849 	struct stmmac_priv *priv = ch->priv_data;
5850 	struct stmmac_rxq_stats *rxq_stats;
5851 	u32 chan = ch->index;
5852 	int work_done;
5853 
5854 	rxq_stats = &priv->xstats.rxq_stats[chan];
5855 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5856 	u64_stats_inc(&rxq_stats->napi.poll);
5857 	u64_stats_update_end(&rxq_stats->napi_syncp);
5858 
5859 	work_done = stmmac_rx(priv, budget, chan);
5860 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5861 		unsigned long flags;
5862 
5863 		spin_lock_irqsave(&ch->lock, flags);
5864 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5865 		spin_unlock_irqrestore(&ch->lock, flags);
5866 	}
5867 
5868 	return work_done;
5869 }
5870 
5871 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5872 {
5873 	struct stmmac_channel *ch =
5874 		container_of(napi, struct stmmac_channel, tx_napi);
5875 	struct stmmac_priv *priv = ch->priv_data;
5876 	struct stmmac_txq_stats *txq_stats;
5877 	bool pending_packets = false;
5878 	u32 chan = ch->index;
5879 	int work_done;
5880 
5881 	txq_stats = &priv->xstats.txq_stats[chan];
5882 	u64_stats_update_begin(&txq_stats->napi_syncp);
5883 	u64_stats_inc(&txq_stats->napi.poll);
5884 	u64_stats_update_end(&txq_stats->napi_syncp);
5885 
5886 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5887 	work_done = min(work_done, budget);
5888 
5889 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5890 		unsigned long flags;
5891 
5892 		spin_lock_irqsave(&ch->lock, flags);
5893 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5894 		spin_unlock_irqrestore(&ch->lock, flags);
5895 	}
5896 
5897 	/* TX still has packets to handle, check if we need to arm the tx timer */
5898 	if (pending_packets)
5899 		stmmac_tx_timer_arm(priv, chan);
5900 
5901 	return work_done;
5902 }
5903 
5904 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5905 {
5906 	struct stmmac_channel *ch =
5907 		container_of(napi, struct stmmac_channel, rxtx_napi);
5908 	struct stmmac_priv *priv = ch->priv_data;
5909 	bool tx_pending_packets = false;
5910 	int rx_done, tx_done, rxtx_done;
5911 	struct stmmac_rxq_stats *rxq_stats;
5912 	struct stmmac_txq_stats *txq_stats;
5913 	u32 chan = ch->index;
5914 
5915 	rxq_stats = &priv->xstats.rxq_stats[chan];
5916 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5917 	u64_stats_inc(&rxq_stats->napi.poll);
5918 	u64_stats_update_end(&rxq_stats->napi_syncp);
5919 
5920 	txq_stats = &priv->xstats.txq_stats[chan];
5921 	u64_stats_update_begin(&txq_stats->napi_syncp);
5922 	u64_stats_inc(&txq_stats->napi.poll);
5923 	u64_stats_update_end(&txq_stats->napi_syncp);
5924 
5925 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5926 	tx_done = min(tx_done, budget);
5927 
5928 	rx_done = stmmac_rx_zc(priv, budget, chan);
5929 
5930 	rxtx_done = max(tx_done, rx_done);
5931 
5932 	/* If either TX or RX work is not complete, return budget
5933 	 * and keep polling
5934 	 */
5935 	if (rxtx_done >= budget)
5936 		return budget;
5937 
5938 	/* all work done, exit the polling mode */
5939 	if (napi_complete_done(napi, rxtx_done)) {
5940 		unsigned long flags;
5941 
5942 		spin_lock_irqsave(&ch->lock, flags);
5943 		/* Both RX and TX work are complete,
5944 		 * so enable both RX & TX IRQs.
5945 		 */
5946 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5947 		spin_unlock_irqrestore(&ch->lock, flags);
5948 	}
5949 
5950 	/* TX still has packets to handle, check if we need to arm the tx timer */
5951 	if (tx_pending_packets)
5952 		stmmac_tx_timer_arm(priv, chan);
5953 
5954 	return min(rxtx_done, budget - 1);
5955 }
5956 
5957 /**
5958  *  stmmac_tx_timeout
5959  *  @dev : Pointer to net device structure
5960  *  @txqueue: the index of the hanging transmit queue
5961  *  Description: this function is called when a packet transmission fails to
5962  *   complete within a reasonable time. The driver will mark the error in the
5963  *   netdev structure and arrange for the device to be reset to a sane state
5964  *   in order to transmit a new packet.
5965  */
5966 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5967 {
5968 	struct stmmac_priv *priv = netdev_priv(dev);
5969 
5970 	stmmac_global_err(priv);
5971 }
5972 
5973 /**
5974  *  stmmac_set_rx_mode - entry point for multicast addressing
5975  *  @dev : pointer to the device structure
5976  *  Description:
5977  *  This function is a driver entry point which gets called by the kernel
5978  *  whenever multicast addresses must be enabled/disabled.
5979  *  Return value:
5980  *  void.
5981  *
5982  *  FIXME: This may need RXC to be running, but it may be called with BH
5983  *  disabled, which means we can't call phylink_rx_clk_stop*().
5984  */
5985 static void stmmac_set_rx_mode(struct net_device *dev)
5986 {
5987 	struct stmmac_priv *priv = netdev_priv(dev);
5988 
5989 	stmmac_set_filter(priv, priv->hw, dev);
5990 }
5991 
5992 /**
5993  *  stmmac_change_mtu - entry point to change MTU size for the device.
5994  *  @dev : device pointer.
5995  *  @new_mtu : the new MTU size for the device.
5996  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5997  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5998  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5999  *  Return value:
6000  *  0 on success and an appropriate (-)ve integer as defined in errno.h
6001  *  file on failure.
6002  */
6003 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
6004 {
6005 	struct stmmac_priv *priv = netdev_priv(dev);
6006 	int txfifosz = priv->plat->tx_fifo_size;
6007 	struct stmmac_dma_conf *dma_conf;
6008 	const int mtu = new_mtu;
6009 	int ret;
6010 
6011 	if (txfifosz == 0)
6012 		txfifosz = priv->dma_cap.tx_fifo_size;
6013 
6014 	txfifosz /= priv->plat->tx_queues_to_use;
6015 
6016 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
6017 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
6018 		return -EINVAL;
6019 	}
6020 
6021 	new_mtu = STMMAC_ALIGN(new_mtu);
6022 
6023 	/* If the condition is true, the FIFO is too small or the MTU too large */
6024 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
6025 		return -EINVAL;
6026 
6027 	if (netif_running(dev)) {
6028 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
6029 		/* Try to allocate the new DMA conf with the new mtu */
6030 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
6031 		if (IS_ERR(dma_conf)) {
6032 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
6033 				   mtu);
6034 			return PTR_ERR(dma_conf);
6035 		}
6036 
6037 		__stmmac_release(dev);
6038 
6039 		ret = __stmmac_open(dev, dma_conf);
6040 		if (ret) {
6041 			free_dma_desc_resources(priv, dma_conf);
6042 			kfree(dma_conf);
6043 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
6044 			return ret;
6045 		}
6046 
6047 		kfree(dma_conf);
6048 
6049 		stmmac_set_rx_mode(dev);
6050 	}
6051 
6052 	WRITE_ONCE(dev->mtu, mtu);
6053 	netdev_update_features(dev);
6054 
6055 	return 0;
6056 }
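/*
 * Quick sanity check of the MTU limits above with made-up numbers: with a
 * 16384-byte TX FIFO shared by four TX queues, each queue owns 4096 bytes,
 * so any aligned MTU larger than 4096 (or anything above BUF_SIZE_16KiB)
 * is rejected with -EINVAL.  MTUs above ETH_DATA_LEN are also refused
 * while an XDP program is attached, matching the "Jumbo frames not
 * supported for XDP" message.
 */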
6057 
6058 static netdev_features_t stmmac_fix_features(struct net_device *dev,
6059 					     netdev_features_t features)
6060 {
6061 	struct stmmac_priv *priv = netdev_priv(dev);
6062 
6063 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
6064 		features &= ~NETIF_F_RXCSUM;
6065 
6066 	if (!priv->plat->tx_coe)
6067 		features &= ~NETIF_F_CSUM_MASK;
6068 
6069 	/* Some GMAC devices have buggy Jumbo frame support that
6070 	 * requires the Tx COE to be disabled for oversized frames
6071 	 * (due to limited buffer sizes). In this case we disable
6072 	 * the TX csum insertion in the TDES and do not use SF.
6073 	 */
6074 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
6075 		features &= ~NETIF_F_CSUM_MASK;
6076 
6077 	/* Disable tso if asked by ethtool */
6078 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
6079 		if (features & NETIF_F_TSO)
6080 			priv->tso = true;
6081 		else
6082 			priv->tso = false;
6083 	}
6084 
6085 	return features;
6086 }
6087 
6088 static int stmmac_set_features(struct net_device *netdev,
6089 			       netdev_features_t features)
6090 {
6091 	struct stmmac_priv *priv = netdev_priv(netdev);
6092 
6093 	/* Keep the COE type in case checksum offload is supported */
6094 	if (features & NETIF_F_RXCSUM)
6095 		priv->hw->rx_csum = priv->plat->rx_coe;
6096 	else
6097 		priv->hw->rx_csum = 0;
6098 	/* No check needed because rx_coe has been set before and it will be
6099 	 * fixed in case of issue.
6100 	 */
6101 	stmmac_rx_ipc(priv, priv->hw);
6102 
6103 	if (priv->sph_capable) {
6104 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
6105 		u32 chan;
6106 
6107 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
6108 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6109 	}
6110 
6111 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
6112 		priv->hw->hw_vlan_en = true;
6113 	else
6114 		priv->hw->hw_vlan_en = false;
6115 
6116 	phylink_rx_clk_stop_block(priv->phylink);
6117 	stmmac_set_hw_vlan_mode(priv, priv->hw);
6118 	phylink_rx_clk_stop_unblock(priv->phylink);
6119 
6120 	return 0;
6121 }
6122 
6123 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6124 {
6125 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6126 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6127 	u32 queues_count;
6128 	u32 queue;
6129 	bool xmac;
6130 
6131 	xmac = dwmac_is_xmac(priv->plat->core_type);
6132 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6133 
6134 	if (priv->irq_wake)
6135 		pm_wakeup_event(priv->device, 0);
6136 
6137 	if (priv->dma_cap.estsel)
6138 		stmmac_est_irq_status(priv, priv, priv->dev,
6139 				      &priv->xstats, tx_cnt);
6140 
6141 	if (stmmac_fpe_supported(priv))
6142 		stmmac_fpe_irq_status(priv);
6143 
6144 	/* To handle the GMAC's own interrupts */
6145 	if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
6146 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6147 
6148 		if (unlikely(status)) {
6149 			/* For LPI we need to save the tx status */
6150 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6151 				priv->tx_path_in_lpi_mode = true;
6152 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6153 				priv->tx_path_in_lpi_mode = false;
6154 		}
6155 
6156 		for (queue = 0; queue < queues_count; queue++)
6157 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6158 
6159 		stmmac_timestamp_interrupt(priv, priv);
6160 	}
6161 }
6162 
6163 /**
6164  *  stmmac_interrupt - main ISR
6165  *  @irq: interrupt number.
6166  *  @dev_id: to pass the net device pointer.
6167  *  Description: this is the main driver interrupt service routine.
6168  *  It can call:
6169  *  o DMA service routine (to manage incoming frame reception and transmission
6170  *    status)
6171  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6172  *    interrupts.
6173  */
6174 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6175 {
6176 	struct net_device *dev = (struct net_device *)dev_id;
6177 	struct stmmac_priv *priv = netdev_priv(dev);
6178 
6179 	/* Check if adapter is up */
6180 	if (test_bit(STMMAC_DOWN, &priv->state))
6181 		return IRQ_HANDLED;
6182 
6183 	/* Check ASP error if it isn't delivered via an individual IRQ */
6184 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6185 		return IRQ_HANDLED;
6186 
6187 	/* To handle Common interrupts */
6188 	stmmac_common_interrupt(priv);
6189 
6190 	/* To handle DMA interrupts */
6191 	stmmac_dma_interrupt(priv);
6192 
6193 	return IRQ_HANDLED;
6194 }
6195 
6196 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6197 {
6198 	struct net_device *dev = (struct net_device *)dev_id;
6199 	struct stmmac_priv *priv = netdev_priv(dev);
6200 
6201 	/* Check if adapter is up */
6202 	if (test_bit(STMMAC_DOWN, &priv->state))
6203 		return IRQ_HANDLED;
6204 
6205 	/* To handle Common interrupts */
6206 	stmmac_common_interrupt(priv);
6207 
6208 	return IRQ_HANDLED;
6209 }
6210 
6211 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6212 {
6213 	struct net_device *dev = (struct net_device *)dev_id;
6214 	struct stmmac_priv *priv = netdev_priv(dev);
6215 
6216 	/* Check if adapter is up */
6217 	if (test_bit(STMMAC_DOWN, &priv->state))
6218 		return IRQ_HANDLED;
6219 
6220 	/* Check if a fatal error happened */
6221 	stmmac_safety_feat_interrupt(priv);
6222 
6223 	return IRQ_HANDLED;
6224 }
6225 
6226 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6227 {
6228 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6229 	struct stmmac_dma_conf *dma_conf;
6230 	int chan = tx_q->queue_index;
6231 	struct stmmac_priv *priv;
6232 	int status;
6233 
6234 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6235 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6236 
6237 	/* Check if adapter is up */
6238 	if (test_bit(STMMAC_DOWN, &priv->state))
6239 		return IRQ_HANDLED;
6240 
6241 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6242 
6243 	if (unlikely(status & tx_hard_error_bump_tc)) {
6244 		/* Try to bump up the dma threshold on this failure */
6245 		stmmac_bump_dma_threshold(priv, chan);
6246 	} else if (unlikely(status == tx_hard_error)) {
6247 		stmmac_tx_err(priv, chan);
6248 	}
6249 
6250 	return IRQ_HANDLED;
6251 }
6252 
6253 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6254 {
6255 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6256 	struct stmmac_dma_conf *dma_conf;
6257 	int chan = rx_q->queue_index;
6258 	struct stmmac_priv *priv;
6259 
6260 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6261 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6262 
6263 	/* Check if adapter is up */
6264 	if (test_bit(STMMAC_DOWN, &priv->state))
6265 		return IRQ_HANDLED;
6266 
6267 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6268 
6269 	return IRQ_HANDLED;
6270 }
6271 
6272 /**
6273  *  stmmac_ioctl - Entry point for the Ioctl
6274  *  @dev: Device pointer.
6275  *  @rq: An IOCTL-specific structure that can contain a pointer to
6276  *  a proprietary structure used to pass information to the driver.
6277  *  @cmd: IOCTL command
6278  *  Description:
6279  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6280  */
6281 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6282 {
6283 	struct stmmac_priv *priv = netdev_priv(dev);
6284 	int ret = -EOPNOTSUPP;
6285 
6286 	if (!netif_running(dev))
6287 		return -EINVAL;
6288 
6289 	switch (cmd) {
6290 	case SIOCGMIIPHY:
6291 	case SIOCGMIIREG:
6292 	case SIOCSMIIREG:
6293 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6294 		break;
6295 	default:
6296 		break;
6297 	}
6298 
6299 	return ret;
6300 }
6301 
6302 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6303 				    void *cb_priv)
6304 {
6305 	struct stmmac_priv *priv = cb_priv;
6306 	int ret = -EOPNOTSUPP;
6307 
6308 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6309 		return ret;
6310 
6311 	__stmmac_disable_all_queues(priv);
6312 
6313 	switch (type) {
6314 	case TC_SETUP_CLSU32:
6315 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6316 		break;
6317 	case TC_SETUP_CLSFLOWER:
6318 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6319 		break;
6320 	default:
6321 		break;
6322 	}
6323 
6324 	stmmac_enable_all_queues(priv);
6325 	return ret;
6326 }
6327 
6328 static LIST_HEAD(stmmac_block_cb_list);
6329 
6330 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6331 			   void *type_data)
6332 {
6333 	struct stmmac_priv *priv = netdev_priv(ndev);
6334 
6335 	switch (type) {
6336 	case TC_QUERY_CAPS:
6337 		return stmmac_tc_query_caps(priv, priv, type_data);
6338 	case TC_SETUP_QDISC_MQPRIO:
6339 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6340 	case TC_SETUP_BLOCK:
6341 		return flow_block_cb_setup_simple(type_data,
6342 						  &stmmac_block_cb_list,
6343 						  stmmac_setup_tc_block_cb,
6344 						  priv, priv, true);
6345 	case TC_SETUP_QDISC_CBS:
6346 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6347 	case TC_SETUP_QDISC_TAPRIO:
6348 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6349 	case TC_SETUP_QDISC_ETF:
6350 		return stmmac_tc_setup_etf(priv, priv, type_data);
6351 	default:
6352 		return -EOPNOTSUPP;
6353 	}
6354 }
6355 
6356 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6357 			       struct net_device *sb_dev)
6358 {
6359 	int gso = skb_shinfo(skb)->gso_type;
6360 
6361 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6362 		/*
6363 		 * There is no way to determine the number of TSO/USO
6364 		 * capable Queues. Let's always use Queue 0
6365 		 * because if TSO/USO is supported then at least this
6366 		 * one will be capable.
6367 		 */
6368 		return 0;
6369 	}
6370 
6371 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6372 }
6373 
6374 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6375 {
6376 	struct stmmac_priv *priv = netdev_priv(ndev);
6377 	int ret = 0;
6378 
6379 	ret = pm_runtime_resume_and_get(priv->device);
6380 	if (ret < 0)
6381 		return ret;
6382 
6383 	ret = eth_mac_addr(ndev, addr);
6384 	if (ret)
6385 		goto set_mac_error;
6386 
6387 	phylink_rx_clk_stop_block(priv->phylink);
6388 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6389 	phylink_rx_clk_stop_unblock(priv->phylink);
6390 
6391 set_mac_error:
6392 	pm_runtime_put(priv->device);
6393 
6394 	return ret;
6395 }
6396 
6397 #ifdef CONFIG_DEBUG_FS
6398 static struct dentry *stmmac_fs_dir;
6399 
6400 static void sysfs_display_ring(void *head, int size, int extend_desc,
6401 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6402 {
6403 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6404 	struct dma_desc *p = (struct dma_desc *)head;
6405 	unsigned int desc_size;
6406 	dma_addr_t dma_addr;
6407 	int i;
6408 
6409 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6410 	for (i = 0; i < size; i++) {
6411 		dma_addr = dma_phy_addr + i * desc_size;
6412 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6413 				i, &dma_addr,
6414 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6415 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6416 		if (extend_desc)
6417 			p = &(++ep)->basic;
6418 		else
6419 			p++;
6420 	}
6421 }
6422 
6423 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6424 {
6425 	struct net_device *dev = seq->private;
6426 	struct stmmac_priv *priv = netdev_priv(dev);
6427 	u32 rx_count = priv->plat->rx_queues_to_use;
6428 	u32 tx_count = priv->plat->tx_queues_to_use;
6429 	u32 queue;
6430 
6431 	if ((dev->flags & IFF_UP) == 0)
6432 		return 0;
6433 
6434 	for (queue = 0; queue < rx_count; queue++) {
6435 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6436 
6437 		seq_printf(seq, "RX Queue %d:\n", queue);
6438 
6439 		if (priv->extend_desc) {
6440 			seq_printf(seq, "Extended descriptor ring:\n");
6441 			sysfs_display_ring((void *)rx_q->dma_erx,
6442 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6443 		} else {
6444 			seq_printf(seq, "Descriptor ring:\n");
6445 			sysfs_display_ring((void *)rx_q->dma_rx,
6446 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6447 		}
6448 	}
6449 
6450 	for (queue = 0; queue < tx_count; queue++) {
6451 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6452 
6453 		seq_printf(seq, "TX Queue %d:\n", queue);
6454 
6455 		if (priv->extend_desc) {
6456 			seq_printf(seq, "Extended descriptor ring:\n");
6457 			sysfs_display_ring((void *)tx_q->dma_etx,
6458 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6459 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6460 			seq_printf(seq, "Descriptor ring:\n");
6461 			sysfs_display_ring((void *)tx_q->dma_tx,
6462 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6463 		}
6464 	}
6465 
6466 	return 0;
6467 }
6468 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6469 
6470 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6471 {
6472 	static const char * const dwxgmac_timestamp_source[] = {
6473 		"None",
6474 		"Internal",
6475 		"External",
6476 		"Both",
6477 	};
6478 	static const char * const dwxgmac_safety_feature_desc[] = {
6479 		"No",
6480 		"All Safety Features with ECC and Parity",
6481 		"All Safety Features without ECC or Parity",
6482 		"All Safety Features with Parity Only",
6483 		"ECC Only",
6484 		"UNDEFINED",
6485 		"UNDEFINED",
6486 		"UNDEFINED",
6487 	};
6488 	struct net_device *dev = seq->private;
6489 	struct stmmac_priv *priv = netdev_priv(dev);
6490 
6491 	if (!priv->hw_cap_support) {
6492 		seq_printf(seq, "DMA HW features not supported\n");
6493 		return 0;
6494 	}
6495 
6496 	seq_printf(seq, "==============================\n");
6497 	seq_printf(seq, "\tDMA HW features\n");
6498 	seq_printf(seq, "==============================\n");
6499 
6500 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6501 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6502 	seq_printf(seq, "\t1000 Mbps: %s\n",
6503 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6504 	seq_printf(seq, "\tHalf duplex: %s\n",
6505 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6506 	if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
6507 		seq_printf(seq,
6508 			   "\tNumber of Additional MAC address registers: %d\n",
6509 			   priv->dma_cap.multi_addr);
6510 	} else {
6511 		seq_printf(seq, "\tHash Filter: %s\n",
6512 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6513 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6514 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6515 	}
6516 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6517 		   (priv->dma_cap.pcs) ? "Y" : "N");
6518 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6519 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6520 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6521 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6522 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6523 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6524 	seq_printf(seq, "\tRMON module: %s\n",
6525 		   (priv->dma_cap.rmon) ? "Y" : "N");
6526 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6527 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6528 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6529 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6530 	if (priv->plat->core_type == DWMAC_CORE_XGMAC)
6531 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6532 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6533 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6534 		   (priv->dma_cap.eee) ? "Y" : "N");
6535 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6536 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6537 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6538 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6539 	    priv->plat->core_type == DWMAC_CORE_XGMAC) {
6540 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6541 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6542 	} else {
6543 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6544 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6545 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6546 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6547 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6548 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6549 	}
6550 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6551 		   priv->dma_cap.number_rx_channel);
6552 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6553 		   priv->dma_cap.number_tx_channel);
6554 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6555 		   priv->dma_cap.number_rx_queues);
6556 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6557 		   priv->dma_cap.number_tx_queues);
6558 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6559 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6560 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6561 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6562 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6563 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6564 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6565 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6566 		   priv->dma_cap.pps_out_num);
6567 	seq_printf(seq, "\tSafety Features: %s\n",
6568 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6569 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6570 		   priv->dma_cap.frpsel ? "Y" : "N");
6571 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6572 		   priv->dma_cap.host_dma_width);
6573 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6574 		   priv->dma_cap.rssen ? "Y" : "N");
6575 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6576 		   priv->dma_cap.vlhash ? "Y" : "N");
6577 	seq_printf(seq, "\tSplit Header: %s\n",
6578 		   priv->dma_cap.sphen ? "Y" : "N");
6579 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6580 		   priv->dma_cap.vlins ? "Y" : "N");
6581 	seq_printf(seq, "\tDouble VLAN: %s\n",
6582 		   priv->dma_cap.dvlan ? "Y" : "N");
6583 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6584 		   priv->dma_cap.l3l4fnum);
6585 	seq_printf(seq, "\tARP Offloading: %s\n",
6586 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6587 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6588 		   priv->dma_cap.estsel ? "Y" : "N");
6589 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6590 		   priv->dma_cap.fpesel ? "Y" : "N");
6591 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6592 		   priv->dma_cap.tbssel ? "Y" : "N");
6593 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6594 		   priv->dma_cap.tbs_ch_num);
6595 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6596 		   priv->dma_cap.sgfsel ? "Y" : "N");
6597 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6598 		   BIT(priv->dma_cap.ttsfd) >> 1);
6599 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6600 		   priv->dma_cap.numtc);
6601 	seq_printf(seq, "\tDCB Feature: %s\n",
6602 		   priv->dma_cap.dcben ? "Y" : "N");
6603 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6604 		   priv->dma_cap.advthword ? "Y" : "N");
6605 	seq_printf(seq, "\tPTP Offload: %s\n",
6606 		   priv->dma_cap.ptoen ? "Y" : "N");
6607 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6608 		   priv->dma_cap.osten ? "Y" : "N");
6609 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6610 		   priv->dma_cap.pfcen ? "Y" : "N");
6611 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6612 		   BIT(priv->dma_cap.frpes) << 6);
6613 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6614 		   BIT(priv->dma_cap.frpbs) << 6);
6615 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6616 		   priv->dma_cap.frppipe_num);
6617 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6618 		   priv->dma_cap.nrvf_num ?
6619 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6620 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6621 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6622 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6623 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6624 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6625 		   priv->dma_cap.cbtisel ? "Y" : "N");
6626 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6627 		   priv->dma_cap.aux_snapshot_n);
6628 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6629 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6630 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6631 		   priv->dma_cap.edma ? "Y" : "N");
6632 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6633 		   priv->dma_cap.ediffc ? "Y" : "N");
6634 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6635 		   priv->dma_cap.vxn ? "Y" : "N");
6636 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6637 		   priv->dma_cap.dbgmem ? "Y" : "N");
6638 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6639 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6640 	return 0;
6641 }
6642 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6643 
6644 /* Use network device events to rename debugfs file entries.
6645  */
6646 static int stmmac_device_event(struct notifier_block *unused,
6647 			       unsigned long event, void *ptr)
6648 {
6649 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6650 	struct stmmac_priv *priv = netdev_priv(dev);
6651 
6652 	if (dev->netdev_ops != &stmmac_netdev_ops)
6653 		goto done;
6654 
6655 	switch (event) {
6656 	case NETDEV_CHANGENAME:
6657 		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6658 		break;
6659 	}
6660 done:
6661 	return NOTIFY_DONE;
6662 }
6663 
6664 static struct notifier_block stmmac_notifier = {
6665 	.notifier_call = stmmac_device_event,
6666 };
6667 
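/* Create the per-device debugfs entries (typically found under
 * /sys/kernel/debug/stmmaceth/<ifname>/): "descriptors_status" dumps the
 * DMA RX/TX descriptor rings and "dma_cap" dumps the decoded HW feature
 * (DMA capability) register shown above.
 */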
6668 static void stmmac_init_fs(struct net_device *dev)
6669 {
6670 	struct stmmac_priv *priv = netdev_priv(dev);
6671 
6672 	rtnl_lock();
6673 
6674 	/* Create per netdev entries */
6675 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6676 
6677 	/* Entry to report DMA RX/TX rings */
6678 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6679 			    &stmmac_rings_status_fops);
6680 
6681 	/* Entry to report the DMA HW features */
6682 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6683 			    &stmmac_dma_cap_fops);
6684 
6685 	rtnl_unlock();
6686 }
6687 
6688 static void stmmac_exit_fs(struct net_device *dev)
6689 {
6690 	struct stmmac_priv *priv = netdev_priv(dev);
6691 
6692 	debugfs_remove_recursive(priv->dbgfs_dir);
6693 }
6694 #endif /* CONFIG_DEBUG_FS */
6695 
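/* Compute the little-endian CRC-32 (polynomial 0xedb88320) over the 12 VID
 * bits of a VLAN tag; stmmac_vlan_update() bit-reverses the complemented
 * result and uses its top four bits to select one of 16 hash bins.
 */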
6696 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6697 {
6698 	unsigned char *data = (unsigned char *)&vid_le;
6699 	unsigned char data_byte = 0;
6700 	u32 crc = ~0x0;
6701 	u32 temp = 0;
6702 	int i, bits;
6703 
6704 	bits = get_bitmask_order(VLAN_VID_MASK);
6705 	for (i = 0; i < bits; i++) {
6706 		if ((i % 8) == 0)
6707 			data_byte = data[i / 8];
6708 
6709 		temp = ((crc & 1) ^ data_byte) & 1;
6710 		crc >>= 1;
6711 		data_byte >>= 1;
6712 
6713 		if (temp)
6714 			crc ^= 0xedb88320;
6715 	}
6716 
6717 	return crc;
6718 }
6719 
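/* Rebuild the VLAN filter state from the active_vlans bitmap: every active
 * VID is hashed into the hash bitmap, and when the hardware lacks VLAN hash
 * filtering we fall back to a single perfect-match entry (at most one VID
 * in addition to VID 0, which always passes the filter).
 */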
6720 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6721 {
6722 	u32 crc, hash = 0;
6723 	u16 pmatch = 0;
6724 	int count = 0;
6725 	u16 vid = 0;
6726 
6727 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6728 		__le16 vid_le = cpu_to_le16(vid);
6729 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6730 		hash |= (1 << crc);
6731 		count++;
6732 	}
6733 
6734 	if (!priv->dma_cap.vlhash) {
6735 		if (count > 2) /* VID = 0 always passes filter */
6736 			return -EOPNOTSUPP;
6737 
6738 		pmatch = vid;
6739 		hash = 0;
6740 	}
6741 
6742 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6743 }
6744 
6745 /* FIXME: This may need RXC to be running, but it may be called with BH
6746  * disabled, which means we can't call phylink_rx_clk_stop*().
6747  */
6748 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6749 {
6750 	struct stmmac_priv *priv = netdev_priv(ndev);
6751 	bool is_double = false;
6752 	int ret;
6753 
6754 	ret = pm_runtime_resume_and_get(priv->device);
6755 	if (ret < 0)
6756 		return ret;
6757 
6758 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6759 		is_double = true;
6760 
6761 	set_bit(vid, priv->active_vlans);
6762 	ret = stmmac_vlan_update(priv, is_double);
6763 	if (ret) {
6764 		clear_bit(vid, priv->active_vlans);
6765 		goto err_pm_put;
6766 	}
6767 
6768 	if (priv->hw->num_vlan) {
6769 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6770 		if (ret)
6771 			goto err_pm_put;
6772 	}
6773 err_pm_put:
6774 	pm_runtime_put(priv->device);
6775 
6776 	return ret;
6777 }
6778 
6779 /* FIXME: This may need RXC to be running, but it may be called with BH
6780  * disabled, which means we can't call phylink_rx_clk_stop*().
6781  */
6782 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6783 {
6784 	struct stmmac_priv *priv = netdev_priv(ndev);
6785 	bool is_double = false;
6786 	int ret;
6787 
6788 	ret = pm_runtime_resume_and_get(priv->device);
6789 	if (ret < 0)
6790 		return ret;
6791 
6792 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6793 		is_double = true;
6794 
6795 	clear_bit(vid, priv->active_vlans);
6796 
6797 	if (priv->hw->num_vlan) {
6798 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6799 		if (ret)
6800 			goto del_vlan_error;
6801 	}
6802 
6803 	ret = stmmac_vlan_update(priv, is_double);
6804 
6805 del_vlan_error:
6806 	pm_runtime_put(priv->device);
6807 
6808 	return ret;
6809 }
6810 
6811 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6812 {
6813 	struct stmmac_priv *priv = netdev_priv(dev);
6814 
6815 	switch (bpf->command) {
6816 	case XDP_SETUP_PROG:
6817 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6818 	case XDP_SETUP_XSK_POOL:
6819 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6820 					     bpf->xsk.queue_id);
6821 	default:
6822 		return -EOPNOTSUPP;
6823 	}
6824 }
6825 
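/* ndo_xdp_xmit() handler: XDP frames are transmitted on the TX queue mapped
 * to the current CPU, under that queue's netdev TX lock, since the queue is
 * shared with the regular (slow path) transmit code.
 */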
6826 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6827 			   struct xdp_frame **frames, u32 flags)
6828 {
6829 	struct stmmac_priv *priv = netdev_priv(dev);
6830 	int cpu = smp_processor_id();
6831 	struct netdev_queue *nq;
6832 	int i, nxmit = 0;
6833 	int queue;
6834 
6835 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6836 		return -ENETDOWN;
6837 
6838 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6839 		return -EINVAL;
6840 
6841 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6842 	nq = netdev_get_tx_queue(priv->dev, queue);
6843 
6844 	__netif_tx_lock(nq, cpu);
6845 	/* Avoids TX time-out as we are sharing with slow path */
6846 	txq_trans_cond_update(nq);
6847 
6848 	for (i = 0; i < num_frames; i++) {
6849 		int res;
6850 
6851 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6852 		if (res == STMMAC_XDP_CONSUMED)
6853 			break;
6854 
6855 		nxmit++;
6856 	}
6857 
6858 	if (flags & XDP_XMIT_FLUSH) {
6859 		stmmac_flush_tx_descriptors(priv, queue);
6860 		stmmac_tx_timer_arm(priv, queue);
6861 	}
6862 
6863 	__netif_tx_unlock(nq);
6864 
6865 	return nxmit;
6866 }
6867 
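/* The helpers below tear down or bring back up a single RX/TX queue at
 * runtime, e.g. when an AF_XDP buffer pool is attached to or detached from
 * that queue, without touching the other queues.
 */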
6868 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6869 {
6870 	struct stmmac_channel *ch = &priv->channel[queue];
6871 	unsigned long flags;
6872 
6873 	spin_lock_irqsave(&ch->lock, flags);
6874 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6875 	spin_unlock_irqrestore(&ch->lock, flags);
6876 
6877 	stmmac_stop_rx_dma(priv, queue);
6878 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6879 }
6880 
6881 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6882 {
6883 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6884 	struct stmmac_channel *ch = &priv->channel[queue];
6885 	unsigned long flags;
6886 	u32 buf_size;
6887 	int ret;
6888 
6889 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6890 	if (ret) {
6891 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6892 		return;
6893 	}
6894 
6895 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6896 	if (ret) {
6897 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6898 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6899 		return;
6900 	}
6901 
6902 	stmmac_reset_rx_queue(priv, queue);
6903 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6904 
6905 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6906 			    rx_q->dma_rx_phy, rx_q->queue_index);
6907 
6908 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6909 			     sizeof(struct dma_desc));
6910 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6911 			       rx_q->rx_tail_addr, rx_q->queue_index);
6912 
6913 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6914 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6915 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6916 				      buf_size,
6917 				      rx_q->queue_index);
6918 	} else {
6919 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6920 				      priv->dma_conf.dma_buf_sz,
6921 				      rx_q->queue_index);
6922 	}
6923 
6924 	stmmac_start_rx_dma(priv, queue);
6925 
6926 	spin_lock_irqsave(&ch->lock, flags);
6927 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6928 	spin_unlock_irqrestore(&ch->lock, flags);
6929 }
6930 
6931 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6932 {
6933 	struct stmmac_channel *ch = &priv->channel[queue];
6934 	unsigned long flags;
6935 
6936 	spin_lock_irqsave(&ch->lock, flags);
6937 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6938 	spin_unlock_irqrestore(&ch->lock, flags);
6939 
6940 	stmmac_stop_tx_dma(priv, queue);
6941 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6942 }
6943 
6944 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6945 {
6946 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6947 	struct stmmac_channel *ch = &priv->channel[queue];
6948 	unsigned long flags;
6949 	int ret;
6950 
6951 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6952 	if (ret) {
6953 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6954 		return;
6955 	}
6956 
6957 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6958 	if (ret) {
6959 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6960 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6961 		return;
6962 	}
6963 
6964 	stmmac_reset_tx_queue(priv, queue);
6965 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6966 
6967 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6968 			    tx_q->dma_tx_phy, tx_q->queue_index);
6969 
6970 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6971 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6972 
6973 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6974 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6975 			       tx_q->tx_tail_addr, tx_q->queue_index);
6976 
6977 	stmmac_start_tx_dma(priv, queue);
6978 
6979 	spin_lock_irqsave(&ch->lock, flags);
6980 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6981 	spin_unlock_irqrestore(&ch->lock, flags);
6982 }
6983 
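/* Quiesce the interface for an XDP-related reconfiguration: stop the TX
 * queues and NAPI, cancel the TX timers, free the IRQs, stop the DMA and
 * free the descriptor rings, then disable the MAC. stmmac_xdp_open() below
 * performs the reverse sequence.
 */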
6984 void stmmac_xdp_release(struct net_device *dev)
6985 {
6986 	struct stmmac_priv *priv = netdev_priv(dev);
6987 	u32 chan;
6988 
6989 	/* Ensure tx function is not running */
6990 	netif_tx_disable(dev);
6991 
6992 	/* Disable NAPI process */
6993 	stmmac_disable_all_queues(priv);
6994 
6995 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6996 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6997 
6998 	/* Free the IRQ lines */
6999 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
7000 
7001 	/* Stop TX/RX DMA channels */
7002 	stmmac_stop_all_dma(priv);
7003 
7004 	/* Release and free the Rx/Tx resources */
7005 	free_dma_desc_resources(priv, &priv->dma_conf);
7006 
7007 	/* Disable the MAC Rx/Tx */
7008 	stmmac_mac_set(priv, priv->ioaddr, false);
7009 
7010 	/* set trans_start so we don't get spurious
7011 	 * watchdogs during reset
7012 	 */
7013 	netif_trans_update(dev);
7014 	netif_carrier_off(dev);
7015 }
7016 
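/* Counterpart of stmmac_xdp_release(): re-allocate and re-initialise the
 * descriptor rings, reprogram all DMA channels and re-enable the MAC, IRQs
 * and NAPI.
 */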
7017 int stmmac_xdp_open(struct net_device *dev)
7018 {
7019 	struct stmmac_priv *priv = netdev_priv(dev);
7020 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7021 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7022 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
7023 	struct stmmac_rx_queue *rx_q;
7024 	struct stmmac_tx_queue *tx_q;
7025 	u32 buf_size;
7026 	bool sph_en;
7027 	u32 chan;
7028 	int ret;
7029 
7030 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
7031 	if (ret < 0) {
7032 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
7033 			   __func__);
7034 		goto dma_desc_error;
7035 	}
7036 
7037 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
7038 	if (ret < 0) {
7039 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
7040 			   __func__);
7041 		goto init_error;
7042 	}
7043 
7044 	stmmac_reset_queues_param(priv);
7045 
7046 	/* DMA CSR Channel configuration */
7047 	for (chan = 0; chan < dma_csr_ch; chan++) {
7048 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
7049 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
7050 	}
7051 
7052 	/* Adjust Split header */
7053 	sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
7054 
7055 	/* DMA RX Channel Configuration */
7056 	for (chan = 0; chan < rx_cnt; chan++) {
7057 		rx_q = &priv->dma_conf.rx_queue[chan];
7058 
7059 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7060 				    rx_q->dma_rx_phy, chan);
7061 
7062 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
7063 				     (rx_q->buf_alloc_num *
7064 				      sizeof(struct dma_desc));
7065 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
7066 				       rx_q->rx_tail_addr, chan);
7067 
7068 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
7069 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
7070 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7071 					      buf_size,
7072 					      rx_q->queue_index);
7073 		} else {
7074 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
7075 					      priv->dma_conf.dma_buf_sz,
7076 					      rx_q->queue_index);
7077 		}
7078 
7079 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
7080 	}
7081 
7082 	/* DMA TX Channel Configuration */
7083 	for (chan = 0; chan < tx_cnt; chan++) {
7084 		tx_q = &priv->dma_conf.tx_queue[chan];
7085 
7086 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7087 				    tx_q->dma_tx_phy, chan);
7088 
7089 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7090 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7091 				       tx_q->tx_tail_addr, chan);
7092 
7093 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7094 	}
7095 
7096 	/* Enable the MAC Rx/Tx */
7097 	stmmac_mac_set(priv, priv->ioaddr, true);
7098 
7099 	/* Start Rx & Tx DMA Channels */
7100 	stmmac_start_all_dma(priv);
7101 
7102 	ret = stmmac_request_irq(dev);
7103 	if (ret)
7104 		goto irq_error;
7105 
7106 	/* Enable NAPI process */
7107 	stmmac_enable_all_queues(priv);
7108 	netif_carrier_on(dev);
7109 	netif_tx_start_all_queues(dev);
7110 	stmmac_enable_all_dma_irq(priv);
7111 
7112 	return 0;
7113 
7114 irq_error:
7115 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7116 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7117 
7118 init_error:
7119 	free_dma_desc_resources(priv, &priv->dma_conf);
7120 dma_desc_error:
7121 	return ret;
7122 }
7123 
7124 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7125 {
7126 	struct stmmac_priv *priv = netdev_priv(dev);
7127 	struct stmmac_rx_queue *rx_q;
7128 	struct stmmac_tx_queue *tx_q;
7129 	struct stmmac_channel *ch;
7130 
7131 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7132 	    !netif_carrier_ok(priv->dev))
7133 		return -ENETDOWN;
7134 
7135 	if (!stmmac_xdp_is_enabled(priv))
7136 		return -EINVAL;
7137 
7138 	if (queue >= priv->plat->rx_queues_to_use ||
7139 	    queue >= priv->plat->tx_queues_to_use)
7140 		return -EINVAL;
7141 
7142 	rx_q = &priv->dma_conf.rx_queue[queue];
7143 	tx_q = &priv->dma_conf.tx_queue[queue];
7144 	ch = &priv->channel[queue];
7145 
7146 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7147 		return -EINVAL;
7148 
7149 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7150 		/* EQoS does not have per-DMA channel SW interrupt,
7151 		 * so we schedule RX Napi straight-away.
7152 		 */
7153 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7154 			__napi_schedule(&ch->rxtx_napi);
7155 	}
7156 
7157 	return 0;
7158 }
7159 
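/* Aggregate the per-queue software counters (reads are done under u64_stats
 * sync so that 64-bit values stay consistent on 32-bit hosts) and fill in
 * the error counters kept in priv->xstats.
 */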
7160 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7161 {
7162 	struct stmmac_priv *priv = netdev_priv(dev);
7163 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7164 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7165 	unsigned int start;
7166 	int q;
7167 
7168 	for (q = 0; q < tx_cnt; q++) {
7169 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7170 		u64 tx_packets;
7171 		u64 tx_bytes;
7172 
7173 		do {
7174 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7175 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7176 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7177 		do {
7178 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7179 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7180 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7181 
7182 		stats->tx_packets += tx_packets;
7183 		stats->tx_bytes += tx_bytes;
7184 	}
7185 
7186 	for (q = 0; q < rx_cnt; q++) {
7187 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7188 		u64 rx_packets;
7189 		u64 rx_bytes;
7190 
7191 		do {
7192 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7193 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7194 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7195 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7196 
7197 		stats->rx_packets += rx_packets;
7198 		stats->rx_bytes += rx_bytes;
7199 	}
7200 
7201 	stats->rx_dropped = priv->xstats.rx_dropped;
7202 	stats->rx_errors = priv->xstats.rx_errors;
7203 	stats->tx_dropped = priv->xstats.tx_dropped;
7204 	stats->tx_errors = priv->xstats.tx_errors;
7205 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7206 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7207 	stats->rx_length_errors = priv->xstats.rx_length;
7208 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7209 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7210 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7211 }
7212 
7213 static const struct net_device_ops stmmac_netdev_ops = {
7214 	.ndo_open = stmmac_open,
7215 	.ndo_start_xmit = stmmac_xmit,
7216 	.ndo_stop = stmmac_release,
7217 	.ndo_change_mtu = stmmac_change_mtu,
7218 	.ndo_fix_features = stmmac_fix_features,
7219 	.ndo_set_features = stmmac_set_features,
7220 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7221 	.ndo_tx_timeout = stmmac_tx_timeout,
7222 	.ndo_eth_ioctl = stmmac_ioctl,
7223 	.ndo_get_stats64 = stmmac_get_stats64,
7224 	.ndo_setup_tc = stmmac_setup_tc,
7225 	.ndo_select_queue = stmmac_select_queue,
7226 	.ndo_set_mac_address = stmmac_set_mac_address,
7227 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7228 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7229 	.ndo_bpf = stmmac_bpf,
7230 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7231 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7232 	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
7233 	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
7234 };
7235 
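/* Service-task helper: when STMMAC_RESET_REQUESTED has been set (e.g. by the
 * TX timeout or fatal error handlers), restart the adapter with a
 * dev_close()/dev_open() cycle under the rtnl lock.
 */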
7236 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7237 {
7238 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7239 		return;
7240 	if (test_bit(STMMAC_DOWN, &priv->state))
7241 		return;
7242 
7243 	netdev_err(priv->dev, "Reset adapter.\n");
7244 
7245 	rtnl_lock();
7246 	netif_trans_update(priv->dev);
7247 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7248 		usleep_range(1000, 2000);
7249 
7250 	set_bit(STMMAC_DOWN, &priv->state);
7251 	dev_close(priv->dev);
7252 	dev_open(priv->dev, NULL);
7253 	clear_bit(STMMAC_DOWN, &priv->state);
7254 	clear_bit(STMMAC_RESETING, &priv->state);
7255 	rtnl_unlock();
7256 }
7257 
7258 static void stmmac_service_task(struct work_struct *work)
7259 {
7260 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7261 			service_task);
7262 
7263 	stmmac_reset_subtask(priv);
7264 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7265 }
7266 
7267 /**
7268  *  stmmac_hw_init - Init the MAC device
7269  *  @priv: driver private structure
7270  *  Description: this function configures the MAC device according to
7271  *  the platform parameters and the HW capability register. It prepares
7272  *  the driver to use either ring or chain mode and either enhanced or
7273  *  normal descriptors.
7274  */
7275 static int stmmac_hw_init(struct stmmac_priv *priv)
7276 {
7277 	int ret;
7278 
7279 	/* dwmac-sun8i only work in chain mode */
7280 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7281 		chain_mode = 1;
7282 	priv->chain_mode = chain_mode;
7283 
7284 	/* Initialize HW Interface */
7285 	ret = stmmac_hwif_init(priv);
7286 	if (ret)
7287 		return ret;
7288 
7289 	/* Get the HW capability register (GMAC cores newer than 3.50a) */
7290 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7291 	if (priv->hw_cap_support) {
7292 		dev_info(priv->device, "DMA HW capability register supported\n");
7293 
7294 		/* Some gmac/dma configuration fields passed through the
7295 		 * platform data (e.g. enh_desc, tx_coe) can be overridden
7296 		 * with the values from the HW capability register, if
7297 		 * supported.
7298 		 */
7299 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7300 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7301 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7302 		if (priv->dma_cap.hash_tb_sz) {
7303 			priv->hw->multicast_filter_bins =
7304 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7305 			priv->hw->mcast_bits_log2 =
7306 					ilog2(priv->hw->multicast_filter_bins);
7307 		}
7308 
7309 		/* TXCOE doesn't work in thresh DMA mode */
7310 		if (priv->plat->force_thresh_dma_mode)
7311 			priv->plat->tx_coe = 0;
7312 		else
7313 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7314 
7315 		/* In case of GMAC4 rx_coe is from HW cap register. */
7316 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7317 
7318 		if (priv->dma_cap.rx_coe_type2)
7319 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7320 		else if (priv->dma_cap.rx_coe_type1)
7321 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7322 
7323 	} else {
7324 		dev_info(priv->device, "No HW DMA feature register supported\n");
7325 	}
7326 
7327 	if (priv->plat->rx_coe) {
7328 		priv->hw->rx_csum = priv->plat->rx_coe;
7329 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7330 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7331 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7332 	}
7333 	if (priv->plat->tx_coe)
7334 		dev_info(priv->device, "TX Checksum insertion supported\n");
7335 
7336 	if (priv->plat->pmt) {
7337 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7338 		device_set_wakeup_capable(priv->device, 1);
7339 		devm_pm_set_wake_irq(priv->device, priv->wol_irq);
7340 	}
7341 
7342 	if (priv->dma_cap.tsoen)
7343 		dev_info(priv->device, "TSO supported\n");
7344 
7345 	if (priv->dma_cap.number_rx_queues &&
7346 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7347 		dev_warn(priv->device,
7348 			 "Number of Rx queues (%u) exceeds dma capability\n",
7349 			 priv->plat->rx_queues_to_use);
7350 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7351 	}
7352 	if (priv->dma_cap.number_tx_queues &&
7353 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7354 		dev_warn(priv->device,
7355 			 "Number of Tx queues (%u) exceeds dma capability\n",
7356 			 priv->plat->tx_queues_to_use);
7357 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7358 	}
7359 
7360 	if (priv->dma_cap.rx_fifo_size &&
7361 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7362 		dev_warn(priv->device,
7363 			 "Rx FIFO size (%u) exceeds dma capability\n",
7364 			 priv->plat->rx_fifo_size);
7365 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7366 	}
7367 	if (priv->dma_cap.tx_fifo_size &&
7368 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7369 		dev_warn(priv->device,
7370 			 "Tx FIFO size (%u) exceeds dma capability\n",
7371 			 priv->plat->tx_fifo_size);
7372 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7373 	}
7374 
7375 	priv->hw->vlan_fail_q_en =
7376 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7377 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7378 
7379 	/* Run HW quirks, if any */
7380 	if (priv->hwif_quirks) {
7381 		ret = priv->hwif_quirks(priv);
7382 		if (ret)
7383 			return ret;
7384 	}
7385 
7386 	/* Rx Watchdog is available in cores newer than 3.40.
7387 	 * In some cases, for example on buggy HW, this feature
7388 	 * has to be disabled, which can be done by setting the
7389 	 * riwt_off field in the platform data.
7390 	 */
7391 	if ((priv->synopsys_id >= DWMAC_CORE_3_50 ||
7392 	     priv->plat->core_type == DWMAC_CORE_XGMAC) &&
7393 	    !priv->plat->riwt_off) {
7394 		priv->use_riwt = 1;
7395 		dev_info(priv->device,
7396 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7397 	}
7398 
7399 	/* Unimplemented PCS init (as indicated by stmmac_do_callback()
7400 	 * perversely returning -EINVAL) is non-fatal.
7401 	 */
7402 	ret = stmmac_mac_pcs_init(priv);
7403 	if (ret != -EINVAL)
7404 		return ret;
7405 
7406 	return 0;
7407 }
7408 
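/* Register the NAPI contexts for each channel: an RX NAPI and a TX NAPI for
 * channels that own the corresponding queue, plus a combined rx/tx NAPI
 * (used e.g. by the XSK zero-copy path) when the channel has both.
 */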
7409 static void stmmac_napi_add(struct net_device *dev)
7410 {
7411 	struct stmmac_priv *priv = netdev_priv(dev);
7412 	u32 queue, maxq;
7413 
7414 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7415 
7416 	for (queue = 0; queue < maxq; queue++) {
7417 		struct stmmac_channel *ch = &priv->channel[queue];
7418 
7419 		ch->priv_data = priv;
7420 		ch->index = queue;
7421 		spin_lock_init(&ch->lock);
7422 
7423 		if (queue < priv->plat->rx_queues_to_use) {
7424 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7425 		}
7426 		if (queue < priv->plat->tx_queues_to_use) {
7427 			netif_napi_add_tx(dev, &ch->tx_napi,
7428 					  stmmac_napi_poll_tx);
7429 		}
7430 		if (queue < priv->plat->rx_queues_to_use &&
7431 		    queue < priv->plat->tx_queues_to_use) {
7432 			netif_napi_add(dev, &ch->rxtx_napi,
7433 				       stmmac_napi_poll_rxtx);
7434 		}
7435 	}
7436 }
7437 
7438 static void stmmac_napi_del(struct net_device *dev)
7439 {
7440 	struct stmmac_priv *priv = netdev_priv(dev);
7441 	u32 queue, maxq;
7442 
7443 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7444 
7445 	for (queue = 0; queue < maxq; queue++) {
7446 		struct stmmac_channel *ch = &priv->channel[queue];
7447 
7448 		if (queue < priv->plat->rx_queues_to_use)
7449 			netif_napi_del(&ch->rx_napi);
7450 		if (queue < priv->plat->tx_queues_to_use)
7451 			netif_napi_del(&ch->tx_napi);
7452 		if (queue < priv->plat->rx_queues_to_use &&
7453 		    queue < priv->plat->tx_queues_to_use) {
7454 			netif_napi_del(&ch->rxtx_napi);
7455 		}
7456 	}
7457 }
7458 
7459 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7460 {
7461 	struct stmmac_priv *priv = netdev_priv(dev);
7462 	int ret = 0, i;
7463 
7464 	if (netif_running(dev))
7465 		stmmac_release(dev);
7466 
7467 	stmmac_napi_del(dev);
7468 
7469 	priv->plat->rx_queues_to_use = rx_cnt;
7470 	priv->plat->tx_queues_to_use = tx_cnt;
7471 	if (!netif_is_rxfh_configured(dev))
7472 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7473 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7474 									rx_cnt);
7475 
7476 	stmmac_napi_add(dev);
7477 
7478 	if (netif_running(dev))
7479 		ret = stmmac_open(dev);
7480 
7481 	return ret;
7482 }
7483 
7484 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7485 {
7486 	struct stmmac_priv *priv = netdev_priv(dev);
7487 	int ret = 0;
7488 
7489 	if (netif_running(dev))
7490 		stmmac_release(dev);
7491 
7492 	priv->dma_conf.dma_rx_size = rx_size;
7493 	priv->dma_conf.dma_tx_size = tx_size;
7494 
7495 	if (netif_running(dev))
7496 		ret = stmmac_open(dev);
7497 
7498 	return ret;
7499 }
7500 
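/* XDP RX metadata hook: report the hardware RX timestamp for the frame
 * described by the stmmac_xdp_buff context, if RX timestamping is enabled
 * and a timestamp is available, after applying the CDC error adjustment.
 */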
7501 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7502 {
7503 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7504 	struct dma_desc *desc_contains_ts = ctx->desc;
7505 	struct stmmac_priv *priv = ctx->priv;
7506 	struct dma_desc *ndesc = ctx->ndesc;
7507 	struct dma_desc *desc = ctx->desc;
7508 	u64 ns = 0;
7509 
7510 	if (!priv->hwts_rx_en)
7511 		return -ENODATA;
7512 
7513 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7514 	if (dwmac_is_xmac(priv->plat->core_type))
7515 		desc_contains_ts = ndesc;
7516 
7517 	/* Check if timestamp is available */
7518 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7519 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7520 		ns -= priv->plat->cdc_error_adj;
7521 		*timestamp = ns_to_ktime(ns);
7522 		return 0;
7523 	}
7524 
7525 	return -ENODATA;
7526 }
7527 
7528 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7529 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7530 };
7531 
7532 static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
7533 				   struct devlink_param_gset_ctx *ctx,
7534 				   struct netlink_ext_ack *extack)
7535 {
7536 	struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
7537 	struct stmmac_priv *priv = dl_priv->stmmac_priv;
7538 
7539 	priv->tsfupdt_coarse = ctx->val.vbool;
7540 
7541 	if (priv->tsfupdt_coarse)
7542 		priv->systime_flags &= ~PTP_TCR_TSCFUPDT;
7543 	else
7544 		priv->systime_flags |= PTP_TCR_TSCFUPDT;
7545 
7546 	/* In coarse mode we can use a smaller subsecond increment, so
7547 	 * reconfigure the systime, subsecond increment and addend.
7548 	 */
7549 	stmmac_update_subsecond_increment(priv);
7550 
7551 	return 0;
7552 }
7553 
7554 static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
7555 				   struct devlink_param_gset_ctx *ctx,
7556 				   struct netlink_ext_ack *extack)
7557 {
7558 	struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
7559 	struct stmmac_priv *priv = dl_priv->stmmac_priv;
7560 
7561 	ctx->val.vbool = priv->tsfupdt_coarse;
7562 
7563 	return 0;
7564 }
7565 
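/* Driver-specific devlink runtime parameter; it can be changed from
 * userspace with the devlink tool, e.g. (hypothetical device name):
 *   devlink dev param set platform/30bf0000.ethernet \
 *           name phc_coarse_adj value true cmode runtime
 */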
7566 static const struct devlink_param stmmac_devlink_params[] = {
7567 	DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "phc_coarse_adj",
7568 			     DEVLINK_PARAM_TYPE_BOOL,
7569 			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
7570 			     stmmac_dl_ts_coarse_get,
7571 			     stmmac_dl_ts_coarse_set, NULL),
7572 };
7573 
7574 /* None of the generic devlink parameters are implemented */
7575 static const struct devlink_ops stmmac_devlink_ops = {};
7576 
7577 static int stmmac_register_devlink(struct stmmac_priv *priv)
7578 {
7579 	struct stmmac_devlink_priv *dl_priv;
7580 	int ret;
7581 
7582 	/* For now, what is exposed over devlink is only relevant when
7583 	 * timestamping is available and we have a valid ptp clock rate
7584 	 */
7585 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) ||
7586 	    !priv->plat->clk_ptp_rate)
7587 		return 0;
7588 
7589 	priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv),
7590 				      priv->device);
7591 	if (!priv->devlink)
7592 		return -ENOMEM;
7593 
7594 	dl_priv = devlink_priv(priv->devlink);
7595 	dl_priv->stmmac_priv = priv;
7596 
7597 	ret = devlink_params_register(priv->devlink, stmmac_devlink_params,
7598 				      ARRAY_SIZE(stmmac_devlink_params));
7599 	if (ret)
7600 		goto dl_free;
7601 
7602 	devlink_register(priv->devlink);
7603 	return 0;
7604 
7605 dl_free:
7606 	devlink_free(priv->devlink);
7607 
7608 	return ret;
7609 }
7610 
7611 static void stmmac_unregister_devlink(struct stmmac_priv *priv)
7612 {
7613 	if (!priv->devlink)
7614 		return;
7615 
7616 	devlink_unregister(priv->devlink);
7617 	devlink_params_unregister(priv->devlink, stmmac_devlink_params,
7618 				  ARRAY_SIZE(stmmac_devlink_params));
7619 	devlink_free(priv->devlink);
7620 }
7621 
7622 struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev)
7623 {
7624 	struct plat_stmmacenet_data *plat_dat;
7625 	int i;
7626 
7627 	plat_dat = devm_kzalloc(dev, sizeof(*plat_dat), GFP_KERNEL);
7628 	if (!plat_dat)
7629 		return NULL;
7630 
7631 	/* Set the defaults:
7632 	 * - phy autodetection
7633 	 * - determine GMII_Address CR field from CSR clock
7634 	 * - allow MTU up to JUMBO_LEN
7635 	 * - hash table size
7636 	 * - one unicast filter entry
7637 	 */
7638 	plat_dat->phy_addr = -1;
7639 	plat_dat->clk_csr = -1;
7640 	plat_dat->maxmtu = JUMBO_LEN;
7641 	plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
7642 	plat_dat->unicast_filter_entries = 1;
7643 
7644 	/* Set the mtl defaults */
7645 	plat_dat->tx_queues_to_use = 1;
7646 	plat_dat->rx_queues_to_use = 1;
7647 
7648 	/* Setup the default RX queue channel map */
7649 	for (i = 0; i < ARRAY_SIZE(plat_dat->rx_queues_cfg); i++)
7650 		plat_dat->rx_queues_cfg[i].chan = i;
7651 
7652 	return plat_dat;
7653 }
7654 EXPORT_SYMBOL_GPL(stmmac_plat_dat_alloc);
7655 
7656 static int __stmmac_dvr_probe(struct device *device,
7657 			      struct plat_stmmacenet_data *plat_dat,
7658 			      struct stmmac_resources *res)
7659 {
7660 	struct net_device *ndev = NULL;
7661 	struct stmmac_priv *priv;
7662 	u32 rxq;
7663 	int i, ret = 0;
7664 
7665 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7666 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7667 	if (!ndev)
7668 		return -ENOMEM;
7669 
7670 	SET_NETDEV_DEV(ndev, device);
7671 
7672 	priv = netdev_priv(ndev);
7673 	priv->device = device;
7674 	priv->dev = ndev;
7675 
7676 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7677 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7678 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7679 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7680 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7681 	}
7682 
7683 	priv->xstats.pcpu_stats =
7684 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7685 	if (!priv->xstats.pcpu_stats)
7686 		return -ENOMEM;
7687 
7688 	stmmac_set_ethtool_ops(ndev);
7689 	priv->pause_time = pause;
7690 	priv->plat = plat_dat;
7691 	priv->ioaddr = res->addr;
7692 	priv->dev->base_addr = (unsigned long)res->addr;
7693 	priv->plat->dma_cfg->multi_msi_en =
7694 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7695 
7696 	priv->dev->irq = res->irq;
7697 	priv->wol_irq = res->wol_irq;
7698 	priv->lpi_irq = res->lpi_irq;
7699 	priv->sfty_irq = res->sfty_irq;
7700 	priv->sfty_ce_irq = res->sfty_ce_irq;
7701 	priv->sfty_ue_irq = res->sfty_ue_irq;
7702 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7703 		priv->rx_irq[i] = res->rx_irq[i];
7704 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7705 		priv->tx_irq[i] = res->tx_irq[i];
7706 
7707 	if (!is_zero_ether_addr(res->mac))
7708 		eth_hw_addr_set(priv->dev, res->mac);
7709 
7710 	dev_set_drvdata(device, priv->dev);
7711 
7712 	/* Verify driver arguments */
7713 	stmmac_verify_args();
7714 
7715 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7716 	if (!priv->af_xdp_zc_qps)
7717 		return -ENOMEM;
7718 
7719 	/* Allocate workqueue */
7720 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7721 	if (!priv->wq) {
7722 		dev_err(priv->device, "failed to create workqueue\n");
7723 		ret = -ENOMEM;
7724 		goto error_wq_init;
7725 	}
7726 
7727 	INIT_WORK(&priv->service_task, stmmac_service_task);
7728 
7729 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7730 
7731 	/* Override with kernel parameters if supplied. XXX CRS XXX:
7732 	 * this needs to have multiple instances.
7733 	 */
7734 	if ((phyaddr >= 0) && (phyaddr <= 31))
7735 		priv->plat->phy_addr = phyaddr;
7736 
7737 	if (priv->plat->stmmac_rst) {
7738 		ret = reset_control_assert(priv->plat->stmmac_rst);
7739 		reset_control_deassert(priv->plat->stmmac_rst);
7740 		/* Some reset controllers provide only a reset callback
7741 		 * instead of an assert + deassert callback pair.
7742 		 */
7743 		if (ret == -ENOTSUPP)
7744 			reset_control_reset(priv->plat->stmmac_rst);
7745 	}
7746 
7747 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7748 	if (ret == -ENOTSUPP)
7749 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7750 			ERR_PTR(ret));
7751 
7752 	/* Wait a bit for the reset to take effect */
7753 	udelay(10);
7754 
7755 	/* Init MAC and get the capabilities */
7756 	ret = stmmac_hw_init(priv);
7757 	if (ret)
7758 		goto error_hw_init;
7759 
7760 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7761 	 */
7762 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7763 		priv->plat->dma_cfg->dche = false;
7764 
7765 	stmmac_check_ether_addr(priv);
7766 
7767 	ndev->netdev_ops = &stmmac_netdev_ops;
7768 
7769 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7770 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7771 
7772 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7773 			    NETIF_F_RXCSUM;
7774 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7775 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7776 
7777 	ret = stmmac_tc_init(priv, priv);
7778 	if (!ret) {
7779 		ndev->hw_features |= NETIF_F_HW_TC;
7780 	}
7781 
7782 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7783 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7784 		if (priv->plat->core_type == DWMAC_CORE_GMAC4)
7785 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7786 		priv->tso = true;
7787 		dev_info(priv->device, "TSO feature enabled\n");
7788 	}
7789 
7790 	if (priv->dma_cap.sphen &&
7791 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7792 		ndev->hw_features |= NETIF_F_GRO;
7793 		priv->sph_capable = true;
7794 		priv->sph_active = priv->sph_capable;
7795 		dev_info(priv->device, "SPH feature enabled\n");
7796 	}
7797 
7798 	/* Ideally our host DMA address width is the same as for the
7799 	 * device. However, it may differ and then we have to use our
7800 	 * host DMA width for allocation and the device DMA width for
7801 	 * register handling.
7802 	 */
7803 	if (priv->plat->host_dma_width)
7804 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7805 	else
7806 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7807 
7808 	if (priv->dma_cap.host_dma_width) {
7809 		ret = dma_set_mask_and_coherent(device,
7810 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7811 		if (!ret) {
7812 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7813 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7814 
7815 			/*
7816 			 * If more than 32 bits can be addressed, make sure to
7817 			 * enable enhanced addressing mode.
7818 			 */
7819 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7820 				priv->plat->dma_cfg->eame = true;
7821 		} else {
7822 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7823 			if (ret) {
7824 				dev_err(priv->device, "Failed to set DMA Mask\n");
7825 				goto error_hw_init;
7826 			}
7827 
7828 			priv->dma_cap.host_dma_width = 32;
7829 		}
7830 	}
7831 
7832 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7833 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7834 #ifdef STMMAC_VLAN_TAG_USED
7835 	/* Both mac100 and gmac support receive VLAN tag detection */
7836 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7837 	if (dwmac_is_xmac(priv->plat->core_type)) {
7838 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7839 		priv->hw->hw_vlan_en = true;
7840 	}
7841 	if (priv->dma_cap.vlhash) {
7842 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7843 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7844 	}
7845 	if (priv->dma_cap.vlins)
7846 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7847 #endif
7848 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7849 
7850 	priv->xstats.threshold = tc;
7851 
7852 	/* Initialize RSS */
7853 	rxq = priv->plat->rx_queues_to_use;
7854 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7855 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7856 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7857 
7858 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7859 		ndev->features |= NETIF_F_RXHASH;
7860 
7861 	ndev->vlan_features |= ndev->features;
7862 
7863 	/* MTU range: 46 - hw-specific max */
7864 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7865 
7866 	if (priv->plat->core_type == DWMAC_CORE_XGMAC)
7867 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7868 	else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00)
7869 		ndev->max_mtu = JUMBO_LEN;
7870 	else
7871 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7872 
7873 	/* Warn if the platform's maxmtu is smaller than the minimum MTU,
7874 	 * otherwise clamp the maximum MTU above to the platform's maxmtu.
7875 	 */
7876 	if (priv->plat->maxmtu < ndev->min_mtu)
7877 		dev_warn(priv->device,
7878 			 "%s: warning: maxmtu having invalid value (%d)\n",
7879 			 __func__, priv->plat->maxmtu);
7880 	else if (priv->plat->maxmtu < ndev->max_mtu)
7881 		ndev->max_mtu = priv->plat->maxmtu;
7882 
7883 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7884 
7885 	/* Setup channels NAPI */
7886 	stmmac_napi_add(ndev);
7887 
7888 	mutex_init(&priv->lock);
7889 
7890 	stmmac_fpe_init(priv);
7891 
7892 	stmmac_check_pcs_mode(priv);
7893 
7894 	pm_runtime_get_noresume(device);
7895 	pm_runtime_set_active(device);
7896 	if (!pm_runtime_enabled(device))
7897 		pm_runtime_enable(device);
7898 
7899 	ret = stmmac_mdio_register(ndev);
7900 	if (ret < 0) {
7901 		dev_err_probe(priv->device, ret,
7902 			      "MDIO bus (id: %d) registration failed\n",
7903 			      priv->plat->bus_id);
7904 		goto error_mdio_register;
7905 	}
7906 
7907 	ret = stmmac_pcs_setup(ndev);
7908 	if (ret)
7909 		goto error_pcs_setup;
7910 
7911 	ret = stmmac_phylink_setup(priv);
7912 	if (ret) {
7913 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7914 		goto error_phy_setup;
7915 	}
7916 
7917 	ret = stmmac_register_devlink(priv);
7918 	if (ret)
7919 		goto error_devlink_setup;
7920 
7921 	ret = register_netdev(ndev);
7922 	if (ret) {
7923 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7924 			__func__, ret);
7925 		goto error_netdev_register;
7926 	}
7927 
7928 #ifdef CONFIG_DEBUG_FS
7929 	stmmac_init_fs(ndev);
7930 #endif
7931 
7932 	if (priv->plat->dump_debug_regs)
7933 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7934 
7935 	/* Let pm_runtime_put() disable the clocks.
7936 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7937 	 */
7938 	pm_runtime_put(device);
7939 
7940 	return ret;
7941 
7942 error_netdev_register:
7943 	stmmac_unregister_devlink(priv);
7944 error_devlink_setup:
7945 	phylink_destroy(priv->phylink);
7946 error_phy_setup:
7947 	stmmac_pcs_clean(ndev);
7948 error_pcs_setup:
7949 	stmmac_mdio_unregister(ndev);
7950 error_mdio_register:
7951 	stmmac_napi_del(ndev);
7952 error_hw_init:
7953 	destroy_workqueue(priv->wq);
7954 error_wq_init:
7955 	bitmap_free(priv->af_xdp_zc_qps);
7956 
7957 	return ret;
7958 }
7959 
7960 /**
7961  * stmmac_dvr_probe
7962  * @dev: device pointer
7963  * @plat_dat: platform data pointer
7964  * @res: stmmac resource pointer
7965  * Description: this is the main probe function; it allocates the network
7966  * device and the private structure and initializes the hardware.
7967  * Return:
7968  * 0 on success, otherwise a negative errno.
7969  */
7970 int stmmac_dvr_probe(struct device *dev, struct plat_stmmacenet_data *plat_dat,
7971 		     struct stmmac_resources *res)
7972 {
7973 	int ret;
7974 
7975 	if (plat_dat->init) {
7976 		ret = plat_dat->init(dev, plat_dat->bsp_priv);
7977 		if (ret)
7978 			return ret;
7979 	}
7980 
7981 	ret = __stmmac_dvr_probe(dev, plat_dat, res);
7982 	if (ret && plat_dat->exit)
7983 		plat_dat->exit(dev, plat_dat->bsp_priv);
7984 
7985 	return ret;
7986 }
7987 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7988 
7989 /**
7990  * stmmac_dvr_remove
7991  * @dev: device pointer
7992  * Description: this function resets the TX/RX processes, disables the MAC
7993  * RX/TX, changes the link status and releases the DMA descriptor rings.
7994  */
7995 void stmmac_dvr_remove(struct device *dev)
7996 {
7997 	struct net_device *ndev = dev_get_drvdata(dev);
7998 	struct stmmac_priv *priv = netdev_priv(ndev);
7999 
8000 	netdev_info(priv->dev, "%s: removing driver", __func__);
8001 
8002 	pm_runtime_get_sync(dev);
8003 
8004 	unregister_netdev(ndev);
8005 
8006 #ifdef CONFIG_DEBUG_FS
8007 	stmmac_exit_fs(ndev);
8008 #endif
8009 	stmmac_unregister_devlink(priv);
8010 
8011 	phylink_destroy(priv->phylink);
8012 	if (priv->plat->stmmac_rst)
8013 		reset_control_assert(priv->plat->stmmac_rst);
8014 	reset_control_assert(priv->plat->stmmac_ahb_rst);
8015 
8016 	stmmac_pcs_clean(ndev);
8017 	stmmac_mdio_unregister(ndev);
8018 
8019 	destroy_workqueue(priv->wq);
8020 	mutex_destroy(&priv->lock);
8021 	bitmap_free(priv->af_xdp_zc_qps);
8022 
8023 	pm_runtime_disable(dev);
8024 	pm_runtime_put_noidle(dev);
8025 
8026 	if (priv->plat->exit)
8027 		priv->plat->exit(dev, priv->plat->bsp_priv);
8028 }
8029 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
8030 
8031 /**
8032  * stmmac_suspend - suspend callback
8033  * @dev: device pointer
8034  * Description: this function suspends the device. It is called by the
8035  * platform driver to stop the network queues, release the resources,
8036  * program the PMT register (for WoL) and release the driver resources.
8037  */
8038 int stmmac_suspend(struct device *dev)
8039 {
8040 	struct net_device *ndev = dev_get_drvdata(dev);
8041 	struct stmmac_priv *priv = netdev_priv(ndev);
8042 	u32 chan;
8043 
8044 	if (!ndev || !netif_running(ndev))
8045 		return 0;
8046 
8047 	mutex_lock(&priv->lock);
8048 
8049 	netif_device_detach(ndev);
8050 
8051 	stmmac_disable_all_queues(priv);
8052 
8053 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
8054 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
8055 
8056 	if (priv->eee_sw_timer_en) {
8057 		priv->tx_path_in_lpi_mode = false;
8058 		timer_delete_sync(&priv->eee_ctrl_timer);
8059 	}
8060 
8061 	/* Stop TX/RX DMA */
8062 	stmmac_stop_all_dma(priv);
8063 
8064 	if (priv->plat->serdes_powerdown)
8065 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
8066 
8067 	/* Enable Power down mode by programming the PMT regs */
8068 	if (priv->wolopts) {
8069 		stmmac_pmt(priv, priv->hw, priv->wolopts);
8070 		priv->irq_wake = 1;
8071 	} else {
8072 		stmmac_mac_set(priv, priv->ioaddr, false);
8073 		pinctrl_pm_select_sleep_state(priv->device);
8074 	}
8075 
8076 	mutex_unlock(&priv->lock);
8077 
8078 	rtnl_lock();
8079 	phylink_suspend(priv->phylink, !!priv->wolopts);
8080 	rtnl_unlock();
8081 
8082 	if (stmmac_fpe_supported(priv))
8083 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
8084 
8085 	if (priv->plat->suspend)
8086 		return priv->plat->suspend(dev, priv->plat->bsp_priv);
8087 
8088 	return 0;
8089 }
8090 EXPORT_SYMBOL_GPL(stmmac_suspend);
8091 
8092 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
8093 {
8094 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
8095 
8096 	rx_q->cur_rx = 0;
8097 	rx_q->dirty_rx = 0;
8098 }
8099 
8100 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
8101 {
8102 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
8103 
8104 	tx_q->cur_tx = 0;
8105 	tx_q->dirty_tx = 0;
8106 	tx_q->mss = 0;
8107 
8108 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
8109 }
8110 
8111 /**
8112  * stmmac_reset_queues_param - reset queue parameters
8113  * @priv: device pointer
8114  */
8115 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
8116 {
8117 	u32 rx_cnt = priv->plat->rx_queues_to_use;
8118 	u32 tx_cnt = priv->plat->tx_queues_to_use;
8119 	u32 queue;
8120 
8121 	for (queue = 0; queue < rx_cnt; queue++)
8122 		stmmac_reset_rx_queue(priv, queue);
8123 
8124 	for (queue = 0; queue < tx_cnt; queue++)
8125 		stmmac_reset_tx_queue(priv, queue);
8126 }
8127 
8128 /**
8129  * stmmac_resume - resume callback
8130  * @dev: device pointer
8131  * Description: on resume this function is invoked to set up the DMA and
8132  * the core in a usable state.
8133  */
8134 int stmmac_resume(struct device *dev)
8135 {
8136 	struct net_device *ndev = dev_get_drvdata(dev);
8137 	struct stmmac_priv *priv = netdev_priv(ndev);
8138 	int ret;
8139 
8140 	if (priv->plat->resume) {
8141 		ret = priv->plat->resume(dev, priv->plat->bsp_priv);
8142 		if (ret)
8143 			return ret;
8144 	}
8145 
8146 	if (!netif_running(ndev))
8147 		return 0;
8148 
8149 	/* The Power Down bit in the PMT register is cleared
8150 	 * automatically as soon as a magic packet or a Wake-up frame
8151 	 * is received. Anyway, it's better to clear this bit
8152 	 * manually because it can cause problems when resuming
8153 	 * from other devices (e.g. a serial console).
8154 	 */
8155 	if (priv->wolopts) {
8156 		mutex_lock(&priv->lock);
8157 		stmmac_pmt(priv, priv->hw, 0);
8158 		mutex_unlock(&priv->lock);
8159 		priv->irq_wake = 0;
8160 	} else {
8161 		pinctrl_pm_select_default_state(priv->device);
8162 		/* reset the phy so that it's ready */
8163 		if (priv->mii)
8164 			stmmac_mdio_reset(priv->mii);
8165 	}
8166 
8167 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
8168 	    priv->plat->serdes_powerup) {
8169 		ret = priv->plat->serdes_powerup(ndev,
8170 						 priv->plat->bsp_priv);
8171 
8172 		if (ret < 0)
8173 			return ret;
8174 	}
8175 
8176 	rtnl_lock();
8177 
8178 	/* Prepare the PHY to resume, ensuring that its clocks, which are
8179 	 * necessary for the MAC DMA reset to complete, are running.
8180 	 */
8181 	phylink_prepare_resume(priv->phylink);
8182 
8183 	mutex_lock(&priv->lock);
8184 
8185 	stmmac_reset_queues_param(priv);
8186 
8187 	stmmac_free_tx_skbufs(priv);
8188 	stmmac_clear_descriptors(priv, &priv->dma_conf);
8189 
8190 	ret = stmmac_hw_setup(ndev);
8191 	if (ret < 0) {
8192 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
8193 		mutex_unlock(&priv->lock);
8194 		rtnl_unlock();
8195 		return ret;
8196 	}
8197 
8198 	stmmac_init_timestamping(priv);
8199 
8200 	stmmac_init_coalesce(priv);
8201 	phylink_rx_clk_stop_block(priv->phylink);
8202 	stmmac_set_rx_mode(ndev);
8203 
8204 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8205 	phylink_rx_clk_stop_unblock(priv->phylink);
8206 
8207 	stmmac_enable_all_queues(priv);
8208 	stmmac_enable_all_dma_irq(priv);
8209 
8210 	mutex_unlock(&priv->lock);
8211 
8212 	/* phylink_resume() must be called after the hardware has been
8213 	 * initialised because it may bring the link up immediately in a
8214 	 * workqueue thread, which will race with initialisation.
8215 	 */
8216 	phylink_resume(priv->phylink);
8217 	rtnl_unlock();
8218 
8219 	netif_device_attach(ndev);
8220 
8221 	return 0;
8222 }
8223 EXPORT_SYMBOL_GPL(stmmac_resume);
8224 
8225 /* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
8226 DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
8227 EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
8228 
8229 #ifndef MODULE
8230 static int __init stmmac_cmdline_opt(char *str)
8231 {
8232 	char *opt;
8233 
8234 	if (!str || !*str)
8235 		return 1;
8236 	while ((opt = strsep(&str, ",")) != NULL) {
8237 		if (!strncmp(opt, "debug:", 6)) {
8238 			if (kstrtoint(opt + 6, 0, &debug))
8239 				goto err;
8240 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8241 			if (kstrtoint(opt + 8, 0, &phyaddr))
8242 				goto err;
8243 		} else if (!strncmp(opt, "tc:", 3)) {
8244 			if (kstrtoint(opt + 3, 0, &tc))
8245 				goto err;
8246 		} else if (!strncmp(opt, "watchdog:", 9)) {
8247 			if (kstrtoint(opt + 9, 0, &watchdog))
8248 				goto err;
8249 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8250 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8251 				goto err;
8252 		} else if (!strncmp(opt, "pause:", 6)) {
8253 			if (kstrtoint(opt + 6, 0, &pause))
8254 				goto err;
8255 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8256 			if (kstrtoint(opt + 10, 0, &eee_timer))
8257 				goto err;
8258 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8259 			if (kstrtoint(opt + 11, 0, &chain_mode))
8260 				goto err;
8261 		}
8262 	}
8263 	return 1;
8264 
8265 err:
8266 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8267 	return 1;
8268 }
8269 
8270 __setup("stmmaceth=", stmmac_cmdline_opt);
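/* Example of the built-in command line syntax parsed above (values are
 * purely illustrative):
 *   stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 */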
8271 #endif /* MODULE */
8272 
8273 static int __init stmmac_init(void)
8274 {
8275 #ifdef CONFIG_DEBUG_FS
8276 	/* Create debugfs main directory if it doesn't exist yet */
8277 	if (!stmmac_fs_dir)
8278 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8279 	register_netdevice_notifier(&stmmac_notifier);
8280 #endif
8281 
8282 	return 0;
8283 }
8284 
8285 static void __exit stmmac_exit(void)
8286 {
8287 #ifdef CONFIG_DEBUG_FS
8288 	unregister_netdevice_notifier(&stmmac_notifier);
8289 	debugfs_remove_recursive(stmmac_fs_dir);
8290 #endif
8291 }
8292 
8293 module_init(stmmac_init)
8294 module_exit(stmmac_exit)
8295 
8296 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8297 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8298 MODULE_LICENSE("GPL");
8299