xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision 90602c251cda8a1e526efb250f28c1ea3f87cd78)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53 
54 /* As long as the interface is active, we keep the timestamping counter enabled
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
56  * (clock jumps) when changing timestamping settings at runtime.
57  */
58 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 				 PTP_TCR_TSCTRLSSR)
60 
61 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
63 
64 /* Module parameters */
65 #define TX_TIMEO	5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69 
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73 
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77 
78 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)
80 
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX	256
83 #define STMMAC_TX_XSK_AVAIL		16
84 #define STMMAC_RX_FILL_BATCH		16
85 
86 #define STMMAC_XDP_PASS		0
87 #define STMMAC_XDP_CONSUMED	BIT(0)
88 #define STMMAC_XDP_TX		BIT(1)
89 #define STMMAC_XDP_REDIRECT	BIT(2)
90 
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94 
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98 
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103 
104 #define	DEFAULT_BUFSIZE	1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108 
109 #define	STMMAC_RX_COPYBREAK	256
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver uses ring mode to manage tx and rx descriptors,
122  * but allows the user to force chain mode instead of ring mode.
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
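/* Illustrative usage of the module parameters above (a sketch, not part of
 * the driver): when stmmac is built as a module they can be overridden at
 * load time, e.g.
 *
 *	modprobe stmmac buf_sz=4096 eee_timer=2000 chain_mode=1
 *
 * or, for built-in drivers, on the kernel command line (stmmac.buf_sz=4096).
 * Parameters with 0644 permissions are also writable at runtime via
 * /sys/module/stmmac/parameters/, while 0444 ones (phyaddr, chain_mode)
 * can only be set at load/boot time.
 */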
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 	int ret = 0;
153 
154 	if (enabled) {
155 		ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 		if (ret)
157 			return ret;
158 		ret = clk_prepare_enable(priv->plat->pclk);
159 		if (ret) {
160 			clk_disable_unprepare(priv->plat->stmmac_clk);
161 			return ret;
162 		}
163 		if (priv->plat->clks_config) {
164 			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 			if (ret) {
166 				clk_disable_unprepare(priv->plat->stmmac_clk);
167 				clk_disable_unprepare(priv->plat->pclk);
168 				return ret;
169 			}
170 		}
171 	} else {
172 		clk_disable_unprepare(priv->plat->stmmac_clk);
173 		clk_disable_unprepare(priv->plat->pclk);
174 		if (priv->plat->clks_config)
175 			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 	}
177 
178 	return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
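/* Illustrative caller of stmmac_bus_clks_config() (a sketch only, not the
 * driver's actual suspend/resume code). The helper is meant to be used
 * symmetrically by glue/PM code that gates the bus clocks:
 *
 *	static int example_runtime_suspend(struct device *dev)
 *	{
 *		struct net_device *ndev = dev_get_drvdata(dev);
 *		struct stmmac_priv *priv = netdev_priv(ndev);
 *
 *		return stmmac_bus_clks_config(priv, false);
 *	}
 *
 * On the enable path a failure rolls back any clock already enabled, so a
 * caller only needs to check the return value.
 */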
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 		buf_sz = DEFAULT_BUFSIZE;
193 	if (unlikely(flow_ctrl > 1))
194 		flow_ctrl = FLOW_AUTO;
195 	else if (likely(flow_ctrl < 0))
196 		flow_ctrl = FLOW_OFF;
197 	if (unlikely((pause < 0) || (pause > 0xffff)))
198 		pause = PAUSE_TIME;
199 	if (eee_timer < 0)
200 		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202 
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 	u32 queue;
209 
210 	for (queue = 0; queue < maxq; queue++) {
211 		struct stmmac_channel *ch = &priv->channel[queue];
212 
213 		if (stmmac_xdp_is_enabled(priv) &&
214 		    test_bit(queue, priv->af_xdp_zc_qps)) {
215 			napi_disable(&ch->rxtx_napi);
216 			continue;
217 		}
218 
219 		if (queue < rx_queues_cnt)
220 			napi_disable(&ch->rx_napi);
221 		if (queue < tx_queues_cnt)
222 			napi_disable(&ch->tx_napi);
223 	}
224 }
225 
226 /**
227  * stmmac_disable_all_queues - Disable all queues
228  * @priv: driver private structure
229  */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 	struct stmmac_rx_queue *rx_q;
234 	u32 queue;
235 
236 	/* synchronize_rcu() needed for pending XDP buffers to drain */
237 	for (queue = 0; queue < rx_queues_cnt; queue++) {
238 		rx_q = &priv->dma_conf.rx_queue[queue];
239 		if (rx_q->xsk_pool) {
240 			synchronize_rcu();
241 			break;
242 		}
243 	}
244 
245 	__stmmac_disable_all_queues(priv);
246 }
247 
248 /**
249  * stmmac_enable_all_queues - Enable all queues
250  * @priv: driver private structure
251  */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 	u32 queue;
258 
259 	for (queue = 0; queue < maxq; queue++) {
260 		struct stmmac_channel *ch = &priv->channel[queue];
261 
262 		if (stmmac_xdp_is_enabled(priv) &&
263 		    test_bit(queue, priv->af_xdp_zc_qps)) {
264 			napi_enable(&ch->rxtx_napi);
265 			continue;
266 		}
267 
268 		if (queue < rx_queues_cnt)
269 			napi_enable(&ch->rx_napi);
270 		if (queue < tx_queues_cnt)
271 			napi_enable(&ch->tx_napi);
272 	}
273 }
274 
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 		queue_work(priv->wq, &priv->service_task);
280 }
281 
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 	netif_carrier_off(priv->dev);
285 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 	stmmac_service_event_schedule(priv);
287 }
288 
289 /**
290  * stmmac_clk_csr_set - dynamically set the MDC clock
291  * @priv: driver private structure
292  * Description: this is to dynamically set the MDC clock according to the csr
293  * clock input.
294  * Note:
295  *	If a specific clk_csr value is passed from the platform
296  *	this means that the CSR Clock Range selection cannot be
297  *	changed at run-time and it is fixed (as reported in the driver
298  *	documentation). Otherwise, the driver will try to set the MDC
299  *	clock dynamically according to the actual clock input.
300  */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 	u32 clk_rate;
304 
305 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306 
307 	/* The platform-provided default clk_csr is assumed valid for
308 	 * all cases except the ones handled below.
309 	 * For values higher than the IEEE 802.3 specified frequency
310 	 * we cannot estimate the proper divider as the frequency of
311 	 * clk_csr_i is not known. So we do not change the default
312 	 * divider.
313 	 */
314 	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 		if (clk_rate < CSR_F_35M)
316 			priv->clk_csr = STMMAC_CSR_20_35M;
317 		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 			priv->clk_csr = STMMAC_CSR_35_60M;
319 		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 			priv->clk_csr = STMMAC_CSR_60_100M;
321 		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 			priv->clk_csr = STMMAC_CSR_100_150M;
323 		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 			priv->clk_csr = STMMAC_CSR_150_250M;
325 		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 			priv->clk_csr = STMMAC_CSR_250_300M;
327 	}
328 
329 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 		if (clk_rate > 160000000)
331 			priv->clk_csr = 0x03;
332 		else if (clk_rate > 80000000)
333 			priv->clk_csr = 0x02;
334 		else if (clk_rate > 40000000)
335 			priv->clk_csr = 0x01;
336 		else
337 			priv->clk_csr = 0;
338 	}
339 
340 	if (priv->plat->has_xgmac) {
341 		if (clk_rate > 400000000)
342 			priv->clk_csr = 0x5;
343 		else if (clk_rate > 350000000)
344 			priv->clk_csr = 0x4;
345 		else if (clk_rate > 300000000)
346 			priv->clk_csr = 0x3;
347 		else if (clk_rate > 250000000)
348 			priv->clk_csr = 0x2;
349 		else if (clk_rate > 150000000)
350 			priv->clk_csr = 0x1;
351 		else
352 			priv->clk_csr = 0x0;
353 	}
354 }
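/* Note (illustrative, based on the Synopsys databook divider table rather
 * than anything in this file): each STMMAC_CSR_* range selects a fixed
 * divider on clk_csr_i so that MDC stays at or below the ~2.5 MHz limit of
 * IEEE 802.3; e.g. a 75 MHz CSR clock falls into STMMAC_CSR_60_100M and is
 * typically divided by 42, giving an MDC of roughly 1.8 MHz.
 */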
355 
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361 
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 	u32 avail;
366 
367 	if (tx_q->dirty_tx > tx_q->cur_tx)
368 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 	else
370 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371 
372 	return avail;
373 }
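/* Worked example of the wrap-around arithmetic above (illustrative numbers
 * only): with dma_tx_size = 512, cur_tx = 500 and dirty_tx = 10 the result
 * is 512 - 500 + 10 - 1 = 21 free entries; one slot is always kept unused
 * so that cur_tx == dirty_tx unambiguously means "ring empty".
 */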
374 
375 /**
376  * stmmac_rx_dirty - Get RX queue dirty
377  * @priv: driver private structure
378  * @queue: RX queue index
379  */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 	u32 dirty;
384 
385 	if (rx_q->dirty_rx <= rx_q->cur_rx)
386 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 	else
388 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389 
390 	return dirty;
391 }
392 
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 	int tx_lpi_timer;
396 
397 	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 	priv->eee_sw_timer_en = en ? 0 : 1;
399 	tx_lpi_timer  = en ? priv->tx_lpi_timer : 0;
400 	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402 
403 /**
404  * stmmac_enable_eee_mode - check and enter in LPI mode
405  * @priv: driver private structure
406  * Description: this function checks that all TX queues are idle and, if so,
407  * enters LPI mode (EEE).
408  */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 	u32 tx_cnt = priv->plat->tx_queues_to_use;
412 	u32 queue;
413 
414 	/* check if all TX queues have the work finished */
415 	for (queue = 0; queue < tx_cnt; queue++) {
416 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417 
418 		if (tx_q->dirty_tx != tx_q->cur_tx)
419 			return -EBUSY; /* still unfinished work */
420 	}
421 
422 	/* Check and enter in LPI mode */
423 	if (!priv->tx_path_in_lpi_mode)
424 		stmmac_set_eee_mode(priv, priv->hw,
425 			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 	return 0;
427 }
428 
429 /**
430  * stmmac_disable_eee_mode - disable and exit from LPI mode
431  * @priv: driver private structure
432  * Description: this function exits and disables EEE when the LPI state is
433  * active. It is called from the xmit path.
434  */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 	if (!priv->eee_sw_timer_en) {
438 		stmmac_lpi_entry_timer_config(priv, 0);
439 		return;
440 	}
441 
442 	stmmac_reset_eee_mode(priv, priv->hw);
443 	del_timer_sync(&priv->eee_ctrl_timer);
444 	priv->tx_path_in_lpi_mode = false;
445 }
446 
447 /**
448  * stmmac_eee_ctrl_timer - EEE TX SW timer.
449  * @t:  timer_list struct containing private info
450  * Description:
451  *  if there is no data transfer and if we are not in LPI state,
452  *  then MAC Transmitter can be moved to LPI state.
453  */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457 
458 	if (stmmac_enable_eee_mode(priv))
459 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461 
462 /**
463  * stmmac_eee_init - init EEE
464  * @priv: driver private structure
465  * Description:
466  *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
467  *  can also manage EEE, this function enables the LPI state and starts the
468  *  related timer.
469  */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 	int eee_tw_timer = priv->eee_tw_timer;
473 
474 	/* Check if MAC core supports the EEE feature. */
475 	if (!priv->dma_cap.eee)
476 		return false;
477 
478 	mutex_lock(&priv->lock);
479 
480 	/* Check if it needs to be deactivated */
481 	if (!priv->eee_active) {
482 		if (priv->eee_enabled) {
483 			netdev_dbg(priv->dev, "disable EEE\n");
484 			stmmac_lpi_entry_timer_config(priv, 0);
485 			del_timer_sync(&priv->eee_ctrl_timer);
486 			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
487 			if (priv->hw->xpcs)
488 				xpcs_config_eee(priv->hw->xpcs,
489 						priv->plat->mult_fact_100ns,
490 						false);
491 		}
492 		mutex_unlock(&priv->lock);
493 		return false;
494 	}
495 
496 	if (priv->eee_active && !priv->eee_enabled) {
497 		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
498 		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
499 				     eee_tw_timer);
500 		if (priv->hw->xpcs)
501 			xpcs_config_eee(priv->hw->xpcs,
502 					priv->plat->mult_fact_100ns,
503 					true);
504 	}
505 
506 	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
507 		del_timer_sync(&priv->eee_ctrl_timer);
508 		priv->tx_path_in_lpi_mode = false;
509 		stmmac_lpi_entry_timer_config(priv, 1);
510 	} else {
511 		stmmac_lpi_entry_timer_config(priv, 0);
512 		mod_timer(&priv->eee_ctrl_timer,
513 			  STMMAC_LPI_T(priv->tx_lpi_timer));
514 	}
515 
516 	mutex_unlock(&priv->lock);
517 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
518 	return true;
519 }
520 
521 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
522  * @priv: driver private structure
523  * @p : descriptor pointer
524  * @skb : the socket buffer
525  * Description :
526  * This function reads the timestamp from the descriptor, performs some
527  * sanity checks and then passes it to the stack.
528  */
529 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
530 				   struct dma_desc *p, struct sk_buff *skb)
531 {
532 	struct skb_shared_hwtstamps shhwtstamp;
533 	bool found = false;
534 	u64 ns = 0;
535 
536 	if (!priv->hwts_tx_en)
537 		return;
538 
539 	/* exit if skb doesn't support hw tstamp */
540 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
541 		return;
542 
543 	/* check tx tstamp status */
544 	if (stmmac_get_tx_timestamp_status(priv, p)) {
545 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
546 		found = true;
547 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
548 		found = true;
549 	}
550 
551 	if (found) {
552 		ns -= priv->plat->cdc_error_adj;
553 
554 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
555 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
556 
557 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
558 		/* pass tstamp to stack */
559 		skb_tstamp_tx(skb, &shhwtstamp);
560 	}
561 }
562 
563 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
564  * @priv: driver private structure
565  * @p : descriptor pointer
566  * @np : next descriptor pointer
567  * @skb : the socket buffer
568  * Description :
569  * This function reads the received packet's timestamp from the descriptor
570  * and passes it to the stack. It also performs some sanity checks.
571  */
572 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
573 				   struct dma_desc *np, struct sk_buff *skb)
574 {
575 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
576 	struct dma_desc *desc = p;
577 	u64 ns = 0;
578 
579 	if (!priv->hwts_rx_en)
580 		return;
581 	/* For GMAC4, the valid timestamp is from CTX next desc. */
582 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
583 		desc = np;
584 
585 	/* Check if timestamp is available */
586 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
587 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
588 
589 		ns -= priv->plat->cdc_error_adj;
590 
591 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
592 		shhwtstamp = skb_hwtstamps(skb);
593 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
594 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
595 	} else  {
596 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
597 	}
598 }
599 
600 /**
601  *  stmmac_hwtstamp_set - control hardware timestamping.
602  *  @dev: device pointer.
603  *  @ifr: An IOCTL specific structure, that can contain a pointer to
604  *  a proprietary structure used to pass information to the driver.
605  *  Description:
606  *  This function configures the MAC to enable/disable both outgoing (TX)
607  *  and incoming (RX) packet timestamping based on user input.
608  *  Return Value:
609  *  0 on success and an appropriate -ve integer on failure.
610  */
611 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
612 {
613 	struct stmmac_priv *priv = netdev_priv(dev);
614 	struct hwtstamp_config config;
615 	u32 ptp_v2 = 0;
616 	u32 tstamp_all = 0;
617 	u32 ptp_over_ipv4_udp = 0;
618 	u32 ptp_over_ipv6_udp = 0;
619 	u32 ptp_over_ethernet = 0;
620 	u32 snap_type_sel = 0;
621 	u32 ts_master_en = 0;
622 	u32 ts_event_en = 0;
623 
624 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
625 		netdev_alert(priv->dev, "No support for HW time stamping\n");
626 		priv->hwts_tx_en = 0;
627 		priv->hwts_rx_en = 0;
628 
629 		return -EOPNOTSUPP;
630 	}
631 
632 	if (copy_from_user(&config, ifr->ifr_data,
633 			   sizeof(config)))
634 		return -EFAULT;
635 
636 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
637 		   __func__, config.flags, config.tx_type, config.rx_filter);
638 
639 	if (config.tx_type != HWTSTAMP_TX_OFF &&
640 	    config.tx_type != HWTSTAMP_TX_ON)
641 		return -ERANGE;
642 
643 	if (priv->adv_ts) {
644 		switch (config.rx_filter) {
645 		case HWTSTAMP_FILTER_NONE:
646 			/* time stamp no incoming packet at all */
647 			config.rx_filter = HWTSTAMP_FILTER_NONE;
648 			break;
649 
650 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
651 			/* PTP v1, UDP, any kind of event packet */
652 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
653 			/* 'xmac' hardware can support Sync, Pdelay_Req and
654 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
655 			 * This leaves Delay_Req timestamps out.
656 			 * Enable all events *and* general purpose message
657 			 * timestamping
658 			 */
659 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
660 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 			break;
663 
664 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
665 			/* PTP v1, UDP, Sync packet */
666 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
667 			/* take time stamp for SYNC messages only */
668 			ts_event_en = PTP_TCR_TSEVNTENA;
669 
670 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
671 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
672 			break;
673 
674 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
675 			/* PTP v1, UDP, Delay_req packet */
676 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
677 			/* take time stamp for Delay_Req messages only */
678 			ts_master_en = PTP_TCR_TSMSTRENA;
679 			ts_event_en = PTP_TCR_TSEVNTENA;
680 
681 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
682 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
683 			break;
684 
685 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
686 			/* PTP v2, UDP, any kind of event packet */
687 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
688 			ptp_v2 = PTP_TCR_TSVER2ENA;
689 			/* take time stamp for all event messages */
690 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
691 
692 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
693 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
694 			break;
695 
696 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
697 			/* PTP v2, UDP, Sync packet */
698 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
699 			ptp_v2 = PTP_TCR_TSVER2ENA;
700 			/* take time stamp for SYNC messages only */
701 			ts_event_en = PTP_TCR_TSEVNTENA;
702 
703 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
704 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
705 			break;
706 
707 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
708 			/* PTP v2, UDP, Delay_req packet */
709 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
710 			ptp_v2 = PTP_TCR_TSVER2ENA;
711 			/* take time stamp for Delay_Req messages only */
712 			ts_master_en = PTP_TCR_TSMSTRENA;
713 			ts_event_en = PTP_TCR_TSEVNTENA;
714 
715 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
716 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
717 			break;
718 
719 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
720 			/* PTP v2/802.AS1 any layer, any kind of event packet */
721 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
722 			ptp_v2 = PTP_TCR_TSVER2ENA;
723 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
724 			if (priv->synopsys_id < DWMAC_CORE_4_10)
725 				ts_event_en = PTP_TCR_TSEVNTENA;
726 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
727 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
728 			ptp_over_ethernet = PTP_TCR_TSIPENA;
729 			break;
730 
731 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
732 			/* PTP v2/802.AS1, any layer, Sync packet */
733 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
734 			ptp_v2 = PTP_TCR_TSVER2ENA;
735 			/* take time stamp for SYNC messages only */
736 			ts_event_en = PTP_TCR_TSEVNTENA;
737 
738 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
739 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
740 			ptp_over_ethernet = PTP_TCR_TSIPENA;
741 			break;
742 
743 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
744 			/* PTP v2/802.AS1, any layer, Delay_req packet */
745 			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
746 			ptp_v2 = PTP_TCR_TSVER2ENA;
747 			/* take time stamp for Delay_Req messages only */
748 			ts_master_en = PTP_TCR_TSMSTRENA;
749 			ts_event_en = PTP_TCR_TSEVNTENA;
750 
751 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
752 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
753 			ptp_over_ethernet = PTP_TCR_TSIPENA;
754 			break;
755 
756 		case HWTSTAMP_FILTER_NTP_ALL:
757 		case HWTSTAMP_FILTER_ALL:
758 			/* time stamp any incoming packet */
759 			config.rx_filter = HWTSTAMP_FILTER_ALL;
760 			tstamp_all = PTP_TCR_TSENALL;
761 			break;
762 
763 		default:
764 			return -ERANGE;
765 		}
766 	} else {
767 		switch (config.rx_filter) {
768 		case HWTSTAMP_FILTER_NONE:
769 			config.rx_filter = HWTSTAMP_FILTER_NONE;
770 			break;
771 		default:
772 			/* PTP v1, UDP, any kind of event packet */
773 			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
774 			break;
775 		}
776 	}
777 	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
778 	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
779 
780 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
781 
782 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
783 		priv->systime_flags |= tstamp_all | ptp_v2 |
784 				       ptp_over_ethernet | ptp_over_ipv6_udp |
785 				       ptp_over_ipv4_udp | ts_event_en |
786 				       ts_master_en | snap_type_sel;
787 	}
788 
789 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
790 
791 	memcpy(&priv->tstamp_config, &config, sizeof(config));
792 
793 	return copy_to_user(ifr->ifr_data, &config,
794 			    sizeof(config)) ? -EFAULT : 0;
795 }
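/* Illustrative user-space counterpart of the ioctl handled above (a sketch,
 * not part of the driver), using only standard UAPI definitions from
 * <linux/net_tstamp.h> and <linux/sockios.h>; "eth0" and sock_fd are
 * placeholders:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 *
 * On return cfg.rx_filter may have been widened by the driver (see the
 * switch statement above), so applications should re-read it.
 */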
796 
797 /**
798  *  stmmac_hwtstamp_get - read hardware timestamping.
799  *  @dev: device pointer.
800  *  @ifr: An IOCTL specific structure, that can contain a pointer to
801  *  a proprietary structure used to pass information to the driver.
802  *  Description:
803  *  This function obtains the current hardware timestamping settings
804  *  as requested.
805  */
806 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
807 {
808 	struct stmmac_priv *priv = netdev_priv(dev);
809 	struct hwtstamp_config *config = &priv->tstamp_config;
810 
811 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
812 		return -EOPNOTSUPP;
813 
814 	return copy_to_user(ifr->ifr_data, config,
815 			    sizeof(*config)) ? -EFAULT : 0;
816 }
817 
818 /**
819  * stmmac_init_tstamp_counter - init hardware timestamping counter
820  * @priv: driver private structure
821  * @systime_flags: timestamping flags
822  * Description:
823  * Initialize hardware counter for packet timestamping.
824  * This is valid as long as the interface is open and not suspended.
825  * It is rerun after resuming from suspend, in which case the timestamping
826  * flags updated by stmmac_hwtstamp_set() also need to be restored.
827  */
828 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
829 {
830 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
831 	struct timespec64 now;
832 	u32 sec_inc = 0;
833 	u64 temp = 0;
834 
835 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
836 		return -EOPNOTSUPP;
837 
838 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
839 	priv->systime_flags = systime_flags;
840 
841 	/* program Sub Second Increment reg */
842 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
843 					   priv->plat->clk_ptp_rate,
844 					   xmac, &sec_inc);
845 	temp = div_u64(1000000000ULL, sec_inc);
846 
847 	/* Store sub second increment for later use */
848 	priv->sub_second_inc = sec_inc;
849 
850 	/* calculate the default addend value:
851 	 * the formula is:
852 	 * addend = 2^32 / freq_div_ratio,
853 	 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc)
854 	 */
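	/* Worked example (illustrative values, not from any particular
	 * platform): with sec_inc = 20 ns the target counter rate is
	 * 1e9 / 20 = 50 MHz; for clk_ptp_rate = 62.5 MHz the addend is
	 * 50e6 * 2^32 / 62.5e6 = 0.8 * 2^32 ~= 0xCCCCCCCC, i.e. the
	 * accumulator wraps on 80% of the reference clock edges.
	 */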
855 	temp = (u64)(temp << 32);
856 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
857 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
858 
859 	/* initialize system time */
860 	ktime_get_real_ts64(&now);
861 
862 	/* lower 32 bits of tv_sec are safe until y2106 */
863 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
864 
865 	return 0;
866 }
867 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
868 
869 /**
870  * stmmac_init_ptp - init PTP
871  * @priv: driver private structure
872  * Description: this is to verify whether the HW supports PTPv1 or PTPv2.
873  * This is done by looking at the HW cap. register.
874  * This function also registers the ptp driver.
875  */
876 static int stmmac_init_ptp(struct stmmac_priv *priv)
877 {
878 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
879 	int ret;
880 
881 	if (priv->plat->ptp_clk_freq_config)
882 		priv->plat->ptp_clk_freq_config(priv);
883 
884 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
885 	if (ret)
886 		return ret;
887 
888 	priv->adv_ts = 0;
889 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
890 	if (xmac && priv->dma_cap.atime_stamp)
891 		priv->adv_ts = 1;
892 	/* Dwmac 3.x core with extend_desc can support adv_ts */
893 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
894 		priv->adv_ts = 1;
895 
896 	if (priv->dma_cap.time_stamp)
897 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
898 
899 	if (priv->adv_ts)
900 		netdev_info(priv->dev,
901 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
902 
903 	priv->hwts_tx_en = 0;
904 	priv->hwts_rx_en = 0;
905 
906 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
907 		stmmac_hwtstamp_correct_latency(priv, priv);
908 
909 	return 0;
910 }
911 
912 static void stmmac_release_ptp(struct stmmac_priv *priv)
913 {
914 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
915 	stmmac_ptp_unregister(priv);
916 }
917 
918 /**
919  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
920  *  @priv: driver private structure
921  *  @duplex: duplex passed to the next function
922  *  Description: It is used for configuring the flow control in all queues
923  */
924 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
925 {
926 	u32 tx_cnt = priv->plat->tx_queues_to_use;
927 
928 	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
929 			priv->pause, tx_cnt);
930 }
931 
932 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
933 					 phy_interface_t interface)
934 {
935 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
936 
937 	/* Refresh the MAC-specific capabilities */
938 	stmmac_mac_update_caps(priv);
939 
940 	config->mac_capabilities = priv->hw->link.caps;
941 
942 	if (priv->plat->max_speed)
943 		phylink_limit_mac_speed(config, priv->plat->max_speed);
944 
945 	return config->mac_capabilities;
946 }
947 
948 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
949 						 phy_interface_t interface)
950 {
951 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
952 	struct phylink_pcs *pcs;
953 
954 	if (priv->plat->select_pcs) {
955 		pcs = priv->plat->select_pcs(priv, interface);
956 		if (!IS_ERR(pcs))
957 			return pcs;
958 	}
959 
960 	return NULL;
961 }
962 
963 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
964 			      const struct phylink_link_state *state)
965 {
966 	/* Nothing to do, xpcs_config() handles everything */
967 }
968 
969 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
970 {
971 	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
972 	unsigned long flags;
973 
974 	timer_shutdown_sync(&fpe_cfg->verify_timer);
975 
976 	spin_lock_irqsave(&fpe_cfg->lock, flags);
977 
978 	if (is_up && fpe_cfg->pmac_enabled) {
979 		/* VERIFY process requires pmac enabled when NIC comes up */
980 		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
981 				     priv->plat->tx_queues_to_use,
982 				     priv->plat->rx_queues_to_use,
983 				     false, true);
984 
985 		/* New link => maybe new partner => new verification process */
986 		stmmac_fpe_apply(priv);
987 	} else {
988 		/* No link => turn off EFPE */
989 		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
990 				     priv->plat->tx_queues_to_use,
991 				     priv->plat->rx_queues_to_use,
992 				     false, false);
993 	}
994 
995 	spin_unlock_irqrestore(&fpe_cfg->lock, flags);
996 }
997 
998 static void stmmac_mac_link_down(struct phylink_config *config,
999 				 unsigned int mode, phy_interface_t interface)
1000 {
1001 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1002 
1003 	stmmac_mac_set(priv, priv->ioaddr, false);
1004 	priv->eee_active = false;
1005 	priv->tx_lpi_enabled = false;
1006 	priv->eee_enabled = stmmac_eee_init(priv);
1007 	stmmac_set_eee_pls(priv, priv->hw, false);
1008 
1009 	if (priv->dma_cap.fpesel)
1010 		stmmac_fpe_link_state_handle(priv, false);
1011 }
1012 
1013 static void stmmac_mac_link_up(struct phylink_config *config,
1014 			       struct phy_device *phy,
1015 			       unsigned int mode, phy_interface_t interface,
1016 			       int speed, int duplex,
1017 			       bool tx_pause, bool rx_pause)
1018 {
1019 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1020 	u32 old_ctrl, ctrl;
1021 
1022 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1023 	    priv->plat->serdes_powerup)
1024 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1025 
1026 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1027 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1028 
1029 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
1030 		switch (speed) {
1031 		case SPEED_10000:
1032 			ctrl |= priv->hw->link.xgmii.speed10000;
1033 			break;
1034 		case SPEED_5000:
1035 			ctrl |= priv->hw->link.xgmii.speed5000;
1036 			break;
1037 		case SPEED_2500:
1038 			ctrl |= priv->hw->link.xgmii.speed2500;
1039 			break;
1040 		default:
1041 			return;
1042 		}
1043 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1044 		switch (speed) {
1045 		case SPEED_100000:
1046 			ctrl |= priv->hw->link.xlgmii.speed100000;
1047 			break;
1048 		case SPEED_50000:
1049 			ctrl |= priv->hw->link.xlgmii.speed50000;
1050 			break;
1051 		case SPEED_40000:
1052 			ctrl |= priv->hw->link.xlgmii.speed40000;
1053 			break;
1054 		case SPEED_25000:
1055 			ctrl |= priv->hw->link.xlgmii.speed25000;
1056 			break;
1057 		case SPEED_10000:
1058 			ctrl |= priv->hw->link.xgmii.speed10000;
1059 			break;
1060 		case SPEED_2500:
1061 			ctrl |= priv->hw->link.speed2500;
1062 			break;
1063 		case SPEED_1000:
1064 			ctrl |= priv->hw->link.speed1000;
1065 			break;
1066 		default:
1067 			return;
1068 		}
1069 	} else {
1070 		switch (speed) {
1071 		case SPEED_2500:
1072 			ctrl |= priv->hw->link.speed2500;
1073 			break;
1074 		case SPEED_1000:
1075 			ctrl |= priv->hw->link.speed1000;
1076 			break;
1077 		case SPEED_100:
1078 			ctrl |= priv->hw->link.speed100;
1079 			break;
1080 		case SPEED_10:
1081 			ctrl |= priv->hw->link.speed10;
1082 			break;
1083 		default:
1084 			return;
1085 		}
1086 	}
1087 
1088 	priv->speed = speed;
1089 
1090 	if (priv->plat->fix_mac_speed)
1091 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1092 
1093 	if (!duplex)
1094 		ctrl &= ~priv->hw->link.duplex;
1095 	else
1096 		ctrl |= priv->hw->link.duplex;
1097 
1098 	/* Flow Control operation */
1099 	if (rx_pause && tx_pause)
1100 		priv->flow_ctrl = FLOW_AUTO;
1101 	else if (rx_pause && !tx_pause)
1102 		priv->flow_ctrl = FLOW_RX;
1103 	else if (!rx_pause && tx_pause)
1104 		priv->flow_ctrl = FLOW_TX;
1105 	else
1106 		priv->flow_ctrl = FLOW_OFF;
1107 
1108 	stmmac_mac_flow_ctrl(priv, duplex);
1109 
1110 	if (ctrl != old_ctrl)
1111 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1112 
1113 	stmmac_mac_set(priv, priv->ioaddr, true);
1114 	if (phy && priv->dma_cap.eee) {
1115 		priv->eee_active =
1116 			phy_init_eee(phy, !(priv->plat->flags &
1117 				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1118 		priv->eee_enabled = stmmac_eee_init(priv);
1119 		priv->tx_lpi_enabled = priv->eee_enabled;
1120 		stmmac_set_eee_pls(priv, priv->hw, true);
1121 	}
1122 
1123 	if (priv->dma_cap.fpesel)
1124 		stmmac_fpe_link_state_handle(priv, true);
1125 
1126 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1127 		stmmac_hwtstamp_correct_latency(priv, priv);
1128 }
1129 
1130 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1131 	.mac_get_caps = stmmac_mac_get_caps,
1132 	.mac_select_pcs = stmmac_mac_select_pcs,
1133 	.mac_config = stmmac_mac_config,
1134 	.mac_link_down = stmmac_mac_link_down,
1135 	.mac_link_up = stmmac_mac_link_up,
1136 };
1137 
1138 /**
1139  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1140  * @priv: driver private structure
1141  * Description: this is to verify whether the HW supports the Physical
1142  * Coding Sublayer (PCS) interface, which can be used when the MAC is
1143  * configured for the TBI, RTBI, or SGMII PHY interface.
1144  */
1145 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1146 {
1147 	int interface = priv->plat->mac_interface;
1148 
1149 	if (priv->dma_cap.pcs) {
1150 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1151 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1152 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1153 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1154 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1155 			priv->hw->pcs = STMMAC_PCS_RGMII;
1156 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1157 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1158 			priv->hw->pcs = STMMAC_PCS_SGMII;
1159 		}
1160 	}
1161 }
1162 
1163 /**
1164  * stmmac_init_phy - PHY initialization
1165  * @dev: net device structure
1166  * Description: it initializes the driver's PHY state, and attaches the PHY
1167  * to the mac driver.
1168  *  Return value:
1169  *  0 on success
1170  */
1171 static int stmmac_init_phy(struct net_device *dev)
1172 {
1173 	struct stmmac_priv *priv = netdev_priv(dev);
1174 	struct fwnode_handle *phy_fwnode;
1175 	struct fwnode_handle *fwnode;
1176 	int ret;
1177 
1178 	if (!phylink_expects_phy(priv->phylink))
1179 		return 0;
1180 
1181 	fwnode = priv->plat->port_node;
1182 	if (!fwnode)
1183 		fwnode = dev_fwnode(priv->device);
1184 
1185 	if (fwnode)
1186 		phy_fwnode = fwnode_get_phy_node(fwnode);
1187 	else
1188 		phy_fwnode = NULL;
1189 
1190 	/* Some DT bindings do not set up the PHY handle. Let's try to
1191 	 * manually parse it
1192 	 */
1193 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1194 		int addr = priv->plat->phy_addr;
1195 		struct phy_device *phydev;
1196 
1197 		if (addr < 0) {
1198 			netdev_err(priv->dev, "no phy found\n");
1199 			return -ENODEV;
1200 		}
1201 
1202 		phydev = mdiobus_get_phy(priv->mii, addr);
1203 		if (!phydev) {
1204 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1205 			return -ENODEV;
1206 		}
1207 
1208 		ret = phylink_connect_phy(priv->phylink, phydev);
1209 	} else {
1210 		fwnode_handle_put(phy_fwnode);
1211 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1212 	}
1213 
1214 	if (!priv->plat->pmt) {
1215 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1216 
1217 		phylink_ethtool_get_wol(priv->phylink, &wol);
1218 		device_set_wakeup_capable(priv->device, !!wol.supported);
1219 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1220 	}
1221 
1222 	return ret;
1223 }
1224 
1225 static int stmmac_phy_setup(struct stmmac_priv *priv)
1226 {
1227 	struct stmmac_mdio_bus_data *mdio_bus_data;
1228 	int mode = priv->plat->phy_interface;
1229 	struct fwnode_handle *fwnode;
1230 	struct phylink *phylink;
1231 
1232 	priv->phylink_config.dev = &priv->dev->dev;
1233 	priv->phylink_config.type = PHYLINK_NETDEV;
1234 	priv->phylink_config.mac_managed_pm = true;
1235 
1236 	/* Stmmac always requires an RX clock for hardware initialization */
1237 	priv->phylink_config.mac_requires_rxc = true;
1238 
1239 	mdio_bus_data = priv->plat->mdio_bus_data;
1240 	if (mdio_bus_data)
1241 		priv->phylink_config.default_an_inband =
1242 			mdio_bus_data->default_an_inband;
1243 
1244 	/* Set the platform/firmware specified interface mode. Note, phylink
1245 	 * deals with the PHY interface mode, not the MAC interface mode.
1246 	 */
1247 	__set_bit(mode, priv->phylink_config.supported_interfaces);
1248 
1249 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1250 	if (priv->hw->xpcs)
1251 		xpcs_get_interfaces(priv->hw->xpcs,
1252 				    priv->phylink_config.supported_interfaces);
1253 
1254 	fwnode = priv->plat->port_node;
1255 	if (!fwnode)
1256 		fwnode = dev_fwnode(priv->device);
1257 
1258 	phylink = phylink_create(&priv->phylink_config, fwnode,
1259 				 mode, &stmmac_phylink_mac_ops);
1260 	if (IS_ERR(phylink))
1261 		return PTR_ERR(phylink);
1262 
1263 	priv->phylink = phylink;
1264 	return 0;
1265 }
1266 
1267 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1268 				    struct stmmac_dma_conf *dma_conf)
1269 {
1270 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1271 	unsigned int desc_size;
1272 	void *head_rx;
1273 	u32 queue;
1274 
1275 	/* Display RX rings */
1276 	for (queue = 0; queue < rx_cnt; queue++) {
1277 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1278 
1279 		pr_info("\tRX Queue %u rings\n", queue);
1280 
1281 		if (priv->extend_desc) {
1282 			head_rx = (void *)rx_q->dma_erx;
1283 			desc_size = sizeof(struct dma_extended_desc);
1284 		} else {
1285 			head_rx = (void *)rx_q->dma_rx;
1286 			desc_size = sizeof(struct dma_desc);
1287 		}
1288 
1289 		/* Display RX ring */
1290 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1291 				    rx_q->dma_rx_phy, desc_size);
1292 	}
1293 }
1294 
1295 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1296 				    struct stmmac_dma_conf *dma_conf)
1297 {
1298 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1299 	unsigned int desc_size;
1300 	void *head_tx;
1301 	u32 queue;
1302 
1303 	/* Display TX rings */
1304 	for (queue = 0; queue < tx_cnt; queue++) {
1305 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1306 
1307 		pr_info("\tTX Queue %d rings\n", queue);
1308 
1309 		if (priv->extend_desc) {
1310 			head_tx = (void *)tx_q->dma_etx;
1311 			desc_size = sizeof(struct dma_extended_desc);
1312 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1313 			head_tx = (void *)tx_q->dma_entx;
1314 			desc_size = sizeof(struct dma_edesc);
1315 		} else {
1316 			head_tx = (void *)tx_q->dma_tx;
1317 			desc_size = sizeof(struct dma_desc);
1318 		}
1319 
1320 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1321 				    tx_q->dma_tx_phy, desc_size);
1322 	}
1323 }
1324 
1325 static void stmmac_display_rings(struct stmmac_priv *priv,
1326 				 struct stmmac_dma_conf *dma_conf)
1327 {
1328 	/* Display RX ring */
1329 	stmmac_display_rx_rings(priv, dma_conf);
1330 
1331 	/* Display TX ring */
1332 	stmmac_display_tx_rings(priv, dma_conf);
1333 }
1334 
1335 static int stmmac_set_bfsize(int mtu, int bufsize)
1336 {
1337 	int ret = bufsize;
1338 
1339 	if (mtu >= BUF_SIZE_8KiB)
1340 		ret = BUF_SIZE_16KiB;
1341 	else if (mtu >= BUF_SIZE_4KiB)
1342 		ret = BUF_SIZE_8KiB;
1343 	else if (mtu >= BUF_SIZE_2KiB)
1344 		ret = BUF_SIZE_4KiB;
1345 	else if (mtu > DEFAULT_BUFSIZE)
1346 		ret = BUF_SIZE_2KiB;
1347 	else
1348 		ret = DEFAULT_BUFSIZE;
1349 
1350 	return ret;
1351 }
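/* Worked examples of the MTU-to-buffer-size mapping above (illustrative):
 * a standard 1500-byte MTU keeps DEFAULT_BUFSIZE (1536), a 3000-byte MTU
 * selects BUF_SIZE_4KiB, and a 9000-byte jumbo MTU selects BUF_SIZE_16KiB.
 */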
1352 
1353 /**
1354  * stmmac_clear_rx_descriptors - clear RX descriptors
1355  * @priv: driver private structure
1356  * @dma_conf: structure to take the dma data
1357  * @queue: RX queue index
1358  * Description: this function is called to clear the RX descriptors,
1359  * whether basic or extended descriptors are used.
1360  */
1361 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1362 					struct stmmac_dma_conf *dma_conf,
1363 					u32 queue)
1364 {
1365 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1366 	int i;
1367 
1368 	/* Clear the RX descriptors */
1369 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1370 		if (priv->extend_desc)
1371 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1372 					priv->use_riwt, priv->mode,
1373 					(i == dma_conf->dma_rx_size - 1),
1374 					dma_conf->dma_buf_sz);
1375 		else
1376 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1377 					priv->use_riwt, priv->mode,
1378 					(i == dma_conf->dma_rx_size - 1),
1379 					dma_conf->dma_buf_sz);
1380 }
1381 
1382 /**
1383  * stmmac_clear_tx_descriptors - clear tx descriptors
1384  * @priv: driver private structure
1385  * @dma_conf: structure to take the dma data
1386  * @queue: TX queue index.
1387  * Description: this function is called to clear the TX descriptors,
1388  * whether basic or extended descriptors are used.
1389  */
1390 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1391 					struct stmmac_dma_conf *dma_conf,
1392 					u32 queue)
1393 {
1394 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1395 	int i;
1396 
1397 	/* Clear the TX descriptors */
1398 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1399 		int last = (i == (dma_conf->dma_tx_size - 1));
1400 		struct dma_desc *p;
1401 
1402 		if (priv->extend_desc)
1403 			p = &tx_q->dma_etx[i].basic;
1404 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1405 			p = &tx_q->dma_entx[i].basic;
1406 		else
1407 			p = &tx_q->dma_tx[i];
1408 
1409 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1410 	}
1411 }
1412 
1413 /**
1414  * stmmac_clear_descriptors - clear descriptors
1415  * @priv: driver private structure
1416  * @dma_conf: structure to take the dma data
1417  * Description: this function is called to clear the TX and RX descriptors,
1418  * whether basic or extended descriptors are used.
1419  */
1420 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1421 				     struct stmmac_dma_conf *dma_conf)
1422 {
1423 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1424 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1425 	u32 queue;
1426 
1427 	/* Clear the RX descriptors */
1428 	for (queue = 0; queue < rx_queue_cnt; queue++)
1429 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1430 
1431 	/* Clear the TX descriptors */
1432 	for (queue = 0; queue < tx_queue_cnt; queue++)
1433 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1434 }
1435 
1436 /**
1437  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1438  * @priv: driver private structure
1439  * @dma_conf: structure to take the dma data
1440  * @p: descriptor pointer
1441  * @i: descriptor index
1442  * @flags: gfp flag
1443  * @queue: RX queue index
1444  * Description: this function is called to allocate a receive buffer, perform
1445  * the DMA mapping and init the descriptor.
1446  */
1447 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1448 				  struct stmmac_dma_conf *dma_conf,
1449 				  struct dma_desc *p,
1450 				  int i, gfp_t flags, u32 queue)
1451 {
1452 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1453 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1454 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1455 
1456 	if (priv->dma_cap.host_dma_width <= 32)
1457 		gfp |= GFP_DMA32;
1458 
1459 	if (!buf->page) {
1460 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1461 		if (!buf->page)
1462 			return -ENOMEM;
1463 		buf->page_offset = stmmac_rx_offset(priv);
1464 	}
1465 
1466 	if (priv->sph && !buf->sec_page) {
1467 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1468 		if (!buf->sec_page)
1469 			return -ENOMEM;
1470 
1471 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1472 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1473 	} else {
1474 		buf->sec_page = NULL;
1475 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1476 	}
1477 
1478 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1479 
1480 	stmmac_set_desc_addr(priv, p, buf->addr);
1481 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1482 		stmmac_init_desc3(priv, p);
1483 
1484 	return 0;
1485 }
1486 
1487 /**
1488  * stmmac_free_rx_buffer - free RX dma buffers
1489  * @priv: private structure
1490  * @rx_q: RX queue
1491  * @i: buffer index.
1492  */
1493 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1494 				  struct stmmac_rx_queue *rx_q,
1495 				  int i)
1496 {
1497 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1498 
1499 	if (buf->page)
1500 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1501 	buf->page = NULL;
1502 
1503 	if (buf->sec_page)
1504 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1505 	buf->sec_page = NULL;
1506 }
1507 
1508 /**
1509  * stmmac_free_tx_buffer - free TX dma buffers
1510  * @priv: private structure
1511  * @dma_conf: structure to take the dma data
1512  * @queue: TX queue index
1513  * @i: buffer index.
1514  */
1515 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1516 				  struct stmmac_dma_conf *dma_conf,
1517 				  u32 queue, int i)
1518 {
1519 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1520 
1521 	if (tx_q->tx_skbuff_dma[i].buf &&
1522 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1523 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1524 			dma_unmap_page(priv->device,
1525 				       tx_q->tx_skbuff_dma[i].buf,
1526 				       tx_q->tx_skbuff_dma[i].len,
1527 				       DMA_TO_DEVICE);
1528 		else
1529 			dma_unmap_single(priv->device,
1530 					 tx_q->tx_skbuff_dma[i].buf,
1531 					 tx_q->tx_skbuff_dma[i].len,
1532 					 DMA_TO_DEVICE);
1533 	}
1534 
1535 	if (tx_q->xdpf[i] &&
1536 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1537 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1538 		xdp_return_frame(tx_q->xdpf[i]);
1539 		tx_q->xdpf[i] = NULL;
1540 	}
1541 
1542 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1543 		tx_q->xsk_frames_done++;
1544 
1545 	if (tx_q->tx_skbuff[i] &&
1546 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1547 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1548 		tx_q->tx_skbuff[i] = NULL;
1549 	}
1550 
1551 	tx_q->tx_skbuff_dma[i].buf = 0;
1552 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1553 }
1554 
1555 /**
1556  * dma_free_rx_skbufs - free RX dma buffers
1557  * @priv: private structure
1558  * @dma_conf: structure to take the dma data
1559  * @queue: RX queue index
1560  */
1561 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1562 			       struct stmmac_dma_conf *dma_conf,
1563 			       u32 queue)
1564 {
1565 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1566 	int i;
1567 
1568 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1569 		stmmac_free_rx_buffer(priv, rx_q, i);
1570 }
1571 
1572 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1573 				   struct stmmac_dma_conf *dma_conf,
1574 				   u32 queue, gfp_t flags)
1575 {
1576 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1577 	int i;
1578 
1579 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1580 		struct dma_desc *p;
1581 		int ret;
1582 
1583 		if (priv->extend_desc)
1584 			p = &((rx_q->dma_erx + i)->basic);
1585 		else
1586 			p = rx_q->dma_rx + i;
1587 
1588 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1589 					     queue);
1590 		if (ret)
1591 			return ret;
1592 
1593 		rx_q->buf_alloc_num++;
1594 	}
1595 
1596 	return 0;
1597 }
1598 
1599 /**
1600  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1601  * @priv: private structure
1602  * @dma_conf: structure to take the dma data
1603  * @queue: RX queue index
1604  */
1605 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1606 				struct stmmac_dma_conf *dma_conf,
1607 				u32 queue)
1608 {
1609 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1610 	int i;
1611 
1612 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1613 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1614 
1615 		if (!buf->xdp)
1616 			continue;
1617 
1618 		xsk_buff_free(buf->xdp);
1619 		buf->xdp = NULL;
1620 	}
1621 }
1622 
1623 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1624 				      struct stmmac_dma_conf *dma_conf,
1625 				      u32 queue)
1626 {
1627 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1628 	int i;
1629 
1630 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1631 	 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1632 	 * use this macro to make sure there are no size violations.
1633 	 */
1634 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1635 
1636 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1637 		struct stmmac_rx_buffer *buf;
1638 		dma_addr_t dma_addr;
1639 		struct dma_desc *p;
1640 
1641 		if (priv->extend_desc)
1642 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1643 		else
1644 			p = rx_q->dma_rx + i;
1645 
1646 		buf = &rx_q->buf_pool[i];
1647 
1648 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1649 		if (!buf->xdp)
1650 			return -ENOMEM;
1651 
1652 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1653 		stmmac_set_desc_addr(priv, p, dma_addr);
1654 		rx_q->buf_alloc_num++;
1655 	}
1656 
1657 	return 0;
1658 }
1659 
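/**
 * stmmac_get_xsk_pool - get the XSK buffer pool bound to a queue
 * @priv: driver private structure
 * @queue: queue index
 * Description: return the XSK buffer pool registered for this queue when
 * XDP is enabled and the queue runs in AF_XDP zero-copy mode, else NULL.
 */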
1660 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1661 {
1662 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1663 		return NULL;
1664 
1665 	return xsk_get_pool_from_qid(priv->dev, queue);
1666 }
1667 
1668 /**
1669  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1670  * @priv: driver private structure
1671  * @dma_conf: structure to take the dma data
1672  * @queue: RX queue index
1673  * @flags: gfp flag.
1674  * Description: this function initializes the DMA RX descriptors
1675  * and allocates the socket buffers. It supports the chained and ring
1676  * modes.
1677  */
1678 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1679 				    struct stmmac_dma_conf *dma_conf,
1680 				    u32 queue, gfp_t flags)
1681 {
1682 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1683 	int ret;
1684 
1685 	netif_dbg(priv, probe, priv->dev,
1686 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1687 		  (u32)rx_q->dma_rx_phy);
1688 
1689 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1690 
1691 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1692 
1693 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1694 
1695 	if (rx_q->xsk_pool) {
1696 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1697 						   MEM_TYPE_XSK_BUFF_POOL,
1698 						   NULL));
1699 		netdev_info(priv->dev,
1700 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1701 			    rx_q->queue_index);
1702 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1703 	} else {
1704 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1705 						   MEM_TYPE_PAGE_POOL,
1706 						   rx_q->page_pool));
1707 		netdev_info(priv->dev,
1708 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1709 			    rx_q->queue_index);
1710 	}
1711 
1712 	if (rx_q->xsk_pool) {
1713 		/* RX XDP ZC buffer pool may not be populated, e.g.
1714 		 * xdpsock TX-only.
1715 		 */
1716 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1717 	} else {
1718 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1719 		if (ret < 0)
1720 			return -ENOMEM;
1721 	}
1722 
1723 	/* Setup the chained descriptor addresses */
1724 	if (priv->mode == STMMAC_CHAIN_MODE) {
1725 		if (priv->extend_desc)
1726 			stmmac_mode_init(priv, rx_q->dma_erx,
1727 					 rx_q->dma_rx_phy,
1728 					 dma_conf->dma_rx_size, 1);
1729 		else
1730 			stmmac_mode_init(priv, rx_q->dma_rx,
1731 					 rx_q->dma_rx_phy,
1732 					 dma_conf->dma_rx_size, 0);
1733 	}
1734 
1735 	return 0;
1736 }
1737 
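/**
 * init_dma_rx_desc_rings - init the RX descriptor rings (all queues)
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: initialize the RX ring of every queue in use; on failure,
 * release the buffers already allocated for the initialized queues.
 */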
1738 static int init_dma_rx_desc_rings(struct net_device *dev,
1739 				  struct stmmac_dma_conf *dma_conf,
1740 				  gfp_t flags)
1741 {
1742 	struct stmmac_priv *priv = netdev_priv(dev);
1743 	u32 rx_count = priv->plat->rx_queues_to_use;
1744 	int queue;
1745 	int ret;
1746 
1747 	/* RX INITIALIZATION */
1748 	netif_dbg(priv, probe, priv->dev,
1749 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1750 
1751 	for (queue = 0; queue < rx_count; queue++) {
1752 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1753 		if (ret)
1754 			goto err_init_rx_buffers;
1755 	}
1756 
1757 	return 0;
1758 
1759 err_init_rx_buffers:
1760 	while (queue >= 0) {
1761 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1762 
1763 		if (rx_q->xsk_pool)
1764 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1765 		else
1766 			dma_free_rx_skbufs(priv, dma_conf, queue);
1767 
1768 		rx_q->buf_alloc_num = 0;
1769 		rx_q->xsk_pool = NULL;
1770 
1771 		queue--;
1772 	}
1773 
1774 	return ret;
1775 }
1776 
1777 /**
1778  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1779  * @priv: driver private structure
1780  * @dma_conf: structure to take the dma data
1781  * @queue: TX queue index
1782  * Description: this function initializes the DMA TX descriptors
1783  * and allocates the socket buffers. It supports the chained and ring
1784  * modes.
1785  */
1786 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1787 				    struct stmmac_dma_conf *dma_conf,
1788 				    u32 queue)
1789 {
1790 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1791 	int i;
1792 
1793 	netif_dbg(priv, probe, priv->dev,
1794 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1795 		  (u32)tx_q->dma_tx_phy);
1796 
1797 	/* Setup the chained descriptor addresses */
1798 	if (priv->mode == STMMAC_CHAIN_MODE) {
1799 		if (priv->extend_desc)
1800 			stmmac_mode_init(priv, tx_q->dma_etx,
1801 					 tx_q->dma_tx_phy,
1802 					 dma_conf->dma_tx_size, 1);
1803 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1804 			stmmac_mode_init(priv, tx_q->dma_tx,
1805 					 tx_q->dma_tx_phy,
1806 					 dma_conf->dma_tx_size, 0);
1807 	}
1808 
1809 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1810 
1811 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1812 		struct dma_desc *p;
1813 
1814 		if (priv->extend_desc)
1815 			p = &((tx_q->dma_etx + i)->basic);
1816 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1817 			p = &((tx_q->dma_entx + i)->basic);
1818 		else
1819 			p = tx_q->dma_tx + i;
1820 
1821 		stmmac_clear_desc(priv, p);
1822 
1823 		tx_q->tx_skbuff_dma[i].buf = 0;
1824 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1825 		tx_q->tx_skbuff_dma[i].len = 0;
1826 		tx_q->tx_skbuff_dma[i].last_segment = false;
1827 		tx_q->tx_skbuff[i] = NULL;
1828 	}
1829 
1830 	return 0;
1831 }
1832 
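/**
 * init_dma_tx_desc_rings - init the TX descriptor rings (all queues)
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: initialize the TX descriptor ring of every TX queue in use.
 */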
1833 static int init_dma_tx_desc_rings(struct net_device *dev,
1834 				  struct stmmac_dma_conf *dma_conf)
1835 {
1836 	struct stmmac_priv *priv = netdev_priv(dev);
1837 	u32 tx_queue_cnt;
1838 	u32 queue;
1839 
1840 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1841 
1842 	for (queue = 0; queue < tx_queue_cnt; queue++)
1843 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1844 
1845 	return 0;
1846 }
1847 
1848 /**
1849  * init_dma_desc_rings - init the RX/TX descriptor rings
1850  * @dev: net device structure
1851  * @dma_conf: structure to take the dma data
1852  * @flags: gfp flag.
1853  * Description: this function initializes the DMA RX/TX descriptors
1854  * and allocates the socket buffers. It supports the chained and ring
1855  * modes.
1856  */
1857 static int init_dma_desc_rings(struct net_device *dev,
1858 			       struct stmmac_dma_conf *dma_conf,
1859 			       gfp_t flags)
1860 {
1861 	struct stmmac_priv *priv = netdev_priv(dev);
1862 	int ret;
1863 
1864 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1865 	if (ret)
1866 		return ret;
1867 
1868 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1869 
1870 	stmmac_clear_descriptors(priv, dma_conf);
1871 
1872 	if (netif_msg_hw(priv))
1873 		stmmac_display_rings(priv, dma_conf);
1874 
1875 	return ret;
1876 }
1877 
1878 /**
1879  * dma_free_tx_skbufs - free TX dma buffers
1880  * @priv: private structure
1881  * @dma_conf: structure to take the dma data
1882  * @queue: TX queue index
1883  */
1884 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1885 			       struct stmmac_dma_conf *dma_conf,
1886 			       u32 queue)
1887 {
1888 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1889 	int i;
1890 
1891 	tx_q->xsk_frames_done = 0;
1892 
1893 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1894 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1895 
1896 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1897 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1898 		tx_q->xsk_frames_done = 0;
1899 		tx_q->xsk_pool = NULL;
1900 	}
1901 }
1902 
1903 /**
1904  * stmmac_free_tx_skbufs - free TX skb buffers
1905  * @priv: private structure
1906  */
1907 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1908 {
1909 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1910 	u32 queue;
1911 
1912 	for (queue = 0; queue < tx_queue_cnt; queue++)
1913 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1914 }
1915 
1916 /**
1917  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1918  * @priv: private structure
1919  * @dma_conf: structure to take the dma data
1920  * @queue: RX queue index
1921  */
1922 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1923 					 struct stmmac_dma_conf *dma_conf,
1924 					 u32 queue)
1925 {
1926 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1927 
1928 	/* Release the DMA RX socket buffers */
1929 	if (rx_q->xsk_pool)
1930 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1931 	else
1932 		dma_free_rx_skbufs(priv, dma_conf, queue);
1933 
1934 	rx_q->buf_alloc_num = 0;
1935 	rx_q->xsk_pool = NULL;
1936 
1937 	/* Free DMA regions of consistent memory previously allocated */
1938 	if (!priv->extend_desc)
1939 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1940 				  sizeof(struct dma_desc),
1941 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1942 	else
1943 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1944 				  sizeof(struct dma_extended_desc),
1945 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1946 
1947 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1948 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1949 
1950 	kfree(rx_q->buf_pool);
1951 	if (rx_q->page_pool)
1952 		page_pool_destroy(rx_q->page_pool);
1953 }
1954 
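/**
 * free_dma_rx_desc_resources - free RX dma desc resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */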
1955 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1956 				       struct stmmac_dma_conf *dma_conf)
1957 {
1958 	u32 rx_count = priv->plat->rx_queues_to_use;
1959 	u32 queue;
1960 
1961 	/* Free RX queue resources */
1962 	for (queue = 0; queue < rx_count; queue++)
1963 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1964 }
1965 
1966 /**
1967  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1968  * @priv: private structure
1969  * @dma_conf: structure to take the dma data
1970  * @queue: TX queue index
1971  */
1972 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1973 					 struct stmmac_dma_conf *dma_conf,
1974 					 u32 queue)
1975 {
1976 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1977 	size_t size;
1978 	void *addr;
1979 
1980 	/* Release the DMA TX socket buffers */
1981 	dma_free_tx_skbufs(priv, dma_conf, queue);
1982 
1983 	if (priv->extend_desc) {
1984 		size = sizeof(struct dma_extended_desc);
1985 		addr = tx_q->dma_etx;
1986 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1987 		size = sizeof(struct dma_edesc);
1988 		addr = tx_q->dma_entx;
1989 	} else {
1990 		size = sizeof(struct dma_desc);
1991 		addr = tx_q->dma_tx;
1992 	}
1993 
1994 	size *= dma_conf->dma_tx_size;
1995 
1996 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1997 
1998 	kfree(tx_q->tx_skbuff_dma);
1999 	kfree(tx_q->tx_skbuff);
2000 }
2001 
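/**
 * free_dma_tx_desc_resources - free TX dma desc resources (all queues)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */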
2002 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2003 				       struct stmmac_dma_conf *dma_conf)
2004 {
2005 	u32 tx_count = priv->plat->tx_queues_to_use;
2006 	u32 queue;
2007 
2008 	/* Free TX queue resources */
2009 	for (queue = 0; queue < tx_count; queue++)
2010 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2011 }
2012 
2013 /**
2014  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2015  * @priv: private structure
2016  * @dma_conf: structure to take the dma data
2017  * @queue: RX queue index
2018  * Description: according to which descriptor can be used (extended or basic)
2019  * this function allocates the resources for the RX path: the page pool, the
2020  * buffer pool, the descriptor ring and the XDP RX queue info.
2022  */
2023 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2024 					 struct stmmac_dma_conf *dma_conf,
2025 					 u32 queue)
2026 {
2027 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2028 	struct stmmac_channel *ch = &priv->channel[queue];
2029 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2030 	struct page_pool_params pp_params = { 0 };
2031 	unsigned int num_pages;
2032 	unsigned int napi_id;
2033 	int ret;
2034 
2035 	rx_q->queue_index = queue;
2036 	rx_q->priv_data = priv;
2037 
2038 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2039 	pp_params.pool_size = dma_conf->dma_rx_size;
2040 	num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2041 	pp_params.order = ilog2(num_pages);
2042 	pp_params.nid = dev_to_node(priv->device);
2043 	pp_params.dev = priv->device;
2044 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2045 	pp_params.offset = stmmac_rx_offset(priv);
2046 	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2047 
2048 	rx_q->page_pool = page_pool_create(&pp_params);
2049 	if (IS_ERR(rx_q->page_pool)) {
2050 		ret = PTR_ERR(rx_q->page_pool);
2051 		rx_q->page_pool = NULL;
2052 		return ret;
2053 	}
2054 
2055 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2056 				 sizeof(*rx_q->buf_pool),
2057 				 GFP_KERNEL);
2058 	if (!rx_q->buf_pool)
2059 		return -ENOMEM;
2060 
2061 	if (priv->extend_desc) {
2062 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2063 						   dma_conf->dma_rx_size *
2064 						   sizeof(struct dma_extended_desc),
2065 						   &rx_q->dma_rx_phy,
2066 						   GFP_KERNEL);
2067 		if (!rx_q->dma_erx)
2068 			return -ENOMEM;
2069 
2070 	} else {
2071 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2072 						  dma_conf->dma_rx_size *
2073 						  sizeof(struct dma_desc),
2074 						  &rx_q->dma_rx_phy,
2075 						  GFP_KERNEL);
2076 		if (!rx_q->dma_rx)
2077 			return -ENOMEM;
2078 	}
2079 
2080 	if (stmmac_xdp_is_enabled(priv) &&
2081 	    test_bit(queue, priv->af_xdp_zc_qps))
2082 		napi_id = ch->rxtx_napi.napi_id;
2083 	else
2084 		napi_id = ch->rx_napi.napi_id;
2085 
2086 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2087 			       rx_q->queue_index,
2088 			       napi_id);
2089 	if (ret) {
2090 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2091 		return -EINVAL;
2092 	}
2093 
2094 	return 0;
2095 }
2096 
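/**
 * alloc_dma_rx_desc_resources - alloc RX resources (all queues).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocate the RX resources of every queue in use, releasing
 * what was already allocated if any queue fails.
 */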
2097 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2098 				       struct stmmac_dma_conf *dma_conf)
2099 {
2100 	u32 rx_count = priv->plat->rx_queues_to_use;
2101 	u32 queue;
2102 	int ret;
2103 
2104 	/* RX queues buffers and DMA */
2105 	for (queue = 0; queue < rx_count; queue++) {
2106 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2107 		if (ret)
2108 			goto err_dma;
2109 	}
2110 
2111 	return 0;
2112 
2113 err_dma:
2114 	free_dma_rx_desc_resources(priv, dma_conf);
2115 
2116 	return ret;
2117 }
2118 
2119 /**
2120  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2121  * @priv: private structure
2122  * @dma_conf: structure to take the dma data
2123  * @queue: TX queue index
2124  * Description: according to which descriptor can be used (extended or basic)
2125  * this function allocates the resources for the TX path: the tx_skbuff and
2126  * tx_skbuff_dma arrays and the DMA descriptor ring itself.
2128  */
2129 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2130 					 struct stmmac_dma_conf *dma_conf,
2131 					 u32 queue)
2132 {
2133 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2134 	size_t size;
2135 	void *addr;
2136 
2137 	tx_q->queue_index = queue;
2138 	tx_q->priv_data = priv;
2139 
2140 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2141 				      sizeof(*tx_q->tx_skbuff_dma),
2142 				      GFP_KERNEL);
2143 	if (!tx_q->tx_skbuff_dma)
2144 		return -ENOMEM;
2145 
2146 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2147 				  sizeof(struct sk_buff *),
2148 				  GFP_KERNEL);
2149 	if (!tx_q->tx_skbuff)
2150 		return -ENOMEM;
2151 
2152 	if (priv->extend_desc)
2153 		size = sizeof(struct dma_extended_desc);
2154 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2155 		size = sizeof(struct dma_edesc);
2156 	else
2157 		size = sizeof(struct dma_desc);
2158 
2159 	size *= dma_conf->dma_tx_size;
2160 
2161 	addr = dma_alloc_coherent(priv->device, size,
2162 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2163 	if (!addr)
2164 		return -ENOMEM;
2165 
2166 	if (priv->extend_desc)
2167 		tx_q->dma_etx = addr;
2168 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2169 		tx_q->dma_entx = addr;
2170 	else
2171 		tx_q->dma_tx = addr;
2172 
2173 	return 0;
2174 }
2175 
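/**
 * alloc_dma_tx_desc_resources - alloc TX resources (all queues).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: allocate the TX resources of every queue in use, releasing
 * what was already allocated if any queue fails.
 */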
2176 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2177 				       struct stmmac_dma_conf *dma_conf)
2178 {
2179 	u32 tx_count = priv->plat->tx_queues_to_use;
2180 	u32 queue;
2181 	int ret;
2182 
2183 	/* TX queues buffers and DMA */
2184 	for (queue = 0; queue < tx_count; queue++) {
2185 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2186 		if (ret)
2187 			goto err_dma;
2188 	}
2189 
2190 	return 0;
2191 
2192 err_dma:
2193 	free_dma_tx_desc_resources(priv, dma_conf);
2194 	return ret;
2195 }
2196 
2197 /**
2198  * alloc_dma_desc_resources - alloc TX/RX resources.
2199  * @priv: private structure
2200  * @dma_conf: structure to take the dma data
2201  * Description: according to which descriptor can be used (extended or basic)
2202  * this function allocates the resources for the TX and RX paths. In case of
2203  * reception, for example, it pre-allocates the RX socket buffers in order to
2204  * allow the zero-copy mechanism.
2205  */
2206 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2207 				    struct stmmac_dma_conf *dma_conf)
2208 {
2209 	/* RX Allocation */
2210 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2211 
2212 	if (ret)
2213 		return ret;
2214 
2215 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2216 
2217 	return ret;
2218 }
2219 
2220 /**
2221  * free_dma_desc_resources - free dma desc resources
2222  * @priv: private structure
2223  * @dma_conf: structure to take the dma data
2224  */
2225 static void free_dma_desc_resources(struct stmmac_priv *priv,
2226 				    struct stmmac_dma_conf *dma_conf)
2227 {
2228 	/* Release the DMA TX socket buffers */
2229 	free_dma_tx_desc_resources(priv, dma_conf);
2230 
2231 	/* Release the DMA RX socket buffers later
2232 	 * to ensure all pending XDP_TX buffers are returned.
2233 	 */
2234 	free_dma_rx_desc_resources(priv, dma_conf);
2235 }
2236 
2237 /**
2238  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2239  *  @priv: driver private structure
2240  *  Description: It is used for enabling the rx queues in the MAC
2241  */
2242 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2243 {
2244 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2245 	int queue;
2246 	u8 mode;
2247 
2248 	for (queue = 0; queue < rx_queues_count; queue++) {
2249 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2250 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2251 	}
2252 }
2253 
2254 /**
2255  * stmmac_start_rx_dma - start RX DMA channel
2256  * @priv: driver private structure
2257  * @chan: RX channel index
2258  * Description:
2259  * This starts a RX DMA channel
2260  */
2261 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2262 {
2263 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2264 	stmmac_start_rx(priv, priv->ioaddr, chan);
2265 }
2266 
2267 /**
2268  * stmmac_start_tx_dma - start TX DMA channel
2269  * @priv: driver private structure
2270  * @chan: TX channel index
2271  * Description:
2272  * This starts a TX DMA channel
2273  */
2274 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2275 {
2276 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2277 	stmmac_start_tx(priv, priv->ioaddr, chan);
2278 }
2279 
2280 /**
2281  * stmmac_stop_rx_dma - stop RX DMA channel
2282  * @priv: driver private structure
2283  * @chan: RX channel index
2284  * Description:
2285  * This stops a RX DMA channel
2286  */
2287 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2288 {
2289 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2290 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2291 }
2292 
2293 /**
2294  * stmmac_stop_tx_dma - stop TX DMA channel
2295  * @priv: driver private structure
2296  * @chan: TX channel index
2297  * Description:
2298  * This stops a TX DMA channel
2299  */
2300 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2301 {
2302 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2303 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2304 }
2305 
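/**
 * stmmac_enable_all_dma_irq - enable RX/TX DMA interrupts on all channels
 * @priv: driver private structure
 * Description: enable both RX and TX DMA interrupts for every DMA CSR
 * channel, taking the channel lock while touching the hardware.
 */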
2306 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2307 {
2308 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2309 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2310 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2311 	u32 chan;
2312 
2313 	for (chan = 0; chan < dma_csr_ch; chan++) {
2314 		struct stmmac_channel *ch = &priv->channel[chan];
2315 		unsigned long flags;
2316 
2317 		spin_lock_irqsave(&ch->lock, flags);
2318 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2319 		spin_unlock_irqrestore(&ch->lock, flags);
2320 	}
2321 }
2322 
2323 /**
2324  * stmmac_start_all_dma - start all RX and TX DMA channels
2325  * @priv: driver private structure
2326  * Description:
2327  * This starts all the RX and TX DMA channels
2328  */
2329 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2330 {
2331 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2332 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2333 	u32 chan = 0;
2334 
2335 	for (chan = 0; chan < rx_channels_count; chan++)
2336 		stmmac_start_rx_dma(priv, chan);
2337 
2338 	for (chan = 0; chan < tx_channels_count; chan++)
2339 		stmmac_start_tx_dma(priv, chan);
2340 }
2341 
2342 /**
2343  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2344  * @priv: driver private structure
2345  * Description:
2346  * This stops the RX and TX DMA channels
2347  */
2348 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2349 {
2350 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2351 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2352 	u32 chan = 0;
2353 
2354 	for (chan = 0; chan < rx_channels_count; chan++)
2355 		stmmac_stop_rx_dma(priv, chan);
2356 
2357 	for (chan = 0; chan < tx_channels_count; chan++)
2358 		stmmac_stop_tx_dma(priv, chan);
2359 }
2360 
2361 /**
2362  *  stmmac_dma_operation_mode - HW DMA operation mode
2363  *  @priv: driver private structure
2364  *  Description: it is used for configuring the DMA operation mode register in
2365  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2366  */
2367 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2368 {
2369 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2370 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2371 	int rxfifosz = priv->plat->rx_fifo_size;
2372 	int txfifosz = priv->plat->tx_fifo_size;
2373 	u32 txmode = 0;
2374 	u32 rxmode = 0;
2375 	u32 chan = 0;
2376 	u8 qmode = 0;
2377 
2378 	if (rxfifosz == 0)
2379 		rxfifosz = priv->dma_cap.rx_fifo_size;
2380 	if (txfifosz == 0)
2381 		txfifosz = priv->dma_cap.tx_fifo_size;
2382 
2383 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2384 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2385 		rxfifosz /= rx_channels_count;
2386 		txfifosz /= tx_channels_count;
2387 	}
2388 
2389 	if (priv->plat->force_thresh_dma_mode) {
2390 		txmode = tc;
2391 		rxmode = tc;
2392 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2393 		/*
2394 		 * In case of GMAC, SF mode can be enabled
2395 		 * to perform the TX COE in HW. This depends on:
2396 		 * 1) TX COE being actually supported
2397 		 * 2) there being no bugged Jumbo frame support
2398 		 *    that requires not inserting the csum in the TDES.
2399 		 */
2400 		txmode = SF_DMA_MODE;
2401 		rxmode = SF_DMA_MODE;
2402 		priv->xstats.threshold = SF_DMA_MODE;
2403 	} else {
2404 		txmode = tc;
2405 		rxmode = SF_DMA_MODE;
2406 	}
2407 
2408 	/* configure all channels */
2409 	for (chan = 0; chan < rx_channels_count; chan++) {
2410 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2411 		u32 buf_size;
2412 
2413 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2414 
2415 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2416 				rxfifosz, qmode);
2417 
2418 		if (rx_q->xsk_pool) {
2419 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2420 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 					      buf_size,
2422 					      chan);
2423 		} else {
2424 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2425 					      priv->dma_conf.dma_buf_sz,
2426 					      chan);
2427 		}
2428 	}
2429 
2430 	for (chan = 0; chan < tx_channels_count; chan++) {
2431 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2432 
2433 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2434 				txfifosz, qmode);
2435 	}
2436 }
2437 
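/**
 * stmmac_xsk_request_timestamp - XSK TX metadata timestamp request callback
 * @_priv: pointer to the stmmac_metadata_request for this descriptor
 * Description: enable TX timestamping on the descriptor and force the
 * interrupt-on-completion bit so the completion can be processed.
 */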
2438 static void stmmac_xsk_request_timestamp(void *_priv)
2439 {
2440 	struct stmmac_metadata_request *meta_req = _priv;
2441 
2442 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2443 	*meta_req->set_ic = true;
2444 }
2445 
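/**
 * stmmac_xsk_fill_timestamp - XSK TX metadata timestamp fill callback
 * @_priv: pointer to the stmmac_xsk_tx_complete for this descriptor
 * Description: read the TX hardware timestamp from the descriptor (or from
 * the MAC), adjust it for the CDC error and return it, or 0 if unavailable.
 */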
2446 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2447 {
2448 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2449 	struct stmmac_priv *priv = tx_compl->priv;
2450 	struct dma_desc *desc = tx_compl->desc;
2451 	bool found = false;
2452 	u64 ns = 0;
2453 
2454 	if (!priv->hwts_tx_en)
2455 		return 0;
2456 
2457 	/* check tx tstamp status */
2458 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2459 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2460 		found = true;
2461 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2462 		found = true;
2463 	}
2464 
2465 	if (found) {
2466 		ns -= priv->plat->cdc_error_adj;
2467 		return ns_to_ktime(ns);
2468 	}
2469 
2470 	return 0;
2471 }
2472 
2473 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2474 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2475 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2476 };
2477 
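/**
 * stmmac_xdp_xmit_zc - transmit frames from the XSK TX ring (zero-copy)
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: peek descriptors from the XSK pool and place them on the TX
 * ring shared with the slow path. Returns true if budget is left and no
 * XSK TX descriptors are pending.
 */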
2478 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2479 {
2480 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2481 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2482 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2483 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2484 	unsigned int entry = tx_q->cur_tx;
2485 	struct dma_desc *tx_desc = NULL;
2486 	struct xdp_desc xdp_desc;
2487 	bool work_done = true;
2488 	u32 tx_set_ic_bit = 0;
2489 
2490 	/* Avoids TX time-out as we are sharing with slow path */
2491 	txq_trans_cond_update(nq);
2492 
2493 	budget = min(budget, stmmac_tx_avail(priv, queue));
2494 
2495 	while (budget-- > 0) {
2496 		struct stmmac_metadata_request meta_req;
2497 		struct xsk_tx_metadata *meta = NULL;
2498 		dma_addr_t dma_addr;
2499 		bool set_ic;
2500 
2501 		/* We are sharing the TX ring with the slow path, so stop XSK TX desc
2502 		 * submission when the available TX ring space is below the threshold.
2503 		 */
2504 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2505 		    !netif_carrier_ok(priv->dev)) {
2506 			work_done = false;
2507 			break;
2508 		}
2509 
2510 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2511 			break;
2512 
2513 		if (priv->est && priv->est->enable &&
2514 		    priv->est->max_sdu[queue] &&
2515 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2516 			priv->xstats.max_sdu_txq_drop[queue]++;
2517 			continue;
2518 		}
2519 
2520 		if (likely(priv->extend_desc))
2521 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2522 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2523 			tx_desc = &tx_q->dma_entx[entry].basic;
2524 		else
2525 			tx_desc = tx_q->dma_tx + entry;
2526 
2527 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2528 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2529 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2530 
2531 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2532 
2533 		/* To return the XDP buffer to the XSK pool, we simply call
2534 		 * xsk_tx_completed(), so we don't need to fill up
2535 		 * 'buf' and 'xdpf'.
2536 		 */
2537 		tx_q->tx_skbuff_dma[entry].buf = 0;
2538 		tx_q->xdpf[entry] = NULL;
2539 
2540 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2541 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2542 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2543 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2544 
2545 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2546 
2547 		tx_q->tx_count_frames++;
2548 
2549 		if (!priv->tx_coal_frames[queue])
2550 			set_ic = false;
2551 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2552 			set_ic = true;
2553 		else
2554 			set_ic = false;
2555 
2556 		meta_req.priv = priv;
2557 		meta_req.tx_desc = tx_desc;
2558 		meta_req.set_ic = &set_ic;
2559 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2560 					&meta_req);
2561 		if (set_ic) {
2562 			tx_q->tx_count_frames = 0;
2563 			stmmac_set_tx_ic(priv, tx_desc);
2564 			tx_set_ic_bit++;
2565 		}
2566 
2567 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2568 				       true, priv->mode, true, true,
2569 				       xdp_desc.len);
2570 
2571 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2572 
2573 		xsk_tx_metadata_to_compl(meta,
2574 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2575 
2576 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2577 		entry = tx_q->cur_tx;
2578 	}
2579 	u64_stats_update_begin(&txq_stats->napi_syncp);
2580 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2581 	u64_stats_update_end(&txq_stats->napi_syncp);
2582 
2583 	if (tx_desc) {
2584 		stmmac_flush_tx_descriptors(priv, queue);
2585 		xsk_tx_release(pool);
2586 	}
2587 
2588 	/* Return true if both of the following conditions are met
2589 	 *  a) TX Budget is still available
2590 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2591 	 *     pending XSK TX for transmission)
2592 	 */
2593 	return !!budget && work_done;
2594 }
2595 
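/**
 * stmmac_bump_dma_threshold - raise the DMA TX threshold after an error
 * @priv: driver private structure
 * @chan: channel index
 * Description: when not in Store-And-Forward mode, bump the threshold (tc)
 * by 64, while it is still in range, and reprogram the DMA operation mode.
 */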
2596 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2597 {
2598 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2599 		tc += 64;
2600 
2601 		if (priv->plat->force_thresh_dma_mode)
2602 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2603 		else
2604 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2605 						      chan);
2606 
2607 		priv->xstats.threshold = tc;
2608 	}
2609 }
2610 
2611 /**
2612  * stmmac_tx_clean - to manage the transmission completion
2613  * @priv: driver private structure
2614  * @budget: napi budget limiting this function's packet handling
2615  * @queue: TX queue index
2616  * @pending_packets: signal to arm the TX coal timer
2617  * Description: it reclaims the transmit resources after transmission completes.
2618  * If some packets still need to be handled, due to TX coalescing, set
2619  * pending_packets to true to make NAPI arm the TX coal timer.
2620  */
2621 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2622 			   bool *pending_packets)
2623 {
2624 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2625 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2626 	unsigned int bytes_compl = 0, pkts_compl = 0;
2627 	unsigned int entry, xmits = 0, count = 0;
2628 	u32 tx_packets = 0, tx_errors = 0;
2629 
2630 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2631 
2632 	tx_q->xsk_frames_done = 0;
2633 
2634 	entry = tx_q->dirty_tx;
2635 
2636 	/* Try to clean all completed TX frames in one shot */
2637 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2638 		struct xdp_frame *xdpf;
2639 		struct sk_buff *skb;
2640 		struct dma_desc *p;
2641 		int status;
2642 
2643 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2644 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2645 			xdpf = tx_q->xdpf[entry];
2646 			skb = NULL;
2647 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2648 			xdpf = NULL;
2649 			skb = tx_q->tx_skbuff[entry];
2650 		} else {
2651 			xdpf = NULL;
2652 			skb = NULL;
2653 		}
2654 
2655 		if (priv->extend_desc)
2656 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2657 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2658 			p = &tx_q->dma_entx[entry].basic;
2659 		else
2660 			p = tx_q->dma_tx + entry;
2661 
2662 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2663 		/* Check if the descriptor is owned by the DMA */
2664 		if (unlikely(status & tx_dma_own))
2665 			break;
2666 
2667 		count++;
2668 
2669 		/* Make sure descriptor fields are read after reading
2670 		 * the own bit.
2671 		 */
2672 		dma_rmb();
2673 
2674 		/* Just consider the last segment and ...*/
2675 		if (likely(!(status & tx_not_ls))) {
2676 			/* ... verify the status error condition */
2677 			if (unlikely(status & tx_err)) {
2678 				tx_errors++;
2679 				if (unlikely(status & tx_err_bump_tc))
2680 					stmmac_bump_dma_threshold(priv, queue);
2681 			} else {
2682 				tx_packets++;
2683 			}
2684 			if (skb) {
2685 				stmmac_get_tx_hwtstamp(priv, p, skb);
2686 			} else if (tx_q->xsk_pool &&
2687 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2688 				struct stmmac_xsk_tx_complete tx_compl = {
2689 					.priv = priv,
2690 					.desc = p,
2691 				};
2692 
2693 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2694 							 &stmmac_xsk_tx_metadata_ops,
2695 							 &tx_compl);
2696 			}
2697 		}
2698 
2699 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2700 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2701 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2702 				dma_unmap_page(priv->device,
2703 					       tx_q->tx_skbuff_dma[entry].buf,
2704 					       tx_q->tx_skbuff_dma[entry].len,
2705 					       DMA_TO_DEVICE);
2706 			else
2707 				dma_unmap_single(priv->device,
2708 						 tx_q->tx_skbuff_dma[entry].buf,
2709 						 tx_q->tx_skbuff_dma[entry].len,
2710 						 DMA_TO_DEVICE);
2711 			tx_q->tx_skbuff_dma[entry].buf = 0;
2712 			tx_q->tx_skbuff_dma[entry].len = 0;
2713 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2714 		}
2715 
2716 		stmmac_clean_desc3(priv, tx_q, p);
2717 
2718 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2719 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2720 
2721 		if (xdpf &&
2722 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2723 			xdp_return_frame_rx_napi(xdpf);
2724 			tx_q->xdpf[entry] = NULL;
2725 		}
2726 
2727 		if (xdpf &&
2728 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2729 			xdp_return_frame(xdpf);
2730 			tx_q->xdpf[entry] = NULL;
2731 		}
2732 
2733 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2734 			tx_q->xsk_frames_done++;
2735 
2736 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2737 			if (likely(skb)) {
2738 				pkts_compl++;
2739 				bytes_compl += skb->len;
2740 				dev_consume_skb_any(skb);
2741 				tx_q->tx_skbuff[entry] = NULL;
2742 			}
2743 		}
2744 
2745 		stmmac_release_tx_desc(priv, p, priv->mode);
2746 
2747 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2748 	}
2749 	tx_q->dirty_tx = entry;
2750 
2751 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2752 				  pkts_compl, bytes_compl);
2753 
2754 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2755 								queue))) &&
2756 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2757 
2758 		netif_dbg(priv, tx_done, priv->dev,
2759 			  "%s: restart transmit\n", __func__);
2760 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2761 	}
2762 
2763 	if (tx_q->xsk_pool) {
2764 		bool work_done;
2765 
2766 		if (tx_q->xsk_frames_done)
2767 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2768 
2769 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2770 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2771 
2772 		/* For XSK TX, we try to send as many frames as possible.
2773 		 * If the XSK work is done (XSK TX desc ring empty and budget still
2774 		 * available), return "budget - 1" to reenable TX IRQ.
2775 		 * Else, return "budget" to make NAPI continue polling.
2776 		 */
2777 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2778 					       STMMAC_XSK_TX_BUDGET_MAX);
2779 		if (work_done)
2780 			xmits = budget - 1;
2781 		else
2782 			xmits = budget;
2783 	}
2784 
2785 	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2786 	    priv->eee_sw_timer_en) {
2787 		if (stmmac_enable_eee_mode(priv))
2788 			mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2789 	}
2790 
2791 	/* We still have pending packets, let's call for a new scheduling */
2792 	if (tx_q->dirty_tx != tx_q->cur_tx)
2793 		*pending_packets = true;
2794 
2795 	u64_stats_update_begin(&txq_stats->napi_syncp);
2796 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2797 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2798 	u64_stats_inc(&txq_stats->napi.tx_clean);
2799 	u64_stats_update_end(&txq_stats->napi_syncp);
2800 
2801 	priv->xstats.tx_errors += tx_errors;
2802 
2803 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2804 
2805 	/* Combine decisions from TX clean and XSK TX */
2806 	return max(count, xmits);
2807 }
2808 
2809 /**
2810  * stmmac_tx_err - to manage the tx error
2811  * @priv: driver private structure
2812  * @chan: channel index
2813  * Description: it cleans the descriptors and restarts the transmission
2814  * in case of transmission errors.
2815  */
2816 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2817 {
2818 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2819 
2820 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2821 
2822 	stmmac_stop_tx_dma(priv, chan);
2823 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2824 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2825 	stmmac_reset_tx_queue(priv, chan);
2826 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2827 			    tx_q->dma_tx_phy, chan);
2828 	stmmac_start_tx_dma(priv, chan);
2829 
2830 	priv->xstats.tx_errors++;
2831 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2832 }
2833 
2834 /**
2835  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2836  *  @priv: driver private structure
2837  *  @txmode: TX operating mode
2838  *  @rxmode: RX operating mode
2839  *  @chan: channel index
2840  *  Description: it is used for configuring the DMA operation mode at
2841  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2842  *  mode.
2843  */
2844 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2845 					  u32 rxmode, u32 chan)
2846 {
2847 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2848 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2849 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2850 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2851 	int rxfifosz = priv->plat->rx_fifo_size;
2852 	int txfifosz = priv->plat->tx_fifo_size;
2853 
2854 	if (rxfifosz == 0)
2855 		rxfifosz = priv->dma_cap.rx_fifo_size;
2856 	if (txfifosz == 0)
2857 		txfifosz = priv->dma_cap.tx_fifo_size;
2858 
2859 	/* Adjust for real per queue fifo size */
2860 	rxfifosz /= rx_channels_count;
2861 	txfifosz /= tx_channels_count;
2862 
2863 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2864 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2865 }
2866 
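/**
 * stmmac_safety_feat_interrupt - handle safety feature interrupts
 * @priv: driver private structure
 * Description: read the safety feature IRQ status; if an error is reported,
 * trigger the global error handling and return true.
 */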
2867 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2868 {
2869 	int ret;
2870 
2871 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2872 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2873 	if (ret && (ret != -EINVAL)) {
2874 		stmmac_global_err(priv);
2875 		return true;
2876 	}
2877 
2878 	return false;
2879 }
2880 
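/**
 * stmmac_napi_check - check the DMA status and schedule NAPI
 * @priv: driver private structure
 * @chan: channel index
 * @dir: interrupt direction to check (RX, TX or both)
 * Description: read the DMA interrupt status for the channel and, if RX or
 * TX work is pending, disable the corresponding DMA interrupt and schedule
 * the matching NAPI instance. Returns the raw status.
 */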
2881 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2882 {
2883 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2884 						 &priv->xstats, chan, dir);
2885 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2886 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2887 	struct stmmac_channel *ch = &priv->channel[chan];
2888 	struct napi_struct *rx_napi;
2889 	struct napi_struct *tx_napi;
2890 	unsigned long flags;
2891 
2892 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2893 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2894 
2895 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2896 		if (napi_schedule_prep(rx_napi)) {
2897 			spin_lock_irqsave(&ch->lock, flags);
2898 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2899 			spin_unlock_irqrestore(&ch->lock, flags);
2900 			__napi_schedule(rx_napi);
2901 		}
2902 	}
2903 
2904 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2905 		if (napi_schedule_prep(tx_napi)) {
2906 			spin_lock_irqsave(&ch->lock, flags);
2907 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2908 			spin_unlock_irqrestore(&ch->lock, flags);
2909 			__napi_schedule(tx_napi);
2910 		}
2911 	}
2912 
2913 	return status;
2914 }
2915 
2916 /**
2917  * stmmac_dma_interrupt - DMA ISR
2918  * @priv: driver private structure
2919  * Description: this is the DMA ISR. It is called by the main ISR.
2920  * It calls the dwmac dma routine and schedules the poll method in case
2921  * some work can be done.
2922  */
2923 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2924 {
2925 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2926 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2927 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2928 				tx_channel_count : rx_channel_count;
2929 	u32 chan;
2930 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2931 
2932 	/* Make sure we never check beyond our status buffer. */
2933 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2934 		channels_to_check = ARRAY_SIZE(status);
2935 
2936 	for (chan = 0; chan < channels_to_check; chan++)
2937 		status[chan] = stmmac_napi_check(priv, chan,
2938 						 DMA_DIR_RXTX);
2939 
2940 	for (chan = 0; chan < tx_channel_count; chan++) {
2941 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2942 			/* Try to bump up the dma threshold on this failure */
2943 			stmmac_bump_dma_threshold(priv, chan);
2944 		} else if (unlikely(status[chan] == tx_hard_error)) {
2945 			stmmac_tx_err(priv, chan);
2946 		}
2947 	}
2948 }
2949 
2950 /**
2951  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2952  * @priv: driver private structure
2953  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2954  */
2955 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2956 {
2957 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2958 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2959 
2960 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2961 
2962 	if (priv->dma_cap.rmon) {
2963 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2964 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2965 	} else
2966 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2967 }
2968 
2969 /**
2970  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2971  * @priv: driver private structure
2972  * Description:
2973  *  newer GMAC chip generations have a register to indicate the
2974  *  presence of the optional features/functions.
2975  *  This can also be used to override the value passed through the
2976  *  platform, which is necessary for old MAC10/100 and GMAC chips.
2977  */
2978 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2979 {
2980 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2981 }
2982 
2983 /**
2984  * stmmac_check_ether_addr - check if the MAC addr is valid
2985  * @priv: driver private structure
2986  * Description:
2987  * it verifies that the MAC address is valid; in case of failure it
2988  * generates a random MAC address
2989  */
2990 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2991 {
2992 	u8 addr[ETH_ALEN];
2993 
2994 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2995 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2996 		if (is_valid_ether_addr(addr))
2997 			eth_hw_addr_set(priv->dev, addr);
2998 		else
2999 			eth_hw_addr_random(priv->dev);
3000 		dev_info(priv->device, "device MAC address %pM\n",
3001 			 priv->dev->dev_addr);
3002 	}
3003 }
3004 
3005 /**
3006  * stmmac_init_dma_engine - DMA init.
3007  * @priv: driver private structure
3008  * Description:
3009  * It inits the DMA by invoking the specific MAC/GMAC callback.
3010  * Some DMA parameters can be passed from the platform;
3011  * in case these are not passed, a default is kept for the MAC or GMAC.
3012  */
3013 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3014 {
3015 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3016 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3017 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3018 	struct stmmac_rx_queue *rx_q;
3019 	struct stmmac_tx_queue *tx_q;
3020 	u32 chan = 0;
3021 	int ret = 0;
3022 
3023 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3024 		dev_err(priv->device, "Invalid DMA configuration\n");
3025 		return -EINVAL;
3026 	}
3027 
3028 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3029 		priv->plat->dma_cfg->atds = 1;
3030 
3031 	ret = stmmac_reset(priv, priv->ioaddr);
3032 	if (ret) {
3033 		dev_err(priv->device, "Failed to reset the dma\n");
3034 		return ret;
3035 	}
3036 
3037 	/* DMA Configuration */
3038 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3039 
3040 	if (priv->plat->axi)
3041 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3042 
3043 	/* DMA CSR Channel configuration */
3044 	for (chan = 0; chan < dma_csr_ch; chan++) {
3045 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3046 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3047 	}
3048 
3049 	/* DMA RX Channel Configuration */
3050 	for (chan = 0; chan < rx_channels_count; chan++) {
3051 		rx_q = &priv->dma_conf.rx_queue[chan];
3052 
3053 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3054 				    rx_q->dma_rx_phy, chan);
3055 
3056 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3057 				     (rx_q->buf_alloc_num *
3058 				      sizeof(struct dma_desc));
3059 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3060 				       rx_q->rx_tail_addr, chan);
3061 	}
3062 
3063 	/* DMA TX Channel Configuration */
3064 	for (chan = 0; chan < tx_channels_count; chan++) {
3065 		tx_q = &priv->dma_conf.tx_queue[chan];
3066 
3067 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3068 				    tx_q->dma_tx_phy, chan);
3069 
3070 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3071 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3072 				       tx_q->tx_tail_addr, chan);
3073 	}
3074 
3075 	return ret;
3076 }
3077 
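/**
 * stmmac_tx_timer_arm - (re)arm the TX coalescing timer
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: arm the TX coalescing hrtimer for this queue unless its NAPI
 * is already scheduled, in which case cancel any pending timer instead.
 */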
3078 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3079 {
3080 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3081 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3082 	struct stmmac_channel *ch;
3083 	struct napi_struct *napi;
3084 
3085 	if (!tx_coal_timer)
3086 		return;
3087 
3088 	ch = &priv->channel[tx_q->queue_index];
3089 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3090 
3091 	/* Arm timer only if napi is not already scheduled.
3092 	 * If napi is already scheduled, try to cancel any pending timer;
3093 	 * it will be armed again on the next scheduled napi.
3094 	 */
3095 	if (unlikely(!napi_is_scheduled(napi)))
3096 		hrtimer_start(&tx_q->txtimer,
3097 			      STMMAC_COAL_TIMER(tx_coal_timer),
3098 			      HRTIMER_MODE_REL);
3099 	else
3100 		hrtimer_try_to_cancel(&tx_q->txtimer);
3101 }
3102 
3103 /**
3104  * stmmac_tx_timer - mitigation sw timer for tx.
3105  * @t: data pointer
3106  * Description:
3107  * This is the timer handler to directly invoke the stmmac_tx_clean.
3108  */
3109 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3110 {
3111 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3112 	struct stmmac_priv *priv = tx_q->priv_data;
3113 	struct stmmac_channel *ch;
3114 	struct napi_struct *napi;
3115 
3116 	ch = &priv->channel[tx_q->queue_index];
3117 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3118 
3119 	if (likely(napi_schedule_prep(napi))) {
3120 		unsigned long flags;
3121 
3122 		spin_lock_irqsave(&ch->lock, flags);
3123 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3124 		spin_unlock_irqrestore(&ch->lock, flags);
3125 		__napi_schedule(napi);
3126 	}
3127 
3128 	return HRTIMER_NORESTART;
3129 }
3130 
3131 /**
3132  * stmmac_init_coalesce - init mitigation options.
3133  * @priv: driver private structure
3134  * Description:
3135  * This inits the coalesce parameters: i.e. timer rate,
3136  * timer handler and default threshold used for enabling the
3137  * interrupt on completion bit.
3138  */
3139 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3140 {
3141 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3142 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3143 	u32 chan;
3144 
3145 	for (chan = 0; chan < tx_channel_count; chan++) {
3146 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3147 
3148 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3149 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3150 
3151 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3152 		tx_q->txtimer.function = stmmac_tx_timer;
3153 	}
3154 
3155 	for (chan = 0; chan < rx_channel_count; chan++)
3156 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3157 }
3158 
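/**
 * stmmac_set_rings_length - program the descriptor ring lengths
 * @priv: driver private structure
 * Description: write the RX and TX descriptor ring lengths to the hardware
 * for every channel in use.
 */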
3159 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3160 {
3161 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3162 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3163 	u32 chan;
3164 
3165 	/* set TX ring length */
3166 	for (chan = 0; chan < tx_channels_count; chan++)
3167 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3168 				       (priv->dma_conf.dma_tx_size - 1), chan);
3169 
3170 	/* set RX ring length */
3171 	for (chan = 0; chan < rx_channels_count; chan++)
3172 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3173 				       (priv->dma_conf.dma_rx_size - 1), chan);
3174 }
3175 
3176 /**
3177  *  stmmac_set_tx_queue_weight - Set TX queue weight
3178  *  @priv: driver private structure
3179  *  Description: It is used for setting TX queue weights
3180  */
3181 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3182 {
3183 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3184 	u32 weight;
3185 	u32 queue;
3186 
3187 	for (queue = 0; queue < tx_queues_count; queue++) {
3188 		weight = priv->plat->tx_queues_cfg[queue].weight;
3189 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3190 	}
3191 }
3192 
3193 /**
3194  *  stmmac_configure_cbs - Configure CBS in TX queue
3195  *  @priv: driver private structure
3196  *  Description: It is used for configuring CBS in AVB TX queues
3197  */
3198 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3199 {
3200 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3201 	u32 mode_to_use;
3202 	u32 queue;
3203 
3204 	/* queue 0 is reserved for legacy traffic */
3205 	for (queue = 1; queue < tx_queues_count; queue++) {
3206 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3207 		if (mode_to_use == MTL_QUEUE_DCB)
3208 			continue;
3209 
3210 		stmmac_config_cbs(priv, priv->hw,
3211 				priv->plat->tx_queues_cfg[queue].send_slope,
3212 				priv->plat->tx_queues_cfg[queue].idle_slope,
3213 				priv->plat->tx_queues_cfg[queue].high_credit,
3214 				priv->plat->tx_queues_cfg[queue].low_credit,
3215 				queue);
3216 	}
3217 }
3218 
3219 /**
3220  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3221  *  @priv: driver private structure
3222  *  Description: It is used for mapping RX queues to RX dma channels
3223  */
3224 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3225 {
3226 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3227 	u32 queue;
3228 	u32 chan;
3229 
3230 	for (queue = 0; queue < rx_queues_count; queue++) {
3231 		chan = priv->plat->rx_queues_cfg[queue].chan;
3232 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3233 	}
3234 }
3235 
3236 /**
3237  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3238  *  @priv: driver private structure
3239  *  Description: It is used for configuring the RX Queue Priority
3240  */
3241 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3242 {
3243 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3244 	u32 queue;
3245 	u32 prio;
3246 
3247 	for (queue = 0; queue < rx_queues_count; queue++) {
3248 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3249 			continue;
3250 
3251 		prio = priv->plat->rx_queues_cfg[queue].prio;
3252 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3253 	}
3254 }
3255 
3256 /**
3257  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3258  *  @priv: driver private structure
3259  *  Description: It is used for configuring the TX Queue Priority
3260  */
3261 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3262 {
3263 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3264 	u32 queue;
3265 	u32 prio;
3266 
3267 	for (queue = 0; queue < tx_queues_count; queue++) {
3268 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3269 			continue;
3270 
3271 		prio = priv->plat->tx_queues_cfg[queue].prio;
3272 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3273 	}
3274 }
3275 
3276 /**
3277  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3278  *  @priv: driver private structure
3279  *  Description: It is used for configuring the RX queue routing
3280  */
3281 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3282 {
3283 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3284 	u32 queue;
3285 	u8 packet;
3286 
3287 	for (queue = 0; queue < rx_queues_count; queue++) {
3288 		/* no specific packet type routing specified for the queue */
3289 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3290 			continue;
3291 
3292 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3293 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3294 	}
3295 }
3296 
3297 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3298 {
3299 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3300 		priv->rss.enable = false;
3301 		return;
3302 	}
3303 
3304 	if (priv->dev->features & NETIF_F_RXHASH)
3305 		priv->rss.enable = true;
3306 	else
3307 		priv->rss.enable = false;
3308 
3309 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3310 			     priv->plat->rx_queues_to_use);
3311 }
3312 
3313 /**
3314  *  stmmac_mtl_configuration - Configure MTL
3315  *  @priv: driver private structure
3316  *  Description: It is used for configuring the MTL
3317  */
3318 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3319 {
3320 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3321 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3322 
3323 	if (tx_queues_count > 1)
3324 		stmmac_set_tx_queue_weight(priv);
3325 
3326 	/* Configure MTL RX algorithms */
3327 	if (rx_queues_count > 1)
3328 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3329 				priv->plat->rx_sched_algorithm);
3330 
3331 	/* Configure MTL TX algorithms */
3332 	if (tx_queues_count > 1)
3333 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3334 				priv->plat->tx_sched_algorithm);
3335 
3336 	/* Configure CBS in AVB TX queues */
3337 	if (tx_queues_count > 1)
3338 		stmmac_configure_cbs(priv);
3339 
3340 	/* Map RX MTL to DMA channels */
3341 	stmmac_rx_queue_dma_chan_map(priv);
3342 
3343 	/* Enable MAC RX Queues */
3344 	stmmac_mac_enable_rx_queues(priv);
3345 
3346 	/* Set RX priorities */
3347 	if (rx_queues_count > 1)
3348 		stmmac_mac_config_rx_queues_prio(priv);
3349 
3350 	/* Set TX priorities */
3351 	if (tx_queues_count > 1)
3352 		stmmac_mac_config_tx_queues_prio(priv);
3353 
3354 	/* Set RX routing */
3355 	if (rx_queues_count > 1)
3356 		stmmac_mac_config_rx_queues_routing(priv);
3357 
3358 	/* Receive Side Scaling */
3359 	if (rx_queues_count > 1)
3360 		stmmac_mac_config_rss(priv);
3361 }
3362 
3363 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3364 {
3365 	if (priv->dma_cap.asp) {
3366 		netdev_info(priv->dev, "Enabling Safety Features\n");
3367 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3368 					  priv->plat->safety_feat_cfg);
3369 	} else {
3370 		netdev_info(priv->dev, "No Safety Features support found\n");
3371 	}
3372 }
3373 
3374 /**
3375  * stmmac_hw_setup - setup mac in a usable state.
3376  *  @dev : pointer to the device structure.
3377  *  @ptp_register: register PTP if set
3378  *  Description:
3379  *  this is the main function to set up the HW in a usable state: the
3380  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3381  *  checksum features, timers), and the DMA is ready to start receiving
3382  *  and transmitting.
3383  *  Return value:
3384  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3385  *  file on failure.
3386  */
3387 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3388 {
3389 	struct stmmac_priv *priv = netdev_priv(dev);
3390 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3391 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3392 	bool sph_en;
3393 	u32 chan;
3394 	int ret;
3395 
3396 	/* Make sure RX clock is enabled */
3397 	if (priv->hw->phylink_pcs)
3398 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3399 
3400 	/* DMA initialization and SW reset */
3401 	ret = stmmac_init_dma_engine(priv);
3402 	if (ret < 0) {
3403 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3404 			   __func__);
3405 		return ret;
3406 	}
3407 
3408 	/* Copy the MAC addr into the HW  */
3409 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3410 
3411 	/* PS and related bits will be programmed according to the speed */
3412 	if (priv->hw->pcs) {
3413 		int speed = priv->plat->mac_port_sel_speed;
3414 
3415 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3416 		    (speed == SPEED_1000)) {
3417 			priv->hw->ps = speed;
3418 		} else {
3419 			dev_warn(priv->device, "invalid port speed\n");
3420 			priv->hw->ps = 0;
3421 		}
3422 	}
3423 
3424 	/* Initialize the MAC Core */
3425 	stmmac_core_init(priv, priv->hw, dev);
3426 
3427 	/* Initialize MTL */
3428 	stmmac_mtl_configuration(priv);
3429 
3430 	/* Initialize Safety Features */
3431 	stmmac_safety_feat_configuration(priv);
3432 
3433 	ret = stmmac_rx_ipc(priv, priv->hw);
3434 	if (!ret) {
3435 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3436 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3437 		priv->hw->rx_csum = 0;
3438 	}
3439 
3440 	/* Enable the MAC Rx/Tx */
3441 	stmmac_mac_set(priv, priv->ioaddr, true);
3442 
3443 	/* Set the HW DMA mode and the COE */
3444 	stmmac_dma_operation_mode(priv);
3445 
3446 	stmmac_mmc_setup(priv);
3447 
3448 	if (ptp_register) {
3449 		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3450 		if (ret < 0)
3451 			netdev_warn(priv->dev,
3452 				    "failed to enable PTP reference clock: %pe\n",
3453 				    ERR_PTR(ret));
3454 	}
3455 
3456 	ret = stmmac_init_ptp(priv);
3457 	if (ret == -EOPNOTSUPP)
3458 		netdev_info(priv->dev, "PTP not supported by HW\n");
3459 	else if (ret)
3460 		netdev_warn(priv->dev, "PTP init failed\n");
3461 	else if (ptp_register)
3462 		stmmac_ptp_register(priv);
3463 
3464 	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3465 
3466 	/* Convert the timer from msec to usec */
3467 	if (!priv->tx_lpi_timer)
3468 		priv->tx_lpi_timer = eee_timer * 1000;
3469 
3470 	if (priv->use_riwt) {
3471 		u32 queue;
3472 
3473 		for (queue = 0; queue < rx_cnt; queue++) {
3474 			if (!priv->rx_riwt[queue])
3475 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3476 
3477 			stmmac_rx_watchdog(priv, priv->ioaddr,
3478 					   priv->rx_riwt[queue], queue);
3479 		}
3480 	}
3481 
3482 	if (priv->hw->pcs)
3483 		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3484 
3485 	/* set TX and RX rings length */
3486 	stmmac_set_rings_length(priv);
3487 
3488 	/* Enable TSO */
3489 	if (priv->tso) {
3490 		for (chan = 0; chan < tx_cnt; chan++) {
3491 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3492 
3493 			/* TSO and TBS cannot co-exist */
3494 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3495 				continue;
3496 
3497 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3498 		}
3499 	}
3500 
3501 	/* Enable Split Header */
3502 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3503 	for (chan = 0; chan < rx_cnt; chan++)
3504 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3505 
3506 
3507 	/* VLAN Tag Insertion */
3508 	if (priv->dma_cap.vlins)
3509 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3510 
3511 	/* TBS */
3512 	for (chan = 0; chan < tx_cnt; chan++) {
3513 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3514 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3515 
3516 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3517 	}
3518 
3519 	/* Configure real RX and TX queues */
3520 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3521 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3522 
3523 	/* Start the ball rolling... */
3524 	stmmac_start_all_dma(priv);
3525 
3526 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3527 
3528 	return 0;
3529 }
3530 
3531 static void stmmac_hw_teardown(struct net_device *dev)
3532 {
3533 	struct stmmac_priv *priv = netdev_priv(dev);
3534 
3535 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
3536 }
3537 
3538 static void stmmac_free_irq(struct net_device *dev,
3539 			    enum request_irq_err irq_err, int irq_idx)
3540 {
3541 	struct stmmac_priv *priv = netdev_priv(dev);
3542 	int j;
3543 
3544 	switch (irq_err) {
3545 	case REQ_IRQ_ERR_ALL:
3546 		irq_idx = priv->plat->tx_queues_to_use;
3547 		fallthrough;
3548 	case REQ_IRQ_ERR_TX:
3549 		for (j = irq_idx - 1; j >= 0; j--) {
3550 			if (priv->tx_irq[j] > 0) {
3551 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3552 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3553 			}
3554 		}
3555 		irq_idx = priv->plat->rx_queues_to_use;
3556 		fallthrough;
3557 	case REQ_IRQ_ERR_RX:
3558 		for (j = irq_idx - 1; j >= 0; j--) {
3559 			if (priv->rx_irq[j] > 0) {
3560 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3561 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3562 			}
3563 		}
3564 
3565 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3566 			free_irq(priv->sfty_ue_irq, dev);
3567 		fallthrough;
3568 	case REQ_IRQ_ERR_SFTY_UE:
3569 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3570 			free_irq(priv->sfty_ce_irq, dev);
3571 		fallthrough;
3572 	case REQ_IRQ_ERR_SFTY_CE:
3573 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3574 			free_irq(priv->lpi_irq, dev);
3575 		fallthrough;
3576 	case REQ_IRQ_ERR_LPI:
3577 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3578 			free_irq(priv->wol_irq, dev);
3579 		fallthrough;
3580 	case REQ_IRQ_ERR_SFTY:
3581 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3582 			free_irq(priv->sfty_irq, dev);
3583 		fallthrough;
3584 	case REQ_IRQ_ERR_WOL:
3585 		free_irq(dev->irq, dev);
3586 		fallthrough;
3587 	case REQ_IRQ_ERR_MAC:
3588 	case REQ_IRQ_ERR_NO:
3589 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3590 		break;
3591 	}
3592 }
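
/* Editor's note: a minimal stand-alone sketch of the teardown idiom used by
 * stmmac_free_irq() above: a switch whose cases fall through, so entering at
 * the point where setup failed releases everything acquired before it, in
 * reverse order. The steps below are hypothetical and only illustrate the
 * pattern.
 */
enum example_unwind_point { EX_ERR_ALL, EX_ERR_STEP_C, EX_ERR_STEP_B, EX_ERR_NONE };

static void example_unwind(enum example_unwind_point where, int *undo_b, int *undo_a)
{
	switch (where) {
	case EX_ERR_ALL:
	case EX_ERR_STEP_C:
		*undo_b = 1;	/* step C failed: undo what step B acquired */
		fallthrough;
	case EX_ERR_STEP_B:
		*undo_a = 1;	/* step B failed: undo what step A acquired */
		fallthrough;
	case EX_ERR_NONE:
		break;		/* step A failed: nothing to undo */
	}
}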
3593 
3594 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3595 {
3596 	struct stmmac_priv *priv = netdev_priv(dev);
3597 	enum request_irq_err irq_err;
3598 	cpumask_t cpu_mask;
3599 	int irq_idx = 0;
3600 	char *int_name;
3601 	int ret;
3602 	int i;
3603 
3604 	/* For common interrupt */
3605 	int_name = priv->int_name_mac;
3606 	sprintf(int_name, "%s:%s", dev->name, "mac");
3607 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3608 			  0, int_name, dev);
3609 	if (unlikely(ret < 0)) {
3610 		netdev_err(priv->dev,
3611 			   "%s: alloc mac MSI %d (error: %d)\n",
3612 			   __func__, dev->irq, ret);
3613 		irq_err = REQ_IRQ_ERR_MAC;
3614 		goto irq_error;
3615 	}
3616 
3617 	/* Request the Wake IRQ in case another line
3618 	 * is used for WoL
3619 	 */
3620 	priv->wol_irq_disabled = true;
3621 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3622 		int_name = priv->int_name_wol;
3623 		sprintf(int_name, "%s:%s", dev->name, "wol");
3624 		ret = request_irq(priv->wol_irq,
3625 				  stmmac_mac_interrupt,
3626 				  0, int_name, dev);
3627 		if (unlikely(ret < 0)) {
3628 			netdev_err(priv->dev,
3629 				   "%s: alloc wol MSI %d (error: %d)\n",
3630 				   __func__, priv->wol_irq, ret);
3631 			irq_err = REQ_IRQ_ERR_WOL;
3632 			goto irq_error;
3633 		}
3634 	}
3635 
3636 	/* Request the LPI IRQ in case another line
3637 	 * is used for LPI
3638 	 */
3639 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3640 		int_name = priv->int_name_lpi;
3641 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3642 		ret = request_irq(priv->lpi_irq,
3643 				  stmmac_mac_interrupt,
3644 				  0, int_name, dev);
3645 		if (unlikely(ret < 0)) {
3646 			netdev_err(priv->dev,
3647 				   "%s: alloc lpi MSI %d (error: %d)\n",
3648 				   __func__, priv->lpi_irq, ret);
3649 			irq_err = REQ_IRQ_ERR_LPI;
3650 			goto irq_error;
3651 		}
3652 	}
3653 
3654 	/* Request the common Safety Feature Correctable/Uncorrectable
3655 	 * Error line in case another line is used
3656 	 */
3657 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3658 		int_name = priv->int_name_sfty;
3659 		sprintf(int_name, "%s:%s", dev->name, "safety");
3660 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3661 				  0, int_name, dev);
3662 		if (unlikely(ret < 0)) {
3663 			netdev_err(priv->dev,
3664 				   "%s: alloc sfty MSI %d (error: %d)\n",
3665 				   __func__, priv->sfty_irq, ret);
3666 			irq_err = REQ_IRQ_ERR_SFTY;
3667 			goto irq_error;
3668 		}
3669 	}
3670 
3671 	/* Request the Safety Feature Correctable Error line in
3672 	 * case another line is used
3673 	 */
3674 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3675 		int_name = priv->int_name_sfty_ce;
3676 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3677 		ret = request_irq(priv->sfty_ce_irq,
3678 				  stmmac_safety_interrupt,
3679 				  0, int_name, dev);
3680 		if (unlikely(ret < 0)) {
3681 			netdev_err(priv->dev,
3682 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3683 				   __func__, priv->sfty_ce_irq, ret);
3684 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3685 			goto irq_error;
3686 		}
3687 	}
3688 
3689 	/* Request the Safety Feature Uncorrectable Error line in
3690 	 * case another line is used
3691 	 */
3692 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3693 		int_name = priv->int_name_sfty_ue;
3694 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3695 		ret = request_irq(priv->sfty_ue_irq,
3696 				  stmmac_safety_interrupt,
3697 				  0, int_name, dev);
3698 		if (unlikely(ret < 0)) {
3699 			netdev_err(priv->dev,
3700 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3701 				   __func__, priv->sfty_ue_irq, ret);
3702 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3703 			goto irq_error;
3704 		}
3705 	}
3706 
3707 	/* Request Rx MSI irq */
3708 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3709 		if (i >= MTL_MAX_RX_QUEUES)
3710 			break;
3711 		if (priv->rx_irq[i] == 0)
3712 			continue;
3713 
3714 		int_name = priv->int_name_rx_irq[i];
3715 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3716 		ret = request_irq(priv->rx_irq[i],
3717 				  stmmac_msi_intr_rx,
3718 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3719 		if (unlikely(ret < 0)) {
3720 			netdev_err(priv->dev,
3721 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3722 				   __func__, i, priv->rx_irq[i], ret);
3723 			irq_err = REQ_IRQ_ERR_RX;
3724 			irq_idx = i;
3725 			goto irq_error;
3726 		}
3727 		cpumask_clear(&cpu_mask);
3728 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3729 		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3730 	}
3731 
3732 	/* Request Tx MSI irq */
3733 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3734 		if (i >= MTL_MAX_TX_QUEUES)
3735 			break;
3736 		if (priv->tx_irq[i] == 0)
3737 			continue;
3738 
3739 		int_name = priv->int_name_tx_irq[i];
3740 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3741 		ret = request_irq(priv->tx_irq[i],
3742 				  stmmac_msi_intr_tx,
3743 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3744 		if (unlikely(ret < 0)) {
3745 			netdev_err(priv->dev,
3746 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3747 				   __func__, i, priv->tx_irq[i], ret);
3748 			irq_err = REQ_IRQ_ERR_TX;
3749 			irq_idx = i;
3750 			goto irq_error;
3751 		}
3752 		cpumask_clear(&cpu_mask);
3753 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3754 		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3755 	}
3756 
3757 	return 0;
3758 
3759 irq_error:
3760 	stmmac_free_irq(dev, irq_err, irq_idx);
3761 	return ret;
3762 }
3763 
3764 static int stmmac_request_irq_single(struct net_device *dev)
3765 {
3766 	struct stmmac_priv *priv = netdev_priv(dev);
3767 	enum request_irq_err irq_err;
3768 	int ret;
3769 
3770 	ret = request_irq(dev->irq, stmmac_interrupt,
3771 			  IRQF_SHARED, dev->name, dev);
3772 	if (unlikely(ret < 0)) {
3773 		netdev_err(priv->dev,
3774 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3775 			   __func__, dev->irq, ret);
3776 		irq_err = REQ_IRQ_ERR_MAC;
3777 		goto irq_error;
3778 	}
3779 
3780 	/* Request the Wake IRQ in case another line
3781 	 * is used for WoL
3782 	 */
3783 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3784 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3785 				  IRQF_SHARED, dev->name, dev);
3786 		if (unlikely(ret < 0)) {
3787 			netdev_err(priv->dev,
3788 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3789 				   __func__, priv->wol_irq, ret);
3790 			irq_err = REQ_IRQ_ERR_WOL;
3791 			goto irq_error;
3792 		}
3793 	}
3794 
3795 	/* Request the IRQ lines */
3796 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3797 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3798 				  IRQF_SHARED, dev->name, dev);
3799 		if (unlikely(ret < 0)) {
3800 			netdev_err(priv->dev,
3801 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3802 				   __func__, priv->lpi_irq, ret);
3803 			irq_err = REQ_IRQ_ERR_LPI;
3804 			goto irq_error;
3805 		}
3806 	}
3807 
3808 	/* Request the common Safety Feature Correctable/Uncorrectable
3809 	 * Error line in case another line is used
3810 	 */
3811 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3812 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3813 				  IRQF_SHARED, dev->name, dev);
3814 		if (unlikely(ret < 0)) {
3815 			netdev_err(priv->dev,
3816 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3817 				   __func__, priv->sfty_irq, ret);
3818 			irq_err = REQ_IRQ_ERR_SFTY;
3819 			goto irq_error;
3820 		}
3821 	}
3822 
3823 	return 0;
3824 
3825 irq_error:
3826 	stmmac_free_irq(dev, irq_err, 0);
3827 	return ret;
3828 }
3829 
3830 static int stmmac_request_irq(struct net_device *dev)
3831 {
3832 	struct stmmac_priv *priv = netdev_priv(dev);
3833 	int ret;
3834 
3835 	/* Request the IRQ lines */
3836 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3837 		ret = stmmac_request_irq_multi_msi(dev);
3838 	else
3839 		ret = stmmac_request_irq_single(dev);
3840 
3841 	return ret;
3842 }
3843 
3844 /**
3845  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3846  *  @priv: driver private structure
3847  *  @mtu: MTU to setup the dma queue and buf with
3848  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3849  *  Allocate the Tx/Rx DMA queue and init them.
3850  *  Return value:
3851  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3852  */
3853 static struct stmmac_dma_conf *
3854 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3855 {
3856 	struct stmmac_dma_conf *dma_conf;
3857 	int chan, bfsize, ret;
3858 
3859 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3860 	if (!dma_conf) {
3861 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3862 			   __func__);
3863 		return ERR_PTR(-ENOMEM);
3864 	}
3865 
3866 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3867 	if (bfsize < 0)
3868 		bfsize = 0;
3869 
3870 	if (bfsize < BUF_SIZE_16KiB)
3871 		bfsize = stmmac_set_bfsize(mtu, 0);
3872 
3873 	dma_conf->dma_buf_sz = bfsize;
3874 	/* Choose the TX/RX ring sizes from the ones already defined in
3875 	 * the priv struct, if any.
3876 	 */
3877 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3878 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3879 
3880 	if (!dma_conf->dma_tx_size)
3881 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3882 	if (!dma_conf->dma_rx_size)
3883 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3884 
3885 	/* Earlier check for TBS */
3886 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3887 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3888 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3889 
3890 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3891 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3892 	}
3893 
3894 	ret = alloc_dma_desc_resources(priv, dma_conf);
3895 	if (ret < 0) {
3896 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3897 			   __func__);
3898 		goto alloc_error;
3899 	}
3900 
3901 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3902 	if (ret < 0) {
3903 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3904 			   __func__);
3905 		goto init_error;
3906 	}
3907 
3908 	return dma_conf;
3909 
3910 init_error:
3911 	free_dma_desc_resources(priv, dma_conf);
3912 alloc_error:
3913 	kfree(dma_conf);
3914 	return ERR_PTR(ret);
3915 }
3916 
3917 /**
3918  *  __stmmac_open - open entry point of the driver
3919  *  @dev : pointer to the device structure.
3920  *  @dma_conf :  structure to take the dma data
3921  *  Description:
3922  *  This function is the open entry point of the driver.
3923  *  Return value:
3924  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3925  *  file on failure.
3926  */
3927 static int __stmmac_open(struct net_device *dev,
3928 			 struct stmmac_dma_conf *dma_conf)
3929 {
3930 	struct stmmac_priv *priv = netdev_priv(dev);
3931 	int mode = priv->plat->phy_interface;
3932 	u32 chan;
3933 	int ret;
3934 
3935 	ret = pm_runtime_resume_and_get(priv->device);
3936 	if (ret < 0)
3937 		return ret;
3938 
3939 	if ((!priv->hw->xpcs ||
3940 	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3941 		ret = stmmac_init_phy(dev);
3942 		if (ret) {
3943 			netdev_err(priv->dev,
3944 				   "%s: Cannot attach to PHY (error: %d)\n",
3945 				   __func__, ret);
3946 			goto init_phy_error;
3947 		}
3948 	}
3949 
3950 	priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3951 
3952 	buf_sz = dma_conf->dma_buf_sz;
3953 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3954 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3955 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3956 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3957 
3958 	stmmac_reset_queues_param(priv);
3959 
3960 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3961 	    priv->plat->serdes_powerup) {
3962 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3963 		if (ret < 0) {
3964 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3965 				   __func__);
3966 			goto init_error;
3967 		}
3968 	}
3969 
3970 	ret = stmmac_hw_setup(dev, true);
3971 	if (ret < 0) {
3972 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3973 		goto init_error;
3974 	}
3975 
3976 	stmmac_init_coalesce(priv);
3977 
3978 	phylink_start(priv->phylink);
3979 	/* We may have called phylink_speed_down before */
3980 	phylink_speed_up(priv->phylink);
3981 
3982 	ret = stmmac_request_irq(dev);
3983 	if (ret)
3984 		goto irq_error;
3985 
3986 	stmmac_enable_all_queues(priv);
3987 	netif_tx_start_all_queues(priv->dev);
3988 	stmmac_enable_all_dma_irq(priv);
3989 
3990 	return 0;
3991 
3992 irq_error:
3993 	phylink_stop(priv->phylink);
3994 
3995 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3996 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3997 
3998 	stmmac_hw_teardown(dev);
3999 init_error:
4000 	phylink_disconnect_phy(priv->phylink);
4001 init_phy_error:
4002 	pm_runtime_put(priv->device);
4003 	return ret;
4004 }
4005 
4006 static int stmmac_open(struct net_device *dev)
4007 {
4008 	struct stmmac_priv *priv = netdev_priv(dev);
4009 	struct stmmac_dma_conf *dma_conf;
4010 	int ret;
4011 
4012 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4013 	if (IS_ERR(dma_conf))
4014 		return PTR_ERR(dma_conf);
4015 
4016 	ret = __stmmac_open(dev, dma_conf);
4017 	if (ret)
4018 		free_dma_desc_resources(priv, dma_conf);
4019 
4020 	kfree(dma_conf);
4021 	return ret;
4022 }
4023 
4024 /**
4025  *  stmmac_release - close entry point of the driver
4026  *  @dev : device pointer.
4027  *  Description:
4028  *  This is the stop entry point of the driver.
4029  */
4030 static int stmmac_release(struct net_device *dev)
4031 {
4032 	struct stmmac_priv *priv = netdev_priv(dev);
4033 	u32 chan;
4034 
4035 	if (device_may_wakeup(priv->device))
4036 		phylink_speed_down(priv->phylink, false);
4037 	/* Stop and disconnect the PHY */
4038 	phylink_stop(priv->phylink);
4039 	phylink_disconnect_phy(priv->phylink);
4040 
4041 	stmmac_disable_all_queues(priv);
4042 
4043 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4044 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4045 
4046 	netif_tx_disable(dev);
4047 
4048 	/* Free the IRQ lines */
4049 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4050 
4051 	if (priv->eee_enabled) {
4052 		priv->tx_path_in_lpi_mode = false;
4053 		del_timer_sync(&priv->eee_ctrl_timer);
4054 	}
4055 
4056 	/* Stop TX/RX DMA and clear the descriptors */
4057 	stmmac_stop_all_dma(priv);
4058 
4059 	/* Release and free the Rx/Tx resources */
4060 	free_dma_desc_resources(priv, &priv->dma_conf);
4061 
4062 	/* Disable the MAC Rx/Tx */
4063 	stmmac_mac_set(priv, priv->ioaddr, false);
4064 
4065 	/* Power down the SerDes if present */
4066 	if (priv->plat->serdes_powerdown)
4067 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4068 
4069 	stmmac_release_ptp(priv);
4070 
4071 	if (priv->dma_cap.fpesel)
4072 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4073 
4074 	pm_runtime_put(priv->device);
4075 
4076 	return 0;
4077 }
4078 
4079 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4080 			       struct stmmac_tx_queue *tx_q)
4081 {
4082 	u16 tag = 0x0, inner_tag = 0x0;
4083 	u32 inner_type = 0x0;
4084 	struct dma_desc *p;
4085 
4086 	if (!priv->dma_cap.vlins)
4087 		return false;
4088 	if (!skb_vlan_tag_present(skb))
4089 		return false;
4090 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4091 		inner_tag = skb_vlan_tag_get(skb);
4092 		inner_type = STMMAC_VLAN_INSERT;
4093 	}
4094 
4095 	tag = skb_vlan_tag_get(skb);
4096 
4097 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4098 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4099 	else
4100 		p = &tx_q->dma_tx[tx_q->cur_tx];
4101 
4102 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4103 		return false;
4104 
4105 	stmmac_set_tx_owner(priv, p);
4106 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4107 	return true;
4108 }
4109 
4110 /**
4111  *  stmmac_tso_allocator - Allocate and fill TSO payload descriptors
4112  *  @priv: driver private structure
4113  *  @des: buffer start address
4114  *  @total_len: total length to fill in descriptors
4115  *  @last_segment: condition for the last descriptor
4116  *  @queue: TX queue index
4117  *  Description:
4118  *  This function fills descriptors and requests new descriptors according
4119  *  to the buffer length to fill
4120  */
4121 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4122 				 int total_len, bool last_segment, u32 queue)
4123 {
4124 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4125 	struct dma_desc *desc;
4126 	u32 buff_size;
4127 	int tmp_len;
4128 
4129 	tmp_len = total_len;
4130 
4131 	while (tmp_len > 0) {
4132 		dma_addr_t curr_addr;
4133 
4134 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4135 						priv->dma_conf.dma_tx_size);
4136 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4137 
4138 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4139 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4140 		else
4141 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4142 
4143 		curr_addr = des + (total_len - tmp_len);
4144 		if (priv->dma_cap.addr64 <= 32)
4145 			desc->des0 = cpu_to_le32(curr_addr);
4146 		else
4147 			stmmac_set_desc_addr(priv, desc, curr_addr);
4148 
4149 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4150 			    TSO_MAX_BUFF_SIZE : tmp_len;
4151 
4152 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4153 				0, 1,
4154 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4155 				0, 0);
4156 
4157 		tmp_len -= TSO_MAX_BUFF_SIZE;
4158 	}
4159 }
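
/* Editor's note: a stand-alone model of the loop above, showing only the
 * chunking arithmetic: a payload of total_len bytes is spread over
 * descriptors carrying at most TSO_MAX_BUFF_SIZE bytes each; the DMA and
 * descriptor programming are omitted.
 */
static int tso_chunk_count_example(unsigned int total_len)
{
	int tmp_len = total_len;
	int ndesc = 0;

	while (tmp_len > 0) {
		u32 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
				TSO_MAX_BUFF_SIZE : tmp_len;

		ndesc++;		/* one descriptor per chunk */
		(void)buff_size;	/* would be written into that descriptor */
		tmp_len -= TSO_MAX_BUFF_SIZE;
	}

	return ndesc;
}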
4160 
4161 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4162 {
4163 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4164 	int desc_size;
4165 
4166 	if (likely(priv->extend_desc))
4167 		desc_size = sizeof(struct dma_extended_desc);
4168 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4169 		desc_size = sizeof(struct dma_edesc);
4170 	else
4171 		desc_size = sizeof(struct dma_desc);
4172 
4173 	/* The own bit must be the latest setting done when preparing the
4174 	 * descriptor, and then a barrier is needed to make sure that
4175 	 * all is coherent before granting the DMA engine.
4176 	 */
4177 	wmb();
4178 
4179 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4180 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4181 }
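
/* Editor's note: a small sketch of the tail-pointer arithmetic above. The
 * address written to the hardware is simply the ring base plus the producer
 * index scaled by whichever descriptor layout (extended, enhanced-TBS or
 * basic) the queue uses.
 */
static dma_addr_t tx_tail_addr_example(dma_addr_t dma_tx_phy,
				       unsigned int cur_tx,
				       unsigned int desc_size)
{
	return dma_tx_phy + (dma_addr_t)cur_tx * desc_size;
}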
4182 
4183 /**
4184  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4185  *  @skb : the socket buffer
4186  *  @dev : device pointer
4187  *  Description: this is the transmit function that is called on TSO frames
4188  *  (support available on GMAC4 and newer chips).
4189  *  The diagram below shows the ring programming for TSO frames:
4190  *
4191  *  First Descriptor
4192  *   --------
4193  *   | DES0 |---> buffer1 = L2/L3/L4 header
4194  *   | DES1 |---> TCP Payload (can continue on next descr...)
4195  *   | DES2 |---> buffer 1 and 2 len
4196  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4197  *   --------
4198  *	|
4199  *     ...
4200  *	|
4201  *   --------
4202  *   | DES0 | --| Split TCP Payload on Buffers 1 and 2
4203  *   | DES1 | --|
4204  *   | DES2 | --> buffer 1 and 2 len
4205  *   | DES3 |
4206  *   --------
4207  *
4208  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field need not be reprogrammed per frame.
4209  */
4210 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4211 {
4212 	struct dma_desc *desc, *first, *mss_desc = NULL;
4213 	struct stmmac_priv *priv = netdev_priv(dev);
4214 	int tmp_pay_len = 0, first_tx, nfrags;
4215 	unsigned int first_entry, tx_packets;
4216 	struct stmmac_txq_stats *txq_stats;
4217 	struct stmmac_tx_queue *tx_q;
4218 	u32 pay_len, mss, queue;
4219 	u8 proto_hdr_len, hdr;
4220 	dma_addr_t des;
4221 	bool set_ic;
4222 	int i;
4223 
4224 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4225 	 *
4226 	 * Never let the HW insert the VLAN tag, since segments split by the
4227 	 * TSO engine would be un-tagged by mistake.
4228 	 */
4229 	if (skb_vlan_tag_present(skb)) {
4230 		skb = __vlan_hwaccel_push_inside(skb);
4231 		if (unlikely(!skb)) {
4232 			priv->xstats.tx_dropped++;
4233 			return NETDEV_TX_OK;
4234 		}
4235 	}
4236 
4237 	nfrags = skb_shinfo(skb)->nr_frags;
4238 	queue = skb_get_queue_mapping(skb);
4239 
4240 	tx_q = &priv->dma_conf.tx_queue[queue];
4241 	txq_stats = &priv->xstats.txq_stats[queue];
4242 	first_tx = tx_q->cur_tx;
4243 
4244 	/* Compute header lengths */
4245 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4246 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4247 		hdr = sizeof(struct udphdr);
4248 	} else {
4249 		proto_hdr_len = skb_tcp_all_headers(skb);
4250 		hdr = tcp_hdrlen(skb);
4251 	}
4252 
4253 	/* Desc availability based on the threshold should be safe enough */
4254 	if (unlikely(stmmac_tx_avail(priv, queue) <
4255 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4256 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4257 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4258 								queue));
4259 			/* This is a hard error, log it. */
4260 			netdev_err(priv->dev,
4261 				   "%s: Tx Ring full when queue awake\n",
4262 				   __func__);
4263 		}
4264 		return NETDEV_TX_BUSY;
4265 	}
4266 
4267 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4268 
4269 	mss = skb_shinfo(skb)->gso_size;
4270 
4271 	/* set new MSS value if needed */
4272 	if (mss != tx_q->mss) {
4273 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4274 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4275 		else
4276 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4277 
4278 		stmmac_set_mss(priv, mss_desc, mss);
4279 		tx_q->mss = mss;
4280 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4281 						priv->dma_conf.dma_tx_size);
4282 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4283 	}
4284 
4285 	if (netif_msg_tx_queued(priv)) {
4286 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4287 			__func__, hdr, proto_hdr_len, pay_len, mss);
4288 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4289 			skb->data_len);
4290 	}
4291 
4292 	first_entry = tx_q->cur_tx;
4293 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4294 
4295 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4296 		desc = &tx_q->dma_entx[first_entry].basic;
4297 	else
4298 		desc = &tx_q->dma_tx[first_entry];
4299 	first = desc;
4300 
4301 	/* first descriptor: fill Headers on Buf1 */
4302 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4303 			     DMA_TO_DEVICE);
4304 	if (dma_mapping_error(priv->device, des))
4305 		goto dma_map_err;
4306 
4307 	if (priv->dma_cap.addr64 <= 32) {
4308 		first->des0 = cpu_to_le32(des);
4309 
4310 		/* Fill start of payload in buff2 of first descriptor */
4311 		if (pay_len)
4312 			first->des1 = cpu_to_le32(des + proto_hdr_len);
4313 
4314 		/* If needed take extra descriptors to fill the remaining payload */
4315 		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4316 	} else {
4317 		stmmac_set_desc_addr(priv, first, des);
4318 		tmp_pay_len = pay_len;
4319 		des += proto_hdr_len;
4320 		pay_len = 0;
4321 	}
4322 
4323 	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4324 
4325 	/* In case two or more DMA transmit descriptors are allocated for this
4326 	 * non-paged SKB data, the DMA buffer address should be saved to
4327 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4328 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4329 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4330 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4331 	 * sooner or later.
4332 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4333 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4334 	 * this DMA buffer right after the DMA engine completely finishes the
4335 	 * full buffer transmission.
4336 	 */
4337 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4338 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4339 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4340 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4341 
4342 	/* Prepare fragments */
4343 	for (i = 0; i < nfrags; i++) {
4344 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4345 
4346 		des = skb_frag_dma_map(priv->device, frag, 0,
4347 				       skb_frag_size(frag),
4348 				       DMA_TO_DEVICE);
4349 		if (dma_mapping_error(priv->device, des))
4350 			goto dma_map_err;
4351 
4352 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4353 				     (i == nfrags - 1), queue);
4354 
4355 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4356 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4357 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4358 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4359 	}
4360 
4361 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4362 
4363 	/* Only the last descriptor gets to point to the skb. */
4364 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4365 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4366 
4367 	/* Manage tx mitigation */
4368 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4369 	tx_q->tx_count_frames += tx_packets;
4370 
4371 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4372 		set_ic = true;
4373 	else if (!priv->tx_coal_frames[queue])
4374 		set_ic = false;
4375 	else if (tx_packets > priv->tx_coal_frames[queue])
4376 		set_ic = true;
4377 	else if ((tx_q->tx_count_frames %
4378 		  priv->tx_coal_frames[queue]) < tx_packets)
4379 		set_ic = true;
4380 	else
4381 		set_ic = false;
4382 
4383 	if (set_ic) {
4384 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4385 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4386 		else
4387 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4388 
4389 		tx_q->tx_count_frames = 0;
4390 		stmmac_set_tx_ic(priv, desc);
4391 	}
4392 
4393 	/* We've used all descriptors we need for this skb, however,
4394 	 * advance cur_tx so that it references a fresh descriptor.
4395 	 * ndo_start_xmit will fill this descriptor the next time it's
4396 	 * called and stmmac_tx_clean may clean up to this descriptor.
4397 	 */
4398 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4399 
4400 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4401 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4402 			  __func__);
4403 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4404 	}
4405 
4406 	u64_stats_update_begin(&txq_stats->q_syncp);
4407 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4408 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4409 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4410 	if (set_ic)
4411 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4412 	u64_stats_update_end(&txq_stats->q_syncp);
4413 
4414 	if (priv->sarc_type)
4415 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4416 
4417 	skb_tx_timestamp(skb);
4418 
4419 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4420 		     priv->hwts_tx_en)) {
4421 		/* declare that device is doing timestamping */
4422 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4423 		stmmac_enable_tx_timestamp(priv, first);
4424 	}
4425 
4426 	/* Complete the first descriptor before granting the DMA */
4427 	stmmac_prepare_tso_tx_desc(priv, first, 1,
4428 			proto_hdr_len,
4429 			pay_len,
4430 			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4431 			hdr / 4, (skb->len - proto_hdr_len));
4432 
4433 	/* If context desc is used to change MSS */
4434 	if (mss_desc) {
4435 		/* Make sure that first descriptor has been completely
4436 		 * written, including its own bit. This is because MSS is
4437 		 * actually before first descriptor, so we need to make
4438 		 * sure that MSS's own bit is the last thing written.
4439 		 */
4440 		dma_wmb();
4441 		stmmac_set_tx_owner(priv, mss_desc);
4442 	}
4443 
4444 	if (netif_msg_pktdata(priv)) {
4445 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4446 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4447 			tx_q->cur_tx, first, nfrags);
4448 		pr_info(">>> frame to be transmitted: ");
4449 		print_pkt(skb->data, skb_headlen(skb));
4450 	}
4451 
4452 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4453 
4454 	stmmac_flush_tx_descriptors(priv, queue);
4455 	stmmac_tx_timer_arm(priv, queue);
4456 
4457 	return NETDEV_TX_OK;
4458 
4459 dma_map_err:
4460 	dev_err(priv->device, "Tx dma map failed\n");
4461 	dev_kfree_skb(skb);
4462 	priv->xstats.tx_dropped++;
4463 	return NETDEV_TX_OK;
4464 }
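
/* Editor's note: a stand-alone restatement of the set_ic decision used by
 * stmmac_tso_xmit() above and stmmac_xmit() below: always request a
 * completion interrupt for HW-timestamped frames, never when coalescing is
 * disabled (coal_frames == 0), otherwise roughly once every coal_frames
 * transmitted frames. A model for illustration, not driver code.
 */
static bool tx_wants_completion_irq_example(unsigned int tx_packets,
					    unsigned int tx_count_frames,
					    unsigned int coal_frames,
					    bool hw_tstamp_requested)
{
	if (hw_tstamp_requested)
		return true;
	if (!coal_frames)
		return false;
	if (tx_packets > coal_frames)
		return true;
	return (tx_count_frames % coal_frames) < tx_packets;
}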
4465 
4466 /**
4467  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4468  * @skb: socket buffer to check
4469  *
4470  * Check if a packet has an ethertype that will trigger the IP header checks
4471  * and IP/TCP checksum engine of the stmmac core.
4472  *
4473  * Return: true if the ethertype can trigger the checksum engine, false
4474  * otherwise
4475  */
4476 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4477 {
4478 	int depth = 0;
4479 	__be16 proto;
4480 
4481 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4482 				    &depth);
4483 
4484 	return (depth <= ETH_HLEN) &&
4485 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4486 }
4487 
4488 /**
4489  *  stmmac_xmit - Tx entry point of the driver
4490  *  @skb : the socket buffer
4491  *  @dev : device pointer
4492  *  Description : this is the tx entry point of the driver.
4493  *  It programs the chain or the ring and supports oversized frames
4494  *  and SG feature.
4495  */
4496 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4497 {
4498 	unsigned int first_entry, tx_packets, enh_desc;
4499 	struct stmmac_priv *priv = netdev_priv(dev);
4500 	unsigned int nopaged_len = skb_headlen(skb);
4501 	int i, csum_insertion = 0, is_jumbo = 0;
4502 	u32 queue = skb_get_queue_mapping(skb);
4503 	int nfrags = skb_shinfo(skb)->nr_frags;
4504 	int gso = skb_shinfo(skb)->gso_type;
4505 	struct stmmac_txq_stats *txq_stats;
4506 	struct dma_edesc *tbs_desc = NULL;
4507 	struct dma_desc *desc, *first;
4508 	struct stmmac_tx_queue *tx_q;
4509 	bool has_vlan, set_ic;
4510 	int entry, first_tx;
4511 	dma_addr_t des;
4512 
4513 	tx_q = &priv->dma_conf.tx_queue[queue];
4514 	txq_stats = &priv->xstats.txq_stats[queue];
4515 	first_tx = tx_q->cur_tx;
4516 
4517 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4518 		stmmac_disable_eee_mode(priv);
4519 
4520 	/* Manage oversized TCP frames for GMAC4 device */
4521 	if (skb_is_gso(skb) && priv->tso) {
4522 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4523 			return stmmac_tso_xmit(skb, dev);
4524 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4525 			return stmmac_tso_xmit(skb, dev);
4526 	}
4527 
4528 	if (priv->est && priv->est->enable &&
4529 	    priv->est->max_sdu[queue] &&
4530 	    skb->len > priv->est->max_sdu[queue]) {
4531 		priv->xstats.max_sdu_txq_drop[queue]++;
4532 		goto max_sdu_err;
4533 	}
4534 
4535 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4536 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4537 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4538 								queue));
4539 			/* This is a hard error, log it. */
4540 			netdev_err(priv->dev,
4541 				   "%s: Tx Ring full when queue awake\n",
4542 				   __func__);
4543 		}
4544 		return NETDEV_TX_BUSY;
4545 	}
4546 
4547 	/* Check if VLAN can be inserted by HW */
4548 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4549 
4550 	entry = tx_q->cur_tx;
4551 	first_entry = entry;
4552 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4553 
4554 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4555 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4556 	 * queues. In that case, checksum offloading for those queues that don't
4557 	 * support tx coe needs to fall back to software checksum calculation.
4558 	 *
4559 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4560 	 * also have to be checksummed in software.
4561 	 */
4562 	if (csum_insertion &&
4563 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4564 	     !stmmac_has_ip_ethertype(skb))) {
4565 		if (unlikely(skb_checksum_help(skb)))
4566 			goto dma_map_err;
4567 		csum_insertion = !csum_insertion;
4568 	}
4569 
4570 	if (likely(priv->extend_desc))
4571 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4572 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4573 		desc = &tx_q->dma_entx[entry].basic;
4574 	else
4575 		desc = tx_q->dma_tx + entry;
4576 
4577 	first = desc;
4578 
4579 	if (has_vlan)
4580 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4581 
4582 	enh_desc = priv->plat->enh_desc;
4583 	/* To program the descriptors according to the size of the frame */
4584 	if (enh_desc)
4585 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4586 
4587 	if (unlikely(is_jumbo)) {
4588 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4589 		if (unlikely(entry < 0) && (entry != -EINVAL))
4590 			goto dma_map_err;
4591 	}
4592 
4593 	for (i = 0; i < nfrags; i++) {
4594 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4595 		int len = skb_frag_size(frag);
4596 		bool last_segment = (i == (nfrags - 1));
4597 
4598 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4599 		WARN_ON(tx_q->tx_skbuff[entry]);
4600 
4601 		if (likely(priv->extend_desc))
4602 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4603 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4604 			desc = &tx_q->dma_entx[entry].basic;
4605 		else
4606 			desc = tx_q->dma_tx + entry;
4607 
4608 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4609 				       DMA_TO_DEVICE);
4610 		if (dma_mapping_error(priv->device, des))
4611 			goto dma_map_err; /* should reuse desc w/o issues */
4612 
4613 		tx_q->tx_skbuff_dma[entry].buf = des;
4614 
4615 		stmmac_set_desc_addr(priv, desc, des);
4616 
4617 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4618 		tx_q->tx_skbuff_dma[entry].len = len;
4619 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4620 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4621 
4622 		/* Prepare the descriptor and set the own bit too */
4623 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4624 				priv->mode, 1, last_segment, skb->len);
4625 	}
4626 
4627 	/* Only the last descriptor gets to point to the skb. */
4628 	tx_q->tx_skbuff[entry] = skb;
4629 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4630 
4631 	/* According to the coalesce parameter the IC bit for the latest
4632 	 * segment is reset and the timer re-started to clean the tx status.
4633 	 * This approach takes care of the fragments: desc is the first
4634 	 * element in case of no SG.
4635 	 */
4636 	tx_packets = (entry + 1) - first_tx;
4637 	tx_q->tx_count_frames += tx_packets;
4638 
4639 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4640 		set_ic = true;
4641 	else if (!priv->tx_coal_frames[queue])
4642 		set_ic = false;
4643 	else if (tx_packets > priv->tx_coal_frames[queue])
4644 		set_ic = true;
4645 	else if ((tx_q->tx_count_frames %
4646 		  priv->tx_coal_frames[queue]) < tx_packets)
4647 		set_ic = true;
4648 	else
4649 		set_ic = false;
4650 
4651 	if (set_ic) {
4652 		if (likely(priv->extend_desc))
4653 			desc = &tx_q->dma_etx[entry].basic;
4654 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4655 			desc = &tx_q->dma_entx[entry].basic;
4656 		else
4657 			desc = &tx_q->dma_tx[entry];
4658 
4659 		tx_q->tx_count_frames = 0;
4660 		stmmac_set_tx_ic(priv, desc);
4661 	}
4662 
4663 	/* We've used all descriptors we need for this skb, however,
4664 	 * advance cur_tx so that it references a fresh descriptor.
4665 	 * ndo_start_xmit will fill this descriptor the next time it's
4666 	 * called and stmmac_tx_clean may clean up to this descriptor.
4667 	 */
4668 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4669 	tx_q->cur_tx = entry;
4670 
4671 	if (netif_msg_pktdata(priv)) {
4672 		netdev_dbg(priv->dev,
4673 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4674 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4675 			   entry, first, nfrags);
4676 
4677 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4678 		print_pkt(skb->data, skb->len);
4679 	}
4680 
4681 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4682 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4683 			  __func__);
4684 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4685 	}
4686 
4687 	u64_stats_update_begin(&txq_stats->q_syncp);
4688 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4689 	if (set_ic)
4690 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4691 	u64_stats_update_end(&txq_stats->q_syncp);
4692 
4693 	if (priv->sarc_type)
4694 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4695 
4696 	skb_tx_timestamp(skb);
4697 
4698 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4699 	 * problems because all the descriptors are actually ready to be
4700 	 * passed to the DMA engine.
4701 	 */
4702 	if (likely(!is_jumbo)) {
4703 		bool last_segment = (nfrags == 0);
4704 
4705 		des = dma_map_single(priv->device, skb->data,
4706 				     nopaged_len, DMA_TO_DEVICE);
4707 		if (dma_mapping_error(priv->device, des))
4708 			goto dma_map_err;
4709 
4710 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4711 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4712 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4713 
4714 		stmmac_set_desc_addr(priv, first, des);
4715 
4716 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4717 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4718 
4719 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4720 			     priv->hwts_tx_en)) {
4721 			/* declare that device is doing timestamping */
4722 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4723 			stmmac_enable_tx_timestamp(priv, first);
4724 		}
4725 
4726 		/* Prepare the first descriptor setting the OWN bit too */
4727 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4728 				csum_insertion, priv->mode, 0, last_segment,
4729 				skb->len);
4730 	}
4731 
4732 	if (tx_q->tbs & STMMAC_TBS_EN) {
4733 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4734 
4735 		tbs_desc = &tx_q->dma_entx[first_entry];
4736 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4737 	}
4738 
4739 	stmmac_set_tx_owner(priv, first);
4740 
4741 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4742 
4743 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4744 
4745 	stmmac_flush_tx_descriptors(priv, queue);
4746 	stmmac_tx_timer_arm(priv, queue);
4747 
4748 	return NETDEV_TX_OK;
4749 
4750 dma_map_err:
4751 	netdev_err(priv->dev, "Tx DMA map failed\n");
4752 max_sdu_err:
4753 	dev_kfree_skb(skb);
4754 	priv->xstats.tx_dropped++;
4755 	return NETDEV_TX_OK;
4756 }
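
/* Editor's note: a compact model of the checksum-offload fallback in
 * stmmac_xmit() above. Hardware insertion is only used when the stack asked
 * for it, the queue was synthesized with TX COE support, and the ethertype
 * is one the COE engine parses; otherwise the driver punts to
 * skb_checksum_help(). Illustration only.
 */
static bool use_hw_tx_csum_example(bool csum_requested,
				   bool queue_coe_unsupported,
				   bool has_ip_ethertype)
{
	return csum_requested && !queue_coe_unsupported && has_ip_ethertype;
}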
4757 
4758 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4759 {
4760 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4761 	__be16 vlan_proto = veth->h_vlan_proto;
4762 	u16 vlanid;
4763 
4764 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4765 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4766 	    (vlan_proto == htons(ETH_P_8021AD) &&
4767 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4768 		/* pop the vlan tag */
4769 		vlanid = ntohs(veth->h_vlan_TCI);
4770 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4771 		skb_pull(skb, VLAN_HLEN);
4772 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4773 	}
4774 }
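
/* Editor's note: a byte-level model of the memmove()/skb_pull() pair above.
 * The 12 bytes of destination + source MAC address slide forward over the
 * 4-byte 802.1Q tag and the frame start advances by VLAN_HLEN; the TCI
 * itself is handed to the stack via __vlan_hwaccel_put_tag(). Sketch only.
 */
static unsigned char *pop_vlan_tag_example(unsigned char *frame)
{
	memmove(frame + VLAN_HLEN, frame, ETH_ALEN * 2);

	return frame + VLAN_HLEN;	/* new start of the untagged frame */
}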
4775 
4776 /**
4777  * stmmac_rx_refill - refill used skb preallocated buffers
4778  * @priv: driver private structure
4779  * @queue: RX queue index
4780  * Description : this is to reallocate the skb for the reception process
4781  * that is based on zero-copy.
4782  */
4783 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4784 {
4785 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4786 	int dirty = stmmac_rx_dirty(priv, queue);
4787 	unsigned int entry = rx_q->dirty_rx;
4788 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4789 
4790 	if (priv->dma_cap.host_dma_width <= 32)
4791 		gfp |= GFP_DMA32;
4792 
4793 	while (dirty-- > 0) {
4794 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4795 		struct dma_desc *p;
4796 		bool use_rx_wd;
4797 
4798 		if (priv->extend_desc)
4799 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4800 		else
4801 			p = rx_q->dma_rx + entry;
4802 
4803 		if (!buf->page) {
4804 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4805 			if (!buf->page)
4806 				break;
4807 		}
4808 
4809 		if (priv->sph && !buf->sec_page) {
4810 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4811 			if (!buf->sec_page)
4812 				break;
4813 
4814 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4815 		}
4816 
4817 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4818 
4819 		stmmac_set_desc_addr(priv, p, buf->addr);
4820 		if (priv->sph)
4821 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4822 		else
4823 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4824 		stmmac_refill_desc3(priv, rx_q, p);
4825 
4826 		rx_q->rx_count_frames++;
4827 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4828 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4829 			rx_q->rx_count_frames = 0;
4830 
4831 		use_rx_wd = !priv->rx_coal_frames[queue];
4832 		use_rx_wd |= rx_q->rx_count_frames > 0;
4833 		if (!priv->use_riwt)
4834 			use_rx_wd = false;
4835 
4836 		dma_wmb();
4837 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4838 
4839 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4840 	}
4841 	rx_q->dirty_rx = entry;
4842 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4843 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4844 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4845 }
4846 
4847 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4848 				       struct dma_desc *p,
4849 				       int status, unsigned int len)
4850 {
4851 	unsigned int plen = 0, hlen = 0;
4852 	int coe = priv->hw->rx_csum;
4853 
4854 	/* Not first descriptor, buffer is always zero */
4855 	if (priv->sph && len)
4856 		return 0;
4857 
4858 	/* First descriptor, get split header length */
4859 	stmmac_get_rx_header_len(priv, p, &hlen);
4860 	if (priv->sph && hlen) {
4861 		priv->xstats.rx_split_hdr_pkt_n++;
4862 		return hlen;
4863 	}
4864 
4865 	/* First descriptor, not last descriptor and not split header */
4866 	if (status & rx_not_ls)
4867 		return priv->dma_conf.dma_buf_sz;
4868 
4869 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4870 
4871 	/* First descriptor and last descriptor and not split header */
4872 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4873 }
4874 
4875 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4876 				       struct dma_desc *p,
4877 				       int status, unsigned int len)
4878 {
4879 	int coe = priv->hw->rx_csum;
4880 	unsigned int plen = 0;
4881 
4882 	/* Not split header, buffer is not available */
4883 	if (!priv->sph)
4884 		return 0;
4885 
4886 	/* Not last descriptor */
4887 	if (status & rx_not_ls)
4888 		return priv->dma_conf.dma_buf_sz;
4889 
4890 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4891 
4892 	/* Last descriptor */
4893 	return plen - len;
4894 }
4895 
4896 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4897 				struct xdp_frame *xdpf, bool dma_map)
4898 {
4899 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4900 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4901 	unsigned int entry = tx_q->cur_tx;
4902 	struct dma_desc *tx_desc;
4903 	dma_addr_t dma_addr;
4904 	bool set_ic;
4905 
4906 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4907 		return STMMAC_XDP_CONSUMED;
4908 
4909 	if (priv->est && priv->est->enable &&
4910 	    priv->est->max_sdu[queue] &&
4911 	    xdpf->len > priv->est->max_sdu[queue]) {
4912 		priv->xstats.max_sdu_txq_drop[queue]++;
4913 		return STMMAC_XDP_CONSUMED;
4914 	}
4915 
4916 	if (likely(priv->extend_desc))
4917 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4918 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4919 		tx_desc = &tx_q->dma_entx[entry].basic;
4920 	else
4921 		tx_desc = tx_q->dma_tx + entry;
4922 
4923 	if (dma_map) {
4924 		dma_addr = dma_map_single(priv->device, xdpf->data,
4925 					  xdpf->len, DMA_TO_DEVICE);
4926 		if (dma_mapping_error(priv->device, dma_addr))
4927 			return STMMAC_XDP_CONSUMED;
4928 
4929 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4930 	} else {
4931 		struct page *page = virt_to_page(xdpf->data);
4932 
4933 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4934 			   xdpf->headroom;
4935 		dma_sync_single_for_device(priv->device, dma_addr,
4936 					   xdpf->len, DMA_BIDIRECTIONAL);
4937 
4938 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4939 	}
4940 
4941 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4942 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4943 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4944 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4945 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4946 
4947 	tx_q->xdpf[entry] = xdpf;
4948 
4949 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4950 
4951 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4952 			       true, priv->mode, true, true,
4953 			       xdpf->len);
4954 
4955 	tx_q->tx_count_frames++;
4956 
4957 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4958 		set_ic = true;
4959 	else
4960 		set_ic = false;
4961 
4962 	if (set_ic) {
4963 		tx_q->tx_count_frames = 0;
4964 		stmmac_set_tx_ic(priv, tx_desc);
4965 		u64_stats_update_begin(&txq_stats->q_syncp);
4966 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4967 		u64_stats_update_end(&txq_stats->q_syncp);
4968 	}
4969 
4970 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4971 
4972 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4973 	tx_q->cur_tx = entry;
4974 
4975 	return STMMAC_XDP_TX;
4976 }
4977 
4978 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4979 				   int cpu)
4980 {
4981 	int index = cpu;
4982 
4983 	if (unlikely(index < 0))
4984 		index = 0;
4985 
4986 	while (index >= priv->plat->tx_queues_to_use)
4987 		index -= priv->plat->tx_queues_to_use;
4988 
4989 	return index;
4990 }
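/* Editorial sketch (not part of the driver): the loop above reduces the CPU
 * number modulo the usable TX queues, e.g. assuming
 * priv->plat->tx_queues_to_use == 4:
 *
 *	stmmac_xdp_get_tx_queue(priv, 0);	// -> 0
 *	stmmac_xdp_get_tx_queue(priv, 5);	// -> 1
 *	stmmac_xdp_get_tx_queue(priv, 7);	// -> 3
 *
 * so XDP_TX traffic from different CPUs is spread across the TX queues while
 * each CPU always maps to the same queue.
 */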
4991 
4992 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4993 				struct xdp_buff *xdp)
4994 {
4995 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4996 	int cpu = smp_processor_id();
4997 	struct netdev_queue *nq;
4998 	int queue;
4999 	int res;
5000 
5001 	if (unlikely(!xdpf))
5002 		return STMMAC_XDP_CONSUMED;
5003 
5004 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5005 	nq = netdev_get_tx_queue(priv->dev, queue);
5006 
5007 	__netif_tx_lock(nq, cpu);
5008 	/* Avoids TX time-out as we are sharing with slow path */
5009 	txq_trans_cond_update(nq);
5010 
5011 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5012 	if (res == STMMAC_XDP_TX)
5013 		stmmac_flush_tx_descriptors(priv, queue);
5014 
5015 	__netif_tx_unlock(nq);
5016 
5017 	return res;
5018 }
5019 
5020 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5021 				 struct bpf_prog *prog,
5022 				 struct xdp_buff *xdp)
5023 {
5024 	u32 act;
5025 	int res;
5026 
5027 	act = bpf_prog_run_xdp(prog, xdp);
5028 	switch (act) {
5029 	case XDP_PASS:
5030 		res = STMMAC_XDP_PASS;
5031 		break;
5032 	case XDP_TX:
5033 		res = stmmac_xdp_xmit_back(priv, xdp);
5034 		break;
5035 	case XDP_REDIRECT:
5036 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5037 			res = STMMAC_XDP_CONSUMED;
5038 		else
5039 			res = STMMAC_XDP_REDIRECT;
5040 		break;
5041 	default:
5042 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5043 		fallthrough;
5044 	case XDP_ABORTED:
5045 		trace_xdp_exception(priv->dev, prog, act);
5046 		fallthrough;
5047 	case XDP_DROP:
5048 		res = STMMAC_XDP_CONSUMED;
5049 		break;
5050 	}
5051 
5052 	return res;
5053 }
5054 
5055 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5056 					   struct xdp_buff *xdp)
5057 {
5058 	struct bpf_prog *prog;
5059 	int res;
5060 
5061 	prog = READ_ONCE(priv->xdp_prog);
5062 	if (!prog) {
5063 		res = STMMAC_XDP_PASS;
5064 		goto out;
5065 	}
5066 
5067 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5068 out:
5069 	return ERR_PTR(-res);
5070 }
5071 
5072 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5073 				   int xdp_status)
5074 {
5075 	int cpu = smp_processor_id();
5076 	int queue;
5077 
5078 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5079 
5080 	if (xdp_status & STMMAC_XDP_TX)
5081 		stmmac_tx_timer_arm(priv, queue);
5082 
5083 	if (xdp_status & STMMAC_XDP_REDIRECT)
5084 		xdp_do_flush();
5085 }
5086 
5087 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5088 					       struct xdp_buff *xdp)
5089 {
5090 	unsigned int metasize = xdp->data - xdp->data_meta;
5091 	unsigned int datasize = xdp->data_end - xdp->data;
5092 	struct sk_buff *skb;
5093 
5094 	skb = napi_alloc_skb(&ch->rxtx_napi,
5095 			     xdp->data_end - xdp->data_hard_start);
5096 	if (unlikely(!skb))
5097 		return NULL;
5098 
5099 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5100 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5101 	if (metasize)
5102 		skb_metadata_set(skb, metasize);
5103 
5104 	return skb;
5105 }
5106 
5107 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5108 				   struct dma_desc *p, struct dma_desc *np,
5109 				   struct xdp_buff *xdp)
5110 {
5111 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5112 	struct stmmac_channel *ch = &priv->channel[queue];
5113 	unsigned int len = xdp->data_end - xdp->data;
5114 	enum pkt_hash_types hash_type;
5115 	int coe = priv->hw->rx_csum;
5116 	struct sk_buff *skb;
5117 	u32 hash;
5118 
5119 	skb = stmmac_construct_skb_zc(ch, xdp);
5120 	if (!skb) {
5121 		priv->xstats.rx_dropped++;
5122 		return;
5123 	}
5124 
5125 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5126 	if (priv->hw->hw_vlan_en)
5127 		/* MAC level stripping. */
5128 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5129 	else
5130 		/* Driver level stripping. */
5131 		stmmac_rx_vlan(priv->dev, skb);
5132 	skb->protocol = eth_type_trans(skb, priv->dev);
5133 
5134 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5135 		skb_checksum_none_assert(skb);
5136 	else
5137 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5138 
5139 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5140 		skb_set_hash(skb, hash, hash_type);
5141 
5142 	skb_record_rx_queue(skb, queue);
5143 	napi_gro_receive(&ch->rxtx_napi, skb);
5144 
5145 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5146 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5147 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5148 	u64_stats_update_end(&rxq_stats->napi_syncp);
5149 }
5150 
5151 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5152 {
5153 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5154 	unsigned int entry = rx_q->dirty_rx;
5155 	struct dma_desc *rx_desc = NULL;
5156 	bool ret = true;
5157 
5158 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5159 
5160 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5161 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5162 		dma_addr_t dma_addr;
5163 		bool use_rx_wd;
5164 
5165 		if (!buf->xdp) {
5166 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5167 			if (!buf->xdp) {
5168 				ret = false;
5169 				break;
5170 			}
5171 		}
5172 
5173 		if (priv->extend_desc)
5174 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5175 		else
5176 			rx_desc = rx_q->dma_rx + entry;
5177 
5178 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5179 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5180 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5181 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5182 
5183 		rx_q->rx_count_frames++;
5184 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5185 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5186 			rx_q->rx_count_frames = 0;
5187 
5188 		use_rx_wd = !priv->rx_coal_frames[queue];
5189 		use_rx_wd |= rx_q->rx_count_frames > 0;
5190 		if (!priv->use_riwt)
5191 			use_rx_wd = false;
5192 
5193 		dma_wmb();
5194 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5195 
5196 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5197 	}
5198 
5199 	if (rx_desc) {
5200 		rx_q->dirty_rx = entry;
5201 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5202 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5203 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5204 	}
5205 
5206 	return ret;
5207 }
5208 
5209 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5210 {
5211 	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5212 	 * to represent incoming packet, whereas cb field in the same structure
5213 	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5214 	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5215 	 */
5216 	return (struct stmmac_xdp_buff *)xdp;
5217 }
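/* Editorial sketch (not part of the driver): the cast above assumes that
 * struct stmmac_xdp_buff (declared in stmmac.h) begins with a struct
 * xdp_buff, roughly:
 *
 *	struct stmmac_xdp_buff {
 *		struct xdp_buff xdp;
 *		struct stmmac_priv *priv;
 *		struct dma_desc *desc;
 *		struct dma_desc *ndesc;
 *	};
 *
 * so the driver-private members land in the cb[] area of struct
 * xdp_buff_xsk. This layout is shown only for illustration; see stmmac.h
 * for the authoritative definition.
 */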
5218 
5219 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5220 {
5221 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5222 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5223 	unsigned int count = 0, error = 0, len = 0;
5224 	int dirty = stmmac_rx_dirty(priv, queue);
5225 	unsigned int next_entry = rx_q->cur_rx;
5226 	u32 rx_errors = 0, rx_dropped = 0;
5227 	unsigned int desc_size;
5228 	struct bpf_prog *prog;
5229 	bool failure = false;
5230 	int xdp_status = 0;
5231 	int status = 0;
5232 
5233 	if (netif_msg_rx_status(priv)) {
5234 		void *rx_head;
5235 
5236 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5237 		if (priv->extend_desc) {
5238 			rx_head = (void *)rx_q->dma_erx;
5239 			desc_size = sizeof(struct dma_extended_desc);
5240 		} else {
5241 			rx_head = (void *)rx_q->dma_rx;
5242 			desc_size = sizeof(struct dma_desc);
5243 		}
5244 
5245 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5246 				    rx_q->dma_rx_phy, desc_size);
5247 	}
5248 	while (count < limit) {
5249 		struct stmmac_rx_buffer *buf;
5250 		struct stmmac_xdp_buff *ctx;
5251 		unsigned int buf1_len = 0;
5252 		struct dma_desc *np, *p;
5253 		int entry;
5254 		int res;
5255 
5256 		if (!count && rx_q->state_saved) {
5257 			error = rx_q->state.error;
5258 			len = rx_q->state.len;
5259 		} else {
5260 			rx_q->state_saved = false;
5261 			error = 0;
5262 			len = 0;
5263 		}
5264 
5265 		if (count >= limit)
5266 			break;
5267 
5268 read_again:
5269 		buf1_len = 0;
5270 		entry = next_entry;
5271 		buf = &rx_q->buf_pool[entry];
5272 
5273 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5274 			failure = failure ||
5275 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5276 			dirty = 0;
5277 		}
5278 
5279 		if (priv->extend_desc)
5280 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5281 		else
5282 			p = rx_q->dma_rx + entry;
5283 
5284 		/* read the status of the incoming frame */
5285 		status = stmmac_rx_status(priv, &priv->xstats, p);
5286 		/* check if managed by the DMA otherwise go ahead */
5287 		if (unlikely(status & dma_own))
5288 			break;
5289 
5290 		/* Prefetch the next RX descriptor */
5291 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5292 						priv->dma_conf.dma_rx_size);
5293 		next_entry = rx_q->cur_rx;
5294 
5295 		if (priv->extend_desc)
5296 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5297 		else
5298 			np = rx_q->dma_rx + next_entry;
5299 
5300 		prefetch(np);
5301 
5302 		/* Ensure a valid XSK buffer before proceeding */
5303 		if (!buf->xdp)
5304 			break;
5305 
5306 		if (priv->extend_desc)
5307 			stmmac_rx_extended_status(priv, &priv->xstats,
5308 						  rx_q->dma_erx + entry);
5309 		if (unlikely(status == discard_frame)) {
5310 			xsk_buff_free(buf->xdp);
5311 			buf->xdp = NULL;
5312 			dirty++;
5313 			error = 1;
5314 			if (!priv->hwts_rx_en)
5315 				rx_errors++;
5316 		}
5317 
5318 		if (unlikely(error && (status & rx_not_ls)))
5319 			goto read_again;
5320 		if (unlikely(error)) {
5321 			count++;
5322 			continue;
5323 		}
5324 
5325 		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5326 		if (likely(status & rx_not_ls)) {
5327 			xsk_buff_free(buf->xdp);
5328 			buf->xdp = NULL;
5329 			dirty++;
5330 			count++;
5331 			goto read_again;
5332 		}
5333 
5334 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5335 		ctx->priv = priv;
5336 		ctx->desc = p;
5337 		ctx->ndesc = np;
5338 
5339 		/* XDP ZC frames only support primary buffers for now */
5340 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5341 		len += buf1_len;
5342 
5343 		/* ACS is disabled; strip manually. */
5344 		if (likely(!(status & rx_not_ls))) {
5345 			buf1_len -= ETH_FCS_LEN;
5346 			len -= ETH_FCS_LEN;
5347 		}
5348 
5349 		/* RX buffer is good and fit into a XSK pool buffer */
5350 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5351 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5352 
5353 		prog = READ_ONCE(priv->xdp_prog);
5354 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5355 
5356 		switch (res) {
5357 		case STMMAC_XDP_PASS:
5358 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5359 			xsk_buff_free(buf->xdp);
5360 			break;
5361 		case STMMAC_XDP_CONSUMED:
5362 			xsk_buff_free(buf->xdp);
5363 			rx_dropped++;
5364 			break;
5365 		case STMMAC_XDP_TX:
5366 		case STMMAC_XDP_REDIRECT:
5367 			xdp_status |= res;
5368 			break;
5369 		}
5370 
5371 		buf->xdp = NULL;
5372 		dirty++;
5373 		count++;
5374 	}
5375 
5376 	if (status & rx_not_ls) {
5377 		rx_q->state_saved = true;
5378 		rx_q->state.error = error;
5379 		rx_q->state.len = len;
5380 	}
5381 
5382 	stmmac_finalize_xdp_rx(priv, xdp_status);
5383 
5384 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5385 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5386 	u64_stats_update_end(&rxq_stats->napi_syncp);
5387 
5388 	priv->xstats.rx_dropped += rx_dropped;
5389 	priv->xstats.rx_errors += rx_errors;
5390 
5391 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5392 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5393 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5394 		else
5395 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5396 
5397 		return (int)count;
5398 	}
5399 
5400 	return failure ? limit : (int)count;
5401 }
5402 
5403 /**
5404  * stmmac_rx - manage the receive process
5405  * @priv: driver private structure
5406  * @limit: napi budget
5407  * @queue: RX queue index.
5408  * Description: this is the function called by the napi poll method.
5409  * It gets all the frames inside the ring.
5410  */
5411 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5412 {
5413 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5414 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5415 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5416 	struct stmmac_channel *ch = &priv->channel[queue];
5417 	unsigned int count = 0, error = 0, len = 0;
5418 	int status = 0, coe = priv->hw->rx_csum;
5419 	unsigned int next_entry = rx_q->cur_rx;
5420 	enum dma_data_direction dma_dir;
5421 	unsigned int desc_size;
5422 	struct sk_buff *skb = NULL;
5423 	struct stmmac_xdp_buff ctx;
5424 	int xdp_status = 0;
5425 	int buf_sz;
5426 
5427 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5428 	buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5429 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5430 
5431 	if (netif_msg_rx_status(priv)) {
5432 		void *rx_head;
5433 
5434 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5435 		if (priv->extend_desc) {
5436 			rx_head = (void *)rx_q->dma_erx;
5437 			desc_size = sizeof(struct dma_extended_desc);
5438 		} else {
5439 			rx_head = (void *)rx_q->dma_rx;
5440 			desc_size = sizeof(struct dma_desc);
5441 		}
5442 
5443 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5444 				    rx_q->dma_rx_phy, desc_size);
5445 	}
5446 	while (count < limit) {
5447 		unsigned int buf1_len = 0, buf2_len = 0;
5448 		enum pkt_hash_types hash_type;
5449 		struct stmmac_rx_buffer *buf;
5450 		struct dma_desc *np, *p;
5451 		int entry;
5452 		u32 hash;
5453 
5454 		if (!count && rx_q->state_saved) {
5455 			skb = rx_q->state.skb;
5456 			error = rx_q->state.error;
5457 			len = rx_q->state.len;
5458 		} else {
5459 			rx_q->state_saved = false;
5460 			skb = NULL;
5461 			error = 0;
5462 			len = 0;
5463 		}
5464 
5465 read_again:
5466 		if (count >= limit)
5467 			break;
5468 
5469 		buf1_len = 0;
5470 		buf2_len = 0;
5471 		entry = next_entry;
5472 		buf = &rx_q->buf_pool[entry];
5473 
5474 		if (priv->extend_desc)
5475 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5476 		else
5477 			p = rx_q->dma_rx + entry;
5478 
5479 		/* read the status of the incoming frame */
5480 		status = stmmac_rx_status(priv, &priv->xstats, p);
5481 		/* check if managed by the DMA otherwise go ahead */
5482 		if (unlikely(status & dma_own))
5483 			break;
5484 
5485 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5486 						priv->dma_conf.dma_rx_size);
5487 		next_entry = rx_q->cur_rx;
5488 
5489 		if (priv->extend_desc)
5490 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5491 		else
5492 			np = rx_q->dma_rx + next_entry;
5493 
5494 		prefetch(np);
5495 
5496 		if (priv->extend_desc)
5497 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5498 		if (unlikely(status == discard_frame)) {
5499 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5500 			buf->page = NULL;
5501 			error = 1;
5502 			if (!priv->hwts_rx_en)
5503 				rx_errors++;
5504 		}
5505 
5506 		if (unlikely(error && (status & rx_not_ls)))
5507 			goto read_again;
5508 		if (unlikely(error)) {
5509 			dev_kfree_skb(skb);
5510 			skb = NULL;
5511 			count++;
5512 			continue;
5513 		}
5514 
5515 		/* Buffer is good. Go on. */
5516 
5517 		prefetch(page_address(buf->page) + buf->page_offset);
5518 		if (buf->sec_page)
5519 			prefetch(page_address(buf->sec_page));
5520 
5521 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5522 		len += buf1_len;
5523 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5524 		len += buf2_len;
5525 
5526 		/* ACS is disabled; strip manually. */
5527 		if (likely(!(status & rx_not_ls))) {
5528 			if (buf2_len) {
5529 				buf2_len -= ETH_FCS_LEN;
5530 				len -= ETH_FCS_LEN;
5531 			} else if (buf1_len) {
5532 				buf1_len -= ETH_FCS_LEN;
5533 				len -= ETH_FCS_LEN;
5534 			}
5535 		}
5536 
5537 		if (!skb) {
5538 			unsigned int pre_len, sync_len;
5539 
5540 			dma_sync_single_for_cpu(priv->device, buf->addr,
5541 						buf1_len, dma_dir);
5542 
5543 			xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5544 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5545 					 buf->page_offset, buf1_len, true);
5546 
5547 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5548 				  buf->page_offset;
5549 
5550 			ctx.priv = priv;
5551 			ctx.desc = p;
5552 			ctx.ndesc = np;
5553 
5554 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5555 			/* Due to xdp_adjust_tail, the DMA sync for_device must
5556 			 * cover the maximum length the CPU touched
5557 			 */
5558 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5559 				   buf->page_offset;
5560 			sync_len = max(sync_len, pre_len);
5561 
5562 			/* For Not XDP_PASS verdict */
5563 			if (IS_ERR(skb)) {
5564 				unsigned int xdp_res = -PTR_ERR(skb);
5565 
5566 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5567 					page_pool_put_page(rx_q->page_pool,
5568 							   virt_to_head_page(ctx.xdp.data),
5569 							   sync_len, true);
5570 					buf->page = NULL;
5571 					rx_dropped++;
5572 
5573 					/* Clear skb as it was set as
5574 					 * status by XDP program.
5575 					 */
5576 					skb = NULL;
5577 
5578 					if (unlikely((status & rx_not_ls)))
5579 						goto read_again;
5580 
5581 					count++;
5582 					continue;
5583 				} else if (xdp_res & (STMMAC_XDP_TX |
5584 						      STMMAC_XDP_REDIRECT)) {
5585 					xdp_status |= xdp_res;
5586 					buf->page = NULL;
5587 					skb = NULL;
5588 					count++;
5589 					continue;
5590 				}
5591 			}
5592 		}
5593 
5594 		if (!skb) {
5595 			/* XDP program may expand or reduce tail */
5596 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5597 
5598 			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5599 			if (!skb) {
5600 				rx_dropped++;
5601 				count++;
5602 				goto drain_data;
5603 			}
5604 
5605 			/* XDP program may adjust header */
5606 			skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5607 			skb_put(skb, buf1_len);
5608 
5609 			/* Data payload copied into SKB, page ready for recycle */
5610 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
5611 			buf->page = NULL;
5612 		} else if (buf1_len) {
5613 			dma_sync_single_for_cpu(priv->device, buf->addr,
5614 						buf1_len, dma_dir);
5615 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5616 					buf->page, buf->page_offset, buf1_len,
5617 					priv->dma_conf.dma_buf_sz);
5618 
5619 			/* Data payload appended into SKB */
5620 			skb_mark_for_recycle(skb);
5621 			buf->page = NULL;
5622 		}
5623 
5624 		if (buf2_len) {
5625 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5626 						buf2_len, dma_dir);
5627 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5628 					buf->sec_page, 0, buf2_len,
5629 					priv->dma_conf.dma_buf_sz);
5630 
5631 			/* Data payload appended into SKB */
5632 			skb_mark_for_recycle(skb);
5633 			buf->sec_page = NULL;
5634 		}
5635 
5636 drain_data:
5637 		if (likely(status & rx_not_ls))
5638 			goto read_again;
5639 		if (!skb)
5640 			continue;
5641 
5642 		/* Got entire packet into SKB. Finish it. */
5643 
5644 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5645 
5646 		if (priv->hw->hw_vlan_en)
5647 			/* MAC level stripping. */
5648 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5649 		else
5650 			/* Driver level stripping. */
5651 			stmmac_rx_vlan(priv->dev, skb);
5652 
5653 		skb->protocol = eth_type_trans(skb, priv->dev);
5654 
5655 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5656 			skb_checksum_none_assert(skb);
5657 		else
5658 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5659 
5660 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5661 			skb_set_hash(skb, hash, hash_type);
5662 
5663 		skb_record_rx_queue(skb, queue);
5664 		napi_gro_receive(&ch->rx_napi, skb);
5665 		skb = NULL;
5666 
5667 		rx_packets++;
5668 		rx_bytes += len;
5669 		count++;
5670 	}
5671 
5672 	if (status & rx_not_ls || skb) {
5673 		rx_q->state_saved = true;
5674 		rx_q->state.skb = skb;
5675 		rx_q->state.error = error;
5676 		rx_q->state.len = len;
5677 	}
5678 
5679 	stmmac_finalize_xdp_rx(priv, xdp_status);
5680 
5681 	stmmac_rx_refill(priv, queue);
5682 
5683 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5684 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5685 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5686 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5687 	u64_stats_update_end(&rxq_stats->napi_syncp);
5688 
5689 	priv->xstats.rx_dropped += rx_dropped;
5690 	priv->xstats.rx_errors += rx_errors;
5691 
5692 	return count;
5693 }
5694 
5695 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5696 {
5697 	struct stmmac_channel *ch =
5698 		container_of(napi, struct stmmac_channel, rx_napi);
5699 	struct stmmac_priv *priv = ch->priv_data;
5700 	struct stmmac_rxq_stats *rxq_stats;
5701 	u32 chan = ch->index;
5702 	int work_done;
5703 
5704 	rxq_stats = &priv->xstats.rxq_stats[chan];
5705 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5706 	u64_stats_inc(&rxq_stats->napi.poll);
5707 	u64_stats_update_end(&rxq_stats->napi_syncp);
5708 
5709 	work_done = stmmac_rx(priv, budget, chan);
5710 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5711 		unsigned long flags;
5712 
5713 		spin_lock_irqsave(&ch->lock, flags);
5714 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5715 		spin_unlock_irqrestore(&ch->lock, flags);
5716 	}
5717 
5718 	return work_done;
5719 }
5720 
5721 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5722 {
5723 	struct stmmac_channel *ch =
5724 		container_of(napi, struct stmmac_channel, tx_napi);
5725 	struct stmmac_priv *priv = ch->priv_data;
5726 	struct stmmac_txq_stats *txq_stats;
5727 	bool pending_packets = false;
5728 	u32 chan = ch->index;
5729 	int work_done;
5730 
5731 	txq_stats = &priv->xstats.txq_stats[chan];
5732 	u64_stats_update_begin(&txq_stats->napi_syncp);
5733 	u64_stats_inc(&txq_stats->napi.poll);
5734 	u64_stats_update_end(&txq_stats->napi_syncp);
5735 
5736 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5737 	work_done = min(work_done, budget);
5738 
5739 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5740 		unsigned long flags;
5741 
5742 		spin_lock_irqsave(&ch->lock, flags);
5743 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5744 		spin_unlock_irqrestore(&ch->lock, flags);
5745 	}
5746 
5747 	/* TX still has packets to handle, check if we need to arm the tx timer */
5748 	if (pending_packets)
5749 		stmmac_tx_timer_arm(priv, chan);
5750 
5751 	return work_done;
5752 }
5753 
5754 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5755 {
5756 	struct stmmac_channel *ch =
5757 		container_of(napi, struct stmmac_channel, rxtx_napi);
5758 	struct stmmac_priv *priv = ch->priv_data;
5759 	bool tx_pending_packets = false;
5760 	int rx_done, tx_done, rxtx_done;
5761 	struct stmmac_rxq_stats *rxq_stats;
5762 	struct stmmac_txq_stats *txq_stats;
5763 	u32 chan = ch->index;
5764 
5765 	rxq_stats = &priv->xstats.rxq_stats[chan];
5766 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5767 	u64_stats_inc(&rxq_stats->napi.poll);
5768 	u64_stats_update_end(&rxq_stats->napi_syncp);
5769 
5770 	txq_stats = &priv->xstats.txq_stats[chan];
5771 	u64_stats_update_begin(&txq_stats->napi_syncp);
5772 	u64_stats_inc(&txq_stats->napi.poll);
5773 	u64_stats_update_end(&txq_stats->napi_syncp);
5774 
5775 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5776 	tx_done = min(tx_done, budget);
5777 
5778 	rx_done = stmmac_rx_zc(priv, budget, chan);
5779 
5780 	rxtx_done = max(tx_done, rx_done);
5781 
5782 	/* If either TX or RX work is not complete, return budget
5783 	 * and keep pooling
5784 	 * and keep polling
5785 	if (rxtx_done >= budget)
5786 		return budget;
5787 
5788 	/* all work done, exit the polling mode */
5789 	if (napi_complete_done(napi, rxtx_done)) {
5790 		unsigned long flags;
5791 
5792 		spin_lock_irqsave(&ch->lock, flags);
5793 		/* Both RX and TX work are complete,
5794 		 * so enable both RX & TX IRQs.
5795 		 */
5796 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5797 		spin_unlock_irqrestore(&ch->lock, flags);
5798 	}
5799 
5800 	/* TX still has packets to handle, check if we need to arm the tx timer */
5801 	if (tx_pending_packets)
5802 		stmmac_tx_timer_arm(priv, chan);
5803 
5804 	return min(rxtx_done, budget - 1);
5805 }
5806 
5807 /**
5808  *  stmmac_tx_timeout
5809  *  @dev : Pointer to net device structure
5810  *  @txqueue: the index of the hanging transmit queue
5811  *  Description: this function is called when a packet transmission fails to
5812  *   complete within a reasonable time. The driver will mark the error in the
5813  *   netdev structure and arrange for the device to be reset to a sane state
5814  *   in order to transmit a new packet.
5815  */
5816 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5817 {
5818 	struct stmmac_priv *priv = netdev_priv(dev);
5819 
5820 	stmmac_global_err(priv);
5821 }
5822 
5823 /**
5824  *  stmmac_set_rx_mode - entry point for multicast addressing
5825  *  @dev : pointer to the device structure
5826  *  Description:
5827  *  This function is a driver entry point which gets called by the kernel
5828  *  whenever multicast addresses must be enabled/disabled.
5829  *  Return value:
5830  *  void.
5831  */
5832 static void stmmac_set_rx_mode(struct net_device *dev)
5833 {
5834 	struct stmmac_priv *priv = netdev_priv(dev);
5835 
5836 	stmmac_set_filter(priv, priv->hw, dev);
5837 }
5838 
5839 /**
5840  *  stmmac_change_mtu - entry point to change MTU size for the device.
5841  *  @dev : device pointer.
5842  *  @new_mtu : the new MTU size for the device.
5843  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5844  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5845  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5846  *  Return value:
5847  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5848  *  file on failure.
5849  */
5850 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5851 {
5852 	struct stmmac_priv *priv = netdev_priv(dev);
5853 	int txfifosz = priv->plat->tx_fifo_size;
5854 	struct stmmac_dma_conf *dma_conf;
5855 	const int mtu = new_mtu;
5856 	int ret;
5857 
5858 	if (txfifosz == 0)
5859 		txfifosz = priv->dma_cap.tx_fifo_size;
5860 
5861 	txfifosz /= priv->plat->tx_queues_to_use;
5862 
5863 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5864 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5865 		return -EINVAL;
5866 	}
5867 
5868 	new_mtu = STMMAC_ALIGN(new_mtu);
5869 
5870 	/* If condition true, FIFO is too small or MTU too large */
5871 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5872 		return -EINVAL;
5873 
5874 	if (netif_running(dev)) {
5875 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5876 		/* Try to allocate the new DMA conf with the new mtu */
5877 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5878 		if (IS_ERR(dma_conf)) {
5879 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5880 				   mtu);
5881 			return PTR_ERR(dma_conf);
5882 		}
5883 
5884 		stmmac_release(dev);
5885 
5886 		ret = __stmmac_open(dev, dma_conf);
5887 		if (ret) {
5888 			free_dma_desc_resources(priv, dma_conf);
5889 			kfree(dma_conf);
5890 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5891 			return ret;
5892 		}
5893 
5894 		kfree(dma_conf);
5895 
5896 		stmmac_set_rx_mode(dev);
5897 	}
5898 
5899 	WRITE_ONCE(dev->mtu, mtu);
5900 	netdev_update_features(dev);
5901 
5902 	return 0;
5903 }
5904 
5905 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5906 					     netdev_features_t features)
5907 {
5908 	struct stmmac_priv *priv = netdev_priv(dev);
5909 
5910 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5911 		features &= ~NETIF_F_RXCSUM;
5912 
5913 	if (!priv->plat->tx_coe)
5914 		features &= ~NETIF_F_CSUM_MASK;
5915 
5916 	/* Some GMAC devices have buggy Jumbo frame support that
5917 	 * needs to have the Tx COE disabled for oversized frames
5918 	 * (due to limited buffer sizes). In this case we disable
5919 	 * the TX csum insertion in the TDES and do not use SF.
5920 	 */
5921 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5922 		features &= ~NETIF_F_CSUM_MASK;
5923 
5924 	/* Disable tso if asked by ethtool */
5925 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5926 		if (features & NETIF_F_TSO)
5927 			priv->tso = true;
5928 		else
5929 			priv->tso = false;
5930 	}
5931 
5932 	return features;
5933 }
5934 
5935 static int stmmac_set_features(struct net_device *netdev,
5936 			       netdev_features_t features)
5937 {
5938 	struct stmmac_priv *priv = netdev_priv(netdev);
5939 
5940 	/* Keep the COE type if RX checksum is supported */
5941 	if (features & NETIF_F_RXCSUM)
5942 		priv->hw->rx_csum = priv->plat->rx_coe;
5943 	else
5944 		priv->hw->rx_csum = 0;
5945 	/* No check needed because rx_coe has been set before and it will be
5946 	 * fixed in case of issue.
5947 	 */
5948 	stmmac_rx_ipc(priv, priv->hw);
5949 
5950 	if (priv->sph_cap) {
5951 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5952 		u32 chan;
5953 
5954 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5955 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5956 	}
5957 
5958 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5959 		priv->hw->hw_vlan_en = true;
5960 	else
5961 		priv->hw->hw_vlan_en = false;
5962 
5963 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5964 
5965 	return 0;
5966 }
5967 
5968 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5969 {
5970 	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
5971 
5972 	/* This is interrupt context, just spin_lock() */
5973 	spin_lock(&fpe_cfg->lock);
5974 
5975 	if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN)
5976 		goto unlock_out;
5977 
5978 	/* LP has sent verify mPacket */
5979 	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER)
5980 		stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
5981 					MPACKET_RESPONSE);
5982 
5983 	/* Local has sent verify mPacket */
5984 	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER &&
5985 	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED)
5986 		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
5987 
5988 	/* LP has sent response mPacket */
5989 	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP &&
5990 	    fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING)
5991 		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
5992 
5993 unlock_out:
5994 	spin_unlock(&fpe_cfg->lock);
5995 }
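/* Editorial sketch (not part of the driver): the verification handshake the
 * handler above implements, seen from the local MAC:
 *
 *	FPE_EVENT_TVER (local sent verify mPacket)  -> status = VERIFYING
 *	FPE_EVENT_RVER (LP sent verify mPacket)     -> reply with MPACKET_RESPONSE
 *	FPE_EVENT_RRSP (LP sent response mPacket)   -> status = SUCCEEDED
 *
 * All events are ignored while the preemptible MAC is disabled.
 */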
5996 
5997 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5998 {
5999 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6000 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6001 	u32 queues_count;
6002 	u32 queue;
6003 	bool xmac;
6004 
6005 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6006 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6007 
6008 	if (priv->irq_wake)
6009 		pm_wakeup_event(priv->device, 0);
6010 
6011 	if (priv->dma_cap.estsel)
6012 		stmmac_est_irq_status(priv, priv, priv->dev,
6013 				      &priv->xstats, tx_cnt);
6014 
6015 	if (priv->dma_cap.fpesel) {
6016 		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
6017 						   priv->dev);
6018 
6019 		stmmac_fpe_event_status(priv, status);
6020 	}
6021 
6022 	/* To handle the GMAC's own interrupts */
6023 	if ((priv->plat->has_gmac) || xmac) {
6024 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6025 
6026 		if (unlikely(status)) {
6027 			/* For LPI we need to save the tx status */
6028 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6029 				priv->tx_path_in_lpi_mode = true;
6030 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6031 				priv->tx_path_in_lpi_mode = false;
6032 		}
6033 
6034 		for (queue = 0; queue < queues_count; queue++)
6035 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6036 
6037 		/* PCS link status */
6038 		if (priv->hw->pcs &&
6039 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6040 			if (priv->xstats.pcs_link)
6041 				netif_carrier_on(priv->dev);
6042 			else
6043 				netif_carrier_off(priv->dev);
6044 		}
6045 
6046 		stmmac_timestamp_interrupt(priv, priv);
6047 	}
6048 }
6049 
6050 /**
6051  *  stmmac_interrupt - main ISR
6052  *  @irq: interrupt number.
6053  *  @dev_id: to pass the net device pointer.
6054  *  Description: this is the main driver interrupt service routine.
6055  *  It can call:
6056  *  o DMA service routine (to manage incoming frame reception and transmission
6057  *    status)
6058  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6059  *    interrupts.
6060  */
6061 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6062 {
6063 	struct net_device *dev = (struct net_device *)dev_id;
6064 	struct stmmac_priv *priv = netdev_priv(dev);
6065 
6066 	/* Check if adapter is up */
6067 	if (test_bit(STMMAC_DOWN, &priv->state))
6068 		return IRQ_HANDLED;
6069 
6070 	/* Check ASP error if it isn't delivered via an individual IRQ */
6071 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6072 		return IRQ_HANDLED;
6073 
6074 	/* To handle Common interrupts */
6075 	stmmac_common_interrupt(priv);
6076 
6077 	/* To handle DMA interrupts */
6078 	stmmac_dma_interrupt(priv);
6079 
6080 	return IRQ_HANDLED;
6081 }
6082 
6083 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6084 {
6085 	struct net_device *dev = (struct net_device *)dev_id;
6086 	struct stmmac_priv *priv = netdev_priv(dev);
6087 
6088 	/* Check if adapter is up */
6089 	if (test_bit(STMMAC_DOWN, &priv->state))
6090 		return IRQ_HANDLED;
6091 
6092 	/* To handle Common interrupts */
6093 	stmmac_common_interrupt(priv);
6094 
6095 	return IRQ_HANDLED;
6096 }
6097 
6098 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6099 {
6100 	struct net_device *dev = (struct net_device *)dev_id;
6101 	struct stmmac_priv *priv = netdev_priv(dev);
6102 
6103 	/* Check if adapter is up */
6104 	if (test_bit(STMMAC_DOWN, &priv->state))
6105 		return IRQ_HANDLED;
6106 
6107 	/* Check if a fatal error happened */
6108 	stmmac_safety_feat_interrupt(priv);
6109 
6110 	return IRQ_HANDLED;
6111 }
6112 
6113 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6114 {
6115 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6116 	struct stmmac_dma_conf *dma_conf;
6117 	int chan = tx_q->queue_index;
6118 	struct stmmac_priv *priv;
6119 	int status;
6120 
6121 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6122 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6123 
6124 	/* Check if adapter is up */
6125 	if (test_bit(STMMAC_DOWN, &priv->state))
6126 		return IRQ_HANDLED;
6127 
6128 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6129 
6130 	if (unlikely(status & tx_hard_error_bump_tc)) {
6131 		/* Try to bump up the dma threshold on this failure */
6132 		stmmac_bump_dma_threshold(priv, chan);
6133 	} else if (unlikely(status == tx_hard_error)) {
6134 		stmmac_tx_err(priv, chan);
6135 	}
6136 
6137 	return IRQ_HANDLED;
6138 }
6139 
6140 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6141 {
6142 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6143 	struct stmmac_dma_conf *dma_conf;
6144 	int chan = rx_q->queue_index;
6145 	struct stmmac_priv *priv;
6146 
6147 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6148 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6149 
6150 	/* Check if adapter is up */
6151 	if (test_bit(STMMAC_DOWN, &priv->state))
6152 		return IRQ_HANDLED;
6153 
6154 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6155 
6156 	return IRQ_HANDLED;
6157 }
6158 
6159 /**
6160  *  stmmac_ioctl - Entry point for the Ioctl
6161  *  @dev: Device pointer.
6162  *  @rq: An IOCTL-specific structure that can contain a pointer to
6163  *  a proprietary structure used to pass information to the driver.
6164  *  @cmd: IOCTL command
6165  *  Description:
6166  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6167  */
6168 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6169 {
6170 	struct stmmac_priv *priv = netdev_priv (dev);
6171 	int ret = -EOPNOTSUPP;
6172 
6173 	if (!netif_running(dev))
6174 		return -EINVAL;
6175 
6176 	switch (cmd) {
6177 	case SIOCGMIIPHY:
6178 	case SIOCGMIIREG:
6179 	case SIOCSMIIREG:
6180 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6181 		break;
6182 	case SIOCSHWTSTAMP:
6183 		ret = stmmac_hwtstamp_set(dev, rq);
6184 		break;
6185 	case SIOCGHWTSTAMP:
6186 		ret = stmmac_hwtstamp_get(dev, rq);
6187 		break;
6188 	default:
6189 		break;
6190 	}
6191 
6192 	return ret;
6193 }
6194 
6195 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6196 				    void *cb_priv)
6197 {
6198 	struct stmmac_priv *priv = cb_priv;
6199 	int ret = -EOPNOTSUPP;
6200 
6201 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6202 		return ret;
6203 
6204 	__stmmac_disable_all_queues(priv);
6205 
6206 	switch (type) {
6207 	case TC_SETUP_CLSU32:
6208 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6209 		break;
6210 	case TC_SETUP_CLSFLOWER:
6211 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6212 		break;
6213 	default:
6214 		break;
6215 	}
6216 
6217 	stmmac_enable_all_queues(priv);
6218 	return ret;
6219 }
6220 
6221 static LIST_HEAD(stmmac_block_cb_list);
6222 
6223 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6224 			   void *type_data)
6225 {
6226 	struct stmmac_priv *priv = netdev_priv(ndev);
6227 
6228 	switch (type) {
6229 	case TC_QUERY_CAPS:
6230 		return stmmac_tc_query_caps(priv, priv, type_data);
6231 	case TC_SETUP_QDISC_MQPRIO:
6232 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6233 	case TC_SETUP_BLOCK:
6234 		return flow_block_cb_setup_simple(type_data,
6235 						  &stmmac_block_cb_list,
6236 						  stmmac_setup_tc_block_cb,
6237 						  priv, priv, true);
6238 	case TC_SETUP_QDISC_CBS:
6239 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6240 	case TC_SETUP_QDISC_TAPRIO:
6241 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6242 	case TC_SETUP_QDISC_ETF:
6243 		return stmmac_tc_setup_etf(priv, priv, type_data);
6244 	default:
6245 		return -EOPNOTSUPP;
6246 	}
6247 }
6248 
6249 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6250 			       struct net_device *sb_dev)
6251 {
6252 	int gso = skb_shinfo(skb)->gso_type;
6253 
6254 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6255 		/*
6256 		 * There is no way to determine the number of TSO/USO
6257 		 * capable Queues. Let's always use Queue 0
6258 		 * because if TSO/USO is supported then at least this
6259 		 * one will be capable.
6260 		 */
6261 		return 0;
6262 	}
6263 
6264 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6265 }
6266 
6267 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6268 {
6269 	struct stmmac_priv *priv = netdev_priv(ndev);
6270 	int ret = 0;
6271 
6272 	ret = pm_runtime_resume_and_get(priv->device);
6273 	if (ret < 0)
6274 		return ret;
6275 
6276 	ret = eth_mac_addr(ndev, addr);
6277 	if (ret)
6278 		goto set_mac_error;
6279 
6280 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6281 
6282 set_mac_error:
6283 	pm_runtime_put(priv->device);
6284 
6285 	return ret;
6286 }
6287 
6288 #ifdef CONFIG_DEBUG_FS
6289 static struct dentry *stmmac_fs_dir;
6290 
6291 static void sysfs_display_ring(void *head, int size, int extend_desc,
6292 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6293 {
6294 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6295 	struct dma_desc *p = (struct dma_desc *)head;
6296 	unsigned int desc_size;
6297 	dma_addr_t dma_addr;
6298 	int i;
6299 
6300 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6301 	for (i = 0; i < size; i++) {
6302 		dma_addr = dma_phy_addr + i * desc_size;
6303 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6304 				i, &dma_addr,
6305 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6306 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6307 		if (extend_desc)
6308 			p = &(++ep)->basic;
6309 		else
6310 			p++;
6311 	}
6312 }
6313 
6314 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6315 {
6316 	struct net_device *dev = seq->private;
6317 	struct stmmac_priv *priv = netdev_priv(dev);
6318 	u32 rx_count = priv->plat->rx_queues_to_use;
6319 	u32 tx_count = priv->plat->tx_queues_to_use;
6320 	u32 queue;
6321 
6322 	if ((dev->flags & IFF_UP) == 0)
6323 		return 0;
6324 
6325 	for (queue = 0; queue < rx_count; queue++) {
6326 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6327 
6328 		seq_printf(seq, "RX Queue %d:\n", queue);
6329 
6330 		if (priv->extend_desc) {
6331 			seq_printf(seq, "Extended descriptor ring:\n");
6332 			sysfs_display_ring((void *)rx_q->dma_erx,
6333 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6334 		} else {
6335 			seq_printf(seq, "Descriptor ring:\n");
6336 			sysfs_display_ring((void *)rx_q->dma_rx,
6337 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6338 		}
6339 	}
6340 
6341 	for (queue = 0; queue < tx_count; queue++) {
6342 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6343 
6344 		seq_printf(seq, "TX Queue %d:\n", queue);
6345 
6346 		if (priv->extend_desc) {
6347 			seq_printf(seq, "Extended descriptor ring:\n");
6348 			sysfs_display_ring((void *)tx_q->dma_etx,
6349 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6350 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6351 			seq_printf(seq, "Descriptor ring:\n");
6352 			sysfs_display_ring((void *)tx_q->dma_tx,
6353 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6354 		}
6355 	}
6356 
6357 	return 0;
6358 }
6359 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6360 
6361 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6362 {
6363 	static const char * const dwxgmac_timestamp_source[] = {
6364 		"None",
6365 		"Internal",
6366 		"External",
6367 		"Both",
6368 	};
6369 	static const char * const dwxgmac_safety_feature_desc[] = {
6370 		"No",
6371 		"All Safety Features with ECC and Parity",
6372 		"All Safety Features without ECC or Parity",
6373 		"All Safety Features with Parity Only",
6374 		"ECC Only",
6375 		"UNDEFINED",
6376 		"UNDEFINED",
6377 		"UNDEFINED",
6378 	};
6379 	struct net_device *dev = seq->private;
6380 	struct stmmac_priv *priv = netdev_priv(dev);
6381 
6382 	if (!priv->hw_cap_support) {
6383 		seq_printf(seq, "DMA HW features not supported\n");
6384 		return 0;
6385 	}
6386 
6387 	seq_printf(seq, "==============================\n");
6388 	seq_printf(seq, "\tDMA HW features\n");
6389 	seq_printf(seq, "==============================\n");
6390 
6391 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6392 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6393 	seq_printf(seq, "\t1000 Mbps: %s\n",
6394 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6395 	seq_printf(seq, "\tHalf duplex: %s\n",
6396 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6397 	if (priv->plat->has_xgmac) {
6398 		seq_printf(seq,
6399 			   "\tNumber of Additional MAC address registers: %d\n",
6400 			   priv->dma_cap.multi_addr);
6401 	} else {
6402 		seq_printf(seq, "\tHash Filter: %s\n",
6403 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6404 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6405 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6406 	}
6407 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6408 		   (priv->dma_cap.pcs) ? "Y" : "N");
6409 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6410 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6411 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6412 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6413 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6414 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6415 	seq_printf(seq, "\tRMON module: %s\n",
6416 		   (priv->dma_cap.rmon) ? "Y" : "N");
6417 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6418 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6419 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6420 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6421 	if (priv->plat->has_xgmac)
6422 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6423 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6424 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6425 		   (priv->dma_cap.eee) ? "Y" : "N");
6426 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6427 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6428 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6429 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6430 	    priv->plat->has_xgmac) {
6431 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6432 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6433 	} else {
6434 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6435 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6436 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6437 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6438 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6439 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6440 	}
6441 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6442 		   priv->dma_cap.number_rx_channel);
6443 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6444 		   priv->dma_cap.number_tx_channel);
6445 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6446 		   priv->dma_cap.number_rx_queues);
6447 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6448 		   priv->dma_cap.number_tx_queues);
6449 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6450 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6451 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6452 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6453 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6454 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6455 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6456 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6457 		   priv->dma_cap.pps_out_num);
6458 	seq_printf(seq, "\tSafety Features: %s\n",
6459 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6460 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6461 		   priv->dma_cap.frpsel ? "Y" : "N");
6462 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6463 		   priv->dma_cap.host_dma_width);
6464 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6465 		   priv->dma_cap.rssen ? "Y" : "N");
6466 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6467 		   priv->dma_cap.vlhash ? "Y" : "N");
6468 	seq_printf(seq, "\tSplit Header: %s\n",
6469 		   priv->dma_cap.sphen ? "Y" : "N");
6470 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6471 		   priv->dma_cap.vlins ? "Y" : "N");
6472 	seq_printf(seq, "\tDouble VLAN: %s\n",
6473 		   priv->dma_cap.dvlan ? "Y" : "N");
6474 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6475 		   priv->dma_cap.l3l4fnum);
6476 	seq_printf(seq, "\tARP Offloading: %s\n",
6477 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6478 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6479 		   priv->dma_cap.estsel ? "Y" : "N");
6480 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6481 		   priv->dma_cap.fpesel ? "Y" : "N");
6482 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6483 		   priv->dma_cap.tbssel ? "Y" : "N");
6484 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6485 		   priv->dma_cap.tbs_ch_num);
6486 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6487 		   priv->dma_cap.sgfsel ? "Y" : "N");
6488 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6489 		   BIT(priv->dma_cap.ttsfd) >> 1);
6490 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6491 		   priv->dma_cap.numtc);
6492 	seq_printf(seq, "\tDCB Feature: %s\n",
6493 		   priv->dma_cap.dcben ? "Y" : "N");
6494 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6495 		   priv->dma_cap.advthword ? "Y" : "N");
6496 	seq_printf(seq, "\tPTP Offload: %s\n",
6497 		   priv->dma_cap.ptoen ? "Y" : "N");
6498 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6499 		   priv->dma_cap.osten ? "Y" : "N");
6500 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6501 		   priv->dma_cap.pfcen ? "Y" : "N");
6502 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6503 		   BIT(priv->dma_cap.frpes) << 6);
6504 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6505 		   BIT(priv->dma_cap.frpbs) << 6);
6506 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6507 		   priv->dma_cap.frppipe_num);
6508 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6509 		   priv->dma_cap.nrvf_num ?
6510 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6511 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6512 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6513 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6514 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6515 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6516 		   priv->dma_cap.cbtisel ? "Y" : "N");
6517 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6518 		   priv->dma_cap.aux_snapshot_n);
6519 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6520 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6521 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6522 		   priv->dma_cap.edma ? "Y" : "N");
6523 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6524 		   priv->dma_cap.ediffc ? "Y" : "N");
6525 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6526 		   priv->dma_cap.vxn ? "Y" : "N");
6527 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6528 		   priv->dma_cap.dbgmem ? "Y" : "N");
6529 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6530 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6531 	return 0;
6532 }
6533 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6534 
6535 /* Use network device events to rename debugfs file entries.
6536  */
6537 static int stmmac_device_event(struct notifier_block *unused,
6538 			       unsigned long event, void *ptr)
6539 {
6540 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6541 	struct stmmac_priv *priv = netdev_priv(dev);
6542 
6543 	if (dev->netdev_ops != &stmmac_netdev_ops)
6544 		goto done;
6545 
6546 	switch (event) {
6547 	case NETDEV_CHANGENAME:
6548 		if (priv->dbgfs_dir)
6549 			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6550 							 priv->dbgfs_dir,
6551 							 stmmac_fs_dir,
6552 							 dev->name);
6553 		break;
6554 	}
6555 done:
6556 	return NOTIFY_DONE;
6557 }
6558 
6559 static struct notifier_block stmmac_notifier = {
6560 	.notifier_call = stmmac_device_event,
6561 };
6562 
6563 static void stmmac_init_fs(struct net_device *dev)
6564 {
6565 	struct stmmac_priv *priv = netdev_priv(dev);
6566 
6567 	rtnl_lock();
6568 
6569 	/* Create per netdev entries */
6570 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6571 
6572 	/* Entry to report DMA RX/TX rings */
6573 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6574 			    &stmmac_rings_status_fops);
6575 
6576 	/* Entry to report the DMA HW features */
6577 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6578 			    &stmmac_dma_cap_fops);
6579 
6580 	rtnl_unlock();
6581 }
6582 
6583 static void stmmac_exit_fs(struct net_device *dev)
6584 {
6585 	struct stmmac_priv *priv = netdev_priv(dev);
6586 
6587 	debugfs_remove_recursive(priv->dbgfs_dir);
6588 }
6589 #endif /* CONFIG_DEBUG_FS */
6590 
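/* Compute the little-endian CRC32 of a VLAN ID; only the 12 VID bits are
 * fed into the CRC. The result is used to build the HW VLAN hash filter.
 */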
6591 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6592 {
6593 	unsigned char *data = (unsigned char *)&vid_le;
6594 	unsigned char data_byte = 0;
6595 	u32 crc = ~0x0;
6596 	u32 temp = 0;
6597 	int i, bits;
6598 
6599 	bits = get_bitmask_order(VLAN_VID_MASK);
6600 	for (i = 0; i < bits; i++) {
6601 		if ((i % 8) == 0)
6602 			data_byte = data[i / 8];
6603 
6604 		temp = ((crc & 1) ^ data_byte) & 1;
6605 		crc >>= 1;
6606 		data_byte >>= 1;
6607 
6608 		if (temp)
6609 			crc ^= 0xedb88320;
6610 	}
6611 
6612 	return crc;
6613 }
6614 
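/**
 * stmmac_vlan_update - reprogram the MAC VLAN filter
 * @priv: driver private structure
 * @is_double: true when filtering double (S-TAG) VLAN tags
 *
 * Rebuild the 16-bit VLAN hash from the currently active VIDs and write it
 * to the MAC. Without HW VLAN hash support, fall back to perfect matching
 * of a single VID (VID 0 always passes the filter).
 */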
6615 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6616 {
6617 	u32 crc, hash = 0;
6618 	u16 pmatch = 0;
6619 	int count = 0;
6620 	u16 vid = 0;
6621 
6622 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6623 		__le16 vid_le = cpu_to_le16(vid);
6624 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6625 		hash |= (1 << crc);
6626 		count++;
6627 	}
6628 
6629 	if (!priv->dma_cap.vlhash) {
6630 		if (count > 2) /* VID = 0 always passes filter */
6631 			return -EOPNOTSUPP;
6632 
6633 		pmatch = vid;
6634 		hash = 0;
6635 	}
6636 
6637 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6638 }
6639 
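/* ndo_vlan_rx_add_vid: record the new VID and update the HW VLAN filters */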
6640 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6641 {
6642 	struct stmmac_priv *priv = netdev_priv(ndev);
6643 	bool is_double = false;
6644 	int ret;
6645 
6646 	ret = pm_runtime_resume_and_get(priv->device);
6647 	if (ret < 0)
6648 		return ret;
6649 
6650 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6651 		is_double = true;
6652 
6653 	set_bit(vid, priv->active_vlans);
6654 	ret = stmmac_vlan_update(priv, is_double);
6655 	if (ret) {
6656 		clear_bit(vid, priv->active_vlans);
6657 		goto err_pm_put;
6658 	}
6659 
6660 	if (priv->hw->num_vlan) {
6661 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6662 		if (ret)
6663 			goto err_pm_put;
6664 	}
6665 err_pm_put:
6666 	pm_runtime_put(priv->device);
6667 
6668 	return ret;
6669 }
6670 
6671 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6672 {
6673 	struct stmmac_priv *priv = netdev_priv(ndev);
6674 	bool is_double = false;
6675 	int ret;
6676 
6677 	ret = pm_runtime_resume_and_get(priv->device);
6678 	if (ret < 0)
6679 		return ret;
6680 
6681 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6682 		is_double = true;
6683 
6684 	clear_bit(vid, priv->active_vlans);
6685 
6686 	if (priv->hw->num_vlan) {
6687 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6688 		if (ret)
6689 			goto del_vlan_error;
6690 	}
6691 
6692 	ret = stmmac_vlan_update(priv, is_double);
6693 
6694 del_vlan_error:
6695 	pm_runtime_put(priv->device);
6696 
6697 	return ret;
6698 }
6699 
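/* ndo_bpf: attach/detach an XDP program or bind an AF_XDP buffer pool */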
6700 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6701 {
6702 	struct stmmac_priv *priv = netdev_priv(dev);
6703 
6704 	switch (bpf->command) {
6705 	case XDP_SETUP_PROG:
6706 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6707 	case XDP_SETUP_XSK_POOL:
6708 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6709 					     bpf->xsk.queue_id);
6710 	default:
6711 		return -EOPNOTSUPP;
6712 	}
6713 }
6714 
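/* ndo_xdp_xmit: transmit XDP frames redirected to this device. The TX queue
 * is shared with the slow path, so the netdev TX lock is taken and the
 * queue's trans timestamp is refreshed to avoid spurious TX timeouts.
 */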
6715 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6716 			   struct xdp_frame **frames, u32 flags)
6717 {
6718 	struct stmmac_priv *priv = netdev_priv(dev);
6719 	int cpu = smp_processor_id();
6720 	struct netdev_queue *nq;
6721 	int i, nxmit = 0;
6722 	int queue;
6723 
6724 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6725 		return -ENETDOWN;
6726 
6727 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6728 		return -EINVAL;
6729 
6730 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6731 	nq = netdev_get_tx_queue(priv->dev, queue);
6732 
6733 	__netif_tx_lock(nq, cpu);
6734 	/* Avoids TX time-out as we are sharing with slow path */
6735 	txq_trans_cond_update(nq);
6736 
6737 	for (i = 0; i < num_frames; i++) {
6738 		int res;
6739 
6740 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6741 		if (res == STMMAC_XDP_CONSUMED)
6742 			break;
6743 
6744 		nxmit++;
6745 	}
6746 
6747 	if (flags & XDP_XMIT_FLUSH) {
6748 		stmmac_flush_tx_descriptors(priv, queue);
6749 		stmmac_tx_timer_arm(priv, queue);
6750 	}
6751 
6752 	__netif_tx_unlock(nq);
6753 
6754 	return nxmit;
6755 }
6756 
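/* Mask the channel RX interrupt, stop RX DMA and release the queue's
 * descriptor resources.
 */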
6757 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6758 {
6759 	struct stmmac_channel *ch = &priv->channel[queue];
6760 	unsigned long flags;
6761 
6762 	spin_lock_irqsave(&ch->lock, flags);
6763 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6764 	spin_unlock_irqrestore(&ch->lock, flags);
6765 
6766 	stmmac_stop_rx_dma(priv, queue);
6767 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6768 }
6769 
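/* Reallocate and reinitialize an RX queue, reprogram its DMA channel
 * (tail pointer and buffer size, using the XSK frame size when a pool is
 * attached), then restart RX DMA and re-enable the channel interrupt.
 */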
6770 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6771 {
6772 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6773 	struct stmmac_channel *ch = &priv->channel[queue];
6774 	unsigned long flags;
6775 	u32 buf_size;
6776 	int ret;
6777 
6778 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6779 	if (ret) {
6780 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6781 		return;
6782 	}
6783 
6784 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6785 	if (ret) {
6786 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6787 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6788 		return;
6789 	}
6790 
6791 	stmmac_reset_rx_queue(priv, queue);
6792 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6793 
6794 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6795 			    rx_q->dma_rx_phy, rx_q->queue_index);
6796 
6797 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6798 			     sizeof(struct dma_desc));
6799 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6800 			       rx_q->rx_tail_addr, rx_q->queue_index);
6801 
6802 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6803 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6804 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6805 				      buf_size,
6806 				      rx_q->queue_index);
6807 	} else {
6808 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6809 				      priv->dma_conf.dma_buf_sz,
6810 				      rx_q->queue_index);
6811 	}
6812 
6813 	stmmac_start_rx_dma(priv, queue);
6814 
6815 	spin_lock_irqsave(&ch->lock, flags);
6816 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6817 	spin_unlock_irqrestore(&ch->lock, flags);
6818 }
6819 
6820 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6821 {
6822 	struct stmmac_channel *ch = &priv->channel[queue];
6823 	unsigned long flags;
6824 
6825 	spin_lock_irqsave(&ch->lock, flags);
6826 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6827 	spin_unlock_irqrestore(&ch->lock, flags);
6828 
6829 	stmmac_stop_tx_dma(priv, queue);
6830 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6831 }
6832 
6833 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6834 {
6835 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6836 	struct stmmac_channel *ch = &priv->channel[queue];
6837 	unsigned long flags;
6838 	int ret;
6839 
6840 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6841 	if (ret) {
6842 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6843 		return;
6844 	}
6845 
6846 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6847 	if (ret) {
6848 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6849 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6850 		return;
6851 	}
6852 
6853 	stmmac_reset_tx_queue(priv, queue);
6854 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6855 
6856 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6857 			    tx_q->dma_tx_phy, tx_q->queue_index);
6858 
6859 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6860 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6861 
6862 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6863 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6864 			       tx_q->tx_tail_addr, tx_q->queue_index);
6865 
6866 	stmmac_start_tx_dma(priv, queue);
6867 
6868 	spin_lock_irqsave(&ch->lock, flags);
6869 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6870 	spin_unlock_irqrestore(&ch->lock, flags);
6871 }
6872 
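/* Tear down the data path for an XDP reconfiguration: stop TX and NAPI,
 * cancel the TX coalescing timers, free the IRQ lines, stop the DMA
 * engines, release the descriptor resources and disable the MAC.
 */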
6873 void stmmac_xdp_release(struct net_device *dev)
6874 {
6875 	struct stmmac_priv *priv = netdev_priv(dev);
6876 	u32 chan;
6877 
6878 	/* Ensure tx function is not running */
6879 	netif_tx_disable(dev);
6880 
6881 	/* Disable NAPI process */
6882 	stmmac_disable_all_queues(priv);
6883 
6884 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6885 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6886 
6887 	/* Free the IRQ lines */
6888 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6889 
6890 	/* Stop TX/RX DMA channels */
6891 	stmmac_stop_all_dma(priv);
6892 
6893 	/* Release and free the Rx/Tx resources */
6894 	free_dma_desc_resources(priv, &priv->dma_conf);
6895 
6896 	/* Disable the MAC Rx/Tx */
6897 	stmmac_mac_set(priv, priv->ioaddr, false);
6898 
6899 	/* set trans_start so we don't get spurious
6900 	 * watchdogs during reset
6901 	 */
6902 	netif_trans_update(dev);
6903 	netif_carrier_off(dev);
6904 }
6905 
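/* Bring the data path back up after an XDP reconfiguration: allocate and
 * initialize the descriptor rings, program every DMA channel, enable the
 * MAC, request the IRQ lines and restart NAPI and the TX queues.
 */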
6906 int stmmac_xdp_open(struct net_device *dev)
6907 {
6908 	struct stmmac_priv *priv = netdev_priv(dev);
6909 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6910 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6911 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6912 	struct stmmac_rx_queue *rx_q;
6913 	struct stmmac_tx_queue *tx_q;
6914 	u32 buf_size;
6915 	bool sph_en;
6916 	u32 chan;
6917 	int ret;
6918 
6919 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6920 	if (ret < 0) {
6921 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6922 			   __func__);
6923 		goto dma_desc_error;
6924 	}
6925 
6926 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6927 	if (ret < 0) {
6928 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6929 			   __func__);
6930 		goto init_error;
6931 	}
6932 
6933 	stmmac_reset_queues_param(priv);
6934 
6935 	/* DMA CSR Channel configuration */
6936 	for (chan = 0; chan < dma_csr_ch; chan++) {
6937 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6938 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6939 	}
6940 
6941 	/* Adjust Split header */
6942 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6943 
6944 	/* DMA RX Channel Configuration */
6945 	for (chan = 0; chan < rx_cnt; chan++) {
6946 		rx_q = &priv->dma_conf.rx_queue[chan];
6947 
6948 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6949 				    rx_q->dma_rx_phy, chan);
6950 
6951 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6952 				     (rx_q->buf_alloc_num *
6953 				      sizeof(struct dma_desc));
6954 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6955 				       rx_q->rx_tail_addr, chan);
6956 
6957 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6958 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6959 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6960 					      buf_size,
6961 					      rx_q->queue_index);
6962 		} else {
6963 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6964 					      priv->dma_conf.dma_buf_sz,
6965 					      rx_q->queue_index);
6966 		}
6967 
6968 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6969 	}
6970 
6971 	/* DMA TX Channel Configuration */
6972 	for (chan = 0; chan < tx_cnt; chan++) {
6973 		tx_q = &priv->dma_conf.tx_queue[chan];
6974 
6975 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6976 				    tx_q->dma_tx_phy, chan);
6977 
6978 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6979 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6980 				       tx_q->tx_tail_addr, chan);
6981 
6982 		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6983 		tx_q->txtimer.function = stmmac_tx_timer;
6984 	}
6985 
6986 	/* Enable the MAC Rx/Tx */
6987 	stmmac_mac_set(priv, priv->ioaddr, true);
6988 
6989 	/* Start Rx & Tx DMA Channels */
6990 	stmmac_start_all_dma(priv);
6991 
6992 	ret = stmmac_request_irq(dev);
6993 	if (ret)
6994 		goto irq_error;
6995 
6996 	/* Enable NAPI process */
6997 	stmmac_enable_all_queues(priv);
6998 	netif_carrier_on(dev);
6999 	netif_tx_start_all_queues(dev);
7000 	stmmac_enable_all_dma_irq(priv);
7001 
7002 	return 0;
7003 
7004 irq_error:
7005 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7006 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7007 
7008 	stmmac_hw_teardown(dev);
7009 init_error:
7010 	free_dma_desc_resources(priv, &priv->dma_conf);
7011 dma_desc_error:
7012 	return ret;
7013 }
7014 
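/* ndo_xsk_wakeup: kick the combined RX/TX NAPI context of an AF_XDP
 * zero-copy queue, unless that NAPI is already scheduled.
 */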
7015 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7016 {
7017 	struct stmmac_priv *priv = netdev_priv(dev);
7018 	struct stmmac_rx_queue *rx_q;
7019 	struct stmmac_tx_queue *tx_q;
7020 	struct stmmac_channel *ch;
7021 
7022 	if (test_bit(STMMAC_DOWN, &priv->state) ||
7023 	    !netif_carrier_ok(priv->dev))
7024 		return -ENETDOWN;
7025 
7026 	if (!stmmac_xdp_is_enabled(priv))
7027 		return -EINVAL;
7028 
7029 	if (queue >= priv->plat->rx_queues_to_use ||
7030 	    queue >= priv->plat->tx_queues_to_use)
7031 		return -EINVAL;
7032 
7033 	rx_q = &priv->dma_conf.rx_queue[queue];
7034 	tx_q = &priv->dma_conf.tx_queue[queue];
7035 	ch = &priv->channel[queue];
7036 
7037 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7038 		return -EINVAL;
7039 
7040 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7041 		/* EQoS does not have per-DMA channel SW interrupt,
7042 		 * so we schedule RX Napi straight-away.
7043 		 */
7044 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7045 			__napi_schedule(&ch->rxtx_napi);
7046 	}
7047 
7048 	return 0;
7049 }
7050 
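/* ndo_get_stats64: aggregate the per-queue u64_stats counters and the
 * driver extended statistics into the standard rtnl_link_stats64 report.
 */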
7051 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7052 {
7053 	struct stmmac_priv *priv = netdev_priv(dev);
7054 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7055 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7056 	unsigned int start;
7057 	int q;
7058 
7059 	for (q = 0; q < tx_cnt; q++) {
7060 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7061 		u64 tx_packets;
7062 		u64 tx_bytes;
7063 
7064 		do {
7065 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7066 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7067 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7068 		do {
7069 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7070 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7071 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7072 
7073 		stats->tx_packets += tx_packets;
7074 		stats->tx_bytes += tx_bytes;
7075 	}
7076 
7077 	for (q = 0; q < rx_cnt; q++) {
7078 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7079 		u64 rx_packets;
7080 		u64 rx_bytes;
7081 
7082 		do {
7083 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7084 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7085 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7086 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7087 
7088 		stats->rx_packets += rx_packets;
7089 		stats->rx_bytes += rx_bytes;
7090 	}
7091 
7092 	stats->rx_dropped = priv->xstats.rx_dropped;
7093 	stats->rx_errors = priv->xstats.rx_errors;
7094 	stats->tx_dropped = priv->xstats.tx_dropped;
7095 	stats->tx_errors = priv->xstats.tx_errors;
7096 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7097 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7098 	stats->rx_length_errors = priv->xstats.rx_length;
7099 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7100 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7101 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7102 }
7103 
7104 static const struct net_device_ops stmmac_netdev_ops = {
7105 	.ndo_open = stmmac_open,
7106 	.ndo_start_xmit = stmmac_xmit,
7107 	.ndo_stop = stmmac_release,
7108 	.ndo_change_mtu = stmmac_change_mtu,
7109 	.ndo_fix_features = stmmac_fix_features,
7110 	.ndo_set_features = stmmac_set_features,
7111 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7112 	.ndo_tx_timeout = stmmac_tx_timeout,
7113 	.ndo_eth_ioctl = stmmac_ioctl,
7114 	.ndo_get_stats64 = stmmac_get_stats64,
7115 	.ndo_setup_tc = stmmac_setup_tc,
7116 	.ndo_select_queue = stmmac_select_queue,
7117 	.ndo_set_mac_address = stmmac_set_mac_address,
7118 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7119 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7120 	.ndo_bpf = stmmac_bpf,
7121 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7122 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7123 };
7124 
7125 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7126 {
7127 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7128 		return;
7129 	if (test_bit(STMMAC_DOWN, &priv->state))
7130 		return;
7131 
7132 	netdev_err(priv->dev, "Reset adapter.\n");
7133 
7134 	rtnl_lock();
7135 	netif_trans_update(priv->dev);
7136 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7137 		usleep_range(1000, 2000);
7138 
7139 	set_bit(STMMAC_DOWN, &priv->state);
7140 	dev_close(priv->dev);
7141 	dev_open(priv->dev, NULL);
7142 	clear_bit(STMMAC_DOWN, &priv->state);
7143 	clear_bit(STMMAC_RESETING, &priv->state);
7144 	rtnl_unlock();
7145 }
7146 
7147 static void stmmac_service_task(struct work_struct *work)
7148 {
7149 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7150 			service_task);
7151 
7152 	stmmac_reset_subtask(priv);
7153 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7154 }
7155 
7156 /**
7157  *  stmmac_hw_init - Init the MAC device
7158  *  @priv: driver private structure
7159  *  Description: this function is to configure the MAC device according to
7160  *  some platform parameters or the HW capability register. It prepares the
7161  *  driver to use either ring or chain modes and to setup either enhanced or
7162  *  normal descriptors.
7163  */
7164 static int stmmac_hw_init(struct stmmac_priv *priv)
7165 {
7166 	int ret;
7167 
7168 	/* dwmac-sun8i only works in chain mode */
7169 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7170 		chain_mode = 1;
7171 	priv->chain_mode = chain_mode;
7172 
7173 	/* Initialize HW Interface */
7174 	ret = stmmac_hwif_init(priv);
7175 	if (ret)
7176 		return ret;
7177 
7178 	/* Get the HW capability (new GMAC newer than 3.50a) */
7179 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7180 	if (priv->hw_cap_support) {
7181 		dev_info(priv->device, "DMA HW capability register supported\n");
7182 
7183 		/* We can override some gmac/dma configuration fields: e.g.
7184 		 * enh_desc, tx_coe (e.g. that are passed through the
7185 		 * platform) with the values from the HW capability
7186 		 * register (if supported).
7187 		 */
7188 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7189 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7190 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7191 		priv->hw->pmt = priv->plat->pmt;
7192 		if (priv->dma_cap.hash_tb_sz) {
7193 			priv->hw->multicast_filter_bins =
7194 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7195 			priv->hw->mcast_bits_log2 =
7196 					ilog2(priv->hw->multicast_filter_bins);
7197 		}
7198 
7199 		/* TXCOE doesn't work in thresh DMA mode */
7200 		if (priv->plat->force_thresh_dma_mode)
7201 			priv->plat->tx_coe = 0;
7202 		else
7203 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7204 
7205 		/* In case of GMAC4 rx_coe is from HW cap register. */
7206 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7207 
7208 		if (priv->dma_cap.rx_coe_type2)
7209 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7210 		else if (priv->dma_cap.rx_coe_type1)
7211 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7212 
7213 	} else {
7214 		dev_info(priv->device, "No HW DMA feature register supported\n");
7215 	}
7216 
7217 	if (priv->plat->rx_coe) {
7218 		priv->hw->rx_csum = priv->plat->rx_coe;
7219 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7220 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7221 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7222 	}
7223 	if (priv->plat->tx_coe)
7224 		dev_info(priv->device, "TX Checksum insertion supported\n");
7225 
7226 	if (priv->plat->pmt) {
7227 		dev_info(priv->device, "Wake-Up On Lan supported\n");
7228 		device_set_wakeup_capable(priv->device, 1);
7229 	}
7230 
7231 	if (priv->dma_cap.tsoen)
7232 		dev_info(priv->device, "TSO supported\n");
7233 
7234 	priv->hw->vlan_fail_q_en =
7235 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7236 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7237 
7238 	/* Run HW quirks, if any */
7239 	if (priv->hwif_quirks) {
7240 		ret = priv->hwif_quirks(priv);
7241 		if (ret)
7242 			return ret;
7243 	}
7244 
7245 	/* Rx Watchdog is available in the COREs newer than 3.40.
7246 	 * In some cases, for example on buggy HW, this feature has to
7247 	 * be disabled; this can be done by passing the riwt_off field
7248 	 * from the platform.
7249 	 */
7250 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7251 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7252 		priv->use_riwt = 1;
7253 		dev_info(priv->device,
7254 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7255 	}
7256 
7257 	return 0;
7258 }
7259 
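/* Register the NAPI instances of each channel: RX and TX NAPI per queue,
 * plus a combined RX/TX NAPI for channels that have both directions
 * (used by the AF_XDP zero-copy path).
 */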
7260 static void stmmac_napi_add(struct net_device *dev)
7261 {
7262 	struct stmmac_priv *priv = netdev_priv(dev);
7263 	u32 queue, maxq;
7264 
7265 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7266 
7267 	for (queue = 0; queue < maxq; queue++) {
7268 		struct stmmac_channel *ch = &priv->channel[queue];
7269 
7270 		ch->priv_data = priv;
7271 		ch->index = queue;
7272 		spin_lock_init(&ch->lock);
7273 
7274 		if (queue < priv->plat->rx_queues_to_use) {
7275 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7276 		}
7277 		if (queue < priv->plat->tx_queues_to_use) {
7278 			netif_napi_add_tx(dev, &ch->tx_napi,
7279 					  stmmac_napi_poll_tx);
7280 		}
7281 		if (queue < priv->plat->rx_queues_to_use &&
7282 		    queue < priv->plat->tx_queues_to_use) {
7283 			netif_napi_add(dev, &ch->rxtx_napi,
7284 				       stmmac_napi_poll_rxtx);
7285 		}
7286 	}
7287 }
7288 
7289 static void stmmac_napi_del(struct net_device *dev)
7290 {
7291 	struct stmmac_priv *priv = netdev_priv(dev);
7292 	u32 queue, maxq;
7293 
7294 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7295 
7296 	for (queue = 0; queue < maxq; queue++) {
7297 		struct stmmac_channel *ch = &priv->channel[queue];
7298 
7299 		if (queue < priv->plat->rx_queues_to_use)
7300 			netif_napi_del(&ch->rx_napi);
7301 		if (queue < priv->plat->tx_queues_to_use)
7302 			netif_napi_del(&ch->tx_napi);
7303 		if (queue < priv->plat->rx_queues_to_use &&
7304 		    queue < priv->plat->tx_queues_to_use) {
7305 			netif_napi_del(&ch->rxtx_napi);
7306 		}
7307 	}
7308 }
7309 
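/* Change the number of RX/TX queues at runtime: close the interface if it
 * is running, recreate the NAPI instances and the default RSS table for
 * the new queue count, then reopen the interface.
 */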
7310 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7311 {
7312 	struct stmmac_priv *priv = netdev_priv(dev);
7313 	int ret = 0, i;
7314 
7315 	if (netif_running(dev))
7316 		stmmac_release(dev);
7317 
7318 	stmmac_napi_del(dev);
7319 
7320 	priv->plat->rx_queues_to_use = rx_cnt;
7321 	priv->plat->tx_queues_to_use = tx_cnt;
7322 	if (!netif_is_rxfh_configured(dev))
7323 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7324 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7325 									rx_cnt);
7326 
7327 	stmmac_napi_add(dev);
7328 
7329 	if (netif_running(dev))
7330 		ret = stmmac_open(dev);
7331 
7332 	return ret;
7333 }
7334 
7335 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7336 {
7337 	struct stmmac_priv *priv = netdev_priv(dev);
7338 	int ret = 0;
7339 
7340 	if (netif_running(dev))
7341 		stmmac_release(dev);
7342 
7343 	priv->dma_conf.dma_rx_size = rx_size;
7344 	priv->dma_conf.dma_tx_size = tx_size;
7345 
7346 	if (netif_running(dev))
7347 		ret = stmmac_open(dev);
7348 
7349 	return ret;
7350 }
7351 
7352 /**
7353  * stmmac_fpe_verify_timer - Timer for MAC Merge verification
7354  * @t:  timer_list struct containing private info
7355  *
7356  * Verify the MAC Merge capability in the local TX direction, by
7357  * transmitting Verify mPackets up to 3 times. Wait until link
7358  * transmitting Verify mPackets up to 3 times. Wait until the link
7359  */
7360 static void stmmac_fpe_verify_timer(struct timer_list *t)
7361 {
7362 	struct stmmac_fpe_cfg *fpe_cfg = from_timer(fpe_cfg, t, verify_timer);
7363 	struct stmmac_priv *priv = container_of(fpe_cfg, struct stmmac_priv,
7364 						fpe_cfg);
7365 	unsigned long flags;
7366 	bool rearm = false;
7367 
7368 	spin_lock_irqsave(&fpe_cfg->lock, flags);
7369 
7370 	switch (fpe_cfg->status) {
7371 	case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
7372 	case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
7373 		if (fpe_cfg->verify_retries != 0) {
7374 			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7375 						fpe_cfg, MPACKET_VERIFY);
7376 			rearm = true;
7377 		} else {
7378 			fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
7379 		}
7380 
7381 		fpe_cfg->verify_retries--;
7382 		break;
7383 
7384 	case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
7385 		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
7386 				     priv->plat->tx_queues_to_use,
7387 				     priv->plat->rx_queues_to_use,
7388 				     true, true);
7389 		break;
7390 
7391 	default:
7392 		break;
7393 	}
7394 
7395 	if (rearm) {
7396 		mod_timer(&fpe_cfg->verify_timer,
7397 			  jiffies + msecs_to_jiffies(fpe_cfg->verify_time));
7398 	}
7399 
7400 	spin_unlock_irqrestore(&fpe_cfg->lock, flags);
7401 }
7402 
7403 static void stmmac_fpe_verify_timer_arm(struct stmmac_fpe_cfg *fpe_cfg)
7404 {
7405 	if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled &&
7406 	    fpe_cfg->verify_enabled &&
7407 	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED &&
7408 	    fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) {
7409 		timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0);
7410 		mod_timer(&fpe_cfg->verify_timer, jiffies);
7411 	}
7412 }
7413 
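/* Apply the current frame preemption (FPE) configuration: program it
 * immediately when verification is disabled, otherwise reset the verify
 * state machine and arm the verification timer if the interface is running.
 */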
7414 void stmmac_fpe_apply(struct stmmac_priv *priv)
7415 {
7416 	struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg;
7417 
7418 	/* If verification is disabled, configure FPE right away.
7419 	 * Otherwise let the timer code do it.
7420 	 */
7421 	if (!fpe_cfg->verify_enabled) {
7422 		stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg,
7423 				     priv->plat->tx_queues_to_use,
7424 				     priv->plat->rx_queues_to_use,
7425 				     fpe_cfg->tx_enabled,
7426 				     fpe_cfg->pmac_enabled);
7427 	} else {
7428 		fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL;
7429 		fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
7430 
7431 		if (netif_running(priv->dev))
7432 			stmmac_fpe_verify_timer_arm(fpe_cfg);
7433 	}
7434 }
7435 
7436 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7437 {
7438 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7439 	struct dma_desc *desc_contains_ts = ctx->desc;
7440 	struct stmmac_priv *priv = ctx->priv;
7441 	struct dma_desc *ndesc = ctx->ndesc;
7442 	struct dma_desc *desc = ctx->desc;
7443 	u64 ns = 0;
7444 
7445 	if (!priv->hwts_rx_en)
7446 		return -ENODATA;
7447 
7448 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7449 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7450 		desc_contains_ts = ndesc;
7451 
7452 	/* Check if timestamp is available */
7453 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7454 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7455 		ns -= priv->plat->cdc_error_adj;
7456 		*timestamp = ns_to_ktime(ns);
7457 		return 0;
7458 	}
7459 
7460 	return -ENODATA;
7461 }
7462 
7463 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7464 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7465 };
7466 
7467 /**
7468  * stmmac_dvr_probe
7469  * @device: device pointer
7470  * @plat_dat: platform data pointer
7471  * @res: stmmac resource pointer
7472  * Description: this is the main probe function, used to
7473  * call alloc_etherdev and allocate the priv structure.
7474  * Return:
7475  * returns 0 on success, otherwise errno.
7476  */
7477 int stmmac_dvr_probe(struct device *device,
7478 		     struct plat_stmmacenet_data *plat_dat,
7479 		     struct stmmac_resources *res)
7480 {
7481 	struct net_device *ndev = NULL;
7482 	struct stmmac_priv *priv;
7483 	u32 rxq;
7484 	int i, ret = 0;
7485 
7486 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7487 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7488 	if (!ndev)
7489 		return -ENOMEM;
7490 
7491 	SET_NETDEV_DEV(ndev, device);
7492 
7493 	priv = netdev_priv(ndev);
7494 	priv->device = device;
7495 	priv->dev = ndev;
7496 
7497 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7498 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7499 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7500 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7501 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7502 	}
7503 
7504 	priv->xstats.pcpu_stats =
7505 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7506 	if (!priv->xstats.pcpu_stats)
7507 		return -ENOMEM;
7508 
7509 	stmmac_set_ethtool_ops(ndev);
7510 	priv->pause = pause;
7511 	priv->plat = plat_dat;
7512 	priv->ioaddr = res->addr;
7513 	priv->dev->base_addr = (unsigned long)res->addr;
7514 	priv->plat->dma_cfg->multi_msi_en =
7515 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7516 
7517 	priv->dev->irq = res->irq;
7518 	priv->wol_irq = res->wol_irq;
7519 	priv->lpi_irq = res->lpi_irq;
7520 	priv->sfty_irq = res->sfty_irq;
7521 	priv->sfty_ce_irq = res->sfty_ce_irq;
7522 	priv->sfty_ue_irq = res->sfty_ue_irq;
7523 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7524 		priv->rx_irq[i] = res->rx_irq[i];
7525 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7526 		priv->tx_irq[i] = res->tx_irq[i];
7527 
7528 	if (!is_zero_ether_addr(res->mac))
7529 		eth_hw_addr_set(priv->dev, res->mac);
7530 
7531 	dev_set_drvdata(device, priv->dev);
7532 
7533 	/* Verify driver arguments */
7534 	stmmac_verify_args();
7535 
7536 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7537 	if (!priv->af_xdp_zc_qps)
7538 		return -ENOMEM;
7539 
7540 	/* Allocate workqueue */
7541 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7542 	if (!priv->wq) {
7543 		dev_err(priv->device, "failed to create workqueue\n");
7544 		ret = -ENOMEM;
7545 		goto error_wq_init;
7546 	}
7547 
7548 	INIT_WORK(&priv->service_task, stmmac_service_task);
7549 
7550 	/* Override with kernel parameters if supplied XXX CRS XXX
7551 	 * this needs to have multiple instances
7552 	 */
7553 	if ((phyaddr >= 0) && (phyaddr <= 31))
7554 		priv->plat->phy_addr = phyaddr;
7555 
7556 	if (priv->plat->stmmac_rst) {
7557 		ret = reset_control_assert(priv->plat->stmmac_rst);
7558 		reset_control_deassert(priv->plat->stmmac_rst);
7559 		/* Some reset controllers have only reset callback instead of
7560 		 * assert + deassert callbacks pair.
7561 		 */
7562 		if (ret == -ENOTSUPP)
7563 			reset_control_reset(priv->plat->stmmac_rst);
7564 	}
7565 
7566 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7567 	if (ret == -ENOTSUPP)
7568 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7569 			ERR_PTR(ret));
7570 
7571 	/* Wait a bit for the reset to take effect */
7572 	udelay(10);
7573 
7574 	/* Init MAC and get the capabilities */
7575 	ret = stmmac_hw_init(priv);
7576 	if (ret)
7577 		goto error_hw_init;
7578 
7579 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7580 	 */
7581 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7582 		priv->plat->dma_cfg->dche = false;
7583 
7584 	stmmac_check_ether_addr(priv);
7585 
7586 	ndev->netdev_ops = &stmmac_netdev_ops;
7587 
7588 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7589 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7590 
7591 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7592 			    NETIF_F_RXCSUM;
7593 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7594 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7595 
7596 	ret = stmmac_tc_init(priv, priv);
7597 	if (!ret) {
7598 		ndev->hw_features |= NETIF_F_HW_TC;
7599 	}
7600 
7601 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7602 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7603 		if (priv->plat->has_gmac4)
7604 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7605 		priv->tso = true;
7606 		dev_info(priv->device, "TSO feature enabled\n");
7607 	}
7608 
7609 	if (priv->dma_cap.sphen &&
7610 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7611 		ndev->hw_features |= NETIF_F_GRO;
7612 		priv->sph_cap = true;
7613 		priv->sph = priv->sph_cap;
7614 		dev_info(priv->device, "SPH feature enabled\n");
7615 	}
7616 
7617 	/* Ideally our host DMA address width is the same as for the
7618 	 * device. However, it may differ and then we have to use our
7619 	 * host DMA width for allocation and the device DMA width for
7620 	 * register handling.
7621 	 */
7622 	if (priv->plat->host_dma_width)
7623 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7624 	else
7625 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7626 
7627 	if (priv->dma_cap.host_dma_width) {
7628 		ret = dma_set_mask_and_coherent(device,
7629 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7630 		if (!ret) {
7631 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7632 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7633 
7634 			/*
7635 			 * If more than 32 bits can be addressed, make sure to
7636 			 * enable enhanced addressing mode.
7637 			 */
7638 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7639 				priv->plat->dma_cfg->eame = true;
7640 		} else {
7641 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7642 			if (ret) {
7643 				dev_err(priv->device, "Failed to set DMA Mask\n");
7644 				goto error_hw_init;
7645 			}
7646 
7647 			priv->dma_cap.host_dma_width = 32;
7648 		}
7649 	}
7650 
7651 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7652 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7653 #ifdef STMMAC_VLAN_TAG_USED
7654 	/* Both mac100 and gmac support receive VLAN tag detection */
7655 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7656 	if (priv->plat->has_gmac4) {
7657 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7658 		priv->hw->hw_vlan_en = true;
7659 	}
7660 	if (priv->dma_cap.vlhash) {
7661 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7662 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7663 	}
7664 	if (priv->dma_cap.vlins) {
7665 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7666 		if (priv->dma_cap.dvlan)
7667 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7668 	}
7669 #endif
7670 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7671 
7672 	priv->xstats.threshold = tc;
7673 
7674 	/* Initialize RSS */
7675 	rxq = priv->plat->rx_queues_to_use;
7676 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7677 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7678 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7679 
7680 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7681 		ndev->features |= NETIF_F_RXHASH;
7682 
7683 	ndev->vlan_features |= ndev->features;
7684 
7685 	/* MTU range: 46 - hw-specific max */
7686 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7687 	if (priv->plat->has_xgmac)
7688 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7689 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7690 		ndev->max_mtu = JUMBO_LEN;
7691 	else
7692 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7693 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7694 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7695 	 */
7696 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7697 	    (priv->plat->maxmtu >= ndev->min_mtu))
7698 		ndev->max_mtu = priv->plat->maxmtu;
7699 	else if (priv->plat->maxmtu < ndev->min_mtu)
7700 		dev_warn(priv->device,
7701 			 "%s: warning: maxmtu having invalid value (%d)\n",
7702 			 __func__, priv->plat->maxmtu);
7703 
7704 	if (flow_ctrl)
7705 		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */
7706 
7707 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7708 
7709 	/* Setup channels NAPI */
7710 	stmmac_napi_add(ndev);
7711 
7712 	mutex_init(&priv->lock);
7713 
7714 	priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES;
7715 	priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS;
7716 	priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
7717 	timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0);
7718 	spin_lock_init(&priv->fpe_cfg.lock);
7719 
7720 	/* If a specific clk_csr value is passed from the platform, this
7721 	 * means that the CSR Clock Range selection cannot be changed at
7722 	 * run-time and is fixed. Otherwise the driver will try to set
7723 	 * the MDC clock dynamically according to the actual csr clock
7724 	 * input.
7725 	 */
7726 	if (priv->plat->clk_csr >= 0)
7727 		priv->clk_csr = priv->plat->clk_csr;
7728 	else
7729 		stmmac_clk_csr_set(priv);
7730 
7731 	stmmac_check_pcs_mode(priv);
7732 
7733 	pm_runtime_get_noresume(device);
7734 	pm_runtime_set_active(device);
7735 	if (!pm_runtime_enabled(device))
7736 		pm_runtime_enable(device);
7737 
7738 	ret = stmmac_mdio_register(ndev);
7739 	if (ret < 0) {
7740 		dev_err_probe(priv->device, ret,
7741 			      "MDIO bus (id: %d) registration failed\n",
7742 			      priv->plat->bus_id);
7743 		goto error_mdio_register;
7744 	}
7745 
7746 	if (priv->plat->speed_mode_2500)
7747 		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7748 
7749 	ret = stmmac_pcs_setup(ndev);
7750 	if (ret)
7751 		goto error_pcs_setup;
7752 
7753 	ret = stmmac_phy_setup(priv);
7754 	if (ret) {
7755 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7756 		goto error_phy_setup;
7757 	}
7758 
7759 	ret = register_netdev(ndev);
7760 	if (ret) {
7761 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7762 			__func__, ret);
7763 		goto error_netdev_register;
7764 	}
7765 
7766 #ifdef CONFIG_DEBUG_FS
7767 	stmmac_init_fs(ndev);
7768 #endif
7769 
7770 	if (priv->plat->dump_debug_regs)
7771 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7772 
7773 	/* Let pm_runtime_put() disable the clocks.
7774 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7775 	 */
7776 	pm_runtime_put(device);
7777 
7778 	return ret;
7779 
7780 error_netdev_register:
7781 	phylink_destroy(priv->phylink);
7782 error_phy_setup:
7783 	stmmac_pcs_clean(ndev);
7784 error_pcs_setup:
7785 	stmmac_mdio_unregister(ndev);
7786 error_mdio_register:
7787 	stmmac_napi_del(ndev);
7788 error_hw_init:
7789 	destroy_workqueue(priv->wq);
7790 error_wq_init:
7791 	bitmap_free(priv->af_xdp_zc_qps);
7792 
7793 	return ret;
7794 }
7795 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7796 
7797 /**
7798  * stmmac_dvr_remove
7799  * @dev: device pointer
7800  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7801  * changes the link status and releases the DMA descriptor rings.
7802  */
7803 void stmmac_dvr_remove(struct device *dev)
7804 {
7805 	struct net_device *ndev = dev_get_drvdata(dev);
7806 	struct stmmac_priv *priv = netdev_priv(ndev);
7807 
7808 	netdev_info(priv->dev, "%s: removing driver", __func__);
7809 
7810 	pm_runtime_get_sync(dev);
7811 
7812 	stmmac_stop_all_dma(priv);
7813 	stmmac_mac_set(priv, priv->ioaddr, false);
7814 	unregister_netdev(ndev);
7815 
7816 #ifdef CONFIG_DEBUG_FS
7817 	stmmac_exit_fs(ndev);
7818 #endif
7819 	phylink_destroy(priv->phylink);
7820 	if (priv->plat->stmmac_rst)
7821 		reset_control_assert(priv->plat->stmmac_rst);
7822 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7823 
7824 	stmmac_pcs_clean(ndev);
7825 	stmmac_mdio_unregister(ndev);
7826 
7827 	destroy_workqueue(priv->wq);
7828 	mutex_destroy(&priv->lock);
7829 	bitmap_free(priv->af_xdp_zc_qps);
7830 
7831 	pm_runtime_disable(dev);
7832 	pm_runtime_put_noidle(dev);
7833 }
7834 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7835 
7836 /**
7837  * stmmac_suspend - suspend callback
7838  * @dev: device pointer
7839  * Description: this is the function to suspend the device and it is called
7840  * by the platform driver to stop the network queue, release the resources,
7841  * program the PMT register (for WoL), clean and release driver resources.
7842  */
7843 int stmmac_suspend(struct device *dev)
7844 {
7845 	struct net_device *ndev = dev_get_drvdata(dev);
7846 	struct stmmac_priv *priv = netdev_priv(ndev);
7847 	u32 chan;
7848 
7849 	if (!ndev || !netif_running(ndev))
7850 		return 0;
7851 
7852 	mutex_lock(&priv->lock);
7853 
7854 	netif_device_detach(ndev);
7855 
7856 	stmmac_disable_all_queues(priv);
7857 
7858 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7859 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7860 
7861 	if (priv->eee_enabled) {
7862 		priv->tx_path_in_lpi_mode = false;
7863 		del_timer_sync(&priv->eee_ctrl_timer);
7864 	}
7865 
7866 	/* Stop TX/RX DMA */
7867 	stmmac_stop_all_dma(priv);
7868 
7869 	if (priv->plat->serdes_powerdown)
7870 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7871 
7872 	/* Enable Power down mode by programming the PMT regs */
7873 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7874 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7875 		priv->irq_wake = 1;
7876 	} else {
7877 		stmmac_mac_set(priv, priv->ioaddr, false);
7878 		pinctrl_pm_select_sleep_state(priv->device);
7879 	}
7880 
7881 	mutex_unlock(&priv->lock);
7882 
7883 	rtnl_lock();
7884 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7885 		phylink_suspend(priv->phylink, true);
7886 	} else {
7887 		if (device_may_wakeup(priv->device))
7888 			phylink_speed_down(priv->phylink, false);
7889 		phylink_suspend(priv->phylink, false);
7890 	}
7891 	rtnl_unlock();
7892 
7893 	if (priv->dma_cap.fpesel)
7894 		timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7895 
7896 	priv->speed = SPEED_UNKNOWN;
7897 	return 0;
7898 }
7899 EXPORT_SYMBOL_GPL(stmmac_suspend);
7900 
7901 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7902 {
7903 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7904 
7905 	rx_q->cur_rx = 0;
7906 	rx_q->dirty_rx = 0;
7907 }
7908 
7909 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7910 {
7911 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7912 
7913 	tx_q->cur_tx = 0;
7914 	tx_q->dirty_tx = 0;
7915 	tx_q->mss = 0;
7916 
7917 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7918 }
7919 
7920 /**
7921  * stmmac_reset_queues_param - reset queue parameters
7922  * @priv: device pointer
7923  */
7924 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7925 {
7926 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7927 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7928 	u32 queue;
7929 
7930 	for (queue = 0; queue < rx_cnt; queue++)
7931 		stmmac_reset_rx_queue(priv, queue);
7932 
7933 	for (queue = 0; queue < tx_cnt; queue++)
7934 		stmmac_reset_tx_queue(priv, queue);
7935 }
7936 
7937 /**
7938  * stmmac_resume - resume callback
7939  * @dev: device pointer
7940  * Description: when resume this function is invoked to setup the DMA and CORE
7941  * in a usable state.
7942  */
7943 int stmmac_resume(struct device *dev)
7944 {
7945 	struct net_device *ndev = dev_get_drvdata(dev);
7946 	struct stmmac_priv *priv = netdev_priv(ndev);
7947 	int ret;
7948 
7949 	if (!netif_running(ndev))
7950 		return 0;
7951 
7952 	/* The Power Down bit in the PM register is cleared automatically
7953 	 * as soon as a magic packet or a Wake-up frame is received.
7954 	 * Nevertheless, it is better to clear this bit manually because
7955 	 * it can cause problems while resuming from other devices
7956 	 * (e.g. a serial console).
7957 	 */
7958 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7959 		mutex_lock(&priv->lock);
7960 		stmmac_pmt(priv, priv->hw, 0);
7961 		mutex_unlock(&priv->lock);
7962 		priv->irq_wake = 0;
7963 	} else {
7964 		pinctrl_pm_select_default_state(priv->device);
7965 		/* reset the phy so that it's ready */
7966 		if (priv->mii)
7967 			stmmac_mdio_reset(priv->mii);
7968 	}
7969 
7970 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7971 	    priv->plat->serdes_powerup) {
7972 		ret = priv->plat->serdes_powerup(ndev,
7973 						 priv->plat->bsp_priv);
7974 
7975 		if (ret < 0)
7976 			return ret;
7977 	}
7978 
7979 	rtnl_lock();
7980 	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7981 		phylink_resume(priv->phylink);
7982 	} else {
7983 		phylink_resume(priv->phylink);
7984 		if (device_may_wakeup(priv->device))
7985 			phylink_speed_up(priv->phylink);
7986 	}
7987 	rtnl_unlock();
7988 
7989 	rtnl_lock();
7990 	mutex_lock(&priv->lock);
7991 
7992 	stmmac_reset_queues_param(priv);
7993 
7994 	stmmac_free_tx_skbufs(priv);
7995 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7996 
7997 	stmmac_hw_setup(ndev, false);
7998 	stmmac_init_coalesce(priv);
7999 	stmmac_set_rx_mode(ndev);
8000 
8001 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
8002 
8003 	stmmac_enable_all_queues(priv);
8004 	stmmac_enable_all_dma_irq(priv);
8005 
8006 	mutex_unlock(&priv->lock);
8007 	rtnl_unlock();
8008 
8009 	netif_device_attach(ndev);
8010 
8011 	return 0;
8012 }
8013 EXPORT_SYMBOL_GPL(stmmac_resume);
8014 
8015 #ifndef MODULE
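/* Parse the "stmmaceth=" kernel command line options when the driver is
 * built in. Each option is a comma-separated "name:value" pair overriding
 * the matching module parameter, e.g.:
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000
 */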
8016 static int __init stmmac_cmdline_opt(char *str)
8017 {
8018 	char *opt;
8019 
8020 	if (!str || !*str)
8021 		return 1;
8022 	while ((opt = strsep(&str, ",")) != NULL) {
8023 		if (!strncmp(opt, "debug:", 6)) {
8024 			if (kstrtoint(opt + 6, 0, &debug))
8025 				goto err;
8026 		} else if (!strncmp(opt, "phyaddr:", 8)) {
8027 			if (kstrtoint(opt + 8, 0, &phyaddr))
8028 				goto err;
8029 		} else if (!strncmp(opt, "buf_sz:", 7)) {
8030 			if (kstrtoint(opt + 7, 0, &buf_sz))
8031 				goto err;
8032 		} else if (!strncmp(opt, "tc:", 3)) {
8033 			if (kstrtoint(opt + 3, 0, &tc))
8034 				goto err;
8035 		} else if (!strncmp(opt, "watchdog:", 9)) {
8036 			if (kstrtoint(opt + 9, 0, &watchdog))
8037 				goto err;
8038 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
8039 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
8040 				goto err;
8041 		} else if (!strncmp(opt, "pause:", 6)) {
8042 			if (kstrtoint(opt + 6, 0, &pause))
8043 				goto err;
8044 		} else if (!strncmp(opt, "eee_timer:", 10)) {
8045 			if (kstrtoint(opt + 10, 0, &eee_timer))
8046 				goto err;
8047 		} else if (!strncmp(opt, "chain_mode:", 11)) {
8048 			if (kstrtoint(opt + 11, 0, &chain_mode))
8049 				goto err;
8050 		}
8051 	}
8052 	return 1;
8053 
8054 err:
8055 	pr_err("%s: ERROR broken module parameter conversion", __func__);
8056 	return 1;
8057 }
8058 
8059 __setup("stmmaceth=", stmmac_cmdline_opt);
8060 #endif /* MODULE */
8061 
8062 static int __init stmmac_init(void)
8063 {
8064 #ifdef CONFIG_DEBUG_FS
8065 	/* Create debugfs main directory if it doesn't exist yet */
8066 	if (!stmmac_fs_dir)
8067 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8068 	register_netdevice_notifier(&stmmac_notifier);
8069 #endif
8070 
8071 	return 0;
8072 }
8073 
8074 static void __exit stmmac_exit(void)
8075 {
8076 #ifdef CONFIG_DEBUG_FS
8077 	unregister_netdevice_notifier(&stmmac_notifier);
8078 	debugfs_remove_recursive(stmmac_fs_dir);
8079 #endif
8080 }
8081 
8082 module_init(stmmac_init)
8083 module_exit(stmmac_exit)
8084 
8085 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8086 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8087 MODULE_LICENSE("GPL");
8088