xref: /linux/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (revision c1ead4b4dfe0f643cfc66571ca7d2fa332eddd35)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4   ST Ethernet IPs are built around a Synopsys IP Core.
5 
6 	Copyright(C) 2007-2011 STMicroelectronics Ltd
7 
8 
9   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10 
11   Documentation available at:
12 	http://www.stlinux.com
13   Support available at:
14 	https://bugzilla.stlinux.com/
15 *******************************************************************************/
16 
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/pm_wakeirq.h>
33 #include <linux/prefetch.h>
34 #include <linux/pinctrl/consumer.h>
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38 #endif /* CONFIG_DEBUG_FS */
39 #include <linux/net_tstamp.h>
40 #include <linux/phylink.h>
41 #include <linux/udp.h>
42 #include <linux/bpf_trace.h>
43 #include <net/page_pool/helpers.h>
44 #include <net/pkt_cls.h>
45 #include <net/xdp_sock_drv.h>
46 #include "stmmac_ptp.h"
47 #include "stmmac_fpe.h"
48 #include "stmmac.h"
49 #include "stmmac_xdp.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 #include "dwxgmac2.h"
54 #include "hwif.h"
55 
56 /* As long as the interface is active, we keep the timestamping counter enabled
57  * with fine resolution and binary rollover. This avoids non-monotonic behavior
58  * (clock jumps) when changing timestamping settings at runtime.
59  */
60 #define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
61 				 PTP_TCR_TSCTRLSSR)
62 
63 #define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
64 #define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
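/* For example, with SMP_CACHE_BYTES == 64 (a common cache line size),
 * STMMAC_ALIGN(1500) first rounds 1500 up to the cache line, giving 1536,
 * and then up to a 16-byte boundary, which 1536 already satisfies.
 */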
65 
66 /* Module parameters */
67 #define TX_TIMEO	5000
68 static int watchdog = TX_TIMEO;
69 module_param(watchdog, int, 0644);
70 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
71 
72 static int debug = -1;
73 module_param(debug, int, 0644);
74 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
75 
76 static int phyaddr = -1;
77 module_param(phyaddr, int, 0444);
78 MODULE_PARM_DESC(phyaddr, "Physical device address");
79 
80 #define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
81 
82 /* Limit to make sure XDP TX and slow path can coexist */
83 #define STMMAC_XSK_TX_BUDGET_MAX	256
84 #define STMMAC_TX_XSK_AVAIL		16
85 #define STMMAC_RX_FILL_BATCH		16
86 
87 #define STMMAC_XDP_PASS		0
88 #define STMMAC_XDP_CONSUMED	BIT(0)
89 #define STMMAC_XDP_TX		BIT(1)
90 #define STMMAC_XDP_REDIRECT	BIT(2)
91 
92 static int flow_ctrl = 0xdead;
93 module_param(flow_ctrl, int, 0644);
94 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
95 
96 static int pause = PAUSE_TIME;
97 module_param(pause, int, 0644);
98 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
99 
100 #define TC_DEFAULT 64
101 static int tc = TC_DEFAULT;
102 module_param(tc, int, 0644);
103 MODULE_PARM_DESC(tc, "DMA threshold control value");
104 
105 /* This is unused */
106 #define	DEFAULT_BUFSIZE	1536
107 static int buf_sz = DEFAULT_BUFSIZE;
108 module_param(buf_sz, int, 0644);
109 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
110 
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114 
115 #define STMMAC_DEFAULT_LPI_TIMER	1000
116 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, uint, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120 
121 /* By default the driver uses ring mode to manage tx and rx descriptors,
122  * but allows the user to force chain mode instead of ring mode.
123  */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
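/* These parameters can be overridden at module load time. Assuming the driver
 * is built as the "stmmac" module, a typical invocation might be:
 *
 *	modprobe stmmac chain_mode=1 eee_timer=2000 debug=16
 *
 * or persistently via a modprobe.d "options stmmac ..." entry.
 */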
127 
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 					  u32 rxmode, u32 chan);
141 
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147 
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149 
150 /**
151  * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
152  * @bsp_priv: BSP private data structure (unused)
153  * @clk_tx_i: the transmit clock
154  * @interface: the selected interface mode
155  * @speed: the speed that the MAC will be operating at
156  *
157  * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
158  * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
159  * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
160  * the plat_data->set_clk_tx_rate method directly, call it via their own
161  * implementation, or implement their own method should they have more
162  * complex requirements. It is intended to only be used in this method.
163  *
164  * plat_data->clk_tx_i must be filled in.
165  */
166 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
167 			   phy_interface_t interface, int speed)
168 {
169 	long rate = rgmii_clock(speed);
170 
171 	/* Silently ignore unsupported speeds as rgmii_clock() only
172 	 * supports 10, 100 and 1000Mbps. We do not want to spit
173 	 * errors for 2500 and higher speeds here.
174 	 */
175 	if (rate < 0)
176 		return 0;
177 
178 	return clk_set_rate(clk_tx_i, rate);
179 }
180 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
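/* A minimal sketch of how platform glue might hook this helper up at probe
 * time, per the kernel-doc above. The clock name "tx" and the surrounding
 * probe context (pdev, plat_dat) are assumptions, not taken from this file:
 *
 *	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
 *	if (IS_ERR(plat_dat->clk_tx_i))
 *		return PTR_ERR(plat_dat->clk_tx_i);
 *
 *	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
 */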
181 
182 /**
183  * stmmac_verify_args - verify the driver parameters.
184  * Description: it checks the driver parameters and sets a default in case of
185  * errors.
186  */
187 static void stmmac_verify_args(void)
188 {
189 	if (unlikely(watchdog < 0))
190 		watchdog = TX_TIMEO;
191 	if (unlikely((pause < 0) || (pause > 0xffff)))
192 		pause = PAUSE_TIME;
193 
194 	if (flow_ctrl != 0xdead)
195 		pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
196 }
197 
198 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
199 {
200 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
201 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
202 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
203 	u32 queue;
204 
205 	for (queue = 0; queue < maxq; queue++) {
206 		struct stmmac_channel *ch = &priv->channel[queue];
207 
208 		if (stmmac_xdp_is_enabled(priv) &&
209 		    test_bit(queue, priv->af_xdp_zc_qps)) {
210 			napi_disable(&ch->rxtx_napi);
211 			continue;
212 		}
213 
214 		if (queue < rx_queues_cnt)
215 			napi_disable(&ch->rx_napi);
216 		if (queue < tx_queues_cnt)
217 			napi_disable(&ch->tx_napi);
218 	}
219 }
220 
221 /**
222  * stmmac_disable_all_queues - Disable all queues
223  * @priv: driver private structure
224  */
225 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
226 {
227 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
228 	struct stmmac_rx_queue *rx_q;
229 	u32 queue;
230 
231 	/* synchronize_rcu() needed for pending XDP buffers to drain */
232 	for (queue = 0; queue < rx_queues_cnt; queue++) {
233 		rx_q = &priv->dma_conf.rx_queue[queue];
234 		if (rx_q->xsk_pool) {
235 			synchronize_rcu();
236 			break;
237 		}
238 	}
239 
240 	__stmmac_disable_all_queues(priv);
241 }
242 
243 /**
244  * stmmac_enable_all_queues - Enable all queues
245  * @priv: driver private structure
246  */
247 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
248 {
249 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
250 	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
251 	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
252 	u32 queue;
253 
254 	for (queue = 0; queue < maxq; queue++) {
255 		struct stmmac_channel *ch = &priv->channel[queue];
256 
257 		if (stmmac_xdp_is_enabled(priv) &&
258 		    test_bit(queue, priv->af_xdp_zc_qps)) {
259 			napi_enable(&ch->rxtx_napi);
260 			continue;
261 		}
262 
263 		if (queue < rx_queues_cnt)
264 			napi_enable(&ch->rx_napi);
265 		if (queue < tx_queues_cnt)
266 			napi_enable(&ch->tx_napi);
267 	}
268 }
269 
270 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
271 {
272 	if (!test_bit(STMMAC_DOWN, &priv->state) &&
273 	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
274 		queue_work(priv->wq, &priv->service_task);
275 }
276 
277 static void stmmac_global_err(struct stmmac_priv *priv)
278 {
279 	netif_carrier_off(priv->dev);
280 	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
281 	stmmac_service_event_schedule(priv);
282 }
283 
284 static void print_pkt(unsigned char *buf, int len)
285 {
286 	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
287 	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
288 }
289 
290 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
291 {
292 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
293 	u32 avail;
294 
295 	if (tx_q->dirty_tx > tx_q->cur_tx)
296 		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
297 	else
298 		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
299 
300 	return avail;
301 }
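/* Example: with dma_tx_size == 512, the producer having wrapped to
 * cur_tx == 2 while cleanup is still at dirty_tx == 500, the first branch
 * gives avail = 500 - 2 - 1 = 497. The "- 1" keeps one descriptor permanently
 * unused so that cur_tx == dirty_tx always means "empty" rather than "full".
 */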
302 
303 /**
304  * stmmac_rx_dirty - Get RX queue dirty
305  * @priv: driver private structure
306  * @queue: RX queue index
307  */
308 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
309 {
310 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
311 	u32 dirty;
312 
313 	if (rx_q->dirty_rx <= rx_q->cur_rx)
314 		dirty = rx_q->cur_rx - rx_q->dirty_rx;
315 	else
316 		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
317 
318 	return dirty;
319 }
320 
321 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
322 {
323 	u32 tx_cnt = priv->plat->tx_queues_to_use;
324 	u32 queue;
325 
326 	/* check if all TX queues have the work finished */
327 	for (queue = 0; queue < tx_cnt; queue++) {
328 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
329 
330 		if (tx_q->dirty_tx != tx_q->cur_tx)
331 			return true; /* still unfinished work */
332 	}
333 
334 	return false;
335 }
336 
337 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
338 {
339 	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
340 }
341 
342 /**
343  * stmmac_try_to_start_sw_lpi - check and enter LPI mode
344  * @priv: driver private structure
345  * Description: this function checks whether the TX path is idle and, if so,
346  * enters LPI mode when EEE is enabled.
347  */
348 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
349 {
350 	if (stmmac_eee_tx_busy(priv)) {
351 		stmmac_restart_sw_lpi_timer(priv);
352 		return;
353 	}
354 
355 	/* Check and enter in LPI mode */
356 	if (!priv->tx_path_in_lpi_mode)
357 		stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
358 				    priv->tx_lpi_clk_stop, 0);
359 }
360 
361 /**
362  * stmmac_stop_sw_lpi - stop transmitting LPI
363  * @priv: driver private structure
364  * Description: When using software-controlled LPI, stop transmitting LPI state.
365  */
366 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
367 {
368 	timer_delete_sync(&priv->eee_ctrl_timer);
369 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
370 	priv->tx_path_in_lpi_mode = false;
371 }
372 
373 /**
374  * stmmac_eee_ctrl_timer - EEE TX SW timer.
375  * @t:  timer_list struct containing private info
376  * Description:
377  *  if there is no data transfer and if we are not in LPI state,
378  *  then MAC Transmitter can be moved to LPI state.
379  */
380 static void stmmac_eee_ctrl_timer(struct timer_list *t)
381 {
382 	struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
383 
384 	stmmac_try_to_start_sw_lpi(priv);
385 }
386 
387 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
388  * @priv: driver private structure
389  * @p : descriptor pointer
390  * @skb : the socket buffer
391  * Description:
392  * This function reads the timestamp from the descriptor, passes it to the
393  * stack and also performs some sanity checks.
394  */
395 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
396 				   struct dma_desc *p, struct sk_buff *skb)
397 {
398 	struct skb_shared_hwtstamps shhwtstamp;
399 	bool found = false;
400 	u64 ns = 0;
401 
402 	if (!priv->hwts_tx_en)
403 		return;
404 
405 	/* exit if skb doesn't support hw tstamp */
406 	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
407 		return;
408 
409 	/* check tx tstamp status */
410 	if (stmmac_get_tx_timestamp_status(priv, p)) {
411 		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
412 		found = true;
413 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
414 		found = true;
415 	}
416 
417 	if (found) {
418 		ns -= priv->plat->cdc_error_adj;
419 
420 		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
421 		shhwtstamp.hwtstamp = ns_to_ktime(ns);
422 
423 		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
424 		/* pass tstamp to stack */
425 		skb_tstamp_tx(skb, &shhwtstamp);
426 	}
427 }
428 
429 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
430  * @priv: driver private structure
431  * @p : descriptor pointer
432  * @np : next descriptor pointer
433  * @skb : the socket buffer
434  * Description:
435  * This function reads the received packet's timestamp from the descriptor
436  * and passes it to the stack. It also performs some sanity checks.
437  */
438 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
439 				   struct dma_desc *np, struct sk_buff *skb)
440 {
441 	struct skb_shared_hwtstamps *shhwtstamp = NULL;
442 	struct dma_desc *desc = p;
443 	u64 ns = 0;
444 
445 	if (!priv->hwts_rx_en)
446 		return;
447 	/* For GMAC4, the valid timestamp is from CTX next desc. */
448 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
449 		desc = np;
450 
451 	/* Check if timestamp is available */
452 	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
453 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
454 
455 		ns -= priv->plat->cdc_error_adj;
456 
457 		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
458 		shhwtstamp = skb_hwtstamps(skb);
459 		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
460 		shhwtstamp->hwtstamp = ns_to_ktime(ns);
461 	} else  {
462 		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
463 	}
464 }
465 
466 /**
467  *  stmmac_hwtstamp_set - control hardware timestamping.
468  *  @dev: device pointer.
469  *  @config: the timestamping configuration.
470  *  @extack: netlink extended ack structure for error reporting.
471  *  Description:
472  *  This function configures the MAC to enable/disable both outgoing(TX)
473  *  and incoming(RX) packets time stamping based on user input.
474  *  Return Value:
475  *  0 on success and an appropriate -ve integer on failure.
476  */
477 static int stmmac_hwtstamp_set(struct net_device *dev,
478 			       struct kernel_hwtstamp_config *config,
479 			       struct netlink_ext_ack *extack)
480 {
481 	struct stmmac_priv *priv = netdev_priv(dev);
482 	u32 ptp_v2 = 0;
483 	u32 tstamp_all = 0;
484 	u32 ptp_over_ipv4_udp = 0;
485 	u32 ptp_over_ipv6_udp = 0;
486 	u32 ptp_over_ethernet = 0;
487 	u32 snap_type_sel = 0;
488 	u32 ts_master_en = 0;
489 	u32 ts_event_en = 0;
490 
491 	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
492 		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
493 		priv->hwts_tx_en = 0;
494 		priv->hwts_rx_en = 0;
495 
496 		return -EOPNOTSUPP;
497 	}
498 
499 	if (!netif_running(dev)) {
500 		NL_SET_ERR_MSG_MOD(extack,
501 				   "Cannot change timestamping configuration while down");
502 		return -ENODEV;
503 	}
504 
505 	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
506 		   __func__, config->flags, config->tx_type, config->rx_filter);
507 
508 	if (config->tx_type != HWTSTAMP_TX_OFF &&
509 	    config->tx_type != HWTSTAMP_TX_ON)
510 		return -ERANGE;
511 
512 	if (priv->adv_ts) {
513 		switch (config->rx_filter) {
514 		case HWTSTAMP_FILTER_NONE:
515 			/* do not time stamp any incoming packet */
516 			config->rx_filter = HWTSTAMP_FILTER_NONE;
517 			break;
518 
519 		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
520 			/* PTP v1, UDP, any kind of event packet */
521 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
522 			/* 'xmac' hardware can support Sync, Pdelay_Req and
523 			 * Pdelay_resp by setting bit14 and bits17/16 to 01
524 			 * This leaves Delay_Req timestamps out.
525 			 * Enable all events *and* general purpose message
526 			 * timestamping
527 			 */
528 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
529 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
530 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
531 			break;
532 
533 		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
534 			/* PTP v1, UDP, Sync packet */
535 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
536 			/* take time stamp for SYNC messages only */
537 			ts_event_en = PTP_TCR_TSEVNTENA;
538 
539 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
540 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
541 			break;
542 
543 		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
544 			/* PTP v1, UDP, Delay_req packet */
545 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
546 			/* take time stamp for Delay_Req messages only */
547 			ts_master_en = PTP_TCR_TSMSTRENA;
548 			ts_event_en = PTP_TCR_TSEVNTENA;
549 
550 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
551 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
552 			break;
553 
554 		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
555 			/* PTP v2, UDP, any kind of event packet */
556 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
557 			ptp_v2 = PTP_TCR_TSVER2ENA;
558 			/* take time stamp for all event messages */
559 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
560 
561 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
562 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
563 			break;
564 
565 		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
566 			/* PTP v2, UDP, Sync packet */
567 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
568 			ptp_v2 = PTP_TCR_TSVER2ENA;
569 			/* take time stamp for SYNC messages only */
570 			ts_event_en = PTP_TCR_TSEVNTENA;
571 
572 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
573 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
574 			break;
575 
576 		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
577 			/* PTP v2, UDP, Delay_req packet */
578 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
579 			ptp_v2 = PTP_TCR_TSVER2ENA;
580 			/* take time stamp for Delay_Req messages only */
581 			ts_master_en = PTP_TCR_TSMSTRENA;
582 			ts_event_en = PTP_TCR_TSEVNTENA;
583 
584 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
585 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
586 			break;
587 
588 		case HWTSTAMP_FILTER_PTP_V2_EVENT:
589 			/* PTP v2/802.AS1 any layer, any kind of event packet */
590 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
591 			ptp_v2 = PTP_TCR_TSVER2ENA;
592 			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
593 			if (priv->synopsys_id < DWMAC_CORE_4_10)
594 				ts_event_en = PTP_TCR_TSEVNTENA;
595 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
596 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
597 			ptp_over_ethernet = PTP_TCR_TSIPENA;
598 			break;
599 
600 		case HWTSTAMP_FILTER_PTP_V2_SYNC:
601 			/* PTP v2/802.AS1, any layer, Sync packet */
602 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
603 			ptp_v2 = PTP_TCR_TSVER2ENA;
604 			/* take time stamp for SYNC messages only */
605 			ts_event_en = PTP_TCR_TSEVNTENA;
606 
607 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
608 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
609 			ptp_over_ethernet = PTP_TCR_TSIPENA;
610 			break;
611 
612 		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
613 			/* PTP v2/802.AS1, any layer, Delay_req packet */
614 			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
615 			ptp_v2 = PTP_TCR_TSVER2ENA;
616 			/* take time stamp for Delay_Req messages only */
617 			ts_master_en = PTP_TCR_TSMSTRENA;
618 			ts_event_en = PTP_TCR_TSEVNTENA;
619 
620 			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
621 			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
622 			ptp_over_ethernet = PTP_TCR_TSIPENA;
623 			break;
624 
625 		case HWTSTAMP_FILTER_NTP_ALL:
626 		case HWTSTAMP_FILTER_ALL:
627 			/* time stamp any incoming packet */
628 			config->rx_filter = HWTSTAMP_FILTER_ALL;
629 			tstamp_all = PTP_TCR_TSENALL;
630 			break;
631 
632 		default:
633 			return -ERANGE;
634 		}
635 	} else {
636 		switch (config->rx_filter) {
637 		case HWTSTAMP_FILTER_NONE:
638 			config->rx_filter = HWTSTAMP_FILTER_NONE;
639 			break;
640 		default:
641 			/* PTP v1, UDP, any kind of event packet */
642 			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
643 			break;
644 		}
645 	}
646 	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
647 	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
648 
649 	priv->systime_flags = STMMAC_HWTS_ACTIVE;
650 
651 	if (priv->hwts_tx_en || priv->hwts_rx_en) {
652 		priv->systime_flags |= tstamp_all | ptp_v2 |
653 				       ptp_over_ethernet | ptp_over_ipv6_udp |
654 				       ptp_over_ipv4_udp | ts_event_en |
655 				       ts_master_en | snap_type_sel;
656 	}
657 
658 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
659 
660 	priv->tstamp_config = *config;
661 
662 	return 0;
663 }
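/* A minimal userspace sketch of how this handler is typically reached,
 * assuming a PTP application enabling TX timestamps and a PTPv2 event RX
 * filter through the standard SIOCSHWTSTAMP ioctl (see
 * Documentation/networking/timestamping.rst). The interface name and the
 * socket descriptor sock_fd are placeholders:
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */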
664 
665 /**
666  *  stmmac_hwtstamp_get - read hardware timestamping.
667  *  @dev: device pointer.
668  *  @config: the timestamping configuration.
669  *  Description:
670  * This function obtains the current hardware timestamping settings
671  *  as requested.
672  */
673 static int stmmac_hwtstamp_get(struct net_device *dev,
674 			       struct kernel_hwtstamp_config *config)
675 {
676 	struct stmmac_priv *priv = netdev_priv(dev);
677 
678 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
679 		return -EOPNOTSUPP;
680 
681 	*config = priv->tstamp_config;
682 
683 	return 0;
684 }
685 
686 /**
687  * stmmac_init_tstamp_counter - init hardware timestamping counter
688  * @priv: driver private structure
689  * @systime_flags: timestamping flags
690  * Description:
691  * Initialize hardware counter for packet timestamping.
692  * This is valid as long as the interface is open and not suspended.
693  * Will be rerun after resuming from suspend, in which case the timestamping
694  * flags updated by stmmac_hwtstamp_set() also need to be restored.
695  */
696 static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
697 				      u32 systime_flags)
698 {
699 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
700 	struct timespec64 now;
701 	u32 sec_inc = 0;
702 	u64 temp = 0;
703 
704 	if (!priv->plat->clk_ptp_rate) {
705 		netdev_err(priv->dev, "Invalid PTP clock rate");
706 		return -EINVAL;
707 	}
708 
709 	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
710 	priv->systime_flags = systime_flags;
711 
712 	/* program Sub Second Increment reg */
713 	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
714 					   priv->plat->clk_ptp_rate,
715 					   xmac, &sec_inc);
716 	temp = div_u64(1000000000ULL, sec_inc);
717 
718 	/* Store sub second increment for later use */
719 	priv->sub_second_inc = sec_inc;
720 
721 	/* Calculate the default addend value:
722 	 * formula is:
723 	 * addend = (2^32)/freq_div_ratio;
724 	 * where freq_div_ratio = 1e9ns/sec_inc
725 	 */
726 	temp = (u64)(temp << 32);
727 	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
728 	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
729 
730 	/* initialize system time */
731 	ktime_get_real_ts64(&now);
732 
733 	/* lower 32 bits of tv_sec are safe until y2106 */
734 	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
735 
736 	return 0;
737 }
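/* Worked example, assuming clk_ptp_rate = 50 MHz and that
 * config_sub_second_increment selects sec_inc = 40 ns (fine update mode
 * typically runs the sub-second counter at half the input clock rate):
 * freq_div_ratio = 1e9 / 40 = 25,000,000, so
 * default_addend = 2^32 * 25,000,000 / 50,000,000 = 0x80000000.
 */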
738 
739 /**
740  * stmmac_init_timestamping - initialise timestamping
741  * @priv: driver private structure
742  * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
743  * This is done by looking at the HW cap. register.
744  * The PTP clock driver itself is registered later, from stmmac_setup_ptp().
745  */
746 static int stmmac_init_timestamping(struct stmmac_priv *priv)
747 {
748 	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
749 	int ret;
750 
751 	if (priv->plat->ptp_clk_freq_config)
752 		priv->plat->ptp_clk_freq_config(priv);
753 
754 	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
755 		netdev_info(priv->dev, "PTP not supported by HW\n");
756 		return -EOPNOTSUPP;
757 	}
758 
759 	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
760 	if (ret) {
761 		netdev_warn(priv->dev, "PTP init failed\n");
762 		return ret;
763 	}
764 
765 	priv->adv_ts = 0;
766 	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
767 	if (xmac && priv->dma_cap.atime_stamp)
768 		priv->adv_ts = 1;
769 	/* Dwmac 3.x core with extend_desc can support adv_ts */
770 	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
771 		priv->adv_ts = 1;
772 
773 	if (priv->dma_cap.time_stamp)
774 		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
775 
776 	if (priv->adv_ts)
777 		netdev_info(priv->dev,
778 			    "IEEE 1588-2008 Advanced Timestamp supported\n");
779 
780 	priv->hwts_tx_en = 0;
781 	priv->hwts_rx_en = 0;
782 
783 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
784 		stmmac_hwtstamp_correct_latency(priv, priv);
785 
786 	return 0;
787 }
788 
789 static void stmmac_setup_ptp(struct stmmac_priv *priv)
790 {
791 	int ret;
792 
793 	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
794 	if (ret < 0)
795 		netdev_warn(priv->dev,
796 			    "failed to enable PTP reference clock: %pe\n",
797 			    ERR_PTR(ret));
798 
799 	if (stmmac_init_timestamping(priv) == 0)
800 		stmmac_ptp_register(priv);
801 }
802 
803 static void stmmac_release_ptp(struct stmmac_priv *priv)
804 {
805 	stmmac_ptp_unregister(priv);
806 	clk_disable_unprepare(priv->plat->clk_ptp_ref);
807 }
808 
809 /**
810  *  stmmac_mac_flow_ctrl - Configure flow control in all queues
811  *  @priv: driver private structure
812  *  @duplex: duplex passed to the next function
813  *  @flow_ctrl: desired flow control modes
814  *  Description: It is used for configuring the flow control in all queues
815  */
816 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
817 				 unsigned int flow_ctrl)
818 {
819 	u32 tx_cnt = priv->plat->tx_queues_to_use;
820 
821 	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
822 			 tx_cnt);
823 }
824 
825 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
826 					 phy_interface_t interface)
827 {
828 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
829 
830 	/* Refresh the MAC-specific capabilities */
831 	stmmac_mac_update_caps(priv);
832 
833 	config->mac_capabilities = priv->hw->link.caps;
834 
835 	if (priv->plat->max_speed)
836 		phylink_limit_mac_speed(config, priv->plat->max_speed);
837 
838 	return config->mac_capabilities;
839 }
840 
841 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
842 						 phy_interface_t interface)
843 {
844 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
845 	struct phylink_pcs *pcs;
846 
847 	if (priv->plat->select_pcs) {
848 		pcs = priv->plat->select_pcs(priv, interface);
849 		if (!IS_ERR(pcs))
850 			return pcs;
851 	}
852 
853 	return NULL;
854 }
855 
856 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
857 			      const struct phylink_link_state *state)
858 {
859 	/* Nothing to do, xpcs_config() handles everything */
860 }
861 
862 static void stmmac_mac_link_down(struct phylink_config *config,
863 				 unsigned int mode, phy_interface_t interface)
864 {
865 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
866 
867 	stmmac_mac_set(priv, priv->ioaddr, false);
868 	if (priv->dma_cap.eee)
869 		stmmac_set_eee_pls(priv, priv->hw, false);
870 
871 	if (stmmac_fpe_supported(priv))
872 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
873 }
874 
875 static void stmmac_mac_link_up(struct phylink_config *config,
876 			       struct phy_device *phy,
877 			       unsigned int mode, phy_interface_t interface,
878 			       int speed, int duplex,
879 			       bool tx_pause, bool rx_pause)
880 {
881 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
882 	unsigned int flow_ctrl;
883 	u32 old_ctrl, ctrl;
884 	int ret;
885 
886 	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
887 	    priv->plat->serdes_powerup)
888 		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
889 
890 	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
891 	ctrl = old_ctrl & ~priv->hw->link.speed_mask;
892 
893 	if (interface == PHY_INTERFACE_MODE_USXGMII) {
894 		switch (speed) {
895 		case SPEED_10000:
896 			ctrl |= priv->hw->link.xgmii.speed10000;
897 			break;
898 		case SPEED_5000:
899 			ctrl |= priv->hw->link.xgmii.speed5000;
900 			break;
901 		case SPEED_2500:
902 			ctrl |= priv->hw->link.xgmii.speed2500;
903 			break;
904 		default:
905 			return;
906 		}
907 	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
908 		switch (speed) {
909 		case SPEED_100000:
910 			ctrl |= priv->hw->link.xlgmii.speed100000;
911 			break;
912 		case SPEED_50000:
913 			ctrl |= priv->hw->link.xlgmii.speed50000;
914 			break;
915 		case SPEED_40000:
916 			ctrl |= priv->hw->link.xlgmii.speed40000;
917 			break;
918 		case SPEED_25000:
919 			ctrl |= priv->hw->link.xlgmii.speed25000;
920 			break;
921 		case SPEED_10000:
922 			ctrl |= priv->hw->link.xgmii.speed10000;
923 			break;
924 		case SPEED_2500:
925 			ctrl |= priv->hw->link.speed2500;
926 			break;
927 		case SPEED_1000:
928 			ctrl |= priv->hw->link.speed1000;
929 			break;
930 		default:
931 			return;
932 		}
933 	} else {
934 		switch (speed) {
935 		case SPEED_2500:
936 			ctrl |= priv->hw->link.speed2500;
937 			break;
938 		case SPEED_1000:
939 			ctrl |= priv->hw->link.speed1000;
940 			break;
941 		case SPEED_100:
942 			ctrl |= priv->hw->link.speed100;
943 			break;
944 		case SPEED_10:
945 			ctrl |= priv->hw->link.speed10;
946 			break;
947 		default:
948 			return;
949 		}
950 	}
951 
952 	if (priv->plat->fix_mac_speed)
953 		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
954 
955 	if (!duplex)
956 		ctrl &= ~priv->hw->link.duplex;
957 	else
958 		ctrl |= priv->hw->link.duplex;
959 
960 	/* Flow Control operation */
961 	if (rx_pause && tx_pause)
962 		flow_ctrl = FLOW_AUTO;
963 	else if (rx_pause && !tx_pause)
964 		flow_ctrl = FLOW_RX;
965 	else if (!rx_pause && tx_pause)
966 		flow_ctrl = FLOW_TX;
967 	else
968 		flow_ctrl = FLOW_OFF;
969 
970 	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
971 
972 	if (ctrl != old_ctrl)
973 		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
974 
975 	if (priv->plat->set_clk_tx_rate) {
976 		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
977 						priv->plat->clk_tx_i,
978 						interface, speed);
979 		if (ret < 0)
980 			netdev_err(priv->dev,
981 				   "failed to configure %s transmit clock for %dMbps: %pe\n",
982 				   phy_modes(interface), speed, ERR_PTR(ret));
983 	}
984 
985 	stmmac_mac_set(priv, priv->ioaddr, true);
986 	if (priv->dma_cap.eee)
987 		stmmac_set_eee_pls(priv, priv->hw, true);
988 
989 	if (stmmac_fpe_supported(priv))
990 		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
991 
992 	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
993 		stmmac_hwtstamp_correct_latency(priv, priv);
994 }
995 
996 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
997 {
998 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
999 
1000 	priv->eee_active = false;
1001 
1002 	mutex_lock(&priv->lock);
1003 
1004 	priv->eee_enabled = false;
1005 
1006 	netdev_dbg(priv->dev, "disable EEE\n");
1007 	priv->eee_sw_timer_en = false;
1008 	timer_delete_sync(&priv->eee_ctrl_timer);
1009 	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1010 	priv->tx_path_in_lpi_mode = false;
1011 
1012 	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1013 	mutex_unlock(&priv->lock);
1014 }
1015 
1016 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1017 				    bool tx_clk_stop)
1018 {
1019 	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1020 	int ret;
1021 
1022 	priv->tx_lpi_timer = timer;
1023 	priv->eee_active = true;
1024 
1025 	mutex_lock(&priv->lock);
1026 
1027 	priv->eee_enabled = true;
1028 
1029 	/* Update the transmit clock stop according to PHY capability if
1030 	 * the platform allows
1031 	 */
1032 	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1033 		priv->tx_lpi_clk_stop = tx_clk_stop;
1034 
1035 	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1036 			     STMMAC_DEFAULT_TWT_LS);
1037 
1038 	/* Try to configure the hardware timer. */
1039 	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1040 				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1041 
1042 	if (ret) {
1043 		/* Hardware timer mode not supported, or value out of range.
1044 		 * Fall back to using software LPI mode
1045 		 */
1046 		priv->eee_sw_timer_en = true;
1047 		stmmac_restart_sw_lpi_timer(priv);
1048 	}
1049 
1050 	mutex_unlock(&priv->lock);
1051 	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1052 
1053 	return 0;
1054 }
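/* From userspace this path is normally exercised through ethtool, e.g.
 * (the interface name is a placeholder):
 *
 *	ethtool --set-eee eth0 eee on tx-lpi on tx-timer 1000
 *
 * phylink then invokes mac_enable_tx_lpi()/mac_disable_tx_lpi() as the link
 * and EEE negotiation state changes.
 */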
1055 
1056 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1057 			     phy_interface_t interface)
1058 {
1059 	struct net_device *ndev = to_net_dev(config->dev);
1060 	struct stmmac_priv *priv = netdev_priv(ndev);
1061 
1062 	if (priv->plat->mac_finish)
1063 		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1064 
1065 	return 0;
1066 }
1067 
1068 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1069 	.mac_get_caps = stmmac_mac_get_caps,
1070 	.mac_select_pcs = stmmac_mac_select_pcs,
1071 	.mac_config = stmmac_mac_config,
1072 	.mac_link_down = stmmac_mac_link_down,
1073 	.mac_link_up = stmmac_mac_link_up,
1074 	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1075 	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1076 	.mac_finish = stmmac_mac_finish,
1077 };
1078 
1079 /**
1080  * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1081  * @priv: driver private structure
1082  * Description: this verifies whether the HW supports the Physical Coding
1083  * Sublayer (PCS) interface, which can be used when the MAC is configured
1084  * for the TBI, RTBI, or SGMII PHY interface.
1085  */
1086 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1087 {
1088 	int interface = priv->plat->phy_interface;
1089 
1090 	if (priv->dma_cap.pcs) {
1091 		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1092 		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1093 		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1094 		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1095 			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1096 			priv->hw->pcs = STMMAC_PCS_RGMII;
1097 		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
1098 			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1099 			priv->hw->pcs = STMMAC_PCS_SGMII;
1100 		}
1101 	}
1102 }
1103 
1104 /**
1105  * stmmac_init_phy - PHY initialization
1106  * @dev: net device structure
1107  * Description: it initializes the driver's PHY state, and attaches the PHY
1108  * to the mac driver.
1109  *  Return value:
1110  *  0 on success
1111  */
1112 static int stmmac_init_phy(struct net_device *dev)
1113 {
1114 	struct stmmac_priv *priv = netdev_priv(dev);
1115 	int mode = priv->plat->phy_interface;
1116 	struct fwnode_handle *phy_fwnode;
1117 	struct fwnode_handle *fwnode;
1118 	struct ethtool_keee eee;
1119 	int ret;
1120 
1121 	if (!phylink_expects_phy(priv->phylink))
1122 		return 0;
1123 
1124 	if (priv->hw->xpcs &&
1125 	    xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
1126 		return 0;
1127 
1128 	fwnode = priv->plat->port_node;
1129 	if (!fwnode)
1130 		fwnode = dev_fwnode(priv->device);
1131 
1132 	if (fwnode)
1133 		phy_fwnode = fwnode_get_phy_node(fwnode);
1134 	else
1135 		phy_fwnode = NULL;
1136 
1137 	/* Some DT bindings do not set up the PHY handle. Let's try to
1138 	 * parse it manually.
1139 	 */
1140 	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1141 		int addr = priv->plat->phy_addr;
1142 		struct phy_device *phydev;
1143 
1144 		if (addr < 0) {
1145 			netdev_err(priv->dev, "no phy found\n");
1146 			return -ENODEV;
1147 		}
1148 
1149 		phydev = mdiobus_get_phy(priv->mii, addr);
1150 		if (!phydev) {
1151 			netdev_err(priv->dev, "no phy at addr %d\n", addr);
1152 			return -ENODEV;
1153 		}
1154 
1155 		ret = phylink_connect_phy(priv->phylink, phydev);
1156 	} else {
1157 		fwnode_handle_put(phy_fwnode);
1158 		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1159 	}
1160 
1161 	if (ret) {
1162 		netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
1163 			   ERR_PTR(ret));
1164 		return ret;
1165 	}
1166 
1167 	/* Configure phylib's copy of the LPI timer. Normally,
1168 	 * phylink_config.lpi_timer_default would do this, but there is a
1169 	 * chance that userspace could change the eee_timer setting via sysfs
1170 	 * before the first open. Thus, preserve existing behaviour.
1171 	 */
1172 	if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1173 		eee.tx_lpi_timer = priv->tx_lpi_timer;
1174 		phylink_ethtool_set_eee(priv->phylink, &eee);
1175 	}
1176 
1177 	if (!priv->plat->pmt) {
1178 		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1179 
1180 		phylink_ethtool_get_wol(priv->phylink, &wol);
1181 		device_set_wakeup_capable(priv->device, !!wol.supported);
1182 		device_set_wakeup_enable(priv->device, !!wol.wolopts);
1183 	}
1184 
1185 	return 0;
1186 }
1187 
1188 static int stmmac_phy_setup(struct stmmac_priv *priv)
1189 {
1190 	struct stmmac_mdio_bus_data *mdio_bus_data;
1191 	struct phylink_config *config;
1192 	struct fwnode_handle *fwnode;
1193 	struct phylink_pcs *pcs;
1194 	struct phylink *phylink;
1195 
1196 	config = &priv->phylink_config;
1197 
1198 	config->dev = &priv->dev->dev;
1199 	config->type = PHYLINK_NETDEV;
1200 	config->mac_managed_pm = true;
1201 
1202 	/* Stmmac always requires an RX clock for hardware initialization */
1203 	config->mac_requires_rxc = true;
1204 
1205 	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1206 		config->eee_rx_clk_stop_enable = true;
1207 
1208 	/* Set the default transmit clock stop bit based on the platform glue */
1209 	priv->tx_lpi_clk_stop = priv->plat->flags &
1210 				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1211 
1212 	mdio_bus_data = priv->plat->mdio_bus_data;
1213 	if (mdio_bus_data)
1214 		config->default_an_inband = mdio_bus_data->default_an_inband;
1215 
1216 	/* Get the PHY interface modes (at the PHY end of the link) that
1217 	 * are supported by the platform.
1218 	 */
1219 	if (priv->plat->get_interfaces)
1220 		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1221 					   config->supported_interfaces);
1222 
1223 	/* As a last resort, if the supported interfaces have not already
1224 	 * been provided above, fall back to the platform/firmware specified
1225 	 * phy_interface mode.
1226 	 */
1227 	if (phy_interface_empty(config->supported_interfaces))
1228 		__set_bit(priv->plat->phy_interface,
1229 			  config->supported_interfaces);
1230 
1231 	/* If we have an xpcs, it defines which PHY interfaces are supported. */
1232 	if (priv->hw->xpcs)
1233 		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1234 	else
1235 		pcs = priv->hw->phylink_pcs;
1236 
1237 	if (pcs)
1238 		phy_interface_or(config->supported_interfaces,
1239 				 config->supported_interfaces,
1240 				 pcs->supported_interfaces);
1241 
1242 	if (priv->dma_cap.eee) {
1243 		/* Assume all supported interfaces also support LPI */
1244 		memcpy(config->lpi_interfaces, config->supported_interfaces,
1245 		       sizeof(config->lpi_interfaces));
1246 
1247 		/* 100Mbps full duplex and all full duplex speeds of 1Gbps and above */
1248 		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
1249 		config->lpi_timer_default = eee_timer * 1000;
1250 		config->eee_enabled_default = true;
1251 	}
1252 
1253 	fwnode = priv->plat->port_node;
1254 	if (!fwnode)
1255 		fwnode = dev_fwnode(priv->device);
1256 
1257 	phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1258 				 &stmmac_phylink_mac_ops);
1259 	if (IS_ERR(phylink))
1260 		return PTR_ERR(phylink);
1261 
1262 	priv->phylink = phylink;
1263 	return 0;
1264 }
1265 
1266 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1267 				    struct stmmac_dma_conf *dma_conf)
1268 {
1269 	u32 rx_cnt = priv->plat->rx_queues_to_use;
1270 	unsigned int desc_size;
1271 	void *head_rx;
1272 	u32 queue;
1273 
1274 	/* Display RX rings */
1275 	for (queue = 0; queue < rx_cnt; queue++) {
1276 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1277 
1278 		pr_info("\tRX Queue %u rings\n", queue);
1279 
1280 		if (priv->extend_desc) {
1281 			head_rx = (void *)rx_q->dma_erx;
1282 			desc_size = sizeof(struct dma_extended_desc);
1283 		} else {
1284 			head_rx = (void *)rx_q->dma_rx;
1285 			desc_size = sizeof(struct dma_desc);
1286 		}
1287 
1288 		/* Display RX ring */
1289 		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1290 				    rx_q->dma_rx_phy, desc_size);
1291 	}
1292 }
1293 
1294 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1295 				    struct stmmac_dma_conf *dma_conf)
1296 {
1297 	u32 tx_cnt = priv->plat->tx_queues_to_use;
1298 	unsigned int desc_size;
1299 	void *head_tx;
1300 	u32 queue;
1301 
1302 	/* Display TX rings */
1303 	for (queue = 0; queue < tx_cnt; queue++) {
1304 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1305 
1306 		pr_info("\tTX Queue %d rings\n", queue);
1307 
1308 		if (priv->extend_desc) {
1309 			head_tx = (void *)tx_q->dma_etx;
1310 			desc_size = sizeof(struct dma_extended_desc);
1311 		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1312 			head_tx = (void *)tx_q->dma_entx;
1313 			desc_size = sizeof(struct dma_edesc);
1314 		} else {
1315 			head_tx = (void *)tx_q->dma_tx;
1316 			desc_size = sizeof(struct dma_desc);
1317 		}
1318 
1319 		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1320 				    tx_q->dma_tx_phy, desc_size);
1321 	}
1322 }
1323 
1324 static void stmmac_display_rings(struct stmmac_priv *priv,
1325 				 struct stmmac_dma_conf *dma_conf)
1326 {
1327 	/* Display RX ring */
1328 	stmmac_display_rx_rings(priv, dma_conf);
1329 
1330 	/* Display TX ring */
1331 	stmmac_display_tx_rings(priv, dma_conf);
1332 }
1333 
1334 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1335 {
1336 	if (stmmac_xdp_is_enabled(priv))
1337 		return XDP_PACKET_HEADROOM;
1338 
1339 	return NET_SKB_PAD;
1340 }
1341 
1342 static int stmmac_set_bfsize(int mtu, int bufsize)
1343 {
1344 	int ret = bufsize;
1345 
1346 	if (mtu >= BUF_SIZE_8KiB)
1347 		ret = BUF_SIZE_16KiB;
1348 	else if (mtu >= BUF_SIZE_4KiB)
1349 		ret = BUF_SIZE_8KiB;
1350 	else if (mtu >= BUF_SIZE_2KiB)
1351 		ret = BUF_SIZE_4KiB;
1352 	else if (mtu > DEFAULT_BUFSIZE)
1353 		ret = BUF_SIZE_2KiB;
1354 	else
1355 		ret = DEFAULT_BUFSIZE;
1356 
1357 	return ret;
1358 }
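/* For example, an MTU of 3000 is at least BUF_SIZE_2KiB but below
 * BUF_SIZE_4KiB, so the DMA buffer size is bumped to BUF_SIZE_4KiB, while a
 * standard 1500-byte MTU does not exceed DEFAULT_BUFSIZE and keeps the
 * 1536-byte default.
 */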
1359 
1360 /**
1361  * stmmac_clear_rx_descriptors - clear RX descriptors
1362  * @priv: driver private structure
1363  * @dma_conf: structure to take the dma data
1364  * @queue: RX queue index
1365  * Description: this function is called to clear the RX descriptors
1366  * whether basic or extended descriptors are used.
1367  */
1368 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1369 					struct stmmac_dma_conf *dma_conf,
1370 					u32 queue)
1371 {
1372 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1373 	int i;
1374 
1375 	/* Clear the RX descriptors */
1376 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1377 		if (priv->extend_desc)
1378 			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1379 					priv->use_riwt, priv->mode,
1380 					(i == dma_conf->dma_rx_size - 1),
1381 					dma_conf->dma_buf_sz);
1382 		else
1383 			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1384 					priv->use_riwt, priv->mode,
1385 					(i == dma_conf->dma_rx_size - 1),
1386 					dma_conf->dma_buf_sz);
1387 }
1388 
1389 /**
1390  * stmmac_clear_tx_descriptors - clear tx descriptors
1391  * @priv: driver private structure
1392  * @dma_conf: structure to take the dma data
1393  * @queue: TX queue index.
1394  * Description: this function is called to clear the TX descriptors
1395  * whether basic or extended descriptors are used.
1396  */
1397 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1398 					struct stmmac_dma_conf *dma_conf,
1399 					u32 queue)
1400 {
1401 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1402 	int i;
1403 
1404 	/* Clear the TX descriptors */
1405 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1406 		int last = (i == (dma_conf->dma_tx_size - 1));
1407 		struct dma_desc *p;
1408 
1409 		if (priv->extend_desc)
1410 			p = &tx_q->dma_etx[i].basic;
1411 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1412 			p = &tx_q->dma_entx[i].basic;
1413 		else
1414 			p = &tx_q->dma_tx[i];
1415 
1416 		stmmac_init_tx_desc(priv, p, priv->mode, last);
1417 	}
1418 }
1419 
1420 /**
1421  * stmmac_clear_descriptors - clear descriptors
1422  * @priv: driver private structure
1423  * @dma_conf: structure to take the dma data
1424  * Description: this function is called to clear the TX and RX descriptors
1425  * whether basic or extended descriptors are used.
1426  */
1427 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1428 				     struct stmmac_dma_conf *dma_conf)
1429 {
1430 	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1431 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1432 	u32 queue;
1433 
1434 	/* Clear the RX descriptors */
1435 	for (queue = 0; queue < rx_queue_cnt; queue++)
1436 		stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1437 
1438 	/* Clear the TX descriptors */
1439 	for (queue = 0; queue < tx_queue_cnt; queue++)
1440 		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1441 }
1442 
1443 /**
1444  * stmmac_init_rx_buffers - init the RX descriptor buffer.
1445  * @priv: driver private structure
1446  * @dma_conf: structure to take the dma data
1447  * @p: descriptor pointer
1448  * @i: descriptor index
1449  * @flags: gfp flag
1450  * @queue: RX queue index
1451  * Description: this function is called to allocate a receive buffer, perform
1452  * the DMA mapping and init the descriptor.
1453  */
1454 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1455 				  struct stmmac_dma_conf *dma_conf,
1456 				  struct dma_desc *p,
1457 				  int i, gfp_t flags, u32 queue)
1458 {
1459 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1460 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1461 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1462 
1463 	if (priv->dma_cap.host_dma_width <= 32)
1464 		gfp |= GFP_DMA32;
1465 
1466 	if (!buf->page) {
1467 		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1468 		if (!buf->page)
1469 			return -ENOMEM;
1470 		buf->page_offset = stmmac_rx_offset(priv);
1471 	}
1472 
1473 	if (priv->sph && !buf->sec_page) {
1474 		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1475 		if (!buf->sec_page)
1476 			return -ENOMEM;
1477 
1478 		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1479 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1480 	} else {
1481 		buf->sec_page = NULL;
1482 		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1483 	}
1484 
1485 	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1486 
1487 	stmmac_set_desc_addr(priv, p, buf->addr);
1488 	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1489 		stmmac_init_desc3(priv, p);
1490 
1491 	return 0;
1492 }
1493 
1494 /**
1495  * stmmac_free_rx_buffer - free RX dma buffers
1496  * @priv: private structure
1497  * @rx_q: RX queue
1498  * @i: buffer index.
1499  */
1500 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1501 				  struct stmmac_rx_queue *rx_q,
1502 				  int i)
1503 {
1504 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1505 
1506 	if (buf->page)
1507 		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1508 	buf->page = NULL;
1509 
1510 	if (buf->sec_page)
1511 		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1512 	buf->sec_page = NULL;
1513 }
1514 
1515 /**
1516  * stmmac_free_tx_buffer - free TX dma buffers
1517  * @priv: private structure
1518  * @dma_conf: structure to take the dma data
1519  * @queue: TX queue index
1520  * @i: buffer index.
1521  */
1522 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1523 				  struct stmmac_dma_conf *dma_conf,
1524 				  u32 queue, int i)
1525 {
1526 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1527 
1528 	if (tx_q->tx_skbuff_dma[i].buf &&
1529 	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1530 		if (tx_q->tx_skbuff_dma[i].map_as_page)
1531 			dma_unmap_page(priv->device,
1532 				       tx_q->tx_skbuff_dma[i].buf,
1533 				       tx_q->tx_skbuff_dma[i].len,
1534 				       DMA_TO_DEVICE);
1535 		else
1536 			dma_unmap_single(priv->device,
1537 					 tx_q->tx_skbuff_dma[i].buf,
1538 					 tx_q->tx_skbuff_dma[i].len,
1539 					 DMA_TO_DEVICE);
1540 	}
1541 
1542 	if (tx_q->xdpf[i] &&
1543 	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1544 	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1545 		xdp_return_frame(tx_q->xdpf[i]);
1546 		tx_q->xdpf[i] = NULL;
1547 	}
1548 
1549 	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1550 		tx_q->xsk_frames_done++;
1551 
1552 	if (tx_q->tx_skbuff[i] &&
1553 	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1554 		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1555 		tx_q->tx_skbuff[i] = NULL;
1556 	}
1557 
1558 	tx_q->tx_skbuff_dma[i].buf = 0;
1559 	tx_q->tx_skbuff_dma[i].map_as_page = false;
1560 }
1561 
1562 /**
1563  * dma_free_rx_skbufs - free RX dma buffers
1564  * @priv: private structure
1565  * @dma_conf: structure to take the dma data
1566  * @queue: RX queue index
1567  */
1568 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1569 			       struct stmmac_dma_conf *dma_conf,
1570 			       u32 queue)
1571 {
1572 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1573 	int i;
1574 
1575 	for (i = 0; i < dma_conf->dma_rx_size; i++)
1576 		stmmac_free_rx_buffer(priv, rx_q, i);
1577 }
1578 
1579 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1580 				   struct stmmac_dma_conf *dma_conf,
1581 				   u32 queue, gfp_t flags)
1582 {
1583 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1584 	int i;
1585 
1586 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1587 		struct dma_desc *p;
1588 		int ret;
1589 
1590 		if (priv->extend_desc)
1591 			p = &((rx_q->dma_erx + i)->basic);
1592 		else
1593 			p = rx_q->dma_rx + i;
1594 
1595 		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1596 					     queue);
1597 		if (ret)
1598 			return ret;
1599 
1600 		rx_q->buf_alloc_num++;
1601 	}
1602 
1603 	return 0;
1604 }
1605 
1606 /**
1607  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1608  * @priv: private structure
1609  * @dma_conf: structure to take the dma data
1610  * @queue: RX queue index
1611  */
1612 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1613 				struct stmmac_dma_conf *dma_conf,
1614 				u32 queue)
1615 {
1616 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1617 	int i;
1618 
1619 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1620 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1621 
1622 		if (!buf->xdp)
1623 			continue;
1624 
1625 		xsk_buff_free(buf->xdp);
1626 		buf->xdp = NULL;
1627 	}
1628 }
1629 
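/**
 * stmmac_alloc_rx_buffers_zc - allocate RX buffers from the XSK pool (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: for AF_XDP zero-copy, pull one xdp_buff per descriptor out of
 * the XSK pool and program its DMA address into the descriptor.
 */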
1630 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1631 				      struct stmmac_dma_conf *dma_conf,
1632 				      u32 queue)
1633 {
1634 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1635 	int i;
1636 
1637 	/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1638 	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1639 	 * use this macro to make sure there are no size violations.
1640 	 */
1641 	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1642 
1643 	for (i = 0; i < dma_conf->dma_rx_size; i++) {
1644 		struct stmmac_rx_buffer *buf;
1645 		dma_addr_t dma_addr;
1646 		struct dma_desc *p;
1647 
1648 		if (priv->extend_desc)
1649 			p = (struct dma_desc *)(rx_q->dma_erx + i);
1650 		else
1651 			p = rx_q->dma_rx + i;
1652 
1653 		buf = &rx_q->buf_pool[i];
1654 
1655 		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1656 		if (!buf->xdp)
1657 			return -ENOMEM;
1658 
1659 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1660 		stmmac_set_desc_addr(priv, p, dma_addr);
1661 		rx_q->buf_alloc_num++;
1662 	}
1663 
1664 	return 0;
1665 }
1666 
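/**
 * stmmac_get_xsk_pool - get the XSK buffer pool attached to a queue, if any
 * @priv: driver private structure
 * @queue: queue index
 * Description: return the XSK buffer pool registered for @queue when an XDP
 * program is loaded and the queue runs in AF_XDP zero-copy mode, NULL otherwise.
 */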
1667 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1668 {
1669 	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1670 		return NULL;
1671 
1672 	return xsk_get_pool_from_qid(priv->dev, queue);
1673 }
1674 
1675 /**
1676  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1677  * @priv: driver private structure
1678  * @dma_conf: structure to take the dma data
1679  * @queue: RX queue index
1680  * @flags: gfp flag.
1681  * Description: this function initializes the DMA RX descriptors
1682  * and allocates the socket buffers. It supports the chained and ring
1683  * modes.
1684  */
1685 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1686 				    struct stmmac_dma_conf *dma_conf,
1687 				    u32 queue, gfp_t flags)
1688 {
1689 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1690 	int ret;
1691 
1692 	netif_dbg(priv, probe, priv->dev,
1693 		  "(%s) dma_rx_phy=0x%08x\n", __func__,
1694 		  (u32)rx_q->dma_rx_phy);
1695 
1696 	stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1697 
1698 	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1699 
1700 	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1701 
1702 	if (rx_q->xsk_pool) {
1703 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1704 						   MEM_TYPE_XSK_BUFF_POOL,
1705 						   NULL));
1706 		netdev_info(priv->dev,
1707 			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1708 			    rx_q->queue_index);
1709 		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1710 	} else {
1711 		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1712 						   MEM_TYPE_PAGE_POOL,
1713 						   rx_q->page_pool));
1714 		netdev_info(priv->dev,
1715 			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1716 			    rx_q->queue_index);
1717 	}
1718 
1719 	if (rx_q->xsk_pool) {
1720 		/* RX XDP ZC buffer pool may not be populated, e.g.
1721 		 * xdpsock TX-only.
1722 		 */
1723 		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1724 	} else {
1725 		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1726 		if (ret < 0)
1727 			return -ENOMEM;
1728 	}
1729 
1730 	/* Setup the chained descriptor addresses */
1731 	if (priv->mode == STMMAC_CHAIN_MODE) {
1732 		if (priv->extend_desc)
1733 			stmmac_mode_init(priv, rx_q->dma_erx,
1734 					 rx_q->dma_rx_phy,
1735 					 dma_conf->dma_rx_size, 1);
1736 		else
1737 			stmmac_mode_init(priv, rx_q->dma_rx,
1738 					 rx_q->dma_rx_phy,
1739 					 dma_conf->dma_rx_size, 0);
1740 	}
1741 
1742 	return 0;
1743 }
1744 
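/**
 * init_dma_rx_desc_rings - init all RX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: initialize every RX queue in use; on failure, free the buffers
 * of the queues that were already initialized before returning the error.
 */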
1745 static int init_dma_rx_desc_rings(struct net_device *dev,
1746 				  struct stmmac_dma_conf *dma_conf,
1747 				  gfp_t flags)
1748 {
1749 	struct stmmac_priv *priv = netdev_priv(dev);
1750 	u32 rx_count = priv->plat->rx_queues_to_use;
1751 	int queue;
1752 	int ret;
1753 
1754 	/* RX INITIALIZATION */
1755 	netif_dbg(priv, probe, priv->dev,
1756 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
1757 
1758 	for (queue = 0; queue < rx_count; queue++) {
1759 		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1760 		if (ret)
1761 			goto err_init_rx_buffers;
1762 	}
1763 
1764 	return 0;
1765 
1766 err_init_rx_buffers:
1767 	while (queue >= 0) {
1768 		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1769 
1770 		if (rx_q->xsk_pool)
1771 			dma_free_rx_xskbufs(priv, dma_conf, queue);
1772 		else
1773 			dma_free_rx_skbufs(priv, dma_conf, queue);
1774 
1775 		rx_q->buf_alloc_num = 0;
1776 		rx_q->xsk_pool = NULL;
1777 
1778 		queue--;
1779 	}
1780 
1781 	return ret;
1782 }
1783 
1784 /**
1785  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1786  * @priv: driver private structure
1787  * @dma_conf: structure to take the dma data
1788  * @queue: TX queue index
1789  * Description: this function initializes the DMA TX descriptors and the
1790  * per-descriptor TX bookkeeping. It supports the chained and ring
1791  * modes.
1792  */
1793 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1794 				    struct stmmac_dma_conf *dma_conf,
1795 				    u32 queue)
1796 {
1797 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1798 	int i;
1799 
1800 	netif_dbg(priv, probe, priv->dev,
1801 		  "(%s) dma_tx_phy=0x%08x\n", __func__,
1802 		  (u32)tx_q->dma_tx_phy);
1803 
1804 	/* Setup the chained descriptor addresses */
1805 	if (priv->mode == STMMAC_CHAIN_MODE) {
1806 		if (priv->extend_desc)
1807 			stmmac_mode_init(priv, tx_q->dma_etx,
1808 					 tx_q->dma_tx_phy,
1809 					 dma_conf->dma_tx_size, 1);
1810 		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1811 			stmmac_mode_init(priv, tx_q->dma_tx,
1812 					 tx_q->dma_tx_phy,
1813 					 dma_conf->dma_tx_size, 0);
1814 	}
1815 
1816 	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1817 
1818 	for (i = 0; i < dma_conf->dma_tx_size; i++) {
1819 		struct dma_desc *p;
1820 
1821 		if (priv->extend_desc)
1822 			p = &((tx_q->dma_etx + i)->basic);
1823 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1824 			p = &((tx_q->dma_entx + i)->basic);
1825 		else
1826 			p = tx_q->dma_tx + i;
1827 
1828 		stmmac_clear_desc(priv, p);
1829 
1830 		tx_q->tx_skbuff_dma[i].buf = 0;
1831 		tx_q->tx_skbuff_dma[i].map_as_page = false;
1832 		tx_q->tx_skbuff_dma[i].len = 0;
1833 		tx_q->tx_skbuff_dma[i].last_segment = false;
1834 		tx_q->tx_skbuff[i] = NULL;
1835 	}
1836 
1837 	return 0;
1838 }
1839 
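/**
 * init_dma_tx_desc_rings - init all TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: initialize the TX descriptor ring of every TX queue in use.
 */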
1840 static int init_dma_tx_desc_rings(struct net_device *dev,
1841 				  struct stmmac_dma_conf *dma_conf)
1842 {
1843 	struct stmmac_priv *priv = netdev_priv(dev);
1844 	u32 tx_queue_cnt;
1845 	u32 queue;
1846 
1847 	tx_queue_cnt = priv->plat->tx_queues_to_use;
1848 
1849 	for (queue = 0; queue < tx_queue_cnt; queue++)
1850 		__init_dma_tx_desc_rings(priv, dma_conf, queue);
1851 
1852 	return 0;
1853 }
1854 
1855 /**
1856  * init_dma_desc_rings - init the RX/TX descriptor rings
1857  * @dev: net device structure
1858  * @dma_conf: structure to take the dma data
1859  * @flags: gfp flag.
1860  * Description: this function initializes the DMA RX/TX descriptors
1861  * and allocates the socket buffers. It supports the chained and ring
1862  * modes.
1863  */
1864 static int init_dma_desc_rings(struct net_device *dev,
1865 			       struct stmmac_dma_conf *dma_conf,
1866 			       gfp_t flags)
1867 {
1868 	struct stmmac_priv *priv = netdev_priv(dev);
1869 	int ret;
1870 
1871 	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1872 	if (ret)
1873 		return ret;
1874 
1875 	ret = init_dma_tx_desc_rings(dev, dma_conf);
1876 
1877 	stmmac_clear_descriptors(priv, dma_conf);
1878 
1879 	if (netif_msg_hw(priv))
1880 		stmmac_display_rings(priv, dma_conf);
1881 
1882 	return ret;
1883 }
1884 
1885 /**
1886  * dma_free_tx_skbufs - free TX dma buffers
1887  * @priv: private structure
1888  * @dma_conf: structure to take the dma data
1889  * @queue: TX queue index
1890  */
1891 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1892 			       struct stmmac_dma_conf *dma_conf,
1893 			       u32 queue)
1894 {
1895 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1896 	int i;
1897 
1898 	tx_q->xsk_frames_done = 0;
1899 
1900 	for (i = 0; i < dma_conf->dma_tx_size; i++)
1901 		stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1902 
1903 	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1904 		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1905 		tx_q->xsk_frames_done = 0;
1906 		tx_q->xsk_pool = NULL;
1907 	}
1908 }
1909 
1910 /**
1911  * stmmac_free_tx_skbufs - free TX skb buffers
1912  * @priv: private structure
1913  */
1914 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1915 {
1916 	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1917 	u32 queue;
1918 
1919 	for (queue = 0; queue < tx_queue_cnt; queue++)
1920 		dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1921 }
1922 
1923 /**
1924  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1925  * @priv: private structure
1926  * @dma_conf: structure to take the dma data
1927  * @queue: RX queue index
1928  */
1929 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1930 					 struct stmmac_dma_conf *dma_conf,
1931 					 u32 queue)
1932 {
1933 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1934 
1935 	/* Release the DMA RX socket buffers */
1936 	if (rx_q->xsk_pool)
1937 		dma_free_rx_xskbufs(priv, dma_conf, queue);
1938 	else
1939 		dma_free_rx_skbufs(priv, dma_conf, queue);
1940 
1941 	rx_q->buf_alloc_num = 0;
1942 	rx_q->xsk_pool = NULL;
1943 
1944 	/* Free DMA regions of consistent memory previously allocated */
1945 	if (!priv->extend_desc)
1946 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1947 				  sizeof(struct dma_desc),
1948 				  rx_q->dma_rx, rx_q->dma_rx_phy);
1949 	else
1950 		dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1951 				  sizeof(struct dma_extended_desc),
1952 				  rx_q->dma_erx, rx_q->dma_rx_phy);
1953 
1954 	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1955 		xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1956 
1957 	kfree(rx_q->buf_pool);
1958 	if (rx_q->page_pool)
1959 		page_pool_destroy(rx_q->page_pool);
1960 }
1961 
1962 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1963 				       struct stmmac_dma_conf *dma_conf)
1964 {
1965 	u32 rx_count = priv->plat->rx_queues_to_use;
1966 	u32 queue;
1967 
1968 	/* Free RX queue resources */
1969 	for (queue = 0; queue < rx_count; queue++)
1970 		__free_dma_rx_desc_resources(priv, dma_conf, queue);
1971 }
1972 
1973 /**
1974  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1975  * @priv: private structure
1976  * @dma_conf: structure to take the dma data
1977  * @queue: TX queue index
1978  */
1979 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1980 					 struct stmmac_dma_conf *dma_conf,
1981 					 u32 queue)
1982 {
1983 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1984 	size_t size;
1985 	void *addr;
1986 
1987 	/* Release the DMA TX socket buffers */
1988 	dma_free_tx_skbufs(priv, dma_conf, queue);
1989 
1990 	if (priv->extend_desc) {
1991 		size = sizeof(struct dma_extended_desc);
1992 		addr = tx_q->dma_etx;
1993 	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1994 		size = sizeof(struct dma_edesc);
1995 		addr = tx_q->dma_entx;
1996 	} else {
1997 		size = sizeof(struct dma_desc);
1998 		addr = tx_q->dma_tx;
1999 	}
2000 
2001 	size *= dma_conf->dma_tx_size;
2002 
2003 	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2004 
2005 	kfree(tx_q->tx_skbuff_dma);
2006 	kfree(tx_q->tx_skbuff);
2007 }
2008 
2009 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2010 				       struct stmmac_dma_conf *dma_conf)
2011 {
2012 	u32 tx_count = priv->plat->tx_queues_to_use;
2013 	u32 queue;
2014 
2015 	/* Free TX queue resources */
2016 	for (queue = 0; queue < tx_count; queue++)
2017 		__free_dma_tx_desc_resources(priv, dma_conf, queue);
2018 }
2019 
2020 /**
2021  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2022  * @priv: private structure
2023  * @dma_conf: structure to take the dma data
2024  * @queue: RX queue index
2025  * Description: according to which descriptor can be used (extended or basic)
2026  * this function allocates the resources for the RX path of one queue: the
2027  * page pool, the buffer pool and the descriptor ring, plus the XDP RX queue
2028  * info needed for the zero-copy mechanism.
2029  */
2030 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2031 					 struct stmmac_dma_conf *dma_conf,
2032 					 u32 queue)
2033 {
2034 	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2035 	struct stmmac_channel *ch = &priv->channel[queue];
2036 	bool xdp_prog = stmmac_xdp_is_enabled(priv);
2037 	struct page_pool_params pp_params = { 0 };
2038 	unsigned int dma_buf_sz_pad, num_pages;
2039 	unsigned int napi_id;
2040 	int ret;
2041 
2042 	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2043 			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2044 	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2045 
2046 	rx_q->queue_index = queue;
2047 	rx_q->priv_data = priv;
2048 	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2049 
2050 	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2051 	pp_params.pool_size = dma_conf->dma_rx_size;
2052 	pp_params.order = order_base_2(num_pages);
2053 	pp_params.nid = dev_to_node(priv->device);
2054 	pp_params.dev = priv->device;
2055 	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2056 	pp_params.offset = stmmac_rx_offset(priv);
2057 	pp_params.max_len = dma_conf->dma_buf_sz;
2058 
2059 	if (priv->sph) {
2060 		pp_params.offset = 0;
2061 		pp_params.max_len += stmmac_rx_offset(priv);
2062 	}
2063 
2064 	rx_q->page_pool = page_pool_create(&pp_params);
2065 	if (IS_ERR(rx_q->page_pool)) {
2066 		ret = PTR_ERR(rx_q->page_pool);
2067 		rx_q->page_pool = NULL;
2068 		return ret;
2069 	}
2070 
2071 	rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2072 				 sizeof(*rx_q->buf_pool),
2073 				 GFP_KERNEL);
2074 	if (!rx_q->buf_pool)
2075 		return -ENOMEM;
2076 
2077 	if (priv->extend_desc) {
2078 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
2079 						   dma_conf->dma_rx_size *
2080 						   sizeof(struct dma_extended_desc),
2081 						   &rx_q->dma_rx_phy,
2082 						   GFP_KERNEL);
2083 		if (!rx_q->dma_erx)
2084 			return -ENOMEM;
2085 
2086 	} else {
2087 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
2088 						  dma_conf->dma_rx_size *
2089 						  sizeof(struct dma_desc),
2090 						  &rx_q->dma_rx_phy,
2091 						  GFP_KERNEL);
2092 		if (!rx_q->dma_rx)
2093 			return -ENOMEM;
2094 	}
2095 
2096 	if (stmmac_xdp_is_enabled(priv) &&
2097 	    test_bit(queue, priv->af_xdp_zc_qps))
2098 		napi_id = ch->rxtx_napi.napi_id;
2099 	else
2100 		napi_id = ch->rx_napi.napi_id;
2101 
2102 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2103 			       rx_q->queue_index,
2104 			       napi_id);
2105 	if (ret) {
2106 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2107 		return -EINVAL;
2108 	}
2109 
2110 	return 0;
2111 }
2112 
2113 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2114 				       struct stmmac_dma_conf *dma_conf)
2115 {
2116 	u32 rx_count = priv->plat->rx_queues_to_use;
2117 	u32 queue;
2118 	int ret;
2119 
2120 	/* RX queues buffers and DMA */
2121 	for (queue = 0; queue < rx_count; queue++) {
2122 		ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2123 		if (ret)
2124 			goto err_dma;
2125 	}
2126 
2127 	return 0;
2128 
2129 err_dma:
2130 	free_dma_rx_desc_resources(priv, dma_conf);
2131 
2132 	return ret;
2133 }
2134 
2135 /**
2136  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2137  * @priv: private structure
2138  * @dma_conf: structure to take the dma data
2139  * @queue: TX queue index
2140  * Description: according to which descriptor can be used (extended, enhanced
2141  * or basic) this function allocates the resources for the TX path of one
2142  * queue: the tx_skbuff and tx_skbuff_dma arrays and the descriptor ring in
2143  * coherent DMA memory.
2144  */
2145 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2146 					 struct stmmac_dma_conf *dma_conf,
2147 					 u32 queue)
2148 {
2149 	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2150 	size_t size;
2151 	void *addr;
2152 
2153 	tx_q->queue_index = queue;
2154 	tx_q->priv_data = priv;
2155 
2156 	tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2157 				      sizeof(*tx_q->tx_skbuff_dma),
2158 				      GFP_KERNEL);
2159 	if (!tx_q->tx_skbuff_dma)
2160 		return -ENOMEM;
2161 
2162 	tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2163 				  sizeof(struct sk_buff *),
2164 				  GFP_KERNEL);
2165 	if (!tx_q->tx_skbuff)
2166 		return -ENOMEM;
2167 
2168 	if (priv->extend_desc)
2169 		size = sizeof(struct dma_extended_desc);
2170 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2171 		size = sizeof(struct dma_edesc);
2172 	else
2173 		size = sizeof(struct dma_desc);
2174 
2175 	size *= dma_conf->dma_tx_size;
2176 
2177 	addr = dma_alloc_coherent(priv->device, size,
2178 				  &tx_q->dma_tx_phy, GFP_KERNEL);
2179 	if (!addr)
2180 		return -ENOMEM;
2181 
2182 	if (priv->extend_desc)
2183 		tx_q->dma_etx = addr;
2184 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2185 		tx_q->dma_entx = addr;
2186 	else
2187 		tx_q->dma_tx = addr;
2188 
2189 	return 0;
2190 }
2191 
2192 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2193 				       struct stmmac_dma_conf *dma_conf)
2194 {
2195 	u32 tx_count = priv->plat->tx_queues_to_use;
2196 	u32 queue;
2197 	int ret;
2198 
2199 	/* TX queues buffers and DMA */
2200 	for (queue = 0; queue < tx_count; queue++) {
2201 		ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2202 		if (ret)
2203 			goto err_dma;
2204 	}
2205 
2206 	return 0;
2207 
2208 err_dma:
2209 	free_dma_tx_desc_resources(priv, dma_conf);
2210 	return ret;
2211 }
2212 
2213 /**
2214  * alloc_dma_desc_resources - alloc TX/RX resources.
2215  * @priv: private structure
2216  * @dma_conf: structure to take the dma data
2217  * Description: according to which descriptor can be used (extended or basic)
2218  * this function allocates the resources for the TX and RX paths. For
2219  * reception, for example, it pre-allocates the RX buffers in order to
2220  * allow the zero-copy mechanism.
2221  */
2222 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2223 				    struct stmmac_dma_conf *dma_conf)
2224 {
2225 	/* RX Allocation */
2226 	int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2227 
2228 	if (ret)
2229 		return ret;
2230 
2231 	ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2232 
2233 	return ret;
2234 }
2235 
2236 /**
2237  * free_dma_desc_resources - free dma desc resources
2238  * @priv: private structure
2239  * @dma_conf: structure to take the dma data
2240  */
2241 static void free_dma_desc_resources(struct stmmac_priv *priv,
2242 				    struct stmmac_dma_conf *dma_conf)
2243 {
2244 	/* Release the DMA TX socket buffers */
2245 	free_dma_tx_desc_resources(priv, dma_conf);
2246 
2247 	/* Release the DMA RX socket buffers later
2248 	 * to ensure all pending XDP_TX buffers are returned.
2249 	 */
2250 	free_dma_rx_desc_resources(priv, dma_conf);
2251 }
2252 
2253 /**
2254  *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
2255  *  @priv: driver private structure
2256  *  Description: It is used for enabling the rx queues in the MAC
2257  */
2258 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2259 {
2260 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
2261 	int queue;
2262 	u8 mode;
2263 
2264 	for (queue = 0; queue < rx_queues_count; queue++) {
2265 		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2266 		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2267 	}
2268 }
2269 
2270 /**
2271  * stmmac_start_rx_dma - start RX DMA channel
2272  * @priv: driver private structure
2273  * @chan: RX channel index
2274  * Description:
2275  * This starts a RX DMA channel
2276  */
2277 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2278 {
2279 	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2280 	stmmac_start_rx(priv, priv->ioaddr, chan);
2281 }
2282 
2283 /**
2284  * stmmac_start_tx_dma - start TX DMA channel
2285  * @priv: driver private structure
2286  * @chan: TX channel index
2287  * Description:
2288  * This starts a TX DMA channel
2289  */
2290 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2291 {
2292 	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2293 	stmmac_start_tx(priv, priv->ioaddr, chan);
2294 }
2295 
2296 /**
2297  * stmmac_stop_rx_dma - stop RX DMA channel
2298  * @priv: driver private structure
2299  * @chan: RX channel index
2300  * Description:
2301  * This stops a RX DMA channel
2302  */
2303 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2304 {
2305 	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2306 	stmmac_stop_rx(priv, priv->ioaddr, chan);
2307 }
2308 
2309 /**
2310  * stmmac_stop_tx_dma - stop TX DMA channel
2311  * @priv: driver private structure
2312  * @chan: TX channel index
2313  * Description:
2314  * This stops a TX DMA channel
2315  */
2316 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2317 {
2318 	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2319 	stmmac_stop_tx(priv, priv->ioaddr, chan);
2320 }
2321 
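/**
 * stmmac_enable_all_dma_irq - enable RX and TX DMA interrupts on all channels
 * @priv: driver private structure
 * Description: re-enable both the RX and TX DMA interrupts of every channel,
 * taking the per-channel lock.
 */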
2322 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2323 {
2324 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2325 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2326 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2327 	u32 chan;
2328 
2329 	for (chan = 0; chan < dma_csr_ch; chan++) {
2330 		struct stmmac_channel *ch = &priv->channel[chan];
2331 		unsigned long flags;
2332 
2333 		spin_lock_irqsave(&ch->lock, flags);
2334 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2335 		spin_unlock_irqrestore(&ch->lock, flags);
2336 	}
2337 }
2338 
2339 /**
2340  * stmmac_start_all_dma - start all RX and TX DMA channels
2341  * @priv: driver private structure
2342  * Description:
2343  * This starts all the RX and TX DMA channels
2344  */
2345 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2346 {
2347 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2348 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2349 	u32 chan = 0;
2350 
2351 	for (chan = 0; chan < rx_channels_count; chan++)
2352 		stmmac_start_rx_dma(priv, chan);
2353 
2354 	for (chan = 0; chan < tx_channels_count; chan++)
2355 		stmmac_start_tx_dma(priv, chan);
2356 }
2357 
2358 /**
2359  * stmmac_stop_all_dma - stop all RX and TX DMA channels
2360  * @priv: driver private structure
2361  * Description:
2362  * This stops the RX and TX DMA channels
2363  */
2364 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2365 {
2366 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2367 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2368 	u32 chan = 0;
2369 
2370 	for (chan = 0; chan < rx_channels_count; chan++)
2371 		stmmac_stop_rx_dma(priv, chan);
2372 
2373 	for (chan = 0; chan < tx_channels_count; chan++)
2374 		stmmac_stop_tx_dma(priv, chan);
2375 }
2376 
2377 /**
2378  *  stmmac_dma_operation_mode - HW DMA operation mode
2379  *  @priv: driver private structure
2380  *  Description: it is used for configuring the DMA operation mode register in
2381  *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2382  */
2383 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2384 {
2385 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2386 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2387 	int rxfifosz = priv->plat->rx_fifo_size;
2388 	int txfifosz = priv->plat->tx_fifo_size;
2389 	u32 txmode = 0;
2390 	u32 rxmode = 0;
2391 	u32 chan = 0;
2392 	u8 qmode = 0;
2393 
2394 	if (rxfifosz == 0)
2395 		rxfifosz = priv->dma_cap.rx_fifo_size;
2396 	if (txfifosz == 0)
2397 		txfifosz = priv->dma_cap.tx_fifo_size;
2398 
2399 	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2400 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2401 		rxfifosz /= rx_channels_count;
2402 		txfifosz /= tx_channels_count;
2403 	}
2404 
2405 	if (priv->plat->force_thresh_dma_mode) {
2406 		txmode = tc;
2407 		rxmode = tc;
2408 	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2409 		/*
2410 		 * In case of GMAC, SF mode can be enabled
2411 		 * to perform the TX COE in HW. This depends on:
2412 		 * 1) TX COE being actually supported;
2413 		 * 2) there being no buggy Jumbo frame support
2414 		 *    that requires not inserting the csum in the TDES.
2415 		 */
2416 		txmode = SF_DMA_MODE;
2417 		rxmode = SF_DMA_MODE;
2418 		priv->xstats.threshold = SF_DMA_MODE;
2419 	} else {
2420 		txmode = tc;
2421 		rxmode = SF_DMA_MODE;
2422 	}
2423 
2424 	/* configure all channels */
2425 	for (chan = 0; chan < rx_channels_count; chan++) {
2426 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2427 		u32 buf_size;
2428 
2429 		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2430 
2431 		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2432 				rxfifosz, qmode);
2433 
2434 		if (rx_q->xsk_pool) {
2435 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2436 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2437 					      buf_size,
2438 					      chan);
2439 		} else {
2440 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
2441 					      priv->dma_conf.dma_buf_sz,
2442 					      chan);
2443 		}
2444 	}
2445 
2446 	for (chan = 0; chan < tx_channels_count; chan++) {
2447 		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2448 
2449 		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2450 				txfifosz, qmode);
2451 	}
2452 }
2453 
2454 static void stmmac_xsk_request_timestamp(void *_priv)
2455 {
2456 	struct stmmac_metadata_request *meta_req = _priv;
2457 
2458 	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2459 	*meta_req->set_ic = true;
2460 }
2461 
2462 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2463 {
2464 	struct stmmac_xsk_tx_complete *tx_compl = _priv;
2465 	struct stmmac_priv *priv = tx_compl->priv;
2466 	struct dma_desc *desc = tx_compl->desc;
2467 	bool found = false;
2468 	u64 ns = 0;
2469 
2470 	if (!priv->hwts_tx_en)
2471 		return 0;
2472 
2473 	/* check tx tstamp status */
2474 	if (stmmac_get_tx_timestamp_status(priv, desc)) {
2475 		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2476 		found = true;
2477 	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2478 		found = true;
2479 	}
2480 
2481 	if (found) {
2482 		ns -= priv->plat->cdc_error_adj;
2483 		return ns_to_ktime(ns);
2484 	}
2485 
2486 	return 0;
2487 }
2488 
2489 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2490 {
2491 	struct timespec64 ts = ns_to_timespec64(launch_time);
2492 	struct stmmac_metadata_request *meta_req = _priv;
2493 
2494 	if (meta_req->tbs & STMMAC_TBS_EN)
2495 		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2496 				    ts.tv_nsec);
2497 }
2498 
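/* AF_XDP TX metadata callbacks: request a hardware TX timestamp on the
 * descriptor (forcing interrupt-on-completion), read the timestamp back on
 * completion, and program the TBS launch time when the descriptor supports it.
 */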
2499 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2500 	.tmo_request_timestamp		= stmmac_xsk_request_timestamp,
2501 	.tmo_fill_timestamp		= stmmac_xsk_fill_timestamp,
2502 	.tmo_request_launch_time	= stmmac_xsk_request_launch_time,
2503 };
2504 
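/**
 * stmmac_xdp_xmit_zc - transmit frames from the XSK TX ring (zero-copy)
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 * Description: the TX ring is shared with the slow path, so submission stops
 * early when the ring is nearly full or the carrier is down. Returns true when
 * there is budget left and the XSK TX ring has been drained.
 */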
2505 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2506 {
2507 	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2508 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2509 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2510 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
2511 	struct xsk_buff_pool *pool = tx_q->xsk_pool;
2512 	unsigned int entry = tx_q->cur_tx;
2513 	struct dma_desc *tx_desc = NULL;
2514 	struct xdp_desc xdp_desc;
2515 	bool work_done = true;
2516 	u32 tx_set_ic_bit = 0;
2517 
2518 	/* Avoids TX time-out as we are sharing with slow path */
2519 	txq_trans_cond_update(nq);
2520 
2521 	budget = min(budget, stmmac_tx_avail(priv, queue));
2522 
2523 	for (; budget > 0; budget--) {
2524 		struct stmmac_metadata_request meta_req;
2525 		struct xsk_tx_metadata *meta = NULL;
2526 		dma_addr_t dma_addr;
2527 		bool set_ic;
2528 
2529 		/* We are sharing with the slow path, so stop XSK TX desc submission when
2530 		 * the available TX ring space drops below the threshold.
2531 		 */
2532 		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2533 		    !netif_carrier_ok(priv->dev)) {
2534 			work_done = false;
2535 			break;
2536 		}
2537 
2538 		if (!xsk_tx_peek_desc(pool, &xdp_desc))
2539 			break;
2540 
2541 		if (priv->est && priv->est->enable &&
2542 		    priv->est->max_sdu[queue] &&
2543 		    xdp_desc.len > priv->est->max_sdu[queue]) {
2544 			priv->xstats.max_sdu_txq_drop[queue]++;
2545 			continue;
2546 		}
2547 
2548 		if (likely(priv->extend_desc))
2549 			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2550 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2551 			tx_desc = &tx_q->dma_entx[entry].basic;
2552 		else
2553 			tx_desc = tx_q->dma_tx + entry;
2554 
2555 		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2556 		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2557 		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2558 
2559 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2560 
2561 		/* To return XDP buffer to XSK pool, we simply call
2562 		 * xsk_tx_completed(), so we don't need to fill up
2563 		 * 'buf' and 'xdpf'.
2564 		 */
2565 		tx_q->tx_skbuff_dma[entry].buf = 0;
2566 		tx_q->xdpf[entry] = NULL;
2567 
2568 		tx_q->tx_skbuff_dma[entry].map_as_page = false;
2569 		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2570 		tx_q->tx_skbuff_dma[entry].last_segment = true;
2571 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2572 
2573 		stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2574 
2575 		tx_q->tx_count_frames++;
2576 
2577 		if (!priv->tx_coal_frames[queue])
2578 			set_ic = false;
2579 		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2580 			set_ic = true;
2581 		else
2582 			set_ic = false;
2583 
2584 		meta_req.priv = priv;
2585 		meta_req.tx_desc = tx_desc;
2586 		meta_req.set_ic = &set_ic;
2587 		meta_req.tbs = tx_q->tbs;
2588 		meta_req.edesc = &tx_q->dma_entx[entry];
2589 		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2590 					&meta_req);
2591 		if (set_ic) {
2592 			tx_q->tx_count_frames = 0;
2593 			stmmac_set_tx_ic(priv, tx_desc);
2594 			tx_set_ic_bit++;
2595 		}
2596 
2597 		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2598 				       csum, priv->mode, true, true,
2599 				       xdp_desc.len);
2600 
2601 		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2602 
2603 		xsk_tx_metadata_to_compl(meta,
2604 					 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2605 
2606 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2607 		entry = tx_q->cur_tx;
2608 	}
2609 	u64_stats_update_begin(&txq_stats->napi_syncp);
2610 	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2611 	u64_stats_update_end(&txq_stats->napi_syncp);
2612 
2613 	if (tx_desc) {
2614 		stmmac_flush_tx_descriptors(priv, queue);
2615 		xsk_tx_release(pool);
2616 	}
2617 
2618 	/* Return true only if both of the following conditions are met:
2619 	 *  a) TX Budget is still available
2620 	 *  b) work_done = true when XSK TX desc peek is empty (no more
2621 	 *     pending XSK TX for transmission)
2622 	 */
2623 	return !!budget && work_done;
2624 }
2625 
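/* On a threshold-related TX failure, and while the channel is not already in
 * Store-And-Forward mode, raise the DMA threshold in steps of 64 (up to 256)
 * and reprogram the operation mode for the channel.
 */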
2626 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2627 {
2628 	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2629 		tc += 64;
2630 
2631 		if (priv->plat->force_thresh_dma_mode)
2632 			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2633 		else
2634 			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2635 						      chan);
2636 
2637 		priv->xstats.threshold = tc;
2638 	}
2639 }
2640 
2641 /**
2642  * stmmac_tx_clean - to manage the transmission completion
2643  * @priv: driver private structure
2644  * @budget: napi budget limiting this functions packet handling
2645  * @queue: TX queue index
2646  * @pending_packets: signal to arm the TX coal timer
2647  * Description: it reclaims the transmit resources after transmission completes.
2648  * If some packets still need to be handled, due to TX coalescing, set
2649  * pending_packets to true to make NAPI arm the TX coal timer.
2650  */
2651 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2652 			   bool *pending_packets)
2653 {
2654 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2655 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2656 	unsigned int bytes_compl = 0, pkts_compl = 0;
2657 	unsigned int entry, xmits = 0, count = 0;
2658 	u32 tx_packets = 0, tx_errors = 0;
2659 
2660 	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2661 
2662 	tx_q->xsk_frames_done = 0;
2663 
2664 	entry = tx_q->dirty_tx;
2665 
2666 	/* Try to clean all completed TX frames in one shot */
2667 	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2668 		struct xdp_frame *xdpf;
2669 		struct sk_buff *skb;
2670 		struct dma_desc *p;
2671 		int status;
2672 
2673 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2674 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2675 			xdpf = tx_q->xdpf[entry];
2676 			skb = NULL;
2677 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2678 			xdpf = NULL;
2679 			skb = tx_q->tx_skbuff[entry];
2680 		} else {
2681 			xdpf = NULL;
2682 			skb = NULL;
2683 		}
2684 
2685 		if (priv->extend_desc)
2686 			p = (struct dma_desc *)(tx_q->dma_etx + entry);
2687 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2688 			p = &tx_q->dma_entx[entry].basic;
2689 		else
2690 			p = tx_q->dma_tx + entry;
2691 
2692 		status = stmmac_tx_status(priv,	&priv->xstats, p, priv->ioaddr);
2693 		/* Check if the descriptor is owned by the DMA */
2694 		if (unlikely(status & tx_dma_own))
2695 			break;
2696 
2697 		count++;
2698 
2699 		/* Make sure descriptor fields are read after reading
2700 		 * the own bit.
2701 		 */
2702 		dma_rmb();
2703 
2704 		/* Just consider the last segment and ...*/
2705 		if (likely(!(status & tx_not_ls))) {
2706 			/* ... verify the status error condition */
2707 			if (unlikely(status & tx_err)) {
2708 				tx_errors++;
2709 				if (unlikely(status & tx_err_bump_tc))
2710 					stmmac_bump_dma_threshold(priv, queue);
2711 			} else {
2712 				tx_packets++;
2713 			}
2714 			if (skb) {
2715 				stmmac_get_tx_hwtstamp(priv, p, skb);
2716 			} else if (tx_q->xsk_pool &&
2717 				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2718 				struct stmmac_xsk_tx_complete tx_compl = {
2719 					.priv = priv,
2720 					.desc = p,
2721 				};
2722 
2723 				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2724 							 &stmmac_xsk_tx_metadata_ops,
2725 							 &tx_compl);
2726 			}
2727 		}
2728 
2729 		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2730 			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2731 			if (tx_q->tx_skbuff_dma[entry].map_as_page)
2732 				dma_unmap_page(priv->device,
2733 					       tx_q->tx_skbuff_dma[entry].buf,
2734 					       tx_q->tx_skbuff_dma[entry].len,
2735 					       DMA_TO_DEVICE);
2736 			else
2737 				dma_unmap_single(priv->device,
2738 						 tx_q->tx_skbuff_dma[entry].buf,
2739 						 tx_q->tx_skbuff_dma[entry].len,
2740 						 DMA_TO_DEVICE);
2741 			tx_q->tx_skbuff_dma[entry].buf = 0;
2742 			tx_q->tx_skbuff_dma[entry].len = 0;
2743 			tx_q->tx_skbuff_dma[entry].map_as_page = false;
2744 		}
2745 
2746 		stmmac_clean_desc3(priv, tx_q, p);
2747 
2748 		tx_q->tx_skbuff_dma[entry].last_segment = false;
2749 		tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2750 
2751 		if (xdpf &&
2752 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2753 			xdp_return_frame_rx_napi(xdpf);
2754 			tx_q->xdpf[entry] = NULL;
2755 		}
2756 
2757 		if (xdpf &&
2758 		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2759 			xdp_return_frame(xdpf);
2760 			tx_q->xdpf[entry] = NULL;
2761 		}
2762 
2763 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2764 			tx_q->xsk_frames_done++;
2765 
2766 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2767 			if (likely(skb)) {
2768 				pkts_compl++;
2769 				bytes_compl += skb->len;
2770 				dev_consume_skb_any(skb);
2771 				tx_q->tx_skbuff[entry] = NULL;
2772 			}
2773 		}
2774 
2775 		stmmac_release_tx_desc(priv, p, priv->mode);
2776 
2777 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2778 	}
2779 	tx_q->dirty_tx = entry;
2780 
2781 	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2782 				  pkts_compl, bytes_compl);
2783 
2784 	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2785 								queue))) &&
2786 	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2787 
2788 		netif_dbg(priv, tx_done, priv->dev,
2789 			  "%s: restart transmit\n", __func__);
2790 		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2791 	}
2792 
2793 	if (tx_q->xsk_pool) {
2794 		bool work_done;
2795 
2796 		if (tx_q->xsk_frames_done)
2797 			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2798 
2799 		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2800 			xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2801 
2802 		/* For XSK TX, we try to send as many as possible.
2803 		 * If XSK work done (XSK TX desc empty and budget still
2804 		 * available), return "budget - 1" to reenable TX IRQ.
2805 		 * Else, return "budget" to make NAPI continue polling.
2806 		 */
2807 		work_done = stmmac_xdp_xmit_zc(priv, queue,
2808 					       STMMAC_XSK_TX_BUDGET_MAX);
2809 		if (work_done)
2810 			xmits = budget - 1;
2811 		else
2812 			xmits = budget;
2813 	}
2814 
2815 	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2816 		stmmac_restart_sw_lpi_timer(priv);
2817 
2818 	/* We still have pending packets, let's call for a new scheduling */
2819 	if (tx_q->dirty_tx != tx_q->cur_tx)
2820 		*pending_packets = true;
2821 
2822 	u64_stats_update_begin(&txq_stats->napi_syncp);
2823 	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2824 	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2825 	u64_stats_inc(&txq_stats->napi.tx_clean);
2826 	u64_stats_update_end(&txq_stats->napi_syncp);
2827 
2828 	priv->xstats.tx_errors += tx_errors;
2829 
2830 	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2831 
2832 	/* Combine decisions from TX clean and XSK TX */
2833 	return max(count, xmits);
2834 }
2835 
2836 /**
2837  * stmmac_tx_err - to manage the tx error
2838  * @priv: driver private structure
2839  * @chan: channel index
2840  * Description: it cleans the descriptors and restarts the transmission
2841  * in case of transmission errors.
2842  */
2843 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2844 {
2845 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2846 
2847 	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2848 
2849 	stmmac_stop_tx_dma(priv, chan);
2850 	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2851 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2852 	stmmac_reset_tx_queue(priv, chan);
2853 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2854 			    tx_q->dma_tx_phy, chan);
2855 	stmmac_start_tx_dma(priv, chan);
2856 
2857 	priv->xstats.tx_errors++;
2858 	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2859 }
2860 
2861 /**
2862  *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2863  *  @priv: driver private structure
2864  *  @txmode: TX operating mode
2865  *  @rxmode: RX operating mode
2866  *  @chan: channel index
2867  *  Description: it is used for configuring the DMA operation mode at
2868  *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2869  *  mode.
2870  */
2871 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2872 					  u32 rxmode, u32 chan)
2873 {
2874 	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2875 	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2876 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
2877 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
2878 	int rxfifosz = priv->plat->rx_fifo_size;
2879 	int txfifosz = priv->plat->tx_fifo_size;
2880 
2881 	if (rxfifosz == 0)
2882 		rxfifosz = priv->dma_cap.rx_fifo_size;
2883 	if (txfifosz == 0)
2884 		txfifosz = priv->dma_cap.tx_fifo_size;
2885 
2886 	/* Adjust for real per queue fifo size */
2887 	rxfifosz /= rx_channels_count;
2888 	txfifosz /= tx_channels_count;
2889 
2890 	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2891 	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2892 }
2893 
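/* Check the safety feature (data path protection) interrupt status; when the
 * hardware reports an error other than -EINVAL, trigger the global error
 * recovery and return true.
 */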
2894 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2895 {
2896 	int ret;
2897 
2898 	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2899 			priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2900 	if (ret && (ret != -EINVAL)) {
2901 		stmmac_global_err(priv);
2902 		return true;
2903 	}
2904 
2905 	return false;
2906 }
2907 
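/* Read the DMA interrupt status of @chan and, for each direction with pending
 * work, mask the corresponding DMA interrupt and schedule the matching NAPI
 * instance (the combined rxtx NAPI when the queue uses an XSK pool).
 */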
2908 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2909 {
2910 	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2911 						 &priv->xstats, chan, dir);
2912 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2913 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2914 	struct stmmac_channel *ch = &priv->channel[chan];
2915 	struct napi_struct *rx_napi;
2916 	struct napi_struct *tx_napi;
2917 	unsigned long flags;
2918 
2919 	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2920 	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2921 
2922 	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2923 		if (napi_schedule_prep(rx_napi)) {
2924 			spin_lock_irqsave(&ch->lock, flags);
2925 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2926 			spin_unlock_irqrestore(&ch->lock, flags);
2927 			__napi_schedule(rx_napi);
2928 		}
2929 	}
2930 
2931 	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2932 		if (napi_schedule_prep(tx_napi)) {
2933 			spin_lock_irqsave(&ch->lock, flags);
2934 			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2935 			spin_unlock_irqrestore(&ch->lock, flags);
2936 			__napi_schedule(tx_napi);
2937 		}
2938 	}
2939 
2940 	return status;
2941 }
2942 
2943 /**
2944  * stmmac_dma_interrupt - DMA ISR
2945  * @priv: driver private structure
2946  * Description: this is the DMA ISR. It is called by the main ISR.
2947  * It calls the dwmac dma routine and schedules the poll method in case some
2948  * work can be done.
2949  */
2950 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2951 {
2952 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
2953 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
2954 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
2955 				tx_channel_count : rx_channel_count;
2956 	u32 chan;
2957 	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2958 
2959 	/* Make sure we never check beyond our status buffer. */
2960 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2961 		channels_to_check = ARRAY_SIZE(status);
2962 
2963 	for (chan = 0; chan < channels_to_check; chan++)
2964 		status[chan] = stmmac_napi_check(priv, chan,
2965 						 DMA_DIR_RXTX);
2966 
2967 	for (chan = 0; chan < tx_channel_count; chan++) {
2968 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2969 			/* Try to bump up the dma threshold on this failure */
2970 			stmmac_bump_dma_threshold(priv, chan);
2971 		} else if (unlikely(status[chan] == tx_hard_error)) {
2972 			stmmac_tx_err(priv, chan);
2973 		}
2974 	}
2975 }
2976 
2977 /**
2978  * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
2979  * @priv: driver private structure
2980  * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2981  */
2982 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2983 {
2984 	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2985 			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2986 
2987 	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2988 
2989 	if (priv->dma_cap.rmon) {
2990 		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2991 		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2992 	} else
2993 		netdev_info(priv->dev, "No MAC Management Counters available\n");
2994 }
2995 
2996 /**
2997  * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2998  * @priv: driver private structure
2999  * Description:
3000  *  new GMAC chip generations have a new register to indicate the
3001  *  presence of the optional feature/functions.
3002  *  This can be also used to override the value passed through the
3003  *  platform and necessary for old MAC10/100 and GMAC chips.
3004  */
3005 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3006 {
3007 	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3008 }
3009 
3010 /**
3011  * stmmac_check_ether_addr - check if the MAC addr is valid
3012  * @priv: driver private structure
3013  * Description:
3014  * it verifies that the MAC address is valid; in case of failure it
3015  * generates a random MAC address.
3016  */
3017 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3018 {
3019 	u8 addr[ETH_ALEN];
3020 
3021 	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3022 		stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3023 		if (is_valid_ether_addr(addr))
3024 			eth_hw_addr_set(priv->dev, addr);
3025 		else
3026 			eth_hw_addr_random(priv->dev);
3027 		dev_info(priv->device, "device MAC address %pM\n",
3028 			 priv->dev->dev_addr);
3029 	}
3030 }
3031 
3032 /**
3033  * stmmac_init_dma_engine - DMA init.
3034  * @priv: driver private structure
3035  * Description:
3036  * It inits the DMA invoking the specific MAC/GMAC callback.
3037  * Some DMA parameters can be passed from the platform;
3038  * if these are not passed, a default is kept for the MAC or GMAC.
3039  */
3040 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3041 {
3042 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3043 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3044 	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3045 	struct stmmac_rx_queue *rx_q;
3046 	struct stmmac_tx_queue *tx_q;
3047 	u32 chan = 0;
3048 	int ret = 0;
3049 
3050 	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3051 		netdev_err(priv->dev, "Invalid DMA configuration\n");
3052 		return -EINVAL;
3053 	}
3054 
3055 	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3056 		priv->plat->dma_cfg->atds = 1;
3057 
3058 	ret = stmmac_reset(priv, priv->ioaddr);
3059 	if (ret) {
3060 		netdev_err(priv->dev, "Failed to reset the dma\n");
3061 		return ret;
3062 	}
3063 
3064 	/* DMA Configuration */
3065 	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3066 
3067 	if (priv->plat->axi)
3068 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3069 
3070 	/* DMA CSR Channel configuration */
3071 	for (chan = 0; chan < dma_csr_ch; chan++) {
3072 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3073 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3074 	}
3075 
3076 	/* DMA RX Channel Configuration */
3077 	for (chan = 0; chan < rx_channels_count; chan++) {
3078 		rx_q = &priv->dma_conf.rx_queue[chan];
3079 
3080 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3081 				    rx_q->dma_rx_phy, chan);
3082 
3083 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3084 				     (rx_q->buf_alloc_num *
3085 				      sizeof(struct dma_desc));
3086 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3087 				       rx_q->rx_tail_addr, chan);
3088 	}
3089 
3090 	/* DMA TX Channel Configuration */
3091 	for (chan = 0; chan < tx_channels_count; chan++) {
3092 		tx_q = &priv->dma_conf.tx_queue[chan];
3093 
3094 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3095 				    tx_q->dma_tx_phy, chan);
3096 
3097 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3098 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3099 				       tx_q->tx_tail_addr, chan);
3100 	}
3101 
3102 	return ret;
3103 }
3104 
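/* If TX timer coalescing is enabled, (re)arm the coalescing hrtimer for
 * @queue unless its NAPI instance is already scheduled; in that case cancel
 * any pending timer instead.
 */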
3105 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3106 {
3107 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3108 	u32 tx_coal_timer = priv->tx_coal_timer[queue];
3109 	struct stmmac_channel *ch;
3110 	struct napi_struct *napi;
3111 
3112 	if (!tx_coal_timer)
3113 		return;
3114 
3115 	ch = &priv->channel[tx_q->queue_index];
3116 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3117 
3118 	/* Arm timer only if napi is not already scheduled.
3119 	 * If napi is scheduled, try to cancel any pending timer; it will be armed
3120 	 * again by the next scheduled napi.
3121 	 */
3122 	if (unlikely(!napi_is_scheduled(napi)))
3123 		hrtimer_start(&tx_q->txtimer,
3124 			      STMMAC_COAL_TIMER(tx_coal_timer),
3125 			      HRTIMER_MODE_REL);
3126 	else
3127 		hrtimer_try_to_cancel(&tx_q->txtimer);
3128 }
3129 
3130 /**
3131  * stmmac_tx_timer - mitigation sw timer for tx.
3132  * @t: pointer to the expired hrtimer
3133  * Description:
3134  * This is the timer handler to directly invoke the stmmac_tx_clean.
3135  */
3136 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3137 {
3138 	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3139 	struct stmmac_priv *priv = tx_q->priv_data;
3140 	struct stmmac_channel *ch;
3141 	struct napi_struct *napi;
3142 
3143 	ch = &priv->channel[tx_q->queue_index];
3144 	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3145 
3146 	if (likely(napi_schedule_prep(napi))) {
3147 		unsigned long flags;
3148 
3149 		spin_lock_irqsave(&ch->lock, flags);
3150 		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3151 		spin_unlock_irqrestore(&ch->lock, flags);
3152 		__napi_schedule(napi);
3153 	}
3154 
3155 	return HRTIMER_NORESTART;
3156 }
3157 
3158 /**
3159  * stmmac_init_coalesce - init mitigation options.
3160  * @priv: driver private structure
3161  * Description:
3162  * This inits the coalesce parameters: i.e. timer rate,
3163  * timer handler and default threshold used for enabling the
3164  * interrupt on completion bit.
3165  */
3166 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3167 {
3168 	u32 tx_channel_count = priv->plat->tx_queues_to_use;
3169 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
3170 	u32 chan;
3171 
3172 	for (chan = 0; chan < tx_channel_count; chan++) {
3173 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3174 
3175 		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3176 		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3177 
3178 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3179 	}
3180 
3181 	for (chan = 0; chan < rx_channel_count; chan++)
3182 		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3183 }
3184 
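/* Program the TX and RX descriptor ring lengths into the DMA for every channel in use. */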
3185 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3186 {
3187 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
3188 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
3189 	u32 chan;
3190 
3191 	/* set TX ring length */
3192 	for (chan = 0; chan < tx_channels_count; chan++)
3193 		stmmac_set_tx_ring_len(priv, priv->ioaddr,
3194 				       (priv->dma_conf.dma_tx_size - 1), chan);
3195 
3196 	/* set RX ring length */
3197 	for (chan = 0; chan < rx_channels_count; chan++)
3198 		stmmac_set_rx_ring_len(priv, priv->ioaddr,
3199 				       (priv->dma_conf.dma_rx_size - 1), chan);
3200 }
3201 
3202 /**
3203  *  stmmac_set_tx_queue_weight - Set TX queue weight
3204  *  @priv: driver private structure
3205  *  Description: It is used for setting TX queues weight
3206  */
3207 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3208 {
3209 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3210 	u32 weight;
3211 	u32 queue;
3212 
3213 	for (queue = 0; queue < tx_queues_count; queue++) {
3214 		weight = priv->plat->tx_queues_cfg[queue].weight;
3215 		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3216 	}
3217 }
3218 
3219 /**
3220  *  stmmac_configure_cbs - Configure CBS in TX queue
3221  *  @priv: driver private structure
3222  *  Description: It is used for configuring CBS in AVB TX queues
3223  */
3224 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3225 {
3226 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3227 	u32 mode_to_use;
3228 	u32 queue;
3229 
3230 	/* queue 0 is reserved for legacy traffic */
3231 	for (queue = 1; queue < tx_queues_count; queue++) {
3232 		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3233 		if (mode_to_use == MTL_QUEUE_DCB)
3234 			continue;
3235 
3236 		stmmac_config_cbs(priv, priv->hw,
3237 				priv->plat->tx_queues_cfg[queue].send_slope,
3238 				priv->plat->tx_queues_cfg[queue].idle_slope,
3239 				priv->plat->tx_queues_cfg[queue].high_credit,
3240 				priv->plat->tx_queues_cfg[queue].low_credit,
3241 				queue);
3242 	}
3243 }
3244 
3245 /**
3246  *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3247  *  @priv: driver private structure
3248  *  Description: It is used for mapping RX queues to RX dma channels
3249  */
3250 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3251 {
3252 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3253 	u32 queue;
3254 	u32 chan;
3255 
3256 	for (queue = 0; queue < rx_queues_count; queue++) {
3257 		chan = priv->plat->rx_queues_cfg[queue].chan;
3258 		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3259 	}
3260 }
3261 
3262 /**
3263  *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3264  *  @priv: driver private structure
3265  *  Description: It is used for configuring the RX Queue Priority
3266  */
3267 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3268 {
3269 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3270 	u32 queue;
3271 	u32 prio;
3272 
3273 	for (queue = 0; queue < rx_queues_count; queue++) {
3274 		if (!priv->plat->rx_queues_cfg[queue].use_prio)
3275 			continue;
3276 
3277 		prio = priv->plat->rx_queues_cfg[queue].prio;
3278 		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3279 	}
3280 }
3281 
3282 /**
3283  *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3284  *  @priv: driver private structure
3285  *  Description: It is used for configuring the TX Queue Priority
3286  */
3287 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3288 {
3289 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3290 	u32 queue;
3291 	u32 prio;
3292 
3293 	for (queue = 0; queue < tx_queues_count; queue++) {
3294 		if (!priv->plat->tx_queues_cfg[queue].use_prio)
3295 			continue;
3296 
3297 		prio = priv->plat->tx_queues_cfg[queue].prio;
3298 		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3299 	}
3300 }
3301 
3302 /**
3303  *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3304  *  @priv: driver private structure
3305  *  Description: It is used for configuring the RX queue routing
3306  */
3307 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3308 {
3309 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3310 	u32 queue;
3311 	u8 packet;
3312 
3313 	for (queue = 0; queue < rx_queues_count; queue++) {
3314 		/* no specific packet type routing specified for the queue */
3315 		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3316 			continue;
3317 
3318 		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3319 		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3320 	}
3321 }
3322 
3323 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3324 {
3325 	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3326 		priv->rss.enable = false;
3327 		return;
3328 	}
3329 
3330 	if (priv->dev->features & NETIF_F_RXHASH)
3331 		priv->rss.enable = true;
3332 	else
3333 		priv->rss.enable = false;
3334 
3335 	stmmac_rss_configure(priv, priv->hw, &priv->rss,
3336 			     priv->plat->rx_queues_to_use);
3337 }
3338 
3339 /**
3340  *  stmmac_mtl_configuration - Configure MTL
3341  *  @priv: driver private structure
3342  *  Description: It is used for configurring MTL
3343  */
3344 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3345 {
3346 	u32 rx_queues_count = priv->plat->rx_queues_to_use;
3347 	u32 tx_queues_count = priv->plat->tx_queues_to_use;
3348 
3349 	if (tx_queues_count > 1)
3350 		stmmac_set_tx_queue_weight(priv);
3351 
3352 	/* Configure MTL RX algorithms */
3353 	if (rx_queues_count > 1)
3354 		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3355 				priv->plat->rx_sched_algorithm);
3356 
3357 	/* Configure MTL TX algorithms */
3358 	if (tx_queues_count > 1)
3359 		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3360 				priv->plat->tx_sched_algorithm);
3361 
3362 	/* Configure CBS in AVB TX queues */
3363 	if (tx_queues_count > 1)
3364 		stmmac_configure_cbs(priv);
3365 
3366 	/* Map RX MTL to DMA channels */
3367 	stmmac_rx_queue_dma_chan_map(priv);
3368 
3369 	/* Enable MAC RX Queues */
3370 	stmmac_mac_enable_rx_queues(priv);
3371 
3372 	/* Set RX priorities */
3373 	if (rx_queues_count > 1)
3374 		stmmac_mac_config_rx_queues_prio(priv);
3375 
3376 	/* Set TX priorities */
3377 	if (tx_queues_count > 1)
3378 		stmmac_mac_config_tx_queues_prio(priv);
3379 
3380 	/* Set RX routing */
3381 	if (rx_queues_count > 1)
3382 		stmmac_mac_config_rx_queues_routing(priv);
3383 
3384 	/* Receive Side Scaling */
3385 	if (rx_queues_count > 1)
3386 		stmmac_mac_config_rss(priv);
3387 }
3388 
3389 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3390 {
3391 	if (priv->dma_cap.asp) {
3392 		netdev_info(priv->dev, "Enabling Safety Features\n");
3393 		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3394 					  priv->plat->safety_feat_cfg);
3395 	} else {
3396 		netdev_info(priv->dev, "No Safety Features support found\n");
3397 	}
3398 }
3399 
3400 /**
3401  * stmmac_hw_setup - setup mac in a usable state.
3402  *  @dev : pointer to the device structure.
3403  *  Description:
3404  *  this is the main function to set up the HW in a usable state: the
3405  *  DMA engine is reset, the core registers are configured (e.g. AXI,
3406  *  checksum features, timers) and the DMA is ready to start receiving
3407  *  and transmitting.
3408  *  Return value:
3409  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3410  *  file on failure.
3411  */
3412 static int stmmac_hw_setup(struct net_device *dev)
3413 {
3414 	struct stmmac_priv *priv = netdev_priv(dev);
3415 	u32 rx_cnt = priv->plat->rx_queues_to_use;
3416 	u32 tx_cnt = priv->plat->tx_queues_to_use;
3417 	bool sph_en;
3418 	u32 chan;
3419 	int ret;
3420 
3421 	/* Make sure RX clock is enabled */
3422 	if (priv->hw->phylink_pcs)
3423 		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3424 
3425 	/* Note that clk_rx_i must be running for reset to complete. This
3426 	 * clock may also be required when setting the MAC address.
3427 	 *
3428 	 * Block the receive clock stop for LPI mode at the PHY in case
3429 	 * the link is established with EEE mode active.
3430 	 */
3431 	phylink_rx_clk_stop_block(priv->phylink);
3432 
3433 	/* DMA initialization and SW reset */
3434 	ret = stmmac_init_dma_engine(priv);
3435 	if (ret < 0) {
3436 		phylink_rx_clk_stop_unblock(priv->phylink);
3437 		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3438 			   __func__);
3439 		return ret;
3440 	}
3441 
3442 	/* Copy the MAC addr into the HW  */
3443 	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3444 	phylink_rx_clk_stop_unblock(priv->phylink);
3445 
3446 	/* PS and related bits will be programmed according to the speed */
3447 	if (priv->hw->pcs) {
3448 		int speed = priv->plat->mac_port_sel_speed;
3449 
3450 		if ((speed == SPEED_10) || (speed == SPEED_100) ||
3451 		    (speed == SPEED_1000)) {
3452 			priv->hw->ps = speed;
3453 		} else {
3454 			dev_warn(priv->device, "invalid port speed\n");
3455 			priv->hw->ps = 0;
3456 		}
3457 	}
3458 
3459 	/* Initialize the MAC Core */
3460 	stmmac_core_init(priv, priv->hw, dev);
3461 
3462 	/* Initialize MTL*/
3463 	stmmac_mtl_configuration(priv);
3464 
3465 	/* Initialize Safety Features */
3466 	stmmac_safety_feat_configuration(priv);
3467 
3468 	ret = stmmac_rx_ipc(priv, priv->hw);
3469 	if (!ret) {
3470 		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3471 		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3472 		priv->hw->rx_csum = 0;
3473 	}
3474 
3475 	/* Enable the MAC Rx/Tx */
3476 	stmmac_mac_set(priv, priv->ioaddr, true);
3477 
3478 	/* Set the HW DMA mode and the COE */
3479 	stmmac_dma_operation_mode(priv);
3480 
3481 	stmmac_mmc_setup(priv);
3482 
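	/* When RIWT (the RX interrupt watchdog) is in use, program each RX
	 * queue with its watchdog value (DEF_DMA_RIWT unless a value was
	 * already set, e.g. via ethtool coalescing) so the hardware can
	 * delay and coalesce RX interrupts.
	 */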
3483 	if (priv->use_riwt) {
3484 		u32 queue;
3485 
3486 		for (queue = 0; queue < rx_cnt; queue++) {
3487 			if (!priv->rx_riwt[queue])
3488 				priv->rx_riwt[queue] = DEF_DMA_RIWT;
3489 
3490 			stmmac_rx_watchdog(priv, priv->ioaddr,
3491 					   priv->rx_riwt[queue], queue);
3492 		}
3493 	}
3494 
3495 	if (priv->hw->pcs)
3496 		stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
3497 
3498 	/* set TX and RX rings length */
3499 	stmmac_set_rings_length(priv);
3500 
3501 	/* Enable TSO */
3502 	if (priv->tso) {
3503 		for (chan = 0; chan < tx_cnt; chan++) {
3504 			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3505 
3506 			/* TSO and TBS cannot co-exist */
3507 			if (tx_q->tbs & STMMAC_TBS_AVAIL)
3508 				continue;
3509 
3510 			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3511 		}
3512 	}
3513 
3514 	/* Enable Split Header */
3515 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3516 	for (chan = 0; chan < rx_cnt; chan++)
3517 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3518 
3519 
3520 	/* VLAN Tag Insertion */
3521 	if (priv->dma_cap.vlins)
3522 		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3523 
3524 	/* TBS */
3525 	for (chan = 0; chan < tx_cnt; chan++) {
3526 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3527 		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3528 
3529 		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3530 	}
3531 
3532 	/* Configure real RX and TX queues */
3533 	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3534 	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3535 
3536 	/* Start the ball rolling... */
3537 	stmmac_start_all_dma(priv);
3538 
3539 	phylink_rx_clk_stop_block(priv->phylink);
3540 	stmmac_set_hw_vlan_mode(priv, priv->hw);
3541 	phylink_rx_clk_stop_unblock(priv->phylink);
3542 
3543 	return 0;
3544 }
3545 
3546 static void stmmac_free_irq(struct net_device *dev,
3547 			    enum request_irq_err irq_err, int irq_idx)
3548 {
3549 	struct stmmac_priv *priv = netdev_priv(dev);
3550 	int j;
3551 
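	/* The cases below intentionally fall through: freeing starts at the
	 * point matching the last successful request in
	 * stmmac_request_irq_multi_msi() and then releases every IRQ that
	 * was requested before it.
	 */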
3552 	switch (irq_err) {
3553 	case REQ_IRQ_ERR_ALL:
3554 		irq_idx = priv->plat->tx_queues_to_use;
3555 		fallthrough;
3556 	case REQ_IRQ_ERR_TX:
3557 		for (j = irq_idx - 1; j >= 0; j--) {
3558 			if (priv->tx_irq[j] > 0) {
3559 				irq_set_affinity_hint(priv->tx_irq[j], NULL);
3560 				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3561 			}
3562 		}
3563 		irq_idx = priv->plat->rx_queues_to_use;
3564 		fallthrough;
3565 	case REQ_IRQ_ERR_RX:
3566 		for (j = irq_idx - 1; j >= 0; j--) {
3567 			if (priv->rx_irq[j] > 0) {
3568 				irq_set_affinity_hint(priv->rx_irq[j], NULL);
3569 				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3570 			}
3571 		}
3572 
3573 		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3574 			free_irq(priv->sfty_ue_irq, dev);
3575 		fallthrough;
3576 	case REQ_IRQ_ERR_SFTY_UE:
3577 		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3578 			free_irq(priv->sfty_ce_irq, dev);
3579 		fallthrough;
3580 	case REQ_IRQ_ERR_SFTY_CE:
3581 		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3582 			free_irq(priv->lpi_irq, dev);
3583 		fallthrough;
3584 	case REQ_IRQ_ERR_LPI:
3585 		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3586 			free_irq(priv->wol_irq, dev);
3587 		fallthrough;
3588 	case REQ_IRQ_ERR_SFTY:
3589 		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3590 			free_irq(priv->sfty_irq, dev);
3591 		fallthrough;
3592 	case REQ_IRQ_ERR_WOL:
3593 		free_irq(dev->irq, dev);
3594 		fallthrough;
3595 	case REQ_IRQ_ERR_MAC:
3596 	case REQ_IRQ_ERR_NO:
3597 		/* If MAC IRQ request error, no more IRQ to free */
3598 		break;
3599 	}
3600 }
3601 
3602 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3603 {
3604 	struct stmmac_priv *priv = netdev_priv(dev);
3605 	enum request_irq_err irq_err;
3606 	int irq_idx = 0;
3607 	char *int_name;
3608 	int ret;
3609 	int i;
3610 
3611 	/* For common interrupt */
3612 	int_name = priv->int_name_mac;
3613 	sprintf(int_name, "%s:%s", dev->name, "mac");
3614 	ret = request_irq(dev->irq, stmmac_mac_interrupt,
3615 			  0, int_name, dev);
3616 	if (unlikely(ret < 0)) {
3617 		netdev_err(priv->dev,
3618 			   "%s: alloc mac MSI %d (error: %d)\n",
3619 			   __func__, dev->irq, ret);
3620 		irq_err = REQ_IRQ_ERR_MAC;
3621 		goto irq_error;
3622 	}
3623 
3624 	/* Request the Wake IRQ in case a separate line
3625 	 * is used for WoL
3626 	 */
3627 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3628 		int_name = priv->int_name_wol;
3629 		sprintf(int_name, "%s:%s", dev->name, "wol");
3630 		ret = request_irq(priv->wol_irq,
3631 				  stmmac_mac_interrupt,
3632 				  0, int_name, dev);
3633 		if (unlikely(ret < 0)) {
3634 			netdev_err(priv->dev,
3635 				   "%s: alloc wol MSI %d (error: %d)\n",
3636 				   __func__, priv->wol_irq, ret);
3637 			irq_err = REQ_IRQ_ERR_WOL;
3638 			goto irq_error;
3639 		}
3640 	}
3641 
3642 	/* Request the LPI IRQ in case a separate line
3643 	 * is used for LPI
3644 	 */
3645 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3646 		int_name = priv->int_name_lpi;
3647 		sprintf(int_name, "%s:%s", dev->name, "lpi");
3648 		ret = request_irq(priv->lpi_irq,
3649 				  stmmac_mac_interrupt,
3650 				  0, int_name, dev);
3651 		if (unlikely(ret < 0)) {
3652 			netdev_err(priv->dev,
3653 				   "%s: alloc lpi MSI %d (error: %d)\n",
3654 				   __func__, priv->lpi_irq, ret);
3655 			irq_err = REQ_IRQ_ERR_LPI;
3656 			goto irq_error;
3657 		}
3658 	}
3659 
3660 	/* Request the common Safety Feature Correctible/Uncorrectible
3661 	 * Error line in case a separate line is used
3662 	 */
3663 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3664 		int_name = priv->int_name_sfty;
3665 		sprintf(int_name, "%s:%s", dev->name, "safety");
3666 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3667 				  0, int_name, dev);
3668 		if (unlikely(ret < 0)) {
3669 			netdev_err(priv->dev,
3670 				   "%s: alloc sfty MSI %d (error: %d)\n",
3671 				   __func__, priv->sfty_irq, ret);
3672 			irq_err = REQ_IRQ_ERR_SFTY;
3673 			goto irq_error;
3674 		}
3675 	}
3676 
3677 	/* Request the Safety Feature Correctible Error line in
3678 	 * case a separate line is used
3679 	 */
3680 	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3681 		int_name = priv->int_name_sfty_ce;
3682 		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3683 		ret = request_irq(priv->sfty_ce_irq,
3684 				  stmmac_safety_interrupt,
3685 				  0, int_name, dev);
3686 		if (unlikely(ret < 0)) {
3687 			netdev_err(priv->dev,
3688 				   "%s: alloc sfty ce MSI %d (error: %d)\n",
3689 				   __func__, priv->sfty_ce_irq, ret);
3690 			irq_err = REQ_IRQ_ERR_SFTY_CE;
3691 			goto irq_error;
3692 		}
3693 	}
3694 
3695 	/* Request the Safety Feature Uncorrectible Error line in
3696 	 * case a separate line is used
3697 	 */
3698 	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3699 		int_name = priv->int_name_sfty_ue;
3700 		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3701 		ret = request_irq(priv->sfty_ue_irq,
3702 				  stmmac_safety_interrupt,
3703 				  0, int_name, dev);
3704 		if (unlikely(ret < 0)) {
3705 			netdev_err(priv->dev,
3706 				   "%s: alloc sfty ue MSI %d (error: %d)\n",
3707 				   __func__, priv->sfty_ue_irq, ret);
3708 			irq_err = REQ_IRQ_ERR_SFTY_UE;
3709 			goto irq_error;
3710 		}
3711 	}
3712 
3713 	/* Request Rx MSI irq */
3714 	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3715 		if (i >= MTL_MAX_RX_QUEUES)
3716 			break;
3717 		if (priv->rx_irq[i] == 0)
3718 			continue;
3719 
3720 		int_name = priv->int_name_rx_irq[i];
3721 		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3722 		ret = request_irq(priv->rx_irq[i],
3723 				  stmmac_msi_intr_rx,
3724 				  0, int_name, &priv->dma_conf.rx_queue[i]);
3725 		if (unlikely(ret < 0)) {
3726 			netdev_err(priv->dev,
3727 				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
3728 				   __func__, i, priv->rx_irq[i], ret);
3729 			irq_err = REQ_IRQ_ERR_RX;
3730 			irq_idx = i;
3731 			goto irq_error;
3732 		}
3733 		irq_set_affinity_hint(priv->rx_irq[i],
3734 				      cpumask_of(i % num_online_cpus()));
3735 	}
3736 
3737 	/* Request Tx MSI irq */
3738 	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3739 		if (i >= MTL_MAX_TX_QUEUES)
3740 			break;
3741 		if (priv->tx_irq[i] == 0)
3742 			continue;
3743 
3744 		int_name = priv->int_name_tx_irq[i];
3745 		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3746 		ret = request_irq(priv->tx_irq[i],
3747 				  stmmac_msi_intr_tx,
3748 				  0, int_name, &priv->dma_conf.tx_queue[i]);
3749 		if (unlikely(ret < 0)) {
3750 			netdev_err(priv->dev,
3751 				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
3752 				   __func__, i, priv->tx_irq[i], ret);
3753 			irq_err = REQ_IRQ_ERR_TX;
3754 			irq_idx = i;
3755 			goto irq_error;
3756 		}
3757 		irq_set_affinity_hint(priv->tx_irq[i],
3758 				      cpumask_of(i % num_online_cpus()));
3759 	}
3760 
3761 	return 0;
3762 
3763 irq_error:
3764 	stmmac_free_irq(dev, irq_err, irq_idx);
3765 	return ret;
3766 }
3767 
3768 static int stmmac_request_irq_single(struct net_device *dev)
3769 {
3770 	struct stmmac_priv *priv = netdev_priv(dev);
3771 	enum request_irq_err irq_err;
3772 	int ret;
3773 
3774 	ret = request_irq(dev->irq, stmmac_interrupt,
3775 			  IRQF_SHARED, dev->name, dev);
3776 	if (unlikely(ret < 0)) {
3777 		netdev_err(priv->dev,
3778 			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3779 			   __func__, dev->irq, ret);
3780 		irq_err = REQ_IRQ_ERR_MAC;
3781 		goto irq_error;
3782 	}
3783 
3784 	/* Request the Wake IRQ in case a separate line
3785 	 * is used for WoL
3786 	 */
3787 	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3788 		ret = request_irq(priv->wol_irq, stmmac_interrupt,
3789 				  IRQF_SHARED, dev->name, dev);
3790 		if (unlikely(ret < 0)) {
3791 			netdev_err(priv->dev,
3792 				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3793 				   __func__, priv->wol_irq, ret);
3794 			irq_err = REQ_IRQ_ERR_WOL;
3795 			goto irq_error;
3796 		}
3797 	}
3798 
3799 	/* Request the IRQ lines */
3800 	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3801 		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3802 				  IRQF_SHARED, dev->name, dev);
3803 		if (unlikely(ret < 0)) {
3804 			netdev_err(priv->dev,
3805 				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3806 				   __func__, priv->lpi_irq, ret);
3807 			irq_err = REQ_IRQ_ERR_LPI;
3808 			goto irq_error;
3809 		}
3810 	}
3811 
3812 	/* Request the common Safety Feature Correctible/Uncorrectible
3813 	 * Error line in case a separate line is used
3814 	 */
3815 	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3816 		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3817 				  IRQF_SHARED, dev->name, dev);
3818 		if (unlikely(ret < 0)) {
3819 			netdev_err(priv->dev,
3820 				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3821 				   __func__, priv->sfty_irq, ret);
3822 			irq_err = REQ_IRQ_ERR_SFTY;
3823 			goto irq_error;
3824 		}
3825 	}
3826 
3827 	return 0;
3828 
3829 irq_error:
3830 	stmmac_free_irq(dev, irq_err, 0);
3831 	return ret;
3832 }
3833 
3834 static int stmmac_request_irq(struct net_device *dev)
3835 {
3836 	struct stmmac_priv *priv = netdev_priv(dev);
3837 	int ret;
3838 
3839 	/* Request the IRQ lines */
3840 	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3841 		ret = stmmac_request_irq_multi_msi(dev);
3842 	else
3843 		ret = stmmac_request_irq_single(dev);
3844 
3845 	return ret;
3846 }
3847 
3848 /**
3849  *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3850  *  @priv: driver private structure
3851  *  @mtu: MTU to setup the dma queue and buf with
3852  *  Description: Allocate and generate a dma_conf based on the provided MTU.
3853  *  Allocate the Tx/Rx DMA queue and init them.
3854  *  Return value:
3855  *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3856  */
3857 static struct stmmac_dma_conf *
3858 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3859 {
3860 	struct stmmac_dma_conf *dma_conf;
3861 	int chan, bfsize, ret;
3862 
3863 	dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3864 	if (!dma_conf) {
3865 		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3866 			   __func__);
3867 		return ERR_PTR(-ENOMEM);
3868 	}
3869 
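	/* Derive the RX buffer size from the MTU: the mode-specific helper
	 * may force a 16KiB buffer for large MTUs, otherwise the smallest
	 * standard buffer size that fits the MTU is chosen.
	 */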
3870 	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3871 	if (bfsize < 0)
3872 		bfsize = 0;
3873 
3874 	if (bfsize < BUF_SIZE_16KiB)
3875 		bfsize = stmmac_set_bfsize(mtu, 0);
3876 
3877 	dma_conf->dma_buf_sz = bfsize;
3878 	/* Chose the tx/rx size from the already defined one in the
3879 	 * priv struct. (if defined)
3880 	 */
3881 	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3882 	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3883 
3884 	if (!dma_conf->dma_tx_size)
3885 		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3886 	if (!dma_conf->dma_rx_size)
3887 		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3888 
3889 	/* Earlier check for TBS */
3890 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3891 		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3892 		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3893 
3894 		/* Setup per-TXQ tbs flag before TX descriptor alloc */
3895 		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3896 	}
3897 
3898 	ret = alloc_dma_desc_resources(priv, dma_conf);
3899 	if (ret < 0) {
3900 		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3901 			   __func__);
3902 		goto alloc_error;
3903 	}
3904 
3905 	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3906 	if (ret < 0) {
3907 		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3908 			   __func__);
3909 		goto init_error;
3910 	}
3911 
3912 	return dma_conf;
3913 
3914 init_error:
3915 	free_dma_desc_resources(priv, dma_conf);
3916 alloc_error:
3917 	kfree(dma_conf);
3918 	return ERR_PTR(ret);
3919 }
3920 
3921 /**
3922  *  __stmmac_open - open entry point of the driver
3923  *  @dev : pointer to the device structure.
3924  *  @dma_conf :  structure to take the dma data
3925  *  Description:
3926  *  This function is the open entry point of the driver.
3927  *  Return value:
3928  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3929  *  file on failure.
3930  */
3931 static int __stmmac_open(struct net_device *dev,
3932 			 struct stmmac_dma_conf *dma_conf)
3933 {
3934 	struct stmmac_priv *priv = netdev_priv(dev);
3935 	u32 chan;
3936 	int ret;
3937 
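	/* Carry the per-queue TBS enable flag over from the current
	 * configuration so that a TBS enable done earlier (e.g. via a
	 * qdisc offload) survives the dma_conf replacement below.
	 */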
3938 	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3939 		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3940 			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3941 	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3942 
3943 	stmmac_reset_queues_param(priv);
3944 
3945 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3946 	    priv->plat->serdes_powerup) {
3947 		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3948 		if (ret < 0) {
3949 			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3950 				   __func__);
3951 			goto init_error;
3952 		}
3953 	}
3954 
3955 	ret = stmmac_hw_setup(dev);
3956 	if (ret < 0) {
3957 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3958 		goto init_error;
3959 	}
3960 
3961 	stmmac_setup_ptp(priv);
3962 
3963 	stmmac_init_coalesce(priv);
3964 
3965 	phylink_start(priv->phylink);
3966 	/* We may have called phylink_speed_down before */
3967 	phylink_speed_up(priv->phylink);
3968 
3969 	ret = stmmac_request_irq(dev);
3970 	if (ret)
3971 		goto irq_error;
3972 
3973 	stmmac_enable_all_queues(priv);
3974 	netif_tx_start_all_queues(priv->dev);
3975 	stmmac_enable_all_dma_irq(priv);
3976 
3977 	return 0;
3978 
3979 irq_error:
3980 	phylink_stop(priv->phylink);
3981 
3982 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3983 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3984 
3985 	stmmac_release_ptp(priv);
3986 init_error:
3987 	return ret;
3988 }
3989 
3990 static int stmmac_open(struct net_device *dev)
3991 {
3992 	struct stmmac_priv *priv = netdev_priv(dev);
3993 	struct stmmac_dma_conf *dma_conf;
3994 	int ret;
3995 
3996 	/* Initialise the tx lpi timer, converting from msec to usec */
3997 	if (!priv->tx_lpi_timer)
3998 		priv->tx_lpi_timer = eee_timer * 1000;
3999 
4000 	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4001 	if (IS_ERR(dma_conf))
4002 		return PTR_ERR(dma_conf);
4003 
4004 	ret = pm_runtime_resume_and_get(priv->device);
4005 	if (ret < 0)
4006 		goto err_dma_resources;
4007 
4008 	ret = stmmac_init_phy(dev);
4009 	if (ret)
4010 		goto err_runtime_pm;
4011 
4012 	ret = __stmmac_open(dev, dma_conf);
4013 	if (ret)
4014 		goto err_disconnect_phy;
4015 
4016 	kfree(dma_conf);
4017 
4018 	return ret;
4019 
4020 err_disconnect_phy:
4021 	phylink_disconnect_phy(priv->phylink);
4022 err_runtime_pm:
4023 	pm_runtime_put(priv->device);
4024 err_dma_resources:
4025 	free_dma_desc_resources(priv, dma_conf);
4026 	kfree(dma_conf);
4027 	return ret;
4028 }
4029 
4030 static void __stmmac_release(struct net_device *dev)
4031 {
4032 	struct stmmac_priv *priv = netdev_priv(dev);
4033 	u32 chan;
4034 
4035 	/* If the PHY or MAC has WoL enabled, then the PHY will not be
4036 	 * suspended when phylink_stop() is called below. Set the PHY
4037 	 * to its slowest speed to save power.
4038 	 */
4039 	if (device_may_wakeup(priv->device))
4040 		phylink_speed_down(priv->phylink, false);
4041 
4042 	/* Stop and disconnect the PHY */
4043 	phylink_stop(priv->phylink);
4044 
4045 	stmmac_disable_all_queues(priv);
4046 
4047 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4048 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4049 
4050 	netif_tx_disable(dev);
4051 
4052 	/* Free the IRQ lines */
4053 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4054 
4055 	/* Stop TX/RX DMA and clear the descriptors */
4056 	stmmac_stop_all_dma(priv);
4057 
4058 	/* Release and free the Rx/Tx resources */
4059 	free_dma_desc_resources(priv, &priv->dma_conf);
4060 
4061 	/* Powerdown Serdes if there is */
4062 	if (priv->plat->serdes_powerdown)
4063 		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4064 
4065 	stmmac_release_ptp(priv);
4066 
4067 	if (stmmac_fpe_supported(priv))
4068 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4069 }
4070 
4071 /**
4072  *  stmmac_release - close entry point of the driver
4073  *  @dev : device pointer.
4074  *  Description:
4075  *  This is the stop entry point of the driver.
4076  */
4077 static int stmmac_release(struct net_device *dev)
4078 {
4079 	struct stmmac_priv *priv = netdev_priv(dev);
4080 
4081 	__stmmac_release(dev);
4082 
4083 	phylink_disconnect_phy(priv->phylink);
4084 	pm_runtime_put(priv->device);
4085 
4086 	return 0;
4087 }
4088 
4089 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4090 			       struct stmmac_tx_queue *tx_q)
4091 {
4092 	u16 tag = 0x0, inner_tag = 0x0;
4093 	u32 inner_type = 0x0;
4094 	struct dma_desc *p;
4095 
4096 	if (!priv->dma_cap.vlins)
4097 		return false;
4098 	if (!skb_vlan_tag_present(skb))
4099 		return false;
4100 	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4101 		inner_tag = skb_vlan_tag_get(skb);
4102 		inner_type = STMMAC_VLAN_INSERT;
4103 	}
4104 
4105 	tag = skb_vlan_tag_get(skb);
4106 
4107 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4108 		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4109 	else
4110 		p = &tx_q->dma_tx[tx_q->cur_tx];
4111 
4112 	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4113 		return false;
4114 
4115 	stmmac_set_tx_owner(priv, p);
4116 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4117 	return true;
4118 }
4119 
4120 /**
4121  *  stmmac_tso_allocator - fill TX descriptors for a TSO buffer
4122  *  @priv: driver private structure
4123  *  @des: buffer start address
4124  *  @total_len: total length to fill in descriptors
4125  *  @last_segment: condition for the last descriptor
4126  *  @queue: TX queue index
4127  *  Description:
4128  *  This function fills descriptors and requests new descriptors according
4129  *  to the buffer length to fill
4130  */
4131 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4132 				 int total_len, bool last_segment, u32 queue)
4133 {
4134 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4135 	struct dma_desc *desc;
4136 	u32 buff_size;
4137 	int tmp_len;
4138 
4139 	tmp_len = total_len;
4140 
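	/* Split the payload into TSO_MAX_BUFF_SIZE (16383 byte) chunks, one
	 * descriptor per chunk; e.g. a 40000 byte payload consumes three
	 * descriptors of 16383 + 16383 + 7234 bytes.
	 */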
4141 	while (tmp_len > 0) {
4142 		dma_addr_t curr_addr;
4143 
4144 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4145 						priv->dma_conf.dma_tx_size);
4146 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4147 
4148 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4149 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4150 		else
4151 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4152 
4153 		curr_addr = des + (total_len - tmp_len);
4154 		stmmac_set_desc_addr(priv, desc, curr_addr);
4155 		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4156 			    TSO_MAX_BUFF_SIZE : tmp_len;
4157 
4158 		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4159 				0, 1,
4160 				(last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4161 				0, 0);
4162 
4163 		tmp_len -= TSO_MAX_BUFF_SIZE;
4164 	}
4165 }
4166 
4167 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4168 {
4169 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4170 	int desc_size;
4171 
4172 	if (likely(priv->extend_desc))
4173 		desc_size = sizeof(struct dma_extended_desc);
4174 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4175 		desc_size = sizeof(struct dma_edesc);
4176 	else
4177 		desc_size = sizeof(struct dma_desc);
4178 
4179 	/* The own bit must be the last thing set when preparing the
4180 	 * descriptor, and a barrier is needed to make sure that all is
4181 	 * coherent before granting ownership to the DMA engine.
4182 	 */
4183 	wmb();
4184 
4185 	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4186 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4187 }
4188 
4189 /**
4190  *  stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4191  *  @skb : the socket buffer
4192  *  @dev : device pointer
4193  *  Description: this is the transmit function that is called on TSO frames
4194  *  (support available on GMAC4 and newer chips).
4195  *  Diagram below show the ring programming in case of TSO frames:
4196  *
4197  *  First Descriptor
4198  *   --------
4199  *   | DES0 |---> buffer1 = L2/L3/L4 header
4200  *   | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4201  *   |      |     width is 32-bit, but we never use it.
4202  *   |      |     Also can be used as the most-significant 8-bits or 16-bits of
4203  *   |      |     buffer1 address pointer if the DMA AXI address width is 40-bit
4204  *   |      |     or 48-bit, and we always use it.
4205  *   | DES2 |---> buffer1 len
4206  *   | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4207  *   --------
4208  *   --------
4209  *   | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4210  *   | DES1 |---> same as the First Descriptor
4211  *   | DES2 |---> buffer1 len
4212  *   | DES3 |
4213  *   --------
4214  *	|
4215  *     ...
4216  *	|
4217  *   --------
4218  *   | DES0 |---> buffer1 = Split TCP Payload
4219  *   | DES1 |---> same as the First Descriptor
4220  *   | DES2 |---> buffer1 len
4221  *   | DES3 |
4222  *   --------
4223  *
4224  * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4225  */
4226 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4227 {
4228 	struct dma_desc *desc, *first, *mss_desc = NULL;
4229 	struct stmmac_priv *priv = netdev_priv(dev);
4230 	unsigned int first_entry, tx_packets;
4231 	struct stmmac_txq_stats *txq_stats;
4232 	struct stmmac_tx_queue *tx_q;
4233 	u32 pay_len, mss, queue;
4234 	int i, first_tx, nfrags;
4235 	u8 proto_hdr_len, hdr;
4236 	dma_addr_t des;
4237 	bool set_ic;
4238 
4239 	/* Always insert VLAN tag to SKB payload for TSO frames.
4240 	 *
4241 	 * Never insert VLAN tag by HW, since segments splited by
4242 	 * TSO engine will be un-tagged by mistake.
4243 	 */
4244 	if (skb_vlan_tag_present(skb)) {
4245 		skb = __vlan_hwaccel_push_inside(skb);
4246 		if (unlikely(!skb)) {
4247 			priv->xstats.tx_dropped++;
4248 			return NETDEV_TX_OK;
4249 		}
4250 	}
4251 
4252 	nfrags = skb_shinfo(skb)->nr_frags;
4253 	queue = skb_get_queue_mapping(skb);
4254 
4255 	tx_q = &priv->dma_conf.tx_queue[queue];
4256 	txq_stats = &priv->xstats.txq_stats[queue];
4257 	first_tx = tx_q->cur_tx;
4258 
4259 	/* Compute header lengths */
4260 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4261 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4262 		hdr = sizeof(struct udphdr);
4263 	} else {
4264 		proto_hdr_len = skb_tcp_all_headers(skb);
4265 		hdr = tcp_hdrlen(skb);
4266 	}
4267 
4268 	/* A descriptor availability check based on this threshold should be safe enough */
4269 	if (unlikely(stmmac_tx_avail(priv, queue) <
4270 		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4271 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4272 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4273 								queue));
4274 			/* This is a hard error, log it. */
4275 			netdev_err(priv->dev,
4276 				   "%s: Tx Ring full when queue awake\n",
4277 				   __func__);
4278 		}
4279 		return NETDEV_TX_BUSY;
4280 	}
4281 
4282 	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4283 
4284 	mss = skb_shinfo(skb)->gso_size;
4285 
4286 	/* set new MSS value if needed */
4287 	if (mss != tx_q->mss) {
4288 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4289 			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4290 		else
4291 			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4292 
4293 		stmmac_set_mss(priv, mss_desc, mss);
4294 		tx_q->mss = mss;
4295 		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4296 						priv->dma_conf.dma_tx_size);
4297 		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4298 	}
4299 
4300 	if (netif_msg_tx_queued(priv)) {
4301 		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4302 			__func__, hdr, proto_hdr_len, pay_len, mss);
4303 		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4304 			skb->data_len);
4305 	}
4306 
4307 	first_entry = tx_q->cur_tx;
4308 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4309 
4310 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
4311 		desc = &tx_q->dma_entx[first_entry].basic;
4312 	else
4313 		desc = &tx_q->dma_tx[first_entry];
4314 	first = desc;
4315 
4316 	/* first descriptor: fill Headers on Buf1 */
4317 	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4318 			     DMA_TO_DEVICE);
4319 	if (dma_mapping_error(priv->device, des))
4320 		goto dma_map_err;
4321 
4322 	stmmac_set_desc_addr(priv, first, des);
4323 	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4324 			     (nfrags == 0), queue);
4325 
4326 	/* In case two or more DMA transmit descriptors are allocated for this
4327 	 * non-paged SKB data, the DMA buffer address should be saved to
4328 	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4329 	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4330 	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4331 	 * since the tail areas of the DMA buffer can be accessed by DMA engine
4332 	 * sooner or later.
4333 	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4334 	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4335 	 * this DMA buffer right after the DMA engine completely finishes the
4336 	 * full buffer transmission.
4337 	 */
4338 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4339 	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4340 	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4341 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4342 
4343 	/* Prepare fragments */
4344 	for (i = 0; i < nfrags; i++) {
4345 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4346 
4347 		des = skb_frag_dma_map(priv->device, frag, 0,
4348 				       skb_frag_size(frag),
4349 				       DMA_TO_DEVICE);
4350 		if (dma_mapping_error(priv->device, des))
4351 			goto dma_map_err;
4352 
4353 		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4354 				     (i == nfrags - 1), queue);
4355 
4356 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4357 		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4358 		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4359 		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4360 	}
4361 
4362 	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4363 
4364 	/* Only the last descriptor gets to point to the skb. */
4365 	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4366 	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4367 
4368 	/* Manage tx mitigation */
4369 	tx_packets = (tx_q->cur_tx + 1) - first_tx;
4370 	tx_q->tx_count_frames += tx_packets;
4371 
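	/* Decide whether the last descriptor of this frame requests a TX
	 * completion interrupt: always when a HW timestamp is pending,
	 * never when frame coalescing is disabled, otherwise roughly once
	 * every tx_coal_frames packets.
	 */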
4372 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4373 		set_ic = true;
4374 	else if (!priv->tx_coal_frames[queue])
4375 		set_ic = false;
4376 	else if (tx_packets > priv->tx_coal_frames[queue])
4377 		set_ic = true;
4378 	else if ((tx_q->tx_count_frames %
4379 		  priv->tx_coal_frames[queue]) < tx_packets)
4380 		set_ic = true;
4381 	else
4382 		set_ic = false;
4383 
4384 	if (set_ic) {
4385 		if (tx_q->tbs & STMMAC_TBS_AVAIL)
4386 			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4387 		else
4388 			desc = &tx_q->dma_tx[tx_q->cur_tx];
4389 
4390 		tx_q->tx_count_frames = 0;
4391 		stmmac_set_tx_ic(priv, desc);
4392 	}
4393 
4394 	/* We've used all descriptors we need for this skb, however,
4395 	 * advance cur_tx so that it references a fresh descriptor.
4396 	 * ndo_start_xmit will fill this descriptor the next time it's
4397 	 * called and stmmac_tx_clean may clean up to this descriptor.
4398 	 */
4399 	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4400 
4401 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4402 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4403 			  __func__);
4404 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4405 	}
4406 
4407 	u64_stats_update_begin(&txq_stats->q_syncp);
4408 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4409 	u64_stats_inc(&txq_stats->q.tx_tso_frames);
4410 	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4411 	if (set_ic)
4412 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4413 	u64_stats_update_end(&txq_stats->q_syncp);
4414 
4415 	if (priv->sarc_type)
4416 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4417 
4418 	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4419 		     priv->hwts_tx_en)) {
4420 		/* declare that device is doing timestamping */
4421 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4422 		stmmac_enable_tx_timestamp(priv, first);
4423 	}
4424 
4425 	/* Complete the first descriptor before granting the DMA */
4426 	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4427 				   tx_q->tx_skbuff_dma[first_entry].last_segment,
4428 				   hdr / 4, (skb->len - proto_hdr_len));
4429 
4430 	/* If context desc is used to change MSS */
4431 	if (mss_desc) {
4432 		/* Make sure that first descriptor has been completely
4433 		 * written, including its own bit. This is because MSS is
4434 		 * actually before first descriptor, so we need to make
4435 		 * sure that MSS's own bit is the last thing written.
4436 		 */
4437 		dma_wmb();
4438 		stmmac_set_tx_owner(priv, mss_desc);
4439 	}
4440 
4441 	if (netif_msg_pktdata(priv)) {
4442 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4443 			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4444 			tx_q->cur_tx, first, nfrags);
4445 		pr_info(">>> frame to be transmitted: ");
4446 		print_pkt(skb->data, skb_headlen(skb));
4447 	}
4448 
4449 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4450 	skb_tx_timestamp(skb);
4451 
4452 	stmmac_flush_tx_descriptors(priv, queue);
4453 	stmmac_tx_timer_arm(priv, queue);
4454 
4455 	return NETDEV_TX_OK;
4456 
4457 dma_map_err:
4458 	dev_err(priv->device, "Tx dma map failed\n");
4459 	dev_kfree_skb(skb);
4460 	priv->xstats.tx_dropped++;
4461 	return NETDEV_TX_OK;
4462 }
4463 
4464 /**
4465  * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4466  * @skb: socket buffer to check
4467  *
4468  * Check if a packet has an ethertype that will trigger the IP header checks
4469  * and IP/TCP checksum engine of the stmmac core.
4470  *
4471  * Return: true if the ethertype can trigger the checksum engine, false
4472  * otherwise
4473  */
4474 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4475 {
4476 	int depth = 0;
4477 	__be16 proto;
4478 
4479 	proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4480 				    &depth);
4481 
4482 	return (depth <= ETH_HLEN) &&
4483 		(proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4484 }
4485 
4486 /**
4487  *  stmmac_xmit - Tx entry point of the driver
4488  *  @skb : the socket buffer
4489  *  @dev : device pointer
4490  *  Description : this is the tx entry point of the driver.
4491  *  It programs the chain or the ring and supports oversized frames
4492  *  and SG feature.
4493  */
4494 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4495 {
4496 	unsigned int first_entry, tx_packets, enh_desc;
4497 	struct stmmac_priv *priv = netdev_priv(dev);
4498 	unsigned int nopaged_len = skb_headlen(skb);
4499 	int i, csum_insertion = 0, is_jumbo = 0;
4500 	u32 queue = skb_get_queue_mapping(skb);
4501 	int nfrags = skb_shinfo(skb)->nr_frags;
4502 	int gso = skb_shinfo(skb)->gso_type;
4503 	struct stmmac_txq_stats *txq_stats;
4504 	struct dma_edesc *tbs_desc = NULL;
4505 	struct dma_desc *desc, *first;
4506 	struct stmmac_tx_queue *tx_q;
4507 	bool has_vlan, set_ic;
4508 	int entry, first_tx;
4509 	dma_addr_t des;
4510 
4511 	tx_q = &priv->dma_conf.tx_queue[queue];
4512 	txq_stats = &priv->xstats.txq_stats[queue];
4513 	first_tx = tx_q->cur_tx;
4514 
4515 	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4516 		stmmac_stop_sw_lpi(priv);
4517 
4518 	/* Manage oversized TCP frames for GMAC4 device */
4519 	if (skb_is_gso(skb) && priv->tso) {
4520 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4521 			return stmmac_tso_xmit(skb, dev);
4522 		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4523 			return stmmac_tso_xmit(skb, dev);
4524 	}
4525 
4526 	if (priv->est && priv->est->enable &&
4527 	    priv->est->max_sdu[queue] &&
4528 	    skb->len > priv->est->max_sdu[queue]){
4529 		priv->xstats.max_sdu_txq_drop[queue]++;
4530 		goto max_sdu_err;
4531 	}
4532 
4533 	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4534 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4535 			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4536 								queue));
4537 			/* This is a hard error, log it. */
4538 			netdev_err(priv->dev,
4539 				   "%s: Tx Ring full when queue awake\n",
4540 				   __func__);
4541 		}
4542 		return NETDEV_TX_BUSY;
4543 	}
4544 
4545 	/* Check if VLAN can be inserted by HW */
4546 	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4547 
4548 	entry = tx_q->cur_tx;
4549 	first_entry = entry;
4550 	WARN_ON(tx_q->tx_skbuff[first_entry]);
4551 
4552 	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4553 	/* DWMAC IPs can be synthesized to support tx coe only for a few tx
4554 	 * queues. In that case, checksum offloading for those queues that don't
4555 	 * support tx coe needs to fall back to software checksum calculation.
4556 	 *
4557 	 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4558 	 * also have to be checksummed in software.
4559 	 */
4560 	if (csum_insertion &&
4561 	    (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4562 	     !stmmac_has_ip_ethertype(skb))) {
4563 		if (unlikely(skb_checksum_help(skb)))
4564 			goto dma_map_err;
4565 		csum_insertion = !csum_insertion;
4566 	}
4567 
4568 	if (likely(priv->extend_desc))
4569 		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4570 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4571 		desc = &tx_q->dma_entx[entry].basic;
4572 	else
4573 		desc = tx_q->dma_tx + entry;
4574 
4575 	first = desc;
4576 
4577 	if (has_vlan)
4578 		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4579 
4580 	enh_desc = priv->plat->enh_desc;
4581 	/* To program the descriptors according to the size of the frame */
4582 	if (enh_desc)
4583 		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4584 
4585 	if (unlikely(is_jumbo)) {
4586 		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4587 		if (unlikely(entry < 0) && (entry != -EINVAL))
4588 			goto dma_map_err;
4589 	}
4590 
4591 	for (i = 0; i < nfrags; i++) {
4592 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4593 		int len = skb_frag_size(frag);
4594 		bool last_segment = (i == (nfrags - 1));
4595 
4596 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4597 		WARN_ON(tx_q->tx_skbuff[entry]);
4598 
4599 		if (likely(priv->extend_desc))
4600 			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4601 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4602 			desc = &tx_q->dma_entx[entry].basic;
4603 		else
4604 			desc = tx_q->dma_tx + entry;
4605 
4606 		des = skb_frag_dma_map(priv->device, frag, 0, len,
4607 				       DMA_TO_DEVICE);
4608 		if (dma_mapping_error(priv->device, des))
4609 			goto dma_map_err; /* should reuse desc w/o issues */
4610 
4611 		tx_q->tx_skbuff_dma[entry].buf = des;
4612 
4613 		stmmac_set_desc_addr(priv, desc, des);
4614 
4615 		tx_q->tx_skbuff_dma[entry].map_as_page = true;
4616 		tx_q->tx_skbuff_dma[entry].len = len;
4617 		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4618 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4619 
4620 		/* Prepare the descriptor and set the own bit too */
4621 		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4622 				priv->mode, 1, last_segment, skb->len);
4623 	}
4624 
4625 	/* Only the last descriptor gets to point to the skb. */
4626 	tx_q->tx_skbuff[entry] = skb;
4627 	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4628 
4629 	/* According to the coalesce parameter the IC bit for the latest
4630 	 * segment is reset and the timer re-started to clean the tx status.
4631 	 * This approach also takes care of fragments: desc is the first
4632 	 * element when there is no SG.
4633 	 */
4634 	tx_packets = (entry + 1) - first_tx;
4635 	tx_q->tx_count_frames += tx_packets;
4636 
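	/* Same interrupt-on-completion policy as the TSO path: timestamped
	 * frames always request an interrupt, otherwise one is requested
	 * roughly every tx_coal_frames packets.
	 */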
4637 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4638 		set_ic = true;
4639 	else if (!priv->tx_coal_frames[queue])
4640 		set_ic = false;
4641 	else if (tx_packets > priv->tx_coal_frames[queue])
4642 		set_ic = true;
4643 	else if ((tx_q->tx_count_frames %
4644 		  priv->tx_coal_frames[queue]) < tx_packets)
4645 		set_ic = true;
4646 	else
4647 		set_ic = false;
4648 
4649 	if (set_ic) {
4650 		if (likely(priv->extend_desc))
4651 			desc = &tx_q->dma_etx[entry].basic;
4652 		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4653 			desc = &tx_q->dma_entx[entry].basic;
4654 		else
4655 			desc = &tx_q->dma_tx[entry];
4656 
4657 		tx_q->tx_count_frames = 0;
4658 		stmmac_set_tx_ic(priv, desc);
4659 	}
4660 
4661 	/* We've used all descriptors we need for this skb, however,
4662 	 * advance cur_tx so that it references a fresh descriptor.
4663 	 * ndo_start_xmit will fill this descriptor the next time it's
4664 	 * called and stmmac_tx_clean may clean up to this descriptor.
4665 	 */
4666 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4667 	tx_q->cur_tx = entry;
4668 
4669 	if (netif_msg_pktdata(priv)) {
4670 		netdev_dbg(priv->dev,
4671 			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4672 			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4673 			   entry, first, nfrags);
4674 
4675 		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4676 		print_pkt(skb->data, skb->len);
4677 	}
4678 
4679 	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4680 		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4681 			  __func__);
4682 		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4683 	}
4684 
4685 	u64_stats_update_begin(&txq_stats->q_syncp);
4686 	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4687 	if (set_ic)
4688 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4689 	u64_stats_update_end(&txq_stats->q_syncp);
4690 
4691 	if (priv->sarc_type)
4692 		stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4693 
4694 	/* Ready to fill the first descriptor and set the OWN bit w/o any
4695 	 * problems because all the descriptors are actually ready to be
4696 	 * passed to the DMA engine.
4697 	 */
4698 	if (likely(!is_jumbo)) {
4699 		bool last_segment = (nfrags == 0);
4700 
4701 		des = dma_map_single(priv->device, skb->data,
4702 				     nopaged_len, DMA_TO_DEVICE);
4703 		if (dma_mapping_error(priv->device, des))
4704 			goto dma_map_err;
4705 
4706 		tx_q->tx_skbuff_dma[first_entry].buf = des;
4707 		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4708 		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4709 
4710 		stmmac_set_desc_addr(priv, first, des);
4711 
4712 		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4713 		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4714 
4715 		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4716 			     priv->hwts_tx_en)) {
4717 			/* declare that device is doing timestamping */
4718 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4719 			stmmac_enable_tx_timestamp(priv, first);
4720 		}
4721 
4722 		/* Prepare the first descriptor setting the OWN bit too */
4723 		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4724 				csum_insertion, priv->mode, 0, last_segment,
4725 				skb->len);
4726 	}
4727 
4728 	if (tx_q->tbs & STMMAC_TBS_EN) {
4729 		struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4730 
4731 		tbs_desc = &tx_q->dma_entx[first_entry];
4732 		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4733 	}
4734 
4735 	stmmac_set_tx_owner(priv, first);
4736 
4737 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4738 
4739 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4740 	skb_tx_timestamp(skb);
4741 	stmmac_flush_tx_descriptors(priv, queue);
4742 	stmmac_tx_timer_arm(priv, queue);
4743 
4744 	return NETDEV_TX_OK;
4745 
4746 dma_map_err:
4747 	netdev_err(priv->dev, "Tx DMA map failed\n");
4748 max_sdu_err:
4749 	dev_kfree_skb(skb);
4750 	priv->xstats.tx_dropped++;
4751 	return NETDEV_TX_OK;
4752 }
4753 
4754 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4755 {
4756 	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4757 	__be16 vlan_proto = veth->h_vlan_proto;
4758 	u16 vlanid;
4759 
4760 	if ((vlan_proto == htons(ETH_P_8021Q) &&
4761 	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4762 	    (vlan_proto == htons(ETH_P_8021AD) &&
4763 	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4764 		/* pop the vlan tag */
4765 		vlanid = ntohs(veth->h_vlan_TCI);
4766 		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4767 		skb_pull(skb, VLAN_HLEN);
4768 		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4769 	}
4770 }
4771 
4772 /**
4773  * stmmac_rx_refill - refill used skb preallocated buffers
4774  * @priv: driver private structure
4775  * @queue: RX queue index
4776  * Description : this is to reallocate the buffers for the reception
4777  * process, which is based on zero-copy.
4778  */
4779 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4780 {
4781 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4782 	int dirty = stmmac_rx_dirty(priv, queue);
4783 	unsigned int entry = rx_q->dirty_rx;
4784 	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4785 
4786 	if (priv->dma_cap.host_dma_width <= 32)
4787 		gfp |= GFP_DMA32;
4788 
4789 	while (dirty-- > 0) {
4790 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4791 		struct dma_desc *p;
4792 		bool use_rx_wd;
4793 
4794 		if (priv->extend_desc)
4795 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
4796 		else
4797 			p = rx_q->dma_rx + entry;
4798 
4799 		if (!buf->page) {
4800 			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4801 			if (!buf->page)
4802 				break;
4803 		}
4804 
4805 		if (priv->sph && !buf->sec_page) {
4806 			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4807 			if (!buf->sec_page)
4808 				break;
4809 
4810 			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4811 		}
4812 
4813 		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4814 
4815 		stmmac_set_desc_addr(priv, p, buf->addr);
4816 		if (priv->sph)
4817 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4818 		else
4819 			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4820 		stmmac_refill_desc3(priv, rx_q, p);
4821 
4822 		rx_q->rx_count_frames++;
4823 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4824 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4825 			rx_q->rx_count_frames = 0;
4826 
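		/* use_rx_wd selects the RX watchdog path: when set, this
		 * descriptor does not request an immediate completion
		 * interrupt and the RIWT timer is relied upon instead.
		 */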
4827 		use_rx_wd = !priv->rx_coal_frames[queue];
4828 		use_rx_wd |= rx_q->rx_count_frames > 0;
4829 		if (!priv->use_riwt)
4830 			use_rx_wd = false;
4831 
4832 		dma_wmb();
4833 		stmmac_set_rx_owner(priv, p, use_rx_wd);
4834 
4835 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4836 	}
4837 	rx_q->dirty_rx = entry;
4838 	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4839 			    (rx_q->dirty_rx * sizeof(struct dma_desc));
4840 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4841 }
4842 
4843 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4844 				       struct dma_desc *p,
4845 				       int status, unsigned int len)
4846 {
4847 	unsigned int plen = 0, hlen = 0;
4848 	int coe = priv->hw->rx_csum;
4849 
4850 	/* Not first descriptor, buffer is always zero */
4851 	if (priv->sph && len)
4852 		return 0;
4853 
4854 	/* First descriptor, get split header length */
4855 	stmmac_get_rx_header_len(priv, p, &hlen);
4856 	if (priv->sph && hlen) {
4857 		priv->xstats.rx_split_hdr_pkt_n++;
4858 		return hlen;
4859 	}
4860 
4861 	/* First descriptor, not last descriptor and not split header */
4862 	if (status & rx_not_ls)
4863 		return priv->dma_conf.dma_buf_sz;
4864 
4865 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4866 
4867 	/* First descriptor and last descriptor and not split header */
4868 	return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4869 }
4870 
4871 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4872 				       struct dma_desc *p,
4873 				       int status, unsigned int len)
4874 {
4875 	int coe = priv->hw->rx_csum;
4876 	unsigned int plen = 0;
4877 
4878 	/* Not split header, buffer is not available */
4879 	if (!priv->sph)
4880 		return 0;
4881 
4882 	/* Not last descriptor */
4883 	if (status & rx_not_ls)
4884 		return priv->dma_conf.dma_buf_sz;
4885 
4886 	plen = stmmac_get_rx_frame_len(priv, p, coe);
4887 
4888 	/* Last descriptor */
4889 	return plen - len;
4890 }
4891 
4892 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4893 				struct xdp_frame *xdpf, bool dma_map)
4894 {
4895 	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4896 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4897 	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
4898 	unsigned int entry = tx_q->cur_tx;
4899 	struct dma_desc *tx_desc;
4900 	dma_addr_t dma_addr;
4901 	bool set_ic;
4902 
4903 	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4904 		return STMMAC_XDP_CONSUMED;
4905 
4906 	if (priv->est && priv->est->enable &&
4907 	    priv->est->max_sdu[queue] &&
4908 	    xdpf->len > priv->est->max_sdu[queue]) {
4909 		priv->xstats.max_sdu_txq_drop[queue]++;
4910 		return STMMAC_XDP_CONSUMED;
4911 	}
4912 
4913 	if (likely(priv->extend_desc))
4914 		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4915 	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4916 		tx_desc = &tx_q->dma_entx[entry].basic;
4917 	else
4918 		tx_desc = tx_q->dma_tx + entry;
4919 
4920 	if (dma_map) {
4921 		dma_addr = dma_map_single(priv->device, xdpf->data,
4922 					  xdpf->len, DMA_TO_DEVICE);
4923 		if (dma_mapping_error(priv->device, dma_addr))
4924 			return STMMAC_XDP_CONSUMED;
4925 
4926 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4927 	} else {
4928 		struct page *page = virt_to_page(xdpf->data);
4929 
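		/* XDP_TX: the frame still lives in an RX page_pool page that is
		 * already DMA mapped. The payload starts after the struct xdp_frame
		 * stored at the head of the buffer plus the frame's remaining
		 * headroom, so only a DMA sync is needed here.
		 */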
4930 		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4931 			   xdpf->headroom;
4932 		dma_sync_single_for_device(priv->device, dma_addr,
4933 					   xdpf->len, DMA_BIDIRECTIONAL);
4934 
4935 		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4936 	}
4937 
4938 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4939 	tx_q->tx_skbuff_dma[entry].map_as_page = false;
4940 	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4941 	tx_q->tx_skbuff_dma[entry].last_segment = true;
4942 	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4943 
4944 	tx_q->xdpf[entry] = xdpf;
4945 
4946 	stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4947 
4948 	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4949 			       csum, priv->mode, true, true,
4950 			       xdpf->len);
4951 
4952 	tx_q->tx_count_frames++;
4953 
4954 	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4955 		set_ic = true;
4956 	else
4957 		set_ic = false;
4958 
4959 	if (set_ic) {
4960 		tx_q->tx_count_frames = 0;
4961 		stmmac_set_tx_ic(priv, tx_desc);
4962 		u64_stats_update_begin(&txq_stats->q_syncp);
4963 		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4964 		u64_stats_update_end(&txq_stats->q_syncp);
4965 	}
4966 
4967 	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4968 
4969 	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4970 	tx_q->cur_tx = entry;
4971 
4972 	return STMMAC_XDP_TX;
4973 }
4974 
4975 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4976 				   int cpu)
4977 {
4978 	int index = cpu;
4979 
4980 	if (unlikely(index < 0))
4981 		index = 0;
4982 
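	/* Wrap the CPU id onto the TX queues in use (a modulo without a divide) */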
4983 	while (index >= priv->plat->tx_queues_to_use)
4984 		index -= priv->plat->tx_queues_to_use;
4985 
4986 	return index;
4987 }
4988 
4989 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4990 				struct xdp_buff *xdp)
4991 {
4992 	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4993 	int cpu = smp_processor_id();
4994 	struct netdev_queue *nq;
4995 	int queue;
4996 	int res;
4997 
4998 	if (unlikely(!xdpf))
4999 		return STMMAC_XDP_CONSUMED;
5000 
5001 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5002 	nq = netdev_get_tx_queue(priv->dev, queue);
5003 
5004 	__netif_tx_lock(nq, cpu);
5005 	/* Avoids TX time-out as we are sharing with slow path */
5006 	txq_trans_cond_update(nq);
5007 
5008 	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5009 	if (res == STMMAC_XDP_TX)
5010 		stmmac_flush_tx_descriptors(priv, queue);
5011 
5012 	__netif_tx_unlock(nq);
5013 
5014 	return res;
5015 }
5016 
5017 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5018 				 struct bpf_prog *prog,
5019 				 struct xdp_buff *xdp)
5020 {
5021 	u32 act;
5022 	int res;
5023 
5024 	act = bpf_prog_run_xdp(prog, xdp);
5025 	switch (act) {
5026 	case XDP_PASS:
5027 		res = STMMAC_XDP_PASS;
5028 		break;
5029 	case XDP_TX:
5030 		res = stmmac_xdp_xmit_back(priv, xdp);
5031 		break;
5032 	case XDP_REDIRECT:
5033 		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5034 			res = STMMAC_XDP_CONSUMED;
5035 		else
5036 			res = STMMAC_XDP_REDIRECT;
5037 		break;
5038 	default:
5039 		bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5040 		fallthrough;
5041 	case XDP_ABORTED:
5042 		trace_xdp_exception(priv->dev, prog, act);
5043 		fallthrough;
5044 	case XDP_DROP:
5045 		res = STMMAC_XDP_CONSUMED;
5046 		break;
5047 	}
5048 
5049 	return res;
5050 }
5051 
5052 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5053 					   struct xdp_buff *xdp)
5054 {
5055 	struct bpf_prog *prog;
5056 	int res;
5057 
5058 	prog = READ_ONCE(priv->xdp_prog);
5059 	if (!prog) {
5060 		res = STMMAC_XDP_PASS;
5061 		goto out;
5062 	}
5063 
5064 	res = __stmmac_xdp_run_prog(priv, prog, xdp);
5065 out:
5066 	return ERR_PTR(-res);
5067 }
5068 
5069 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5070 				   int xdp_status)
5071 {
5072 	int cpu = smp_processor_id();
5073 	int queue;
5074 
5075 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
5076 
5077 	if (xdp_status & STMMAC_XDP_TX)
5078 		stmmac_tx_timer_arm(priv, queue);
5079 
5080 	if (xdp_status & STMMAC_XDP_REDIRECT)
5081 		xdp_do_flush();
5082 }
5083 
5084 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5085 					       struct xdp_buff *xdp)
5086 {
5087 	unsigned int metasize = xdp->data - xdp->data_meta;
5088 	unsigned int datasize = xdp->data_end - xdp->data;
5089 	struct sk_buff *skb;
5090 
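	/* Zero-copy ends at XDP_PASS: the frame is copied out of the XSK
	 * buffer into a newly allocated skb so the buffer can be recycled.
	 */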
5091 	skb = napi_alloc_skb(&ch->rxtx_napi,
5092 			     xdp->data_end - xdp->data_hard_start);
5093 	if (unlikely(!skb))
5094 		return NULL;
5095 
5096 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
5097 	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5098 	if (metasize)
5099 		skb_metadata_set(skb, metasize);
5100 
5101 	return skb;
5102 }
5103 
5104 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5105 				   struct dma_desc *p, struct dma_desc *np,
5106 				   struct xdp_buff *xdp)
5107 {
5108 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5109 	struct stmmac_channel *ch = &priv->channel[queue];
5110 	unsigned int len = xdp->data_end - xdp->data;
5111 	enum pkt_hash_types hash_type;
5112 	int coe = priv->hw->rx_csum;
5113 	struct sk_buff *skb;
5114 	u32 hash;
5115 
5116 	skb = stmmac_construct_skb_zc(ch, xdp);
5117 	if (!skb) {
5118 		priv->xstats.rx_dropped++;
5119 		return;
5120 	}
5121 
5122 	stmmac_get_rx_hwtstamp(priv, p, np, skb);
5123 	if (priv->hw->hw_vlan_en)
5124 		/* MAC level stripping. */
5125 		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5126 	else
5127 		/* Driver level stripping. */
5128 		stmmac_rx_vlan(priv->dev, skb);
5129 	skb->protocol = eth_type_trans(skb, priv->dev);
5130 
5131 	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5132 		skb_checksum_none_assert(skb);
5133 	else
5134 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5135 
5136 	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5137 		skb_set_hash(skb, hash, hash_type);
5138 
5139 	skb_record_rx_queue(skb, queue);
5140 	napi_gro_receive(&ch->rxtx_napi, skb);
5141 
5142 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5143 	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5144 	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5145 	u64_stats_update_end(&rxq_stats->napi_syncp);
5146 }
5147 
5148 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5149 {
5150 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5151 	unsigned int entry = rx_q->dirty_rx;
5152 	struct dma_desc *rx_desc = NULL;
5153 	bool ret = true;
5154 
5155 	budget = min(budget, stmmac_rx_dirty(priv, queue));
5156 
5157 	while (budget-- > 0 && entry != rx_q->cur_rx) {
5158 		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5159 		dma_addr_t dma_addr;
5160 		bool use_rx_wd;
5161 
5162 		if (!buf->xdp) {
5163 			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5164 			if (!buf->xdp) {
5165 				ret = false;
5166 				break;
5167 			}
5168 		}
5169 
5170 		if (priv->extend_desc)
5171 			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5172 		else
5173 			rx_desc = rx_q->dma_rx + entry;
5174 
5175 		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5176 		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5177 		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5178 		stmmac_refill_desc3(priv, rx_q, rx_desc);
5179 
5180 		rx_q->rx_count_frames++;
5181 		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5182 		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5183 			rx_q->rx_count_frames = 0;
5184 
5185 		use_rx_wd = !priv->rx_coal_frames[queue];
5186 		use_rx_wd |= rx_q->rx_count_frames > 0;
5187 		if (!priv->use_riwt)
5188 			use_rx_wd = false;
5189 
5190 		dma_wmb();
5191 		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5192 
5193 		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5194 	}
5195 
5196 	if (rx_desc) {
5197 		rx_q->dirty_rx = entry;
5198 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5199 				     (rx_q->dirty_rx * sizeof(struct dma_desc));
5200 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5201 	}
5202 
5203 	return ret;
5204 }
5205 
5206 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5207 {
5208 	/* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk
5209 	 * represents the incoming packet, whereas the cb field in the same
5210 	 * structure stores driver-specific info. Thus, struct stmmac_xdp_buff
5211 	 * is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5212 	 */
5213 	return (struct stmmac_xdp_buff *)xdp;
5214 }
5215 
5216 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5217 {
5218 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5219 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5220 	unsigned int count = 0, error = 0, len = 0;
5221 	int dirty = stmmac_rx_dirty(priv, queue);
5222 	unsigned int next_entry = rx_q->cur_rx;
5223 	u32 rx_errors = 0, rx_dropped = 0;
5224 	unsigned int desc_size;
5225 	struct bpf_prog *prog;
5226 	bool failure = false;
5227 	int xdp_status = 0;
5228 	int status = 0;
5229 
5230 	if (netif_msg_rx_status(priv)) {
5231 		void *rx_head;
5232 
5233 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5234 		if (priv->extend_desc) {
5235 			rx_head = (void *)rx_q->dma_erx;
5236 			desc_size = sizeof(struct dma_extended_desc);
5237 		} else {
5238 			rx_head = (void *)rx_q->dma_rx;
5239 			desc_size = sizeof(struct dma_desc);
5240 		}
5241 
5242 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5243 				    rx_q->dma_rx_phy, desc_size);
5244 	}
5245 	while (count < limit) {
5246 		struct stmmac_rx_buffer *buf;
5247 		struct stmmac_xdp_buff *ctx;
5248 		unsigned int buf1_len = 0;
5249 		struct dma_desc *np, *p;
5250 		int entry;
5251 		int res;
5252 
5253 		if (!count && rx_q->state_saved) {
5254 			error = rx_q->state.error;
5255 			len = rx_q->state.len;
5256 		} else {
5257 			rx_q->state_saved = false;
5258 			error = 0;
5259 			len = 0;
5260 		}
5261 
5262 		if (count >= limit)
5263 			break;
5264 
5265 read_again:
5266 		buf1_len = 0;
5267 		entry = next_entry;
5268 		buf = &rx_q->buf_pool[entry];
5269 
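		/* Refill the XSK RX ring in batches of STMMAC_RX_FILL_BATCH to
		 * amortize descriptor refills and RX tail pointer updates.
		 */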
5270 		if (dirty >= STMMAC_RX_FILL_BATCH) {
5271 			failure = failure ||
5272 				  !stmmac_rx_refill_zc(priv, queue, dirty);
5273 			dirty = 0;
5274 		}
5275 
5276 		if (priv->extend_desc)
5277 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5278 		else
5279 			p = rx_q->dma_rx + entry;
5280 
5281 		/* read the status of the incoming frame */
5282 		status = stmmac_rx_status(priv, &priv->xstats, p);
5283 		/* check if managed by the DMA otherwise go ahead */
5284 		if (unlikely(status & dma_own))
5285 			break;
5286 
5287 		/* Prefetch the next RX descriptor */
5288 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5289 						priv->dma_conf.dma_rx_size);
5290 		next_entry = rx_q->cur_rx;
5291 
5292 		if (priv->extend_desc)
5293 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5294 		else
5295 			np = rx_q->dma_rx + next_entry;
5296 
5297 		prefetch(np);
5298 
5299 		/* Ensure a valid XSK buffer before proceeding */
5300 		if (!buf->xdp)
5301 			break;
5302 
5303 		if (priv->extend_desc)
5304 			stmmac_rx_extended_status(priv, &priv->xstats,
5305 						  rx_q->dma_erx + entry);
5306 		if (unlikely(status == discard_frame)) {
5307 			xsk_buff_free(buf->xdp);
5308 			buf->xdp = NULL;
5309 			dirty++;
5310 			error = 1;
5311 			if (!priv->hwts_rx_en)
5312 				rx_errors++;
5313 		}
5314 
5315 		if (unlikely(error && (status & rx_not_ls)))
5316 			goto read_again;
5317 		if (unlikely(error)) {
5318 			count++;
5319 			continue;
5320 		}
5321 
5322 		/* The XSK pool expects each RX frame to be 1:1 mapped to an XSK buffer */
5323 		if (likely(status & rx_not_ls)) {
5324 			xsk_buff_free(buf->xdp);
5325 			buf->xdp = NULL;
5326 			dirty++;
5327 			count++;
5328 			goto read_again;
5329 		}
5330 
5331 		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5332 		ctx->priv = priv;
5333 		ctx->desc = p;
5334 		ctx->ndesc = np;
5335 
5336 		/* XDP ZC frames only support primary buffers for now */
5337 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5338 		len += buf1_len;
5339 
5340 		/* ACS is disabled; strip manually. */
5341 		if (likely(!(status & rx_not_ls))) {
5342 			buf1_len -= ETH_FCS_LEN;
5343 			len -= ETH_FCS_LEN;
5344 		}
5345 
5346 		/* RX buffer is good and fits into an XSK pool buffer */
5347 		buf->xdp->data_end = buf->xdp->data + buf1_len;
5348 		xsk_buff_dma_sync_for_cpu(buf->xdp);
5349 
5350 		prog = READ_ONCE(priv->xdp_prog);
5351 		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5352 
5353 		switch (res) {
5354 		case STMMAC_XDP_PASS:
5355 			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5356 			xsk_buff_free(buf->xdp);
5357 			break;
5358 		case STMMAC_XDP_CONSUMED:
5359 			xsk_buff_free(buf->xdp);
5360 			rx_dropped++;
5361 			break;
5362 		case STMMAC_XDP_TX:
5363 		case STMMAC_XDP_REDIRECT:
5364 			xdp_status |= res;
5365 			break;
5366 		}
5367 
5368 		buf->xdp = NULL;
5369 		dirty++;
5370 		count++;
5371 	}
5372 
5373 	if (status & rx_not_ls) {
5374 		rx_q->state_saved = true;
5375 		rx_q->state.error = error;
5376 		rx_q->state.len = len;
5377 	}
5378 
5379 	stmmac_finalize_xdp_rx(priv, xdp_status);
5380 
5381 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5382 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5383 	u64_stats_update_end(&rxq_stats->napi_syncp);
5384 
5385 	priv->xstats.rx_dropped += rx_dropped;
5386 	priv->xstats.rx_errors += rx_errors;
5387 
5388 	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5389 		if (failure || stmmac_rx_dirty(priv, queue) > 0)
5390 			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5391 		else
5392 			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5393 
5394 		return (int)count;
5395 	}
5396 
5397 	return failure ? limit : (int)count;
5398 }
5399 
5400 /**
5401  * stmmac_rx - manage the receive process
5402  * @priv: driver private structure
5403  * @limit: napi budget
5404  * @queue: RX queue index.
5405  * Description: this is the function called by the napi poll method.
5406  * It gets all the frames inside the ring.
5407  */
5408 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5409 {
5410 	u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5411 	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5412 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5413 	struct stmmac_channel *ch = &priv->channel[queue];
5414 	unsigned int count = 0, error = 0, len = 0;
5415 	int status = 0, coe = priv->hw->rx_csum;
5416 	unsigned int next_entry = rx_q->cur_rx;
5417 	enum dma_data_direction dma_dir;
5418 	unsigned int desc_size;
5419 	struct sk_buff *skb = NULL;
5420 	struct stmmac_xdp_buff ctx;
5421 	int xdp_status = 0;
5422 	int bufsz;
5423 
5424 	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
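	/* bufsz, rounded up to whole pages, is the frame size later passed to
	 * xdp_init_buff() for this ring's buffers.
	 */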
5425 	bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5426 	limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5427 
5428 	if (netif_msg_rx_status(priv)) {
5429 		void *rx_head;
5430 
5431 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5432 		if (priv->extend_desc) {
5433 			rx_head = (void *)rx_q->dma_erx;
5434 			desc_size = sizeof(struct dma_extended_desc);
5435 		} else {
5436 			rx_head = (void *)rx_q->dma_rx;
5437 			desc_size = sizeof(struct dma_desc);
5438 		}
5439 
5440 		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5441 				    rx_q->dma_rx_phy, desc_size);
5442 	}
5443 	while (count < limit) {
5444 		unsigned int buf1_len = 0, buf2_len = 0;
5445 		enum pkt_hash_types hash_type;
5446 		struct stmmac_rx_buffer *buf;
5447 		struct dma_desc *np, *p;
5448 		int entry;
5449 		u32 hash;
5450 
5451 		if (!count && rx_q->state_saved) {
5452 			skb = rx_q->state.skb;
5453 			error = rx_q->state.error;
5454 			len = rx_q->state.len;
5455 		} else {
5456 			rx_q->state_saved = false;
5457 			skb = NULL;
5458 			error = 0;
5459 			len = 0;
5460 		}
5461 
5462 read_again:
5463 		if (count >= limit)
5464 			break;
5465 
5466 		buf1_len = 0;
5467 		buf2_len = 0;
5468 		entry = next_entry;
5469 		buf = &rx_q->buf_pool[entry];
5470 
5471 		if (priv->extend_desc)
5472 			p = (struct dma_desc *)(rx_q->dma_erx + entry);
5473 		else
5474 			p = rx_q->dma_rx + entry;
5475 
5476 		/* read the status of the incoming frame */
5477 		status = stmmac_rx_status(priv, &priv->xstats, p);
5478 		/* check if managed by the DMA otherwise go ahead */
5479 		if (unlikely(status & dma_own))
5480 			break;
5481 
5482 		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5483 						priv->dma_conf.dma_rx_size);
5484 		next_entry = rx_q->cur_rx;
5485 
5486 		if (priv->extend_desc)
5487 			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5488 		else
5489 			np = rx_q->dma_rx + next_entry;
5490 
5491 		prefetch(np);
5492 
5493 		if (priv->extend_desc)
5494 			stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5495 		if (unlikely(status == discard_frame)) {
5496 			page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5497 			buf->page = NULL;
5498 			error = 1;
5499 			if (!priv->hwts_rx_en)
5500 				rx_errors++;
5501 		}
5502 
5503 		if (unlikely(error && (status & rx_not_ls)))
5504 			goto read_again;
5505 		if (unlikely(error)) {
5506 			dev_kfree_skb(skb);
5507 			skb = NULL;
5508 			count++;
5509 			continue;
5510 		}
5511 
5512 		/* Buffer is good. Go on. */
5513 
5514 		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5515 		len += buf1_len;
5516 		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5517 		len += buf2_len;
5518 
5519 		/* ACS is disabled; strip manually. */
5520 		if (likely(!(status & rx_not_ls))) {
5521 			if (buf2_len) {
5522 				buf2_len -= ETH_FCS_LEN;
5523 				len -= ETH_FCS_LEN;
5524 			} else if (buf1_len) {
5525 				buf1_len -= ETH_FCS_LEN;
5526 				len -= ETH_FCS_LEN;
5527 			}
5528 		}
5529 
5530 		if (!skb) {
5531 			unsigned int pre_len, sync_len;
5532 
5533 			dma_sync_single_for_cpu(priv->device, buf->addr,
5534 						buf1_len, dma_dir);
5535 			net_prefetch(page_address(buf->page) +
5536 				     buf->page_offset);
5537 
5538 			xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5539 			xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5540 					 buf->page_offset, buf1_len, true);
5541 
5542 			pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5543 				  buf->page_offset;
5544 
5545 			ctx.priv = priv;
5546 			ctx.desc = p;
5547 			ctx.ndesc = np;
5548 
5549 			skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5550 			/* Due to xdp_adjust_tail: the DMA sync for_device must
5551 			 * cover the maximum length the CPU touched.
5552 			 */
5553 			sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5554 				   buf->page_offset;
5555 			sync_len = max(sync_len, pre_len);
5556 
5557 			/* For any verdict other than XDP_PASS */
5558 			if (IS_ERR(skb)) {
5559 				unsigned int xdp_res = -PTR_ERR(skb);
5560 
5561 				if (xdp_res & STMMAC_XDP_CONSUMED) {
5562 					page_pool_put_page(rx_q->page_pool,
5563 							   virt_to_head_page(ctx.xdp.data),
5564 							   sync_len, true);
5565 					buf->page = NULL;
5566 					rx_dropped++;
5567 
5568 					/* Clear skb: it holds the XDP verdict
5569 					 * encoded as an error pointer, not a
5570 					 * real skb.
5571 					 */
5571 					skb = NULL;
5572 
5573 					if (unlikely((status & rx_not_ls)))
5574 						goto read_again;
5575 
5576 					count++;
5577 					continue;
5578 				} else if (xdp_res & (STMMAC_XDP_TX |
5579 						      STMMAC_XDP_REDIRECT)) {
5580 					xdp_status |= xdp_res;
5581 					buf->page = NULL;
5582 					skb = NULL;
5583 					count++;
5584 					continue;
5585 				}
5586 			}
5587 		}
5588 
5589 		if (!skb) {
5590 			unsigned int head_pad_len;
5591 
5592 			/* XDP program may expand or reduce tail */
5593 			buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5594 
5595 			skb = napi_build_skb(page_address(buf->page),
5596 					     rx_q->napi_skb_frag_size);
5597 			if (!skb) {
5598 				page_pool_recycle_direct(rx_q->page_pool,
5599 							 buf->page);
5600 				rx_dropped++;
5601 				count++;
5602 				goto drain_data;
5603 			}
5604 
5605 			/* XDP program may adjust header */
5606 			head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5607 			skb_reserve(skb, head_pad_len);
5608 			skb_put(skb, buf1_len);
5609 			skb_mark_for_recycle(skb);
5610 			buf->page = NULL;
5611 		} else if (buf1_len) {
5612 			dma_sync_single_for_cpu(priv->device, buf->addr,
5613 						buf1_len, dma_dir);
5614 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5615 					buf->page, buf->page_offset, buf1_len,
5616 					priv->dma_conf.dma_buf_sz);
5617 			buf->page = NULL;
5618 		}
5619 
5620 		if (buf2_len) {
5621 			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5622 						buf2_len, dma_dir);
5623 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5624 					buf->sec_page, 0, buf2_len,
5625 					priv->dma_conf.dma_buf_sz);
5626 			buf->sec_page = NULL;
5627 		}
5628 
5629 drain_data:
5630 		if (likely(status & rx_not_ls))
5631 			goto read_again;
5632 		if (!skb)
5633 			continue;
5634 
5635 		/* Got entire packet into SKB. Finish it. */
5636 
5637 		stmmac_get_rx_hwtstamp(priv, p, np, skb);
5638 
5639 		if (priv->hw->hw_vlan_en)
5640 			/* MAC level stripping. */
5641 			stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5642 		else
5643 			/* Driver level stripping. */
5644 			stmmac_rx_vlan(priv->dev, skb);
5645 
5646 		skb->protocol = eth_type_trans(skb, priv->dev);
5647 
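		/* Only trust the HW checksum when RX COE is enabled, the frame
		 * carries an IP ethertype and the descriptor did not flag the
		 * checksum as not computed.
		 */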
5648 		if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5649 		    (status & csum_none))
5650 			skb_checksum_none_assert(skb);
5651 		else
5652 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5653 
5654 		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5655 			skb_set_hash(skb, hash, hash_type);
5656 
5657 		skb_record_rx_queue(skb, queue);
5658 		napi_gro_receive(&ch->rx_napi, skb);
5659 		skb = NULL;
5660 
5661 		rx_packets++;
5662 		rx_bytes += len;
5663 		count++;
5664 	}
5665 
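	/* A frame can span several NAPI polls: save the partially built skb
	 * and per-frame state so the next poll resumes where this one stopped.
	 */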
5666 	if (status & rx_not_ls || skb) {
5667 		rx_q->state_saved = true;
5668 		rx_q->state.skb = skb;
5669 		rx_q->state.error = error;
5670 		rx_q->state.len = len;
5671 	}
5672 
5673 	stmmac_finalize_xdp_rx(priv, xdp_status);
5674 
5675 	stmmac_rx_refill(priv, queue);
5676 
5677 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5678 	u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5679 	u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5680 	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5681 	u64_stats_update_end(&rxq_stats->napi_syncp);
5682 
5683 	priv->xstats.rx_dropped += rx_dropped;
5684 	priv->xstats.rx_errors += rx_errors;
5685 
5686 	return count;
5687 }
5688 
5689 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5690 {
5691 	struct stmmac_channel *ch =
5692 		container_of(napi, struct stmmac_channel, rx_napi);
5693 	struct stmmac_priv *priv = ch->priv_data;
5694 	struct stmmac_rxq_stats *rxq_stats;
5695 	u32 chan = ch->index;
5696 	int work_done;
5697 
5698 	rxq_stats = &priv->xstats.rxq_stats[chan];
5699 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5700 	u64_stats_inc(&rxq_stats->napi.poll);
5701 	u64_stats_update_end(&rxq_stats->napi_syncp);
5702 
5703 	work_done = stmmac_rx(priv, budget, chan);
5704 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5705 		unsigned long flags;
5706 
5707 		spin_lock_irqsave(&ch->lock, flags);
5708 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5709 		spin_unlock_irqrestore(&ch->lock, flags);
5710 	}
5711 
5712 	return work_done;
5713 }
5714 
5715 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5716 {
5717 	struct stmmac_channel *ch =
5718 		container_of(napi, struct stmmac_channel, tx_napi);
5719 	struct stmmac_priv *priv = ch->priv_data;
5720 	struct stmmac_txq_stats *txq_stats;
5721 	bool pending_packets = false;
5722 	u32 chan = ch->index;
5723 	int work_done;
5724 
5725 	txq_stats = &priv->xstats.txq_stats[chan];
5726 	u64_stats_update_begin(&txq_stats->napi_syncp);
5727 	u64_stats_inc(&txq_stats->napi.poll);
5728 	u64_stats_update_end(&txq_stats->napi_syncp);
5729 
5730 	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5731 	work_done = min(work_done, budget);
5732 
5733 	if (work_done < budget && napi_complete_done(napi, work_done)) {
5734 		unsigned long flags;
5735 
5736 		spin_lock_irqsave(&ch->lock, flags);
5737 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5738 		spin_unlock_irqrestore(&ch->lock, flags);
5739 	}
5740 
5741 	/* TX still has packets to handle; check if we need to arm the tx timer */
5742 	if (pending_packets)
5743 		stmmac_tx_timer_arm(priv, chan);
5744 
5745 	return work_done;
5746 }
5747 
5748 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5749 {
5750 	struct stmmac_channel *ch =
5751 		container_of(napi, struct stmmac_channel, rxtx_napi);
5752 	struct stmmac_priv *priv = ch->priv_data;
5753 	bool tx_pending_packets = false;
5754 	int rx_done, tx_done, rxtx_done;
5755 	struct stmmac_rxq_stats *rxq_stats;
5756 	struct stmmac_txq_stats *txq_stats;
5757 	u32 chan = ch->index;
5758 
5759 	rxq_stats = &priv->xstats.rxq_stats[chan];
5760 	u64_stats_update_begin(&rxq_stats->napi_syncp);
5761 	u64_stats_inc(&rxq_stats->napi.poll);
5762 	u64_stats_update_end(&rxq_stats->napi_syncp);
5763 
5764 	txq_stats = &priv->xstats.txq_stats[chan];
5765 	u64_stats_update_begin(&txq_stats->napi_syncp);
5766 	u64_stats_inc(&txq_stats->napi.poll);
5767 	u64_stats_update_end(&txq_stats->napi_syncp);
5768 
5769 	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5770 	tx_done = min(tx_done, budget);
5771 
5772 	rx_done = stmmac_rx_zc(priv, budget, chan);
5773 
5774 	rxtx_done = max(tx_done, rx_done);
5775 
5776 	/* If either TX or RX work is not complete, return budget
5777 	 * and keep polling
5778 	 */
5779 	if (rxtx_done >= budget)
5780 		return budget;
5781 
5782 	/* all work done, exit the polling mode */
5783 	if (napi_complete_done(napi, rxtx_done)) {
5784 		unsigned long flags;
5785 
5786 		spin_lock_irqsave(&ch->lock, flags);
5787 		/* Both RX and TX work are complete,
5788 		 * so enable both RX & TX IRQs.
5789 		 */
5790 		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5791 		spin_unlock_irqrestore(&ch->lock, flags);
5792 	}
5793 
5794 	/* TX still has packets to handle; check if we need to arm the tx timer */
5795 	if (tx_pending_packets)
5796 		stmmac_tx_timer_arm(priv, chan);
5797 
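	/* Once napi_complete_done() has been called we must not report the
	 * full budget back to the NAPI core.
	 */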
5798 	return min(rxtx_done, budget - 1);
5799 }
5800 
5801 /**
5802  *  stmmac_tx_timeout
5803  *  @dev : Pointer to net device structure
5804  *  @txqueue: the index of the hanging transmit queue
5805  *  Description: this function is called when a packet transmission fails to
5806  *   complete within a reasonable time. The driver will mark the error in the
5807  *   netdev structure and arrange for the device to be reset to a sane state
5808  *   in order to transmit a new packet.
5809  */
5810 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5811 {
5812 	struct stmmac_priv *priv = netdev_priv(dev);
5813 
5814 	stmmac_global_err(priv);
5815 }
5816 
5817 /**
5818  *  stmmac_set_rx_mode - entry point for multicast addressing
5819  *  @dev : pointer to the device structure
5820  *  Description:
5821  *  This function is a driver entry point which gets called by the kernel
5822  *  whenever multicast addresses must be enabled/disabled.
5823  *  Return value:
5824  *  void.
5825  *
5826  *  FIXME: This may need RXC to be running, but it may be called with BH
5827  *  disabled, which means we can't call phylink_rx_clk_stop*().
5828  */
5829 static void stmmac_set_rx_mode(struct net_device *dev)
5830 {
5831 	struct stmmac_priv *priv = netdev_priv(dev);
5832 
5833 	stmmac_set_filter(priv, priv->hw, dev);
5834 }
5835 
5836 /**
5837  *  stmmac_change_mtu - entry point to change MTU size for the device.
5838  *  @dev : device pointer.
5839  *  @new_mtu : the new MTU size for the device.
5840  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
5841  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
5842  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
5843  *  Return value:
5844  *  0 on success and an appropriate (-)ve integer as defined in errno.h
5845  *  file on failure.
5846  */
5847 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5848 {
5849 	struct stmmac_priv *priv = netdev_priv(dev);
5850 	int txfifosz = priv->plat->tx_fifo_size;
5851 	struct stmmac_dma_conf *dma_conf;
5852 	const int mtu = new_mtu;
5853 	int ret;
5854 
5855 	if (txfifosz == 0)
5856 		txfifosz = priv->dma_cap.tx_fifo_size;
5857 
5858 	txfifosz /= priv->plat->tx_queues_to_use;
5859 
5860 	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5861 		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5862 		return -EINVAL;
5863 	}
5864 
5865 	new_mtu = STMMAC_ALIGN(new_mtu);
5866 
5867 	/* If condition true, FIFO is too small or MTU too large */
5868 	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5869 		return -EINVAL;
5870 
5871 	if (netif_running(dev)) {
5872 		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5873 		/* Try to allocate the new DMA conf with the new mtu */
5874 		dma_conf = stmmac_setup_dma_desc(priv, mtu);
5875 		if (IS_ERR(dma_conf)) {
5876 			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5877 				   mtu);
5878 			return PTR_ERR(dma_conf);
5879 		}
5880 
5881 		__stmmac_release(dev);
5882 
5883 		ret = __stmmac_open(dev, dma_conf);
5884 		if (ret) {
5885 			free_dma_desc_resources(priv, dma_conf);
5886 			kfree(dma_conf);
5887 			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5888 			return ret;
5889 		}
5890 
5891 		kfree(dma_conf);
5892 
5893 		stmmac_set_rx_mode(dev);
5894 	}
5895 
5896 	WRITE_ONCE(dev->mtu, mtu);
5897 	netdev_update_features(dev);
5898 
5899 	return 0;
5900 }
5901 
5902 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5903 					     netdev_features_t features)
5904 {
5905 	struct stmmac_priv *priv = netdev_priv(dev);
5906 
5907 	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5908 		features &= ~NETIF_F_RXCSUM;
5909 
5910 	if (!priv->plat->tx_coe)
5911 		features &= ~NETIF_F_CSUM_MASK;
5912 
5913 	/* Some GMAC devices have a bugged Jumbo frame support that
5914 	 * needs to have the Tx COE disabled for oversized frames
5915 	 * (due to limited buffer sizes). In this case we disable
5916 	 * the TX csum insertion in the TDES and not use SF.
5917 	 */
5918 	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5919 		features &= ~NETIF_F_CSUM_MASK;
5920 
5921 	/* Disable tso if asked by ethtool */
5922 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5923 		if (features & NETIF_F_TSO)
5924 			priv->tso = true;
5925 		else
5926 			priv->tso = false;
5927 	}
5928 
5929 	return features;
5930 }
5931 
5932 static int stmmac_set_features(struct net_device *netdev,
5933 			       netdev_features_t features)
5934 {
5935 	struct stmmac_priv *priv = netdev_priv(netdev);
5936 
5937 	/* Keep the COE Type in case checksum offload is supported */
5938 	if (features & NETIF_F_RXCSUM)
5939 		priv->hw->rx_csum = priv->plat->rx_coe;
5940 	else
5941 		priv->hw->rx_csum = 0;
5942 	/* No check needed because rx_coe has already been set and will be
5943 	 * fixed up if there is an issue.
5944 	 */
5945 	stmmac_rx_ipc(priv, priv->hw);
5946 
5947 	if (priv->sph_cap) {
5948 		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5949 		u32 chan;
5950 
5951 		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5952 			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5953 	}
5954 
5955 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
5956 		priv->hw->hw_vlan_en = true;
5957 	else
5958 		priv->hw->hw_vlan_en = false;
5959 
5960 	phylink_rx_clk_stop_block(priv->phylink);
5961 	stmmac_set_hw_vlan_mode(priv, priv->hw);
5962 	phylink_rx_clk_stop_unblock(priv->phylink);
5963 
5964 	return 0;
5965 }
5966 
5967 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5968 {
5969 	u32 rx_cnt = priv->plat->rx_queues_to_use;
5970 	u32 tx_cnt = priv->plat->tx_queues_to_use;
5971 	u32 queues_count;
5972 	u32 queue;
5973 	bool xmac;
5974 
5975 	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5976 	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5977 
5978 	if (priv->irq_wake)
5979 		pm_wakeup_event(priv->device, 0);
5980 
5981 	if (priv->dma_cap.estsel)
5982 		stmmac_est_irq_status(priv, priv, priv->dev,
5983 				      &priv->xstats, tx_cnt);
5984 
5985 	if (stmmac_fpe_supported(priv))
5986 		stmmac_fpe_irq_status(priv);
5987 
5988 	/* To handle GMAC own interrupts */
5989 	if ((priv->plat->has_gmac) || xmac) {
5990 		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5991 
5992 		if (unlikely(status)) {
5993 			/* For LPI we need to save the tx status */
5994 			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5995 				priv->tx_path_in_lpi_mode = true;
5996 			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5997 				priv->tx_path_in_lpi_mode = false;
5998 		}
5999 
6000 		for (queue = 0; queue < queues_count; queue++)
6001 			stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6002 
6003 		/* PCS link status */
6004 		if (priv->hw->pcs &&
6005 		    !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6006 			if (priv->xstats.pcs_link)
6007 				netif_carrier_on(priv->dev);
6008 			else
6009 				netif_carrier_off(priv->dev);
6010 		}
6011 
6012 		stmmac_timestamp_interrupt(priv, priv);
6013 	}
6014 }
6015 
6016 /**
6017  *  stmmac_interrupt - main ISR
6018  *  @irq: interrupt number.
6019  *  @dev_id: to pass the net device pointer.
6020  *  Description: this is the main driver interrupt service routine.
6021  *  It can call:
6022  *  o DMA service routine (to manage incoming frame reception and transmission
6023  *    status)
6024  *  o Core interrupts to manage: remote wake-up, management counter, LPI
6025  *    interrupts.
6026  */
6027 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6028 {
6029 	struct net_device *dev = (struct net_device *)dev_id;
6030 	struct stmmac_priv *priv = netdev_priv(dev);
6031 
6032 	/* Check if adapter is up */
6033 	if (test_bit(STMMAC_DOWN, &priv->state))
6034 		return IRQ_HANDLED;
6035 
6036 	/* Check ASP error if it isn't delivered via an individual IRQ */
6037 	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6038 		return IRQ_HANDLED;
6039 
6040 	/* To handle Common interrupts */
6041 	stmmac_common_interrupt(priv);
6042 
6043 	/* To handle DMA interrupts */
6044 	stmmac_dma_interrupt(priv);
6045 
6046 	return IRQ_HANDLED;
6047 }
6048 
6049 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6050 {
6051 	struct net_device *dev = (struct net_device *)dev_id;
6052 	struct stmmac_priv *priv = netdev_priv(dev);
6053 
6054 	/* Check if adapter is up */
6055 	if (test_bit(STMMAC_DOWN, &priv->state))
6056 		return IRQ_HANDLED;
6057 
6058 	/* To handle Common interrupts */
6059 	stmmac_common_interrupt(priv);
6060 
6061 	return IRQ_HANDLED;
6062 }
6063 
6064 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6065 {
6066 	struct net_device *dev = (struct net_device *)dev_id;
6067 	struct stmmac_priv *priv = netdev_priv(dev);
6068 
6069 	/* Check if adapter is up */
6070 	if (test_bit(STMMAC_DOWN, &priv->state))
6071 		return IRQ_HANDLED;
6072 
6073 	/* Check if a fatal error happened */
6074 	stmmac_safety_feat_interrupt(priv);
6075 
6076 	return IRQ_HANDLED;
6077 }
6078 
6079 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6080 {
6081 	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6082 	struct stmmac_dma_conf *dma_conf;
6083 	int chan = tx_q->queue_index;
6084 	struct stmmac_priv *priv;
6085 	int status;
6086 
6087 	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6088 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6089 
6090 	/* Check if adapter is up */
6091 	if (test_bit(STMMAC_DOWN, &priv->state))
6092 		return IRQ_HANDLED;
6093 
6094 	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6095 
6096 	if (unlikely(status & tx_hard_error_bump_tc)) {
6097 		/* Try to bump up the dma threshold on this failure */
6098 		stmmac_bump_dma_threshold(priv, chan);
6099 	} else if (unlikely(status == tx_hard_error)) {
6100 		stmmac_tx_err(priv, chan);
6101 	}
6102 
6103 	return IRQ_HANDLED;
6104 }
6105 
6106 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6107 {
6108 	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6109 	struct stmmac_dma_conf *dma_conf;
6110 	int chan = rx_q->queue_index;
6111 	struct stmmac_priv *priv;
6112 
6113 	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6114 	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6115 
6116 	/* Check if adapter is up */
6117 	if (test_bit(STMMAC_DOWN, &priv->state))
6118 		return IRQ_HANDLED;
6119 
6120 	stmmac_napi_check(priv, chan, DMA_DIR_RX);
6121 
6122 	return IRQ_HANDLED;
6123 }
6124 
6125 /**
6126  *  stmmac_ioctl - Entry point for the Ioctl
6127  *  @dev: Device pointer.
6128  *  @rq: An IOCTL-specific structure that can contain a pointer to
6129  *  a proprietary structure used to pass information to the driver.
6130  *  @cmd: IOCTL command
6131  *  Description:
6132  *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6133  */
6134 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6135 {
6136 	struct stmmac_priv *priv = netdev_priv(dev);
6137 	int ret = -EOPNOTSUPP;
6138 
6139 	if (!netif_running(dev))
6140 		return -EINVAL;
6141 
6142 	switch (cmd) {
6143 	case SIOCGMIIPHY:
6144 	case SIOCGMIIREG:
6145 	case SIOCSMIIREG:
6146 		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6147 		break;
6148 	default:
6149 		break;
6150 	}
6151 
6152 	return ret;
6153 }
6154 
6155 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6156 				    void *cb_priv)
6157 {
6158 	struct stmmac_priv *priv = cb_priv;
6159 	int ret = -EOPNOTSUPP;
6160 
6161 	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6162 		return ret;
6163 
6164 	__stmmac_disable_all_queues(priv);
6165 
6166 	switch (type) {
6167 	case TC_SETUP_CLSU32:
6168 		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6169 		break;
6170 	case TC_SETUP_CLSFLOWER:
6171 		ret = stmmac_tc_setup_cls(priv, priv, type_data);
6172 		break;
6173 	default:
6174 		break;
6175 	}
6176 
6177 	stmmac_enable_all_queues(priv);
6178 	return ret;
6179 }
6180 
6181 static LIST_HEAD(stmmac_block_cb_list);
6182 
6183 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6184 			   void *type_data)
6185 {
6186 	struct stmmac_priv *priv = netdev_priv(ndev);
6187 
6188 	switch (type) {
6189 	case TC_QUERY_CAPS:
6190 		return stmmac_tc_query_caps(priv, priv, type_data);
6191 	case TC_SETUP_QDISC_MQPRIO:
6192 		return stmmac_tc_setup_mqprio(priv, priv, type_data);
6193 	case TC_SETUP_BLOCK:
6194 		return flow_block_cb_setup_simple(type_data,
6195 						  &stmmac_block_cb_list,
6196 						  stmmac_setup_tc_block_cb,
6197 						  priv, priv, true);
6198 	case TC_SETUP_QDISC_CBS:
6199 		return stmmac_tc_setup_cbs(priv, priv, type_data);
6200 	case TC_SETUP_QDISC_TAPRIO:
6201 		return stmmac_tc_setup_taprio(priv, priv, type_data);
6202 	case TC_SETUP_QDISC_ETF:
6203 		return stmmac_tc_setup_etf(priv, priv, type_data);
6204 	default:
6205 		return -EOPNOTSUPP;
6206 	}
6207 }
6208 
6209 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6210 			       struct net_device *sb_dev)
6211 {
6212 	int gso = skb_shinfo(skb)->gso_type;
6213 
6214 	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6215 		/*
6216 		 * There is no way to determine the number of TSO/USO
6217 		 * capable Queues. Let's always use Queue 0
6218 		 * because if TSO/USO is supported then at least this
6219 		 * one will be capable.
6220 		 */
6221 		return 0;
6222 	}
6223 
6224 	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6225 }
6226 
6227 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6228 {
6229 	struct stmmac_priv *priv = netdev_priv(ndev);
6230 	int ret = 0;
6231 
6232 	ret = pm_runtime_resume_and_get(priv->device);
6233 	if (ret < 0)
6234 		return ret;
6235 
6236 	ret = eth_mac_addr(ndev, addr);
6237 	if (ret)
6238 		goto set_mac_error;
6239 
6240 	phylink_rx_clk_stop_block(priv->phylink);
6241 	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6242 	phylink_rx_clk_stop_unblock(priv->phylink);
6243 
6244 set_mac_error:
6245 	pm_runtime_put(priv->device);
6246 
6247 	return ret;
6248 }
6249 
6250 #ifdef CONFIG_DEBUG_FS
6251 static struct dentry *stmmac_fs_dir;
6252 
6253 static void sysfs_display_ring(void *head, int size, int extend_desc,
6254 			       struct seq_file *seq, dma_addr_t dma_phy_addr)
6255 {
6256 	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6257 	struct dma_desc *p = (struct dma_desc *)head;
6258 	unsigned int desc_size;
6259 	dma_addr_t dma_addr;
6260 	int i;
6261 
6262 	desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6263 	for (i = 0; i < size; i++) {
6264 		dma_addr = dma_phy_addr + i * desc_size;
6265 		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6266 				i, &dma_addr,
6267 				le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6268 				le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6269 		if (extend_desc)
6270 			p = &(++ep)->basic;
6271 		else
6272 			p++;
6273 	}
6274 }
6275 
6276 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6277 {
6278 	struct net_device *dev = seq->private;
6279 	struct stmmac_priv *priv = netdev_priv(dev);
6280 	u32 rx_count = priv->plat->rx_queues_to_use;
6281 	u32 tx_count = priv->plat->tx_queues_to_use;
6282 	u32 queue;
6283 
6284 	if ((dev->flags & IFF_UP) == 0)
6285 		return 0;
6286 
6287 	for (queue = 0; queue < rx_count; queue++) {
6288 		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6289 
6290 		seq_printf(seq, "RX Queue %d:\n", queue);
6291 
6292 		if (priv->extend_desc) {
6293 			seq_printf(seq, "Extended descriptor ring:\n");
6294 			sysfs_display_ring((void *)rx_q->dma_erx,
6295 					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6296 		} else {
6297 			seq_printf(seq, "Descriptor ring:\n");
6298 			sysfs_display_ring((void *)rx_q->dma_rx,
6299 					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6300 		}
6301 	}
6302 
6303 	for (queue = 0; queue < tx_count; queue++) {
6304 		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6305 
6306 		seq_printf(seq, "TX Queue %d:\n", queue);
6307 
6308 		if (priv->extend_desc) {
6309 			seq_printf(seq, "Extended descriptor ring:\n");
6310 			sysfs_display_ring((void *)tx_q->dma_etx,
6311 					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6312 		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6313 			seq_printf(seq, "Descriptor ring:\n");
6314 			sysfs_display_ring((void *)tx_q->dma_tx,
6315 					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6316 		}
6317 	}
6318 
6319 	return 0;
6320 }
6321 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6322 
6323 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6324 {
6325 	static const char * const dwxgmac_timestamp_source[] = {
6326 		"None",
6327 		"Internal",
6328 		"External",
6329 		"Both",
6330 	};
6331 	static const char * const dwxgmac_safety_feature_desc[] = {
6332 		"No",
6333 		"All Safety Features with ECC and Parity",
6334 		"All Safety Features without ECC or Parity",
6335 		"All Safety Features with Parity Only",
6336 		"ECC Only",
6337 		"UNDEFINED",
6338 		"UNDEFINED",
6339 		"UNDEFINED",
6340 	};
6341 	struct net_device *dev = seq->private;
6342 	struct stmmac_priv *priv = netdev_priv(dev);
6343 
6344 	if (!priv->hw_cap_support) {
6345 		seq_printf(seq, "DMA HW features not supported\n");
6346 		return 0;
6347 	}
6348 
6349 	seq_printf(seq, "==============================\n");
6350 	seq_printf(seq, "\tDMA HW features\n");
6351 	seq_printf(seq, "==============================\n");
6352 
6353 	seq_printf(seq, "\t10/100 Mbps: %s\n",
6354 		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6355 	seq_printf(seq, "\t1000 Mbps: %s\n",
6356 		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
6357 	seq_printf(seq, "\tHalf duplex: %s\n",
6358 		   (priv->dma_cap.half_duplex) ? "Y" : "N");
6359 	if (priv->plat->has_xgmac) {
6360 		seq_printf(seq,
6361 			   "\tNumber of Additional MAC address registers: %d\n",
6362 			   priv->dma_cap.multi_addr);
6363 	} else {
6364 		seq_printf(seq, "\tHash Filter: %s\n",
6365 			   (priv->dma_cap.hash_filter) ? "Y" : "N");
6366 		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6367 			   (priv->dma_cap.multi_addr) ? "Y" : "N");
6368 	}
6369 	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6370 		   (priv->dma_cap.pcs) ? "Y" : "N");
6371 	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6372 		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
6373 	seq_printf(seq, "\tPMT Remote wake up: %s\n",
6374 		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6375 	seq_printf(seq, "\tPMT Magic Frame: %s\n",
6376 		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6377 	seq_printf(seq, "\tRMON module: %s\n",
6378 		   (priv->dma_cap.rmon) ? "Y" : "N");
6379 	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6380 		   (priv->dma_cap.time_stamp) ? "Y" : "N");
6381 	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6382 		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
6383 	if (priv->plat->has_xgmac)
6384 		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6385 			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6386 	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6387 		   (priv->dma_cap.eee) ? "Y" : "N");
6388 	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6389 	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6390 		   (priv->dma_cap.tx_coe) ? "Y" : "N");
6391 	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6392 	    priv->plat->has_xgmac) {
6393 		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6394 			   (priv->dma_cap.rx_coe) ? "Y" : "N");
6395 	} else {
6396 		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6397 			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6398 		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6399 			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6400 		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6401 			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6402 	}
6403 	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6404 		   priv->dma_cap.number_rx_channel);
6405 	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6406 		   priv->dma_cap.number_tx_channel);
6407 	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6408 		   priv->dma_cap.number_rx_queues);
6409 	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6410 		   priv->dma_cap.number_tx_queues);
6411 	seq_printf(seq, "\tEnhanced descriptors: %s\n",
6412 		   (priv->dma_cap.enh_desc) ? "Y" : "N");
6413 	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6414 	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6415 	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6416 		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6417 	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6418 	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6419 		   priv->dma_cap.pps_out_num);
6420 	seq_printf(seq, "\tSafety Features: %s\n",
6421 		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6422 	seq_printf(seq, "\tFlexible RX Parser: %s\n",
6423 		   priv->dma_cap.frpsel ? "Y" : "N");
6424 	seq_printf(seq, "\tEnhanced Addressing: %d\n",
6425 		   priv->dma_cap.host_dma_width);
6426 	seq_printf(seq, "\tReceive Side Scaling: %s\n",
6427 		   priv->dma_cap.rssen ? "Y" : "N");
6428 	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6429 		   priv->dma_cap.vlhash ? "Y" : "N");
6430 	seq_printf(seq, "\tSplit Header: %s\n",
6431 		   priv->dma_cap.sphen ? "Y" : "N");
6432 	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6433 		   priv->dma_cap.vlins ? "Y" : "N");
6434 	seq_printf(seq, "\tDouble VLAN: %s\n",
6435 		   priv->dma_cap.dvlan ? "Y" : "N");
6436 	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6437 		   priv->dma_cap.l3l4fnum);
6438 	seq_printf(seq, "\tARP Offloading: %s\n",
6439 		   priv->dma_cap.arpoffsel ? "Y" : "N");
6440 	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6441 		   priv->dma_cap.estsel ? "Y" : "N");
6442 	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6443 		   priv->dma_cap.fpesel ? "Y" : "N");
6444 	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6445 		   priv->dma_cap.tbssel ? "Y" : "N");
6446 	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6447 		   priv->dma_cap.tbs_ch_num);
6448 	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6449 		   priv->dma_cap.sgfsel ? "Y" : "N");
6450 	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6451 		   BIT(priv->dma_cap.ttsfd) >> 1);
6452 	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6453 		   priv->dma_cap.numtc);
6454 	seq_printf(seq, "\tDCB Feature: %s\n",
6455 		   priv->dma_cap.dcben ? "Y" : "N");
6456 	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6457 		   priv->dma_cap.advthword ? "Y" : "N");
6458 	seq_printf(seq, "\tPTP Offload: %s\n",
6459 		   priv->dma_cap.ptoen ? "Y" : "N");
6460 	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6461 		   priv->dma_cap.osten ? "Y" : "N");
6462 	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6463 		   priv->dma_cap.pfcen ? "Y" : "N");
6464 	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6465 		   BIT(priv->dma_cap.frpes) << 6);
6466 	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6467 		   BIT(priv->dma_cap.frpbs) << 6);
6468 	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6469 		   priv->dma_cap.frppipe_num);
6470 	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6471 		   priv->dma_cap.nrvf_num ?
6472 		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6473 	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6474 		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6475 	seq_printf(seq, "\tDepth of GCL: %lu\n",
6476 		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6477 	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6478 		   priv->dma_cap.cbtisel ? "Y" : "N");
6479 	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6480 		   priv->dma_cap.aux_snapshot_n);
6481 	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6482 		   priv->dma_cap.pou_ost_en ? "Y" : "N");
6483 	seq_printf(seq, "\tEnhanced DMA: %s\n",
6484 		   priv->dma_cap.edma ? "Y" : "N");
6485 	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6486 		   priv->dma_cap.ediffc ? "Y" : "N");
6487 	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6488 		   priv->dma_cap.vxn ? "Y" : "N");
6489 	seq_printf(seq, "\tDebug Memory Interface: %s\n",
6490 		   priv->dma_cap.dbgmem ? "Y" : "N");
6491 	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6492 		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6493 	return 0;
6494 }
6495 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6496 
6497 /* Use network device events to rename debugfs file entries.
6498  */
6499 static int stmmac_device_event(struct notifier_block *unused,
6500 			       unsigned long event, void *ptr)
6501 {
6502 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6503 	struct stmmac_priv *priv = netdev_priv(dev);
6504 
6505 	if (dev->netdev_ops != &stmmac_netdev_ops)
6506 		goto done;
6507 
6508 	switch (event) {
6509 	case NETDEV_CHANGENAME:
6510 		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6511 		break;
6512 	}
6513 done:
6514 	return NOTIFY_DONE;
6515 }
6516 
6517 static struct notifier_block stmmac_notifier = {
6518 	.notifier_call = stmmac_device_event,
6519 };
6520 
6521 static void stmmac_init_fs(struct net_device *dev)
6522 {
6523 	struct stmmac_priv *priv = netdev_priv(dev);
6524 
6525 	rtnl_lock();
6526 
6527 	/* Create per netdev entries */
6528 	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6529 
6530 	/* Entry to report DMA RX/TX rings */
6531 	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6532 			    &stmmac_rings_status_fops);
6533 
6534 	/* Entry to report the DMA HW features */
6535 	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6536 			    &stmmac_dma_cap_fops);
6537 
6538 	rtnl_unlock();
6539 }
6540 
6541 static void stmmac_exit_fs(struct net_device *dev)
6542 {
6543 	struct stmmac_priv *priv = netdev_priv(dev);
6544 
6545 	debugfs_remove_recursive(priv->dbgfs_dir);
6546 }
6547 #endif /* CONFIG_DEBUG_FS */
6548 
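/* Bit-serial CRC-32 (reflected polynomial 0xEDB88320) over the 12 VID bits
 * of the little-endian VLAN tag; the result feeds the 16-bin VLAN hash
 * filter via bitrev32(~crc) >> 28 in stmmac_vlan_update().
 */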
6549 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6550 {
6551 	unsigned char *data = (unsigned char *)&vid_le;
6552 	unsigned char data_byte = 0;
6553 	u32 crc = ~0x0;
6554 	u32 temp = 0;
6555 	int i, bits;
6556 
6557 	bits = get_bitmask_order(VLAN_VID_MASK);
6558 	for (i = 0; i < bits; i++) {
6559 		if ((i % 8) == 0)
6560 			data_byte = data[i / 8];
6561 
6562 		temp = ((crc & 1) ^ data_byte) & 1;
6563 		crc >>= 1;
6564 		data_byte >>= 1;
6565 
6566 		if (temp)
6567 			crc ^= 0xedb88320;
6568 	}
6569 
6570 	return crc;
6571 }
6572 
6573 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6574 {
6575 	u32 crc, hash = 0;
6576 	u16 pmatch = 0;
6577 	int count = 0;
6578 	u16 vid = 0;
6579 
6580 	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6581 		__le16 vid_le = cpu_to_le16(vid);
6582 		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6583 		hash |= (1 << crc);
6584 		count++;
6585 	}
6586 
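	/* Without the VLAN hash filter fall back to perfect match: at most
	 * one VID can be programmed besides VID 0, which always passes.
	 */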
6587 	if (!priv->dma_cap.vlhash) {
6588 		if (count > 2) /* VID = 0 always passes filter */
6589 			return -EOPNOTSUPP;
6590 
6591 		pmatch = vid;
6592 		hash = 0;
6593 	}
6594 
6595 	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6596 }
6597 
6598 /* FIXME: This may need RXC to be running, but it may be called with BH
6599  * disabled, which means we can't call phylink_rx_clk_stop*().
6600  */
6601 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6602 {
6603 	struct stmmac_priv *priv = netdev_priv(ndev);
6604 	bool is_double = false;
6605 	int ret;
6606 
6607 	ret = pm_runtime_resume_and_get(priv->device);
6608 	if (ret < 0)
6609 		return ret;
6610 
6611 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6612 		is_double = true;
6613 
6614 	set_bit(vid, priv->active_vlans);
6615 	ret = stmmac_vlan_update(priv, is_double);
6616 	if (ret) {
6617 		clear_bit(vid, priv->active_vlans);
6618 		goto err_pm_put;
6619 	}
6620 
6621 	if (priv->hw->num_vlan) {
6622 		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6623 		if (ret)
6624 			goto err_pm_put;
6625 	}
6626 err_pm_put:
6627 	pm_runtime_put(priv->device);
6628 
6629 	return ret;
6630 }
6631 
6632 /* FIXME: This may need RXC to be running, but it may be called with BH
6633  * disabled, which means we can't call phylink_rx_clk_stop*().
6634  */
6635 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6636 {
6637 	struct stmmac_priv *priv = netdev_priv(ndev);
6638 	bool is_double = false;
6639 	int ret;
6640 
6641 	ret = pm_runtime_resume_and_get(priv->device);
6642 	if (ret < 0)
6643 		return ret;
6644 
6645 	if (be16_to_cpu(proto) == ETH_P_8021AD)
6646 		is_double = true;
6647 
6648 	clear_bit(vid, priv->active_vlans);
6649 
6650 	if (priv->hw->num_vlan) {
6651 		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6652 		if (ret)
6653 			goto del_vlan_error;
6654 	}
6655 
6656 	ret = stmmac_vlan_update(priv, is_double);
6657 
6658 del_vlan_error:
6659 	pm_runtime_put(priv->device);
6660 
6661 	return ret;
6662 }
6663 
6664 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6665 {
6666 	struct stmmac_priv *priv = netdev_priv(dev);
6667 
6668 	switch (bpf->command) {
6669 	case XDP_SETUP_PROG:
6670 		return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6671 	case XDP_SETUP_XSK_POOL:
6672 		return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6673 					     bpf->xsk.queue_id);
6674 	default:
6675 		return -EOPNOTSUPP;
6676 	}
6677 }
6678 
6679 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6680 			   struct xdp_frame **frames, u32 flags)
6681 {
6682 	struct stmmac_priv *priv = netdev_priv(dev);
6683 	int cpu = smp_processor_id();
6684 	struct netdev_queue *nq;
6685 	int i, nxmit = 0;
6686 	int queue;
6687 
6688 	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6689 		return -ENETDOWN;
6690 
6691 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6692 		return -EINVAL;
6693 
6694 	queue = stmmac_xdp_get_tx_queue(priv, cpu);
6695 	nq = netdev_get_tx_queue(priv->dev, queue);
6696 
6697 	__netif_tx_lock(nq, cpu);
6698 	/* Avoids TX time-out as we are sharing with slow path */
6699 	txq_trans_cond_update(nq);
6700 
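	/* Queue frames until the ring cannot accept more; nxmit reports back
	 * to the XDP core how many were actually accepted.
	 */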
6701 	for (i = 0; i < num_frames; i++) {
6702 		int res;
6703 
6704 		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6705 		if (res == STMMAC_XDP_CONSUMED)
6706 			break;
6707 
6708 		nxmit++;
6709 	}
6710 
6711 	if (flags & XDP_XMIT_FLUSH) {
6712 		stmmac_flush_tx_descriptors(priv, queue);
6713 		stmmac_tx_timer_arm(priv, queue);
6714 	}
6715 
6716 	__netif_tx_unlock(nq);
6717 
6718 	return nxmit;
6719 }
6720 
6721 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6722 {
6723 	struct stmmac_channel *ch = &priv->channel[queue];
6724 	unsigned long flags;
6725 
6726 	spin_lock_irqsave(&ch->lock, flags);
6727 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6728 	spin_unlock_irqrestore(&ch->lock, flags);
6729 
6730 	stmmac_stop_rx_dma(priv, queue);
6731 	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6732 }
6733 
6734 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6735 {
6736 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6737 	struct stmmac_channel *ch = &priv->channel[queue];
6738 	unsigned long flags;
6739 	u32 buf_size;
6740 	int ret;
6741 
6742 	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6743 	if (ret) {
6744 		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6745 		return;
6746 	}
6747 
6748 	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6749 	if (ret) {
6750 		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6751 		netdev_err(priv->dev, "Failed to init RX desc.\n");
6752 		return;
6753 	}
6754 
6755 	stmmac_reset_rx_queue(priv, queue);
6756 	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6757 
6758 	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6759 			    rx_q->dma_rx_phy, rx_q->queue_index);
6760 
6761 	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6762 			     sizeof(struct dma_desc));
6763 	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6764 			       rx_q->rx_tail_addr, rx_q->queue_index);
6765 
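	/* Program the DMA buffer size from the XSK pool frame size when
	 * zero-copy is in use, otherwise use the default buffer size.
	 */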
6766 	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6767 		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6768 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6769 				      buf_size,
6770 				      rx_q->queue_index);
6771 	} else {
6772 		stmmac_set_dma_bfsize(priv, priv->ioaddr,
6773 				      priv->dma_conf.dma_buf_sz,
6774 				      rx_q->queue_index);
6775 	}
6776 
6777 	stmmac_start_rx_dma(priv, queue);
6778 
6779 	spin_lock_irqsave(&ch->lock, flags);
6780 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6781 	spin_unlock_irqrestore(&ch->lock, flags);
6782 }
6783 
6784 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6785 {
6786 	struct stmmac_channel *ch = &priv->channel[queue];
6787 	unsigned long flags;
6788 
6789 	spin_lock_irqsave(&ch->lock, flags);
6790 	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6791 	spin_unlock_irqrestore(&ch->lock, flags);
6792 
6793 	stmmac_stop_tx_dma(priv, queue);
6794 	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6795 }
6796 
6797 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6798 {
6799 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6800 	struct stmmac_channel *ch = &priv->channel[queue];
6801 	unsigned long flags;
6802 	int ret;
6803 
6804 	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6805 	if (ret) {
6806 		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6807 		return;
6808 	}
6809 
6810 	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6811 	if (ret) {
6812 		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6813 		netdev_err(priv->dev, "Failed to init TX desc.\n");
6814 		return;
6815 	}
6816 
6817 	stmmac_reset_tx_queue(priv, queue);
6818 	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6819 
6820 	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6821 			    tx_q->dma_tx_phy, tx_q->queue_index);
6822 
6823 	if (tx_q->tbs & STMMAC_TBS_AVAIL)
6824 		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6825 
6826 	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6827 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6828 			       tx_q->tx_tail_addr, tx_q->queue_index);
6829 
6830 	stmmac_start_tx_dma(priv, queue);
6831 
6832 	spin_lock_irqsave(&ch->lock, flags);
6833 	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6834 	spin_unlock_irqrestore(&ch->lock, flags);
6835 }
6836 
6837 void stmmac_xdp_release(struct net_device *dev)
6838 {
6839 	struct stmmac_priv *priv = netdev_priv(dev);
6840 	u32 chan;
6841 
6842 	/* Ensure tx function is not running */
6843 	netif_tx_disable(dev);
6844 
6845 	/* Disable NAPI process */
6846 	stmmac_disable_all_queues(priv);
6847 
6848 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6849 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6850 
6851 	/* Free the IRQ lines */
6852 	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6853 
6854 	/* Stop TX/RX DMA channels */
6855 	stmmac_stop_all_dma(priv);
6856 
6857 	/* Release and free the Rx/Tx resources */
6858 	free_dma_desc_resources(priv, &priv->dma_conf);
6859 
6860 	/* Disable the MAC Rx/Tx */
6861 	stmmac_mac_set(priv, priv->ioaddr, false);
6862 
6863 	/* set trans_start so we don't get spurious
6864 	 * watchdogs during reset
6865 	 */
6866 	netif_trans_update(dev);
6867 	netif_carrier_off(dev);
6868 }
6869 
6870 int stmmac_xdp_open(struct net_device *dev)
6871 {
6872 	struct stmmac_priv *priv = netdev_priv(dev);
6873 	u32 rx_cnt = priv->plat->rx_queues_to_use;
6874 	u32 tx_cnt = priv->plat->tx_queues_to_use;
6875 	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6876 	struct stmmac_rx_queue *rx_q;
6877 	struct stmmac_tx_queue *tx_q;
6878 	u32 buf_size;
6879 	bool sph_en;
6880 	u32 chan;
6881 	int ret;
6882 
6883 	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6884 	if (ret < 0) {
6885 		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6886 			   __func__);
6887 		goto dma_desc_error;
6888 	}
6889 
6890 	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6891 	if (ret < 0) {
6892 		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6893 			   __func__);
6894 		goto init_error;
6895 	}
6896 
6897 	stmmac_reset_queues_param(priv);
6898 
6899 	/* DMA CSR Channel configuration */
6900 	for (chan = 0; chan < dma_csr_ch; chan++) {
6901 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6902 		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6903 	}
6904 
6905 	/* Adjust Split header */
6906 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6907 
6908 	/* DMA RX Channel Configuration */
6909 	for (chan = 0; chan < rx_cnt; chan++) {
6910 		rx_q = &priv->dma_conf.rx_queue[chan];
6911 
6912 		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6913 				    rx_q->dma_rx_phy, chan);
6914 
6915 		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6916 				     (rx_q->buf_alloc_num *
6917 				      sizeof(struct dma_desc));
6918 		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6919 				       rx_q->rx_tail_addr, chan);
6920 
6921 		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6922 			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6923 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6924 					      buf_size,
6925 					      rx_q->queue_index);
6926 		} else {
6927 			stmmac_set_dma_bfsize(priv, priv->ioaddr,
6928 					      priv->dma_conf.dma_buf_sz,
6929 					      rx_q->queue_index);
6930 		}
6931 
6932 		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6933 	}
6934 
6935 	/* DMA TX Channel Configuration */
6936 	for (chan = 0; chan < tx_cnt; chan++) {
6937 		tx_q = &priv->dma_conf.tx_queue[chan];
6938 
6939 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6940 				    tx_q->dma_tx_phy, chan);
6941 
6942 		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6943 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6944 				       tx_q->tx_tail_addr, chan);
6945 
6946 		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6947 	}
6948 
6949 	/* Enable the MAC Rx/Tx */
6950 	stmmac_mac_set(priv, priv->ioaddr, true);
6951 
6952 	/* Start Rx & Tx DMA Channels */
6953 	stmmac_start_all_dma(priv);
6954 
6955 	ret = stmmac_request_irq(dev);
6956 	if (ret)
6957 		goto irq_error;
6958 
6959 	/* Enable NAPI process */
6960 	stmmac_enable_all_queues(priv);
6961 	netif_carrier_on(dev);
6962 	netif_tx_start_all_queues(dev);
6963 	stmmac_enable_all_dma_irq(priv);
6964 
6965 	return 0;
6966 
6967 irq_error:
6968 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6969 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6970 
6971 init_error:
6972 	free_dma_desc_resources(priv, &priv->dma_conf);
6973 dma_desc_error:
6974 	return ret;
6975 }
6976 
6977 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6978 {
6979 	struct stmmac_priv *priv = netdev_priv(dev);
6980 	struct stmmac_rx_queue *rx_q;
6981 	struct stmmac_tx_queue *tx_q;
6982 	struct stmmac_channel *ch;
6983 
6984 	if (test_bit(STMMAC_DOWN, &priv->state) ||
6985 	    !netif_carrier_ok(priv->dev))
6986 		return -ENETDOWN;
6987 
6988 	if (!stmmac_xdp_is_enabled(priv))
6989 		return -EINVAL;
6990 
6991 	if (queue >= priv->plat->rx_queues_to_use ||
6992 	    queue >= priv->plat->tx_queues_to_use)
6993 		return -EINVAL;
6994 
6995 	rx_q = &priv->dma_conf.rx_queue[queue];
6996 	tx_q = &priv->dma_conf.tx_queue[queue];
6997 	ch = &priv->channel[queue];
6998 
6999 	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7000 		return -EINVAL;
7001 
7002 	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7003 		/* EQoS does not have per-DMA channel SW interrupt,
7004 		 * so we schedule RX NAPI straight away.
7005 		 */
7006 		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7007 			__napi_schedule(&ch->rxtx_napi);
7008 	}
7009 
7010 	return 0;
7011 }
7012 
7013 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7014 {
7015 	struct stmmac_priv *priv = netdev_priv(dev);
7016 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7017 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7018 	unsigned int start;
7019 	int q;
7020 
7021 	for (q = 0; q < tx_cnt; q++) {
7022 		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7023 		u64 tx_packets;
7024 		u64 tx_bytes;
7025 
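		/* tx_bytes is updated under q_syncp in the xmit path while
		 * tx_packets is updated under napi_syncp in the TX clean
		 * path, so snapshot them with separate retry loops.
		 */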
7026 		do {
7027 			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7028 			tx_bytes   = u64_stats_read(&txq_stats->q.tx_bytes);
7029 		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7030 		do {
7031 			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7032 			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7033 		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7034 
7035 		stats->tx_packets += tx_packets;
7036 		stats->tx_bytes += tx_bytes;
7037 	}
7038 
7039 	for (q = 0; q < rx_cnt; q++) {
7040 		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7041 		u64 rx_packets;
7042 		u64 rx_bytes;
7043 
7044 		do {
7045 			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7046 			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7047 			rx_bytes   = u64_stats_read(&rxq_stats->napi.rx_bytes);
7048 		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7049 
7050 		stats->rx_packets += rx_packets;
7051 		stats->rx_bytes += rx_bytes;
7052 	}
7053 
7054 	stats->rx_dropped = priv->xstats.rx_dropped;
7055 	stats->rx_errors = priv->xstats.rx_errors;
7056 	stats->tx_dropped = priv->xstats.tx_dropped;
7057 	stats->tx_errors = priv->xstats.tx_errors;
7058 	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7059 	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7060 	stats->rx_length_errors = priv->xstats.rx_length;
7061 	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7062 	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7063 	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7064 }
7065 
7066 static const struct net_device_ops stmmac_netdev_ops = {
7067 	.ndo_open = stmmac_open,
7068 	.ndo_start_xmit = stmmac_xmit,
7069 	.ndo_stop = stmmac_release,
7070 	.ndo_change_mtu = stmmac_change_mtu,
7071 	.ndo_fix_features = stmmac_fix_features,
7072 	.ndo_set_features = stmmac_set_features,
7073 	.ndo_set_rx_mode = stmmac_set_rx_mode,
7074 	.ndo_tx_timeout = stmmac_tx_timeout,
7075 	.ndo_eth_ioctl = stmmac_ioctl,
7076 	.ndo_get_stats64 = stmmac_get_stats64,
7077 	.ndo_setup_tc = stmmac_setup_tc,
7078 	.ndo_select_queue = stmmac_select_queue,
7079 	.ndo_set_mac_address = stmmac_set_mac_address,
7080 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7081 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7082 	.ndo_bpf = stmmac_bpf,
7083 	.ndo_xdp_xmit = stmmac_xdp_xmit,
7084 	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
7085 	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
7086 	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
7087 };
7088 
7089 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7090 {
7091 	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7092 		return;
7093 	if (test_bit(STMMAC_DOWN, &priv->state))
7094 		return;
7095 
7096 	netdev_err(priv->dev, "Reset adapter.\n");
7097 
7098 	rtnl_lock();
7099 	netif_trans_update(priv->dev);
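	/* Serialise with any reset that is already in progress */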
7100 	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7101 		usleep_range(1000, 2000);
7102 
7103 	set_bit(STMMAC_DOWN, &priv->state);
7104 	dev_close(priv->dev);
7105 	dev_open(priv->dev, NULL);
7106 	clear_bit(STMMAC_DOWN, &priv->state);
7107 	clear_bit(STMMAC_RESETING, &priv->state);
7108 	rtnl_unlock();
7109 }
7110 
7111 static void stmmac_service_task(struct work_struct *work)
7112 {
7113 	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7114 			service_task);
7115 
7116 	stmmac_reset_subtask(priv);
7117 	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7118 }
7119 
7120 /**
7121  *  stmmac_hw_init - Init the MAC device
7122  *  @priv: driver private structure
7123  *  Description: this function is to configure the MAC device according to
7124  *  some platform parameters or the HW capability register. It prepares the
7125  *  driver to use either ring or chain modes and to setup either enhanced or
7126  *  normal descriptors.
7127  */
7128 static int stmmac_hw_init(struct stmmac_priv *priv)
7129 {
7130 	int ret;
7131 
7132 	/* dwmac-sun8i only works in chain mode */
7133 	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7134 		chain_mode = 1;
7135 	priv->chain_mode = chain_mode;
7136 
7137 	/* Initialize HW Interface */
7138 	ret = stmmac_hwif_init(priv);
7139 	if (ret)
7140 		return ret;
7141 
7142 	/* Get the HW capability (available on GMAC cores newer than 3.50a) */
7143 	priv->hw_cap_support = stmmac_get_hw_features(priv);
7144 	if (priv->hw_cap_support) {
7145 		dev_info(priv->device, "DMA HW capability register supported\n");
7146 
7147 		/* We can override some gmac/dma configuration fields (e.g.
7148 		 * enh_desc, tx_coe) that are passed through the platform
7149 		 * with the values from the HW capability register
7150 		 * (if supported).
7151 		 */
7152 		priv->plat->enh_desc = priv->dma_cap.enh_desc;
7153 		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7154 				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
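		/* hash_tb_sz encodes the MAC hash filter size:
		 * 32 << hash_tb_sz bins (e.g. 64/128/256).
		 */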
7155 		if (priv->dma_cap.hash_tb_sz) {
7156 			priv->hw->multicast_filter_bins =
7157 					(BIT(priv->dma_cap.hash_tb_sz) << 5);
7158 			priv->hw->mcast_bits_log2 =
7159 					ilog2(priv->hw->multicast_filter_bins);
7160 		}
7161 
7162 		/* TXCOE doesn't work in thresh DMA mode */
7163 		if (priv->plat->force_thresh_dma_mode)
7164 			priv->plat->tx_coe = 0;
7165 		else
7166 			priv->plat->tx_coe = priv->dma_cap.tx_coe;
7167 
7168 		/* In case of GMAC4 rx_coe is from HW cap register. */
7169 		priv->plat->rx_coe = priv->dma_cap.rx_coe;
7170 
7171 		if (priv->dma_cap.rx_coe_type2)
7172 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7173 		else if (priv->dma_cap.rx_coe_type1)
7174 			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7175 
7176 	} else {
7177 		dev_info(priv->device, "No HW DMA feature register supported\n");
7178 	}
7179 
7180 	if (priv->plat->rx_coe) {
7181 		priv->hw->rx_csum = priv->plat->rx_coe;
7182 		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7183 		if (priv->synopsys_id < DWMAC_CORE_4_00)
7184 			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7185 	}
7186 	if (priv->plat->tx_coe)
7187 		dev_info(priv->device, "TX Checksum insertion supported\n");
7188 
7189 	if (priv->plat->pmt) {
7190 		dev_info(priv->device, "Wake-Up On LAN supported\n");
7191 		device_set_wakeup_capable(priv->device, 1);
7192 		devm_pm_set_wake_irq(priv->device, priv->wol_irq);
7193 	}
7194 
7195 	if (priv->dma_cap.tsoen)
7196 		dev_info(priv->device, "TSO supported\n");
7197 
7198 	if (priv->dma_cap.number_rx_queues &&
7199 	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7200 		dev_warn(priv->device,
7201 			 "Number of Rx queues (%u) exceeds dma capability\n",
7202 			 priv->plat->rx_queues_to_use);
7203 		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7204 	}
7205 	if (priv->dma_cap.number_tx_queues &&
7206 	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7207 		dev_warn(priv->device,
7208 			 "Number of Tx queues (%u) exceeds dma capability\n",
7209 			 priv->plat->tx_queues_to_use);
7210 		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7211 	}
7212 
7213 	if (priv->dma_cap.rx_fifo_size &&
7214 	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7215 		dev_warn(priv->device,
7216 			 "Rx FIFO size (%u) exceeds dma capability\n",
7217 			 priv->plat->rx_fifo_size);
7218 		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7219 	}
7220 	if (priv->dma_cap.tx_fifo_size &&
7221 	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7222 		dev_warn(priv->device,
7223 			 "Tx FIFO size (%u) exceeds dma capability\n",
7224 			 priv->plat->tx_fifo_size);
7225 		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7226 	}
7227 
7228 	priv->hw->vlan_fail_q_en =
7229 		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7230 	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7231 
7232 	/* Run HW quirks, if any */
7233 	if (priv->hwif_quirks) {
7234 		ret = priv->hwif_quirks(priv);
7235 		if (ret)
7236 			return ret;
7237 	}
7238 
7239 	/* Rx Watchdog is available in cores newer than 3.40.
7240 	 * In some cases, for example on buggy HW, this feature
7241 	 * has to be disabled; this can be done by passing the
7242 	 * riwt_off field from the platform.
7243 	 */
7244 	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7245 	    (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7246 		priv->use_riwt = 1;
7247 		dev_info(priv->device,
7248 			 "Enable RX Mitigation via HW Watchdog Timer\n");
7249 	}
7250 
7251 	return 0;
7252 }
7253 
7254 static void stmmac_napi_add(struct net_device *dev)
7255 {
7256 	struct stmmac_priv *priv = netdev_priv(dev);
7257 	u32 queue, maxq;
7258 
7259 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7260 
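	/* Each channel gets an RX NAPI and/or a TX NAPI for the normal
	 * datapath plus, when it has both an RX and a TX queue, a combined
	 * rxtx NAPI used by the XSK wakeup path above.
	 */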
7261 	for (queue = 0; queue < maxq; queue++) {
7262 		struct stmmac_channel *ch = &priv->channel[queue];
7263 
7264 		ch->priv_data = priv;
7265 		ch->index = queue;
7266 		spin_lock_init(&ch->lock);
7267 
7268 		if (queue < priv->plat->rx_queues_to_use) {
7269 			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7270 		}
7271 		if (queue < priv->plat->tx_queues_to_use) {
7272 			netif_napi_add_tx(dev, &ch->tx_napi,
7273 					  stmmac_napi_poll_tx);
7274 		}
7275 		if (queue < priv->plat->rx_queues_to_use &&
7276 		    queue < priv->plat->tx_queues_to_use) {
7277 			netif_napi_add(dev, &ch->rxtx_napi,
7278 				       stmmac_napi_poll_rxtx);
7279 		}
7280 	}
7281 }
7282 
7283 static void stmmac_napi_del(struct net_device *dev)
7284 {
7285 	struct stmmac_priv *priv = netdev_priv(dev);
7286 	u32 queue, maxq;
7287 
7288 	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7289 
7290 	for (queue = 0; queue < maxq; queue++) {
7291 		struct stmmac_channel *ch = &priv->channel[queue];
7292 
7293 		if (queue < priv->plat->rx_queues_to_use)
7294 			netif_napi_del(&ch->rx_napi);
7295 		if (queue < priv->plat->tx_queues_to_use)
7296 			netif_napi_del(&ch->tx_napi);
7297 		if (queue < priv->plat->rx_queues_to_use &&
7298 		    queue < priv->plat->tx_queues_to_use) {
7299 			netif_napi_del(&ch->rxtx_napi);
7300 		}
7301 	}
7302 }
7303 
7304 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7305 {
7306 	struct stmmac_priv *priv = netdev_priv(dev);
7307 	int ret = 0, i;
7308 
7309 	if (netif_running(dev))
7310 		stmmac_release(dev);
7311 
7312 	stmmac_napi_del(dev);
7313 
7314 	priv->plat->rx_queues_to_use = rx_cnt;
7315 	priv->plat->tx_queues_to_use = tx_cnt;
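	/* Rebuild the default RSS indirection table for the new RX queue
	 * count unless the user has configured one explicitly.
	 */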
7316 	if (!netif_is_rxfh_configured(dev))
7317 		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7318 			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7319 									rx_cnt);
7320 
7321 	stmmac_napi_add(dev);
7322 
7323 	if (netif_running(dev))
7324 		ret = stmmac_open(dev);
7325 
7326 	return ret;
7327 }
7328 
7329 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7330 {
7331 	struct stmmac_priv *priv = netdev_priv(dev);
7332 	int ret = 0;
7333 
7334 	if (netif_running(dev))
7335 		stmmac_release(dev);
7336 
7337 	priv->dma_conf.dma_rx_size = rx_size;
7338 	priv->dma_conf.dma_tx_size = tx_size;
7339 
7340 	if (netif_running(dev))
7341 		ret = stmmac_open(dev);
7342 
7343 	return ret;
7344 }
7345 
7346 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7347 {
7348 	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7349 	struct dma_desc *desc_contains_ts = ctx->desc;
7350 	struct stmmac_priv *priv = ctx->priv;
7351 	struct dma_desc *ndesc = ctx->ndesc;
7352 	struct dma_desc *desc = ctx->desc;
7353 	u64 ns = 0;
7354 
7355 	if (!priv->hwts_rx_en)
7356 		return -ENODATA;
7357 
7358 	/* For GMAC4, the valid timestamp is from CTX next desc. */
7359 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7360 		desc_contains_ts = ndesc;
7361 
7362 	/* Check if timestamp is available */
7363 	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7364 		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7365 		ns -= priv->plat->cdc_error_adj;
7366 		*timestamp = ns_to_ktime(ns);
7367 		return 0;
7368 	}
7369 
7370 	return -ENODATA;
7371 }
7372 
7373 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7374 	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
7375 };
7376 
7377 /**
7378  * stmmac_dvr_probe
7379  * @device: device pointer
7380  * @plat_dat: platform data pointer
7381  * @res: stmmac resource pointer
7382  * Description: this is the main probe function used to
7383  * call alloc_etherdev and allocate the priv structure.
7384  * Return:
7385  * returns 0 on success, otherwise errno.
7386  */
7387 int stmmac_dvr_probe(struct device *device,
7388 		     struct plat_stmmacenet_data *plat_dat,
7389 		     struct stmmac_resources *res)
7390 {
7391 	struct net_device *ndev = NULL;
7392 	struct stmmac_priv *priv;
7393 	u32 rxq;
7394 	int i, ret = 0;
7395 
7396 	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7397 				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7398 	if (!ndev)
7399 		return -ENOMEM;
7400 
7401 	SET_NETDEV_DEV(ndev, device);
7402 
7403 	priv = netdev_priv(ndev);
7404 	priv->device = device;
7405 	priv->dev = ndev;
7406 
7407 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7408 		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7409 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7410 		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7411 		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7412 	}
7413 
7414 	priv->xstats.pcpu_stats =
7415 		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7416 	if (!priv->xstats.pcpu_stats)
7417 		return -ENOMEM;
7418 
7419 	stmmac_set_ethtool_ops(ndev);
7420 	priv->pause_time = pause;
7421 	priv->plat = plat_dat;
7422 	priv->ioaddr = res->addr;
7423 	priv->dev->base_addr = (unsigned long)res->addr;
7424 	priv->plat->dma_cfg->multi_msi_en =
7425 		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7426 
7427 	priv->dev->irq = res->irq;
7428 	priv->wol_irq = res->wol_irq;
7429 	priv->lpi_irq = res->lpi_irq;
7430 	priv->sfty_irq = res->sfty_irq;
7431 	priv->sfty_ce_irq = res->sfty_ce_irq;
7432 	priv->sfty_ue_irq = res->sfty_ue_irq;
7433 	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7434 		priv->rx_irq[i] = res->rx_irq[i];
7435 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7436 		priv->tx_irq[i] = res->tx_irq[i];
7437 
7438 	if (!is_zero_ether_addr(res->mac))
7439 		eth_hw_addr_set(priv->dev, res->mac);
7440 
7441 	dev_set_drvdata(device, priv->dev);
7442 
7443 	/* Verify driver arguments */
7444 	stmmac_verify_args();
7445 
7446 	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7447 	if (!priv->af_xdp_zc_qps)
7448 		return -ENOMEM;
7449 
7450 	/* Allocate workqueue */
7451 	priv->wq = create_singlethread_workqueue("stmmac_wq");
7452 	if (!priv->wq) {
7453 		dev_err(priv->device, "failed to create workqueue\n");
7454 		ret = -ENOMEM;
7455 		goto error_wq_init;
7456 	}
7457 
7458 	INIT_WORK(&priv->service_task, stmmac_service_task);
7459 
7460 	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7461 
7462 	/* Override with kernel parameters if supplied. XXX CRS XXX
7463 	 * this needs to have multiple instances.
7464 	 */
7465 	if ((phyaddr >= 0) && (phyaddr <= 31))
7466 		priv->plat->phy_addr = phyaddr;
7467 
7468 	if (priv->plat->stmmac_rst) {
7469 		ret = reset_control_assert(priv->plat->stmmac_rst);
7470 		reset_control_deassert(priv->plat->stmmac_rst);
7471 		/* Some reset controllers have only reset callback instead of
7472 		 * assert + deassert callbacks pair.
7473 		 */
7474 		if (ret == -ENOTSUPP)
7475 			reset_control_reset(priv->plat->stmmac_rst);
7476 	}
7477 
7478 	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7479 	if (ret == -ENOTSUPP)
7480 		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7481 			ERR_PTR(ret));
7482 
7483 	/* Wait a bit for the reset to take effect */
7484 	udelay(10);
7485 
7486 	/* Init MAC and get the capabilities */
7487 	ret = stmmac_hw_init(priv);
7488 	if (ret)
7489 		goto error_hw_init;
7490 
7491 	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7492 	 */
7493 	if (priv->synopsys_id < DWMAC_CORE_5_20)
7494 		priv->plat->dma_cfg->dche = false;
7495 
7496 	stmmac_check_ether_addr(priv);
7497 
7498 	ndev->netdev_ops = &stmmac_netdev_ops;
7499 
7500 	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7501 	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7502 
7503 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7504 			    NETIF_F_RXCSUM;
7505 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7506 			     NETDEV_XDP_ACT_XSK_ZEROCOPY;
7507 
7508 	ret = stmmac_tc_init(priv, priv);
7509 	if (!ret) {
7510 		ndev->hw_features |= NETIF_F_HW_TC;
7511 	}
7512 
7513 	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7514 		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7515 		if (priv->plat->has_gmac4)
7516 			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7517 		priv->tso = true;
7518 		dev_info(priv->device, "TSO feature enabled\n");
7519 	}
7520 
7521 	if (priv->dma_cap.sphen &&
7522 	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7523 		ndev->hw_features |= NETIF_F_GRO;
7524 		priv->sph_cap = true;
7525 		priv->sph = priv->sph_cap;
7526 		dev_info(priv->device, "SPH feature enabled\n");
7527 	}
7528 
7529 	/* Ideally our host DMA address width is the same as for the
7530 	 * device. However, it may differ and then we have to use our
7531 	 * host DMA width for allocation and the device DMA width for
7532 	 * register handling.
7533 	 */
7534 	if (priv->plat->host_dma_width)
7535 		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7536 	else
7537 		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7538 
7539 	if (priv->dma_cap.host_dma_width) {
7540 		ret = dma_set_mask_and_coherent(device,
7541 				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7542 		if (!ret) {
7543 			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7544 				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7545 
7546 			/*
7547 			 * If more than 32 bits can be addressed, make sure to
7548 			 * enable enhanced addressing mode.
7549 			 */
7550 			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7551 				priv->plat->dma_cfg->eame = true;
7552 		} else {
7553 			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7554 			if (ret) {
7555 				dev_err(priv->device, "Failed to set DMA Mask\n");
7556 				goto error_hw_init;
7557 			}
7558 
7559 			priv->dma_cap.host_dma_width = 32;
7560 		}
7561 	}
7562 
7563 	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7564 	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7565 #ifdef STMMAC_VLAN_TAG_USED
7566 	/* Both mac100 and gmac support receive VLAN tag detection */
7567 	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7568 	if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
7569 		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7570 		priv->hw->hw_vlan_en = true;
7571 	}
7572 	if (priv->dma_cap.vlhash) {
7573 		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7574 		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7575 	}
7576 	if (priv->dma_cap.vlins) {
7577 		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7578 		if (priv->dma_cap.dvlan)
7579 			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7580 	}
7581 #endif
7582 	priv->msg_enable = netif_msg_init(debug, default_msg_level);
7583 
7584 	priv->xstats.threshold = tc;
7585 
7586 	/* Initialize RSS */
7587 	rxq = priv->plat->rx_queues_to_use;
7588 	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7589 	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7590 		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7591 
7592 	if (priv->dma_cap.rssen && priv->plat->rss_en)
7593 		ndev->features |= NETIF_F_RXHASH;
7594 
7595 	ndev->vlan_features |= ndev->features;
7596 
7597 	/* MTU range: 46 - hw-specific max */
7598 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7599 	if (priv->plat->has_xgmac)
7600 		ndev->max_mtu = XGMAC_JUMBO_LEN;
7601 	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7602 		ndev->max_mtu = JUMBO_LEN;
7603 	else
7604 		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7605 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7606 	 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7607 	 */
7608 	if ((priv->plat->maxmtu < ndev->max_mtu) &&
7609 	    (priv->plat->maxmtu >= ndev->min_mtu))
7610 		ndev->max_mtu = priv->plat->maxmtu;
7611 	else if (priv->plat->maxmtu < ndev->min_mtu)
7612 		dev_warn(priv->device,
7613 			 "%s: warning: maxmtu having invalid value (%d)\n",
7614 			 __func__, priv->plat->maxmtu);
7615 
7616 	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7617 
7618 	/* Setup channels NAPI */
7619 	stmmac_napi_add(ndev);
7620 
7621 	mutex_init(&priv->lock);
7622 
7623 	stmmac_fpe_init(priv);
7624 
7625 	stmmac_check_pcs_mode(priv);
7626 
7627 	pm_runtime_get_noresume(device);
7628 	pm_runtime_set_active(device);
7629 	if (!pm_runtime_enabled(device))
7630 		pm_runtime_enable(device);
7631 
7632 	ret = stmmac_mdio_register(ndev);
7633 	if (ret < 0) {
7634 		dev_err_probe(priv->device, ret,
7635 			      "MDIO bus (id: %d) registration failed\n",
7636 			      priv->plat->bus_id);
7637 		goto error_mdio_register;
7638 	}
7639 
7640 	ret = stmmac_pcs_setup(ndev);
7641 	if (ret)
7642 		goto error_pcs_setup;
7643 
7644 	ret = stmmac_phy_setup(priv);
7645 	if (ret) {
7646 		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7647 		goto error_phy_setup;
7648 	}
7649 
7650 	ret = register_netdev(ndev);
7651 	if (ret) {
7652 		dev_err(priv->device, "%s: ERROR %i registering the device\n",
7653 			__func__, ret);
7654 		goto error_netdev_register;
7655 	}
7656 
7657 #ifdef CONFIG_DEBUG_FS
7658 	stmmac_init_fs(ndev);
7659 #endif
7660 
7661 	if (priv->plat->dump_debug_regs)
7662 		priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7663 
7664 	/* Let pm_runtime_put() disable the clocks.
7665 	 * If CONFIG_PM is not enabled, the clocks will stay powered.
7666 	 */
7667 	pm_runtime_put(device);
7668 
7669 	return ret;
7670 
7671 error_netdev_register:
7672 	phylink_destroy(priv->phylink);
7673 error_phy_setup:
7674 	stmmac_pcs_clean(ndev);
7675 error_pcs_setup:
7676 	stmmac_mdio_unregister(ndev);
7677 error_mdio_register:
7678 	stmmac_napi_del(ndev);
7679 error_hw_init:
7680 	destroy_workqueue(priv->wq);
7681 error_wq_init:
7682 	bitmap_free(priv->af_xdp_zc_qps);
7683 
7684 	return ret;
7685 }
7686 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7687 
7688 /**
7689  * stmmac_dvr_remove
7690  * @dev: device pointer
7691  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7692  * changes the link status and releases the DMA descriptor rings.
7693  */
7694 void stmmac_dvr_remove(struct device *dev)
7695 {
7696 	struct net_device *ndev = dev_get_drvdata(dev);
7697 	struct stmmac_priv *priv = netdev_priv(ndev);
7698 
7699 	netdev_info(priv->dev, "%s: removing driver\n", __func__);
7700 
7701 	pm_runtime_get_sync(dev);
7702 
7703 	unregister_netdev(ndev);
7704 
7705 #ifdef CONFIG_DEBUG_FS
7706 	stmmac_exit_fs(ndev);
7707 #endif
7708 	phylink_destroy(priv->phylink);
7709 	if (priv->plat->stmmac_rst)
7710 		reset_control_assert(priv->plat->stmmac_rst);
7711 	reset_control_assert(priv->plat->stmmac_ahb_rst);
7712 
7713 	stmmac_pcs_clean(ndev);
7714 	stmmac_mdio_unregister(ndev);
7715 
7716 	destroy_workqueue(priv->wq);
7717 	mutex_destroy(&priv->lock);
7718 	bitmap_free(priv->af_xdp_zc_qps);
7719 
7720 	pm_runtime_disable(dev);
7721 	pm_runtime_put_noidle(dev);
7722 }
7723 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7724 
7725 /**
7726  * stmmac_suspend - suspend callback
7727  * @dev: device pointer
7728  * Description: this is the function to suspend the device; it is called
7729  * by the platform driver to stop the network queue, release the resources,
7730  * program the PMT register (for WoL) and clean up driver resources.
7731  */
7732 int stmmac_suspend(struct device *dev)
7733 {
7734 	struct net_device *ndev = dev_get_drvdata(dev);
7735 	struct stmmac_priv *priv = netdev_priv(ndev);
7736 	u32 chan;
7737 
7738 	if (!ndev || !netif_running(ndev))
7739 		return 0;
7740 
7741 	mutex_lock(&priv->lock);
7742 
7743 	netif_device_detach(ndev);
7744 
7745 	stmmac_disable_all_queues(priv);
7746 
7747 	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7748 		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7749 
7750 	if (priv->eee_sw_timer_en) {
7751 		priv->tx_path_in_lpi_mode = false;
7752 		timer_delete_sync(&priv->eee_ctrl_timer);
7753 	}
7754 
7755 	/* Stop TX/RX DMA */
7756 	stmmac_stop_all_dma(priv);
7757 
7758 	if (priv->plat->serdes_powerdown)
7759 		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7760 
7761 	/* Enable Power down mode by programming the PMT regs */
7762 	if (stmmac_wol_enabled_mac(priv)) {
7763 		stmmac_pmt(priv, priv->hw, priv->wolopts);
7764 		priv->irq_wake = 1;
7765 	} else {
7766 		stmmac_mac_set(priv, priv->ioaddr, false);
7767 		pinctrl_pm_select_sleep_state(priv->device);
7768 	}
7769 
7770 	mutex_unlock(&priv->lock);
7771 
7772 	rtnl_lock();
7773 	if (stmmac_wol_enabled_phy(priv))
7774 		phylink_speed_down(priv->phylink, false);
7775 
7776 	phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv));
7777 	rtnl_unlock();
7778 
7779 	if (stmmac_fpe_supported(priv))
7780 		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
7781 
7782 	if (priv->plat->suspend)
7783 		return priv->plat->suspend(dev, priv->plat->bsp_priv);
7784 
7785 	return 0;
7786 }
7787 EXPORT_SYMBOL_GPL(stmmac_suspend);
7788 
7789 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7790 {
7791 	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7792 
7793 	rx_q->cur_rx = 0;
7794 	rx_q->dirty_rx = 0;
7795 }
7796 
7797 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7798 {
7799 	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7800 
7801 	tx_q->cur_tx = 0;
7802 	tx_q->dirty_tx = 0;
7803 	tx_q->mss = 0;
7804 
7805 	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7806 }
7807 
7808 /**
7809  * stmmac_reset_queues_param - reset queue parameters
7810  * @priv: driver private structure
7811  */
7812 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7813 {
7814 	u32 rx_cnt = priv->plat->rx_queues_to_use;
7815 	u32 tx_cnt = priv->plat->tx_queues_to_use;
7816 	u32 queue;
7817 
7818 	for (queue = 0; queue < rx_cnt; queue++)
7819 		stmmac_reset_rx_queue(priv, queue);
7820 
7821 	for (queue = 0; queue < tx_cnt; queue++)
7822 		stmmac_reset_tx_queue(priv, queue);
7823 }
7824 
7825 /**
7826  * stmmac_resume - resume callback
7827  * @dev: device pointer
7828  * Description: on resume this function is invoked to set up the DMA and CORE
7829  * in a usable state.
7830  */
7831 int stmmac_resume(struct device *dev)
7832 {
7833 	struct net_device *ndev = dev_get_drvdata(dev);
7834 	struct stmmac_priv *priv = netdev_priv(ndev);
7835 	int ret;
7836 
7837 	if (priv->plat->resume) {
7838 		ret = priv->plat->resume(dev, priv->plat->bsp_priv);
7839 		if (ret)
7840 			return ret;
7841 	}
7842 
7843 	if (!netif_running(ndev))
7844 		return 0;
7845 
7846 	/* The Power Down bit in the power management (PMT) register is
7847 	 * cleared automatically as soon as a magic packet or a Wake-up
7848 	 * frame is received. It is still better to clear this bit manually
7849 	 * because it can cause problems when resuming from other
7850 	 * devices (e.g. serial console).
7851 	 */
7852 	if (stmmac_wol_enabled_mac(priv)) {
7853 		mutex_lock(&priv->lock);
7854 		stmmac_pmt(priv, priv->hw, 0);
7855 		mutex_unlock(&priv->lock);
7856 		priv->irq_wake = 0;
7857 	} else {
7858 		pinctrl_pm_select_default_state(priv->device);
7859 		/* reset the phy so that it's ready */
7860 		if (priv->mii)
7861 			stmmac_mdio_reset(priv->mii);
7862 	}
7863 
7864 	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7865 	    priv->plat->serdes_powerup) {
7866 		ret = priv->plat->serdes_powerup(ndev,
7867 						 priv->plat->bsp_priv);
7868 
7869 		if (ret < 0)
7870 			return ret;
7871 	}
7872 
7873 	rtnl_lock();
7874 
7875 	/* Prepare the PHY to resume, ensuring that its clocks, which are
7876 	 * necessary for the MAC DMA reset to complete, are running.
7877 	 */
7878 	phylink_prepare_resume(priv->phylink);
7879 
7880 	mutex_lock(&priv->lock);
7881 
7882 	stmmac_reset_queues_param(priv);
7883 
7884 	stmmac_free_tx_skbufs(priv);
7885 	stmmac_clear_descriptors(priv, &priv->dma_conf);
7886 
7887 	ret = stmmac_hw_setup(ndev);
7888 	if (ret < 0) {
7889 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
7890 		mutex_unlock(&priv->lock);
7891 		rtnl_unlock();
7892 		return ret;
7893 	}
7894 
7895 	stmmac_init_timestamping(priv);
7896 
7897 	stmmac_init_coalesce(priv);
7898 	phylink_rx_clk_stop_block(priv->phylink);
7899 	stmmac_set_rx_mode(ndev);
7900 
7901 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7902 	phylink_rx_clk_stop_unblock(priv->phylink);
7903 
7904 	stmmac_enable_all_queues(priv);
7905 	stmmac_enable_all_dma_irq(priv);
7906 
7907 	mutex_unlock(&priv->lock);
7908 
7909 	/* phylink_resume() must be called after the hardware has been
7910 	 * initialised because it may bring the link up immediately in a
7911 	 * workqueue thread, which will race with initialisation.
7912 	 */
7913 	phylink_resume(priv->phylink);
7914 	if (stmmac_wol_enabled_phy(priv))
7915 		phylink_speed_up(priv->phylink);
7916 
7917 	rtnl_unlock();
7918 
7919 	netif_device_attach(ndev);
7920 
7921 	return 0;
7922 }
7923 EXPORT_SYMBOL_GPL(stmmac_resume);
7924 
7925 /* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
7926 DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
7927 EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
7928 
7929 #ifndef MODULE
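/* Parse the built-in command line, e.g.
 * stmmaceth=debug:16,phyaddr:1,watchdog:4000,tc:64
 */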
7930 static int __init stmmac_cmdline_opt(char *str)
7931 {
7932 	char *opt;
7933 
7934 	if (!str || !*str)
7935 		return 1;
7936 	while ((opt = strsep(&str, ",")) != NULL) {
7937 		if (!strncmp(opt, "debug:", 6)) {
7938 			if (kstrtoint(opt + 6, 0, &debug))
7939 				goto err;
7940 		} else if (!strncmp(opt, "phyaddr:", 8)) {
7941 			if (kstrtoint(opt + 8, 0, &phyaddr))
7942 				goto err;
7943 		} else if (!strncmp(opt, "tc:", 3)) {
7944 			if (kstrtoint(opt + 3, 0, &tc))
7945 				goto err;
7946 		} else if (!strncmp(opt, "watchdog:", 9)) {
7947 			if (kstrtoint(opt + 9, 0, &watchdog))
7948 				goto err;
7949 		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
7950 			if (kstrtoint(opt + 10, 0, &flow_ctrl))
7951 				goto err;
7952 		} else if (!strncmp(opt, "pause:", 6)) {
7953 			if (kstrtoint(opt + 6, 0, &pause))
7954 				goto err;
7955 		} else if (!strncmp(opt, "eee_timer:", 10)) {
7956 			if (kstrtoint(opt + 10, 0, &eee_timer))
7957 				goto err;
7958 		} else if (!strncmp(opt, "chain_mode:", 11)) {
7959 			if (kstrtoint(opt + 11, 0, &chain_mode))
7960 				goto err;
7961 		}
7962 	}
7963 	return 1;
7964 
7965 err:
7966 	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7967 	return 1;
7968 }
7969 
7970 __setup("stmmaceth=", stmmac_cmdline_opt);
7971 #endif /* MODULE */
7972 
7973 static int __init stmmac_init(void)
7974 {
7975 #ifdef CONFIG_DEBUG_FS
7976 	/* Create debugfs main directory if it doesn't exist yet */
7977 	if (!stmmac_fs_dir)
7978 		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7979 	register_netdevice_notifier(&stmmac_notifier);
7980 #endif
7981 
7982 	return 0;
7983 }
7984 
7985 static void __exit stmmac_exit(void)
7986 {
7987 #ifdef CONFIG_DEBUG_FS
7988 	unregister_netdevice_notifier(&stmmac_notifier);
7989 	debugfs_remove_recursive(stmmac_fs_dir);
7990 #endif
7991 }
7992 
7993 module_init(stmmac_init)
7994 module_exit(stmmac_exit)
7995 
7996 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7997 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7998 MODULE_LICENSE("GPL");
7999