1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54
55 /* As long as the interface is active, we keep the timestamping counter enabled
56 * with fine resolution and binary rollover. This avoids non-monotonic behavior
57 * (clock jumps) when changing timestamping settings at runtime.
58 */
59 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 PTP_TCR_TSCTRLSSR)
61
62 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
64
65 /* Module parameters */
66 #define TX_TIMEO 5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78
79 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
85
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
90
91 static int flow_ctrl = 0xdead;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 /* This is unused */
105 #define DEFAULT_BUFSIZE 1536
106 static int buf_sz = DEFAULT_BUFSIZE;
107 module_param(buf_sz, int, 0644);
108 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
109
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111 NETIF_MSG_LINK | NETIF_MSG_IFUP |
112 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113
114 #define STMMAC_DEFAULT_LPI_TIMER 1000
115 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, uint, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
121 * but allows the user to force the use of chain mode instead of ring mode.
122 */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
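/* A minimal usage sketch (illustrative values, not tuned recommendations):
 * when the driver is built as the "stmmac" module, these parameters can be
 * set at load time, e.g.:
 *
 *	modprobe stmmac chain_mode=1 eee_timer=2000 debug=16
 *
 * When the driver is built in, the same parameters can be passed on the
 * kernel command line as "stmmac.chain_mode=1" and so on.
 */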
126
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
139 u32 rxmode, u32 chan);
140
141 #ifdef CONFIG_DEBUG_FS
142 static const struct net_device_ops stmmac_netdev_ops;
143 static void stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(struct net_device *dev);
145 #endif
146
147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
148
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
150 {
151 int ret = 0;
152
153 if (enabled) {
154 ret = clk_prepare_enable(priv->plat->stmmac_clk);
155 if (ret)
156 return ret;
157 ret = clk_prepare_enable(priv->plat->pclk);
158 if (ret) {
159 clk_disable_unprepare(priv->plat->stmmac_clk);
160 return ret;
161 }
162 if (priv->plat->clks_config) {
163 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164 if (ret) {
165 clk_disable_unprepare(priv->plat->stmmac_clk);
166 clk_disable_unprepare(priv->plat->pclk);
167 return ret;
168 }
169 }
170 } else {
171 clk_disable_unprepare(priv->plat->stmmac_clk);
172 clk_disable_unprepare(priv->plat->pclk);
173 if (priv->plat->clks_config)
174 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
175 }
176
177 return ret;
178 }
179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
180
181 /**
182 * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
183 * @bsp_priv: BSP private data structure (unused)
184 * @clk_tx_i: the transmit clock
185 * @interface: the selected interface mode
186 * @speed: the speed that the MAC will be operating at
187 *
188 * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
189 * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
190 * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
191 * the plat_data->set_clk_tx_rate method directly, call it via their own
192 * implementation, or implement their own method should they have more
193 * complex requirements. It is intended to only be used in this method.
194 *
195 * plat_data->clk_tx_i must be filled in.
196 */
197 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
198 phy_interface_t interface, int speed)
199 {
200 long rate = rgmii_clock(speed);
201
202 /* Silently ignore unsupported speeds as rgmii_clock() only
203 * supports 10, 100 and 1000Mbps. We do not want to spit
204 * errors for 2500 and higher speeds here.
205 */
206 if (rate < 0)
207 return 0;
208
209 return clk_set_rate(clk_tx_i, rate);
210 }
211 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
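/* Illustrative sketch (not taken from any particular dwmac-* glue driver):
 * a platform that only needs the standard 2.5/25/125 MHz rates can hook this
 * helper from its probe function, provided it also fills in clk_tx_i. The
 * "tx" clock name below is a hypothetical example; the real name comes from
 * the platform's clock bindings.
 *
 *	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
 *	if (IS_ERR(plat_dat->clk_tx_i))
 *		return PTR_ERR(plat_dat->clk_tx_i);
 *
 *	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
 */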
212
213 /**
214 * stmmac_verify_args - verify the driver parameters.
215 * Description: it checks the driver parameters and sets a default in case of
216 * errors.
217 */
218 static void stmmac_verify_args(void)
219 {
220 if (unlikely(watchdog < 0))
221 watchdog = TX_TIMEO;
222 if (unlikely((pause < 0) || (pause > 0xffff)))
223 pause = PAUSE_TIME;
224
225 if (flow_ctrl != 0xdead)
226 pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
227 }
228
229 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
230 {
231 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
233 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
234 u32 queue;
235
236 for (queue = 0; queue < maxq; queue++) {
237 struct stmmac_channel *ch = &priv->channel[queue];
238
239 if (stmmac_xdp_is_enabled(priv) &&
240 test_bit(queue, priv->af_xdp_zc_qps)) {
241 napi_disable(&ch->rxtx_napi);
242 continue;
243 }
244
245 if (queue < rx_queues_cnt)
246 napi_disable(&ch->rx_napi);
247 if (queue < tx_queues_cnt)
248 napi_disable(&ch->tx_napi);
249 }
250 }
251
252 /**
253 * stmmac_disable_all_queues - Disable all queues
254 * @priv: driver private structure
255 */
256 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
257 {
258 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
259 struct stmmac_rx_queue *rx_q;
260 u32 queue;
261
262 /* synchronize_rcu() needed for pending XDP buffers to drain */
263 for (queue = 0; queue < rx_queues_cnt; queue++) {
264 rx_q = &priv->dma_conf.rx_queue[queue];
265 if (rx_q->xsk_pool) {
266 synchronize_rcu();
267 break;
268 }
269 }
270
271 __stmmac_disable_all_queues(priv);
272 }
273
274 /**
275 * stmmac_enable_all_queues - Enable all queues
276 * @priv: driver private structure
277 */
278 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
279 {
280 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
281 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
282 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
283 u32 queue;
284
285 for (queue = 0; queue < maxq; queue++) {
286 struct stmmac_channel *ch = &priv->channel[queue];
287
288 if (stmmac_xdp_is_enabled(priv) &&
289 test_bit(queue, priv->af_xdp_zc_qps)) {
290 napi_enable(&ch->rxtx_napi);
291 continue;
292 }
293
294 if (queue < rx_queues_cnt)
295 napi_enable(&ch->rx_napi);
296 if (queue < tx_queues_cnt)
297 napi_enable(&ch->tx_napi);
298 }
299 }
300
301 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
302 {
303 if (!test_bit(STMMAC_DOWN, &priv->state) &&
304 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
305 queue_work(priv->wq, &priv->service_task);
306 }
307
308 static void stmmac_global_err(struct stmmac_priv *priv)
309 {
310 netif_carrier_off(priv->dev);
311 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
312 stmmac_service_event_schedule(priv);
313 }
314
315 /**
316 * stmmac_clk_csr_set - dynamically set the MDC clock
317 * @priv: driver private structure
318 * Description: this is to dynamically set the MDC clock according to the csr
319 * clock input.
320 * Note:
321 * If a specific clk_csr value is passed from the platform
322 * this means that the CSR Clock Range selection cannot be
323 * changed at run-time and it is fixed (as reported in the driver
324 * documentation). Otherwise, the driver will try to set the MDC
325 * clock dynamically according to the actual clock input.
326 */
327 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
328 {
329 unsigned long clk_rate;
330
331 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
332
333 /* Platform provided default clk_csr would be assumed valid
334 * for all other cases except for the below mentioned ones.
335 * For values higher than the IEEE 802.3 specified frequency
336 * we cannot estimate the proper divider because the frequency
337 * of clk_csr_i is not known, so we do not change the default
338 * divider.
339 */
340 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
341 if (clk_rate < CSR_F_35M)
342 priv->clk_csr = STMMAC_CSR_20_35M;
343 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
344 priv->clk_csr = STMMAC_CSR_35_60M;
345 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
346 priv->clk_csr = STMMAC_CSR_60_100M;
347 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
348 priv->clk_csr = STMMAC_CSR_100_150M;
349 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
350 priv->clk_csr = STMMAC_CSR_150_250M;
351 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
352 priv->clk_csr = STMMAC_CSR_250_300M;
353 else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
354 priv->clk_csr = STMMAC_CSR_300_500M;
355 else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
356 priv->clk_csr = STMMAC_CSR_500_800M;
357 }
358
359 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
360 if (clk_rate > 160000000)
361 priv->clk_csr = 0x03;
362 else if (clk_rate > 80000000)
363 priv->clk_csr = 0x02;
364 else if (clk_rate > 40000000)
365 priv->clk_csr = 0x01;
366 else
367 priv->clk_csr = 0;
368 }
369
370 if (priv->plat->has_xgmac) {
371 if (clk_rate > 400000000)
372 priv->clk_csr = 0x5;
373 else if (clk_rate > 350000000)
374 priv->clk_csr = 0x4;
375 else if (clk_rate > 300000000)
376 priv->clk_csr = 0x3;
377 else if (clk_rate > 250000000)
378 priv->clk_csr = 0x2;
379 else if (clk_rate > 150000000)
380 priv->clk_csr = 0x1;
381 else
382 priv->clk_csr = 0x0;
383 }
384 }
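/* Worked example: with a 75 MHz csr clock and no platform-provided clk_csr,
 * the code above selects the 60-100 MHz range (STMMAC_CSR_60_100M). Assuming
 * the usual databook encoding of a /42 divider for that range, the resulting
 * MDC frequency is about 75 MHz / 42, i.e. roughly 1.8 MHz, safely below the
 * 2.5 MHz MDC limit of IEEE 802.3.
 */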
385
386 static void print_pkt(unsigned char *buf, int len)
387 {
388 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
389 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
390 }
391
392 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
393 {
394 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
395 u32 avail;
396
397 if (tx_q->dirty_tx > tx_q->cur_tx)
398 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
399 else
400 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
401
402 return avail;
403 }
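/* Worked example of the ring arithmetic above, assuming dma_tx_size = 512:
 * with dirty_tx = 5 and cur_tx = 10 (producer ahead, no wrap), avail is
 * 512 - 10 + 5 - 1 = 506; once cur_tx has wrapped, e.g. dirty_tx = 500 and
 * cur_tx = 3, avail is 500 - 3 - 1 = 496. The "- 1" keeps one slot unused so
 * that a completely full ring is never confused with an empty one.
 */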
404
405 /**
406 * stmmac_rx_dirty - Get RX queue dirty
407 * @priv: driver private structure
408 * @queue: RX queue index
409 */
410 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
411 {
412 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
413 u32 dirty;
414
415 if (rx_q->dirty_rx <= rx_q->cur_rx)
416 dirty = rx_q->cur_rx - rx_q->dirty_rx;
417 else
418 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
419
420 return dirty;
421 }
422
423 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
424 {
425 u32 tx_cnt = priv->plat->tx_queues_to_use;
426 u32 queue;
427
428 /* check if all TX queues have the work finished */
429 for (queue = 0; queue < tx_cnt; queue++) {
430 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
431
432 if (tx_q->dirty_tx != tx_q->cur_tx)
433 return true; /* still unfinished work */
434 }
435
436 return false;
437 }
438
439 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
440 {
441 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
442 }
443
444 /**
445 * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
446 * @priv: driver private structure
447 * Description: this function checks whether the TX path is idle and, if so,
448 * enters the LPI mode when EEE is enabled.
449 */
450 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
451 {
452 if (stmmac_eee_tx_busy(priv)) {
453 stmmac_restart_sw_lpi_timer(priv);
454 return;
455 }
456
457 /* Check and enter in LPI mode */
458 if (!priv->tx_path_in_lpi_mode)
459 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
460 priv->tx_lpi_clk_stop, 0);
461 }
462
463 /**
464 * stmmac_stop_sw_lpi - stop transmitting LPI
465 * @priv: driver private structure
466 * Description: When using software-controlled LPI, stop transmitting LPI state.
467 */
468 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
469 {
470 timer_delete_sync(&priv->eee_ctrl_timer);
471 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
472 priv->tx_path_in_lpi_mode = false;
473 }
474
475 /**
476 * stmmac_eee_ctrl_timer - EEE TX SW timer.
477 * @t: timer_list struct containing private info
478 * Description:
479 * if there is no data transfer and we are not already in the LPI state,
480 * then the MAC transmitter can be moved to the LPI state.
481 */
482 static void stmmac_eee_ctrl_timer(struct timer_list *t)
483 {
484 struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
485
486 stmmac_try_to_start_sw_lpi(priv);
487 }
488
489 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
490 * @priv: driver private structure
491 * @p : descriptor pointer
492 * @skb : the socket buffer
493 * Description :
494 * This function reads the timestamp from the descriptor, performs some
495 * sanity checks and passes it to the stack.
496 */
497 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
498 struct dma_desc *p, struct sk_buff *skb)
499 {
500 struct skb_shared_hwtstamps shhwtstamp;
501 bool found = false;
502 u64 ns = 0;
503
504 if (!priv->hwts_tx_en)
505 return;
506
507 /* exit if skb doesn't support hw tstamp */
508 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
509 return;
510
511 /* check tx tstamp status */
512 if (stmmac_get_tx_timestamp_status(priv, p)) {
513 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
514 found = true;
515 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
516 found = true;
517 }
518
519 if (found) {
520 ns -= priv->plat->cdc_error_adj;
521
522 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
523 shhwtstamp.hwtstamp = ns_to_ktime(ns);
524
525 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
526 /* pass tstamp to stack */
527 skb_tstamp_tx(skb, &shhwtstamp);
528 }
529 }
530
531 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
532 * @priv: driver private structure
533 * @p : descriptor pointer
534 * @np : next descriptor pointer
535 * @skb : the socket buffer
536 * Description :
537 * This function reads the received packet's timestamp from the descriptor
538 * and passes it to the stack. It also performs some sanity checks.
539 */
540 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
541 struct dma_desc *np, struct sk_buff *skb)
542 {
543 struct skb_shared_hwtstamps *shhwtstamp = NULL;
544 struct dma_desc *desc = p;
545 u64 ns = 0;
546
547 if (!priv->hwts_rx_en)
548 return;
549 /* For GMAC4, the valid timestamp is from CTX next desc. */
550 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
551 desc = np;
552
553 /* Check if timestamp is available */
554 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
555 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
556
557 ns -= priv->plat->cdc_error_adj;
558
559 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
560 shhwtstamp = skb_hwtstamps(skb);
561 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 shhwtstamp->hwtstamp = ns_to_ktime(ns);
563 } else {
564 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
565 }
566 }
567
568 /**
569 * stmmac_hwtstamp_set - control hardware timestamping.
570 * @dev: device pointer.
571 * @config: the timestamping configuration.
572 * @extack: netlink extended ack structure for error reporting.
573 * Description:
574 * This function configures the MAC to enable/disable both outgoing (TX)
575 * and incoming (RX) packet timestamping based on user input.
576 * Return Value:
577 * 0 on success and an appropriate -ve integer on failure.
578 */
579 static int stmmac_hwtstamp_set(struct net_device *dev,
580 struct kernel_hwtstamp_config *config,
581 struct netlink_ext_ack *extack)
582 {
583 struct stmmac_priv *priv = netdev_priv(dev);
584 u32 ptp_v2 = 0;
585 u32 tstamp_all = 0;
586 u32 ptp_over_ipv4_udp = 0;
587 u32 ptp_over_ipv6_udp = 0;
588 u32 ptp_over_ethernet = 0;
589 u32 snap_type_sel = 0;
590 u32 ts_master_en = 0;
591 u32 ts_event_en = 0;
592
593 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
594 NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
595 priv->hwts_tx_en = 0;
596 priv->hwts_rx_en = 0;
597
598 return -EOPNOTSUPP;
599 }
600
601 if (!netif_running(dev)) {
602 NL_SET_ERR_MSG_MOD(extack,
603 "Cannot change timestamping configuration while down");
604 return -ENODEV;
605 }
606
607 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
608 __func__, config->flags, config->tx_type, config->rx_filter);
609
610 if (config->tx_type != HWTSTAMP_TX_OFF &&
611 config->tx_type != HWTSTAMP_TX_ON)
612 return -ERANGE;
613
614 if (priv->adv_ts) {
615 switch (config->rx_filter) {
616 case HWTSTAMP_FILTER_NONE:
617 /* time stamp no incoming packet at all */
618 config->rx_filter = HWTSTAMP_FILTER_NONE;
619 break;
620
621 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
622 /* PTP v1, UDP, any kind of event packet */
623 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
624 /* 'xmac' hardware can support Sync, Pdelay_Req and
625 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
626 * This leaves Delay_Req timestamps out.
627 * Enable all events *and* general purpose message
628 * timestamping.
629 */
630 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
631 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
632 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
633 break;
634
635 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
636 /* PTP v1, UDP, Sync packet */
637 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
638 /* take time stamp for SYNC messages only */
639 ts_event_en = PTP_TCR_TSEVNTENA;
640
641 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643 break;
644
645 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
646 /* PTP v1, UDP, Delay_req packet */
647 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
648 /* take time stamp for Delay_Req messages only */
649 ts_master_en = PTP_TCR_TSMSTRENA;
650 ts_event_en = PTP_TCR_TSEVNTENA;
651
652 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
653 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
654 break;
655
656 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
657 /* PTP v2, UDP, any kind of event packet */
658 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
659 ptp_v2 = PTP_TCR_TSVER2ENA;
660 /* take time stamp for all event messages */
661 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
662
663 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
664 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
665 break;
666
667 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
668 /* PTP v2, UDP, Sync packet */
669 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
670 ptp_v2 = PTP_TCR_TSVER2ENA;
671 /* take time stamp for SYNC messages only */
672 ts_event_en = PTP_TCR_TSEVNTENA;
673
674 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
675 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
676 break;
677
678 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
679 /* PTP v2, UDP, Delay_req packet */
680 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
681 ptp_v2 = PTP_TCR_TSVER2ENA;
682 /* take time stamp for Delay_Req messages only */
683 ts_master_en = PTP_TCR_TSMSTRENA;
684 ts_event_en = PTP_TCR_TSEVNTENA;
685
686 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
687 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
688 break;
689
690 case HWTSTAMP_FILTER_PTP_V2_EVENT:
691 /* PTP v2/802.AS1 any layer, any kind of event packet */
692 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
693 ptp_v2 = PTP_TCR_TSVER2ENA;
694 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
695 if (priv->synopsys_id < DWMAC_CORE_4_10)
696 ts_event_en = PTP_TCR_TSEVNTENA;
697 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
698 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
699 ptp_over_ethernet = PTP_TCR_TSIPENA;
700 break;
701
702 case HWTSTAMP_FILTER_PTP_V2_SYNC:
703 /* PTP v2/802.AS1, any layer, Sync packet */
704 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
705 ptp_v2 = PTP_TCR_TSVER2ENA;
706 /* take time stamp for SYNC messages only */
707 ts_event_en = PTP_TCR_TSEVNTENA;
708
709 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
710 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
711 ptp_over_ethernet = PTP_TCR_TSIPENA;
712 break;
713
714 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
715 /* PTP v2/802.AS1, any layer, Delay_req packet */
716 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
717 ptp_v2 = PTP_TCR_TSVER2ENA;
718 /* take time stamp for Delay_Req messages only */
719 ts_master_en = PTP_TCR_TSMSTRENA;
720 ts_event_en = PTP_TCR_TSEVNTENA;
721
722 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 ptp_over_ethernet = PTP_TCR_TSIPENA;
725 break;
726
727 case HWTSTAMP_FILTER_NTP_ALL:
728 case HWTSTAMP_FILTER_ALL:
729 /* time stamp any incoming packet */
730 config->rx_filter = HWTSTAMP_FILTER_ALL;
731 tstamp_all = PTP_TCR_TSENALL;
732 break;
733
734 default:
735 return -ERANGE;
736 }
737 } else {
738 switch (config->rx_filter) {
739 case HWTSTAMP_FILTER_NONE:
740 config->rx_filter = HWTSTAMP_FILTER_NONE;
741 break;
742 default:
743 /* PTP v1, UDP, any kind of event packet */
744 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
745 break;
746 }
747 }
748 priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
749 priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
750
751 priv->systime_flags = STMMAC_HWTS_ACTIVE;
752
753 if (priv->hwts_tx_en || priv->hwts_rx_en) {
754 priv->systime_flags |= tstamp_all | ptp_v2 |
755 ptp_over_ethernet | ptp_over_ipv6_udp |
756 ptp_over_ipv4_udp | ts_event_en |
757 ts_master_en | snap_type_sel;
758 }
759
760 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
761
762 priv->tstamp_config = *config;
763
764 return 0;
765 }
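/* For reference, this handler is reached through the kernel's generic
 * ndo_hwtstamp_set plumbing (e.g. the SIOCSHWTSTAMP ioctl). A minimal
 * userspace sketch using only standard uapi from <linux/net_tstamp.h> and
 * <linux/sockios.h>, with "eth0" and the socket fd as placeholders:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	ifr.ifr_data = (char *)&cfg;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */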
766
767 /**
768 * stmmac_hwtstamp_get - read hardware timestamping.
769 * @dev: device pointer.
770 * @config: the timestamping configuration.
771 * Description:
772 * This function obtains the current hardware timestamping settings
773 * as requested.
774 */
775 static int stmmac_hwtstamp_get(struct net_device *dev,
776 struct kernel_hwtstamp_config *config)
777 {
778 struct stmmac_priv *priv = netdev_priv(dev);
779
780 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
781 return -EOPNOTSUPP;
782
783 *config = priv->tstamp_config;
784
785 return 0;
786 }
787
788 /**
789 * stmmac_init_tstamp_counter - init hardware timestamping counter
790 * @priv: driver private structure
791 * @systime_flags: timestamping flags
792 * Description:
793 * Initialize hardware counter for packet timestamping.
794 * This is valid as long as the interface is open and not suspended.
795 * It is rerun after resuming from suspend, in which case the timestamping
796 * flags updated by stmmac_hwtstamp_set() also need to be restored.
797 */
798 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
799 {
800 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
801 struct timespec64 now;
802 u32 sec_inc = 0;
803 u64 temp = 0;
804
805 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
806 return -EOPNOTSUPP;
807
808 if (!priv->plat->clk_ptp_rate) {
809 netdev_err(priv->dev, "Invalid PTP clock rate");
810 return -EINVAL;
811 }
812
813 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
814 priv->systime_flags = systime_flags;
815
816 /* program Sub Second Increment reg */
817 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
818 priv->plat->clk_ptp_rate,
819 xmac, &sec_inc);
820 temp = div_u64(1000000000ULL, sec_inc);
821
822 /* Store sub second increment for later use */
823 priv->sub_second_inc = sec_inc;
824
825 /* calculate the default addend value:
826 * the formula is:
827 * addend = (2^32)/freq_div_ratio;
828 * where, freq_div_ratio = 1e9ns/sec_inc
829 */
830 temp = (u64)(temp << 32);
831 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
832 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
833
834 /* initialize system time */
835 ktime_get_real_ts64(&now);
836
837 /* lower 32 bits of tv_sec are safe until y2106 */
838 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
839
840 return 0;
841 }
842 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
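/* Worked example of the addend calculation above, assuming a 50 MHz
 * clk_ptp_rate and fine update mode (where the sub-second increment is
 * programmed to twice the clock period): sec_inc = 2e9 / 50e6 = 40 ns,
 * temp = 1e9 / 40 = 25000000, and default_addend =
 * (25000000 << 32) / 50000000 = 0x80000000, so the accumulator overflows
 * once every two ptp clock cycles. The 50 MHz figure is purely illustrative;
 * the real rate comes from the platform's clk_ptp_ref clock.
 */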
843
844 /**
845 * stmmac_init_ptp - init PTP
846 * @priv: driver private structure
847 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
848 * This is done by looking at the HW cap. register.
849 * This function also registers the ptp driver.
850 */
851 static int stmmac_init_ptp(struct stmmac_priv *priv)
852 {
853 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
854 int ret;
855
856 if (priv->plat->ptp_clk_freq_config)
857 priv->plat->ptp_clk_freq_config(priv);
858
859 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
860 if (ret)
861 return ret;
862
863 priv->adv_ts = 0;
864 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
865 if (xmac && priv->dma_cap.atime_stamp)
866 priv->adv_ts = 1;
867 /* Dwmac 3.x core with extend_desc can support adv_ts */
868 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
869 priv->adv_ts = 1;
870
871 if (priv->dma_cap.time_stamp)
872 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
873
874 if (priv->adv_ts)
875 netdev_info(priv->dev,
876 "IEEE 1588-2008 Advanced Timestamp supported\n");
877
878 priv->hwts_tx_en = 0;
879 priv->hwts_rx_en = 0;
880
881 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
882 stmmac_hwtstamp_correct_latency(priv, priv);
883
884 return 0;
885 }
886
887 static void stmmac_release_ptp(struct stmmac_priv *priv)
888 {
889 clk_disable_unprepare(priv->plat->clk_ptp_ref);
890 stmmac_ptp_unregister(priv);
891 }
892
893 /**
894 * stmmac_mac_flow_ctrl - Configure flow control in all queues
895 * @priv: driver private structure
896 * @duplex: duplex passed to the next function
897 * @flow_ctrl: desired flow control modes
898 * Description: It is used for configuring the flow control in all queues
899 */
900 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
901 unsigned int flow_ctrl)
902 {
903 u32 tx_cnt = priv->plat->tx_queues_to_use;
904
905 stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
906 tx_cnt);
907 }
908
909 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
910 phy_interface_t interface)
911 {
912 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
913
914 /* Refresh the MAC-specific capabilities */
915 stmmac_mac_update_caps(priv);
916
917 config->mac_capabilities = priv->hw->link.caps;
918
919 if (priv->plat->max_speed)
920 phylink_limit_mac_speed(config, priv->plat->max_speed);
921
922 return config->mac_capabilities;
923 }
924
925 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
926 phy_interface_t interface)
927 {
928 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
929 struct phylink_pcs *pcs;
930
931 if (priv->plat->select_pcs) {
932 pcs = priv->plat->select_pcs(priv, interface);
933 if (!IS_ERR(pcs))
934 return pcs;
935 }
936
937 return NULL;
938 }
939
940 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
941 const struct phylink_link_state *state)
942 {
943 /* Nothing to do, xpcs_config() handles everything */
944 }
945
946 static void stmmac_mac_link_down(struct phylink_config *config,
947 unsigned int mode, phy_interface_t interface)
948 {
949 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
950
951 stmmac_mac_set(priv, priv->ioaddr, false);
952 if (priv->dma_cap.eee)
953 stmmac_set_eee_pls(priv, priv->hw, false);
954
955 if (stmmac_fpe_supported(priv))
956 ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
957 }
958
959 static void stmmac_mac_link_up(struct phylink_config *config,
960 struct phy_device *phy,
961 unsigned int mode, phy_interface_t interface,
962 int speed, int duplex,
963 bool tx_pause, bool rx_pause)
964 {
965 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
966 unsigned int flow_ctrl;
967 u32 old_ctrl, ctrl;
968 int ret;
969
970 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
971 priv->plat->serdes_powerup)
972 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
973
974 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
975 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
976
977 if (interface == PHY_INTERFACE_MODE_USXGMII) {
978 switch (speed) {
979 case SPEED_10000:
980 ctrl |= priv->hw->link.xgmii.speed10000;
981 break;
982 case SPEED_5000:
983 ctrl |= priv->hw->link.xgmii.speed5000;
984 break;
985 case SPEED_2500:
986 ctrl |= priv->hw->link.xgmii.speed2500;
987 break;
988 default:
989 return;
990 }
991 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
992 switch (speed) {
993 case SPEED_100000:
994 ctrl |= priv->hw->link.xlgmii.speed100000;
995 break;
996 case SPEED_50000:
997 ctrl |= priv->hw->link.xlgmii.speed50000;
998 break;
999 case SPEED_40000:
1000 ctrl |= priv->hw->link.xlgmii.speed40000;
1001 break;
1002 case SPEED_25000:
1003 ctrl |= priv->hw->link.xlgmii.speed25000;
1004 break;
1005 case SPEED_10000:
1006 ctrl |= priv->hw->link.xgmii.speed10000;
1007 break;
1008 case SPEED_2500:
1009 ctrl |= priv->hw->link.speed2500;
1010 break;
1011 case SPEED_1000:
1012 ctrl |= priv->hw->link.speed1000;
1013 break;
1014 default:
1015 return;
1016 }
1017 } else {
1018 switch (speed) {
1019 case SPEED_2500:
1020 ctrl |= priv->hw->link.speed2500;
1021 break;
1022 case SPEED_1000:
1023 ctrl |= priv->hw->link.speed1000;
1024 break;
1025 case SPEED_100:
1026 ctrl |= priv->hw->link.speed100;
1027 break;
1028 case SPEED_10:
1029 ctrl |= priv->hw->link.speed10;
1030 break;
1031 default:
1032 return;
1033 }
1034 }
1035
1036 if (priv->plat->fix_mac_speed)
1037 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1038
1039 if (!duplex)
1040 ctrl &= ~priv->hw->link.duplex;
1041 else
1042 ctrl |= priv->hw->link.duplex;
1043
1044 /* Flow Control operation */
1045 if (rx_pause && tx_pause)
1046 flow_ctrl = FLOW_AUTO;
1047 else if (rx_pause && !tx_pause)
1048 flow_ctrl = FLOW_RX;
1049 else if (!rx_pause && tx_pause)
1050 flow_ctrl = FLOW_TX;
1051 else
1052 flow_ctrl = FLOW_OFF;
1053
1054 stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
1055
1056 if (ctrl != old_ctrl)
1057 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1058
1059 if (priv->plat->set_clk_tx_rate) {
1060 ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
1061 priv->plat->clk_tx_i,
1062 interface, speed);
1063 if (ret < 0)
1064 netdev_err(priv->dev,
1065 "failed to configure %s transmit clock for %dMbps: %pe\n",
1066 phy_modes(interface), speed, ERR_PTR(ret));
1067 }
1068
1069 stmmac_mac_set(priv, priv->ioaddr, true);
1070 if (priv->dma_cap.eee)
1071 stmmac_set_eee_pls(priv, priv->hw, true);
1072
1073 if (stmmac_fpe_supported(priv))
1074 ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
1075
1076 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1077 stmmac_hwtstamp_correct_latency(priv, priv);
1078 }
1079
1080 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1081 {
1082 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1083
1084 priv->eee_active = false;
1085
1086 mutex_lock(&priv->lock);
1087
1088 priv->eee_enabled = false;
1089
1090 netdev_dbg(priv->dev, "disable EEE\n");
1091 priv->eee_sw_timer_en = false;
1092 timer_delete_sync(&priv->eee_ctrl_timer);
1093 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1094 priv->tx_path_in_lpi_mode = false;
1095
1096 stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1097 mutex_unlock(&priv->lock);
1098 }
1099
1100 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1101 bool tx_clk_stop)
1102 {
1103 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1104 int ret;
1105
1106 priv->tx_lpi_timer = timer;
1107 priv->eee_active = true;
1108
1109 mutex_lock(&priv->lock);
1110
1111 priv->eee_enabled = true;
1112
1113 /* Update the transmit clock stop according to PHY capability if
1114 * the platform allows
1115 */
1116 if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1117 priv->tx_lpi_clk_stop = tx_clk_stop;
1118
1119 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1120 STMMAC_DEFAULT_TWT_LS);
1121
1122 /* Try to configure the hardware timer. */
1123 ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1124 priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1125
1126 if (ret) {
1127 /* Hardware timer mode not supported, or value out of range.
1128 * Fall back to using software LPI mode
1129 */
1130 priv->eee_sw_timer_en = true;
1131 stmmac_restart_sw_lpi_timer(priv);
1132 }
1133
1134 mutex_unlock(&priv->lock);
1135 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1136
1137 return 0;
1138 }
1139
1140 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1141 phy_interface_t interface)
1142 {
1143 struct net_device *ndev = to_net_dev(config->dev);
1144 struct stmmac_priv *priv = netdev_priv(ndev);
1145
1146 if (priv->plat->mac_finish)
1147 priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1148
1149 return 0;
1150 }
1151
1152 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1153 .mac_get_caps = stmmac_mac_get_caps,
1154 .mac_select_pcs = stmmac_mac_select_pcs,
1155 .mac_config = stmmac_mac_config,
1156 .mac_link_down = stmmac_mac_link_down,
1157 .mac_link_up = stmmac_mac_link_up,
1158 .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1159 .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1160 .mac_finish = stmmac_mac_finish,
1161 };
1162
1163 /**
1164 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1165 * @priv: driver private structure
1166 * Description: this is to verify if the HW supports the PCS.
1167 * The Physical Coding Sublayer (PCS) is an interface that can be used when
1168 * the MAC is configured for the TBI, RTBI, or SGMII PHY interface.
1169 */
1170 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1171 {
1172 int interface = priv->plat->mac_interface;
1173
1174 if (priv->dma_cap.pcs) {
1175 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1176 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1177 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1178 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1179 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1180 priv->hw->pcs = STMMAC_PCS_RGMII;
1181 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1182 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1183 priv->hw->pcs = STMMAC_PCS_SGMII;
1184 }
1185 }
1186 }
1187
1188 /**
1189 * stmmac_init_phy - PHY initialization
1190 * @dev: net device structure
1191 * Description: it initializes the driver's PHY state, and attaches the PHY
1192 * to the mac driver.
1193 * Return value:
1194 * 0 on success
1195 */
1196 static int stmmac_init_phy(struct net_device *dev)
1197 {
1198 struct stmmac_priv *priv = netdev_priv(dev);
1199 struct fwnode_handle *phy_fwnode;
1200 struct fwnode_handle *fwnode;
1201 int ret;
1202
1203 if (!phylink_expects_phy(priv->phylink))
1204 return 0;
1205
1206 fwnode = priv->plat->port_node;
1207 if (!fwnode)
1208 fwnode = dev_fwnode(priv->device);
1209
1210 if (fwnode)
1211 phy_fwnode = fwnode_get_phy_node(fwnode);
1212 else
1213 phy_fwnode = NULL;
1214
1215 /* Some DT bindings do not set up the PHY handle. Let's try to
1216 * manually parse it.
1217 */
1218 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1219 int addr = priv->plat->phy_addr;
1220 struct phy_device *phydev;
1221
1222 if (addr < 0) {
1223 netdev_err(priv->dev, "no phy found\n");
1224 return -ENODEV;
1225 }
1226
1227 phydev = mdiobus_get_phy(priv->mii, addr);
1228 if (!phydev) {
1229 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1230 return -ENODEV;
1231 }
1232
1233 ret = phylink_connect_phy(priv->phylink, phydev);
1234 } else {
1235 fwnode_handle_put(phy_fwnode);
1236 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1237 }
1238
1239 if (ret == 0) {
1240 struct ethtool_keee eee;
1241
1242 /* Configure phylib's copy of the LPI timer. Normally,
1243 * phylink_config.lpi_timer_default would do this, but there is
1244 * a chance that userspace could change the eee_timer setting
1245 * via sysfs before the first open. Thus, preserve existing
1246 * behaviour.
1247 */
1248 if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1249 eee.tx_lpi_timer = priv->tx_lpi_timer;
1250 phylink_ethtool_set_eee(priv->phylink, &eee);
1251 }
1252 }
1253
1254 if (!priv->plat->pmt) {
1255 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1256
1257 phylink_ethtool_get_wol(priv->phylink, &wol);
1258 device_set_wakeup_capable(priv->device, !!wol.supported);
1259 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1260 }
1261
1262 return ret;
1263 }
1264
1265 static int stmmac_phy_setup(struct stmmac_priv *priv)
1266 {
1267 struct stmmac_mdio_bus_data *mdio_bus_data;
1268 struct phylink_config *config;
1269 struct fwnode_handle *fwnode;
1270 struct phylink_pcs *pcs;
1271 struct phylink *phylink;
1272
1273 config = &priv->phylink_config;
1274
1275 config->dev = &priv->dev->dev;
1276 config->type = PHYLINK_NETDEV;
1277 config->mac_managed_pm = true;
1278
1279 /* Stmmac always requires an RX clock for hardware initialization */
1280 config->mac_requires_rxc = true;
1281
1282 if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1283 config->eee_rx_clk_stop_enable = true;
1284
1285 /* Set the default transmit clock stop bit based on the platform glue */
1286 priv->tx_lpi_clk_stop = priv->plat->flags &
1287 STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1288
1289 mdio_bus_data = priv->plat->mdio_bus_data;
1290 if (mdio_bus_data)
1291 config->default_an_inband = mdio_bus_data->default_an_inband;
1292
1293 /* Get the PHY interface modes (at the PHY end of the link) that
1294 * are supported by the platform.
1295 */
1296 if (priv->plat->get_interfaces)
1297 priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1298 config->supported_interfaces);
1299
1300 /* As a last resort, if the supported interfaces have not already been
1301 * provided, set the platform/firmware specified interface mode from
1302 * phy_interface.
1303 */
1304 if (phy_interface_empty(config->supported_interfaces))
1305 __set_bit(priv->plat->phy_interface,
1306 config->supported_interfaces);
1307
1308 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1309 if (priv->hw->xpcs)
1310 pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1311 else
1312 pcs = priv->hw->phylink_pcs;
1313
1314 if (pcs)
1315 phy_interface_or(config->supported_interfaces,
1316 config->supported_interfaces,
1317 pcs->supported_interfaces);
1318
1319 if (priv->dma_cap.eee) {
1320 /* Assume all supported interfaces also support LPI */
1321 memcpy(config->lpi_interfaces, config->supported_interfaces,
1322 sizeof(config->lpi_interfaces));
1323
1324 /* Full duplex speeds of 100Mbps and above are supported */
1325 config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
1326 config->lpi_timer_default = eee_timer * 1000;
1327 config->eee_enabled_default = true;
1328 }
1329
1330 fwnode = priv->plat->port_node;
1331 if (!fwnode)
1332 fwnode = dev_fwnode(priv->device);
1333
1334 phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1335 &stmmac_phylink_mac_ops);
1336 if (IS_ERR(phylink))
1337 return PTR_ERR(phylink);
1338
1339 priv->phylink = phylink;
1340 return 0;
1341 }
1342
1343 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1344 struct stmmac_dma_conf *dma_conf)
1345 {
1346 u32 rx_cnt = priv->plat->rx_queues_to_use;
1347 unsigned int desc_size;
1348 void *head_rx;
1349 u32 queue;
1350
1351 /* Display RX rings */
1352 for (queue = 0; queue < rx_cnt; queue++) {
1353 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1354
1355 pr_info("\tRX Queue %u rings\n", queue);
1356
1357 if (priv->extend_desc) {
1358 head_rx = (void *)rx_q->dma_erx;
1359 desc_size = sizeof(struct dma_extended_desc);
1360 } else {
1361 head_rx = (void *)rx_q->dma_rx;
1362 desc_size = sizeof(struct dma_desc);
1363 }
1364
1365 /* Display RX ring */
1366 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1367 rx_q->dma_rx_phy, desc_size);
1368 }
1369 }
1370
1371 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1372 struct stmmac_dma_conf *dma_conf)
1373 {
1374 u32 tx_cnt = priv->plat->tx_queues_to_use;
1375 unsigned int desc_size;
1376 void *head_tx;
1377 u32 queue;
1378
1379 /* Display TX rings */
1380 for (queue = 0; queue < tx_cnt; queue++) {
1381 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1382
1383 pr_info("\tTX Queue %d rings\n", queue);
1384
1385 if (priv->extend_desc) {
1386 head_tx = (void *)tx_q->dma_etx;
1387 desc_size = sizeof(struct dma_extended_desc);
1388 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1389 head_tx = (void *)tx_q->dma_entx;
1390 desc_size = sizeof(struct dma_edesc);
1391 } else {
1392 head_tx = (void *)tx_q->dma_tx;
1393 desc_size = sizeof(struct dma_desc);
1394 }
1395
1396 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1397 tx_q->dma_tx_phy, desc_size);
1398 }
1399 }
1400
1401 static void stmmac_display_rings(struct stmmac_priv *priv,
1402 struct stmmac_dma_conf *dma_conf)
1403 {
1404 /* Display RX ring */
1405 stmmac_display_rx_rings(priv, dma_conf);
1406
1407 /* Display TX ring */
1408 stmmac_display_tx_rings(priv, dma_conf);
1409 }
1410
1411 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1412 {
1413 if (stmmac_xdp_is_enabled(priv))
1414 return XDP_PACKET_HEADROOM;
1415
1416 return NET_SKB_PAD;
1417 }
1418
1419 static int stmmac_set_bfsize(int mtu, int bufsize)
1420 {
1421 int ret = bufsize;
1422
1423 if (mtu >= BUF_SIZE_8KiB)
1424 ret = BUF_SIZE_16KiB;
1425 else if (mtu >= BUF_SIZE_4KiB)
1426 ret = BUF_SIZE_8KiB;
1427 else if (mtu >= BUF_SIZE_2KiB)
1428 ret = BUF_SIZE_4KiB;
1429 else if (mtu > DEFAULT_BUFSIZE)
1430 ret = BUF_SIZE_2KiB;
1431 else
1432 ret = DEFAULT_BUFSIZE;
1433
1434 return ret;
1435 }
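/* Worked example: an MTU of 3000 is at least BUF_SIZE_2KiB but below
 * BUF_SIZE_4KiB, so a 4 KiB DMA buffer is selected, while the standard
 * 1500-byte MTU is not above DEFAULT_BUFSIZE and keeps the default
 * 1536-byte buffer.
 */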
1436
1437 /**
1438 * stmmac_clear_rx_descriptors - clear RX descriptors
1439 * @priv: driver private structure
1440 * @dma_conf: structure to take the dma data
1441 * @queue: RX queue index
1442 * Description: this function is called to clear the RX descriptors
1443 * whether basic or extended descriptors are used.
1444 */
1445 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1446 struct stmmac_dma_conf *dma_conf,
1447 u32 queue)
1448 {
1449 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1450 int i;
1451
1452 /* Clear the RX descriptors */
1453 for (i = 0; i < dma_conf->dma_rx_size; i++)
1454 if (priv->extend_desc)
1455 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1456 priv->use_riwt, priv->mode,
1457 (i == dma_conf->dma_rx_size - 1),
1458 dma_conf->dma_buf_sz);
1459 else
1460 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1461 priv->use_riwt, priv->mode,
1462 (i == dma_conf->dma_rx_size - 1),
1463 dma_conf->dma_buf_sz);
1464 }
1465
1466 /**
1467 * stmmac_clear_tx_descriptors - clear tx descriptors
1468 * @priv: driver private structure
1469 * @dma_conf: structure to take the dma data
1470 * @queue: TX queue index.
1471 * Description: this function is called to clear the TX descriptors
1472 * whether basic or extended descriptors are used.
1473 */
1474 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1475 struct stmmac_dma_conf *dma_conf,
1476 u32 queue)
1477 {
1478 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1479 int i;
1480
1481 /* Clear the TX descriptors */
1482 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1483 int last = (i == (dma_conf->dma_tx_size - 1));
1484 struct dma_desc *p;
1485
1486 if (priv->extend_desc)
1487 p = &tx_q->dma_etx[i].basic;
1488 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1489 p = &tx_q->dma_entx[i].basic;
1490 else
1491 p = &tx_q->dma_tx[i];
1492
1493 stmmac_init_tx_desc(priv, p, priv->mode, last);
1494 }
1495 }
1496
1497 /**
1498 * stmmac_clear_descriptors - clear descriptors
1499 * @priv: driver private structure
1500 * @dma_conf: structure to take the dma data
1501 * Description: this function is called to clear the TX and RX descriptors
1502 * whether basic or extended descriptors are used.
1503 */
1504 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1505 struct stmmac_dma_conf *dma_conf)
1506 {
1507 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1508 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1509 u32 queue;
1510
1511 /* Clear the RX descriptors */
1512 for (queue = 0; queue < rx_queue_cnt; queue++)
1513 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1514
1515 /* Clear the TX descriptors */
1516 for (queue = 0; queue < tx_queue_cnt; queue++)
1517 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1518 }
1519
1520 /**
1521 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1522 * @priv: driver private structure
1523 * @dma_conf: structure to take the dma data
1524 * @p: descriptor pointer
1525 * @i: descriptor index
1526 * @flags: gfp flag
1527 * @queue: RX queue index
1528 * Description: this function is called to allocate a receive buffer, perform
1529 * the DMA mapping and init the descriptor.
1530 */
1531 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1532 struct stmmac_dma_conf *dma_conf,
1533 struct dma_desc *p,
1534 int i, gfp_t flags, u32 queue)
1535 {
1536 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1537 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1538 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1539
1540 if (priv->dma_cap.host_dma_width <= 32)
1541 gfp |= GFP_DMA32;
1542
1543 if (!buf->page) {
1544 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1545 if (!buf->page)
1546 return -ENOMEM;
1547 buf->page_offset = stmmac_rx_offset(priv);
1548 }
1549
1550 if (priv->sph && !buf->sec_page) {
1551 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1552 if (!buf->sec_page)
1553 return -ENOMEM;
1554
1555 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1556 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1557 } else {
1558 buf->sec_page = NULL;
1559 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1560 }
1561
1562 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1563
1564 stmmac_set_desc_addr(priv, p, buf->addr);
1565 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1566 stmmac_init_desc3(priv, p);
1567
1568 return 0;
1569 }
1570
1571 /**
1572 * stmmac_free_rx_buffer - free RX dma buffers
1573 * @priv: private structure
1574 * @rx_q: RX queue
1575 * @i: buffer index.
1576 */
1577 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1578 struct stmmac_rx_queue *rx_q,
1579 int i)
1580 {
1581 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1582
1583 if (buf->page)
1584 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1585 buf->page = NULL;
1586
1587 if (buf->sec_page)
1588 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1589 buf->sec_page = NULL;
1590 }
1591
1592 /**
1593 * stmmac_free_tx_buffer - free TX dma buffers
1594 * @priv: private structure
1595 * @dma_conf: structure to take the dma data
1596 * @queue: TX queue index
1597 * @i: buffer index.
1598 */
1599 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1600 struct stmmac_dma_conf *dma_conf,
1601 u32 queue, int i)
1602 {
1603 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1604
1605 if (tx_q->tx_skbuff_dma[i].buf &&
1606 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1607 if (tx_q->tx_skbuff_dma[i].map_as_page)
1608 dma_unmap_page(priv->device,
1609 tx_q->tx_skbuff_dma[i].buf,
1610 tx_q->tx_skbuff_dma[i].len,
1611 DMA_TO_DEVICE);
1612 else
1613 dma_unmap_single(priv->device,
1614 tx_q->tx_skbuff_dma[i].buf,
1615 tx_q->tx_skbuff_dma[i].len,
1616 DMA_TO_DEVICE);
1617 }
1618
1619 if (tx_q->xdpf[i] &&
1620 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1621 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1622 xdp_return_frame(tx_q->xdpf[i]);
1623 tx_q->xdpf[i] = NULL;
1624 }
1625
1626 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1627 tx_q->xsk_frames_done++;
1628
1629 if (tx_q->tx_skbuff[i] &&
1630 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1631 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1632 tx_q->tx_skbuff[i] = NULL;
1633 }
1634
1635 tx_q->tx_skbuff_dma[i].buf = 0;
1636 tx_q->tx_skbuff_dma[i].map_as_page = false;
1637 }
1638
1639 /**
1640 * dma_free_rx_skbufs - free RX dma buffers
1641 * @priv: private structure
1642 * @dma_conf: structure to take the dma data
1643 * @queue: RX queue index
1644 */
1645 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1646 struct stmmac_dma_conf *dma_conf,
1647 u32 queue)
1648 {
1649 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1650 int i;
1651
1652 for (i = 0; i < dma_conf->dma_rx_size; i++)
1653 stmmac_free_rx_buffer(priv, rx_q, i);
1654 }
1655
1656 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1657 struct stmmac_dma_conf *dma_conf,
1658 u32 queue, gfp_t flags)
1659 {
1660 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1661 int i;
1662
1663 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1664 struct dma_desc *p;
1665 int ret;
1666
1667 if (priv->extend_desc)
1668 p = &((rx_q->dma_erx + i)->basic);
1669 else
1670 p = rx_q->dma_rx + i;
1671
1672 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1673 queue);
1674 if (ret)
1675 return ret;
1676
1677 rx_q->buf_alloc_num++;
1678 }
1679
1680 return 0;
1681 }
1682
1683 /**
1684 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1685 * @priv: private structure
1686 * @dma_conf: structure to take the dma data
1687 * @queue: RX queue index
1688 */
1689 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1690 struct stmmac_dma_conf *dma_conf,
1691 u32 queue)
1692 {
1693 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1694 int i;
1695
1696 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1697 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1698
1699 if (!buf->xdp)
1700 continue;
1701
1702 xsk_buff_free(buf->xdp);
1703 buf->xdp = NULL;
1704 }
1705 }
1706
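/**
 * stmmac_alloc_rx_buffers_zc - allocate RX dma buffers from the XSK pool
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: it fills every RX descriptor of the queue with a zero-copy
 * buffer taken from the queue's XSK buffer pool and programs its DMA
 * address into the descriptor.
 */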
1707 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1708 struct stmmac_dma_conf *dma_conf,
1709 u32 queue)
1710 {
1711 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1712 int i;
1713
1714 /* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1715 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1716 * use this macro to make sure there are no size violations.
1717 */
1718 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1719
1720 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1721 struct stmmac_rx_buffer *buf;
1722 dma_addr_t dma_addr;
1723 struct dma_desc *p;
1724
1725 if (priv->extend_desc)
1726 p = (struct dma_desc *)(rx_q->dma_erx + i);
1727 else
1728 p = rx_q->dma_rx + i;
1729
1730 buf = &rx_q->buf_pool[i];
1731
1732 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1733 if (!buf->xdp)
1734 return -ENOMEM;
1735
1736 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1737 stmmac_set_desc_addr(priv, p, dma_addr);
1738 rx_q->buf_alloc_num++;
1739 }
1740
1741 return 0;
1742 }
1743
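/**
 * stmmac_get_xsk_pool - get the XSK buffer pool bound to a queue
 * @priv: driver private structure
 * @queue: queue index
 * Description: returns the XSK pool registered for the queue, or NULL if
 * XDP is not enabled or the queue is not in zero-copy mode.
 */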
1744 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1745 {
1746 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1747 return NULL;
1748
1749 return xsk_get_pool_from_qid(priv->dev, queue);
1750 }
1751
1752 /**
1753 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1754 * @priv: driver private structure
1755 * @dma_conf: structure to take the dma data
1756 * @queue: RX queue index
1757 * @flags: gfp flag.
1758 * Description: this function initializes the DMA RX descriptors
1759 * and allocates the socket buffers. It supports the chained and ring
1760 * modes.
1761 */
1762 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1763 struct stmmac_dma_conf *dma_conf,
1764 u32 queue, gfp_t flags)
1765 {
1766 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1767 int ret;
1768
1769 netif_dbg(priv, probe, priv->dev,
1770 "(%s) dma_rx_phy=0x%08x\n", __func__,
1771 (u32)rx_q->dma_rx_phy);
1772
1773 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1774
1775 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1776
1777 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1778
1779 if (rx_q->xsk_pool) {
1780 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1781 MEM_TYPE_XSK_BUFF_POOL,
1782 NULL));
1783 netdev_info(priv->dev,
1784 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1785 rx_q->queue_index);
1786 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1787 } else {
1788 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1789 MEM_TYPE_PAGE_POOL,
1790 rx_q->page_pool));
1791 netdev_info(priv->dev,
1792 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1793 rx_q->queue_index);
1794 }
1795
1796 if (rx_q->xsk_pool) {
1797 /* RX XDP ZC buffer pool may not be populated, e.g.
1798 * xdpsock TX-only.
1799 */
1800 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1801 } else {
1802 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1803 if (ret < 0)
1804 return -ENOMEM;
1805 }
1806
1807 /* Setup the chained descriptor addresses */
1808 if (priv->mode == STMMAC_CHAIN_MODE) {
1809 if (priv->extend_desc)
1810 stmmac_mode_init(priv, rx_q->dma_erx,
1811 rx_q->dma_rx_phy,
1812 dma_conf->dma_rx_size, 1);
1813 else
1814 stmmac_mode_init(priv, rx_q->dma_rx,
1815 rx_q->dma_rx_phy,
1816 dma_conf->dma_rx_size, 0);
1817 }
1818
1819 return 0;
1820 }
1821
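/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: it initializes the RX ring of every queue and, on failure,
 * releases the buffers of the queues initialized so far.
 */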
1822 static int init_dma_rx_desc_rings(struct net_device *dev,
1823 struct stmmac_dma_conf *dma_conf,
1824 gfp_t flags)
1825 {
1826 struct stmmac_priv *priv = netdev_priv(dev);
1827 u32 rx_count = priv->plat->rx_queues_to_use;
1828 int queue;
1829 int ret;
1830
1831 /* RX INITIALIZATION */
1832 netif_dbg(priv, probe, priv->dev,
1833 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1834
1835 for (queue = 0; queue < rx_count; queue++) {
1836 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1837 if (ret)
1838 goto err_init_rx_buffers;
1839 }
1840
1841 return 0;
1842
1843 err_init_rx_buffers:
1844 while (queue >= 0) {
1845 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1846
1847 if (rx_q->xsk_pool)
1848 dma_free_rx_xskbufs(priv, dma_conf, queue);
1849 else
1850 dma_free_rx_skbufs(priv, dma_conf, queue);
1851
1852 rx_q->buf_alloc_num = 0;
1853 rx_q->xsk_pool = NULL;
1854
1855 queue--;
1856 }
1857
1858 return ret;
1859 }
1860
1861 /**
1862 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1863 * @priv: driver private structure
1864 * @dma_conf: structure to take the dma data
1865 * @queue: TX queue index
1866 * Description: this function initializes the DMA TX descriptors
1867 * and allocates the socket buffers. It supports the chained and ring
1868 * modes.
1869 */
1870 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1871 struct stmmac_dma_conf *dma_conf,
1872 u32 queue)
1873 {
1874 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1875 int i;
1876
1877 netif_dbg(priv, probe, priv->dev,
1878 "(%s) dma_tx_phy=0x%08x\n", __func__,
1879 (u32)tx_q->dma_tx_phy);
1880
1881 /* Setup the chained descriptor addresses */
1882 if (priv->mode == STMMAC_CHAIN_MODE) {
1883 if (priv->extend_desc)
1884 stmmac_mode_init(priv, tx_q->dma_etx,
1885 tx_q->dma_tx_phy,
1886 dma_conf->dma_tx_size, 1);
1887 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1888 stmmac_mode_init(priv, tx_q->dma_tx,
1889 tx_q->dma_tx_phy,
1890 dma_conf->dma_tx_size, 0);
1891 }
1892
1893 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1894
1895 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1896 struct dma_desc *p;
1897
1898 if (priv->extend_desc)
1899 p = &((tx_q->dma_etx + i)->basic);
1900 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1901 p = &((tx_q->dma_entx + i)->basic);
1902 else
1903 p = tx_q->dma_tx + i;
1904
1905 stmmac_clear_desc(priv, p);
1906
1907 tx_q->tx_skbuff_dma[i].buf = 0;
1908 tx_q->tx_skbuff_dma[i].map_as_page = false;
1909 tx_q->tx_skbuff_dma[i].len = 0;
1910 tx_q->tx_skbuff_dma[i].last_segment = false;
1911 tx_q->tx_skbuff[i] = NULL;
1912 }
1913
1914 return 0;
1915 }
1916
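/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * Description: it initializes the TX descriptor ring of every TX queue.
 */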
1917 static int init_dma_tx_desc_rings(struct net_device *dev,
1918 struct stmmac_dma_conf *dma_conf)
1919 {
1920 struct stmmac_priv *priv = netdev_priv(dev);
1921 u32 tx_queue_cnt;
1922 u32 queue;
1923
1924 tx_queue_cnt = priv->plat->tx_queues_to_use;
1925
1926 for (queue = 0; queue < tx_queue_cnt; queue++)
1927 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1928
1929 return 0;
1930 }
1931
1932 /**
1933 * init_dma_desc_rings - init the RX/TX descriptor rings
1934 * @dev: net device structure
1935 * @dma_conf: structure to take the dma data
1936 * @flags: gfp flag.
1937 * Description: this function initializes the DMA RX/TX descriptors
1938 * and allocates the socket buffers. It supports the chained and ring
1939 * modes.
1940 */
1941 static int init_dma_desc_rings(struct net_device *dev,
1942 struct stmmac_dma_conf *dma_conf,
1943 gfp_t flags)
1944 {
1945 struct stmmac_priv *priv = netdev_priv(dev);
1946 int ret;
1947
1948 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1949 if (ret)
1950 return ret;
1951
1952 ret = init_dma_tx_desc_rings(dev, dma_conf);
1953
1954 stmmac_clear_descriptors(priv, dma_conf);
1955
1956 if (netif_msg_hw(priv))
1957 stmmac_display_rings(priv, dma_conf);
1958
1959 return ret;
1960 }
1961
1962 /**
1963 * dma_free_tx_skbufs - free TX dma buffers
1964 * @priv: private structure
1965 * @dma_conf: structure to take the dma data
1966 * @queue: TX queue index
1967 */
1968 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1969 struct stmmac_dma_conf *dma_conf,
1970 u32 queue)
1971 {
1972 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1973 int i;
1974
1975 tx_q->xsk_frames_done = 0;
1976
1977 for (i = 0; i < dma_conf->dma_tx_size; i++)
1978 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1979
1980 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1981 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1982 tx_q->xsk_frames_done = 0;
1983 tx_q->xsk_pool = NULL;
1984 }
1985 }
1986
1987 /**
1988 * stmmac_free_tx_skbufs - free TX skb buffers
1989 * @priv: private structure
1990 */
1991 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1992 {
1993 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1994 u32 queue;
1995
1996 for (queue = 0; queue < tx_queue_cnt; queue++)
1997 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1998 }
1999
2000 /**
2001 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
2002 * @priv: private structure
2003 * @dma_conf: structure to take the dma data
2004 * @queue: RX queue index
2005 */
2006 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
2007 struct stmmac_dma_conf *dma_conf,
2008 u32 queue)
2009 {
2010 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2011
2012 /* Release the DMA RX socket buffers */
2013 if (rx_q->xsk_pool)
2014 dma_free_rx_xskbufs(priv, dma_conf, queue);
2015 else
2016 dma_free_rx_skbufs(priv, dma_conf, queue);
2017
2018 rx_q->buf_alloc_num = 0;
2019 rx_q->xsk_pool = NULL;
2020
2021 /* Free DMA regions of consistent memory previously allocated */
2022 if (!priv->extend_desc)
2023 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2024 sizeof(struct dma_desc),
2025 rx_q->dma_rx, rx_q->dma_rx_phy);
2026 else
2027 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2028 sizeof(struct dma_extended_desc),
2029 rx_q->dma_erx, rx_q->dma_rx_phy);
2030
2031 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
2032 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
2033
2034 kfree(rx_q->buf_pool);
2035 if (rx_q->page_pool)
2036 page_pool_destroy(rx_q->page_pool);
2037 }
2038
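/**
 * free_dma_rx_desc_resources - free RX dma desc resources of all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */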
2039 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2040 struct stmmac_dma_conf *dma_conf)
2041 {
2042 u32 rx_count = priv->plat->rx_queues_to_use;
2043 u32 queue;
2044
2045 /* Free RX queue resources */
2046 for (queue = 0; queue < rx_count; queue++)
2047 __free_dma_rx_desc_resources(priv, dma_conf, queue);
2048 }
2049
2050 /**
2051 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2052 * @priv: private structure
2053 * @dma_conf: structure to take the dma data
2054 * @queue: TX queue index
2055 */
2056 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2057 struct stmmac_dma_conf *dma_conf,
2058 u32 queue)
2059 {
2060 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2061 size_t size;
2062 void *addr;
2063
2064 /* Release the DMA TX socket buffers */
2065 dma_free_tx_skbufs(priv, dma_conf, queue);
2066
2067 if (priv->extend_desc) {
2068 size = sizeof(struct dma_extended_desc);
2069 addr = tx_q->dma_etx;
2070 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2071 size = sizeof(struct dma_edesc);
2072 addr = tx_q->dma_entx;
2073 } else {
2074 size = sizeof(struct dma_desc);
2075 addr = tx_q->dma_tx;
2076 }
2077
2078 size *= dma_conf->dma_tx_size;
2079
2080 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2081
2082 kfree(tx_q->tx_skbuff_dma);
2083 kfree(tx_q->tx_skbuff);
2084 }
2085
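/**
 * free_dma_tx_desc_resources - free TX dma desc resources of all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 */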
2086 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2087 struct stmmac_dma_conf *dma_conf)
2088 {
2089 u32 tx_count = priv->plat->tx_queues_to_use;
2090 u32 queue;
2091
2092 /* Free TX queue resources */
2093 for (queue = 0; queue < tx_count; queue++)
2094 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2095 }
2096
2097 /**
2098 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2099 * @priv: private structure
2100 * @dma_conf: structure to take the dma data
2101 * @queue: RX queue index
2102 * Description: according to which descriptor type is in use (extended or
2103 * basic), this function allocates the resources for the RX path. It
2104 * pre-allocates the RX socket buffers in order to allow a zero-copy
2105 * mechanism.
2106 */
2107 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2108 struct stmmac_dma_conf *dma_conf,
2109 u32 queue)
2110 {
2111 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2112 struct stmmac_channel *ch = &priv->channel[queue];
2113 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2114 struct page_pool_params pp_params = { 0 };
2115 unsigned int dma_buf_sz_pad, num_pages;
2116 unsigned int napi_id;
2117 int ret;
2118
2119 dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2120 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2121 num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2122
2123 rx_q->queue_index = queue;
2124 rx_q->priv_data = priv;
2125 rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2126
2127 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2128 pp_params.pool_size = dma_conf->dma_rx_size;
2129 pp_params.order = order_base_2(num_pages);
2130 pp_params.nid = dev_to_node(priv->device);
2131 pp_params.dev = priv->device;
2132 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2133 pp_params.offset = stmmac_rx_offset(priv);
2134 pp_params.max_len = dma_conf->dma_buf_sz;
2135
2136 if (priv->sph) {
2137 pp_params.offset = 0;
2138 pp_params.max_len += stmmac_rx_offset(priv);
2139 }
2140
2141 rx_q->page_pool = page_pool_create(&pp_params);
2142 if (IS_ERR(rx_q->page_pool)) {
2143 ret = PTR_ERR(rx_q->page_pool);
2144 rx_q->page_pool = NULL;
2145 return ret;
2146 }
2147
2148 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2149 sizeof(*rx_q->buf_pool),
2150 GFP_KERNEL);
2151 if (!rx_q->buf_pool)
2152 return -ENOMEM;
2153
2154 if (priv->extend_desc) {
2155 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2156 dma_conf->dma_rx_size *
2157 sizeof(struct dma_extended_desc),
2158 &rx_q->dma_rx_phy,
2159 GFP_KERNEL);
2160 if (!rx_q->dma_erx)
2161 return -ENOMEM;
2162
2163 } else {
2164 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2165 dma_conf->dma_rx_size *
2166 sizeof(struct dma_desc),
2167 &rx_q->dma_rx_phy,
2168 GFP_KERNEL);
2169 if (!rx_q->dma_rx)
2170 return -ENOMEM;
2171 }
2172
2173 if (stmmac_xdp_is_enabled(priv) &&
2174 test_bit(queue, priv->af_xdp_zc_qps))
2175 napi_id = ch->rxtx_napi.napi_id;
2176 else
2177 napi_id = ch->rx_napi.napi_id;
2178
2179 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2180 rx_q->queue_index,
2181 napi_id);
2182 if (ret) {
2183 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2184 return -EINVAL;
2185 }
2186
2187 return 0;
2188 }
2189
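/**
 * alloc_dma_rx_desc_resources - alloc RX resources for all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: it allocates the per-queue RX resources and releases all of
 * them if any allocation fails.
 */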
2190 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2191 struct stmmac_dma_conf *dma_conf)
2192 {
2193 u32 rx_count = priv->plat->rx_queues_to_use;
2194 u32 queue;
2195 int ret;
2196
2197 /* RX queues buffers and DMA */
2198 for (queue = 0; queue < rx_count; queue++) {
2199 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2200 if (ret)
2201 goto err_dma;
2202 }
2203
2204 return 0;
2205
2206 err_dma:
2207 free_dma_rx_desc_resources(priv, dma_conf);
2208
2209 return ret;
2210 }
2211
2212 /**
2213 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2214 * @priv: private structure
2215 * @dma_conf: structure to take the dma data
2216 * @queue: TX queue index
2217 * Description: according to which descriptor type is in use (extended or
2218 * basic), this function allocates the descriptor ring and the bookkeeping
2219 * arrays for the TX path.
2221 */
2222 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2223 struct stmmac_dma_conf *dma_conf,
2224 u32 queue)
2225 {
2226 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2227 size_t size;
2228 void *addr;
2229
2230 tx_q->queue_index = queue;
2231 tx_q->priv_data = priv;
2232
2233 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2234 sizeof(*tx_q->tx_skbuff_dma),
2235 GFP_KERNEL);
2236 if (!tx_q->tx_skbuff_dma)
2237 return -ENOMEM;
2238
2239 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2240 sizeof(struct sk_buff *),
2241 GFP_KERNEL);
2242 if (!tx_q->tx_skbuff)
2243 return -ENOMEM;
2244
2245 if (priv->extend_desc)
2246 size = sizeof(struct dma_extended_desc);
2247 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2248 size = sizeof(struct dma_edesc);
2249 else
2250 size = sizeof(struct dma_desc);
2251
2252 size *= dma_conf->dma_tx_size;
2253
2254 addr = dma_alloc_coherent(priv->device, size,
2255 &tx_q->dma_tx_phy, GFP_KERNEL);
2256 if (!addr)
2257 return -ENOMEM;
2258
2259 if (priv->extend_desc)
2260 tx_q->dma_etx = addr;
2261 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2262 tx_q->dma_entx = addr;
2263 else
2264 tx_q->dma_tx = addr;
2265
2266 return 0;
2267 }
2268
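/**
 * alloc_dma_tx_desc_resources - alloc TX resources for all queues
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: it allocates the per-queue TX resources and releases all of
 * them if any allocation fails.
 */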
2269 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2270 struct stmmac_dma_conf *dma_conf)
2271 {
2272 u32 tx_count = priv->plat->tx_queues_to_use;
2273 u32 queue;
2274 int ret;
2275
2276 /* TX queues buffers and DMA */
2277 for (queue = 0; queue < tx_count; queue++) {
2278 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2279 if (ret)
2280 goto err_dma;
2281 }
2282
2283 return 0;
2284
2285 err_dma:
2286 free_dma_tx_desc_resources(priv, dma_conf);
2287 return ret;
2288 }
2289
2290 /**
2291 * alloc_dma_desc_resources - alloc TX/RX resources.
2292 * @priv: private structure
2293 * @dma_conf: structure to take the dma data
2294 * Description: according to which descriptor type is in use (extended or
2295 * basic), this function allocates the resources for the TX and RX paths.
2296 * In case of reception, for example, it pre-allocates the RX socket
2297 * buffers in order to allow a zero-copy mechanism.
2298 */
2299 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2300 struct stmmac_dma_conf *dma_conf)
2301 {
2302 /* RX Allocation */
2303 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2304
2305 if (ret)
2306 return ret;
2307
2308 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2309
2310 return ret;
2311 }
2312
2313 /**
2314 * free_dma_desc_resources - free dma desc resources
2315 * @priv: private structure
2316 * @dma_conf: structure to take the dma data
2317 */
2318 static void free_dma_desc_resources(struct stmmac_priv *priv,
2319 struct stmmac_dma_conf *dma_conf)
2320 {
2321 /* Release the DMA TX socket buffers */
2322 free_dma_tx_desc_resources(priv, dma_conf);
2323
2324 /* Release the DMA RX socket buffers later
2325 * to ensure all pending XDP_TX buffers are returned.
2326 */
2327 free_dma_rx_desc_resources(priv, dma_conf);
2328 }
2329
2330 /**
2331 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2332 * @priv: driver private structure
2333 * Description: It is used for enabling the rx queues in the MAC
2334 */
2335 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2336 {
2337 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2338 int queue;
2339 u8 mode;
2340
2341 for (queue = 0; queue < rx_queues_count; queue++) {
2342 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2343 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2344 }
2345 }
2346
2347 /**
2348 * stmmac_start_rx_dma - start RX DMA channel
2349 * @priv: driver private structure
2350 * @chan: RX channel index
2351 * Description:
2352 * This starts an RX DMA channel
2353 */
2354 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2355 {
2356 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2357 stmmac_start_rx(priv, priv->ioaddr, chan);
2358 }
2359
2360 /**
2361 * stmmac_start_tx_dma - start TX DMA channel
2362 * @priv: driver private structure
2363 * @chan: TX channel index
2364 * Description:
2365 * This starts a TX DMA channel
2366 */
2367 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2368 {
2369 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2370 stmmac_start_tx(priv, priv->ioaddr, chan);
2371 }
2372
2373 /**
2374 * stmmac_stop_rx_dma - stop RX DMA channel
2375 * @priv: driver private structure
2376 * @chan: RX channel index
2377 * Description:
2378 * This stops an RX DMA channel
2379 */
2380 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2381 {
2382 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2383 stmmac_stop_rx(priv, priv->ioaddr, chan);
2384 }
2385
2386 /**
2387 * stmmac_stop_tx_dma - stop TX DMA channel
2388 * @priv: driver private structure
2389 * @chan: TX channel index
2390 * Description:
2391 * This stops a TX DMA channel
2392 */
2393 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2394 {
2395 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2396 stmmac_stop_tx(priv, priv->ioaddr, chan);
2397 }
2398
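/**
 * stmmac_enable_all_dma_irq - enable RX and TX DMA interrupts on all channels
 * @priv: driver private structure
 */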
2399 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2400 {
2401 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2402 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2403 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2404 u32 chan;
2405
2406 for (chan = 0; chan < dma_csr_ch; chan++) {
2407 struct stmmac_channel *ch = &priv->channel[chan];
2408 unsigned long flags;
2409
2410 spin_lock_irqsave(&ch->lock, flags);
2411 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2412 spin_unlock_irqrestore(&ch->lock, flags);
2413 }
2414 }
2415
2416 /**
2417 * stmmac_start_all_dma - start all RX and TX DMA channels
2418 * @priv: driver private structure
2419 * Description:
2420 * This starts all the RX and TX DMA channels
2421 */
2422 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2423 {
2424 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2425 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2426 u32 chan = 0;
2427
2428 for (chan = 0; chan < rx_channels_count; chan++)
2429 stmmac_start_rx_dma(priv, chan);
2430
2431 for (chan = 0; chan < tx_channels_count; chan++)
2432 stmmac_start_tx_dma(priv, chan);
2433 }
2434
2435 /**
2436 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2437 * @priv: driver private structure
2438 * Description:
2439 * This stops the RX and TX DMA channels
2440 */
2441 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2442 {
2443 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2444 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2445 u32 chan = 0;
2446
2447 for (chan = 0; chan < rx_channels_count; chan++)
2448 stmmac_stop_rx_dma(priv, chan);
2449
2450 for (chan = 0; chan < tx_channels_count; chan++)
2451 stmmac_stop_tx_dma(priv, chan);
2452 }
2453
2454 /**
2455 * stmmac_dma_operation_mode - HW DMA operation mode
2456 * @priv: driver private structure
2457 * Description: it is used for configuring the DMA operation mode register in
2458 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2459 */
2460 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2461 {
2462 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2463 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2464 int rxfifosz = priv->plat->rx_fifo_size;
2465 int txfifosz = priv->plat->tx_fifo_size;
2466 u32 txmode = 0;
2467 u32 rxmode = 0;
2468 u32 chan = 0;
2469 u8 qmode = 0;
2470
2471 if (rxfifosz == 0)
2472 rxfifosz = priv->dma_cap.rx_fifo_size;
2473 if (txfifosz == 0)
2474 txfifosz = priv->dma_cap.tx_fifo_size;
2475
2476 /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2477 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2478 rxfifosz /= rx_channels_count;
2479 txfifosz /= tx_channels_count;
2480 }
2481
2482 if (priv->plat->force_thresh_dma_mode) {
2483 txmode = tc;
2484 rxmode = tc;
2485 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2486 /*
2487 * In case of GMAC, SF mode can be enabled
2488 * to perform the TX COE in HW. This depends on:
2489 * 1) TX COE is actually supported
2490 * 2) There is no bugged Jumbo frame support
2491 * that requires csum not to be inserted in the TDES.
2492 */
2493 txmode = SF_DMA_MODE;
2494 rxmode = SF_DMA_MODE;
2495 priv->xstats.threshold = SF_DMA_MODE;
2496 } else {
2497 txmode = tc;
2498 rxmode = SF_DMA_MODE;
2499 }
2500
2501 /* configure all channels */
2502 for (chan = 0; chan < rx_channels_count; chan++) {
2503 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2504 u32 buf_size;
2505
2506 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2507
2508 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2509 rxfifosz, qmode);
2510
2511 if (rx_q->xsk_pool) {
2512 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2513 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2514 buf_size,
2515 chan);
2516 } else {
2517 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2518 priv->dma_conf.dma_buf_sz,
2519 chan);
2520 }
2521 }
2522
2523 for (chan = 0; chan < tx_channels_count; chan++) {
2524 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2525
2526 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2527 txfifosz, qmode);
2528 }
2529 }
2530
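/* Callbacks used via stmmac_xsk_tx_metadata_ops to handle AF_XDP TX
 * metadata (TX timestamp request/completion and launch time).
 */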
2531 static void stmmac_xsk_request_timestamp(void *_priv)
2532 {
2533 struct stmmac_metadata_request *meta_req = _priv;
2534
2535 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2536 *meta_req->set_ic = true;
2537 }
2538
2539 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2540 {
2541 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2542 struct stmmac_priv *priv = tx_compl->priv;
2543 struct dma_desc *desc = tx_compl->desc;
2544 bool found = false;
2545 u64 ns = 0;
2546
2547 if (!priv->hwts_tx_en)
2548 return 0;
2549
2550 /* check tx tstamp status */
2551 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2552 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2553 found = true;
2554 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2555 found = true;
2556 }
2557
2558 if (found) {
2559 ns -= priv->plat->cdc_error_adj;
2560 return ns_to_ktime(ns);
2561 }
2562
2563 return 0;
2564 }
2565
2566 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2567 {
2568 struct timespec64 ts = ns_to_timespec64(launch_time);
2569 struct stmmac_metadata_request *meta_req = _priv;
2570
2571 if (meta_req->tbs & STMMAC_TBS_EN)
2572 stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2573 ts.tv_nsec);
2574 }
2575
2576 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2577 .tmo_request_timestamp = stmmac_xsk_request_timestamp,
2578 .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
2579 .tmo_request_launch_time = stmmac_xsk_request_launch_time,
2580 };
2581
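/**
 * stmmac_xdp_xmit_zc - transmit zero-copy XSK frames on a TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of XSK descriptors to submit
 * Description: it peeks descriptors from the XSK pool and maps them onto
 * the TX DMA ring, which is shared with the slow path.
 * Return: true if the budget was not exhausted and no more XSK descriptors
 * are pending, false otherwise.
 */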
2582 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2583 {
2584 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2585 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2586 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2587 bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
2588 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2589 unsigned int entry = tx_q->cur_tx;
2590 struct dma_desc *tx_desc = NULL;
2591 struct xdp_desc xdp_desc;
2592 bool work_done = true;
2593 u32 tx_set_ic_bit = 0;
2594
2595 /* Avoids TX time-out as we are sharing with slow path */
2596 txq_trans_cond_update(nq);
2597
2598 budget = min(budget, stmmac_tx_avail(priv, queue));
2599
2600 for (; budget > 0; budget--) {
2601 struct stmmac_metadata_request meta_req;
2602 struct xsk_tx_metadata *meta = NULL;
2603 dma_addr_t dma_addr;
2604 bool set_ic;
2605
2606 /* We are sharing the ring with the slow path, so stop XSK TX desc
2607 * submission when the available TX ring space drops below the threshold.
2608 */
2609 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2610 !netif_carrier_ok(priv->dev)) {
2611 work_done = false;
2612 break;
2613 }
2614
2615 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2616 break;
2617
2618 if (priv->est && priv->est->enable &&
2619 priv->est->max_sdu[queue] &&
2620 xdp_desc.len > priv->est->max_sdu[queue]) {
2621 priv->xstats.max_sdu_txq_drop[queue]++;
2622 continue;
2623 }
2624
2625 if (likely(priv->extend_desc))
2626 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2627 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2628 tx_desc = &tx_q->dma_entx[entry].basic;
2629 else
2630 tx_desc = tx_q->dma_tx + entry;
2631
2632 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2633 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2634 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2635
2636 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2637
2638 /* To return XDP buffer to XSK pool, we simply call
2639 * xsk_tx_completed(), so we don't need to fill up
2640 * 'buf' and 'xdpf'.
2641 */
2642 tx_q->tx_skbuff_dma[entry].buf = 0;
2643 tx_q->xdpf[entry] = NULL;
2644
2645 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2646 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2647 tx_q->tx_skbuff_dma[entry].last_segment = true;
2648 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2649
2650 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2651
2652 tx_q->tx_count_frames++;
2653
2654 if (!priv->tx_coal_frames[queue])
2655 set_ic = false;
2656 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2657 set_ic = true;
2658 else
2659 set_ic = false;
2660
2661 meta_req.priv = priv;
2662 meta_req.tx_desc = tx_desc;
2663 meta_req.set_ic = &set_ic;
2664 meta_req.tbs = tx_q->tbs;
2665 meta_req.edesc = &tx_q->dma_entx[entry];
2666 xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2667 &meta_req);
2668 if (set_ic) {
2669 tx_q->tx_count_frames = 0;
2670 stmmac_set_tx_ic(priv, tx_desc);
2671 tx_set_ic_bit++;
2672 }
2673
2674 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2675 csum, priv->mode, true, true,
2676 xdp_desc.len);
2677
2678 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2679
2680 xsk_tx_metadata_to_compl(meta,
2681 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2682
2683 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2684 entry = tx_q->cur_tx;
2685 }
2686 u64_stats_update_begin(&txq_stats->napi_syncp);
2687 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2688 u64_stats_update_end(&txq_stats->napi_syncp);
2689
2690 if (tx_desc) {
2691 stmmac_flush_tx_descriptors(priv, queue);
2692 xsk_tx_release(pool);
2693 }
2694
2695 /* Return true if both of the following conditions are met
2696 * a) TX Budget is still available
2697 * b) work_done = true when XSK TX desc peek is empty (no more
2698 * pending XSK TX for transmission)
2699 */
2700 return !!budget && work_done;
2701 }
2702
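/**
 * stmmac_bump_dma_threshold - raise the DMA threshold of a channel
 * @priv: driver private structure
 * @chan: channel index
 * Description: it increases the tc threshold by 64 while the channel is not
 * in SF mode and tc is not above 256, then reprograms the operation mode.
 */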
2703 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2704 {
2705 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2706 tc += 64;
2707
2708 if (priv->plat->force_thresh_dma_mode)
2709 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2710 else
2711 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2712 chan);
2713
2714 priv->xstats.threshold = tc;
2715 }
2716 }
2717
2718 /**
2719 * stmmac_tx_clean - to manage the transmission completion
2720 * @priv: driver private structure
2721 * @budget: napi budget limiting this functions packet handling
2722 * @queue: TX queue index
2723 * @pending_packets: signal to arm the TX coal timer
2724 * Description: it reclaims the transmit resources after transmission completes.
2725 * If some packets still need to be handled, due to TX coalesce, set
2726 * pending_packets to true to make NAPI arm the TX coal timer.
2727 */
2728 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2729 bool *pending_packets)
2730 {
2731 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2732 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2733 unsigned int bytes_compl = 0, pkts_compl = 0;
2734 unsigned int entry, xmits = 0, count = 0;
2735 u32 tx_packets = 0, tx_errors = 0;
2736
2737 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2738
2739 tx_q->xsk_frames_done = 0;
2740
2741 entry = tx_q->dirty_tx;
2742
2743 /* Try to clean all TX complete frames in 1 shot */
2744 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2745 struct xdp_frame *xdpf;
2746 struct sk_buff *skb;
2747 struct dma_desc *p;
2748 int status;
2749
2750 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2751 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2752 xdpf = tx_q->xdpf[entry];
2753 skb = NULL;
2754 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2755 xdpf = NULL;
2756 skb = tx_q->tx_skbuff[entry];
2757 } else {
2758 xdpf = NULL;
2759 skb = NULL;
2760 }
2761
2762 if (priv->extend_desc)
2763 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2764 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2765 p = &tx_q->dma_entx[entry].basic;
2766 else
2767 p = tx_q->dma_tx + entry;
2768
2769 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2770 /* Check if the descriptor is owned by the DMA */
2771 if (unlikely(status & tx_dma_own))
2772 break;
2773
2774 count++;
2775
2776 /* Make sure descriptor fields are read after reading
2777 * the own bit.
2778 */
2779 dma_rmb();
2780
2781 /* Just consider the last segment and ...*/
2782 if (likely(!(status & tx_not_ls))) {
2783 /* ... verify the status error condition */
2784 if (unlikely(status & tx_err)) {
2785 tx_errors++;
2786 if (unlikely(status & tx_err_bump_tc))
2787 stmmac_bump_dma_threshold(priv, queue);
2788 } else {
2789 tx_packets++;
2790 }
2791 if (skb) {
2792 stmmac_get_tx_hwtstamp(priv, p, skb);
2793 } else if (tx_q->xsk_pool &&
2794 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2795 struct stmmac_xsk_tx_complete tx_compl = {
2796 .priv = priv,
2797 .desc = p,
2798 };
2799
2800 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2801 &stmmac_xsk_tx_metadata_ops,
2802 &tx_compl);
2803 }
2804 }
2805
2806 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2807 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2808 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2809 dma_unmap_page(priv->device,
2810 tx_q->tx_skbuff_dma[entry].buf,
2811 tx_q->tx_skbuff_dma[entry].len,
2812 DMA_TO_DEVICE);
2813 else
2814 dma_unmap_single(priv->device,
2815 tx_q->tx_skbuff_dma[entry].buf,
2816 tx_q->tx_skbuff_dma[entry].len,
2817 DMA_TO_DEVICE);
2818 tx_q->tx_skbuff_dma[entry].buf = 0;
2819 tx_q->tx_skbuff_dma[entry].len = 0;
2820 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2821 }
2822
2823 stmmac_clean_desc3(priv, tx_q, p);
2824
2825 tx_q->tx_skbuff_dma[entry].last_segment = false;
2826 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2827
2828 if (xdpf &&
2829 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2830 xdp_return_frame_rx_napi(xdpf);
2831 tx_q->xdpf[entry] = NULL;
2832 }
2833
2834 if (xdpf &&
2835 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2836 xdp_return_frame(xdpf);
2837 tx_q->xdpf[entry] = NULL;
2838 }
2839
2840 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2841 tx_q->xsk_frames_done++;
2842
2843 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2844 if (likely(skb)) {
2845 pkts_compl++;
2846 bytes_compl += skb->len;
2847 dev_consume_skb_any(skb);
2848 tx_q->tx_skbuff[entry] = NULL;
2849 }
2850 }
2851
2852 stmmac_release_tx_desc(priv, p, priv->mode);
2853
2854 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2855 }
2856 tx_q->dirty_tx = entry;
2857
2858 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2859 pkts_compl, bytes_compl);
2860
2861 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2862 queue))) &&
2863 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2864
2865 netif_dbg(priv, tx_done, priv->dev,
2866 "%s: restart transmit\n", __func__);
2867 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2868 }
2869
2870 if (tx_q->xsk_pool) {
2871 bool work_done;
2872
2873 if (tx_q->xsk_frames_done)
2874 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2875
2876 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2877 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2878
2879 /* For XSK TX, we try to send as many as possible.
2880 * If XSK work done (XSK TX desc empty and budget still
2881 * available), return "budget - 1" to reenable TX IRQ.
2882 * Else, return "budget" to make NAPI continue polling.
2883 */
2884 work_done = stmmac_xdp_xmit_zc(priv, queue,
2885 STMMAC_XSK_TX_BUDGET_MAX);
2886 if (work_done)
2887 xmits = budget - 1;
2888 else
2889 xmits = budget;
2890 }
2891
2892 if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2893 stmmac_restart_sw_lpi_timer(priv);
2894
2895 /* We still have pending packets, let's call for a new scheduling */
2896 if (tx_q->dirty_tx != tx_q->cur_tx)
2897 *pending_packets = true;
2898
2899 u64_stats_update_begin(&txq_stats->napi_syncp);
2900 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2901 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2902 u64_stats_inc(&txq_stats->napi.tx_clean);
2903 u64_stats_update_end(&txq_stats->napi_syncp);
2904
2905 priv->xstats.tx_errors += tx_errors;
2906
2907 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2908
2909 /* Combine decisions from TX clean and XSK TX */
2910 return max(count, xmits);
2911 }
2912
2913 /**
2914 * stmmac_tx_err - to manage the tx error
2915 * @priv: driver private structure
2916 * @chan: channel index
2917 * Description: it cleans the descriptors and restarts the transmission
2918 * in case of transmission errors.
2919 */
2920 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2921 {
2922 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2923
2924 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2925
2926 stmmac_stop_tx_dma(priv, chan);
2927 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2928 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2929 stmmac_reset_tx_queue(priv, chan);
2930 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2931 tx_q->dma_tx_phy, chan);
2932 stmmac_start_tx_dma(priv, chan);
2933
2934 priv->xstats.tx_errors++;
2935 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2936 }
2937
2938 /**
2939 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2940 * @priv: driver private structure
2941 * @txmode: TX operating mode
2942 * @rxmode: RX operating mode
2943 * @chan: channel index
2944 * Description: it is used for configuring of the DMA operation mode in
2945 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2946 * mode.
2947 */
2948 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2949 u32 rxmode, u32 chan)
2950 {
2951 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2952 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2953 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2954 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2955 int rxfifosz = priv->plat->rx_fifo_size;
2956 int txfifosz = priv->plat->tx_fifo_size;
2957
2958 if (rxfifosz == 0)
2959 rxfifosz = priv->dma_cap.rx_fifo_size;
2960 if (txfifosz == 0)
2961 txfifosz = priv->dma_cap.tx_fifo_size;
2962
2963 /* Adjust for real per queue fifo size */
2964 rxfifosz /= rx_channels_count;
2965 txfifosz /= tx_channels_count;
2966
2967 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2968 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2969 }
2970
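/**
 * stmmac_safety_feat_interrupt - handle safety feature interrupts
 * @priv: driver private structure
 * Description: it reads the safety IRQ status and triggers the global error
 * handling when an error is reported.
 */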
2971 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2972 {
2973 int ret;
2974
2975 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2976 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2977 if (ret && (ret != -EINVAL)) {
2978 stmmac_global_err(priv);
2979 return true;
2980 }
2981
2982 return false;
2983 }
2984
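/**
 * stmmac_napi_check - check DMA status and schedule the matching NAPI context
 * @priv: driver private structure
 * @chan: channel index
 * @dir: interrupt direction to check (RX, TX or both)
 * Description: it reads the DMA interrupt status of the channel, disables
 * the corresponding DMA interrupts and schedules the RX, TX or combined
 * rxtx NAPI instance.
 */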
2985 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2986 {
2987 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2988 &priv->xstats, chan, dir);
2989 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2990 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2991 struct stmmac_channel *ch = &priv->channel[chan];
2992 struct napi_struct *rx_napi;
2993 struct napi_struct *tx_napi;
2994 unsigned long flags;
2995
2996 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2997 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2998
2999 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
3000 if (napi_schedule_prep(rx_napi)) {
3001 spin_lock_irqsave(&ch->lock, flags);
3002 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3003 spin_unlock_irqrestore(&ch->lock, flags);
3004 __napi_schedule(rx_napi);
3005 }
3006 }
3007
3008 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
3009 if (napi_schedule_prep(tx_napi)) {
3010 spin_lock_irqsave(&ch->lock, flags);
3011 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3012 spin_unlock_irqrestore(&ch->lock, flags);
3013 __napi_schedule(tx_napi);
3014 }
3015 }
3016
3017 return status;
3018 }
3019
3020 /**
3021 * stmmac_dma_interrupt - DMA ISR
3022 * @priv: driver private structure
3023 * Description: this is the DMA ISR. It is called by the main ISR.
3024 * It calls the dwmac dma routine and schedules the poll method in case
3025 * some work can be done.
3026 */
3027 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3028 {
3029 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3030 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3031 u32 channels_to_check = tx_channel_count > rx_channel_count ?
3032 tx_channel_count : rx_channel_count;
3033 u32 chan;
3034 int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3035
3036 /* Make sure we never check beyond our status buffer. */
3037 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3038 channels_to_check = ARRAY_SIZE(status);
3039
3040 for (chan = 0; chan < channels_to_check; chan++)
3041 status[chan] = stmmac_napi_check(priv, chan,
3042 DMA_DIR_RXTX);
3043
3044 for (chan = 0; chan < tx_channel_count; chan++) {
3045 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3046 /* Try to bump up the dma threshold on this failure */
3047 stmmac_bump_dma_threshold(priv, chan);
3048 } else if (unlikely(status[chan] == tx_hard_error)) {
3049 stmmac_tx_err(priv, chan);
3050 }
3051 }
3052 }
3053
3054 /**
3055 * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
3056 * @priv: driver private structure
3057 * Description: this masks the MMC irq; in fact, the counters are managed in SW.
3058 */
3059 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3060 {
3061 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3062 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3063
3064 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3065
3066 if (priv->dma_cap.rmon) {
3067 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3068 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3069 } else
3070 netdev_info(priv->dev, "No MAC Management Counters available\n");
3071 }
3072
3073 /**
3074 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3075 * @priv: driver private structure
3076 * Description:
3077 * new GMAC chip generations have a register to indicate the
3078 * presence of the optional features/functions.
3079 * This can also be used to override the value passed through the
3080 * platform, which is necessary for old MAC10/100 and GMAC chips.
3081 */
3082 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3083 {
3084 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3085 }
3086
3087 /**
3088 * stmmac_check_ether_addr - check if the MAC addr is valid
3089 * @priv: driver private structure
3090 * Description:
3091 * it verifies that the MAC address is valid; in case of failure it
3092 * generates a random MAC address
3093 */
3094 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3095 {
3096 u8 addr[ETH_ALEN];
3097
3098 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3099 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3100 if (is_valid_ether_addr(addr))
3101 eth_hw_addr_set(priv->dev, addr);
3102 else
3103 eth_hw_addr_random(priv->dev);
3104 dev_info(priv->device, "device MAC address %pM\n",
3105 priv->dev->dev_addr);
3106 }
3107 }
3108
3109 /**
3110 * stmmac_init_dma_engine - DMA init.
3111 * @priv: driver private structure
3112 * Description:
3113 * It inits the DMA invoking the specific MAC/GMAC callback.
3114 * Some DMA parameters can be passed from the platform;
3115 * in case these are not passed, a default is kept for the MAC or GMAC.
3116 */
3117 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3118 {
3119 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3120 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3121 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3122 struct stmmac_rx_queue *rx_q;
3123 struct stmmac_tx_queue *tx_q;
3124 u32 chan = 0;
3125 int ret = 0;
3126
3127 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3128 netdev_err(priv->dev, "Invalid DMA configuration\n");
3129 return -EINVAL;
3130 }
3131
3132 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3133 priv->plat->dma_cfg->atds = 1;
3134
3135 ret = stmmac_reset(priv, priv->ioaddr);
3136 if (ret) {
3137 netdev_err(priv->dev, "Failed to reset the dma\n");
3138 return ret;
3139 }
3140
3141 /* DMA Configuration */
3142 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3143
3144 if (priv->plat->axi)
3145 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3146
3147 /* DMA CSR Channel configuration */
3148 for (chan = 0; chan < dma_csr_ch; chan++) {
3149 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3150 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3151 }
3152
3153 /* DMA RX Channel Configuration */
3154 for (chan = 0; chan < rx_channels_count; chan++) {
3155 rx_q = &priv->dma_conf.rx_queue[chan];
3156
3157 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3158 rx_q->dma_rx_phy, chan);
3159
3160 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3161 (rx_q->buf_alloc_num *
3162 sizeof(struct dma_desc));
3163 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3164 rx_q->rx_tail_addr, chan);
3165 }
3166
3167 /* DMA TX Channel Configuration */
3168 for (chan = 0; chan < tx_channels_count; chan++) {
3169 tx_q = &priv->dma_conf.tx_queue[chan];
3170
3171 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3172 tx_q->dma_tx_phy, chan);
3173
3174 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3175 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3176 tx_q->tx_tail_addr, chan);
3177 }
3178
3179 return ret;
3180 }
3181
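/**
 * stmmac_tx_timer_arm - arm the TX coalescing timer
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: it arms the TX coalescing hrtimer unless NAPI is already
 * scheduled, in which case any pending timer is cancelled.
 */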
3182 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3183 {
3184 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3185 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3186 struct stmmac_channel *ch;
3187 struct napi_struct *napi;
3188
3189 if (!tx_coal_timer)
3190 return;
3191
3192 ch = &priv->channel[tx_q->queue_index];
3193 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3194
3195 /* Arm timer only if napi is not already scheduled.
3196 * Try to cancel any timer if napi is scheduled, timer will be armed
3197 * again in the next scheduled napi.
3198 */
3199 if (unlikely(!napi_is_scheduled(napi)))
3200 hrtimer_start(&tx_q->txtimer,
3201 STMMAC_COAL_TIMER(tx_coal_timer),
3202 HRTIMER_MODE_REL);
3203 else
3204 hrtimer_try_to_cancel(&tx_q->txtimer);
3205 }
3206
3207 /**
3208 * stmmac_tx_timer - mitigation sw timer for tx.
3209 * @t: data pointer
3210 * Description:
3211 * This is the timer handler to directly invoke the stmmac_tx_clean.
3212 */
3213 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3214 {
3215 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3216 struct stmmac_priv *priv = tx_q->priv_data;
3217 struct stmmac_channel *ch;
3218 struct napi_struct *napi;
3219
3220 ch = &priv->channel[tx_q->queue_index];
3221 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3222
3223 if (likely(napi_schedule_prep(napi))) {
3224 unsigned long flags;
3225
3226 spin_lock_irqsave(&ch->lock, flags);
3227 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3228 spin_unlock_irqrestore(&ch->lock, flags);
3229 __napi_schedule(napi);
3230 }
3231
3232 return HRTIMER_NORESTART;
3233 }
3234
3235 /**
3236 * stmmac_init_coalesce - init mitigation options.
3237 * @priv: driver private structure
3238 * Description:
3239 * This inits the coalesce parameters: i.e. timer rate,
3240 * timer handler and default threshold used for enabling the
3241 * interrupt on completion bit.
3242 */
3243 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3244 {
3245 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3246 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3247 u32 chan;
3248
3249 for (chan = 0; chan < tx_channel_count; chan++) {
3250 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3251
3252 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3253 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3254
3255 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3256 }
3257
3258 for (chan = 0; chan < rx_channel_count; chan++)
3259 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3260 }
3261
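/**
 * stmmac_set_rings_length - program the RX/TX ring lengths into the DMA
 * @priv: driver private structure
 */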
3262 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3263 {
3264 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3265 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3266 u32 chan;
3267
3268 /* set TX ring length */
3269 for (chan = 0; chan < tx_channels_count; chan++)
3270 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3271 (priv->dma_conf.dma_tx_size - 1), chan);
3272
3273 /* set RX ring length */
3274 for (chan = 0; chan < rx_channels_count; chan++)
3275 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3276 (priv->dma_conf.dma_rx_size - 1), chan);
3277 }
3278
3279 /**
3280 * stmmac_set_tx_queue_weight - Set TX queue weight
3281 * @priv: driver private structure
3282 * Description: It is used for setting TX queues weight
3283 */
3284 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3285 {
3286 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3287 u32 weight;
3288 u32 queue;
3289
3290 for (queue = 0; queue < tx_queues_count; queue++) {
3291 weight = priv->plat->tx_queues_cfg[queue].weight;
3292 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3293 }
3294 }
3295
3296 /**
3297 * stmmac_configure_cbs - Configure CBS in TX queue
3298 * @priv: driver private structure
3299 * Description: It is used for configuring CBS in AVB TX queues
3300 */
3301 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3302 {
3303 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3304 u32 mode_to_use;
3305 u32 queue;
3306
3307 /* queue 0 is reserved for legacy traffic */
3308 for (queue = 1; queue < tx_queues_count; queue++) {
3309 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3310 if (mode_to_use == MTL_QUEUE_DCB)
3311 continue;
3312
3313 stmmac_config_cbs(priv, priv->hw,
3314 priv->plat->tx_queues_cfg[queue].send_slope,
3315 priv->plat->tx_queues_cfg[queue].idle_slope,
3316 priv->plat->tx_queues_cfg[queue].high_credit,
3317 priv->plat->tx_queues_cfg[queue].low_credit,
3318 queue);
3319 }
3320 }
3321
3322 /**
3323 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3324 * @priv: driver private structure
3325 * Description: It is used for mapping RX queues to RX dma channels
3326 */
3327 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3328 {
3329 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3330 u32 queue;
3331 u32 chan;
3332
3333 for (queue = 0; queue < rx_queues_count; queue++) {
3334 chan = priv->plat->rx_queues_cfg[queue].chan;
3335 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3336 }
3337 }
3338
3339 /**
3340 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3341 * @priv: driver private structure
3342 * Description: It is used for configuring the RX Queue Priority
3343 */
3344 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3345 {
3346 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3347 u32 queue;
3348 u32 prio;
3349
3350 for (queue = 0; queue < rx_queues_count; queue++) {
3351 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3352 continue;
3353
3354 prio = priv->plat->rx_queues_cfg[queue].prio;
3355 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3356 }
3357 }
3358
3359 /**
3360 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3361 * @priv: driver private structure
3362 * Description: It is used for configuring the TX Queue Priority
3363 */
3364 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3365 {
3366 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3367 u32 queue;
3368 u32 prio;
3369
3370 for (queue = 0; queue < tx_queues_count; queue++) {
3371 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3372 continue;
3373
3374 prio = priv->plat->tx_queues_cfg[queue].prio;
3375 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3376 }
3377 }
3378
3379 /**
3380 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3381 * @priv: driver private structure
3382 * Description: It is used for configuring the RX queue routing
3383 */
3384 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3385 {
3386 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3387 u32 queue;
3388 u8 packet;
3389
3390 for (queue = 0; queue < rx_queues_count; queue++) {
3391 /* no specific packet type routing specified for the queue */
3392 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3393 continue;
3394
3395 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3396 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3397 }
3398 }
3399
3400 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3401 {
3402 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3403 priv->rss.enable = false;
3404 return;
3405 }
3406
3407 if (priv->dev->features & NETIF_F_RXHASH)
3408 priv->rss.enable = true;
3409 else
3410 priv->rss.enable = false;
3411
3412 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3413 priv->plat->rx_queues_to_use);
3414 }
3415
3416 /**
3417 * stmmac_mtl_configuration - Configure MTL
3418 * @priv: driver private structure
3419 * Description: It is used for configuring the MTL
3420 */
3421 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3422 {
3423 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3424 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3425
3426 if (tx_queues_count > 1)
3427 stmmac_set_tx_queue_weight(priv);
3428
3429 /* Configure MTL RX algorithms */
3430 if (rx_queues_count > 1)
3431 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3432 priv->plat->rx_sched_algorithm);
3433
3434 /* Configure MTL TX algorithms */
3435 if (tx_queues_count > 1)
3436 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3437 priv->plat->tx_sched_algorithm);
3438
3439 /* Configure CBS in AVB TX queues */
3440 if (tx_queues_count > 1)
3441 stmmac_configure_cbs(priv);
3442
3443 /* Map RX MTL to DMA channels */
3444 stmmac_rx_queue_dma_chan_map(priv);
3445
3446 /* Enable MAC RX Queues */
3447 stmmac_mac_enable_rx_queues(priv);
3448
3449 /* Set RX priorities */
3450 if (rx_queues_count > 1)
3451 stmmac_mac_config_rx_queues_prio(priv);
3452
3453 /* Set TX priorities */
3454 if (tx_queues_count > 1)
3455 stmmac_mac_config_tx_queues_prio(priv);
3456
3457 /* Set RX routing */
3458 if (rx_queues_count > 1)
3459 stmmac_mac_config_rx_queues_routing(priv);
3460
3461 /* Receive Side Scaling */
3462 if (rx_queues_count > 1)
3463 stmmac_mac_config_rss(priv);
3464 }
3465
3466 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3467 {
3468 if (priv->dma_cap.asp) {
3469 netdev_info(priv->dev, "Enabling Safety Features\n");
3470 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3471 priv->plat->safety_feat_cfg);
3472 } else {
3473 netdev_info(priv->dev, "No Safety Features support found\n");
3474 }
3475 }
3476
3477 /**
3478 * stmmac_hw_setup - setup mac in a usable state.
3479 * @dev : pointer to the device structure.
3480 * @ptp_register: register PTP if set
3481 * Description:
3482 * this is the main function to set up the HW in a usable state: the
3483 * DMA engine is reset, the core registers are configured (e.g. AXI,
3484 * checksum features, timers) and the DMA is made ready to start
3485 * receiving and transmitting.
3486 * Return value:
3487 * 0 on success or a negative errno value as defined in errno.h
3488 * on failure.
3489 */
3490 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3491 {
3492 struct stmmac_priv *priv = netdev_priv(dev);
3493 u32 rx_cnt = priv->plat->rx_queues_to_use;
3494 u32 tx_cnt = priv->plat->tx_queues_to_use;
3495 bool sph_en;
3496 u32 chan;
3497 int ret;
3498
3499 /* Make sure RX clock is enabled */
3500 if (priv->hw->phylink_pcs)
3501 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3502
3503 /* Note that clk_rx_i must be running for reset to complete. This
3504 * clock may also be required when setting the MAC address.
3505 *
3506 * Block the receive clock stop for LPI mode at the PHY in case
3507 * the link is established with EEE mode active.
3508 */
3509 phylink_rx_clk_stop_block(priv->phylink);
3510
3511 /* DMA initialization and SW reset */
3512 ret = stmmac_init_dma_engine(priv);
3513 if (ret < 0) {
3514 phylink_rx_clk_stop_unblock(priv->phylink);
3515 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3516 __func__);
3517 return ret;
3518 }
3519
3520 /* Copy the MAC addr into the HW */
3521 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3522 phylink_rx_clk_stop_unblock(priv->phylink);
3523
3524 /* PS and related bits will be programmed according to the speed */
3525 if (priv->hw->pcs) {
3526 int speed = priv->plat->mac_port_sel_speed;
3527
3528 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3529 (speed == SPEED_1000)) {
3530 priv->hw->ps = speed;
3531 } else {
3532 dev_warn(priv->device, "invalid port speed\n");
3533 priv->hw->ps = 0;
3534 }
3535 }
3536
3537 /* Initialize the MAC Core */
3538 stmmac_core_init(priv, priv->hw, dev);
3539
3540 /* Initialize MTL */
3541 stmmac_mtl_configuration(priv);
3542
3543 /* Initialize Safety Features */
3544 stmmac_safety_feat_configuration(priv);
3545
3546 ret = stmmac_rx_ipc(priv, priv->hw);
3547 if (!ret) {
3548 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3549 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3550 priv->hw->rx_csum = 0;
3551 }
3552
3553 /* Enable the MAC Rx/Tx */
3554 stmmac_mac_set(priv, priv->ioaddr, true);
3555
3556 /* Set the HW DMA mode and the COE */
3557 stmmac_dma_operation_mode(priv);
3558
3559 stmmac_mmc_setup(priv);
3560
3561 if (ptp_register) {
3562 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3563 if (ret < 0)
3564 netdev_warn(priv->dev,
3565 "failed to enable PTP reference clock: %pe\n",
3566 ERR_PTR(ret));
3567 }
3568
3569 ret = stmmac_init_ptp(priv);
3570 if (ret == -EOPNOTSUPP)
3571 netdev_info(priv->dev, "PTP not supported by HW\n");
3572 else if (ret)
3573 netdev_warn(priv->dev, "PTP init failed\n");
3574 else if (ptp_register)
3575 stmmac_ptp_register(priv);
3576
3577 if (priv->use_riwt) {
3578 u32 queue;
3579
3580 for (queue = 0; queue < rx_cnt; queue++) {
3581 if (!priv->rx_riwt[queue])
3582 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3583
3584 stmmac_rx_watchdog(priv, priv->ioaddr,
3585 priv->rx_riwt[queue], queue);
3586 }
3587 }
3588
3589 if (priv->hw->pcs)
3590 stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
3591
3592 /* set TX and RX rings length */
3593 stmmac_set_rings_length(priv);
3594
3595 /* Enable TSO */
3596 if (priv->tso) {
3597 for (chan = 0; chan < tx_cnt; chan++) {
3598 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3599
3600 /* TSO and TBS cannot co-exist */
3601 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3602 continue;
3603
3604 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3605 }
3606 }
3607
3608 /* Enable Split Header */
3609 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3610 for (chan = 0; chan < rx_cnt; chan++)
3611 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3612
3613
3614 /* VLAN Tag Insertion */
3615 if (priv->dma_cap.vlins)
3616 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3617
3618 /* TBS */
3619 for (chan = 0; chan < tx_cnt; chan++) {
3620 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3621 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3622
3623 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3624 }
3625
3626 /* Configure real RX and TX queues */
3627 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3628 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3629
3630 /* Start the ball rolling... */
3631 stmmac_start_all_dma(priv);
3632
3633 phylink_rx_clk_stop_block(priv->phylink);
3634 stmmac_set_hw_vlan_mode(priv, priv->hw);
3635 phylink_rx_clk_stop_unblock(priv->phylink);
3636
3637 return 0;
3638 }
3639
3640 static void stmmac_hw_teardown(struct net_device *dev)
3641 {
3642 struct stmmac_priv *priv = netdev_priv(dev);
3643
3644 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3645 }
3646
3647 static void stmmac_free_irq(struct net_device *dev,
3648 enum request_irq_err irq_err, int irq_idx)
3649 {
3650 struct stmmac_priv *priv = netdev_priv(dev);
3651 int j;
3652
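/* The cases below intentionally fall through: freeing starts at the
 * stage that failed (or at REQ_IRQ_ERR_ALL on teardown) and releases,
 * in reverse order, every IRQ that was successfully requested before it.
 */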
3653 switch (irq_err) {
3654 case REQ_IRQ_ERR_ALL:
3655 irq_idx = priv->plat->tx_queues_to_use;
3656 fallthrough;
3657 case REQ_IRQ_ERR_TX:
3658 for (j = irq_idx - 1; j >= 0; j--) {
3659 if (priv->tx_irq[j] > 0) {
3660 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3661 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3662 }
3663 }
3664 irq_idx = priv->plat->rx_queues_to_use;
3665 fallthrough;
3666 case REQ_IRQ_ERR_RX:
3667 for (j = irq_idx - 1; j >= 0; j--) {
3668 if (priv->rx_irq[j] > 0) {
3669 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3670 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3671 }
3672 }
3673
3674 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3675 free_irq(priv->sfty_ue_irq, dev);
3676 fallthrough;
3677 case REQ_IRQ_ERR_SFTY_UE:
3678 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3679 free_irq(priv->sfty_ce_irq, dev);
3680 fallthrough;
3681 case REQ_IRQ_ERR_SFTY_CE:
3682 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3683 free_irq(priv->lpi_irq, dev);
3684 fallthrough;
3685 case REQ_IRQ_ERR_LPI:
3686 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3687 free_irq(priv->wol_irq, dev);
3688 fallthrough;
3689 case REQ_IRQ_ERR_SFTY:
3690 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3691 free_irq(priv->sfty_irq, dev);
3692 fallthrough;
3693 case REQ_IRQ_ERR_WOL:
3694 free_irq(dev->irq, dev);
3695 fallthrough;
3696 case REQ_IRQ_ERR_MAC:
3697 case REQ_IRQ_ERR_NO:
3698 /* If the MAC IRQ request failed, there are no more IRQs to free */
3699 break;
3700 }
3701 }
3702
3703 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3704 {
3705 struct stmmac_priv *priv = netdev_priv(dev);
3706 enum request_irq_err irq_err;
3707 int irq_idx = 0;
3708 char *int_name;
3709 int ret;
3710 int i;
3711
3712 /* For common interrupt */
3713 int_name = priv->int_name_mac;
3714 sprintf(int_name, "%s:%s", dev->name, "mac");
3715 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3716 0, int_name, dev);
3717 if (unlikely(ret < 0)) {
3718 netdev_err(priv->dev,
3719 "%s: alloc mac MSI %d (error: %d)\n",
3720 __func__, dev->irq, ret);
3721 irq_err = REQ_IRQ_ERR_MAC;
3722 goto irq_error;
3723 }
3724
3725 /* Request the Wake IRQ in case another line
3726 * is used for WoL
3727 */
3728 priv->wol_irq_disabled = true;
3729 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3730 int_name = priv->int_name_wol;
3731 sprintf(int_name, "%s:%s", dev->name, "wol");
3732 ret = request_irq(priv->wol_irq,
3733 stmmac_mac_interrupt,
3734 0, int_name, dev);
3735 if (unlikely(ret < 0)) {
3736 netdev_err(priv->dev,
3737 "%s: alloc wol MSI %d (error: %d)\n",
3738 __func__, priv->wol_irq, ret);
3739 irq_err = REQ_IRQ_ERR_WOL;
3740 goto irq_error;
3741 }
3742 }
3743
3744 /* Request the LPI IRQ in case another line
3745 * is used for LPI
3746 */
3747 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3748 int_name = priv->int_name_lpi;
3749 sprintf(int_name, "%s:%s", dev->name, "lpi");
3750 ret = request_irq(priv->lpi_irq,
3751 stmmac_mac_interrupt,
3752 0, int_name, dev);
3753 if (unlikely(ret < 0)) {
3754 netdev_err(priv->dev,
3755 "%s: alloc lpi MSI %d (error: %d)\n",
3756 __func__, priv->lpi_irq, ret);
3757 irq_err = REQ_IRQ_ERR_LPI;
3758 goto irq_error;
3759 }
3760 }
3761
3762 /* Request the common Safety Feature Correctable/Uncorrectable
3763 * Error line in case another line is used
3764 */
3765 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3766 int_name = priv->int_name_sfty;
3767 sprintf(int_name, "%s:%s", dev->name, "safety");
3768 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3769 0, int_name, dev);
3770 if (unlikely(ret < 0)) {
3771 netdev_err(priv->dev,
3772 "%s: alloc sfty MSI %d (error: %d)\n",
3773 __func__, priv->sfty_irq, ret);
3774 irq_err = REQ_IRQ_ERR_SFTY;
3775 goto irq_error;
3776 }
3777 }
3778
3779 /* Request the Safety Feature Correctable Error line in
3780 * case another line is used
3781 */
3782 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3783 int_name = priv->int_name_sfty_ce;
3784 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3785 ret = request_irq(priv->sfty_ce_irq,
3786 stmmac_safety_interrupt,
3787 0, int_name, dev);
3788 if (unlikely(ret < 0)) {
3789 netdev_err(priv->dev,
3790 "%s: alloc sfty ce MSI %d (error: %d)\n",
3791 __func__, priv->sfty_ce_irq, ret);
3792 irq_err = REQ_IRQ_ERR_SFTY_CE;
3793 goto irq_error;
3794 }
3795 }
3796
3797 /* Request the Safety Feature Uncorrectable Error line in
3798 * case another line is used
3799 */
3800 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3801 int_name = priv->int_name_sfty_ue;
3802 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3803 ret = request_irq(priv->sfty_ue_irq,
3804 stmmac_safety_interrupt,
3805 0, int_name, dev);
3806 if (unlikely(ret < 0)) {
3807 netdev_err(priv->dev,
3808 "%s: alloc sfty ue MSI %d (error: %d)\n",
3809 __func__, priv->sfty_ue_irq, ret);
3810 irq_err = REQ_IRQ_ERR_SFTY_UE;
3811 goto irq_error;
3812 }
3813 }
3814
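/* The per-queue RX/TX vectors requested below get a simple round-robin
 * affinity hint across the online CPUs.
 */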
3815 /* Request Rx MSI irq */
3816 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3817 if (i >= MTL_MAX_RX_QUEUES)
3818 break;
3819 if (priv->rx_irq[i] == 0)
3820 continue;
3821
3822 int_name = priv->int_name_rx_irq[i];
3823 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3824 ret = request_irq(priv->rx_irq[i],
3825 stmmac_msi_intr_rx,
3826 0, int_name, &priv->dma_conf.rx_queue[i]);
3827 if (unlikely(ret < 0)) {
3828 netdev_err(priv->dev,
3829 "%s: alloc rx-%d MSI %d (error: %d)\n",
3830 __func__, i, priv->rx_irq[i], ret);
3831 irq_err = REQ_IRQ_ERR_RX;
3832 irq_idx = i;
3833 goto irq_error;
3834 }
3835 irq_set_affinity_hint(priv->rx_irq[i],
3836 cpumask_of(i % num_online_cpus()));
3837 }
3838
3839 /* Request Tx MSI irq */
3840 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3841 if (i >= MTL_MAX_TX_QUEUES)
3842 break;
3843 if (priv->tx_irq[i] == 0)
3844 continue;
3845
3846 int_name = priv->int_name_tx_irq[i];
3847 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3848 ret = request_irq(priv->tx_irq[i],
3849 stmmac_msi_intr_tx,
3850 0, int_name, &priv->dma_conf.tx_queue[i]);
3851 if (unlikely(ret < 0)) {
3852 netdev_err(priv->dev,
3853 "%s: alloc tx-%d MSI %d (error: %d)\n",
3854 __func__, i, priv->tx_irq[i], ret);
3855 irq_err = REQ_IRQ_ERR_TX;
3856 irq_idx = i;
3857 goto irq_error;
3858 }
3859 irq_set_affinity_hint(priv->tx_irq[i],
3860 cpumask_of(i % num_online_cpus()));
3861 }
3862
3863 return 0;
3864
3865 irq_error:
3866 stmmac_free_irq(dev, irq_err, irq_idx);
3867 return ret;
3868 }
3869
3870 static int stmmac_request_irq_single(struct net_device *dev)
3871 {
3872 struct stmmac_priv *priv = netdev_priv(dev);
3873 enum request_irq_err irq_err;
3874 int ret;
3875
3876 ret = request_irq(dev->irq, stmmac_interrupt,
3877 IRQF_SHARED, dev->name, dev);
3878 if (unlikely(ret < 0)) {
3879 netdev_err(priv->dev,
3880 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3881 __func__, dev->irq, ret);
3882 irq_err = REQ_IRQ_ERR_MAC;
3883 goto irq_error;
3884 }
3885
3886 /* Request the Wake IRQ in case another line
3887 * is used for WoL
3888 */
3889 priv->wol_irq_disabled = true;
3890 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3891 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3892 IRQF_SHARED, dev->name, dev);
3893 if (unlikely(ret < 0)) {
3894 netdev_err(priv->dev,
3895 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3896 __func__, priv->wol_irq, ret);
3897 irq_err = REQ_IRQ_ERR_WOL;
3898 goto irq_error;
3899 }
3900 }
3901
3902 /* Request the IRQ lines */
3903 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3904 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3905 IRQF_SHARED, dev->name, dev);
3906 if (unlikely(ret < 0)) {
3907 netdev_err(priv->dev,
3908 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3909 __func__, priv->lpi_irq, ret);
3910 irq_err = REQ_IRQ_ERR_LPI;
3911 goto irq_error;
3912 }
3913 }
3914
3915 /* Request the common Safety Feature Correctable/Uncorrectable
3916 * Error line in case another line is used
3917 */
3918 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3919 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3920 IRQF_SHARED, dev->name, dev);
3921 if (unlikely(ret < 0)) {
3922 netdev_err(priv->dev,
3923 "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3924 __func__, priv->sfty_irq, ret);
3925 irq_err = REQ_IRQ_ERR_SFTY;
3926 goto irq_error;
3927 }
3928 }
3929
3930 return 0;
3931
3932 irq_error:
3933 stmmac_free_irq(dev, irq_err, 0);
3934 return ret;
3935 }
3936
3937 static int stmmac_request_irq(struct net_device *dev)
3938 {
3939 struct stmmac_priv *priv = netdev_priv(dev);
3940 int ret;
3941
3942 /* Request the IRQ lines */
3943 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3944 ret = stmmac_request_irq_multi_msi(dev);
3945 else
3946 ret = stmmac_request_irq_single(dev);
3947
3948 return ret;
3949 }
3950
3951 /**
3952 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3953 * @priv: driver private structure
3954 * @mtu: MTU to setup the dma queue and buf with
3955 * Description: Allocate and generate a dma_conf based on the provided MTU.
3956 * Allocate the Tx/Rx DMA queues and initialize them.
3957 * Return value:
3958 * the allocated dma_conf struct on success or an appropriate ERR_PTR() on failure.
3959 */
3960 static struct stmmac_dma_conf *
3961 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3962 {
3963 struct stmmac_dma_conf *dma_conf;
3964 int chan, bfsize, ret;
3965
3966 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3967 if (!dma_conf) {
3968 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3969 __func__);
3970 return ERR_PTR(-ENOMEM);
3971 }
3972
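/* Pick the RX DMA buffer size: use the 16KiB jumbo buffer only when the
 * descriptor mode supports it and the MTU requires it, otherwise derive
 * a smaller size from the MTU.
 */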
3973 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3974 if (bfsize < 0)
3975 bfsize = 0;
3976
3977 if (bfsize < BUF_SIZE_16KiB)
3978 bfsize = stmmac_set_bfsize(mtu, 0);
3979
3980 dma_conf->dma_buf_sz = bfsize;
3981 /* Choose the tx/rx ring sizes from the ones already defined in
3982 * the priv struct, if set.
3983 */
3984 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3985 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3986
3987 if (!dma_conf->dma_tx_size)
3988 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3989 if (!dma_conf->dma_rx_size)
3990 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3991
3992 /* Earlier check for TBS */
3993 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3994 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3995 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3996
3997 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3998 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3999 }
4000
4001 ret = alloc_dma_desc_resources(priv, dma_conf);
4002 if (ret < 0) {
4003 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
4004 __func__);
4005 goto alloc_error;
4006 }
4007
4008 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
4009 if (ret < 0) {
4010 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4011 __func__);
4012 goto init_error;
4013 }
4014
4015 return dma_conf;
4016
4017 init_error:
4018 free_dma_desc_resources(priv, dma_conf);
4019 alloc_error:
4020 kfree(dma_conf);
4021 return ERR_PTR(ret);
4022 }
4023
4024 /**
4025 * __stmmac_open - open entry point of the driver
4026 * @dev : pointer to the device structure.
4027 * @dma_conf : structure to take the dma data
4028 * Description:
4029 * This function is the open entry point of the driver.
4030 * Return value:
4031 * 0 on success or a negative errno value as defined in errno.h
4032 * on failure.
4033 */
4034 static int __stmmac_open(struct net_device *dev,
4035 struct stmmac_dma_conf *dma_conf)
4036 {
4037 struct stmmac_priv *priv = netdev_priv(dev);
4038 int mode = priv->plat->phy_interface;
4039 u32 chan;
4040 int ret;
4041
4042 /* Initialise the tx lpi timer, converting from msec to usec */
4043 if (!priv->tx_lpi_timer)
4044 priv->tx_lpi_timer = eee_timer * 1000;
4045
4046 ret = pm_runtime_resume_and_get(priv->device);
4047 if (ret < 0)
4048 return ret;
4049
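/* Attach the PHY unless the XPCS handles the link through Clause 73
 * (backplane) auto-negotiation, in which case no PHY is attached.
 */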
4050 if ((!priv->hw->xpcs ||
4051 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
4052 ret = stmmac_init_phy(dev);
4053 if (ret) {
4054 netdev_err(priv->dev,
4055 "%s: Cannot attach to PHY (error: %d)\n",
4056 __func__, ret);
4057 goto init_phy_error;
4058 }
4059 }
4060
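/* Carry the per-queue TBS enable flags over from the current dma_conf
 * before it is replaced by the freshly allocated one.
 */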
4061 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
4062 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4063 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4064 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4065
4066 stmmac_reset_queues_param(priv);
4067
4068 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4069 priv->plat->serdes_powerup) {
4070 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4071 if (ret < 0) {
4072 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4073 __func__);
4074 goto init_error;
4075 }
4076 }
4077
4078 ret = stmmac_hw_setup(dev, true);
4079 if (ret < 0) {
4080 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4081 goto init_error;
4082 }
4083
4084 stmmac_init_coalesce(priv);
4085
4086 phylink_start(priv->phylink);
4087 /* We may have called phylink_speed_down before */
4088 phylink_speed_up(priv->phylink);
4089
4090 ret = stmmac_request_irq(dev);
4091 if (ret)
4092 goto irq_error;
4093
4094 stmmac_enable_all_queues(priv);
4095 netif_tx_start_all_queues(priv->dev);
4096 stmmac_enable_all_dma_irq(priv);
4097
4098 return 0;
4099
4100 irq_error:
4101 phylink_stop(priv->phylink);
4102
4103 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4104 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4105
4106 stmmac_hw_teardown(dev);
4107 init_error:
4108 phylink_disconnect_phy(priv->phylink);
4109 init_phy_error:
4110 pm_runtime_put(priv->device);
4111 return ret;
4112 }
4113
4114 static int stmmac_open(struct net_device *dev)
4115 {
4116 struct stmmac_priv *priv = netdev_priv(dev);
4117 struct stmmac_dma_conf *dma_conf;
4118 int ret;
4119
4120 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4121 if (IS_ERR(dma_conf))
4122 return PTR_ERR(dma_conf);
4123
4124 ret = __stmmac_open(dev, dma_conf);
4125 if (ret)
4126 free_dma_desc_resources(priv, dma_conf);
4127
4128 kfree(dma_conf);
4129 return ret;
4130 }
4131
4132 /**
4133 * stmmac_release - close entry point of the driver
4134 * @dev : device pointer.
4135 * Description:
4136 * This is the stop entry point of the driver.
4137 */
4138 static int stmmac_release(struct net_device *dev)
4139 {
4140 struct stmmac_priv *priv = netdev_priv(dev);
4141 u32 chan;
4142
4143 if (device_may_wakeup(priv->device))
4144 phylink_speed_down(priv->phylink, false);
4145 /* Stop and disconnect the PHY */
4146 phylink_stop(priv->phylink);
4147 phylink_disconnect_phy(priv->phylink);
4148
4149 stmmac_disable_all_queues(priv);
4150
4151 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4152 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4153
4154 netif_tx_disable(dev);
4155
4156 /* Free the IRQ lines */
4157 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4158
4159 /* Stop TX/RX DMA and clear the descriptors */
4160 stmmac_stop_all_dma(priv);
4161
4162 /* Release and free the Rx/Tx resources */
4163 free_dma_desc_resources(priv, &priv->dma_conf);
4164
4165 /* Powerdown Serdes if there is */
4166 if (priv->plat->serdes_powerdown)
4167 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4168
4169 stmmac_release_ptp(priv);
4170
4171 if (stmmac_fpe_supported(priv))
4172 ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4173
4174 pm_runtime_put(priv->device);
4175
4176 return 0;
4177 }
4178
4179 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4180 struct stmmac_tx_queue *tx_q)
4181 {
4182 u16 tag = 0x0, inner_tag = 0x0;
4183 u32 inner_type = 0x0;
4184 struct dma_desc *p;
4185
4186 if (!priv->dma_cap.vlins)
4187 return false;
4188 if (!skb_vlan_tag_present(skb))
4189 return false;
4190 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4191 inner_tag = skb_vlan_tag_get(skb);
4192 inner_type = STMMAC_VLAN_INSERT;
4193 }
4194
4195 tag = skb_vlan_tag_get(skb);
4196
4197 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4198 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4199 else
4200 p = &tx_q->dma_tx[tx_q->cur_tx];
4201
4202 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4203 return false;
4204
4205 stmmac_set_tx_owner(priv, p);
4206 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4207 return true;
4208 }
4209
4210 /**
4211 * stmmac_tso_allocator - allocate and fill TX descriptors for a TSO buffer
4212 * @priv: driver private structure
4213 * @des: buffer start address
4214 * @total_len: total length to fill in descriptors
4215 * @last_segment: condition for the last descriptor
4216 * @queue: TX queue index
4217 * Description:
4218 * This function fills a descriptor and requests new descriptors according
4219 * to the buffer length to fill
4220 */
4221 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4222 int total_len, bool last_segment, u32 queue)
4223 {
4224 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4225 struct dma_desc *desc;
4226 u32 buff_size;
4227 int tmp_len;
4228
4229 tmp_len = total_len;
4230
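/* Chain one descriptor per TSO_MAX_BUFF_SIZE chunk of the buffer; only
 * the final chunk of the last segment gets the Last Segment bit.
 */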
4231 while (tmp_len > 0) {
4232 dma_addr_t curr_addr;
4233
4234 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4235 priv->dma_conf.dma_tx_size);
4236 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4237
4238 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4239 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4240 else
4241 desc = &tx_q->dma_tx[tx_q->cur_tx];
4242
4243 curr_addr = des + (total_len - tmp_len);
4244 stmmac_set_desc_addr(priv, desc, curr_addr);
4245 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4246 TSO_MAX_BUFF_SIZE : tmp_len;
4247
4248 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4249 0, 1,
4250 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4251 0, 0);
4252
4253 tmp_len -= TSO_MAX_BUFF_SIZE;
4254 }
4255 }
4256
4257 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4258 {
4259 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4260 int desc_size;
4261
4262 if (likely(priv->extend_desc))
4263 desc_size = sizeof(struct dma_extended_desc);
4264 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4265 desc_size = sizeof(struct dma_edesc);
4266 else
4267 desc_size = sizeof(struct dma_desc);
4268
4269 /* The own bit must be the latest setting done when prepare the
4270 * descriptor and then barrier is needed to make sure that
4271 * all is coherent before granting the DMA engine.
4272 */
4273 wmb();
4274
4275 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4276 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4277 }
4278
4279 /**
4280 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4281 * @skb : the socket buffer
4282 * @dev : device pointer
4283 * Description: this is the transmit function that is called on TSO frames
4284 * (support available on GMAC4 and newer chips).
4285 * The diagram below shows the ring programming in case of TSO frames:
4286 *
4287 * First Descriptor
4288 * --------
4289 * | DES0 |---> buffer1 = L2/L3/L4 header
4290 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4291 * | | width is 32-bit, but we never use it.
4292 * | | Also can be used as the most-significant 8-bits or 16-bits of
4293 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4294 * | | or 48-bit, and we always use it.
4295 * | DES2 |---> buffer1 len
4296 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4297 * --------
4298 * --------
4299 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4300 * | DES1 |---> same as the First Descriptor
4301 * | DES2 |---> buffer1 len
4302 * | DES3 |
4303 * --------
4304 * |
4305 * ...
4306 * |
4307 * --------
4308 * | DES0 |---> buffer1 = Split TCP Payload
4309 * | DES1 |---> same as the First Descriptor
4310 * | DES2 |---> buffer1 len
4311 * | DES3 |
4312 * --------
4313 *
4314 * The MSS is fixed while TSO is enabled, so the TDES3 context field is only programmed when the MSS changes.
4315 */
4316 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4317 {
4318 struct dma_desc *desc, *first, *mss_desc = NULL;
4319 struct stmmac_priv *priv = netdev_priv(dev);
4320 unsigned int first_entry, tx_packets;
4321 struct stmmac_txq_stats *txq_stats;
4322 struct stmmac_tx_queue *tx_q;
4323 u32 pay_len, mss, queue;
4324 int i, first_tx, nfrags;
4325 u8 proto_hdr_len, hdr;
4326 dma_addr_t des;
4327 bool set_ic;
4328
4329 /* Always insert VLAN tag to SKB payload for TSO frames.
4330 *
4331 * Never let the HW insert the VLAN tag, since segments split by
4332 * the TSO engine would be left un-tagged by mistake.
4333 */
4334 if (skb_vlan_tag_present(skb)) {
4335 skb = __vlan_hwaccel_push_inside(skb);
4336 if (unlikely(!skb)) {
4337 priv->xstats.tx_dropped++;
4338 return NETDEV_TX_OK;
4339 }
4340 }
4341
4342 nfrags = skb_shinfo(skb)->nr_frags;
4343 queue = skb_get_queue_mapping(skb);
4344
4345 tx_q = &priv->dma_conf.tx_queue[queue];
4346 txq_stats = &priv->xstats.txq_stats[queue];
4347 first_tx = tx_q->cur_tx;
4348
4349 /* Compute header lengths */
4350 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4351 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4352 hdr = sizeof(struct udphdr);
4353 } else {
4354 proto_hdr_len = skb_tcp_all_headers(skb);
4355 hdr = tcp_hdrlen(skb);
4356 }
4357
4358 /* Desc availability based on threshold should be safe enough */
4359 if (unlikely(stmmac_tx_avail(priv, queue) <
4360 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4361 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4362 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4363 queue));
4364 /* This is a hard error, log it. */
4365 netdev_err(priv->dev,
4366 "%s: Tx Ring full when queue awake\n",
4367 __func__);
4368 }
4369 return NETDEV_TX_BUSY;
4370 }
4371
4372 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4373
4374 mss = skb_shinfo(skb)->gso_size;
4375
4376 /* set new MSS value if needed */
4377 if (mss != tx_q->mss) {
4378 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4379 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4380 else
4381 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4382
4383 stmmac_set_mss(priv, mss_desc, mss);
4384 tx_q->mss = mss;
4385 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4386 priv->dma_conf.dma_tx_size);
4387 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4388 }
4389
4390 if (netif_msg_tx_queued(priv)) {
4391 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4392 __func__, hdr, proto_hdr_len, pay_len, mss);
4393 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4394 skb->data_len);
4395 }
4396
4397 first_entry = tx_q->cur_tx;
4398 WARN_ON(tx_q->tx_skbuff[first_entry]);
4399
4400 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4401 desc = &tx_q->dma_entx[first_entry].basic;
4402 else
4403 desc = &tx_q->dma_tx[first_entry];
4404 first = desc;
4405
4406 /* first descriptor: fill Headers on Buf1 */
4407 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4408 DMA_TO_DEVICE);
4409 if (dma_mapping_error(priv->device, des))
4410 goto dma_map_err;
4411
4412 stmmac_set_desc_addr(priv, first, des);
4413 stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4414 (nfrags == 0), queue);
4415
4416 /* In case two or more DMA transmit descriptors are allocated for this
4417 * non-paged SKB data, the DMA buffer address should be saved to
4418 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4419 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4420 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4421 * since the tail areas of the DMA buffer can be accessed by DMA engine
4422 * sooner or later.
4423 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4424 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4425 * this DMA buffer right after the DMA engine completely finishes the
4426 * full buffer transmission.
4427 */
4428 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4429 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4430 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4431 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4432
4433 /* Prepare fragments */
4434 for (i = 0; i < nfrags; i++) {
4435 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4436
4437 des = skb_frag_dma_map(priv->device, frag, 0,
4438 skb_frag_size(frag),
4439 DMA_TO_DEVICE);
4440 if (dma_mapping_error(priv->device, des))
4441 goto dma_map_err;
4442
4443 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4444 (i == nfrags - 1), queue);
4445
4446 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4447 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4448 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4449 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4450 }
4451
4452 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4453
4454 /* Only the last descriptor gets to point to the skb. */
4455 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4456 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4457
4458 /* Manage tx mitigation */
4459 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4460 tx_q->tx_count_frames += tx_packets;
4461
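/* Decide whether this descriptor raises a TX completion interrupt:
 * always when HW timestamping is requested, never when frame-count
 * coalescing is disabled (the TX timer handles cleanup), otherwise
 * whenever the coalesce frame threshold is crossed.
 */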
4462 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4463 set_ic = true;
4464 else if (!priv->tx_coal_frames[queue])
4465 set_ic = false;
4466 else if (tx_packets > priv->tx_coal_frames[queue])
4467 set_ic = true;
4468 else if ((tx_q->tx_count_frames %
4469 priv->tx_coal_frames[queue]) < tx_packets)
4470 set_ic = true;
4471 else
4472 set_ic = false;
4473
4474 if (set_ic) {
4475 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4476 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4477 else
4478 desc = &tx_q->dma_tx[tx_q->cur_tx];
4479
4480 tx_q->tx_count_frames = 0;
4481 stmmac_set_tx_ic(priv, desc);
4482 }
4483
4484 /* We've used all descriptors we need for this skb, however,
4485 * advance cur_tx so that it references a fresh descriptor.
4486 * ndo_start_xmit will fill this descriptor the next time it's
4487 * called and stmmac_tx_clean may clean up to this descriptor.
4488 */
4489 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4490
4491 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4492 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4493 __func__);
4494 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4495 }
4496
4497 u64_stats_update_begin(&txq_stats->q_syncp);
4498 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4499 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4500 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4501 if (set_ic)
4502 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4503 u64_stats_update_end(&txq_stats->q_syncp);
4504
4505 if (priv->sarc_type)
4506 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4507
4508 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4509 priv->hwts_tx_en)) {
4510 /* declare that device is doing timestamping */
4511 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4512 stmmac_enable_tx_timestamp(priv, first);
4513 }
4514
4515 /* Complete the first descriptor before granting the DMA */
4516 stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4517 tx_q->tx_skbuff_dma[first_entry].last_segment,
4518 hdr / 4, (skb->len - proto_hdr_len));
4519
4520 /* If context desc is used to change MSS */
4521 if (mss_desc) {
4522 /* Make sure that the first descriptor has been completely
4523 * written, including its OWN bit. The MSS context descriptor
4524 * actually precedes the first descriptor, so its OWN bit must
4525 * be the last thing written.
4526 */
4527 dma_wmb();
4528 stmmac_set_tx_owner(priv, mss_desc);
4529 }
4530
4531 if (netif_msg_pktdata(priv)) {
4532 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4533 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4534 tx_q->cur_tx, first, nfrags);
4535 pr_info(">>> frame to be transmitted: ");
4536 print_pkt(skb->data, skb_headlen(skb));
4537 }
4538
4539 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4540 skb_tx_timestamp(skb);
4541
4542 stmmac_flush_tx_descriptors(priv, queue);
4543 stmmac_tx_timer_arm(priv, queue);
4544
4545 return NETDEV_TX_OK;
4546
4547 dma_map_err:
4548 dev_err(priv->device, "Tx dma map failed\n");
4549 dev_kfree_skb(skb);
4550 priv->xstats.tx_dropped++;
4551 return NETDEV_TX_OK;
4552 }
4553
4554 /**
4555 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4556 * @skb: socket buffer to check
4557 *
4558 * Check if a packet has an ethertype that will trigger the IP header checks
4559 * and IP/TCP checksum engine of the stmmac core.
4560 *
4561 * Return: true if the ethertype can trigger the checksum engine, false
4562 * otherwise
4563 */
4564 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4565 {
4566 int depth = 0;
4567 __be16 proto;
4568
4569 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4570 &depth);
4571
4572 return (depth <= ETH_HLEN) &&
4573 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4574 }
4575
4576 /**
4577 * stmmac_xmit - Tx entry point of the driver
4578 * @skb : the socket buffer
4579 * @dev : device pointer
4580 * Description : this is the tx entry point of the driver.
4581 * It programs the chain or the ring and supports oversized frames
4582 * and SG feature.
4583 */
4584 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4585 {
4586 unsigned int first_entry, tx_packets, enh_desc;
4587 struct stmmac_priv *priv = netdev_priv(dev);
4588 unsigned int nopaged_len = skb_headlen(skb);
4589 int i, csum_insertion = 0, is_jumbo = 0;
4590 u32 queue = skb_get_queue_mapping(skb);
4591 int nfrags = skb_shinfo(skb)->nr_frags;
4592 int gso = skb_shinfo(skb)->gso_type;
4593 struct stmmac_txq_stats *txq_stats;
4594 struct dma_edesc *tbs_desc = NULL;
4595 struct dma_desc *desc, *first;
4596 struct stmmac_tx_queue *tx_q;
4597 bool has_vlan, set_ic;
4598 int entry, first_tx;
4599 dma_addr_t des;
4600
4601 tx_q = &priv->dma_conf.tx_queue[queue];
4602 txq_stats = &priv->xstats.txq_stats[queue];
4603 first_tx = tx_q->cur_tx;
4604
4605 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4606 stmmac_stop_sw_lpi(priv);
4607
4608 /* Manage oversized TCP frames for GMAC4 device */
4609 if (skb_is_gso(skb) && priv->tso) {
4610 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4611 return stmmac_tso_xmit(skb, dev);
4612 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4613 return stmmac_tso_xmit(skb, dev);
4614 }
4615
4616 if (priv->est && priv->est->enable &&
4617 priv->est->max_sdu[queue] &&
4618 skb->len > priv->est->max_sdu[queue]) {
4619 priv->xstats.max_sdu_txq_drop[queue]++;
4620 goto max_sdu_err;
4621 }
4622
4623 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4624 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4625 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4626 queue));
4627 /* This is a hard error, log it. */
4628 netdev_err(priv->dev,
4629 "%s: Tx Ring full when queue awake\n",
4630 __func__);
4631 }
4632 return NETDEV_TX_BUSY;
4633 }
4634
4635 /* Check if VLAN can be inserted by HW */
4636 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4637
4638 entry = tx_q->cur_tx;
4639 first_entry = entry;
4640 WARN_ON(tx_q->tx_skbuff[first_entry]);
4641
4642 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4643 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4644 * queues. In that case, checksum offloading for those queues that don't
4645 * support tx coe needs to fall back to software checksum calculation.
4646 *
4647 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4648 * also have to be checksummed in software.
4649 */
4650 if (csum_insertion &&
4651 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4652 !stmmac_has_ip_ethertype(skb))) {
4653 if (unlikely(skb_checksum_help(skb)))
4654 goto dma_map_err;
4655 csum_insertion = !csum_insertion;
4656 }
4657
4658 if (likely(priv->extend_desc))
4659 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4660 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4661 desc = &tx_q->dma_entx[entry].basic;
4662 else
4663 desc = tx_q->dma_tx + entry;
4664
4665 first = desc;
4666
4667 if (has_vlan)
4668 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4669
4670 enh_desc = priv->plat->enh_desc;
4671 /* To program the descriptors according to the size of the frame */
4672 if (enh_desc)
4673 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4674
4675 if (unlikely(is_jumbo)) {
4676 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4677 if (unlikely(entry < 0) && (entry != -EINVAL))
4678 goto dma_map_err;
4679 }
4680
4681 for (i = 0; i < nfrags; i++) {
4682 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4683 int len = skb_frag_size(frag);
4684 bool last_segment = (i == (nfrags - 1));
4685
4686 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4687 WARN_ON(tx_q->tx_skbuff[entry]);
4688
4689 if (likely(priv->extend_desc))
4690 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4691 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4692 desc = &tx_q->dma_entx[entry].basic;
4693 else
4694 desc = tx_q->dma_tx + entry;
4695
4696 des = skb_frag_dma_map(priv->device, frag, 0, len,
4697 DMA_TO_DEVICE);
4698 if (dma_mapping_error(priv->device, des))
4699 goto dma_map_err; /* should reuse desc w/o issues */
4700
4701 tx_q->tx_skbuff_dma[entry].buf = des;
4702
4703 stmmac_set_desc_addr(priv, desc, des);
4704
4705 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4706 tx_q->tx_skbuff_dma[entry].len = len;
4707 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4708 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4709
4710 /* Prepare the descriptor and set the own bit too */
4711 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4712 priv->mode, 1, last_segment, skb->len);
4713 }
4714
4715 /* Only the last descriptor gets to point to the skb. */
4716 tx_q->tx_skbuff[entry] = skb;
4717 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4718
4719 /* According to the coalesce parameter, the IC bit for the latest
4720 * segment is reset and the timer re-started to clean the tx status.
4721 * This approach takes care of the fragments: desc is the first
4722 * element in case of no SG.
4723 */
4724 tx_packets = (entry + 1) - first_tx;
4725 tx_q->tx_count_frames += tx_packets;
4726
4727 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4728 set_ic = true;
4729 else if (!priv->tx_coal_frames[queue])
4730 set_ic = false;
4731 else if (tx_packets > priv->tx_coal_frames[queue])
4732 set_ic = true;
4733 else if ((tx_q->tx_count_frames %
4734 priv->tx_coal_frames[queue]) < tx_packets)
4735 set_ic = true;
4736 else
4737 set_ic = false;
4738
4739 if (set_ic) {
4740 if (likely(priv->extend_desc))
4741 desc = &tx_q->dma_etx[entry].basic;
4742 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4743 desc = &tx_q->dma_entx[entry].basic;
4744 else
4745 desc = &tx_q->dma_tx[entry];
4746
4747 tx_q->tx_count_frames = 0;
4748 stmmac_set_tx_ic(priv, desc);
4749 }
4750
4751 /* We've used all descriptors we need for this skb, however,
4752 * advance cur_tx so that it references a fresh descriptor.
4753 * ndo_start_xmit will fill this descriptor the next time it's
4754 * called and stmmac_tx_clean may clean up to this descriptor.
4755 */
4756 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4757 tx_q->cur_tx = entry;
4758
4759 if (netif_msg_pktdata(priv)) {
4760 netdev_dbg(priv->dev,
4761 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4762 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4763 entry, first, nfrags);
4764
4765 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4766 print_pkt(skb->data, skb->len);
4767 }
4768
4769 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4770 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4771 __func__);
4772 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4773 }
4774
4775 u64_stats_update_begin(&txq_stats->q_syncp);
4776 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4777 if (set_ic)
4778 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4779 u64_stats_update_end(&txq_stats->q_syncp);
4780
4781 if (priv->sarc_type)
4782 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4783
4784 /* Ready to fill the first descriptor and set the OWN bit w/o any
4785 * problems because all the descriptors are actually ready to be
4786 * passed to the DMA engine.
4787 */
4788 if (likely(!is_jumbo)) {
4789 bool last_segment = (nfrags == 0);
4790
4791 des = dma_map_single(priv->device, skb->data,
4792 nopaged_len, DMA_TO_DEVICE);
4793 if (dma_mapping_error(priv->device, des))
4794 goto dma_map_err;
4795
4796 tx_q->tx_skbuff_dma[first_entry].buf = des;
4797 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4798 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4799
4800 stmmac_set_desc_addr(priv, first, des);
4801
4802 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4803 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4804
4805 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4806 priv->hwts_tx_en)) {
4807 /* declare that device is doing timestamping */
4808 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4809 stmmac_enable_tx_timestamp(priv, first);
4810 }
4811
4812 /* Prepare the first descriptor setting the OWN bit too */
4813 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4814 csum_insertion, priv->mode, 0, last_segment,
4815 skb->len);
4816 }
4817
4818 if (tx_q->tbs & STMMAC_TBS_EN) {
4819 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4820
4821 tbs_desc = &tx_q->dma_entx[first_entry];
4822 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4823 }
4824
4825 stmmac_set_tx_owner(priv, first);
4826
4827 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4828
4829 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4830 skb_tx_timestamp(skb);
4831 stmmac_flush_tx_descriptors(priv, queue);
4832 stmmac_tx_timer_arm(priv, queue);
4833
4834 return NETDEV_TX_OK;
4835
4836 dma_map_err:
4837 netdev_err(priv->dev, "Tx DMA map failed\n");
4838 max_sdu_err:
4839 dev_kfree_skb(skb);
4840 priv->xstats.tx_dropped++;
4841 return NETDEV_TX_OK;
4842 }
4843
4844 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4845 {
4846 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4847 __be16 vlan_proto = veth->h_vlan_proto;
4848 u16 vlanid;
4849
4850 if ((vlan_proto == htons(ETH_P_8021Q) &&
4851 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4852 (vlan_proto == htons(ETH_P_8021AD) &&
4853 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4854 /* pop the vlan tag */
4855 vlanid = ntohs(veth->h_vlan_TCI);
4856 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4857 skb_pull(skb, VLAN_HLEN);
4858 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4859 }
4860 }
4861
4862 /**
4863 * stmmac_rx_refill - refill the used preallocated RX buffers
4864 * @priv: driver private structure
4865 * @queue: RX queue index
4866 * Description: this reallocates the RX buffers used by the zero-copy
4867 * reception process.
4868 */
4869 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4870 {
4871 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4872 int dirty = stmmac_rx_dirty(priv, queue);
4873 unsigned int entry = rx_q->dirty_rx;
4874 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4875
4876 if (priv->dma_cap.host_dma_width <= 32)
4877 gfp |= GFP_DMA32;
4878
4879 while (dirty-- > 0) {
4880 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4881 struct dma_desc *p;
4882 bool use_rx_wd;
4883
4884 if (priv->extend_desc)
4885 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4886 else
4887 p = rx_q->dma_rx + entry;
4888
4889 if (!buf->page) {
4890 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4891 if (!buf->page)
4892 break;
4893 }
4894
4895 if (priv->sph && !buf->sec_page) {
4896 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4897 if (!buf->sec_page)
4898 break;
4899
4900 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4901 }
4902
4903 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4904
4905 stmmac_set_desc_addr(priv, p, buf->addr);
4906 if (priv->sph)
4907 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4908 else
4909 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4910 stmmac_refill_desc3(priv, rx_q, p);
4911
4912 rx_q->rx_count_frames++;
4913 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4914 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4915 rx_q->rx_count_frames = 0;
4916
4917 use_rx_wd = !priv->rx_coal_frames[queue];
4918 use_rx_wd |= rx_q->rx_count_frames > 0;
4919 if (!priv->use_riwt)
4920 use_rx_wd = false;
4921
4922 dma_wmb();
4923 stmmac_set_rx_owner(priv, p, use_rx_wd);
4924
4925 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4926 }
4927 rx_q->dirty_rx = entry;
4928 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4929 (rx_q->dirty_rx * sizeof(struct dma_desc));
4930 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4931 }
4932
4933 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4934 struct dma_desc *p,
4935 int status, unsigned int len)
4936 {
4937 unsigned int plen = 0, hlen = 0;
4938 int coe = priv->hw->rx_csum;
4939
4940 /* Not first descriptor, buffer is always zero */
4941 if (priv->sph && len)
4942 return 0;
4943
4944 /* First descriptor, get split header length */
4945 stmmac_get_rx_header_len(priv, p, &hlen);
4946 if (priv->sph && hlen) {
4947 priv->xstats.rx_split_hdr_pkt_n++;
4948 return hlen;
4949 }
4950
4951 /* First descriptor, not last descriptor and not split header */
4952 if (status & rx_not_ls)
4953 return priv->dma_conf.dma_buf_sz;
4954
4955 plen = stmmac_get_rx_frame_len(priv, p, coe);
4956
4957 /* First descriptor and last descriptor and not split header */
4958 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4959 }
4960
4961 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4962 struct dma_desc *p,
4963 int status, unsigned int len)
4964 {
4965 int coe = priv->hw->rx_csum;
4966 unsigned int plen = 0;
4967
4968 /* Not split header, buffer is not available */
4969 if (!priv->sph)
4970 return 0;
4971
4972 /* Not last descriptor */
4973 if (status & rx_not_ls)
4974 return priv->dma_conf.dma_buf_sz;
4975
4976 plen = stmmac_get_rx_frame_len(priv, p, coe);
4977
4978 /* Last descriptor */
4979 return plen - len;
4980 }
4981
4982 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4983 struct xdp_frame *xdpf, bool dma_map)
4984 {
4985 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4986 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4987 bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
4988 unsigned int entry = tx_q->cur_tx;
4989 struct dma_desc *tx_desc;
4990 dma_addr_t dma_addr;
4991 bool set_ic;
4992
4993 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4994 return STMMAC_XDP_CONSUMED;
4995
4996 if (priv->est && priv->est->enable &&
4997 priv->est->max_sdu[queue] &&
4998 xdpf->len > priv->est->max_sdu[queue]) {
4999 priv->xstats.max_sdu_txq_drop[queue]++;
5000 return STMMAC_XDP_CONSUMED;
5001 }
5002
5003 if (likely(priv->extend_desc))
5004 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
5005 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
5006 tx_desc = &tx_q->dma_entx[entry].basic;
5007 else
5008 tx_desc = tx_q->dma_tx + entry;
5009
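/* dma_map is true for frames redirected from another device via
 * ndo_xdp_xmit, which need a fresh mapping; XDP_TX frames reuse the
 * page_pool mapping and only need a DMA sync.
 */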
5010 if (dma_map) {
5011 dma_addr = dma_map_single(priv->device, xdpf->data,
5012 xdpf->len, DMA_TO_DEVICE);
5013 if (dma_mapping_error(priv->device, dma_addr))
5014 return STMMAC_XDP_CONSUMED;
5015
5016 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
5017 } else {
5018 struct page *page = virt_to_page(xdpf->data);
5019
5020 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
5021 xdpf->headroom;
5022 dma_sync_single_for_device(priv->device, dma_addr,
5023 xdpf->len, DMA_BIDIRECTIONAL);
5024
5025 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
5026 }
5027
5028 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
5029 tx_q->tx_skbuff_dma[entry].map_as_page = false;
5030 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
5031 tx_q->tx_skbuff_dma[entry].last_segment = true;
5032 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
5033
5034 tx_q->xdpf[entry] = xdpf;
5035
5036 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
5037
5038 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
5039 csum, priv->mode, true, true,
5040 xdpf->len);
5041
5042 tx_q->tx_count_frames++;
5043
5044 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5045 set_ic = true;
5046 else
5047 set_ic = false;
5048
5049 if (set_ic) {
5050 tx_q->tx_count_frames = 0;
5051 stmmac_set_tx_ic(priv, tx_desc);
5052 u64_stats_update_begin(&txq_stats->q_syncp);
5053 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
5054 u64_stats_update_end(&txq_stats->q_syncp);
5055 }
5056
5057 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5058
5059 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5060 tx_q->cur_tx = entry;
5061
5062 return STMMAC_XDP_TX;
5063 }
5064
5065 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5066 int cpu)
5067 {
5068 int index = cpu;
5069
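/* Map the current CPU to a TX queue, wrapping around when there are
 * more CPUs than TX queues.
 */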
5070 if (unlikely(index < 0))
5071 index = 0;
5072
5073 while (index >= priv->plat->tx_queues_to_use)
5074 index -= priv->plat->tx_queues_to_use;
5075
5076 return index;
5077 }
5078
5079 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5080 struct xdp_buff *xdp)
5081 {
5082 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5083 int cpu = smp_processor_id();
5084 struct netdev_queue *nq;
5085 int queue;
5086 int res;
5087
5088 if (unlikely(!xdpf))
5089 return STMMAC_XDP_CONSUMED;
5090
5091 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5092 nq = netdev_get_tx_queue(priv->dev, queue);
5093
5094 __netif_tx_lock(nq, cpu);
5095 /* Avoids TX time-out as we are sharing with slow path */
5096 txq_trans_cond_update(nq);
5097
5098 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5099 if (res == STMMAC_XDP_TX)
5100 stmmac_flush_tx_descriptors(priv, queue);
5101
5102 __netif_tx_unlock(nq);
5103
5104 return res;
5105 }
5106
5107 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5108 struct bpf_prog *prog,
5109 struct xdp_buff *xdp)
5110 {
5111 u32 act;
5112 int res;
5113
5114 act = bpf_prog_run_xdp(prog, xdp);
5115 switch (act) {
5116 case XDP_PASS:
5117 res = STMMAC_XDP_PASS;
5118 break;
5119 case XDP_TX:
5120 res = stmmac_xdp_xmit_back(priv, xdp);
5121 break;
5122 case XDP_REDIRECT:
5123 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5124 res = STMMAC_XDP_CONSUMED;
5125 else
5126 res = STMMAC_XDP_REDIRECT;
5127 break;
5128 default:
5129 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5130 fallthrough;
5131 case XDP_ABORTED:
5132 trace_xdp_exception(priv->dev, prog, act);
5133 fallthrough;
5134 case XDP_DROP:
5135 res = STMMAC_XDP_CONSUMED;
5136 break;
5137 }
5138
5139 return res;
5140 }
5141
5142 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5143 struct xdp_buff *xdp)
5144 {
5145 struct bpf_prog *prog;
5146 int res;
5147
5148 prog = READ_ONCE(priv->xdp_prog);
5149 if (!prog) {
5150 res = STMMAC_XDP_PASS;
5151 goto out;
5152 }
5153
5154 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5155 out:
5156 return ERR_PTR(-res);
5157 }
5158
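/* Post-RX housekeeping for XDP: arm the TX coalescing timer if any frame was
 * queued via XDP_TX, and flush the redirect maps if any frame was redirected.
 */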
5159 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5160 int xdp_status)
5161 {
5162 int cpu = smp_processor_id();
5163 int queue;
5164
5165 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5166
5167 if (xdp_status & STMMAC_XDP_TX)
5168 stmmac_tx_timer_arm(priv, queue);
5169
5170 if (xdp_status & STMMAC_XDP_REDIRECT)
5171 xdp_do_flush();
5172 }
5173
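/* Copy an XSK (zero-copy) buffer, including any metadata area, into a freshly
 * allocated skb so that the XSK buffer itself can be returned to the pool.
 */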
5174 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5175 struct xdp_buff *xdp)
5176 {
5177 unsigned int metasize = xdp->data - xdp->data_meta;
5178 unsigned int datasize = xdp->data_end - xdp->data;
5179 struct sk_buff *skb;
5180
5181 skb = napi_alloc_skb(&ch->rxtx_napi,
5182 xdp->data_end - xdp->data_hard_start);
5183 if (unlikely(!skb))
5184 return NULL;
5185
5186 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5187 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5188 if (metasize)
5189 skb_metadata_set(skb, metasize);
5190
5191 return skb;
5192 }
5193
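/* Deliver an XSK RX buffer to the stack: build an skb copy, recover the RX
 * hardware timestamp, strip the VLAN tag, set checksum status and RX hash,
 * then hand it to NAPI GRO and update the per-queue counters.
 */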
5194 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5195 struct dma_desc *p, struct dma_desc *np,
5196 struct xdp_buff *xdp)
5197 {
5198 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5199 struct stmmac_channel *ch = &priv->channel[queue];
5200 unsigned int len = xdp->data_end - xdp->data;
5201 enum pkt_hash_types hash_type;
5202 int coe = priv->hw->rx_csum;
5203 struct sk_buff *skb;
5204 u32 hash;
5205
5206 skb = stmmac_construct_skb_zc(ch, xdp);
5207 if (!skb) {
5208 priv->xstats.rx_dropped++;
5209 return;
5210 }
5211
5212 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5213 if (priv->hw->hw_vlan_en)
5214 /* MAC level stripping. */
5215 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5216 else
5217 /* Driver level stripping. */
5218 stmmac_rx_vlan(priv->dev, skb);
5219 skb->protocol = eth_type_trans(skb, priv->dev);
5220
5221 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5222 skb_checksum_none_assert(skb);
5223 else
5224 skb->ip_summed = CHECKSUM_UNNECESSARY;
5225
5226 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5227 skb_set_hash(skb, hash, hash_type);
5228
5229 skb_record_rx_queue(skb, queue);
5230 napi_gro_receive(&ch->rxtx_napi, skb);
5231
5232 u64_stats_update_begin(&rxq_stats->napi_syncp);
5233 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5234 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5235 u64_stats_update_end(&rxq_stats->napi_syncp);
5236 }
5237
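/* Refill up to @budget RX descriptors with buffers from the XSK pool and hand
 * them back to the DMA engine. Returns false if the pool runs out of buffers
 * before the requested number of descriptors has been refilled.
 */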
5238 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5239 {
5240 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5241 unsigned int entry = rx_q->dirty_rx;
5242 struct dma_desc *rx_desc = NULL;
5243 bool ret = true;
5244
5245 budget = min(budget, stmmac_rx_dirty(priv, queue));
5246
5247 while (budget-- > 0 && entry != rx_q->cur_rx) {
5248 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5249 dma_addr_t dma_addr;
5250 bool use_rx_wd;
5251
5252 if (!buf->xdp) {
5253 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5254 if (!buf->xdp) {
5255 ret = false;
5256 break;
5257 }
5258 }
5259
5260 if (priv->extend_desc)
5261 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5262 else
5263 rx_desc = rx_q->dma_rx + entry;
5264
5265 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5266 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5267 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5268 stmmac_refill_desc3(priv, rx_q, rx_desc);
5269
5270 rx_q->rx_count_frames++;
5271 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5272 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5273 rx_q->rx_count_frames = 0;
5274
5275 use_rx_wd = !priv->rx_coal_frames[queue];
5276 use_rx_wd |= rx_q->rx_count_frames > 0;
5277 if (!priv->use_riwt)
5278 use_rx_wd = false;
5279
5280 dma_wmb();
5281 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5282
5283 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5284 }
5285
5286 if (rx_desc) {
5287 rx_q->dirty_rx = entry;
5288 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5289 (rx_q->dirty_rx * sizeof(struct dma_desc));
5290 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5291 }
5292
5293 return ret;
5294 }
5295
5296 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5297 {
5298 /* In the XDP zero-copy data path, the xdp field in struct xdp_buff_xsk is
5299 * used to represent the incoming packet, whereas the cb field in the same
5300 * structure is used to store driver-specific info. Thus, struct
5301 * stmmac_xdp_buff is laid on top of the xdp and cb fields of struct xdp_buff_xsk.
5302 */
5303 return (struct stmmac_xdp_buff *)xdp;
5304 }
5305
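/* Zero-copy (XSK) RX path: process up to @limit received descriptors, run the
 * XDP program on each frame, refill the ring from the XSK pool in batches and
 * handle the need_wakeup signalling towards the userspace consumer.
 */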
5306 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5307 {
5308 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5309 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5310 unsigned int count = 0, error = 0, len = 0;
5311 int dirty = stmmac_rx_dirty(priv, queue);
5312 unsigned int next_entry = rx_q->cur_rx;
5313 u32 rx_errors = 0, rx_dropped = 0;
5314 unsigned int desc_size;
5315 struct bpf_prog *prog;
5316 bool failure = false;
5317 int xdp_status = 0;
5318 int status = 0;
5319
5320 if (netif_msg_rx_status(priv)) {
5321 void *rx_head;
5322
5323 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5324 if (priv->extend_desc) {
5325 rx_head = (void *)rx_q->dma_erx;
5326 desc_size = sizeof(struct dma_extended_desc);
5327 } else {
5328 rx_head = (void *)rx_q->dma_rx;
5329 desc_size = sizeof(struct dma_desc);
5330 }
5331
5332 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5333 rx_q->dma_rx_phy, desc_size);
5334 }
5335 while (count < limit) {
5336 struct stmmac_rx_buffer *buf;
5337 struct stmmac_xdp_buff *ctx;
5338 unsigned int buf1_len = 0;
5339 struct dma_desc *np, *p;
5340 int entry;
5341 int res;
5342
5343 if (!count && rx_q->state_saved) {
5344 error = rx_q->state.error;
5345 len = rx_q->state.len;
5346 } else {
5347 rx_q->state_saved = false;
5348 error = 0;
5349 len = 0;
5350 }
5351
5352 if (count >= limit)
5353 break;
5354
5355 read_again:
5356 buf1_len = 0;
5357 entry = next_entry;
5358 buf = &rx_q->buf_pool[entry];
5359
5360 if (dirty >= STMMAC_RX_FILL_BATCH) {
5361 failure = failure ||
5362 !stmmac_rx_refill_zc(priv, queue, dirty);
5363 dirty = 0;
5364 }
5365
5366 if (priv->extend_desc)
5367 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5368 else
5369 p = rx_q->dma_rx + entry;
5370
5371 /* read the status of the incoming frame */
5372 status = stmmac_rx_status(priv, &priv->xstats, p);
5373 /* check if managed by the DMA otherwise go ahead */
5374 if (unlikely(status & dma_own))
5375 break;
5376
5377 /* Prefetch the next RX descriptor */
5378 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5379 priv->dma_conf.dma_rx_size);
5380 next_entry = rx_q->cur_rx;
5381
5382 if (priv->extend_desc)
5383 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5384 else
5385 np = rx_q->dma_rx + next_entry;
5386
5387 prefetch(np);
5388
5389 /* Ensure a valid XSK buffer before proceeding */
5390 if (!buf->xdp)
5391 break;
5392
5393 if (priv->extend_desc)
5394 stmmac_rx_extended_status(priv, &priv->xstats,
5395 rx_q->dma_erx + entry);
5396 if (unlikely(status == discard_frame)) {
5397 xsk_buff_free(buf->xdp);
5398 buf->xdp = NULL;
5399 dirty++;
5400 error = 1;
5401 if (!priv->hwts_rx_en)
5402 rx_errors++;
5403 }
5404
5405 if (unlikely(error && (status & rx_not_ls)))
5406 goto read_again;
5407 if (unlikely(error)) {
5408 count++;
5409 continue;
5410 }
5411
5412 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5413 if (likely(status & rx_not_ls)) {
5414 xsk_buff_free(buf->xdp);
5415 buf->xdp = NULL;
5416 dirty++;
5417 count++;
5418 goto read_again;
5419 }
5420
5421 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5422 ctx->priv = priv;
5423 ctx->desc = p;
5424 ctx->ndesc = np;
5425
5426 /* XDP ZC frames only support primary buffers for now */
5427 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5428 len += buf1_len;
5429
5430 /* ACS is disabled; strip manually. */
5431 if (likely(!(status & rx_not_ls))) {
5432 buf1_len -= ETH_FCS_LEN;
5433 len -= ETH_FCS_LEN;
5434 }
5435
5436 /* RX buffer is good and fits into an XSK pool buffer */
5437 buf->xdp->data_end = buf->xdp->data + buf1_len;
5438 xsk_buff_dma_sync_for_cpu(buf->xdp);
5439
5440 prog = READ_ONCE(priv->xdp_prog);
5441 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5442
5443 switch (res) {
5444 case STMMAC_XDP_PASS:
5445 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5446 xsk_buff_free(buf->xdp);
5447 break;
5448 case STMMAC_XDP_CONSUMED:
5449 xsk_buff_free(buf->xdp);
5450 rx_dropped++;
5451 break;
5452 case STMMAC_XDP_TX:
5453 case STMMAC_XDP_REDIRECT:
5454 xdp_status |= res;
5455 break;
5456 }
5457
5458 buf->xdp = NULL;
5459 dirty++;
5460 count++;
5461 }
5462
5463 if (status & rx_not_ls) {
5464 rx_q->state_saved = true;
5465 rx_q->state.error = error;
5466 rx_q->state.len = len;
5467 }
5468
5469 stmmac_finalize_xdp_rx(priv, xdp_status);
5470
5471 u64_stats_update_begin(&rxq_stats->napi_syncp);
5472 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5473 u64_stats_update_end(&rxq_stats->napi_syncp);
5474
5475 priv->xstats.rx_dropped += rx_dropped;
5476 priv->xstats.rx_errors += rx_errors;
5477
5478 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5479 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5480 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5481 else
5482 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5483
5484 return (int)count;
5485 }
5486
5487 return failure ? limit : (int)count;
5488 }
5489
5490 /**
5491 * stmmac_rx - manage the receive process
5492 * @priv: driver private structure
5493 * @limit: napi budget
5494 * @queue: RX queue index.
5495 * Description : this is the function called by the napi poll method.
5496 * It gets all the frames inside the ring.
5497 */
5498 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5499 {
5500 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5501 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5502 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5503 struct stmmac_channel *ch = &priv->channel[queue];
5504 unsigned int count = 0, error = 0, len = 0;
5505 int status = 0, coe = priv->hw->rx_csum;
5506 unsigned int next_entry = rx_q->cur_rx;
5507 enum dma_data_direction dma_dir;
5508 unsigned int desc_size;
5509 struct sk_buff *skb = NULL;
5510 struct stmmac_xdp_buff ctx;
5511 int xdp_status = 0;
5512 int bufsz;
5513
5514 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5515 bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5516 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5517
5518 if (netif_msg_rx_status(priv)) {
5519 void *rx_head;
5520
5521 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5522 if (priv->extend_desc) {
5523 rx_head = (void *)rx_q->dma_erx;
5524 desc_size = sizeof(struct dma_extended_desc);
5525 } else {
5526 rx_head = (void *)rx_q->dma_rx;
5527 desc_size = sizeof(struct dma_desc);
5528 }
5529
5530 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5531 rx_q->dma_rx_phy, desc_size);
5532 }
5533 while (count < limit) {
5534 unsigned int buf1_len = 0, buf2_len = 0;
5535 enum pkt_hash_types hash_type;
5536 struct stmmac_rx_buffer *buf;
5537 struct dma_desc *np, *p;
5538 int entry;
5539 u32 hash;
5540
5541 if (!count && rx_q->state_saved) {
5542 skb = rx_q->state.skb;
5543 error = rx_q->state.error;
5544 len = rx_q->state.len;
5545 } else {
5546 rx_q->state_saved = false;
5547 skb = NULL;
5548 error = 0;
5549 len = 0;
5550 }
5551
5552 read_again:
5553 if (count >= limit)
5554 break;
5555
5556 buf1_len = 0;
5557 buf2_len = 0;
5558 entry = next_entry;
5559 buf = &rx_q->buf_pool[entry];
5560
5561 if (priv->extend_desc)
5562 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5563 else
5564 p = rx_q->dma_rx + entry;
5565
5566 /* read the status of the incoming frame */
5567 status = stmmac_rx_status(priv, &priv->xstats, p);
5568 /* check if managed by the DMA otherwise go ahead */
5569 if (unlikely(status & dma_own))
5570 break;
5571
5572 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5573 priv->dma_conf.dma_rx_size);
5574 next_entry = rx_q->cur_rx;
5575
5576 if (priv->extend_desc)
5577 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5578 else
5579 np = rx_q->dma_rx + next_entry;
5580
5581 prefetch(np);
5582
5583 if (priv->extend_desc)
5584 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5585 if (unlikely(status == discard_frame)) {
5586 page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5587 buf->page = NULL;
5588 error = 1;
5589 if (!priv->hwts_rx_en)
5590 rx_errors++;
5591 }
5592
5593 if (unlikely(error && (status & rx_not_ls)))
5594 goto read_again;
5595 if (unlikely(error)) {
5596 dev_kfree_skb(skb);
5597 skb = NULL;
5598 count++;
5599 continue;
5600 }
5601
5602 /* Buffer is good. Go on. */
5603
5604 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5605 len += buf1_len;
5606 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5607 len += buf2_len;
5608
5609 /* ACS is disabled; strip manually. */
5610 if (likely(!(status & rx_not_ls))) {
5611 if (buf2_len) {
5612 buf2_len -= ETH_FCS_LEN;
5613 len -= ETH_FCS_LEN;
5614 } else if (buf1_len) {
5615 buf1_len -= ETH_FCS_LEN;
5616 len -= ETH_FCS_LEN;
5617 }
5618 }
5619
5620 if (!skb) {
5621 unsigned int pre_len, sync_len;
5622
5623 dma_sync_single_for_cpu(priv->device, buf->addr,
5624 buf1_len, dma_dir);
5625 net_prefetch(page_address(buf->page) +
5626 buf->page_offset);
5627
5628 xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5629 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5630 buf->page_offset, buf1_len, true);
5631
5632 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5633 buf->page_offset;
5634
5635 ctx.priv = priv;
5636 ctx.desc = p;
5637 ctx.ndesc = np;
5638
5639 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5640 /* Due to xdp_adjust_tail: the DMA sync for_device
5641 * must cover the maximum length the CPU touched
5642 */
5643 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5644 buf->page_offset;
5645 sync_len = max(sync_len, pre_len);
5646
5647 /* For non-XDP_PASS verdicts */
5648 if (IS_ERR(skb)) {
5649 unsigned int xdp_res = -PTR_ERR(skb);
5650
5651 if (xdp_res & STMMAC_XDP_CONSUMED) {
5652 page_pool_put_page(rx_q->page_pool,
5653 virt_to_head_page(ctx.xdp.data),
5654 sync_len, true);
5655 buf->page = NULL;
5656 rx_dropped++;
5657
5658 /* Clear skb, as it holds the XDP verdict
5659 * encoded as an error pointer.
5660 */
5661 skb = NULL;
5662
5663 if (unlikely((status & rx_not_ls)))
5664 goto read_again;
5665
5666 count++;
5667 continue;
5668 } else if (xdp_res & (STMMAC_XDP_TX |
5669 STMMAC_XDP_REDIRECT)) {
5670 xdp_status |= xdp_res;
5671 buf->page = NULL;
5672 skb = NULL;
5673 count++;
5674 continue;
5675 }
5676 }
5677 }
5678
5679 if (!skb) {
5680 unsigned int head_pad_len;
5681
5682 /* XDP program may expand or reduce tail */
5683 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5684
5685 skb = napi_build_skb(page_address(buf->page),
5686 rx_q->napi_skb_frag_size);
5687 if (!skb) {
5688 page_pool_recycle_direct(rx_q->page_pool,
5689 buf->page);
5690 rx_dropped++;
5691 count++;
5692 goto drain_data;
5693 }
5694
5695 /* XDP program may adjust header */
5696 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5697 skb_reserve(skb, head_pad_len);
5698 skb_put(skb, buf1_len);
5699 skb_mark_for_recycle(skb);
5700 buf->page = NULL;
5701 } else if (buf1_len) {
5702 dma_sync_single_for_cpu(priv->device, buf->addr,
5703 buf1_len, dma_dir);
5704 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5705 buf->page, buf->page_offset, buf1_len,
5706 priv->dma_conf.dma_buf_sz);
5707 buf->page = NULL;
5708 }
5709
5710 if (buf2_len) {
5711 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5712 buf2_len, dma_dir);
5713 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5714 buf->sec_page, 0, buf2_len,
5715 priv->dma_conf.dma_buf_sz);
5716 buf->sec_page = NULL;
5717 }
5718
5719 drain_data:
5720 if (likely(status & rx_not_ls))
5721 goto read_again;
5722 if (!skb)
5723 continue;
5724
5725 /* Got entire packet into SKB. Finish it. */
5726
5727 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5728
5729 if (priv->hw->hw_vlan_en)
5730 /* MAC level stripping. */
5731 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5732 else
5733 /* Driver level stripping. */
5734 stmmac_rx_vlan(priv->dev, skb);
5735
5736 skb->protocol = eth_type_trans(skb, priv->dev);
5737
5738 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5739 skb_checksum_none_assert(skb);
5740 else
5741 skb->ip_summed = CHECKSUM_UNNECESSARY;
5742
5743 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5744 skb_set_hash(skb, hash, hash_type);
5745
5746 skb_record_rx_queue(skb, queue);
5747 napi_gro_receive(&ch->rx_napi, skb);
5748 skb = NULL;
5749
5750 rx_packets++;
5751 rx_bytes += len;
5752 count++;
5753 }
5754
5755 if (status & rx_not_ls || skb) {
5756 rx_q->state_saved = true;
5757 rx_q->state.skb = skb;
5758 rx_q->state.error = error;
5759 rx_q->state.len = len;
5760 }
5761
5762 stmmac_finalize_xdp_rx(priv, xdp_status);
5763
5764 stmmac_rx_refill(priv, queue);
5765
5766 u64_stats_update_begin(&rxq_stats->napi_syncp);
5767 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5768 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5769 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5770 u64_stats_update_end(&rxq_stats->napi_syncp);
5771
5772 priv->xstats.rx_dropped += rx_dropped;
5773 priv->xstats.rx_errors += rx_errors;
5774
5775 return count;
5776 }
5777
5778 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5779 {
5780 struct stmmac_channel *ch =
5781 container_of(napi, struct stmmac_channel, rx_napi);
5782 struct stmmac_priv *priv = ch->priv_data;
5783 struct stmmac_rxq_stats *rxq_stats;
5784 u32 chan = ch->index;
5785 int work_done;
5786
5787 rxq_stats = &priv->xstats.rxq_stats[chan];
5788 u64_stats_update_begin(&rxq_stats->napi_syncp);
5789 u64_stats_inc(&rxq_stats->napi.poll);
5790 u64_stats_update_end(&rxq_stats->napi_syncp);
5791
5792 work_done = stmmac_rx(priv, budget, chan);
5793 if (work_done < budget && napi_complete_done(napi, work_done)) {
5794 unsigned long flags;
5795
5796 spin_lock_irqsave(&ch->lock, flags);
5797 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5798 spin_unlock_irqrestore(&ch->lock, flags);
5799 }
5800
5801 return work_done;
5802 }
5803
5804 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5805 {
5806 struct stmmac_channel *ch =
5807 container_of(napi, struct stmmac_channel, tx_napi);
5808 struct stmmac_priv *priv = ch->priv_data;
5809 struct stmmac_txq_stats *txq_stats;
5810 bool pending_packets = false;
5811 u32 chan = ch->index;
5812 int work_done;
5813
5814 txq_stats = &priv->xstats.txq_stats[chan];
5815 u64_stats_update_begin(&txq_stats->napi_syncp);
5816 u64_stats_inc(&txq_stats->napi.poll);
5817 u64_stats_update_end(&txq_stats->napi_syncp);
5818
5819 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5820 work_done = min(work_done, budget);
5821
5822 if (work_done < budget && napi_complete_done(napi, work_done)) {
5823 unsigned long flags;
5824
5825 spin_lock_irqsave(&ch->lock, flags);
5826 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5827 spin_unlock_irqrestore(&ch->lock, flags);
5828 }
5829
5830 /* TX still has packets to handle; check if we need to arm the TX timer */
5831 if (pending_packets)
5832 stmmac_tx_timer_arm(priv, chan);
5833
5834 return work_done;
5835 }
5836
5837 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5838 {
5839 struct stmmac_channel *ch =
5840 container_of(napi, struct stmmac_channel, rxtx_napi);
5841 struct stmmac_priv *priv = ch->priv_data;
5842 bool tx_pending_packets = false;
5843 int rx_done, tx_done, rxtx_done;
5844 struct stmmac_rxq_stats *rxq_stats;
5845 struct stmmac_txq_stats *txq_stats;
5846 u32 chan = ch->index;
5847
5848 rxq_stats = &priv->xstats.rxq_stats[chan];
5849 u64_stats_update_begin(&rxq_stats->napi_syncp);
5850 u64_stats_inc(&rxq_stats->napi.poll);
5851 u64_stats_update_end(&rxq_stats->napi_syncp);
5852
5853 txq_stats = &priv->xstats.txq_stats[chan];
5854 u64_stats_update_begin(&txq_stats->napi_syncp);
5855 u64_stats_inc(&txq_stats->napi.poll);
5856 u64_stats_update_end(&txq_stats->napi_syncp);
5857
5858 tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5859 tx_done = min(tx_done, budget);
5860
5861 rx_done = stmmac_rx_zc(priv, budget, chan);
5862
5863 rxtx_done = max(tx_done, rx_done);
5864
5865 /* If either TX or RX work is not complete, return budget
5866 * and keep polling
5867 */
5868 if (rxtx_done >= budget)
5869 return budget;
5870
5871 /* all work done, exit the polling mode */
5872 if (napi_complete_done(napi, rxtx_done)) {
5873 unsigned long flags;
5874
5875 spin_lock_irqsave(&ch->lock, flags);
5876 /* Both RX and TX work are complete,
5877 * so enable both RX & TX IRQs.
5878 */
5879 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5880 spin_unlock_irqrestore(&ch->lock, flags);
5881 }
5882
5883 /* TX still has packets to handle; check if we need to arm the TX timer */
5884 if (tx_pending_packets)
5885 stmmac_tx_timer_arm(priv, chan);
5886
5887 return min(rxtx_done, budget - 1);
5888 }
5889
5890 /**
5891 * stmmac_tx_timeout
5892 * @dev : Pointer to net device structure
5893 * @txqueue: the index of the hanging transmit queue
5894 * Description: this function is called when a packet transmission fails to
5895 * complete within a reasonable time. The driver will mark the error in the
5896 * netdev structure and arrange for the device to be reset to a sane state
5897 * in order to transmit a new packet.
5898 */
5899 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5900 {
5901 struct stmmac_priv *priv = netdev_priv(dev);
5902
5903 stmmac_global_err(priv);
5904 }
5905
5906 /**
5907 * stmmac_set_rx_mode - entry point for multicast addressing
5908 * @dev : pointer to the device structure
5909 * Description:
5910 * This function is a driver entry point which gets called by the kernel
5911 * whenever multicast addresses must be enabled/disabled.
5912 * Return value:
5913 * void.
5914 *
5915 * FIXME: This may need RXC to be running, but it may be called with BH
5916 * disabled, which means we can't call phylink_rx_clk_stop*().
5917 */
5918 static void stmmac_set_rx_mode(struct net_device *dev)
5919 {
5920 struct stmmac_priv *priv = netdev_priv(dev);
5921
5922 stmmac_set_filter(priv, priv->hw, dev);
5923 }
5924
5925 /**
5926 * stmmac_change_mtu - entry point to change MTU size for the device.
5927 * @dev : device pointer.
5928 * @new_mtu : the new MTU size for the device.
5929 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5930 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5931 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5932 * Return value:
5933 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5934 * file on failure.
5935 */
5936 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5937 {
5938 struct stmmac_priv *priv = netdev_priv(dev);
5939 int txfifosz = priv->plat->tx_fifo_size;
5940 struct stmmac_dma_conf *dma_conf;
5941 const int mtu = new_mtu;
5942 int ret;
5943
5944 if (txfifosz == 0)
5945 txfifosz = priv->dma_cap.tx_fifo_size;
5946
5947 txfifosz /= priv->plat->tx_queues_to_use;
5948
5949 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5950 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5951 return -EINVAL;
5952 }
5953
5954 new_mtu = STMMAC_ALIGN(new_mtu);
5955
5956 /* If condition true, FIFO is too small or MTU too large */
5957 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5958 return -EINVAL;
5959
5960 if (netif_running(dev)) {
5961 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5962 /* Try to allocate the new DMA conf with the new mtu */
5963 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5964 if (IS_ERR(dma_conf)) {
5965 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5966 mtu);
5967 return PTR_ERR(dma_conf);
5968 }
5969
5970 stmmac_release(dev);
5971
5972 ret = __stmmac_open(dev, dma_conf);
5973 if (ret) {
5974 free_dma_desc_resources(priv, dma_conf);
5975 kfree(dma_conf);
5976 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5977 return ret;
5978 }
5979
5980 kfree(dma_conf);
5981
5982 stmmac_set_rx_mode(dev);
5983 }
5984
5985 WRITE_ONCE(dev->mtu, mtu);
5986 netdev_update_features(dev);
5987
5988 return 0;
5989 }
5990
5991 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5992 netdev_features_t features)
5993 {
5994 struct stmmac_priv *priv = netdev_priv(dev);
5995
5996 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5997 features &= ~NETIF_F_RXCSUM;
5998
5999 if (!priv->plat->tx_coe)
6000 features &= ~NETIF_F_CSUM_MASK;
6001
6002 /* Some GMAC devices have a bugged Jumbo frame support that
6003 * needs to have the Tx COE disabled for oversized frames
6004 * (due to limited buffer sizes). In this case we disable
6005 * the TX csum insertion in the TDES and do not use SF.
6006 */
6007 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
6008 features &= ~NETIF_F_CSUM_MASK;
6009
6010 /* Disable tso if asked by ethtool */
6011 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
6012 if (features & NETIF_F_TSO)
6013 priv->tso = true;
6014 else
6015 priv->tso = false;
6016 }
6017
6018 return features;
6019 }
6020
6021 static int stmmac_set_features(struct net_device *netdev,
6022 netdev_features_t features)
6023 {
6024 struct stmmac_priv *priv = netdev_priv(netdev);
6025
6026 /* Keep the COE Type if checksum offload is supported */
6027 if (features & NETIF_F_RXCSUM)
6028 priv->hw->rx_csum = priv->plat->rx_coe;
6029 else
6030 priv->hw->rx_csum = 0;
6031 /* No check needed because rx_coe has been set before and it will be
6032 * fixed in case of issue.
6033 */
6034 stmmac_rx_ipc(priv, priv->hw);
6035
6036 if (priv->sph_cap) {
6037 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6038 u32 chan;
6039
6040 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
6041 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6042 }
6043
6044 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6045 priv->hw->hw_vlan_en = true;
6046 else
6047 priv->hw->hw_vlan_en = false;
6048
6049 phylink_rx_clk_stop_block(priv->phylink);
6050 stmmac_set_hw_vlan_mode(priv, priv->hw);
6051 phylink_rx_clk_stop_unblock(priv->phylink);
6052
6053 return 0;
6054 }
6055
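/* Handle interrupt sources common to all MAC variants: wake-up events, EST
 * and FPE status, GMAC/XGMAC core interrupts (including LPI state and
 * per-queue MTL events), PCS link changes and timestamp interrupts.
 */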
6056 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6057 {
6058 u32 rx_cnt = priv->plat->rx_queues_to_use;
6059 u32 tx_cnt = priv->plat->tx_queues_to_use;
6060 u32 queues_count;
6061 u32 queue;
6062 bool xmac;
6063
6064 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6065 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6066
6067 if (priv->irq_wake)
6068 pm_wakeup_event(priv->device, 0);
6069
6070 if (priv->dma_cap.estsel)
6071 stmmac_est_irq_status(priv, priv, priv->dev,
6072 &priv->xstats, tx_cnt);
6073
6074 if (stmmac_fpe_supported(priv))
6075 stmmac_fpe_irq_status(priv);
6076
6077 /* To handle GMAC own interrupts */
6078 if ((priv->plat->has_gmac) || xmac) {
6079 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6080
6081 if (unlikely(status)) {
6082 /* For LPI we need to save the tx status */
6083 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6084 priv->tx_path_in_lpi_mode = true;
6085 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6086 priv->tx_path_in_lpi_mode = false;
6087 }
6088
6089 for (queue = 0; queue < queues_count; queue++)
6090 stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6091
6092 /* PCS link status */
6093 if (priv->hw->pcs &&
6094 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6095 if (priv->xstats.pcs_link)
6096 netif_carrier_on(priv->dev);
6097 else
6098 netif_carrier_off(priv->dev);
6099 }
6100
6101 stmmac_timestamp_interrupt(priv, priv);
6102 }
6103 }
6104
6105 /**
6106 * stmmac_interrupt - main ISR
6107 * @irq: interrupt number.
6108 * @dev_id: to pass the net device pointer.
6109 * Description: this is the main driver interrupt service routine.
6110 * It can call:
6111 * o DMA service routine (to manage incoming frame reception and transmission
6112 * status)
6113 * o Core interrupts to manage: remote wake-up, management counter, LPI
6114 * interrupts.
6115 */
6116 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6117 {
6118 struct net_device *dev = (struct net_device *)dev_id;
6119 struct stmmac_priv *priv = netdev_priv(dev);
6120
6121 /* Check if adapter is up */
6122 if (test_bit(STMMAC_DOWN, &priv->state))
6123 return IRQ_HANDLED;
6124
6125 /* Check ASP error if it isn't delivered via an individual IRQ */
6126 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6127 return IRQ_HANDLED;
6128
6129 /* To handle Common interrupts */
6130 stmmac_common_interrupt(priv);
6131
6132 /* To handle DMA interrupts */
6133 stmmac_dma_interrupt(priv);
6134
6135 return IRQ_HANDLED;
6136 }
6137
6138 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6139 {
6140 struct net_device *dev = (struct net_device *)dev_id;
6141 struct stmmac_priv *priv = netdev_priv(dev);
6142
6143 /* Check if adapter is up */
6144 if (test_bit(STMMAC_DOWN, &priv->state))
6145 return IRQ_HANDLED;
6146
6147 /* To handle Common interrupts */
6148 stmmac_common_interrupt(priv);
6149
6150 return IRQ_HANDLED;
6151 }
6152
6153 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6154 {
6155 struct net_device *dev = (struct net_device *)dev_id;
6156 struct stmmac_priv *priv = netdev_priv(dev);
6157
6158 /* Check if adapter is up */
6159 if (test_bit(STMMAC_DOWN, &priv->state))
6160 return IRQ_HANDLED;
6161
6162 /* Check if a fatal error happened */
6163 stmmac_safety_feat_interrupt(priv);
6164
6165 return IRQ_HANDLED;
6166 }
6167
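/* Per-queue TX MSI handler: schedule NAPI for this TX channel and react to
 * DMA errors by bumping the threshold or restarting the failed TX path.
 */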
6168 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6169 {
6170 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6171 struct stmmac_dma_conf *dma_conf;
6172 int chan = tx_q->queue_index;
6173 struct stmmac_priv *priv;
6174 int status;
6175
6176 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6177 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6178
6179 /* Check if adapter is up */
6180 if (test_bit(STMMAC_DOWN, &priv->state))
6181 return IRQ_HANDLED;
6182
6183 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6184
6185 if (unlikely(status & tx_hard_error_bump_tc)) {
6186 /* Try to bump up the dma threshold on this failure */
6187 stmmac_bump_dma_threshold(priv, chan);
6188 } else if (unlikely(status == tx_hard_error)) {
6189 stmmac_tx_err(priv, chan);
6190 }
6191
6192 return IRQ_HANDLED;
6193 }
6194
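/* Per-queue RX MSI handler: schedule NAPI for this RX channel. */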
6195 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6196 {
6197 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6198 struct stmmac_dma_conf *dma_conf;
6199 int chan = rx_q->queue_index;
6200 struct stmmac_priv *priv;
6201
6202 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6203 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6204
6205 /* Check if adapter is up */
6206 if (test_bit(STMMAC_DOWN, &priv->state))
6207 return IRQ_HANDLED;
6208
6209 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6210
6211 return IRQ_HANDLED;
6212 }
6213
6214 /**
6215 * stmmac_ioctl - Entry point for the Ioctl
6216 * @dev: Device pointer.
6217 * @rq: An IOCTL-specific structure that can contain a pointer to
6218 * a proprietary structure used to pass information to the driver.
6219 * @cmd: IOCTL command
6220 * Description:
6221 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6222 */
6223 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6224 {
6225 struct stmmac_priv *priv = netdev_priv (dev);
6226 int ret = -EOPNOTSUPP;
6227
6228 if (!netif_running(dev))
6229 return -EINVAL;
6230
6231 switch (cmd) {
6232 case SIOCGMIIPHY:
6233 case SIOCGMIIREG:
6234 case SIOCSMIIREG:
6235 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6236 break;
6237 default:
6238 break;
6239 }
6240
6241 return ret;
6242 }
6243
6244 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6245 void *cb_priv)
6246 {
6247 struct stmmac_priv *priv = cb_priv;
6248 int ret = -EOPNOTSUPP;
6249
6250 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6251 return ret;
6252
6253 __stmmac_disable_all_queues(priv);
6254
6255 switch (type) {
6256 case TC_SETUP_CLSU32:
6257 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6258 break;
6259 case TC_SETUP_CLSFLOWER:
6260 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6261 break;
6262 default:
6263 break;
6264 }
6265
6266 stmmac_enable_all_queues(priv);
6267 return ret;
6268 }
6269
6270 static LIST_HEAD(stmmac_block_cb_list);
6271
6272 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6273 void *type_data)
6274 {
6275 struct stmmac_priv *priv = netdev_priv(ndev);
6276
6277 switch (type) {
6278 case TC_QUERY_CAPS:
6279 return stmmac_tc_query_caps(priv, priv, type_data);
6280 case TC_SETUP_QDISC_MQPRIO:
6281 return stmmac_tc_setup_mqprio(priv, priv, type_data);
6282 case TC_SETUP_BLOCK:
6283 return flow_block_cb_setup_simple(type_data,
6284 &stmmac_block_cb_list,
6285 stmmac_setup_tc_block_cb,
6286 priv, priv, true);
6287 case TC_SETUP_QDISC_CBS:
6288 return stmmac_tc_setup_cbs(priv, priv, type_data);
6289 case TC_SETUP_QDISC_TAPRIO:
6290 return stmmac_tc_setup_taprio(priv, priv, type_data);
6291 case TC_SETUP_QDISC_ETF:
6292 return stmmac_tc_setup_etf(priv, priv, type_data);
6293 default:
6294 return -EOPNOTSUPP;
6295 }
6296 }
6297
6298 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6299 struct net_device *sb_dev)
6300 {
6301 int gso = skb_shinfo(skb)->gso_type;
6302
6303 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6304 /*
6305 * There is no way to determine the number of TSO/USO
6306 * capable Queues. Let's always use Queue 0,
6307 * because if TSO/USO is supported then at least this
6308 * one will be capable.
6309 */
6310 return 0;
6311 }
6312
6313 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6314 }
6315
6316 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6317 {
6318 struct stmmac_priv *priv = netdev_priv(ndev);
6319 int ret = 0;
6320
6321 ret = pm_runtime_resume_and_get(priv->device);
6322 if (ret < 0)
6323 return ret;
6324
6325 ret = eth_mac_addr(ndev, addr);
6326 if (ret)
6327 goto set_mac_error;
6328
6329 phylink_rx_clk_stop_block(priv->phylink);
6330 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6331 phylink_rx_clk_stop_unblock(priv->phylink);
6332
6333 set_mac_error:
6334 pm_runtime_put(priv->device);
6335
6336 return ret;
6337 }
6338
6339 #ifdef CONFIG_DEBUG_FS
6340 static struct dentry *stmmac_fs_dir;
6341
6342 static void sysfs_display_ring(void *head, int size, int extend_desc,
6343 struct seq_file *seq, dma_addr_t dma_phy_addr)
6344 {
6345 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6346 struct dma_desc *p = (struct dma_desc *)head;
6347 unsigned int desc_size;
6348 dma_addr_t dma_addr;
6349 int i;
6350
6351 desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6352 for (i = 0; i < size; i++) {
6353 dma_addr = dma_phy_addr + i * desc_size;
6354 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6355 i, &dma_addr,
6356 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6357 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6358 if (extend_desc)
6359 p = &(++ep)->basic;
6360 else
6361 p++;
6362 }
6363 }
6364
6365 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6366 {
6367 struct net_device *dev = seq->private;
6368 struct stmmac_priv *priv = netdev_priv(dev);
6369 u32 rx_count = priv->plat->rx_queues_to_use;
6370 u32 tx_count = priv->plat->tx_queues_to_use;
6371 u32 queue;
6372
6373 if ((dev->flags & IFF_UP) == 0)
6374 return 0;
6375
6376 for (queue = 0; queue < rx_count; queue++) {
6377 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6378
6379 seq_printf(seq, "RX Queue %d:\n", queue);
6380
6381 if (priv->extend_desc) {
6382 seq_printf(seq, "Extended descriptor ring:\n");
6383 sysfs_display_ring((void *)rx_q->dma_erx,
6384 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6385 } else {
6386 seq_printf(seq, "Descriptor ring:\n");
6387 sysfs_display_ring((void *)rx_q->dma_rx,
6388 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6389 }
6390 }
6391
6392 for (queue = 0; queue < tx_count; queue++) {
6393 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6394
6395 seq_printf(seq, "TX Queue %d:\n", queue);
6396
6397 if (priv->extend_desc) {
6398 seq_printf(seq, "Extended descriptor ring:\n");
6399 sysfs_display_ring((void *)tx_q->dma_etx,
6400 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6401 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6402 seq_printf(seq, "Descriptor ring:\n");
6403 sysfs_display_ring((void *)tx_q->dma_tx,
6404 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6405 }
6406 }
6407
6408 return 0;
6409 }
6410 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6411
6412 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6413 {
6414 static const char * const dwxgmac_timestamp_source[] = {
6415 "None",
6416 "Internal",
6417 "External",
6418 "Both",
6419 };
6420 static const char * const dwxgmac_safety_feature_desc[] = {
6421 "No",
6422 "All Safety Features with ECC and Parity",
6423 "All Safety Features without ECC or Parity",
6424 "All Safety Features with Parity Only",
6425 "ECC Only",
6426 "UNDEFINED",
6427 "UNDEFINED",
6428 "UNDEFINED",
6429 };
6430 struct net_device *dev = seq->private;
6431 struct stmmac_priv *priv = netdev_priv(dev);
6432
6433 if (!priv->hw_cap_support) {
6434 seq_printf(seq, "DMA HW features not supported\n");
6435 return 0;
6436 }
6437
6438 seq_printf(seq, "==============================\n");
6439 seq_printf(seq, "\tDMA HW features\n");
6440 seq_printf(seq, "==============================\n");
6441
6442 seq_printf(seq, "\t10/100 Mbps: %s\n",
6443 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6444 seq_printf(seq, "\t1000 Mbps: %s\n",
6445 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6446 seq_printf(seq, "\tHalf duplex: %s\n",
6447 (priv->dma_cap.half_duplex) ? "Y" : "N");
6448 if (priv->plat->has_xgmac) {
6449 seq_printf(seq,
6450 "\tNumber of Additional MAC address registers: %d\n",
6451 priv->dma_cap.multi_addr);
6452 } else {
6453 seq_printf(seq, "\tHash Filter: %s\n",
6454 (priv->dma_cap.hash_filter) ? "Y" : "N");
6455 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6456 (priv->dma_cap.multi_addr) ? "Y" : "N");
6457 }
6458 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6459 (priv->dma_cap.pcs) ? "Y" : "N");
6460 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6461 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6462 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6463 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6464 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6465 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6466 seq_printf(seq, "\tRMON module: %s\n",
6467 (priv->dma_cap.rmon) ? "Y" : "N");
6468 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6469 (priv->dma_cap.time_stamp) ? "Y" : "N");
6470 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6471 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6472 if (priv->plat->has_xgmac)
6473 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6474 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6475 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6476 (priv->dma_cap.eee) ? "Y" : "N");
6477 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6478 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6479 (priv->dma_cap.tx_coe) ? "Y" : "N");
6480 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6481 priv->plat->has_xgmac) {
6482 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6483 (priv->dma_cap.rx_coe) ? "Y" : "N");
6484 } else {
6485 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6486 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6487 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6488 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6489 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6490 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6491 }
6492 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6493 priv->dma_cap.number_rx_channel);
6494 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6495 priv->dma_cap.number_tx_channel);
6496 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6497 priv->dma_cap.number_rx_queues);
6498 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6499 priv->dma_cap.number_tx_queues);
6500 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6501 (priv->dma_cap.enh_desc) ? "Y" : "N");
6502 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6503 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6504 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6505 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6506 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6507 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6508 priv->dma_cap.pps_out_num);
6509 seq_printf(seq, "\tSafety Features: %s\n",
6510 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6511 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6512 priv->dma_cap.frpsel ? "Y" : "N");
6513 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6514 priv->dma_cap.host_dma_width);
6515 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6516 priv->dma_cap.rssen ? "Y" : "N");
6517 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6518 priv->dma_cap.vlhash ? "Y" : "N");
6519 seq_printf(seq, "\tSplit Header: %s\n",
6520 priv->dma_cap.sphen ? "Y" : "N");
6521 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6522 priv->dma_cap.vlins ? "Y" : "N");
6523 seq_printf(seq, "\tDouble VLAN: %s\n",
6524 priv->dma_cap.dvlan ? "Y" : "N");
6525 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6526 priv->dma_cap.l3l4fnum);
6527 seq_printf(seq, "\tARP Offloading: %s\n",
6528 priv->dma_cap.arpoffsel ? "Y" : "N");
6529 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6530 priv->dma_cap.estsel ? "Y" : "N");
6531 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6532 priv->dma_cap.fpesel ? "Y" : "N");
6533 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6534 priv->dma_cap.tbssel ? "Y" : "N");
6535 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6536 priv->dma_cap.tbs_ch_num);
6537 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6538 priv->dma_cap.sgfsel ? "Y" : "N");
6539 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6540 BIT(priv->dma_cap.ttsfd) >> 1);
6541 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6542 priv->dma_cap.numtc);
6543 seq_printf(seq, "\tDCB Feature: %s\n",
6544 priv->dma_cap.dcben ? "Y" : "N");
6545 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6546 priv->dma_cap.advthword ? "Y" : "N");
6547 seq_printf(seq, "\tPTP Offload: %s\n",
6548 priv->dma_cap.ptoen ? "Y" : "N");
6549 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6550 priv->dma_cap.osten ? "Y" : "N");
6551 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6552 priv->dma_cap.pfcen ? "Y" : "N");
6553 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6554 BIT(priv->dma_cap.frpes) << 6);
6555 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6556 BIT(priv->dma_cap.frpbs) << 6);
6557 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6558 priv->dma_cap.frppipe_num);
6559 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6560 priv->dma_cap.nrvf_num ?
6561 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6562 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6563 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6564 seq_printf(seq, "\tDepth of GCL: %lu\n",
6565 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6566 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6567 priv->dma_cap.cbtisel ? "Y" : "N");
6568 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6569 priv->dma_cap.aux_snapshot_n);
6570 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6571 priv->dma_cap.pou_ost_en ? "Y" : "N");
6572 seq_printf(seq, "\tEnhanced DMA: %s\n",
6573 priv->dma_cap.edma ? "Y" : "N");
6574 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6575 priv->dma_cap.ediffc ? "Y" : "N");
6576 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6577 priv->dma_cap.vxn ? "Y" : "N");
6578 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6579 priv->dma_cap.dbgmem ? "Y" : "N");
6580 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6581 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6582 return 0;
6583 }
6584 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6585
6586 /* Use network device events to rename debugfs file entries.
6587 */
6588 static int stmmac_device_event(struct notifier_block *unused,
6589 unsigned long event, void *ptr)
6590 {
6591 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6592 struct stmmac_priv *priv = netdev_priv(dev);
6593
6594 if (dev->netdev_ops != &stmmac_netdev_ops)
6595 goto done;
6596
6597 switch (event) {
6598 case NETDEV_CHANGENAME:
6599 debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6600 break;
6601 }
6602 done:
6603 return NOTIFY_DONE;
6604 }
6605
6606 static struct notifier_block stmmac_notifier = {
6607 .notifier_call = stmmac_device_event,
6608 };
6609
6610 static void stmmac_init_fs(struct net_device *dev)
6611 {
6612 struct stmmac_priv *priv = netdev_priv(dev);
6613
6614 rtnl_lock();
6615
6616 /* Create per netdev entries */
6617 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6618
6619 /* Entry to report DMA RX/TX rings */
6620 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6621 &stmmac_rings_status_fops);
6622
6623 /* Entry to report the DMA HW features */
6624 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6625 &stmmac_dma_cap_fops);
6626
6627 rtnl_unlock();
6628 }
6629
6630 static void stmmac_exit_fs(struct net_device *dev)
6631 {
6632 struct stmmac_priv *priv = netdev_priv(dev);
6633
6634 debugfs_remove_recursive(priv->dbgfs_dir);
6635 }
6636 #endif /* CONFIG_DEBUG_FS */
6637
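/* Compute a bitwise CRC-32 (little-endian, polynomial 0xEDB88320) over the
 * 12 VID bits of a VLAN tag; the result feeds the VLAN hash filter below.
 */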
6638 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6639 {
6640 unsigned char *data = (unsigned char *)&vid_le;
6641 unsigned char data_byte = 0;
6642 u32 crc = ~0x0;
6643 u32 temp = 0;
6644 int i, bits;
6645
6646 bits = get_bitmask_order(VLAN_VID_MASK);
6647 for (i = 0; i < bits; i++) {
6648 if ((i % 8) == 0)
6649 data_byte = data[i / 8];
6650
6651 temp = ((crc & 1) ^ data_byte) & 1;
6652 crc >>= 1;
6653 data_byte >>= 1;
6654
6655 if (temp)
6656 crc ^= 0xedb88320;
6657 }
6658
6659 return crc;
6660 }
6661
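/* Rebuild the VLAN filter from the active_vlans bitmap: when the hardware
 * supports VLAN hash filtering, the top 4 bits of each reversed CRC select a
 * bit in a 16-bit hash; otherwise fall back to perfect matching of a single
 * VID.
 */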
6662 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6663 {
6664 u32 crc, hash = 0;
6665 u16 pmatch = 0;
6666 int count = 0;
6667 u16 vid = 0;
6668
6669 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6670 __le16 vid_le = cpu_to_le16(vid);
6671 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6672 hash |= (1 << crc);
6673 count++;
6674 }
6675
6676 if (!priv->dma_cap.vlhash) {
6677 if (count > 2) /* VID = 0 always passes filter */
6678 return -EOPNOTSUPP;
6679
6680 pmatch = vid;
6681 hash = 0;
6682 }
6683
6684 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6685 }
6686
6687 /* FIXME: This may need RXC to be running, but it may be called with BH
6688 * disabled, which means we can't call phylink_rx_clk_stop*().
6689 */
6690 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6691 {
6692 struct stmmac_priv *priv = netdev_priv(ndev);
6693 bool is_double = false;
6694 int ret;
6695
6696 ret = pm_runtime_resume_and_get(priv->device);
6697 if (ret < 0)
6698 return ret;
6699
6700 if (be16_to_cpu(proto) == ETH_P_8021AD)
6701 is_double = true;
6702
6703 set_bit(vid, priv->active_vlans);
6704 ret = stmmac_vlan_update(priv, is_double);
6705 if (ret) {
6706 clear_bit(vid, priv->active_vlans);
6707 goto err_pm_put;
6708 }
6709
6710 if (priv->hw->num_vlan) {
6711 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6712 if (ret)
6713 goto err_pm_put;
6714 }
6715 err_pm_put:
6716 pm_runtime_put(priv->device);
6717
6718 return ret;
6719 }
6720
6721 /* FIXME: This may need RXC to be running, but it may be called with BH
6722 * disabled, which means we can't call phylink_rx_clk_stop*().
6723 */
6724 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6725 {
6726 struct stmmac_priv *priv = netdev_priv(ndev);
6727 bool is_double = false;
6728 int ret;
6729
6730 ret = pm_runtime_resume_and_get(priv->device);
6731 if (ret < 0)
6732 return ret;
6733
6734 if (be16_to_cpu(proto) == ETH_P_8021AD)
6735 is_double = true;
6736
6737 clear_bit(vid, priv->active_vlans);
6738
6739 if (priv->hw->num_vlan) {
6740 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6741 if (ret)
6742 goto del_vlan_error;
6743 }
6744
6745 ret = stmmac_vlan_update(priv, is_double);
6746
6747 del_vlan_error:
6748 pm_runtime_put(priv->device);
6749
6750 return ret;
6751 }
6752
6753 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6754 {
6755 struct stmmac_priv *priv = netdev_priv(dev);
6756
6757 switch (bpf->command) {
6758 case XDP_SETUP_PROG:
6759 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6760 case XDP_SETUP_XSK_POOL:
6761 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6762 bpf->xsk.queue_id);
6763 default:
6764 return -EOPNOTSUPP;
6765 }
6766 }
6767
6768 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6769 struct xdp_frame **frames, u32 flags)
6770 {
6771 struct stmmac_priv *priv = netdev_priv(dev);
6772 int cpu = smp_processor_id();
6773 struct netdev_queue *nq;
6774 int i, nxmit = 0;
6775 int queue;
6776
6777 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6778 return -ENETDOWN;
6779
6780 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6781 return -EINVAL;
6782
6783 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6784 nq = netdev_get_tx_queue(priv->dev, queue);
6785
6786 __netif_tx_lock(nq, cpu);
6787 /* Avoids TX time-out as we are sharing with slow path */
6788 txq_trans_cond_update(nq);
6789
6790 for (i = 0; i < num_frames; i++) {
6791 int res;
6792
6793 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6794 if (res == STMMAC_XDP_CONSUMED)
6795 break;
6796
6797 nxmit++;
6798 }
6799
6800 if (flags & XDP_XMIT_FLUSH) {
6801 stmmac_flush_tx_descriptors(priv, queue);
6802 stmmac_tx_timer_arm(priv, queue);
6803 }
6804
6805 __netif_tx_unlock(nq);
6806
6807 return nxmit;
6808 }
6809
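/* Tear down a single RX queue so it can be reconfigured at runtime (e.g. for
 * XSK pool attach/detach): mask its DMA IRQ, stop the RX DMA channel and free
 * its descriptor resources.
 */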
6810 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6811 {
6812 struct stmmac_channel *ch = &priv->channel[queue];
6813 unsigned long flags;
6814
6815 spin_lock_irqsave(&ch->lock, flags);
6816 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6817 spin_unlock_irqrestore(&ch->lock, flags);
6818
6819 stmmac_stop_rx_dma(priv, queue);
6820 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6821 }
6822
6823 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6824 {
6825 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6826 struct stmmac_channel *ch = &priv->channel[queue];
6827 unsigned long flags;
6828 u32 buf_size;
6829 int ret;
6830
6831 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6832 if (ret) {
6833 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6834 return;
6835 }
6836
6837 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6838 if (ret) {
6839 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6840 netdev_err(priv->dev, "Failed to init RX desc.\n");
6841 return;
6842 }
6843
6844 stmmac_reset_rx_queue(priv, queue);
6845 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6846
6847 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6848 rx_q->dma_rx_phy, rx_q->queue_index);
6849
6850 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6851 sizeof(struct dma_desc));
6852 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6853 rx_q->rx_tail_addr, rx_q->queue_index);
6854
6855 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6856 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6857 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6858 buf_size,
6859 rx_q->queue_index);
6860 } else {
6861 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6862 priv->dma_conf.dma_buf_sz,
6863 rx_q->queue_index);
6864 }
6865
6866 stmmac_start_rx_dma(priv, queue);
6867
6868 spin_lock_irqsave(&ch->lock, flags);
6869 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6870 spin_unlock_irqrestore(&ch->lock, flags);
6871 }
6872
6873 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6874 {
6875 struct stmmac_channel *ch = &priv->channel[queue];
6876 unsigned long flags;
6877
6878 spin_lock_irqsave(&ch->lock, flags);
6879 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6880 spin_unlock_irqrestore(&ch->lock, flags);
6881
6882 stmmac_stop_tx_dma(priv, queue);
6883 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6884 }
6885
6886 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6887 {
6888 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6889 struct stmmac_channel *ch = &priv->channel[queue];
6890 unsigned long flags;
6891 int ret;
6892
6893 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6894 if (ret) {
6895 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6896 return;
6897 }
6898
6899 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6900 if (ret) {
6901 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6902 netdev_err(priv->dev, "Failed to init TX desc.\n");
6903 return;
6904 }
6905
6906 stmmac_reset_tx_queue(priv, queue);
6907 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6908
6909 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6910 tx_q->dma_tx_phy, tx_q->queue_index);
6911
6912 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6913 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6914
6915 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6916 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6917 tx_q->tx_tail_addr, tx_q->queue_index);
6918
6919 stmmac_start_tx_dma(priv, queue);
6920
6921 spin_lock_irqsave(&ch->lock, flags);
6922 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6923 spin_unlock_irqrestore(&ch->lock, flags);
6924 }
6925
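/**
 * stmmac_xdp_release - tear down the data path for XDP reconfiguration
 * @dev: network device pointer
 * Description: stops the TX queues and NAPI, cancels the TX coalescing
 * timers, frees the IRQ lines, stops all DMA channels, releases the
 * descriptor resources and disables the MAC. Counterpart of
 * stmmac_xdp_open().
 */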
6926 void stmmac_xdp_release(struct net_device *dev)
6927 {
6928 struct stmmac_priv *priv = netdev_priv(dev);
6929 u32 chan;
6930
6931 /* Ensure tx function is not running */
6932 netif_tx_disable(dev);
6933
6934 /* Disable NAPI process */
6935 stmmac_disable_all_queues(priv);
6936
6937 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6938 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6939
6940 /* Free the IRQ lines */
6941 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6942
6943 /* Stop TX/RX DMA channels */
6944 stmmac_stop_all_dma(priv);
6945
6946 /* Release and free the Rx/Tx resources */
6947 free_dma_desc_resources(priv, &priv->dma_conf);
6948
6949 /* Disable the MAC Rx/Tx */
6950 stmmac_mac_set(priv, priv->ioaddr, false);
6951
6952 /* set trans_start so we don't get spurious
6953 * watchdogs during reset
6954 */
6955 netif_trans_update(dev);
6956 netif_carrier_off(dev);
6957 }
6958
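/**
 * stmmac_xdp_open - bring the data path back up for XDP use
 * @dev: network device pointer
 * Description: re-allocates and initializes the DMA descriptors,
 * configures the DMA channels (using the XSK pool frame size for RX
 * buffers when a pool is attached), enables the MAC and DMA, requests
 * the IRQ lines and re-enables NAPI and the TX queues.
 * Return: 0 on success, otherwise a negative error code.
 */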
6959 int stmmac_xdp_open(struct net_device *dev)
6960 {
6961 struct stmmac_priv *priv = netdev_priv(dev);
6962 u32 rx_cnt = priv->plat->rx_queues_to_use;
6963 u32 tx_cnt = priv->plat->tx_queues_to_use;
6964 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6965 struct stmmac_rx_queue *rx_q;
6966 struct stmmac_tx_queue *tx_q;
6967 u32 buf_size;
6968 bool sph_en;
6969 u32 chan;
6970 int ret;
6971
6972 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6973 if (ret < 0) {
6974 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6975 __func__);
6976 goto dma_desc_error;
6977 }
6978
6979 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6980 if (ret < 0) {
6981 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6982 __func__);
6983 goto init_error;
6984 }
6985
6986 stmmac_reset_queues_param(priv);
6987
6988 /* DMA CSR Channel configuration */
6989 for (chan = 0; chan < dma_csr_ch; chan++) {
6990 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6991 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6992 }
6993
6994 /* Adjust Split header */
6995 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6996
6997 /* DMA RX Channel Configuration */
6998 for (chan = 0; chan < rx_cnt; chan++) {
6999 rx_q = &priv->dma_conf.rx_queue[chan];
7000
7001 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7002 rx_q->dma_rx_phy, chan);
7003
7004 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
7005 (rx_q->buf_alloc_num *
7006 sizeof(struct dma_desc));
7007 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
7008 rx_q->rx_tail_addr, chan);
7009
7010 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
7011 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
7012 stmmac_set_dma_bfsize(priv, priv->ioaddr,
7013 buf_size,
7014 rx_q->queue_index);
7015 } else {
7016 stmmac_set_dma_bfsize(priv, priv->ioaddr,
7017 priv->dma_conf.dma_buf_sz,
7018 rx_q->queue_index);
7019 }
7020
7021 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
7022 }
7023
7024 /* DMA TX Channel Configuration */
7025 for (chan = 0; chan < tx_cnt; chan++) {
7026 tx_q = &priv->dma_conf.tx_queue[chan];
7027
7028 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7029 tx_q->dma_tx_phy, chan);
7030
7031 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7032 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7033 tx_q->tx_tail_addr, chan);
7034
7035 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7036 }
7037
7038 /* Enable the MAC Rx/Tx */
7039 stmmac_mac_set(priv, priv->ioaddr, true);
7040
7041 /* Start Rx & Tx DMA Channels */
7042 stmmac_start_all_dma(priv);
7043
7044 ret = stmmac_request_irq(dev);
7045 if (ret)
7046 goto irq_error;
7047
7048 /* Enable NAPI process */
7049 stmmac_enable_all_queues(priv);
7050 netif_carrier_on(dev);
7051 netif_tx_start_all_queues(dev);
7052 stmmac_enable_all_dma_irq(priv);
7053
7054 return 0;
7055
7056 irq_error:
7057 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7058 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7059
7060 stmmac_hw_teardown(dev);
7061 init_error:
7062 free_dma_desc_resources(priv, &priv->dma_conf);
7063 dma_desc_error:
7064 return ret;
7065 }
7066
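/**
 * stmmac_xsk_wakeup - .ndo_xsk_wakeup callback
 * @dev: network device pointer
 * @queue: queue index
 * @flags: wakeup flags (unused here)
 * Description: validates that the interface is up, XDP is enabled, the
 * queue index is in range and an XSK pool is attached, then schedules
 * the combined RX/TX NAPI unless it is already running.
 * Return: 0 on success, -ENETDOWN or -EINVAL on failure.
 */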
7067 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7068 {
7069 struct stmmac_priv *priv = netdev_priv(dev);
7070 struct stmmac_rx_queue *rx_q;
7071 struct stmmac_tx_queue *tx_q;
7072 struct stmmac_channel *ch;
7073
7074 if (test_bit(STMMAC_DOWN, &priv->state) ||
7075 !netif_carrier_ok(priv->dev))
7076 return -ENETDOWN;
7077
7078 if (!stmmac_xdp_is_enabled(priv))
7079 return -EINVAL;
7080
7081 if (queue >= priv->plat->rx_queues_to_use ||
7082 queue >= priv->plat->tx_queues_to_use)
7083 return -EINVAL;
7084
7085 rx_q = &priv->dma_conf.rx_queue[queue];
7086 tx_q = &priv->dma_conf.tx_queue[queue];
7087 ch = &priv->channel[queue];
7088
7089 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7090 return -EINVAL;
7091
7092 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7093 /* EQoS does not have per-DMA channel SW interrupt,
7094 		 * so we schedule the RX/TX NAPI straight away.
7095 */
7096 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7097 __napi_schedule(&ch->rxtx_napi);
7098 }
7099
7100 return 0;
7101 }
7102
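/**
 * stmmac_get_stats64 - .ndo_get_stats64 callback
 * @dev: network device pointer
 * @stats: structure to be filled in
 * Description: aggregates the per-queue TX/RX packet and byte counters
 * under their u64_stats sync sections and copies the error counters
 * collected in priv->xstats.
 */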
7103 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7104 {
7105 struct stmmac_priv *priv = netdev_priv(dev);
7106 u32 tx_cnt = priv->plat->tx_queues_to_use;
7107 u32 rx_cnt = priv->plat->rx_queues_to_use;
7108 unsigned int start;
7109 int q;
7110
7111 for (q = 0; q < tx_cnt; q++) {
7112 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7113 u64 tx_packets;
7114 u64 tx_bytes;
7115
7116 do {
7117 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7118 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
7119 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7120 do {
7121 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7122 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7123 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7124
7125 stats->tx_packets += tx_packets;
7126 stats->tx_bytes += tx_bytes;
7127 }
7128
7129 for (q = 0; q < rx_cnt; q++) {
7130 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7131 u64 rx_packets;
7132 u64 rx_bytes;
7133
7134 do {
7135 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7136 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7137 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
7138 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7139
7140 stats->rx_packets += rx_packets;
7141 stats->rx_bytes += rx_bytes;
7142 }
7143
7144 stats->rx_dropped = priv->xstats.rx_dropped;
7145 stats->rx_errors = priv->xstats.rx_errors;
7146 stats->tx_dropped = priv->xstats.tx_dropped;
7147 stats->tx_errors = priv->xstats.tx_errors;
7148 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7149 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7150 stats->rx_length_errors = priv->xstats.rx_length;
7151 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7152 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7153 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7154 }
7155
7156 static const struct net_device_ops stmmac_netdev_ops = {
7157 .ndo_open = stmmac_open,
7158 .ndo_start_xmit = stmmac_xmit,
7159 .ndo_stop = stmmac_release,
7160 .ndo_change_mtu = stmmac_change_mtu,
7161 .ndo_fix_features = stmmac_fix_features,
7162 .ndo_set_features = stmmac_set_features,
7163 .ndo_set_rx_mode = stmmac_set_rx_mode,
7164 .ndo_tx_timeout = stmmac_tx_timeout,
7165 .ndo_eth_ioctl = stmmac_ioctl,
7166 .ndo_get_stats64 = stmmac_get_stats64,
7167 .ndo_setup_tc = stmmac_setup_tc,
7168 .ndo_select_queue = stmmac_select_queue,
7169 .ndo_set_mac_address = stmmac_set_mac_address,
7170 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7171 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7172 .ndo_bpf = stmmac_bpf,
7173 .ndo_xdp_xmit = stmmac_xdp_xmit,
7174 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7175 .ndo_hwtstamp_get = stmmac_hwtstamp_get,
7176 .ndo_hwtstamp_set = stmmac_hwtstamp_set,
7177 };
7178
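/**
 * stmmac_reset_subtask - restart the interface after a requested reset
 * @priv: driver private structure
 * Description: if a reset has been requested and the interface is not
 * already down, close and re-open the device under rtnl_lock to recover
 * the adapter.
 */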
7179 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7180 {
7181 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7182 return;
7183 if (test_bit(STMMAC_DOWN, &priv->state))
7184 return;
7185
7186 netdev_err(priv->dev, "Reset adapter.\n");
7187
7188 rtnl_lock();
7189 netif_trans_update(priv->dev);
7190 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7191 usleep_range(1000, 2000);
7192
7193 set_bit(STMMAC_DOWN, &priv->state);
7194 dev_close(priv->dev);
7195 dev_open(priv->dev, NULL);
7196 clear_bit(STMMAC_DOWN, &priv->state);
7197 clear_bit(STMMAC_RESETING, &priv->state);
7198 rtnl_unlock();
7199 }
7200
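/**
 * stmmac_service_task - deferred work handler
 * @work: work_struct embedded in the driver private structure
 * Description: runs the reset subtask and clears the service scheduled
 * flag so the task can be queued again.
 */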
7201 static void stmmac_service_task(struct work_struct *work)
7202 {
7203 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7204 service_task);
7205
7206 stmmac_reset_subtask(priv);
7207 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7208 }
7209
7210 /**
7211 * stmmac_hw_init - Init the MAC device
7212 * @priv: driver private structure
7213  * Description: this function configures the MAC device according to the
7214  * platform parameters or the HW capability register. It prepares the
7215  * driver to use either ring or chain mode and to set up either enhanced or
7216  * normal descriptors.
7217 */
7218 static int stmmac_hw_init(struct stmmac_priv *priv)
7219 {
7220 int ret;
7221
7222 /* dwmac-sun8i only works in chain mode */
7223 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7224 chain_mode = 1;
7225 priv->chain_mode = chain_mode;
7226
7227 /* Initialize HW Interface */
7228 ret = stmmac_hwif_init(priv);
7229 if (ret)
7230 return ret;
7231
7232 /* Get the HW capability (GMAC cores newer than 3.50a) */
7233 priv->hw_cap_support = stmmac_get_hw_features(priv);
7234 if (priv->hw_cap_support) {
7235 dev_info(priv->device, "DMA HW capability register supported\n");
7236
7237 /* We can override some gmac/dma configuration fields (e.g.
7238 * enh_desc, tx_coe) that are passed through the platform
7239 * with the values from the HW capability register
7240 * (if supported).
7241 */
7242 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7243 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7244 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7245 priv->hw->pmt = priv->plat->pmt;
7246 if (priv->dma_cap.hash_tb_sz) {
7247 priv->hw->multicast_filter_bins =
7248 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7249 priv->hw->mcast_bits_log2 =
7250 ilog2(priv->hw->multicast_filter_bins);
7251 }
7252
7253 /* TXCOE doesn't work in thresh DMA mode */
7254 if (priv->plat->force_thresh_dma_mode)
7255 priv->plat->tx_coe = 0;
7256 else
7257 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7258
7259 /* In case of GMAC4 rx_coe is from HW cap register. */
7260 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7261
7262 if (priv->dma_cap.rx_coe_type2)
7263 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7264 else if (priv->dma_cap.rx_coe_type1)
7265 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7266
7267 } else {
7268 dev_info(priv->device, "No HW DMA feature register supported\n");
7269 }
7270
7271 if (priv->plat->rx_coe) {
7272 priv->hw->rx_csum = priv->plat->rx_coe;
7273 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7274 if (priv->synopsys_id < DWMAC_CORE_4_00)
7275 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7276 }
7277 if (priv->plat->tx_coe)
7278 dev_info(priv->device, "TX Checksum insertion supported\n");
7279
7280 if (priv->plat->pmt) {
7281 dev_info(priv->device, "Wake-Up On LAN supported\n");
7282 device_set_wakeup_capable(priv->device, 1);
7283 }
7284
7285 if (priv->dma_cap.tsoen)
7286 dev_info(priv->device, "TSO supported\n");
7287
7288 if (priv->dma_cap.number_rx_queues &&
7289 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7290 dev_warn(priv->device,
7291 "Number of Rx queues (%u) exceeds dma capability\n",
7292 priv->plat->rx_queues_to_use);
7293 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7294 }
7295 if (priv->dma_cap.number_tx_queues &&
7296 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7297 dev_warn(priv->device,
7298 "Number of Tx queues (%u) exceeds dma capability\n",
7299 priv->plat->tx_queues_to_use);
7300 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7301 }
7302
7303 if (priv->dma_cap.rx_fifo_size &&
7304 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7305 dev_warn(priv->device,
7306 "Rx FIFO size (%u) exceeds dma capability\n",
7307 priv->plat->rx_fifo_size);
7308 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7309 }
7310 if (priv->dma_cap.tx_fifo_size &&
7311 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7312 dev_warn(priv->device,
7313 "Tx FIFO size (%u) exceeds dma capability\n",
7314 priv->plat->tx_fifo_size);
7315 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7316 }
7317
7318 priv->hw->vlan_fail_q_en =
7319 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7320 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7321
7322 /* Run HW quirks, if any */
7323 if (priv->hwif_quirks) {
7324 ret = priv->hwif_quirks(priv);
7325 if (ret)
7326 return ret;
7327 }
7328
7329 /* Rx Watchdog is available in cores newer than 3.40.
7330 * In some cases, for example on buggy HW, this feature
7331 * has to be disabled; this can be done by passing the
7332 * riwt_off field from the platform.
7333 */
7334 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7335 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7336 priv->use_riwt = 1;
7337 dev_info(priv->device,
7338 "Enable RX Mitigation via HW Watchdog Timer\n");
7339 }
7340
7341 return 0;
7342 }
7343
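/**
 * stmmac_napi_add - register the NAPI contexts
 * @dev: network device pointer
 * Description: initializes the per-channel state and registers the RX,
 * TX and combined RX/TX NAPI instances for every channel in use.
 */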
7344 static void stmmac_napi_add(struct net_device *dev)
7345 {
7346 struct stmmac_priv *priv = netdev_priv(dev);
7347 u32 queue, maxq;
7348
7349 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7350
7351 for (queue = 0; queue < maxq; queue++) {
7352 struct stmmac_channel *ch = &priv->channel[queue];
7353
7354 ch->priv_data = priv;
7355 ch->index = queue;
7356 spin_lock_init(&ch->lock);
7357
7358 if (queue < priv->plat->rx_queues_to_use) {
7359 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7360 }
7361 if (queue < priv->plat->tx_queues_to_use) {
7362 netif_napi_add_tx(dev, &ch->tx_napi,
7363 stmmac_napi_poll_tx);
7364 }
7365 if (queue < priv->plat->rx_queues_to_use &&
7366 queue < priv->plat->tx_queues_to_use) {
7367 netif_napi_add(dev, &ch->rxtx_napi,
7368 stmmac_napi_poll_rxtx);
7369 }
7370 }
7371 }
7372
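/**
 * stmmac_napi_del - unregister the NAPI contexts
 * @dev: network device pointer
 * Description: removes the NAPI instances registered by stmmac_napi_add().
 */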
7373 static void stmmac_napi_del(struct net_device *dev)
7374 {
7375 struct stmmac_priv *priv = netdev_priv(dev);
7376 u32 queue, maxq;
7377
7378 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7379
7380 for (queue = 0; queue < maxq; queue++) {
7381 struct stmmac_channel *ch = &priv->channel[queue];
7382
7383 if (queue < priv->plat->rx_queues_to_use)
7384 netif_napi_del(&ch->rx_napi);
7385 if (queue < priv->plat->tx_queues_to_use)
7386 netif_napi_del(&ch->tx_napi);
7387 if (queue < priv->plat->rx_queues_to_use &&
7388 queue < priv->plat->tx_queues_to_use) {
7389 netif_napi_del(&ch->rxtx_napi);
7390 }
7391 }
7392 }
7393
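/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: network device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: closes the interface if it is running, re-creates the
 * NAPI contexts for the new queue counts, refreshes the default RSS
 * table (unless configured by the user) and re-opens the interface.
 * Return: 0 on success, otherwise the stmmac_open() error code.
 */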
7394 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7395 {
7396 struct stmmac_priv *priv = netdev_priv(dev);
7397 int ret = 0, i;
7398
7399 if (netif_running(dev))
7400 stmmac_release(dev);
7401
7402 stmmac_napi_del(dev);
7403
7404 priv->plat->rx_queues_to_use = rx_cnt;
7405 priv->plat->tx_queues_to_use = tx_cnt;
7406 if (!netif_is_rxfh_configured(dev))
7407 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7408 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7409 rx_cnt);
7410
7411 stmmac_napi_add(dev);
7412
7413 if (netif_running(dev))
7414 ret = stmmac_open(dev);
7415
7416 return ret;
7417 }
7418
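/**
 * stmmac_reinit_ringparam - change the RX/TX descriptor ring sizes
 * @dev: network device pointer
 * @rx_size: new RX ring size
 * @tx_size: new TX ring size
 * Description: closes the interface if it is running, updates the DMA
 * ring sizes and re-opens the interface.
 * Return: 0 on success, otherwise the stmmac_open() error code.
 */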
7419 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7420 {
7421 struct stmmac_priv *priv = netdev_priv(dev);
7422 int ret = 0;
7423
7424 if (netif_running(dev))
7425 stmmac_release(dev);
7426
7427 priv->dma_conf.dma_rx_size = rx_size;
7428 priv->dma_conf.dma_tx_size = tx_size;
7429
7430 if (netif_running(dev))
7431 ret = stmmac_open(dev);
7432
7433 return ret;
7434 }
7435
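/**
 * stmmac_xdp_rx_timestamp - XDP metadata callback for RX timestamps
 * @_ctx: XDP metadata context (struct stmmac_xdp_buff)
 * @timestamp: where to store the hardware timestamp
 * Description: extracts the RX hardware timestamp from the descriptor
 * (the context/next descriptor on GMAC4 and XGMAC), subtracts the CDC
 * error adjustment and returns it.
 * Return: 0 on success, -ENODATA if RX timestamping is disabled or no
 * timestamp is available.
 */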
7436 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7437 {
7438 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7439 struct dma_desc *desc_contains_ts = ctx->desc;
7440 struct stmmac_priv *priv = ctx->priv;
7441 struct dma_desc *ndesc = ctx->ndesc;
7442 struct dma_desc *desc = ctx->desc;
7443 u64 ns = 0;
7444
7445 if (!priv->hwts_rx_en)
7446 return -ENODATA;
7447
7448 /* For GMAC4, the valid timestamp is from CTX next desc. */
7449 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7450 desc_contains_ts = ndesc;
7451
7452 /* Check if timestamp is available */
7453 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7454 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7455 ns -= priv->plat->cdc_error_adj;
7456 *timestamp = ns_to_ktime(ns);
7457 return 0;
7458 }
7459
7460 return -ENODATA;
7461 }
7462
7463 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7464 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7465 };
7466
7467 /**
7468 * stmmac_dvr_probe
7469 * @device: device pointer
7470 * @plat_dat: platform data pointer
7471 * @res: stmmac resource pointer
7472 * Description: this is the main probe function used to
7473  * call alloc_etherdev and allocate the priv structure.
7474 * Return:
7475 * returns 0 on success, otherwise errno.
7476 */
7477 int stmmac_dvr_probe(struct device *device,
7478 struct plat_stmmacenet_data *plat_dat,
7479 struct stmmac_resources *res)
7480 {
7481 struct net_device *ndev = NULL;
7482 struct stmmac_priv *priv;
7483 u32 rxq;
7484 int i, ret = 0;
7485
7486 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7487 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7488 if (!ndev)
7489 return -ENOMEM;
7490
7491 SET_NETDEV_DEV(ndev, device);
7492
7493 priv = netdev_priv(ndev);
7494 priv->device = device;
7495 priv->dev = ndev;
7496
7497 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7498 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7499 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7500 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7501 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7502 }
7503
7504 priv->xstats.pcpu_stats =
7505 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7506 if (!priv->xstats.pcpu_stats)
7507 return -ENOMEM;
7508
7509 stmmac_set_ethtool_ops(ndev);
7510 priv->pause_time = pause;
7511 priv->plat = plat_dat;
7512 priv->ioaddr = res->addr;
7513 priv->dev->base_addr = (unsigned long)res->addr;
7514 priv->plat->dma_cfg->multi_msi_en =
7515 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7516
7517 priv->dev->irq = res->irq;
7518 priv->wol_irq = res->wol_irq;
7519 priv->lpi_irq = res->lpi_irq;
7520 priv->sfty_irq = res->sfty_irq;
7521 priv->sfty_ce_irq = res->sfty_ce_irq;
7522 priv->sfty_ue_irq = res->sfty_ue_irq;
7523 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7524 priv->rx_irq[i] = res->rx_irq[i];
7525 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7526 priv->tx_irq[i] = res->tx_irq[i];
7527
7528 if (!is_zero_ether_addr(res->mac))
7529 eth_hw_addr_set(priv->dev, res->mac);
7530
7531 dev_set_drvdata(device, priv->dev);
7532
7533 /* Verify driver arguments */
7534 stmmac_verify_args();
7535
7536 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7537 if (!priv->af_xdp_zc_qps)
7538 return -ENOMEM;
7539
7540 /* Allocate workqueue */
7541 priv->wq = create_singlethread_workqueue("stmmac_wq");
7542 if (!priv->wq) {
7543 dev_err(priv->device, "failed to create workqueue\n");
7544 ret = -ENOMEM;
7545 goto error_wq_init;
7546 }
7547
7548 INIT_WORK(&priv->service_task, stmmac_service_task);
7549
7550 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7551
7552 /* Override with kernel parameters if supplied XXX CRS XXX
7553 * this needs to have multiple instances
7554 */
7555 if ((phyaddr >= 0) && (phyaddr <= 31))
7556 priv->plat->phy_addr = phyaddr;
7557
7558 if (priv->plat->stmmac_rst) {
7559 ret = reset_control_assert(priv->plat->stmmac_rst);
7560 reset_control_deassert(priv->plat->stmmac_rst);
7561 /* Some reset controllers provide only a reset callback instead
7562 * of an assert + deassert callback pair.
7563 */
7564 if (ret == -ENOTSUPP)
7565 reset_control_reset(priv->plat->stmmac_rst);
7566 }
7567
7568 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7569 if (ret == -ENOTSUPP)
7570 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7571 ERR_PTR(ret));
7572
7573 /* Wait a bit for the reset to take effect */
7574 udelay(10);
7575
7576 /* Init MAC and get the capabilities */
7577 ret = stmmac_hw_init(priv);
7578 if (ret)
7579 goto error_hw_init;
7580
7581 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7582 */
7583 if (priv->synopsys_id < DWMAC_CORE_5_20)
7584 priv->plat->dma_cfg->dche = false;
7585
7586 stmmac_check_ether_addr(priv);
7587
7588 ndev->netdev_ops = &stmmac_netdev_ops;
7589
7590 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7591 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7592
7593 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7594 NETIF_F_RXCSUM;
7595 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7596 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7597
7598 ret = stmmac_tc_init(priv, priv);
7599 if (!ret) {
7600 ndev->hw_features |= NETIF_F_HW_TC;
7601 }
7602
7603 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7604 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7605 if (priv->plat->has_gmac4)
7606 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7607 priv->tso = true;
7608 dev_info(priv->device, "TSO feature enabled\n");
7609 }
7610
7611 if (priv->dma_cap.sphen &&
7612 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7613 ndev->hw_features |= NETIF_F_GRO;
7614 priv->sph_cap = true;
7615 priv->sph = priv->sph_cap;
7616 dev_info(priv->device, "SPH feature enabled\n");
7617 }
7618
7619 /* Ideally our host DMA address width is the same as for the
7620 * device. However, it may differ and then we have to use our
7621 * host DMA width for allocation and the device DMA width for
7622 * register handling.
7623 */
7624 if (priv->plat->host_dma_width)
7625 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7626 else
7627 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7628
7629 if (priv->dma_cap.host_dma_width) {
7630 ret = dma_set_mask_and_coherent(device,
7631 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7632 if (!ret) {
7633 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7634 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7635
7636 /*
7637 * If more than 32 bits can be addressed, make sure to
7638 * enable enhanced addressing mode.
7639 */
7640 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7641 priv->plat->dma_cfg->eame = true;
7642 } else {
7643 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7644 if (ret) {
7645 dev_err(priv->device, "Failed to set DMA Mask\n");
7646 goto error_hw_init;
7647 }
7648
7649 priv->dma_cap.host_dma_width = 32;
7650 }
7651 }
7652
7653 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7654 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7655 #ifdef STMMAC_VLAN_TAG_USED
7656 /* Both mac100 and gmac support receive VLAN tag detection */
7657 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7658 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
7659 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7660 priv->hw->hw_vlan_en = true;
7661 }
7662 if (priv->dma_cap.vlhash) {
7663 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7664 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7665 }
7666 if (priv->dma_cap.vlins) {
7667 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7668 if (priv->dma_cap.dvlan)
7669 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7670 }
7671 #endif
7672 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7673
7674 priv->xstats.threshold = tc;
7675
7676 /* Initialize RSS */
7677 rxq = priv->plat->rx_queues_to_use;
7678 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7679 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7680 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7681
7682 if (priv->dma_cap.rssen && priv->plat->rss_en)
7683 ndev->features |= NETIF_F_RXHASH;
7684
7685 ndev->vlan_features |= ndev->features;
7686
7687 /* MTU range: 46 - hw-specific max */
7688 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7689 if (priv->plat->has_xgmac)
7690 ndev->max_mtu = XGMAC_JUMBO_LEN;
7691 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7692 ndev->max_mtu = JUMBO_LEN;
7693 else
7694 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7695 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7696 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7697 */
7698 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7699 (priv->plat->maxmtu >= ndev->min_mtu))
7700 ndev->max_mtu = priv->plat->maxmtu;
7701 else if (priv->plat->maxmtu < ndev->min_mtu)
7702 dev_warn(priv->device,
7703 "%s: warning: maxmtu having invalid value (%d)\n",
7704 __func__, priv->plat->maxmtu);
7705
7706 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7707
7708 /* Setup channels NAPI */
7709 stmmac_napi_add(ndev);
7710
7711 mutex_init(&priv->lock);
7712
7713 stmmac_fpe_init(priv);
7714
7715 /* If a specific clk_csr value is passed from the platform
7716 * this means that the CSR Clock Range selection cannot be
7717 * changed at run-time and is fixed. Otherwise, the driver will try to
7718 * set the MDC clock dynamically according to the actual CSR
7719 * clock input.
7720 */
7721 if (priv->plat->clk_csr >= 0)
7722 priv->clk_csr = priv->plat->clk_csr;
7723 else
7724 stmmac_clk_csr_set(priv);
7725
7726 stmmac_check_pcs_mode(priv);
7727
7728 pm_runtime_get_noresume(device);
7729 pm_runtime_set_active(device);
7730 if (!pm_runtime_enabled(device))
7731 pm_runtime_enable(device);
7732
7733 ret = stmmac_mdio_register(ndev);
7734 if (ret < 0) {
7735 dev_err_probe(priv->device, ret,
7736 "MDIO bus (id: %d) registration failed\n",
7737 priv->plat->bus_id);
7738 goto error_mdio_register;
7739 }
7740
7741 ret = stmmac_pcs_setup(ndev);
7742 if (ret)
7743 goto error_pcs_setup;
7744
7745 ret = stmmac_phy_setup(priv);
7746 if (ret) {
7747 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7748 goto error_phy_setup;
7749 }
7750
7751 ret = register_netdev(ndev);
7752 if (ret) {
7753 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7754 __func__, ret);
7755 goto error_netdev_register;
7756 }
7757
7758 #ifdef CONFIG_DEBUG_FS
7759 stmmac_init_fs(ndev);
7760 #endif
7761
7762 if (priv->plat->dump_debug_regs)
7763 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7764
7765 /* Let pm_runtime_put() disable the clocks.
7766 * If CONFIG_PM is not enabled, the clocks will stay powered.
7767 */
7768 pm_runtime_put(device);
7769
7770 return ret;
7771
7772 error_netdev_register:
7773 phylink_destroy(priv->phylink);
7774 error_phy_setup:
7775 stmmac_pcs_clean(ndev);
7776 error_pcs_setup:
7777 stmmac_mdio_unregister(ndev);
7778 error_mdio_register:
7779 stmmac_napi_del(ndev);
7780 error_hw_init:
7781 destroy_workqueue(priv->wq);
7782 error_wq_init:
7783 bitmap_free(priv->af_xdp_zc_qps);
7784
7785 return ret;
7786 }
7787 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7788
7789 /**
7790 * stmmac_dvr_remove
7791 * @dev: device pointer
7792 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7793 * changes the link status and releases the DMA descriptor rings.
7794 */
7795 void stmmac_dvr_remove(struct device *dev)
7796 {
7797 struct net_device *ndev = dev_get_drvdata(dev);
7798 struct stmmac_priv *priv = netdev_priv(ndev);
7799
7800 netdev_info(priv->dev, "%s: removing driver", __func__);
7801
7802 pm_runtime_get_sync(dev);
7803
7804 unregister_netdev(ndev);
7805
7806 #ifdef CONFIG_DEBUG_FS
7807 stmmac_exit_fs(ndev);
7808 #endif
7809 phylink_destroy(priv->phylink);
7810 if (priv->plat->stmmac_rst)
7811 reset_control_assert(priv->plat->stmmac_rst);
7812 reset_control_assert(priv->plat->stmmac_ahb_rst);
7813
7814 stmmac_pcs_clean(ndev);
7815 stmmac_mdio_unregister(ndev);
7816
7817 destroy_workqueue(priv->wq);
7818 mutex_destroy(&priv->lock);
7819 bitmap_free(priv->af_xdp_zc_qps);
7820
7821 pm_runtime_disable(dev);
7822 pm_runtime_put_noidle(dev);
7823 }
7824 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7825
7826 /**
7827 * stmmac_suspend - suspend callback
7828 * @dev: device pointer
7829 * Description: this is the function to suspend the device; it is called
7830 * by the platform driver to stop the network queues, release the resources,
7831 * program the PMT register (for WoL) and clean up the driver resources.
7832 */
7833 int stmmac_suspend(struct device *dev)
7834 {
7835 struct net_device *ndev = dev_get_drvdata(dev);
7836 struct stmmac_priv *priv = netdev_priv(ndev);
7837 u32 chan;
7838
7839 if (!ndev || !netif_running(ndev))
7840 return 0;
7841
7842 mutex_lock(&priv->lock);
7843
7844 netif_device_detach(ndev);
7845
7846 stmmac_disable_all_queues(priv);
7847
7848 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7849 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7850
7851 if (priv->eee_sw_timer_en) {
7852 priv->tx_path_in_lpi_mode = false;
7853 timer_delete_sync(&priv->eee_ctrl_timer);
7854 }
7855
7856 /* Stop TX/RX DMA */
7857 stmmac_stop_all_dma(priv);
7858
7859 if (priv->plat->serdes_powerdown)
7860 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7861
7862 /* Enable Power down mode by programming the PMT regs */
7863 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7864 stmmac_pmt(priv, priv->hw, priv->wolopts);
7865 priv->irq_wake = 1;
7866 } else {
7867 stmmac_mac_set(priv, priv->ioaddr, false);
7868 pinctrl_pm_select_sleep_state(priv->device);
7869 }
7870
7871 mutex_unlock(&priv->lock);
7872
7873 rtnl_lock();
7874 if (device_may_wakeup(priv->device) && !priv->plat->pmt)
7875 phylink_speed_down(priv->phylink, false);
7876
7877 phylink_suspend(priv->phylink,
7878 device_may_wakeup(priv->device) && priv->plat->pmt);
7879 rtnl_unlock();
7880
7881 if (stmmac_fpe_supported(priv))
7882 ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
7883
7884 return 0;
7885 }
7886 EXPORT_SYMBOL_GPL(stmmac_suspend);
7887
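/**
 * stmmac_reset_rx_queue - reset the software state of an RX queue
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: resets the current and dirty RX ring indices.
 */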
7888 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7889 {
7890 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7891
7892 rx_q->cur_rx = 0;
7893 rx_q->dirty_rx = 0;
7894 }
7895
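/**
 * stmmac_reset_tx_queue - reset the software state of a TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: resets the current and dirty TX ring indices and the MSS
 * value, and resets the corresponding netdev TX queue (BQL state).
 */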
7896 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7897 {
7898 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7899
7900 tx_q->cur_tx = 0;
7901 tx_q->dirty_tx = 0;
7902 tx_q->mss = 0;
7903
7904 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7905 }
7906
7907 /**
7908 * stmmac_reset_queues_param - reset queue parameters
7909 * @priv: device pointer
7910 */
7911 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7912 {
7913 u32 rx_cnt = priv->plat->rx_queues_to_use;
7914 u32 tx_cnt = priv->plat->tx_queues_to_use;
7915 u32 queue;
7916
7917 for (queue = 0; queue < rx_cnt; queue++)
7918 stmmac_reset_rx_queue(priv, queue);
7919
7920 for (queue = 0; queue < tx_cnt; queue++)
7921 stmmac_reset_tx_queue(priv, queue);
7922 }
7923
7924 /**
7925 * stmmac_resume - resume callback
7926 * @dev: device pointer
7927 * Description: on resume, this function is invoked to set up the DMA and CORE
7928 * in a usable state.
7929 */
7930 int stmmac_resume(struct device *dev)
7931 {
7932 struct net_device *ndev = dev_get_drvdata(dev);
7933 struct stmmac_priv *priv = netdev_priv(ndev);
7934 int ret;
7935
7936 if (!netif_running(ndev))
7937 return 0;
7938
7939 /* The Power Down bit in the PM register is cleared
7940 * automatically as soon as a magic packet or a Wake-up frame
7941 * is received. Anyway, it's better to manually clear
7942 * this bit because it can generate problems while resuming
7943 * from other devices (e.g. serial console).
7944 */
7945 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7946 mutex_lock(&priv->lock);
7947 stmmac_pmt(priv, priv->hw, 0);
7948 mutex_unlock(&priv->lock);
7949 priv->irq_wake = 0;
7950 } else {
7951 pinctrl_pm_select_default_state(priv->device);
7952 /* reset the phy so that it's ready */
7953 if (priv->mii)
7954 stmmac_mdio_reset(priv->mii);
7955 }
7956
7957 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7958 priv->plat->serdes_powerup) {
7959 ret = priv->plat->serdes_powerup(ndev,
7960 priv->plat->bsp_priv);
7961
7962 if (ret < 0)
7963 return ret;
7964 }
7965
7966 rtnl_lock();
7967
7968 /* Prepare the PHY to resume, ensuring that its clocks which are
7969 * necessary for the MAC DMA reset to complete are running
7970 */
7971 phylink_prepare_resume(priv->phylink);
7972
7973 mutex_lock(&priv->lock);
7974
7975 stmmac_reset_queues_param(priv);
7976
7977 stmmac_free_tx_skbufs(priv);
7978 stmmac_clear_descriptors(priv, &priv->dma_conf);
7979
7980 stmmac_hw_setup(ndev, false);
7981 stmmac_init_coalesce(priv);
7982 phylink_rx_clk_stop_block(priv->phylink);
7983 stmmac_set_rx_mode(ndev);
7984
7985 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7986 phylink_rx_clk_stop_unblock(priv->phylink);
7987
7988 stmmac_enable_all_queues(priv);
7989 stmmac_enable_all_dma_irq(priv);
7990
7991 mutex_unlock(&priv->lock);
7992
7993 /* phylink_resume() must be called after the hardware has been
7994 * initialised because it may bring the link up immediately in a
7995 * workqueue thread, which will race with initialisation.
7996 */
7997 phylink_resume(priv->phylink);
7998 if (device_may_wakeup(priv->device) && !priv->plat->pmt)
7999 phylink_speed_up(priv->phylink);
8000
8001 rtnl_unlock();
8002
8003 netif_device_attach(ndev);
8004
8005 return 0;
8006 }
8007 EXPORT_SYMBOL_GPL(stmmac_resume);
8008
8009 #ifndef MODULE
8010 static int __init stmmac_cmdline_opt(char *str)
8011 {
8012 char *opt;
8013
8014 if (!str || !*str)
8015 return 1;
8016 while ((opt = strsep(&str, ",")) != NULL) {
8017 if (!strncmp(opt, "debug:", 6)) {
8018 if (kstrtoint(opt + 6, 0, &debug))
8019 goto err;
8020 } else if (!strncmp(opt, "phyaddr:", 8)) {
8021 if (kstrtoint(opt + 8, 0, &phyaddr))
8022 goto err;
8023 } else if (!strncmp(opt, "tc:", 3)) {
8024 if (kstrtoint(opt + 3, 0, &tc))
8025 goto err;
8026 } else if (!strncmp(opt, "watchdog:", 9)) {
8027 if (kstrtoint(opt + 9, 0, &watchdog))
8028 goto err;
8029 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
8030 if (kstrtoint(opt + 10, 0, &flow_ctrl))
8031 goto err;
8032 } else if (!strncmp(opt, "pause:", 6)) {
8033 if (kstrtoint(opt + 6, 0, &pause))
8034 goto err;
8035 } else if (!strncmp(opt, "eee_timer:", 10)) {
8036 if (kstrtoint(opt + 10, 0, &eee_timer))
8037 goto err;
8038 } else if (!strncmp(opt, "chain_mode:", 11)) {
8039 if (kstrtoint(opt + 11, 0, &chain_mode))
8040 goto err;
8041 }
8042 }
8043 return 1;
8044
8045 err:
8046 pr_err("%s: ERROR broken module parameter conversion", __func__);
8047 return 1;
8048 }
8049
8050 __setup("stmmaceth=", stmmac_cmdline_opt);
8051 #endif /* MODULE */
8052
8053 static int __init stmmac_init(void)
8054 {
8055 #ifdef CONFIG_DEBUG_FS
8056 /* Create debugfs main directory if it doesn't exist yet */
8057 if (!stmmac_fs_dir)
8058 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8059 register_netdevice_notifier(&stmmac_notifier);
8060 #endif
8061
8062 return 0;
8063 }
8064
8065 static void __exit stmmac_exit(void)
8066 {
8067 #ifdef CONFIG_DEBUG_FS
8068 unregister_netdevice_notifier(&stmmac_notifier);
8069 debugfs_remove_recursive(stmmac_fs_dir);
8070 #endif
8071 }
8072
8073 module_init(stmmac_init)
8074 module_exit(stmmac_exit)
8075
8076 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8077 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8078 MODULE_LICENSE("GPL");
8079