1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54
55 /* As long as the interface is active, we keep the timestamping counter enabled
56 * with fine resolution and binary rollover. This avoids non-monotonic behavior
57 * (clock jumps) when changing timestamping settings at runtime.
58 */
59 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 PTP_TCR_TSCTRLSSR)
61
62 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
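/* For illustration only (assuming 64-byte cache lines):
 * STMMAC_ALIGN(1500) = ALIGN(ALIGN(1500, 64), 16) = 1536.
 */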
63 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
64
65 /* Module parameters */
66 #define TX_TIMEO 5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78
79 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
80 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
81
82 /* Limit to make sure XDP TX and slow path can coexist */
83 #define STMMAC_XSK_TX_BUDGET_MAX 256
84 #define STMMAC_TX_XSK_AVAIL 16
85 #define STMMAC_RX_FILL_BATCH 16
86
87 #define STMMAC_XDP_PASS 0
88 #define STMMAC_XDP_CONSUMED BIT(0)
89 #define STMMAC_XDP_TX BIT(1)
90 #define STMMAC_XDP_REDIRECT BIT(2)
91
92 static int flow_ctrl = FLOW_AUTO;
93 module_param(flow_ctrl, int, 0644);
94 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
95
96 static int pause = PAUSE_TIME;
97 module_param(pause, int, 0644);
98 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
99
100 #define TC_DEFAULT 64
101 static int tc = TC_DEFAULT;
102 module_param(tc, int, 0644);
103 MODULE_PARM_DESC(tc, "DMA threshold control value");
104
105 #define DEFAULT_BUFSIZE 1536
106 static int buf_sz = DEFAULT_BUFSIZE;
107 module_param(buf_sz, int, 0644);
108 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
109
110 #define STMMAC_RX_COPYBREAK 256
111
112 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
113 NETIF_MSG_LINK | NETIF_MSG_IFUP |
114 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
115
116 #define STMMAC_DEFAULT_LPI_TIMER 1000
117 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
118 module_param(eee_timer, int, 0644);
119 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
120 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
121
122 /* By default the driver will use the ring mode to manage tx and rx descriptors,
123 * but allows the user to force use of the chain mode instead of the ring.
124 */
125 static unsigned int chain_mode;
126 module_param(chain_mode, int, 0444);
127 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
128
129 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
130 /* For MSI interrupts handling */
131 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
133 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
134 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
135 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
138 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
139 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
140 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
141 u32 rxmode, u32 chan);
142
143 #ifdef CONFIG_DEBUG_FS
144 static const struct net_device_ops stmmac_netdev_ops;
145 static void stmmac_init_fs(struct net_device *dev);
146 static void stmmac_exit_fs(struct net_device *dev);
147 #endif
148
149 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
150
151 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
152 {
153 int ret = 0;
154
155 if (enabled) {
156 ret = clk_prepare_enable(priv->plat->stmmac_clk);
157 if (ret)
158 return ret;
159 ret = clk_prepare_enable(priv->plat->pclk);
160 if (ret) {
161 clk_disable_unprepare(priv->plat->stmmac_clk);
162 return ret;
163 }
164 if (priv->plat->clks_config) {
165 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
166 if (ret) {
167 clk_disable_unprepare(priv->plat->stmmac_clk);
168 clk_disable_unprepare(priv->plat->pclk);
169 return ret;
170 }
171 }
172 } else {
173 clk_disable_unprepare(priv->plat->stmmac_clk);
174 clk_disable_unprepare(priv->plat->pclk);
175 if (priv->plat->clks_config)
176 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
177 }
178
179 return ret;
180 }
181 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
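/* A minimal usage sketch (hypothetical caller, not part of this file):
 * platform glue that needs the bus clocks up around a register access
 * would pair the enable and disable paths, e.g.
 *
 *	ret = stmmac_bus_clks_config(priv, true);
 *	if (ret)
 *		return ret;
 *	(access MAC/DMA registers here)
 *	stmmac_bus_clks_config(priv, false);
 */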
182
183 /**
184 * stmmac_verify_args - verify the driver parameters.
185 * Description: it checks the driver parameters and sets a default in case of
186 * errors.
187 */
188 static void stmmac_verify_args(void)
189 {
190 if (unlikely(watchdog < 0))
191 watchdog = TX_TIMEO;
192 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
193 buf_sz = DEFAULT_BUFSIZE;
194 if (unlikely(flow_ctrl > 1))
195 flow_ctrl = FLOW_AUTO;
196 else if (likely(flow_ctrl < 0))
197 flow_ctrl = FLOW_OFF;
198 if (unlikely((pause < 0) || (pause > 0xffff)))
199 pause = PAUSE_TIME;
200 if (eee_timer < 0)
201 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
202 }
203
204 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
205 {
206 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
207 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
208 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
209 u32 queue;
210
211 for (queue = 0; queue < maxq; queue++) {
212 struct stmmac_channel *ch = &priv->channel[queue];
213
214 if (stmmac_xdp_is_enabled(priv) &&
215 test_bit(queue, priv->af_xdp_zc_qps)) {
216 napi_disable(&ch->rxtx_napi);
217 continue;
218 }
219
220 if (queue < rx_queues_cnt)
221 napi_disable(&ch->rx_napi);
222 if (queue < tx_queues_cnt)
223 napi_disable(&ch->tx_napi);
224 }
225 }
226
227 /**
228 * stmmac_disable_all_queues - Disable all queues
229 * @priv: driver private structure
230 */
231 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
232 {
233 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
234 struct stmmac_rx_queue *rx_q;
235 u32 queue;
236
237 /* synchronize_rcu() needed for pending XDP buffers to drain */
238 for (queue = 0; queue < rx_queues_cnt; queue++) {
239 rx_q = &priv->dma_conf.rx_queue[queue];
240 if (rx_q->xsk_pool) {
241 synchronize_rcu();
242 break;
243 }
244 }
245
246 __stmmac_disable_all_queues(priv);
247 }
248
249 /**
250 * stmmac_enable_all_queues - Enable all queues
251 * @priv: driver private structure
252 */
253 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
254 {
255 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
256 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
257 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
258 u32 queue;
259
260 for (queue = 0; queue < maxq; queue++) {
261 struct stmmac_channel *ch = &priv->channel[queue];
262
263 if (stmmac_xdp_is_enabled(priv) &&
264 test_bit(queue, priv->af_xdp_zc_qps)) {
265 napi_enable(&ch->rxtx_napi);
266 continue;
267 }
268
269 if (queue < rx_queues_cnt)
270 napi_enable(&ch->rx_napi);
271 if (queue < tx_queues_cnt)
272 napi_enable(&ch->tx_napi);
273 }
274 }
275
276 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
277 {
278 if (!test_bit(STMMAC_DOWN, &priv->state) &&
279 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
280 queue_work(priv->wq, &priv->service_task);
281 }
282
283 static void stmmac_global_err(struct stmmac_priv *priv)
284 {
285 netif_carrier_off(priv->dev);
286 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
287 stmmac_service_event_schedule(priv);
288 }
289
290 /**
291 * stmmac_clk_csr_set - dynamically set the MDC clock
292 * @priv: driver private structure
293 * Description: this is to dynamically set the MDC clock according to the csr
294 * clock input.
295 * Note:
296 * If a specific clk_csr value is passed from the platform
297 * this means that the CSR Clock Range selection cannot be
298 * changed at run-time and it is fixed (as reported in the driver
299 * documentation). Otherwise the driver will try to set the MDC
300 * clock dynamically according to the actual clock input.
301 */
302 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
303 {
304 u32 clk_rate;
305
306 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
307
308 /* Platform provided default clk_csr would be assumed valid
309 * for all other cases except for the below mentioned ones.
310 * For values higher than the IEEE 802.3 specified frequency
311 * we cannot estimate the proper divider as the frequency of
312 * clk_csr_i is not known. So we do not change the default
313 * divider.
314 */
315 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
316 if (clk_rate < CSR_F_35M)
317 priv->clk_csr = STMMAC_CSR_20_35M;
318 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
319 priv->clk_csr = STMMAC_CSR_35_60M;
320 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
321 priv->clk_csr = STMMAC_CSR_60_100M;
322 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
323 priv->clk_csr = STMMAC_CSR_100_150M;
324 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
325 priv->clk_csr = STMMAC_CSR_150_250M;
326 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
327 priv->clk_csr = STMMAC_CSR_250_300M;
328 }
329
330 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
331 if (clk_rate > 160000000)
332 priv->clk_csr = 0x03;
333 else if (clk_rate > 80000000)
334 priv->clk_csr = 0x02;
335 else if (clk_rate > 40000000)
336 priv->clk_csr = 0x01;
337 else
338 priv->clk_csr = 0;
339 }
340
341 if (priv->plat->has_xgmac) {
342 if (clk_rate > 400000000)
343 priv->clk_csr = 0x5;
344 else if (clk_rate > 350000000)
345 priv->clk_csr = 0x4;
346 else if (clk_rate > 300000000)
347 priv->clk_csr = 0x3;
348 else if (clk_rate > 250000000)
349 priv->clk_csr = 0x2;
350 else if (clk_rate > 150000000)
351 priv->clk_csr = 0x1;
352 else
353 priv->clk_csr = 0x0;
354 }
355 }
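/* For example (illustrative values, not from any specific platform): a 75 MHz
 * csr clock falls in the 60-100 MHz range above, so priv->clk_csr is set to
 * STMMAC_CSR_60_100M and the MDIO code derives the MDC divider from that range.
 */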
356
357 static void print_pkt(unsigned char *buf, int len)
358 {
359 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
360 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
361 }
362
363 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
364 {
365 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
366 u32 avail;
367
368 if (tx_q->dirty_tx > tx_q->cur_tx)
369 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
370 else
371 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
372
373 return avail;
374 }
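/* Worked example of the ring arithmetic above: with dma_tx_size = 512,
 * cur_tx = 510 and dirty_tx = 5, there are 505 in-flight descriptors and
 * 512 - 510 + 5 - 1 = 6 slots are reported free; the -1 keeps one entry
 * unused so a full ring is not mistaken for an empty one.
 */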
375
376 /**
377 * stmmac_rx_dirty - Get RX queue dirty
378 * @priv: driver private structure
379 * @queue: RX queue index
380 */
381 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
382 {
383 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
384 u32 dirty;
385
386 if (rx_q->dirty_rx <= rx_q->cur_rx)
387 dirty = rx_q->cur_rx - rx_q->dirty_rx;
388 else
389 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
390
391 return dirty;
392 }
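/* Symmetric example for the RX side: with dma_rx_size = 512, cur_rx = 3 and
 * dirty_rx = 509, the indexes have wrapped and 512 - 509 + 3 = 6 entries are
 * dirty and need to be refilled.
 */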
393
394 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
395 {
396 int tx_lpi_timer;
397
398 /* Clear/set the SW EEE timer flag based on LPI ET enablement */
399 priv->eee_sw_timer_en = en ? 0 : 1;
400 tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
401 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
402 }
403
404 /**
405 * stmmac_enable_eee_mode - check and enter in LPI mode
406 * @priv: driver private structure
407 * Description: this function checks whether all TX queues are idle and, if
408 * so, enters LPI mode when EEE is enabled.
409 */
410 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
411 {
412 u32 tx_cnt = priv->plat->tx_queues_to_use;
413 u32 queue;
414
415 /* check if all TX queues have the work finished */
416 for (queue = 0; queue < tx_cnt; queue++) {
417 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
418
419 if (tx_q->dirty_tx != tx_q->cur_tx)
420 return -EBUSY; /* still unfinished work */
421 }
422
423 /* Check and enter in LPI mode */
424 if (!priv->tx_path_in_lpi_mode)
425 stmmac_set_eee_mode(priv, priv->hw,
426 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
427 return 0;
428 }
429
430 /**
431 * stmmac_disable_eee_mode - disable and exit from LPI mode
432 * @priv: driver private structure
433 * Description: this function exits and disables EEE when the LPI state
434 * is active. It is called from the xmit path.
435 */
436 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
437 {
438 if (!priv->eee_sw_timer_en) {
439 stmmac_lpi_entry_timer_config(priv, 0);
440 return;
441 }
442
443 stmmac_reset_eee_mode(priv, priv->hw);
444 del_timer_sync(&priv->eee_ctrl_timer);
445 priv->tx_path_in_lpi_mode = false;
446 }
447
448 /**
449 * stmmac_eee_ctrl_timer - EEE TX SW timer.
450 * @t: timer_list struct containing private info
451 * Description:
452 * if there is no data transfer and we are not already in the LPI state,
453 * then the MAC transmitter can be moved to the LPI state.
454 */
455 static void stmmac_eee_ctrl_timer(struct timer_list *t)
456 {
457 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
458
459 if (stmmac_enable_eee_mode(priv))
460 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
461 }
462
463 /**
464 * stmmac_eee_init - init EEE
465 * @priv: driver private structure
466 * Description:
467 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
468 * can also manage EEE, this function enables the LPI state and starts the
469 * related timer.
470 */
471 bool stmmac_eee_init(struct stmmac_priv *priv)
472 {
473 int eee_tw_timer = priv->eee_tw_timer;
474
475 /* Check if MAC core supports the EEE feature. */
476 if (!priv->dma_cap.eee)
477 return false;
478
479 mutex_lock(&priv->lock);
480
481 /* Check if it needs to be deactivated */
482 if (!priv->eee_active) {
483 if (priv->eee_enabled) {
484 netdev_dbg(priv->dev, "disable EEE\n");
485 stmmac_lpi_entry_timer_config(priv, 0);
486 del_timer_sync(&priv->eee_ctrl_timer);
487 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
488 if (priv->hw->xpcs)
489 xpcs_config_eee(priv->hw->xpcs,
490 priv->plat->mult_fact_100ns,
491 false);
492 }
493 mutex_unlock(&priv->lock);
494 return false;
495 }
496
497 if (priv->eee_active && !priv->eee_enabled) {
498 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
499 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
500 eee_tw_timer);
501 if (priv->hw->xpcs)
502 xpcs_config_eee(priv->hw->xpcs,
503 priv->plat->mult_fact_100ns,
504 true);
505 }
506
507 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
508 del_timer_sync(&priv->eee_ctrl_timer);
509 priv->tx_path_in_lpi_mode = false;
510 stmmac_lpi_entry_timer_config(priv, 1);
511 } else {
512 stmmac_lpi_entry_timer_config(priv, 0);
513 mod_timer(&priv->eee_ctrl_timer,
514 STMMAC_LPI_T(priv->tx_lpi_timer));
515 }
516
517 mutex_unlock(&priv->lock);
518 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
519 return true;
520 }
521
522 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
523 * @priv: driver private structure
524 * @p : descriptor pointer
525 * @skb : the socket buffer
526 * Description :
527 * This function reads the timestamp from the descriptor, performs some
528 * sanity checks and passes it to the stack.
529 */
530 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
531 struct dma_desc *p, struct sk_buff *skb)
532 {
533 struct skb_shared_hwtstamps shhwtstamp;
534 bool found = false;
535 u64 ns = 0;
536
537 if (!priv->hwts_tx_en)
538 return;
539
540 /* exit if skb doesn't support hw tstamp */
541 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
542 return;
543
544 /* check tx tstamp status */
545 if (stmmac_get_tx_timestamp_status(priv, p)) {
546 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
547 found = true;
548 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
549 found = true;
550 }
551
552 if (found) {
553 ns -= priv->plat->cdc_error_adj;
554
555 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
556 shhwtstamp.hwtstamp = ns_to_ktime(ns);
557
558 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
559 /* pass tstamp to stack */
560 skb_tstamp_tx(skb, &shhwtstamp);
561 }
562 }
563
564 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
565 * @priv: driver private structure
566 * @p : descriptor pointer
567 * @np : next descriptor pointer
568 * @skb : the socket buffer
569 * Description :
570 * This function reads the received packet's timestamp from the descriptor
571 * and passes it to the stack. It also performs some sanity checks.
572 */
573 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
574 struct dma_desc *np, struct sk_buff *skb)
575 {
576 struct skb_shared_hwtstamps *shhwtstamp = NULL;
577 struct dma_desc *desc = p;
578 u64 ns = 0;
579
580 if (!priv->hwts_rx_en)
581 return;
582 /* For GMAC4, the valid timestamp is from CTX next desc. */
583 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
584 desc = np;
585
586 /* Check if timestamp is available */
587 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
588 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
589
590 ns -= priv->plat->cdc_error_adj;
591
592 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
593 shhwtstamp = skb_hwtstamps(skb);
594 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
595 shhwtstamp->hwtstamp = ns_to_ktime(ns);
596 } else {
597 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
598 }
599 }
600
601 /**
602 * stmmac_hwtstamp_set - control hardware timestamping.
603 * @dev: device pointer.
604 * @ifr: An IOCTL-specific structure that can contain a pointer to
605 * a proprietary structure used to pass information to the driver.
606 * Description:
607 * This function configures the MAC to enable/disable both outgoing (TX)
608 * and incoming (RX) packet timestamping based on user input.
609 * Return Value:
610 * 0 on success and an appropriate -ve integer on failure.
611 */
612 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
613 {
614 struct stmmac_priv *priv = netdev_priv(dev);
615 struct hwtstamp_config config;
616 u32 ptp_v2 = 0;
617 u32 tstamp_all = 0;
618 u32 ptp_over_ipv4_udp = 0;
619 u32 ptp_over_ipv6_udp = 0;
620 u32 ptp_over_ethernet = 0;
621 u32 snap_type_sel = 0;
622 u32 ts_master_en = 0;
623 u32 ts_event_en = 0;
624
625 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
626 netdev_alert(priv->dev, "No support for HW time stamping\n");
627 priv->hwts_tx_en = 0;
628 priv->hwts_rx_en = 0;
629
630 return -EOPNOTSUPP;
631 }
632
633 if (copy_from_user(&config, ifr->ifr_data,
634 sizeof(config)))
635 return -EFAULT;
636
637 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
638 __func__, config.flags, config.tx_type, config.rx_filter);
639
640 if (config.tx_type != HWTSTAMP_TX_OFF &&
641 config.tx_type != HWTSTAMP_TX_ON)
642 return -ERANGE;
643
644 if (priv->adv_ts) {
645 switch (config.rx_filter) {
646 case HWTSTAMP_FILTER_NONE:
647 /* time stamp no incoming packet at all */
648 config.rx_filter = HWTSTAMP_FILTER_NONE;
649 break;
650
651 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
652 /* PTP v1, UDP, any kind of event packet */
653 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
654 /* 'xmac' hardware can support Sync, Pdelay_Req and
655 * Pdelay_resp by setting bit14 and bits17/16 to 01.
656 * This leaves Delay_Req timestamps out.
657 * Enable all events *and* general purpose message
658 * timestamping
659 */
660 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
661 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
662 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
663 break;
664
665 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
666 /* PTP v1, UDP, Sync packet */
667 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
668 /* take time stamp for SYNC messages only */
669 ts_event_en = PTP_TCR_TSEVNTENA;
670
671 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
672 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
673 break;
674
675 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
676 /* PTP v1, UDP, Delay_req packet */
677 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
678 /* take time stamp for Delay_Req messages only */
679 ts_master_en = PTP_TCR_TSMSTRENA;
680 ts_event_en = PTP_TCR_TSEVNTENA;
681
682 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
683 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
684 break;
685
686 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
687 /* PTP v2, UDP, any kind of event packet */
688 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
689 ptp_v2 = PTP_TCR_TSVER2ENA;
690 /* take time stamp for all event messages */
691 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
692
693 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
694 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
695 break;
696
697 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
698 /* PTP v2, UDP, Sync packet */
699 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
700 ptp_v2 = PTP_TCR_TSVER2ENA;
701 /* take time stamp for SYNC messages only */
702 ts_event_en = PTP_TCR_TSEVNTENA;
703
704 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
705 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
706 break;
707
708 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
709 /* PTP v2, UDP, Delay_req packet */
710 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
711 ptp_v2 = PTP_TCR_TSVER2ENA;
712 /* take time stamp for Delay_Req messages only */
713 ts_master_en = PTP_TCR_TSMSTRENA;
714 ts_event_en = PTP_TCR_TSEVNTENA;
715
716 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
717 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
718 break;
719
720 case HWTSTAMP_FILTER_PTP_V2_EVENT:
721 /* PTP v2/802.AS1 any layer, any kind of event packet */
722 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
723 ptp_v2 = PTP_TCR_TSVER2ENA;
724 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
725 if (priv->synopsys_id < DWMAC_CORE_4_10)
726 ts_event_en = PTP_TCR_TSEVNTENA;
727 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
728 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
729 ptp_over_ethernet = PTP_TCR_TSIPENA;
730 break;
731
732 case HWTSTAMP_FILTER_PTP_V2_SYNC:
733 /* PTP v2/802.AS1, any layer, Sync packet */
734 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
735 ptp_v2 = PTP_TCR_TSVER2ENA;
736 /* take time stamp for SYNC messages only */
737 ts_event_en = PTP_TCR_TSEVNTENA;
738
739 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
740 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
741 ptp_over_ethernet = PTP_TCR_TSIPENA;
742 break;
743
744 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
745 /* PTP v2/802.AS1, any layer, Delay_req packet */
746 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
747 ptp_v2 = PTP_TCR_TSVER2ENA;
748 /* take time stamp for Delay_Req messages only */
749 ts_master_en = PTP_TCR_TSMSTRENA;
750 ts_event_en = PTP_TCR_TSEVNTENA;
751
752 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
753 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
754 ptp_over_ethernet = PTP_TCR_TSIPENA;
755 break;
756
757 case HWTSTAMP_FILTER_NTP_ALL:
758 case HWTSTAMP_FILTER_ALL:
759 /* time stamp any incoming packet */
760 config.rx_filter = HWTSTAMP_FILTER_ALL;
761 tstamp_all = PTP_TCR_TSENALL;
762 break;
763
764 default:
765 return -ERANGE;
766 }
767 } else {
768 switch (config.rx_filter) {
769 case HWTSTAMP_FILTER_NONE:
770 config.rx_filter = HWTSTAMP_FILTER_NONE;
771 break;
772 default:
773 /* PTP v1, UDP, any kind of event packet */
774 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
775 break;
776 }
777 }
778 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
779 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
780
781 priv->systime_flags = STMMAC_HWTS_ACTIVE;
782
783 if (priv->hwts_tx_en || priv->hwts_rx_en) {
784 priv->systime_flags |= tstamp_all | ptp_v2 |
785 ptp_over_ethernet | ptp_over_ipv6_udp |
786 ptp_over_ipv4_udp | ts_event_en |
787 ts_master_en | snap_type_sel;
788 }
789
790 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
791
792 memcpy(&priv->tstamp_config, &config, sizeof(config));
793
794 return copy_to_user(ifr->ifr_data, &config,
795 sizeof(config)) ? -EFAULT : 0;
796 }
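/* A hedged userspace sketch of how this handler is reached (the interface
 * name "eth0" and the socket fd are illustrative): the standard SIOCSHWTSTAMP
 * ioctl carries a struct hwtstamp_config from <linux/net_tstamp.h>, e.g.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	ifr.ifr_data = (char *)&cfg;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */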
797
798 /**
799 * stmmac_hwtstamp_get - read hardware timestamping.
800 * @dev: device pointer.
801 * @ifr: An IOCTL-specific structure that can contain a pointer to
802 * a proprietary structure used to pass information to the driver.
803 * Description:
804 * This function obtains the current hardware timestamping settings
805 * as requested.
806 */
807 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
808 {
809 struct stmmac_priv *priv = netdev_priv(dev);
810 struct hwtstamp_config *config = &priv->tstamp_config;
811
812 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
813 return -EOPNOTSUPP;
814
815 return copy_to_user(ifr->ifr_data, config,
816 sizeof(*config)) ? -EFAULT : 0;
817 }
818
819 /**
820 * stmmac_init_tstamp_counter - init hardware timestamping counter
821 * @priv: driver private structure
822 * @systime_flags: timestamping flags
823 * Description:
824 * Initialize hardware counter for packet timestamping.
825 * This is valid as long as the interface is open and not suspended.
826 * It is rerun after resuming from suspend, in which case the timestamping
827 * flags updated by stmmac_hwtstamp_set() also need to be restored.
828 */
829 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
830 {
831 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
832 struct timespec64 now;
833 u32 sec_inc = 0;
834 u64 temp = 0;
835
836 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
837 return -EOPNOTSUPP;
838
839 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
840 priv->systime_flags = systime_flags;
841
842 /* program Sub Second Increment reg */
843 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
844 priv->plat->clk_ptp_rate,
845 xmac, &sec_inc);
846 temp = div_u64(1000000000ULL, sec_inc);
847
848 /* Store sub second increment for later use */
849 priv->sub_second_inc = sec_inc;
850
851 /* Calculate the default addend value:
852 * formula is:
853 * addend = (2^32)/freq_div_ratio
854 * where freq_div_ratio = clk_ptp_rate/(1e9ns/sec_inc)
855 */
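/* For example (illustrative values): with clk_ptp_rate = 50 MHz the fine
 * method typically yields sec_inc = 40 ns, so freq_div_ratio =
 * 50e6 / (1e9 / 40) = 2 and the default addend is 2^32 / 2 = 0x80000000.
 */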
856 temp = (u64)(temp << 32);
857 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
858 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
859
860 /* initialize system time */
861 ktime_get_real_ts64(&now);
862
863 /* lower 32 bits of tv_sec are safe until y2106 */
864 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
865
866 return 0;
867 }
868 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
869
870 /**
871 * stmmac_init_ptp - init PTP
872 * @priv: driver private structure
873 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
874 * This is done by looking at the HW cap. register.
875 * This function also registers the ptp driver.
876 */
877 static int stmmac_init_ptp(struct stmmac_priv *priv)
878 {
879 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
880 int ret;
881
882 if (priv->plat->ptp_clk_freq_config)
883 priv->plat->ptp_clk_freq_config(priv);
884
885 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
886 if (ret)
887 return ret;
888
889 priv->adv_ts = 0;
890 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
891 if (xmac && priv->dma_cap.atime_stamp)
892 priv->adv_ts = 1;
893 /* Dwmac 3.x core with extend_desc can support adv_ts */
894 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
895 priv->adv_ts = 1;
896
897 if (priv->dma_cap.time_stamp)
898 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
899
900 if (priv->adv_ts)
901 netdev_info(priv->dev,
902 "IEEE 1588-2008 Advanced Timestamp supported\n");
903
904 priv->hwts_tx_en = 0;
905 priv->hwts_rx_en = 0;
906
907 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
908 stmmac_hwtstamp_correct_latency(priv, priv);
909
910 return 0;
911 }
912
913 static void stmmac_release_ptp(struct stmmac_priv *priv)
914 {
915 clk_disable_unprepare(priv->plat->clk_ptp_ref);
916 stmmac_ptp_unregister(priv);
917 }
918
919 /**
920 * stmmac_mac_flow_ctrl - Configure flow control in all queues
921 * @priv: driver private structure
922 * @duplex: duplex passed to the next function
923 * Description: It is used for configuring the flow control in all queues
924 */
925 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
926 {
927 u32 tx_cnt = priv->plat->tx_queues_to_use;
928
929 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
930 priv->pause, tx_cnt);
931 }
932
933 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
934 phy_interface_t interface)
935 {
936 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
937
938 /* Refresh the MAC-specific capabilities */
939 stmmac_mac_update_caps(priv);
940
941 config->mac_capabilities = priv->hw->link.caps;
942
943 if (priv->plat->max_speed)
944 phylink_limit_mac_speed(config, priv->plat->max_speed);
945
946 return config->mac_capabilities;
947 }
948
949 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
950 phy_interface_t interface)
951 {
952 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
953 struct phylink_pcs *pcs;
954
955 if (priv->plat->select_pcs) {
956 pcs = priv->plat->select_pcs(priv, interface);
957 if (!IS_ERR(pcs))
958 return pcs;
959 }
960
961 return NULL;
962 }
963
964 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
965 const struct phylink_link_state *state)
966 {
967 /* Nothing to do, xpcs_config() handles everything */
968 }
969
970 static void stmmac_mac_link_down(struct phylink_config *config,
971 unsigned int mode, phy_interface_t interface)
972 {
973 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
974
975 stmmac_mac_set(priv, priv->ioaddr, false);
976 priv->eee_active = false;
977 priv->tx_lpi_enabled = false;
978 priv->eee_enabled = stmmac_eee_init(priv);
979 stmmac_set_eee_pls(priv, priv->hw, false);
980
981 if (stmmac_fpe_supported(priv))
982 stmmac_fpe_link_state_handle(priv, false);
983 }
984
985 static void stmmac_mac_link_up(struct phylink_config *config,
986 struct phy_device *phy,
987 unsigned int mode, phy_interface_t interface,
988 int speed, int duplex,
989 bool tx_pause, bool rx_pause)
990 {
991 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
992 u32 old_ctrl, ctrl;
993
994 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
995 priv->plat->serdes_powerup)
996 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
997
998 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
999 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1000
1001 if (interface == PHY_INTERFACE_MODE_USXGMII) {
1002 switch (speed) {
1003 case SPEED_10000:
1004 ctrl |= priv->hw->link.xgmii.speed10000;
1005 break;
1006 case SPEED_5000:
1007 ctrl |= priv->hw->link.xgmii.speed5000;
1008 break;
1009 case SPEED_2500:
1010 ctrl |= priv->hw->link.xgmii.speed2500;
1011 break;
1012 default:
1013 return;
1014 }
1015 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1016 switch (speed) {
1017 case SPEED_100000:
1018 ctrl |= priv->hw->link.xlgmii.speed100000;
1019 break;
1020 case SPEED_50000:
1021 ctrl |= priv->hw->link.xlgmii.speed50000;
1022 break;
1023 case SPEED_40000:
1024 ctrl |= priv->hw->link.xlgmii.speed40000;
1025 break;
1026 case SPEED_25000:
1027 ctrl |= priv->hw->link.xlgmii.speed25000;
1028 break;
1029 case SPEED_10000:
1030 ctrl |= priv->hw->link.xgmii.speed10000;
1031 break;
1032 case SPEED_2500:
1033 ctrl |= priv->hw->link.speed2500;
1034 break;
1035 case SPEED_1000:
1036 ctrl |= priv->hw->link.speed1000;
1037 break;
1038 default:
1039 return;
1040 }
1041 } else {
1042 switch (speed) {
1043 case SPEED_2500:
1044 ctrl |= priv->hw->link.speed2500;
1045 break;
1046 case SPEED_1000:
1047 ctrl |= priv->hw->link.speed1000;
1048 break;
1049 case SPEED_100:
1050 ctrl |= priv->hw->link.speed100;
1051 break;
1052 case SPEED_10:
1053 ctrl |= priv->hw->link.speed10;
1054 break;
1055 default:
1056 return;
1057 }
1058 }
1059
1060 priv->speed = speed;
1061
1062 if (priv->plat->fix_mac_speed)
1063 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1064
1065 if (!duplex)
1066 ctrl &= ~priv->hw->link.duplex;
1067 else
1068 ctrl |= priv->hw->link.duplex;
1069
1070 /* Flow Control operation */
1071 if (rx_pause && tx_pause)
1072 priv->flow_ctrl = FLOW_AUTO;
1073 else if (rx_pause && !tx_pause)
1074 priv->flow_ctrl = FLOW_RX;
1075 else if (!rx_pause && tx_pause)
1076 priv->flow_ctrl = FLOW_TX;
1077 else
1078 priv->flow_ctrl = FLOW_OFF;
1079
1080 stmmac_mac_flow_ctrl(priv, duplex);
1081
1082 if (ctrl != old_ctrl)
1083 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1084
1085 stmmac_mac_set(priv, priv->ioaddr, true);
1086 if (phy && priv->dma_cap.eee) {
1087 priv->eee_active =
1088 phy_init_eee(phy, !(priv->plat->flags &
1089 STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1090 priv->eee_enabled = stmmac_eee_init(priv);
1091 priv->tx_lpi_enabled = priv->eee_enabled;
1092 stmmac_set_eee_pls(priv, priv->hw, true);
1093 }
1094
1095 if (stmmac_fpe_supported(priv))
1096 stmmac_fpe_link_state_handle(priv, true);
1097
1098 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1099 stmmac_hwtstamp_correct_latency(priv, priv);
1100 }
1101
1102 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1103 .mac_get_caps = stmmac_mac_get_caps,
1104 .mac_select_pcs = stmmac_mac_select_pcs,
1105 .mac_config = stmmac_mac_config,
1106 .mac_link_down = stmmac_mac_link_down,
1107 .mac_link_up = stmmac_mac_link_up,
1108 };
1109
1110 /**
1111 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1112 * @priv: driver private structure
1113 * Description: this is to verify if the HW supports the PCS.
1114 * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1115 * configured for the TBI, RTBI, or SGMII PHY interface.
1116 */
1117 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1118 {
1119 int interface = priv->plat->mac_interface;
1120
1121 if (priv->dma_cap.pcs) {
1122 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1123 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1124 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1125 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1126 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1127 priv->hw->pcs = STMMAC_PCS_RGMII;
1128 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1129 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1130 priv->hw->pcs = STMMAC_PCS_SGMII;
1131 }
1132 }
1133 }
1134
1135 /**
1136 * stmmac_init_phy - PHY initialization
1137 * @dev: net device structure
1138 * Description: it initializes the driver's PHY state, and attaches the PHY
1139 * to the mac driver.
1140 * Return value:
1141 * 0 on success
1142 */
1143 static int stmmac_init_phy(struct net_device *dev)
1144 {
1145 struct stmmac_priv *priv = netdev_priv(dev);
1146 struct fwnode_handle *phy_fwnode;
1147 struct fwnode_handle *fwnode;
1148 int ret;
1149
1150 if (!phylink_expects_phy(priv->phylink))
1151 return 0;
1152
1153 fwnode = priv->plat->port_node;
1154 if (!fwnode)
1155 fwnode = dev_fwnode(priv->device);
1156
1157 if (fwnode)
1158 phy_fwnode = fwnode_get_phy_node(fwnode);
1159 else
1160 phy_fwnode = NULL;
1161
1162 /* Some DT bindings do not set up the PHY handle. Let's try to
1163 * manually parse it
1164 */
1165 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1166 int addr = priv->plat->phy_addr;
1167 struct phy_device *phydev;
1168
1169 if (addr < 0) {
1170 netdev_err(priv->dev, "no phy found\n");
1171 return -ENODEV;
1172 }
1173
1174 phydev = mdiobus_get_phy(priv->mii, addr);
1175 if (!phydev) {
1176 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1177 return -ENODEV;
1178 }
1179
1180 if (priv->dma_cap.eee)
1181 phy_support_eee(phydev);
1182
1183 ret = phylink_connect_phy(priv->phylink, phydev);
1184 } else {
1185 fwnode_handle_put(phy_fwnode);
1186 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1187 }
1188
1189 if (!priv->plat->pmt) {
1190 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1191
1192 phylink_ethtool_get_wol(priv->phylink, &wol);
1193 device_set_wakeup_capable(priv->device, !!wol.supported);
1194 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1195 }
1196
1197 return ret;
1198 }
1199
1200 static int stmmac_phy_setup(struct stmmac_priv *priv)
1201 {
1202 struct stmmac_mdio_bus_data *mdio_bus_data;
1203 int mode = priv->plat->phy_interface;
1204 struct fwnode_handle *fwnode;
1205 struct phylink *phylink;
1206
1207 priv->phylink_config.dev = &priv->dev->dev;
1208 priv->phylink_config.type = PHYLINK_NETDEV;
1209 priv->phylink_config.mac_managed_pm = true;
1210
1211 /* Stmmac always requires an RX clock for hardware initialization */
1212 priv->phylink_config.mac_requires_rxc = true;
1213
1214 mdio_bus_data = priv->plat->mdio_bus_data;
1215 if (mdio_bus_data)
1216 priv->phylink_config.default_an_inband =
1217 mdio_bus_data->default_an_inband;
1218
1219 /* Set the platform/firmware specified interface mode. Note, phylink
1220 * deals with the PHY interface mode, not the MAC interface mode.
1221 */
1222 __set_bit(mode, priv->phylink_config.supported_interfaces);
1223
1224 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1225 if (priv->hw->xpcs)
1226 xpcs_get_interfaces(priv->hw->xpcs,
1227 priv->phylink_config.supported_interfaces);
1228
1229 fwnode = priv->plat->port_node;
1230 if (!fwnode)
1231 fwnode = dev_fwnode(priv->device);
1232
1233 phylink = phylink_create(&priv->phylink_config, fwnode,
1234 mode, &stmmac_phylink_mac_ops);
1235 if (IS_ERR(phylink))
1236 return PTR_ERR(phylink);
1237
1238 priv->phylink = phylink;
1239 return 0;
1240 }
1241
1242 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1243 struct stmmac_dma_conf *dma_conf)
1244 {
1245 u32 rx_cnt = priv->plat->rx_queues_to_use;
1246 unsigned int desc_size;
1247 void *head_rx;
1248 u32 queue;
1249
1250 /* Display RX rings */
1251 for (queue = 0; queue < rx_cnt; queue++) {
1252 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1253
1254 pr_info("\tRX Queue %u rings\n", queue);
1255
1256 if (priv->extend_desc) {
1257 head_rx = (void *)rx_q->dma_erx;
1258 desc_size = sizeof(struct dma_extended_desc);
1259 } else {
1260 head_rx = (void *)rx_q->dma_rx;
1261 desc_size = sizeof(struct dma_desc);
1262 }
1263
1264 /* Display RX ring */
1265 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1266 rx_q->dma_rx_phy, desc_size);
1267 }
1268 }
1269
1270 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1271 struct stmmac_dma_conf *dma_conf)
1272 {
1273 u32 tx_cnt = priv->plat->tx_queues_to_use;
1274 unsigned int desc_size;
1275 void *head_tx;
1276 u32 queue;
1277
1278 /* Display TX rings */
1279 for (queue = 0; queue < tx_cnt; queue++) {
1280 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1281
1282 pr_info("\tTX Queue %d rings\n", queue);
1283
1284 if (priv->extend_desc) {
1285 head_tx = (void *)tx_q->dma_etx;
1286 desc_size = sizeof(struct dma_extended_desc);
1287 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1288 head_tx = (void *)tx_q->dma_entx;
1289 desc_size = sizeof(struct dma_edesc);
1290 } else {
1291 head_tx = (void *)tx_q->dma_tx;
1292 desc_size = sizeof(struct dma_desc);
1293 }
1294
1295 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1296 tx_q->dma_tx_phy, desc_size);
1297 }
1298 }
1299
1300 static void stmmac_display_rings(struct stmmac_priv *priv,
1301 struct stmmac_dma_conf *dma_conf)
1302 {
1303 /* Display RX ring */
1304 stmmac_display_rx_rings(priv, dma_conf);
1305
1306 /* Display TX ring */
1307 stmmac_display_tx_rings(priv, dma_conf);
1308 }
1309
1310 static int stmmac_set_bfsize(int mtu, int bufsize)
1311 {
1312 int ret = bufsize;
1313
1314 if (mtu >= BUF_SIZE_8KiB)
1315 ret = BUF_SIZE_16KiB;
1316 else if (mtu >= BUF_SIZE_4KiB)
1317 ret = BUF_SIZE_8KiB;
1318 else if (mtu >= BUF_SIZE_2KiB)
1319 ret = BUF_SIZE_4KiB;
1320 else if (mtu > DEFAULT_BUFSIZE)
1321 ret = BUF_SIZE_2KiB;
1322 else
1323 ret = DEFAULT_BUFSIZE;
1324
1325 return ret;
1326 }
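/* For example, the default 1500-byte MTU keeps DEFAULT_BUFSIZE (1536 bytes),
 * while a 9000-byte jumbo MTU selects BUF_SIZE_16KiB.
 */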
1327
1328 /**
1329 * stmmac_clear_rx_descriptors - clear RX descriptors
1330 * @priv: driver private structure
1331 * @dma_conf: structure to take the dma data
1332 * @queue: RX queue index
1333 * Description: this function is called to clear the RX descriptors
1334 * whether basic or extended descriptors are in use.
1335 */
1336 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1337 struct stmmac_dma_conf *dma_conf,
1338 u32 queue)
1339 {
1340 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1341 int i;
1342
1343 /* Clear the RX descriptors */
1344 for (i = 0; i < dma_conf->dma_rx_size; i++)
1345 if (priv->extend_desc)
1346 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1347 priv->use_riwt, priv->mode,
1348 (i == dma_conf->dma_rx_size - 1),
1349 dma_conf->dma_buf_sz);
1350 else
1351 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1352 priv->use_riwt, priv->mode,
1353 (i == dma_conf->dma_rx_size - 1),
1354 dma_conf->dma_buf_sz);
1355 }
1356
1357 /**
1358 * stmmac_clear_tx_descriptors - clear tx descriptors
1359 * @priv: driver private structure
1360 * @dma_conf: structure to take the dma data
1361 * @queue: TX queue index.
1362 * Description: this function is called to clear the TX descriptors
1363 * whether basic or extended descriptors are in use.
1364 */
1365 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1366 struct stmmac_dma_conf *dma_conf,
1367 u32 queue)
1368 {
1369 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1370 int i;
1371
1372 /* Clear the TX descriptors */
1373 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1374 int last = (i == (dma_conf->dma_tx_size - 1));
1375 struct dma_desc *p;
1376
1377 if (priv->extend_desc)
1378 p = &tx_q->dma_etx[i].basic;
1379 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1380 p = &tx_q->dma_entx[i].basic;
1381 else
1382 p = &tx_q->dma_tx[i];
1383
1384 stmmac_init_tx_desc(priv, p, priv->mode, last);
1385 }
1386 }
1387
1388 /**
1389 * stmmac_clear_descriptors - clear descriptors
1390 * @priv: driver private structure
1391 * @dma_conf: structure to take the dma data
1392 * Description: this function is called to clear the TX and RX descriptors
1393 * whether basic or extended descriptors are in use.
1394 */
1395 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1396 struct stmmac_dma_conf *dma_conf)
1397 {
1398 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1399 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1400 u32 queue;
1401
1402 /* Clear the RX descriptors */
1403 for (queue = 0; queue < rx_queue_cnt; queue++)
1404 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1405
1406 /* Clear the TX descriptors */
1407 for (queue = 0; queue < tx_queue_cnt; queue++)
1408 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1409 }
1410
1411 /**
1412 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1413 * @priv: driver private structure
1414 * @dma_conf: structure to take the dma data
1415 * @p: descriptor pointer
1416 * @i: descriptor index
1417 * @flags: gfp flag
1418 * @queue: RX queue index
1419 * Description: this function is called to allocate a receive buffer, perform
1420 * the DMA mapping and init the descriptor.
1421 */
1422 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1423 struct stmmac_dma_conf *dma_conf,
1424 struct dma_desc *p,
1425 int i, gfp_t flags, u32 queue)
1426 {
1427 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1428 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1429 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1430
1431 if (priv->dma_cap.host_dma_width <= 32)
1432 gfp |= GFP_DMA32;
1433
1434 if (!buf->page) {
1435 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1436 if (!buf->page)
1437 return -ENOMEM;
1438 buf->page_offset = stmmac_rx_offset(priv);
1439 }
1440
1441 if (priv->sph && !buf->sec_page) {
1442 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1443 if (!buf->sec_page)
1444 return -ENOMEM;
1445
1446 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1447 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1448 } else {
1449 buf->sec_page = NULL;
1450 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1451 }
1452
1453 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1454
1455 stmmac_set_desc_addr(priv, p, buf->addr);
1456 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1457 stmmac_init_desc3(priv, p);
1458
1459 return 0;
1460 }
1461
1462 /**
1463 * stmmac_free_rx_buffer - free RX dma buffers
1464 * @priv: private structure
1465 * @rx_q: RX queue
1466 * @i: buffer index.
1467 */
1468 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1469 struct stmmac_rx_queue *rx_q,
1470 int i)
1471 {
1472 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1473
1474 if (buf->page)
1475 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1476 buf->page = NULL;
1477
1478 if (buf->sec_page)
1479 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1480 buf->sec_page = NULL;
1481 }
1482
1483 /**
1484 * stmmac_free_tx_buffer - free TX dma buffers
1485 * @priv: private structure
1486 * @dma_conf: structure to take the dma data
1487 * @queue: TX queue index
1488 * @i: buffer index.
1489 */
1490 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1491 struct stmmac_dma_conf *dma_conf,
1492 u32 queue, int i)
1493 {
1494 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1495
1496 if (tx_q->tx_skbuff_dma[i].buf &&
1497 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1498 if (tx_q->tx_skbuff_dma[i].map_as_page)
1499 dma_unmap_page(priv->device,
1500 tx_q->tx_skbuff_dma[i].buf,
1501 tx_q->tx_skbuff_dma[i].len,
1502 DMA_TO_DEVICE);
1503 else
1504 dma_unmap_single(priv->device,
1505 tx_q->tx_skbuff_dma[i].buf,
1506 tx_q->tx_skbuff_dma[i].len,
1507 DMA_TO_DEVICE);
1508 }
1509
1510 if (tx_q->xdpf[i] &&
1511 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1512 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1513 xdp_return_frame(tx_q->xdpf[i]);
1514 tx_q->xdpf[i] = NULL;
1515 }
1516
1517 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1518 tx_q->xsk_frames_done++;
1519
1520 if (tx_q->tx_skbuff[i] &&
1521 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1522 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1523 tx_q->tx_skbuff[i] = NULL;
1524 }
1525
1526 tx_q->tx_skbuff_dma[i].buf = 0;
1527 tx_q->tx_skbuff_dma[i].map_as_page = false;
1528 }
1529
1530 /**
1531 * dma_free_rx_skbufs - free RX dma buffers
1532 * @priv: private structure
1533 * @dma_conf: structure to take the dma data
1534 * @queue: RX queue index
1535 */
1536 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1537 struct stmmac_dma_conf *dma_conf,
1538 u32 queue)
1539 {
1540 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1541 int i;
1542
1543 for (i = 0; i < dma_conf->dma_rx_size; i++)
1544 stmmac_free_rx_buffer(priv, rx_q, i);
1545 }
1546
1547 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1548 struct stmmac_dma_conf *dma_conf,
1549 u32 queue, gfp_t flags)
1550 {
1551 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1552 int i;
1553
1554 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1555 struct dma_desc *p;
1556 int ret;
1557
1558 if (priv->extend_desc)
1559 p = &((rx_q->dma_erx + i)->basic);
1560 else
1561 p = rx_q->dma_rx + i;
1562
1563 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1564 queue);
1565 if (ret)
1566 return ret;
1567
1568 rx_q->buf_alloc_num++;
1569 }
1570
1571 return 0;
1572 }
1573
1574 /**
1575 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1576 * @priv: private structure
1577 * @dma_conf: structure to take the dma data
1578 * @queue: RX queue index
1579 */
1580 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1581 struct stmmac_dma_conf *dma_conf,
1582 u32 queue)
1583 {
1584 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1585 int i;
1586
1587 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1588 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1589
1590 if (!buf->xdp)
1591 continue;
1592
1593 xsk_buff_free(buf->xdp);
1594 buf->xdp = NULL;
1595 }
1596 }
1597
1598 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1599 struct stmmac_dma_conf *dma_conf,
1600 u32 queue)
1601 {
1602 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1603 int i;
1604
1605 /* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
1606 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1607 * use this macro to make sure there are no size violations.
1608 */
1609 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1610
1611 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1612 struct stmmac_rx_buffer *buf;
1613 dma_addr_t dma_addr;
1614 struct dma_desc *p;
1615
1616 if (priv->extend_desc)
1617 p = (struct dma_desc *)(rx_q->dma_erx + i);
1618 else
1619 p = rx_q->dma_rx + i;
1620
1621 buf = &rx_q->buf_pool[i];
1622
1623 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1624 if (!buf->xdp)
1625 return -ENOMEM;
1626
1627 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1628 stmmac_set_desc_addr(priv, p, dma_addr);
1629 rx_q->buf_alloc_num++;
1630 }
1631
1632 return 0;
1633 }
1634
1635 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1636 {
1637 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1638 return NULL;
1639
1640 return xsk_get_pool_from_qid(priv->dev, queue);
1641 }
1642
1643 /**
1644 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1645 * @priv: driver private structure
1646 * @dma_conf: structure to take the dma data
1647 * @queue: RX queue index
1648 * @flags: gfp flag.
1649 * Description: this function initializes the DMA RX descriptors
1650 * and allocates the socket buffers. It supports the chained and ring
1651 * modes.
1652 */
1653 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1654 struct stmmac_dma_conf *dma_conf,
1655 u32 queue, gfp_t flags)
1656 {
1657 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1658 int ret;
1659
1660 netif_dbg(priv, probe, priv->dev,
1661 "(%s) dma_rx_phy=0x%08x\n", __func__,
1662 (u32)rx_q->dma_rx_phy);
1663
1664 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1665
1666 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1667
1668 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1669
1670 if (rx_q->xsk_pool) {
1671 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1672 MEM_TYPE_XSK_BUFF_POOL,
1673 NULL));
1674 netdev_info(priv->dev,
1675 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1676 rx_q->queue_index);
1677 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1678 } else {
1679 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1680 MEM_TYPE_PAGE_POOL,
1681 rx_q->page_pool));
1682 netdev_info(priv->dev,
1683 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1684 rx_q->queue_index);
1685 }
1686
1687 if (rx_q->xsk_pool) {
1688 /* RX XDP ZC buffer pool may not be populated, e.g.
1689 * xdpsock TX-only.
1690 */
1691 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1692 } else {
1693 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1694 if (ret < 0)
1695 return -ENOMEM;
1696 }
1697
1698 /* Setup the chained descriptor addresses */
1699 if (priv->mode == STMMAC_CHAIN_MODE) {
1700 if (priv->extend_desc)
1701 stmmac_mode_init(priv, rx_q->dma_erx,
1702 rx_q->dma_rx_phy,
1703 dma_conf->dma_rx_size, 1);
1704 else
1705 stmmac_mode_init(priv, rx_q->dma_rx,
1706 rx_q->dma_rx_phy,
1707 dma_conf->dma_rx_size, 0);
1708 }
1709
1710 return 0;
1711 }
1712
1713 static int init_dma_rx_desc_rings(struct net_device *dev,
1714 struct stmmac_dma_conf *dma_conf,
1715 gfp_t flags)
1716 {
1717 struct stmmac_priv *priv = netdev_priv(dev);
1718 u32 rx_count = priv->plat->rx_queues_to_use;
1719 int queue;
1720 int ret;
1721
1722 /* RX INITIALIZATION */
1723 netif_dbg(priv, probe, priv->dev,
1724 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1725
1726 for (queue = 0; queue < rx_count; queue++) {
1727 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1728 if (ret)
1729 goto err_init_rx_buffers;
1730 }
1731
1732 return 0;
1733
1734 err_init_rx_buffers:
1735 while (queue >= 0) {
1736 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1737
1738 if (rx_q->xsk_pool)
1739 dma_free_rx_xskbufs(priv, dma_conf, queue);
1740 else
1741 dma_free_rx_skbufs(priv, dma_conf, queue);
1742
1743 rx_q->buf_alloc_num = 0;
1744 rx_q->xsk_pool = NULL;
1745
1746 queue--;
1747 }
1748
1749 return ret;
1750 }
1751
1752 /**
1753 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1754 * @priv: driver private structure
1755 * @dma_conf: structure to take the dma data
1756 * @queue: TX queue index
1757 * Description: this function initializes the DMA TX descriptors
1758 * and allocates the socket buffers. It supports the chained and ring
1759 * modes.
1760 */
1761 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1762 struct stmmac_dma_conf *dma_conf,
1763 u32 queue)
1764 {
1765 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1766 int i;
1767
1768 netif_dbg(priv, probe, priv->dev,
1769 "(%s) dma_tx_phy=0x%08x\n", __func__,
1770 (u32)tx_q->dma_tx_phy);
1771
1772 /* Setup the chained descriptor addresses */
1773 if (priv->mode == STMMAC_CHAIN_MODE) {
1774 if (priv->extend_desc)
1775 stmmac_mode_init(priv, tx_q->dma_etx,
1776 tx_q->dma_tx_phy,
1777 dma_conf->dma_tx_size, 1);
1778 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1779 stmmac_mode_init(priv, tx_q->dma_tx,
1780 tx_q->dma_tx_phy,
1781 dma_conf->dma_tx_size, 0);
1782 }
1783
1784 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1785
1786 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1787 struct dma_desc *p;
1788
1789 if (priv->extend_desc)
1790 p = &((tx_q->dma_etx + i)->basic);
1791 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1792 p = &((tx_q->dma_entx + i)->basic);
1793 else
1794 p = tx_q->dma_tx + i;
1795
1796 stmmac_clear_desc(priv, p);
1797
1798 tx_q->tx_skbuff_dma[i].buf = 0;
1799 tx_q->tx_skbuff_dma[i].map_as_page = false;
1800 tx_q->tx_skbuff_dma[i].len = 0;
1801 tx_q->tx_skbuff_dma[i].last_segment = false;
1802 tx_q->tx_skbuff[i] = NULL;
1803 }
1804
1805 return 0;
1806 }
1807
1808 static int init_dma_tx_desc_rings(struct net_device *dev,
1809 struct stmmac_dma_conf *dma_conf)
1810 {
1811 struct stmmac_priv *priv = netdev_priv(dev);
1812 u32 tx_queue_cnt;
1813 u32 queue;
1814
1815 tx_queue_cnt = priv->plat->tx_queues_to_use;
1816
1817 for (queue = 0; queue < tx_queue_cnt; queue++)
1818 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1819
1820 return 0;
1821 }
1822
1823 /**
1824 * init_dma_desc_rings - init the RX/TX descriptor rings
1825 * @dev: net device structure
1826 * @dma_conf: structure to take the dma data
1827 * @flags: gfp flag.
1828 * Description: this function initializes the DMA RX/TX descriptors
1829 * and allocates the socket buffers. It supports the chained and ring
1830 * modes.
1831 */
1832 static int init_dma_desc_rings(struct net_device *dev,
1833 struct stmmac_dma_conf *dma_conf,
1834 gfp_t flags)
1835 {
1836 struct stmmac_priv *priv = netdev_priv(dev);
1837 int ret;
1838
1839 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1840 if (ret)
1841 return ret;
1842
1843 ret = init_dma_tx_desc_rings(dev, dma_conf);
1844
1845 stmmac_clear_descriptors(priv, dma_conf);
1846
1847 if (netif_msg_hw(priv))
1848 stmmac_display_rings(priv, dma_conf);
1849
1850 return ret;
1851 }
1852
1853 /**
1854 * dma_free_tx_skbufs - free TX dma buffers
1855 * @priv: private structure
1856 * @dma_conf: structure to take the dma data
1857 * @queue: TX queue index
1858 */
1859 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1860 struct stmmac_dma_conf *dma_conf,
1861 u32 queue)
1862 {
1863 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1864 int i;
1865
1866 tx_q->xsk_frames_done = 0;
1867
1868 for (i = 0; i < dma_conf->dma_tx_size; i++)
1869 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1870
1871 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1872 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1873 tx_q->xsk_frames_done = 0;
1874 tx_q->xsk_pool = NULL;
1875 }
1876 }
1877
1878 /**
1879 * stmmac_free_tx_skbufs - free TX skb buffers
1880 * @priv: private structure
1881 */
1882 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1883 {
1884 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1885 u32 queue;
1886
1887 for (queue = 0; queue < tx_queue_cnt; queue++)
1888 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1889 }
1890
1891 /**
1892 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1893 * @priv: private structure
1894 * @dma_conf: structure to take the dma data
1895 * @queue: RX queue index
1896 */
1897 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1898 struct stmmac_dma_conf *dma_conf,
1899 u32 queue)
1900 {
1901 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1902
1903 /* Release the DMA RX socket buffers */
1904 if (rx_q->xsk_pool)
1905 dma_free_rx_xskbufs(priv, dma_conf, queue);
1906 else
1907 dma_free_rx_skbufs(priv, dma_conf, queue);
1908
1909 rx_q->buf_alloc_num = 0;
1910 rx_q->xsk_pool = NULL;
1911
1912 /* Free DMA regions of consistent memory previously allocated */
1913 if (!priv->extend_desc)
1914 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1915 sizeof(struct dma_desc),
1916 rx_q->dma_rx, rx_q->dma_rx_phy);
1917 else
1918 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1919 sizeof(struct dma_extended_desc),
1920 rx_q->dma_erx, rx_q->dma_rx_phy);
1921
1922 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1923 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1924
1925 kfree(rx_q->buf_pool);
1926 if (rx_q->page_pool)
1927 page_pool_destroy(rx_q->page_pool);
1928 }
1929
1930 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1931 struct stmmac_dma_conf *dma_conf)
1932 {
1933 u32 rx_count = priv->plat->rx_queues_to_use;
1934 u32 queue;
1935
1936 /* Free RX queue resources */
1937 for (queue = 0; queue < rx_count; queue++)
1938 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1939 }
1940
1941 /**
1942 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1943 * @priv: private structure
1944 * @dma_conf: structure to take the dma data
1945 * @queue: TX queue index
1946 */
1947 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1948 struct stmmac_dma_conf *dma_conf,
1949 u32 queue)
1950 {
1951 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1952 size_t size;
1953 void *addr;
1954
1955 /* Release the DMA TX socket buffers */
1956 dma_free_tx_skbufs(priv, dma_conf, queue);
1957
1958 if (priv->extend_desc) {
1959 size = sizeof(struct dma_extended_desc);
1960 addr = tx_q->dma_etx;
1961 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1962 size = sizeof(struct dma_edesc);
1963 addr = tx_q->dma_entx;
1964 } else {
1965 size = sizeof(struct dma_desc);
1966 addr = tx_q->dma_tx;
1967 }
1968
1969 size *= dma_conf->dma_tx_size;
1970
1971 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1972
1973 kfree(tx_q->tx_skbuff_dma);
1974 kfree(tx_q->tx_skbuff);
1975 }
1976
1977 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1978 struct stmmac_dma_conf *dma_conf)
1979 {
1980 u32 tx_count = priv->plat->tx_queues_to_use;
1981 u32 queue;
1982
1983 /* Free TX queue resources */
1984 for (queue = 0; queue < tx_count; queue++)
1985 __free_dma_tx_desc_resources(priv, dma_conf, queue);
1986 }
1987
1988 /**
1989 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
1990 * @priv: private structure
1991 * @dma_conf: structure to take the dma data
1992 * @queue: RX queue index
1993 * Description: according to which descriptor can be used (extended or basic)
1994 * this function allocates the resources for the TX and RX paths. In case of
1995 * reception, for example, it pre-allocates the RX socket buffers in order to
1996 * allow the zero-copy mechanism.
1997 */
1998 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
1999 struct stmmac_dma_conf *dma_conf,
2000 u32 queue)
2001 {
2002 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2003 struct stmmac_channel *ch = &priv->channel[queue];
2004 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2005 struct page_pool_params pp_params = { 0 };
2006 unsigned int num_pages;
2007 unsigned int napi_id;
2008 int ret;
2009
2010 rx_q->queue_index = queue;
2011 rx_q->priv_data = priv;
2012
2013 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2014 pp_params.pool_size = dma_conf->dma_rx_size;
2015 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2016 pp_params.order = ilog2(num_pages);
2017 pp_params.nid = dev_to_node(priv->device);
2018 pp_params.dev = priv->device;
2019 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2020 pp_params.offset = stmmac_rx_offset(priv);
2021 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2022
2023 rx_q->page_pool = page_pool_create(&pp_params);
2024 if (IS_ERR(rx_q->page_pool)) {
2025 ret = PTR_ERR(rx_q->page_pool);
2026 rx_q->page_pool = NULL;
2027 return ret;
2028 }
2029
2030 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2031 sizeof(*rx_q->buf_pool),
2032 GFP_KERNEL);
2033 if (!rx_q->buf_pool)
2034 return -ENOMEM;
2035
2036 if (priv->extend_desc) {
2037 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2038 dma_conf->dma_rx_size *
2039 sizeof(struct dma_extended_desc),
2040 &rx_q->dma_rx_phy,
2041 GFP_KERNEL);
2042 if (!rx_q->dma_erx)
2043 return -ENOMEM;
2044
2045 } else {
2046 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2047 dma_conf->dma_rx_size *
2048 sizeof(struct dma_desc),
2049 &rx_q->dma_rx_phy,
2050 GFP_KERNEL);
2051 if (!rx_q->dma_rx)
2052 return -ENOMEM;
2053 }
2054
2055 if (stmmac_xdp_is_enabled(priv) &&
2056 test_bit(queue, priv->af_xdp_zc_qps))
2057 napi_id = ch->rxtx_napi.napi_id;
2058 else
2059 napi_id = ch->rx_napi.napi_id;
2060
2061 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2062 rx_q->queue_index,
2063 napi_id);
2064 if (ret) {
2065 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2066 return -EINVAL;
2067 }
2068
2069 return 0;
2070 }
2071
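/* Allocate the per-queue RX resources (page pool, buffer bookkeeping,
 * descriptor memory, XDP rxq info) for all RX queues in use, releasing
 * everything already allocated if any queue fails.
 */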
2072 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2073 struct stmmac_dma_conf *dma_conf)
2074 {
2075 u32 rx_count = priv->plat->rx_queues_to_use;
2076 u32 queue;
2077 int ret;
2078
2079 /* RX queues buffers and DMA */
2080 for (queue = 0; queue < rx_count; queue++) {
2081 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2082 if (ret)
2083 goto err_dma;
2084 }
2085
2086 return 0;
2087
2088 err_dma:
2089 free_dma_rx_desc_resources(priv, dma_conf);
2090
2091 return ret;
2092 }
2093
2094 /**
2095 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2096 * @priv: private structure
2097 * @dma_conf: structure to take the dma data
2098 * @queue: TX queue index
2099 * Description: according to which descriptor can be used (extended or basic)
2100 * this function allocates the resources for the TX and RX paths. In case of
2101 * reception, for example, it pre-allocates the RX socket buffers in order to
2102 * allow the zero-copy mechanism.
2103 */
2104 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2105 struct stmmac_dma_conf *dma_conf,
2106 u32 queue)
2107 {
2108 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2109 size_t size;
2110 void *addr;
2111
2112 tx_q->queue_index = queue;
2113 tx_q->priv_data = priv;
2114
2115 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2116 sizeof(*tx_q->tx_skbuff_dma),
2117 GFP_KERNEL);
2118 if (!tx_q->tx_skbuff_dma)
2119 return -ENOMEM;
2120
2121 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2122 sizeof(struct sk_buff *),
2123 GFP_KERNEL);
2124 if (!tx_q->tx_skbuff)
2125 return -ENOMEM;
2126
2127 if (priv->extend_desc)
2128 size = sizeof(struct dma_extended_desc);
2129 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2130 size = sizeof(struct dma_edesc);
2131 else
2132 size = sizeof(struct dma_desc);
2133
2134 size *= dma_conf->dma_tx_size;
2135
2136 addr = dma_alloc_coherent(priv->device, size,
2137 &tx_q->dma_tx_phy, GFP_KERNEL);
2138 if (!addr)
2139 return -ENOMEM;
2140
2141 if (priv->extend_desc)
2142 tx_q->dma_etx = addr;
2143 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2144 tx_q->dma_entx = addr;
2145 else
2146 tx_q->dma_tx = addr;
2147
2148 return 0;
2149 }
2150
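/* Allocate the per-queue TX resources (skbuff bookkeeping and descriptor
 * memory) for all TX queues in use, releasing everything already
 * allocated if any queue fails.
 */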
2151 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2152 struct stmmac_dma_conf *dma_conf)
2153 {
2154 u32 tx_count = priv->plat->tx_queues_to_use;
2155 u32 queue;
2156 int ret;
2157
2158 /* TX queues buffers and DMA */
2159 for (queue = 0; queue < tx_count; queue++) {
2160 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2161 if (ret)
2162 goto err_dma;
2163 }
2164
2165 return 0;
2166
2167 err_dma:
2168 free_dma_tx_desc_resources(priv, dma_conf);
2169 return ret;
2170 }
2171
2172 /**
2173 * alloc_dma_desc_resources - alloc TX/RX resources.
2174 * @priv: private structure
2175 * @dma_conf: structure to take the dma data
2176 * Description: according to which descriptor can be used (extended or basic)
2177 * this function allocates the resources for the TX and RX paths. In case of
2178 * reception, for example, it pre-allocates the RX socket buffers in order to
2179 * allow the zero-copy mechanism.
2180 */
2181 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2182 struct stmmac_dma_conf *dma_conf)
2183 {
2184 /* RX Allocation */
2185 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2186
2187 if (ret)
2188 return ret;
2189
2190 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2191
2192 return ret;
2193 }
2194
2195 /**
2196 * free_dma_desc_resources - free dma desc resources
2197 * @priv: private structure
2198 * @dma_conf: structure to take the dma data
2199 */
2200 static void free_dma_desc_resources(struct stmmac_priv *priv,
2201 struct stmmac_dma_conf *dma_conf)
2202 {
2203 /* Release the DMA TX socket buffers */
2204 free_dma_tx_desc_resources(priv, dma_conf);
2205
2206 /* Release the DMA RX socket buffers later
2207 * to ensure all pending XDP_TX buffers are returned.
2208 */
2209 free_dma_rx_desc_resources(priv, dma_conf);
2210 }
2211
2212 /**
2213 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2214 * @priv: driver private structure
2215 * Description: It is used for enabling the rx queues in the MAC
2216 */
2217 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2218 {
2219 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2220 int queue;
2221 u8 mode;
2222
2223 for (queue = 0; queue < rx_queues_count; queue++) {
2224 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2225 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2226 }
2227 }
2228
2229 /**
2230 * stmmac_start_rx_dma - start RX DMA channel
2231 * @priv: driver private structure
2232 * @chan: RX channel index
2233 * Description:
2234 * This starts a RX DMA channel
2235 */
2236 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2237 {
2238 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2239 stmmac_start_rx(priv, priv->ioaddr, chan);
2240 }
2241
2242 /**
2243 * stmmac_start_tx_dma - start TX DMA channel
2244 * @priv: driver private structure
2245 * @chan: TX channel index
2246 * Description:
2247 * This starts a TX DMA channel
2248 */
2249 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2250 {
2251 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2252 stmmac_start_tx(priv, priv->ioaddr, chan);
2253 }
2254
2255 /**
2256 * stmmac_stop_rx_dma - stop RX DMA channel
2257 * @priv: driver private structure
2258 * @chan: RX channel index
2259 * Description:
2260 * This stops a RX DMA channel
2261 */
2262 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2263 {
2264 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2265 stmmac_stop_rx(priv, priv->ioaddr, chan);
2266 }
2267
2268 /**
2269 * stmmac_stop_tx_dma - stop TX DMA channel
2270 * @priv: driver private structure
2271 * @chan: TX channel index
2272 * Description:
2273 * This stops a TX DMA channel
2274 */
2275 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2276 {
2277 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2278 stmmac_stop_tx(priv, priv->ioaddr, chan);
2279 }
2280
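/* Re-enable RX and TX DMA interrupts on every DMA channel, taking the
 * per-channel lock so this does not race with the NAPI handlers.
 */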
2281 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2282 {
2283 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2284 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2285 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2286 u32 chan;
2287
2288 for (chan = 0; chan < dma_csr_ch; chan++) {
2289 struct stmmac_channel *ch = &priv->channel[chan];
2290 unsigned long flags;
2291
2292 spin_lock_irqsave(&ch->lock, flags);
2293 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2294 spin_unlock_irqrestore(&ch->lock, flags);
2295 }
2296 }
2297
2298 /**
2299 * stmmac_start_all_dma - start all RX and TX DMA channels
2300 * @priv: driver private structure
2301 * Description:
2302 * This starts all the RX and TX DMA channels
2303 */
2304 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2305 {
2306 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 u32 chan = 0;
2309
2310 for (chan = 0; chan < rx_channels_count; chan++)
2311 stmmac_start_rx_dma(priv, chan);
2312
2313 for (chan = 0; chan < tx_channels_count; chan++)
2314 stmmac_start_tx_dma(priv, chan);
2315 }
2316
2317 /**
2318 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2319 * @priv: driver private structure
2320 * Description:
2321 * This stops the RX and TX DMA channels
2322 */
2323 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2324 {
2325 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2326 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2327 u32 chan = 0;
2328
2329 for (chan = 0; chan < rx_channels_count; chan++)
2330 stmmac_stop_rx_dma(priv, chan);
2331
2332 for (chan = 0; chan < tx_channels_count; chan++)
2333 stmmac_stop_tx_dma(priv, chan);
2334 }
2335
2336 /**
2337 * stmmac_dma_operation_mode - HW DMA operation mode
2338 * @priv: driver private structure
2339 * Description: it is used for configuring the DMA operation mode register in
2340 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2341 */
2342 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2343 {
2344 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2345 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2346 int rxfifosz = priv->plat->rx_fifo_size;
2347 int txfifosz = priv->plat->tx_fifo_size;
2348 u32 txmode = 0;
2349 u32 rxmode = 0;
2350 u32 chan = 0;
2351 u8 qmode = 0;
2352
2353 if (rxfifosz == 0)
2354 rxfifosz = priv->dma_cap.rx_fifo_size;
2355 if (txfifosz == 0)
2356 txfifosz = priv->dma_cap.tx_fifo_size;
2357
2358 /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2359 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2360 rxfifosz /= rx_channels_count;
2361 txfifosz /= tx_channels_count;
2362 }
2363
2364 if (priv->plat->force_thresh_dma_mode) {
2365 txmode = tc;
2366 rxmode = tc;
2367 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2368 /*
2369 * In case of GMAC, SF mode can be enabled
2370 * to perform the TX COE in HW. This depends on:
2371 * 1) TX COE being actually supported
2372 * 2) there being no buggy Jumbo frame support
2373 * that requires not inserting the csum in the TDES.
2374 */
2375 txmode = SF_DMA_MODE;
2376 rxmode = SF_DMA_MODE;
2377 priv->xstats.threshold = SF_DMA_MODE;
2378 } else {
2379 txmode = tc;
2380 rxmode = SF_DMA_MODE;
2381 }
2382
2383 /* configure all channels */
2384 for (chan = 0; chan < rx_channels_count; chan++) {
2385 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2386 u32 buf_size;
2387
2388 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2389
2390 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2391 rxfifosz, qmode);
2392
2393 if (rx_q->xsk_pool) {
2394 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2395 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2396 buf_size,
2397 chan);
2398 } else {
2399 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2400 priv->dma_conf.dma_buf_sz,
2401 chan);
2402 }
2403 }
2404
2405 for (chan = 0; chan < tx_channels_count; chan++) {
2406 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2407
2408 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2409 txfifosz, qmode);
2410 }
2411 }
2412
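/* AF_XDP TX metadata callbacks: request a hardware TX timestamp for a
 * descriptor (forcing an interrupt on completion) and read the timestamp
 * back on completion, adjusted for the CDC error.
 */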
2413 static void stmmac_xsk_request_timestamp(void *_priv)
2414 {
2415 struct stmmac_metadata_request *meta_req = _priv;
2416
2417 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2418 *meta_req->set_ic = true;
2419 }
2420
2421 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2422 {
2423 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2424 struct stmmac_priv *priv = tx_compl->priv;
2425 struct dma_desc *desc = tx_compl->desc;
2426 bool found = false;
2427 u64 ns = 0;
2428
2429 if (!priv->hwts_tx_en)
2430 return 0;
2431
2432 /* check tx tstamp status */
2433 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2434 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2435 found = true;
2436 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2437 found = true;
2438 }
2439
2440 if (found) {
2441 ns -= priv->plat->cdc_error_adj;
2442 return ns_to_ktime(ns);
2443 }
2444
2445 return 0;
2446 }
2447
2448 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2449 .tmo_request_timestamp = stmmac_xsk_request_timestamp,
2450 .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
2451 };
2452
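/* Transmit pending AF_XDP (zero-copy) descriptors on @queue, sending at
 * most @budget frames. The return value tells the caller whether the XSK
 * TX work completed within the budget (see the comment before the return).
 */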
2453 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2454 {
2455 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2456 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2457 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2458 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2459 unsigned int entry = tx_q->cur_tx;
2460 struct dma_desc *tx_desc = NULL;
2461 struct xdp_desc xdp_desc;
2462 bool work_done = true;
2463 u32 tx_set_ic_bit = 0;
2464
2465 /* Avoids TX time-out as we are sharing with slow path */
2466 txq_trans_cond_update(nq);
2467
2468 budget = min(budget, stmmac_tx_avail(priv, queue));
2469
2470 while (budget-- > 0) {
2471 struct stmmac_metadata_request meta_req;
2472 struct xsk_tx_metadata *meta = NULL;
2473 dma_addr_t dma_addr;
2474 bool set_ic;
2475
2476 /* We are sharing the ring with the slow path and stop XSK TX desc
2477 * submission when the available TX ring space is below the threshold.
2478 */
2479 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2480 !netif_carrier_ok(priv->dev)) {
2481 work_done = false;
2482 break;
2483 }
2484
2485 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2486 break;
2487
2488 if (priv->est && priv->est->enable &&
2489 priv->est->max_sdu[queue] &&
2490 xdp_desc.len > priv->est->max_sdu[queue]) {
2491 priv->xstats.max_sdu_txq_drop[queue]++;
2492 continue;
2493 }
2494
2495 if (likely(priv->extend_desc))
2496 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2497 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2498 tx_desc = &tx_q->dma_entx[entry].basic;
2499 else
2500 tx_desc = tx_q->dma_tx + entry;
2501
2502 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2503 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2504 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2505
2506 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2507
2508 /* To return the XDP buffer to the XSK pool, we simply call
2509 * xsk_tx_completed(), so we don't need to fill up
2510 * 'buf' and 'xdpf'.
2511 */
2512 tx_q->tx_skbuff_dma[entry].buf = 0;
2513 tx_q->xdpf[entry] = NULL;
2514
2515 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2516 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2517 tx_q->tx_skbuff_dma[entry].last_segment = true;
2518 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2519
2520 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2521
2522 tx_q->tx_count_frames++;
2523
2524 if (!priv->tx_coal_frames[queue])
2525 set_ic = false;
2526 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2527 set_ic = true;
2528 else
2529 set_ic = false;
2530
2531 meta_req.priv = priv;
2532 meta_req.tx_desc = tx_desc;
2533 meta_req.set_ic = &set_ic;
2534 xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2535 &meta_req);
2536 if (set_ic) {
2537 tx_q->tx_count_frames = 0;
2538 stmmac_set_tx_ic(priv, tx_desc);
2539 tx_set_ic_bit++;
2540 }
2541
2542 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2543 true, priv->mode, true, true,
2544 xdp_desc.len);
2545
2546 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2547
2548 xsk_tx_metadata_to_compl(meta,
2549 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2550
2551 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2552 entry = tx_q->cur_tx;
2553 }
2554 u64_stats_update_begin(&txq_stats->napi_syncp);
2555 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2556 u64_stats_update_end(&txq_stats->napi_syncp);
2557
2558 if (tx_desc) {
2559 stmmac_flush_tx_descriptors(priv, queue);
2560 xsk_tx_release(pool);
2561 }
2562
2563 /* Return true if both of the following conditions are met:
2564 * a) TX budget is still available
2565 * b) work_done == true, i.e. the XSK TX desc peek found nothing (no
2566 * more pending XSK TX for transmission)
2567 */
2568 return !!budget && work_done;
2569 }
2570
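/* Bump the TX DMA threshold (tc) by 64 after a threshold-related TX error,
 * as long as the current threshold does not exceed 256, and reprogram the
 * DMA operation mode of the channel accordingly.
 */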
2571 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2572 {
2573 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2574 tc += 64;
2575
2576 if (priv->plat->force_thresh_dma_mode)
2577 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2578 else
2579 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2580 chan);
2581
2582 priv->xstats.threshold = tc;
2583 }
2584 }
2585
2586 /**
2587 * stmmac_tx_clean - to manage the transmission completion
2588 * @priv: driver private structure
2589 * @budget: napi budget limiting this functions packet handling
2590 * @queue: TX queue index
2591 * @pending_packets: signal to arm the TX coal timer
2592 * Description: it reclaims the transmit resources after transmission completes.
2593 * If some packets still need to be handled, due to TX coalescing, set
2594 * pending_packets to true to make NAPI arm the TX coal timer.
2595 */
2596 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2597 bool *pending_packets)
2598 {
2599 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2600 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2601 unsigned int bytes_compl = 0, pkts_compl = 0;
2602 unsigned int entry, xmits = 0, count = 0;
2603 u32 tx_packets = 0, tx_errors = 0;
2604
2605 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2606
2607 tx_q->xsk_frames_done = 0;
2608
2609 entry = tx_q->dirty_tx;
2610
2611 /* Try to clean all completed TX frames in one shot */
2612 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2613 struct xdp_frame *xdpf;
2614 struct sk_buff *skb;
2615 struct dma_desc *p;
2616 int status;
2617
2618 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2619 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2620 xdpf = tx_q->xdpf[entry];
2621 skb = NULL;
2622 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2623 xdpf = NULL;
2624 skb = tx_q->tx_skbuff[entry];
2625 } else {
2626 xdpf = NULL;
2627 skb = NULL;
2628 }
2629
2630 if (priv->extend_desc)
2631 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2632 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2633 p = &tx_q->dma_entx[entry].basic;
2634 else
2635 p = tx_q->dma_tx + entry;
2636
2637 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2638 /* Check if the descriptor is owned by the DMA */
2639 if (unlikely(status & tx_dma_own))
2640 break;
2641
2642 count++;
2643
2644 /* Make sure descriptor fields are read after reading
2645 * the own bit.
2646 */
2647 dma_rmb();
2648
2649 /* Just consider the last segment and ...*/
2650 if (likely(!(status & tx_not_ls))) {
2651 /* ... verify the status error condition */
2652 if (unlikely(status & tx_err)) {
2653 tx_errors++;
2654 if (unlikely(status & tx_err_bump_tc))
2655 stmmac_bump_dma_threshold(priv, queue);
2656 } else {
2657 tx_packets++;
2658 }
2659 if (skb) {
2660 stmmac_get_tx_hwtstamp(priv, p, skb);
2661 } else if (tx_q->xsk_pool &&
2662 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2663 struct stmmac_xsk_tx_complete tx_compl = {
2664 .priv = priv,
2665 .desc = p,
2666 };
2667
2668 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2669 &stmmac_xsk_tx_metadata_ops,
2670 &tx_compl);
2671 }
2672 }
2673
2674 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2675 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2676 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2677 dma_unmap_page(priv->device,
2678 tx_q->tx_skbuff_dma[entry].buf,
2679 tx_q->tx_skbuff_dma[entry].len,
2680 DMA_TO_DEVICE);
2681 else
2682 dma_unmap_single(priv->device,
2683 tx_q->tx_skbuff_dma[entry].buf,
2684 tx_q->tx_skbuff_dma[entry].len,
2685 DMA_TO_DEVICE);
2686 tx_q->tx_skbuff_dma[entry].buf = 0;
2687 tx_q->tx_skbuff_dma[entry].len = 0;
2688 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2689 }
2690
2691 stmmac_clean_desc3(priv, tx_q, p);
2692
2693 tx_q->tx_skbuff_dma[entry].last_segment = false;
2694 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2695
2696 if (xdpf &&
2697 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2698 xdp_return_frame_rx_napi(xdpf);
2699 tx_q->xdpf[entry] = NULL;
2700 }
2701
2702 if (xdpf &&
2703 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2704 xdp_return_frame(xdpf);
2705 tx_q->xdpf[entry] = NULL;
2706 }
2707
2708 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2709 tx_q->xsk_frames_done++;
2710
2711 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2712 if (likely(skb)) {
2713 pkts_compl++;
2714 bytes_compl += skb->len;
2715 dev_consume_skb_any(skb);
2716 tx_q->tx_skbuff[entry] = NULL;
2717 }
2718 }
2719
2720 stmmac_release_tx_desc(priv, p, priv->mode);
2721
2722 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2723 }
2724 tx_q->dirty_tx = entry;
2725
2726 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2727 pkts_compl, bytes_compl);
2728
2729 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2730 queue))) &&
2731 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2732
2733 netif_dbg(priv, tx_done, priv->dev,
2734 "%s: restart transmit\n", __func__);
2735 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2736 }
2737
2738 if (tx_q->xsk_pool) {
2739 bool work_done;
2740
2741 if (tx_q->xsk_frames_done)
2742 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2743
2744 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2745 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2746
2747 /* For XSK TX, we try to send as many as possible.
2748 * If XSK work done (XSK TX desc empty and budget still
2749 * available), return "budget - 1" to reenable TX IRQ.
2750 * Else, return "budget" to make NAPI continue polling.
2751 */
2752 work_done = stmmac_xdp_xmit_zc(priv, queue,
2753 STMMAC_XSK_TX_BUDGET_MAX);
2754 if (work_done)
2755 xmits = budget - 1;
2756 else
2757 xmits = budget;
2758 }
2759
2760 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2761 priv->eee_sw_timer_en) {
2762 if (stmmac_enable_eee_mode(priv))
2763 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2764 }
2765
2766 /* We still have pending packets, let's call for a new scheduling */
2767 if (tx_q->dirty_tx != tx_q->cur_tx)
2768 *pending_packets = true;
2769
2770 u64_stats_update_begin(&txq_stats->napi_syncp);
2771 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2772 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2773 u64_stats_inc(&txq_stats->napi.tx_clean);
2774 u64_stats_update_end(&txq_stats->napi_syncp);
2775
2776 priv->xstats.tx_errors += tx_errors;
2777
2778 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2779
2780 /* Combine decisions from TX clean and XSK TX */
2781 return max(count, xmits);
2782 }
2783
2784 /**
2785 * stmmac_tx_err - to manage the tx error
2786 * @priv: driver private structure
2787 * @chan: channel index
2788 * Description: it cleans the descriptors and restarts the transmission
2789 * in case of transmission errors.
2790 */
2791 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2792 {
2793 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2794
2795 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2796
2797 stmmac_stop_tx_dma(priv, chan);
2798 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2799 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2800 stmmac_reset_tx_queue(priv, chan);
2801 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2802 tx_q->dma_tx_phy, chan);
2803 stmmac_start_tx_dma(priv, chan);
2804
2805 priv->xstats.tx_errors++;
2806 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2807 }
2808
2809 /**
2810 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2811 * @priv: driver private structure
2812 * @txmode: TX operating mode
2813 * @rxmode: RX operating mode
2814 * @chan: channel index
2815 * Description: it is used for configuring the DMA operation mode at
2816 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2817 * mode.
2818 */
2819 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2820 u32 rxmode, u32 chan)
2821 {
2822 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2823 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2824 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2825 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2826 int rxfifosz = priv->plat->rx_fifo_size;
2827 int txfifosz = priv->plat->tx_fifo_size;
2828
2829 if (rxfifosz == 0)
2830 rxfifosz = priv->dma_cap.rx_fifo_size;
2831 if (txfifosz == 0)
2832 txfifosz = priv->dma_cap.tx_fifo_size;
2833
2834 /* Adjust for real per queue fifo size */
2835 rxfifosz /= rx_channels_count;
2836 txfifosz /= tx_channels_count;
2837
2838 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2839 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2840 }
2841
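/* Handle a safety feature interrupt: query the IRQ status and trigger the
 * global error handling when a reported error requires it. Returns true
 * if a global error was raised.
 */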
2842 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2843 {
2844 int ret;
2845
2846 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2847 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2848 if (ret && (ret != -EINVAL)) {
2849 stmmac_global_err(priv);
2850 return true;
2851 }
2852
2853 return false;
2854 }
2855
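/* Query the DMA interrupt status for @chan and schedule the matching RX/TX
 * NAPI instance (or the combined rx/tx NAPI when an XSK pool is attached),
 * masking the corresponding DMA interrupts until NAPI has run.
 */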
2856 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2857 {
2858 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2859 &priv->xstats, chan, dir);
2860 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2861 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2862 struct stmmac_channel *ch = &priv->channel[chan];
2863 struct napi_struct *rx_napi;
2864 struct napi_struct *tx_napi;
2865 unsigned long flags;
2866
2867 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2868 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2869
2870 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2871 if (napi_schedule_prep(rx_napi)) {
2872 spin_lock_irqsave(&ch->lock, flags);
2873 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2874 spin_unlock_irqrestore(&ch->lock, flags);
2875 __napi_schedule(rx_napi);
2876 }
2877 }
2878
2879 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2880 if (napi_schedule_prep(tx_napi)) {
2881 spin_lock_irqsave(&ch->lock, flags);
2882 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2883 spin_unlock_irqrestore(&ch->lock, flags);
2884 __napi_schedule(tx_napi);
2885 }
2886 }
2887
2888 return status;
2889 }
2890
2891 /**
2892 * stmmac_dma_interrupt - DMA ISR
2893 * @priv: driver private structure
2894 * Description: this is the DMA ISR. It is called by the main ISR.
2895 * It calls the dwmac dma routine and schedules the poll method in case
2896 * some work can be done.
2897 */
2898 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2899 {
2900 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2901 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2902 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2903 tx_channel_count : rx_channel_count;
2904 u32 chan;
2905 int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2906
2907 /* Make sure we never check beyond our status buffer. */
2908 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2909 channels_to_check = ARRAY_SIZE(status);
2910
2911 for (chan = 0; chan < channels_to_check; chan++)
2912 status[chan] = stmmac_napi_check(priv, chan,
2913 DMA_DIR_RXTX);
2914
2915 for (chan = 0; chan < tx_channel_count; chan++) {
2916 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2917 /* Try to bump up the dma threshold on this failure */
2918 stmmac_bump_dma_threshold(priv, chan);
2919 } else if (unlikely(status[chan] == tx_hard_error)) {
2920 stmmac_tx_err(priv, chan);
2921 }
2922 }
2923 }
2924
2925 /**
2926 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2927 * @priv: driver private structure
2928 * Description: this masks the MMC irq; in fact, the counters are managed in SW.
2929 */
2930 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2931 {
2932 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2933 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2934
2935 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2936
2937 if (priv->dma_cap.rmon) {
2938 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2939 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2940 } else
2941 netdev_info(priv->dev, "No MAC Management Counters available\n");
2942 }
2943
2944 /**
2945 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2946 * @priv: driver private structure
2947 * Description:
2948 * new GMAC chip generations have a register to indicate the
2949 * presence of the optional features/functions.
2950 * This can also be used to override the value passed through the
2951 * platform, which is necessary for old MAC10/100 and GMAC chips.
2952 */
2953 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2954 {
2955 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2956 }
2957
2958 /**
2959 * stmmac_check_ether_addr - check if the MAC addr is valid
2960 * @priv: driver private structure
2961 * Description:
2962 * it verifies that the MAC address is valid; in case of failure it
2963 * generates a random MAC address
2964 */
2965 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2966 {
2967 u8 addr[ETH_ALEN];
2968
2969 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2970 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2971 if (is_valid_ether_addr(addr))
2972 eth_hw_addr_set(priv->dev, addr);
2973 else
2974 eth_hw_addr_random(priv->dev);
2975 dev_info(priv->device, "device MAC address %pM\n",
2976 priv->dev->dev_addr);
2977 }
2978 }
2979
2980 /**
2981 * stmmac_init_dma_engine - DMA init.
2982 * @priv: driver private structure
2983 * Description:
2984 * It inits the DMA by invoking the specific MAC/GMAC callback.
2985 * Some DMA parameters can be passed from the platform;
2986 * in case these are not passed, a default is kept for the MAC or GMAC.
2987 */
2988 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2989 {
2990 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2991 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2992 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2993 struct stmmac_rx_queue *rx_q;
2994 struct stmmac_tx_queue *tx_q;
2995 u32 chan = 0;
2996 int ret = 0;
2997
2998 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2999 dev_err(priv->device, "Invalid DMA configuration\n");
3000 return -EINVAL;
3001 }
3002
3003 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3004 priv->plat->dma_cfg->atds = 1;
3005
3006 ret = stmmac_reset(priv, priv->ioaddr);
3007 if (ret) {
3008 dev_err(priv->device, "Failed to reset the dma\n");
3009 return ret;
3010 }
3011
3012 /* DMA Configuration */
3013 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3014
3015 if (priv->plat->axi)
3016 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3017
3018 /* DMA CSR Channel configuration */
3019 for (chan = 0; chan < dma_csr_ch; chan++) {
3020 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3021 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3022 }
3023
3024 /* DMA RX Channel Configuration */
3025 for (chan = 0; chan < rx_channels_count; chan++) {
3026 rx_q = &priv->dma_conf.rx_queue[chan];
3027
3028 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3029 rx_q->dma_rx_phy, chan);
3030
3031 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3032 (rx_q->buf_alloc_num *
3033 sizeof(struct dma_desc));
3034 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3035 rx_q->rx_tail_addr, chan);
3036 }
3037
3038 /* DMA TX Channel Configuration */
3039 for (chan = 0; chan < tx_channels_count; chan++) {
3040 tx_q = &priv->dma_conf.tx_queue[chan];
3041
3042 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3043 tx_q->dma_tx_phy, chan);
3044
3045 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3046 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3047 tx_q->tx_tail_addr, chan);
3048 }
3049
3050 return ret;
3051 }
3052
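/* Arm the TX coalescing hrtimer for @queue, unless TX coalescing is
 * disabled (timer of 0) or the corresponding NAPI instance is already
 * scheduled, in which case any pending timer is cancelled instead.
 */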
3053 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3054 {
3055 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3056 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3057 struct stmmac_channel *ch;
3058 struct napi_struct *napi;
3059
3060 if (!tx_coal_timer)
3061 return;
3062
3063 ch = &priv->channel[tx_q->queue_index];
3064 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3065
3066 /* Arm timer only if napi is not already scheduled.
3067 * If napi is scheduled, try to cancel any pending timer; it will be
3068 * armed again from the next scheduled napi.
3069 */
3070 if (unlikely(!napi_is_scheduled(napi)))
3071 hrtimer_start(&tx_q->txtimer,
3072 STMMAC_COAL_TIMER(tx_coal_timer),
3073 HRTIMER_MODE_REL);
3074 else
3075 hrtimer_try_to_cancel(&tx_q->txtimer);
3076 }
3077
3078 /**
3079 * stmmac_tx_timer - mitigation sw timer for tx.
3080 * @t: pointer to the TX queue hrtimer
3081 * Description:
3082 * This is the timer handler to directly invoke the stmmac_tx_clean.
3083 */
3084 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3085 {
3086 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3087 struct stmmac_priv *priv = tx_q->priv_data;
3088 struct stmmac_channel *ch;
3089 struct napi_struct *napi;
3090
3091 ch = &priv->channel[tx_q->queue_index];
3092 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3093
3094 if (likely(napi_schedule_prep(napi))) {
3095 unsigned long flags;
3096
3097 spin_lock_irqsave(&ch->lock, flags);
3098 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3099 spin_unlock_irqrestore(&ch->lock, flags);
3100 __napi_schedule(napi);
3101 }
3102
3103 return HRTIMER_NORESTART;
3104 }
3105
3106 /**
3107 * stmmac_init_coalesce - init mitigation options.
3108 * @priv: driver private structure
3109 * Description:
3110 * This inits the coalesce parameters: i.e. timer rate,
3111 * timer handler and default threshold used for enabling the
3112 * interrupt on completion bit.
3113 */
3114 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3115 {
3116 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3117 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3118 u32 chan;
3119
3120 for (chan = 0; chan < tx_channel_count; chan++) {
3121 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3122
3123 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3124 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3125
3126 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3127 tx_q->txtimer.function = stmmac_tx_timer;
3128 }
3129
3130 for (chan = 0; chan < rx_channel_count; chan++)
3131 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3132 }
3133
3134 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3135 {
3136 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3137 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3138 u32 chan;
3139
3140 /* set TX ring length */
3141 for (chan = 0; chan < tx_channels_count; chan++)
3142 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3143 (priv->dma_conf.dma_tx_size - 1), chan);
3144
3145 /* set RX ring length */
3146 for (chan = 0; chan < rx_channels_count; chan++)
3147 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3148 (priv->dma_conf.dma_rx_size - 1), chan);
3149 }
3150
3151 /**
3152 * stmmac_set_tx_queue_weight - Set TX queue weight
3153 * @priv: driver private structure
3154 * Description: It is used for setting the TX queue weights
3155 */
3156 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3157 {
3158 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3159 u32 weight;
3160 u32 queue;
3161
3162 for (queue = 0; queue < tx_queues_count; queue++) {
3163 weight = priv->plat->tx_queues_cfg[queue].weight;
3164 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3165 }
3166 }
3167
3168 /**
3169 * stmmac_configure_cbs - Configure CBS in TX queue
3170 * @priv: driver private structure
3171 * Description: It is used for configuring CBS in AVB TX queues
3172 */
3173 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3174 {
3175 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3176 u32 mode_to_use;
3177 u32 queue;
3178
3179 /* queue 0 is reserved for legacy traffic */
3180 for (queue = 1; queue < tx_queues_count; queue++) {
3181 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3182 if (mode_to_use == MTL_QUEUE_DCB)
3183 continue;
3184
3185 stmmac_config_cbs(priv, priv->hw,
3186 priv->plat->tx_queues_cfg[queue].send_slope,
3187 priv->plat->tx_queues_cfg[queue].idle_slope,
3188 priv->plat->tx_queues_cfg[queue].high_credit,
3189 priv->plat->tx_queues_cfg[queue].low_credit,
3190 queue);
3191 }
3192 }
3193
3194 /**
3195 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3196 * @priv: driver private structure
3197 * Description: It is used for mapping RX queues to RX dma channels
3198 */
3199 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3200 {
3201 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3202 u32 queue;
3203 u32 chan;
3204
3205 for (queue = 0; queue < rx_queues_count; queue++) {
3206 chan = priv->plat->rx_queues_cfg[queue].chan;
3207 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3208 }
3209 }
3210
3211 /**
3212 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3213 * @priv: driver private structure
3214 * Description: It is used for configuring the RX Queue Priority
3215 */
3216 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3217 {
3218 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3219 u32 queue;
3220 u32 prio;
3221
3222 for (queue = 0; queue < rx_queues_count; queue++) {
3223 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3224 continue;
3225
3226 prio = priv->plat->rx_queues_cfg[queue].prio;
3227 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3228 }
3229 }
3230
3231 /**
3232 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3233 * @priv: driver private structure
3234 * Description: It is used for configuring the TX Queue Priority
3235 */
3236 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3237 {
3238 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3239 u32 queue;
3240 u32 prio;
3241
3242 for (queue = 0; queue < tx_queues_count; queue++) {
3243 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3244 continue;
3245
3246 prio = priv->plat->tx_queues_cfg[queue].prio;
3247 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3248 }
3249 }
3250
3251 /**
3252 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3253 * @priv: driver private structure
3254 * Description: It is used for configuring the RX queue routing
3255 */
3256 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3257 {
3258 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3259 u32 queue;
3260 u8 packet;
3261
3262 for (queue = 0; queue < rx_queues_count; queue++) {
3263 /* no specific packet type routing specified for the queue */
3264 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3265 continue;
3266
3267 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3268 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3269 }
3270 }
3271
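/* Configure Receive Side Scaling: when the hardware and the platform
 * support it, enable RSS according to NETIF_F_RXHASH and program the RSS
 * configuration for the RX queues in use.
 */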
3272 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3273 {
3274 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3275 priv->rss.enable = false;
3276 return;
3277 }
3278
3279 if (priv->dev->features & NETIF_F_RXHASH)
3280 priv->rss.enable = true;
3281 else
3282 priv->rss.enable = false;
3283
3284 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3285 priv->plat->rx_queues_to_use);
3286 }
3287
3288 /**
3289 * stmmac_mtl_configuration - Configure MTL
3290 * @priv: driver private structure
3291 * Description: It is used for configuring MTL
3292 */
3293 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3294 {
3295 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3296 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3297
3298 if (tx_queues_count > 1)
3299 stmmac_set_tx_queue_weight(priv);
3300
3301 /* Configure MTL RX algorithms */
3302 if (rx_queues_count > 1)
3303 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3304 priv->plat->rx_sched_algorithm);
3305
3306 /* Configure MTL TX algorithms */
3307 if (tx_queues_count > 1)
3308 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3309 priv->plat->tx_sched_algorithm);
3310
3311 /* Configure CBS in AVB TX queues */
3312 if (tx_queues_count > 1)
3313 stmmac_configure_cbs(priv);
3314
3315 /* Map RX MTL to DMA channels */
3316 stmmac_rx_queue_dma_chan_map(priv);
3317
3318 /* Enable MAC RX Queues */
3319 stmmac_mac_enable_rx_queues(priv);
3320
3321 /* Set RX priorities */
3322 if (rx_queues_count > 1)
3323 stmmac_mac_config_rx_queues_prio(priv);
3324
3325 /* Set TX priorities */
3326 if (tx_queues_count > 1)
3327 stmmac_mac_config_tx_queues_prio(priv);
3328
3329 /* Set RX routing */
3330 if (rx_queues_count > 1)
3331 stmmac_mac_config_rx_queues_routing(priv);
3332
3333 /* Receive Side Scaling */
3334 if (rx_queues_count > 1)
3335 stmmac_mac_config_rss(priv);
3336 }
3337
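/* Enable the hardware safety features (Automotive Safety Package) when the
 * capability register advertises them, using the platform configuration.
 */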
3338 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3339 {
3340 if (priv->dma_cap.asp) {
3341 netdev_info(priv->dev, "Enabling Safety Features\n");
3342 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3343 priv->plat->safety_feat_cfg);
3344 } else {
3345 netdev_info(priv->dev, "No Safety Features support found\n");
3346 }
3347 }
3348
3349 /**
3350 * stmmac_hw_setup - setup mac in a usable state.
3351 * @dev : pointer to the device structure.
3352 * @ptp_register: register PTP if set
3353 * Description:
3354 * this is the main function to set up the HW in a usable state: the
3355 * dma engine is reset, the core registers are configured (e.g. AXI,
3356 * Checksum features, timers). The DMA is ready to start receiving and
3357 * transmitting.
3358 * Return value:
3359 * 0 on success and an appropriate negative error code, as defined in
3360 * errno.h, on failure.
3361 */
3362 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3363 {
3364 struct stmmac_priv *priv = netdev_priv(dev);
3365 u32 rx_cnt = priv->plat->rx_queues_to_use;
3366 u32 tx_cnt = priv->plat->tx_queues_to_use;
3367 bool sph_en;
3368 u32 chan;
3369 int ret;
3370
3371 /* Make sure RX clock is enabled */
3372 if (priv->hw->phylink_pcs)
3373 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3374
3375 /* DMA initialization and SW reset */
3376 ret = stmmac_init_dma_engine(priv);
3377 if (ret < 0) {
3378 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3379 __func__);
3380 return ret;
3381 }
3382
3383 /* Copy the MAC addr into the HW */
3384 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3385
3386 /* PS and related bits will be programmed according to the speed */
3387 if (priv->hw->pcs) {
3388 int speed = priv->plat->mac_port_sel_speed;
3389
3390 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3391 (speed == SPEED_1000)) {
3392 priv->hw->ps = speed;
3393 } else {
3394 dev_warn(priv->device, "invalid port speed\n");
3395 priv->hw->ps = 0;
3396 }
3397 }
3398
3399 /* Initialize the MAC Core */
3400 stmmac_core_init(priv, priv->hw, dev);
3401
3402 /* Initialize MTL*/
3403 stmmac_mtl_configuration(priv);
3404
3405 /* Initialize Safety Features */
3406 stmmac_safety_feat_configuration(priv);
3407
3408 ret = stmmac_rx_ipc(priv, priv->hw);
3409 if (!ret) {
3410 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3411 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3412 priv->hw->rx_csum = 0;
3413 }
3414
3415 /* Enable the MAC Rx/Tx */
3416 stmmac_mac_set(priv, priv->ioaddr, true);
3417
3418 /* Set the HW DMA mode and the COE */
3419 stmmac_dma_operation_mode(priv);
3420
3421 stmmac_mmc_setup(priv);
3422
3423 if (ptp_register) {
3424 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3425 if (ret < 0)
3426 netdev_warn(priv->dev,
3427 "failed to enable PTP reference clock: %pe\n",
3428 ERR_PTR(ret));
3429 }
3430
3431 ret = stmmac_init_ptp(priv);
3432 if (ret == -EOPNOTSUPP)
3433 netdev_info(priv->dev, "PTP not supported by HW\n");
3434 else if (ret)
3435 netdev_warn(priv->dev, "PTP init failed\n");
3436 else if (ptp_register)
3437 stmmac_ptp_register(priv);
3438
3439 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3440
3441 /* Convert the timer from msec to usec */
3442 if (!priv->tx_lpi_timer)
3443 priv->tx_lpi_timer = eee_timer * 1000;
3444
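	/* Program the per-queue RX interrupt watchdog (RIWT) so that RX
	 * interrupts can be deferred to the watchdog timer instead of firing
	 * for every received frame; queues without an explicit value
	 * (normally set via ethtool rx-usecs) fall back to DEF_DMA_RIWT.
	 */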
3445 if (priv->use_riwt) {
3446 u32 queue;
3447
3448 for (queue = 0; queue < rx_cnt; queue++) {
3449 if (!priv->rx_riwt[queue])
3450 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3451
3452 stmmac_rx_watchdog(priv, priv->ioaddr,
3453 priv->rx_riwt[queue], queue);
3454 }
3455 }
3456
3457 if (priv->hw->pcs)
3458 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3459
3460 /* set TX and RX rings length */
3461 stmmac_set_rings_length(priv);
3462
3463 /* Enable TSO */
3464 if (priv->tso) {
3465 for (chan = 0; chan < tx_cnt; chan++) {
3466 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3467
3468 /* TSO and TBS cannot co-exist */
3469 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3470 continue;
3471
3472 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3473 }
3474 }
3475
3476 /* Enable Split Header */
3477 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3478 for (chan = 0; chan < rx_cnt; chan++)
3479 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3480
3481
3482 /* VLAN Tag Insertion */
3483 if (priv->dma_cap.vlins)
3484 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3485
3486 /* TBS */
3487 for (chan = 0; chan < tx_cnt; chan++) {
3488 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3489 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3490
3491 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3492 }
3493
3494 /* Configure real RX and TX queues */
3495 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3496 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3497
3498 /* Start the ball rolling... */
3499 stmmac_start_all_dma(priv);
3500
3501 stmmac_set_hw_vlan_mode(priv, priv->hw);
3502
3503 return 0;
3504 }
3505
3506 static void stmmac_hw_teardown(struct net_device *dev)
3507 {
3508 struct stmmac_priv *priv = netdev_priv(dev);
3509
3510 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3511 }
3512
3513 static void stmmac_free_irq(struct net_device *dev,
3514 enum request_irq_err irq_err, int irq_idx)
3515 {
3516 struct stmmac_priv *priv = netdev_priv(dev);
3517 int j;
3518
3519 switch (irq_err) {
3520 case REQ_IRQ_ERR_ALL:
3521 irq_idx = priv->plat->tx_queues_to_use;
3522 fallthrough;
3523 case REQ_IRQ_ERR_TX:
3524 for (j = irq_idx - 1; j >= 0; j--) {
3525 if (priv->tx_irq[j] > 0) {
3526 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3527 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3528 }
3529 }
3530 irq_idx = priv->plat->rx_queues_to_use;
3531 fallthrough;
3532 case REQ_IRQ_ERR_RX:
3533 for (j = irq_idx - 1; j >= 0; j--) {
3534 if (priv->rx_irq[j] > 0) {
3535 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3536 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3537 }
3538 }
3539
3540 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3541 free_irq(priv->sfty_ue_irq, dev);
3542 fallthrough;
3543 case REQ_IRQ_ERR_SFTY_UE:
3544 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3545 free_irq(priv->sfty_ce_irq, dev);
3546 fallthrough;
3547 case REQ_IRQ_ERR_SFTY_CE:
3548 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3549 free_irq(priv->lpi_irq, dev);
3550 fallthrough;
3551 case REQ_IRQ_ERR_LPI:
3552 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3553 free_irq(priv->wol_irq, dev);
3554 fallthrough;
3555 case REQ_IRQ_ERR_SFTY:
3556 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3557 free_irq(priv->sfty_irq, dev);
3558 fallthrough;
3559 case REQ_IRQ_ERR_WOL:
3560 free_irq(dev->irq, dev);
3561 fallthrough;
3562 case REQ_IRQ_ERR_MAC:
3563 case REQ_IRQ_ERR_NO:
3564 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3565 break;
3566 }
3567 }
3568
3569 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3570 {
3571 struct stmmac_priv *priv = netdev_priv(dev);
3572 enum request_irq_err irq_err;
3573 cpumask_t cpu_mask;
3574 int irq_idx = 0;
3575 char *int_name;
3576 int ret;
3577 int i;
3578
3579 /* For common interrupt */
3580 int_name = priv->int_name_mac;
3581 sprintf(int_name, "%s:%s", dev->name, "mac");
3582 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3583 0, int_name, dev);
3584 if (unlikely(ret < 0)) {
3585 netdev_err(priv->dev,
3586 "%s: alloc mac MSI %d (error: %d)\n",
3587 __func__, dev->irq, ret);
3588 irq_err = REQ_IRQ_ERR_MAC;
3589 goto irq_error;
3590 }
3591
3592 	/* Request the Wake IRQ in case another line
3593 	 * is used for WoL
3594 	 */
3595 priv->wol_irq_disabled = true;
3596 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3597 int_name = priv->int_name_wol;
3598 sprintf(int_name, "%s:%s", dev->name, "wol");
3599 ret = request_irq(priv->wol_irq,
3600 stmmac_mac_interrupt,
3601 0, int_name, dev);
3602 if (unlikely(ret < 0)) {
3603 netdev_err(priv->dev,
3604 "%s: alloc wol MSI %d (error: %d)\n",
3605 __func__, priv->wol_irq, ret);
3606 irq_err = REQ_IRQ_ERR_WOL;
3607 goto irq_error;
3608 }
3609 }
3610
3611 	/* Request the LPI IRQ in case another line
3612 	 * is used for LPI
3613 	 */
3614 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3615 int_name = priv->int_name_lpi;
3616 sprintf(int_name, "%s:%s", dev->name, "lpi");
3617 ret = request_irq(priv->lpi_irq,
3618 stmmac_mac_interrupt,
3619 0, int_name, dev);
3620 if (unlikely(ret < 0)) {
3621 netdev_err(priv->dev,
3622 "%s: alloc lpi MSI %d (error: %d)\n",
3623 __func__, priv->lpi_irq, ret);
3624 irq_err = REQ_IRQ_ERR_LPI;
3625 goto irq_error;
3626 }
3627 }
3628
3629 	/* Request the common Safety Feature Correctable/Uncorrectable
3630 	 * Error line in case another line is used
3631 	 */
3632 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3633 int_name = priv->int_name_sfty;
3634 sprintf(int_name, "%s:%s", dev->name, "safety");
3635 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3636 0, int_name, dev);
3637 if (unlikely(ret < 0)) {
3638 netdev_err(priv->dev,
3639 "%s: alloc sfty MSI %d (error: %d)\n",
3640 __func__, priv->sfty_irq, ret);
3641 irq_err = REQ_IRQ_ERR_SFTY;
3642 goto irq_error;
3643 }
3644 }
3645
3646 	/* Request the Safety Feature Correctable Error line in
3647 	 * case another line is used
3648 	 */
3649 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3650 int_name = priv->int_name_sfty_ce;
3651 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3652 ret = request_irq(priv->sfty_ce_irq,
3653 stmmac_safety_interrupt,
3654 0, int_name, dev);
3655 if (unlikely(ret < 0)) {
3656 netdev_err(priv->dev,
3657 "%s: alloc sfty ce MSI %d (error: %d)\n",
3658 __func__, priv->sfty_ce_irq, ret);
3659 irq_err = REQ_IRQ_ERR_SFTY_CE;
3660 goto irq_error;
3661 }
3662 }
3663
3664 	/* Request the Safety Feature Uncorrectable Error line in
3665 	 * case another line is used
3666 	 */
3667 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3668 int_name = priv->int_name_sfty_ue;
3669 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3670 ret = request_irq(priv->sfty_ue_irq,
3671 stmmac_safety_interrupt,
3672 0, int_name, dev);
3673 if (unlikely(ret < 0)) {
3674 netdev_err(priv->dev,
3675 "%s: alloc sfty ue MSI %d (error: %d)\n",
3676 __func__, priv->sfty_ue_irq, ret);
3677 irq_err = REQ_IRQ_ERR_SFTY_UE;
3678 goto irq_error;
3679 }
3680 }
3681
3682 /* Request Rx MSI irq */
3683 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3684 if (i >= MTL_MAX_RX_QUEUES)
3685 break;
3686 if (priv->rx_irq[i] == 0)
3687 continue;
3688
3689 int_name = priv->int_name_rx_irq[i];
3690 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3691 ret = request_irq(priv->rx_irq[i],
3692 stmmac_msi_intr_rx,
3693 0, int_name, &priv->dma_conf.rx_queue[i]);
3694 if (unlikely(ret < 0)) {
3695 netdev_err(priv->dev,
3696 "%s: alloc rx-%d MSI %d (error: %d)\n",
3697 __func__, i, priv->rx_irq[i], ret);
3698 irq_err = REQ_IRQ_ERR_RX;
3699 irq_idx = i;
3700 goto irq_error;
3701 }
3702 cpumask_clear(&cpu_mask);
3703 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3704 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3705 }
3706
3707 /* Request Tx MSI irq */
3708 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3709 if (i >= MTL_MAX_TX_QUEUES)
3710 break;
3711 if (priv->tx_irq[i] == 0)
3712 continue;
3713
3714 int_name = priv->int_name_tx_irq[i];
3715 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3716 ret = request_irq(priv->tx_irq[i],
3717 stmmac_msi_intr_tx,
3718 0, int_name, &priv->dma_conf.tx_queue[i]);
3719 if (unlikely(ret < 0)) {
3720 netdev_err(priv->dev,
3721 "%s: alloc tx-%d MSI %d (error: %d)\n",
3722 __func__, i, priv->tx_irq[i], ret);
3723 irq_err = REQ_IRQ_ERR_TX;
3724 irq_idx = i;
3725 goto irq_error;
3726 }
3727 cpumask_clear(&cpu_mask);
3728 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3729 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3730 }
3731
3732 return 0;
3733
3734 irq_error:
3735 stmmac_free_irq(dev, irq_err, irq_idx);
3736 return ret;
3737 }
3738
3739 static int stmmac_request_irq_single(struct net_device *dev)
3740 {
3741 struct stmmac_priv *priv = netdev_priv(dev);
3742 enum request_irq_err irq_err;
3743 int ret;
3744
3745 ret = request_irq(dev->irq, stmmac_interrupt,
3746 IRQF_SHARED, dev->name, dev);
3747 if (unlikely(ret < 0)) {
3748 netdev_err(priv->dev,
3749 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3750 __func__, dev->irq, ret);
3751 irq_err = REQ_IRQ_ERR_MAC;
3752 goto irq_error;
3753 }
3754
3755 	/* Request the Wake IRQ in case another line
3756 	 * is used for WoL
3757 	 */
3758 priv->wol_irq_disabled = true;
3759 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3760 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3761 IRQF_SHARED, dev->name, dev);
3762 if (unlikely(ret < 0)) {
3763 netdev_err(priv->dev,
3764 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3765 __func__, priv->wol_irq, ret);
3766 irq_err = REQ_IRQ_ERR_WOL;
3767 goto irq_error;
3768 }
3769 }
3770
3771 	/* Request the LPI IRQ in case another line is used for LPI */
3772 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3773 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3774 IRQF_SHARED, dev->name, dev);
3775 if (unlikely(ret < 0)) {
3776 netdev_err(priv->dev,
3777 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3778 __func__, priv->lpi_irq, ret);
3779 irq_err = REQ_IRQ_ERR_LPI;
3780 goto irq_error;
3781 }
3782 }
3783
3784 	/* Request the common Safety Feature Correctable/Uncorrectable
3785 	 * Error line in case another line is used
3786 	 */
3787 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3788 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3789 IRQF_SHARED, dev->name, dev);
3790 if (unlikely(ret < 0)) {
3791 netdev_err(priv->dev,
3792 "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3793 __func__, priv->sfty_irq, ret);
3794 irq_err = REQ_IRQ_ERR_SFTY;
3795 goto irq_error;
3796 }
3797 }
3798
3799 return 0;
3800
3801 irq_error:
3802 stmmac_free_irq(dev, irq_err, 0);
3803 return ret;
3804 }
3805
3806 static int stmmac_request_irq(struct net_device *dev)
3807 {
3808 struct stmmac_priv *priv = netdev_priv(dev);
3809 int ret;
3810
3811 /* Request the IRQ lines */
3812 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3813 ret = stmmac_request_irq_multi_msi(dev);
3814 else
3815 ret = stmmac_request_irq_single(dev);
3816
3817 return ret;
3818 }
3819
3820 /**
3821 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3822 * @priv: driver private structure
3823 * @mtu: MTU to setup the dma queue and buf with
3824 * Description: Allocate and generate a dma_conf based on the provided MTU.
3825  * Allocate the Tx/Rx DMA queues and init them.
3826  * Return value:
3827  *  the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3828 */
3829 static struct stmmac_dma_conf *
3830 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3831 {
3832 struct stmmac_dma_conf *dma_conf;
3833 int chan, bfsize, ret;
3834
3835 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3836 if (!dma_conf) {
3837 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3838 __func__);
3839 return ERR_PTR(-ENOMEM);
3840 }
3841
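	/* Pick the RX DMA buffer size for this MTU: stmmac_set_16kib_bfsize()
	 * may request a 16 KiB buffer for large frames, otherwise the size is
	 * derived from the MTU via stmmac_set_bfsize().
	 */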
3842 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3843 if (bfsize < 0)
3844 bfsize = 0;
3845
3846 if (bfsize < BUF_SIZE_16KiB)
3847 bfsize = stmmac_set_bfsize(mtu, 0);
3848
3849 dma_conf->dma_buf_sz = bfsize;
3850 	/* Choose the TX/RX ring sizes from the ones already defined in the
3851 	 * priv struct, if any.
3852 	 */
3853 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3854 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3855
3856 if (!dma_conf->dma_tx_size)
3857 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3858 if (!dma_conf->dma_rx_size)
3859 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3860
3861 /* Earlier check for TBS */
3862 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3863 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3864 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3865
3866 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3867 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3868 }
3869
3870 ret = alloc_dma_desc_resources(priv, dma_conf);
3871 if (ret < 0) {
3872 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3873 __func__);
3874 goto alloc_error;
3875 }
3876
3877 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3878 if (ret < 0) {
3879 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3880 __func__);
3881 goto init_error;
3882 }
3883
3884 return dma_conf;
3885
3886 init_error:
3887 free_dma_desc_resources(priv, dma_conf);
3888 alloc_error:
3889 kfree(dma_conf);
3890 return ERR_PTR(ret);
3891 }
3892
3893 /**
3894 * __stmmac_open - open entry point of the driver
3895 * @dev : pointer to the device structure.
3896 * @dma_conf : structure to take the dma data
3897 * Description:
3898 * This function is the open entry point of the driver.
3899 * Return value:
3900 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3901 * file on failure.
3902 */
3903 static int __stmmac_open(struct net_device *dev,
3904 struct stmmac_dma_conf *dma_conf)
3905 {
3906 struct stmmac_priv *priv = netdev_priv(dev);
3907 int mode = priv->plat->phy_interface;
3908 u32 chan;
3909 int ret;
3910
3911 ret = pm_runtime_resume_and_get(priv->device);
3912 if (ret < 0)
3913 return ret;
3914
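	/* Attach a PHY unless an XPCS is present and running clause-73
	 * auto-negotiation, in which case the PHY attach is skipped.
	 */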
3915 if ((!priv->hw->xpcs ||
3916 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3917 ret = stmmac_init_phy(dev);
3918 if (ret) {
3919 netdev_err(priv->dev,
3920 "%s: Cannot attach to PHY (error: %d)\n",
3921 __func__, ret);
3922 goto init_phy_error;
3923 }
3924 }
3925
3926 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3927
3928 buf_sz = dma_conf->dma_buf_sz;
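	/* Carry the per-queue TBS enable state over into the new DMA
	 * configuration before it replaces priv->dma_conf below.
	 */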
3929 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3930 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3931 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3932 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3933
3934 stmmac_reset_queues_param(priv);
3935
3936 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3937 priv->plat->serdes_powerup) {
3938 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3939 if (ret < 0) {
3940 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3941 __func__);
3942 goto init_error;
3943 }
3944 }
3945
3946 ret = stmmac_hw_setup(dev, true);
3947 if (ret < 0) {
3948 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3949 goto init_error;
3950 }
3951
3952 stmmac_init_coalesce(priv);
3953
3954 phylink_start(priv->phylink);
3955 /* We may have called phylink_speed_down before */
3956 phylink_speed_up(priv->phylink);
3957
3958 ret = stmmac_request_irq(dev);
3959 if (ret)
3960 goto irq_error;
3961
3962 stmmac_enable_all_queues(priv);
3963 netif_tx_start_all_queues(priv->dev);
3964 stmmac_enable_all_dma_irq(priv);
3965
3966 return 0;
3967
3968 irq_error:
3969 phylink_stop(priv->phylink);
3970
3971 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3972 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3973
3974 stmmac_hw_teardown(dev);
3975 init_error:
3976 phylink_disconnect_phy(priv->phylink);
3977 init_phy_error:
3978 pm_runtime_put(priv->device);
3979 return ret;
3980 }
3981
3982 static int stmmac_open(struct net_device *dev)
3983 {
3984 struct stmmac_priv *priv = netdev_priv(dev);
3985 struct stmmac_dma_conf *dma_conf;
3986 int ret;
3987
3988 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3989 if (IS_ERR(dma_conf))
3990 return PTR_ERR(dma_conf);
3991
3992 ret = __stmmac_open(dev, dma_conf);
3993 if (ret)
3994 free_dma_desc_resources(priv, dma_conf);
3995
3996 kfree(dma_conf);
3997 return ret;
3998 }
3999
4000 /**
4001 * stmmac_release - close entry point of the driver
4002 * @dev : device pointer.
4003 * Description:
4004 * This is the stop entry point of the driver.
4005 */
4006 static int stmmac_release(struct net_device *dev)
4007 {
4008 struct stmmac_priv *priv = netdev_priv(dev);
4009 u32 chan;
4010
4011 if (device_may_wakeup(priv->device))
4012 phylink_speed_down(priv->phylink, false);
4013 /* Stop and disconnect the PHY */
4014 phylink_stop(priv->phylink);
4015 phylink_disconnect_phy(priv->phylink);
4016
4017 stmmac_disable_all_queues(priv);
4018
4019 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4020 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4021
4022 netif_tx_disable(dev);
4023
4024 /* Free the IRQ lines */
4025 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4026
4027 if (priv->eee_enabled) {
4028 priv->tx_path_in_lpi_mode = false;
4029 del_timer_sync(&priv->eee_ctrl_timer);
4030 }
4031
4032 /* Stop TX/RX DMA and clear the descriptors */
4033 stmmac_stop_all_dma(priv);
4034
4035 /* Release and free the Rx/Tx resources */
4036 free_dma_desc_resources(priv, &priv->dma_conf);
4037
4038 /* Disable the MAC Rx/Tx */
4039 stmmac_mac_set(priv, priv->ioaddr, false);
4040
4041 /* Powerdown Serdes if there is */
4042 if (priv->plat->serdes_powerdown)
4043 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4044
4045 stmmac_release_ptp(priv);
4046
4047 if (stmmac_fpe_supported(priv))
4048 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4049
4050 pm_runtime_put(priv->device);
4051
4052 return 0;
4053 }
4054
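/* Offload VLAN tag insertion to a dedicated TX descriptor when the hardware
 * supports it (dma_cap.vlins). Returns true if a descriptor was consumed for
 * the tag, false if the tag (if any) has to stay in the skb.
 */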
4055 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4056 struct stmmac_tx_queue *tx_q)
4057 {
4058 u16 tag = 0x0, inner_tag = 0x0;
4059 u32 inner_type = 0x0;
4060 struct dma_desc *p;
4061
4062 if (!priv->dma_cap.vlins)
4063 return false;
4064 if (!skb_vlan_tag_present(skb))
4065 return false;
4066 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4067 inner_tag = skb_vlan_tag_get(skb);
4068 inner_type = STMMAC_VLAN_INSERT;
4069 }
4070
4071 tag = skb_vlan_tag_get(skb);
4072
4073 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4074 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4075 else
4076 p = &tx_q->dma_tx[tx_q->cur_tx];
4077
4078 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4079 return false;
4080
4081 stmmac_set_tx_owner(priv, p);
4082 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4083 return true;
4084 }
4085
4086 /**
4087  * stmmac_tso_allocator - allocate and fill TSO payload descriptors
4088 * @priv: driver private structure
4089 * @des: buffer start address
4090 * @total_len: total length to fill in descriptors
4091 * @last_segment: condition for the last descriptor
4092 * @queue: TX queue index
4093 * Description:
4094  * This function fills descriptors and requests new ones according to the
4095  * buffer length still to fill.
4096 */
4097 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4098 int total_len, bool last_segment, u32 queue)
4099 {
4100 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4101 struct dma_desc *desc;
4102 u32 buff_size;
4103 int tmp_len;
4104
4105 tmp_len = total_len;
4106
4107 while (tmp_len > 0) {
4108 dma_addr_t curr_addr;
4109
4110 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4111 priv->dma_conf.dma_tx_size);
4112 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4113
4114 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4115 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4116 else
4117 desc = &tx_q->dma_tx[tx_q->cur_tx];
4118
4119 curr_addr = des + (total_len - tmp_len);
4120 if (priv->dma_cap.addr64 <= 32)
4121 desc->des0 = cpu_to_le32(curr_addr);
4122 else
4123 stmmac_set_desc_addr(priv, desc, curr_addr);
4124
4125 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4126 TSO_MAX_BUFF_SIZE : tmp_len;
4127
4128 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4129 0, 1,
4130 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4131 0, 0);
4132
4133 tmp_len -= TSO_MAX_BUFF_SIZE;
4134 }
4135 }
4136
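/* Publish the freshly prepared TX descriptors to the DMA engine: pick the
 * descriptor size in use on this queue and advance the channel tail pointer
 * past the last written entry.
 */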
4137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4138 {
4139 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4140 int desc_size;
4141
4142 if (likely(priv->extend_desc))
4143 desc_size = sizeof(struct dma_extended_desc);
4144 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4145 desc_size = sizeof(struct dma_edesc);
4146 else
4147 desc_size = sizeof(struct dma_desc);
4148
4149 	/* The own bit must be the last thing written when preparing the
4150 	 * descriptor, and a barrier is needed to make sure that
4151 	 * everything is coherent before granting the DMA engine.
4152 */
4153 wmb();
4154
4155 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4156 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4157 }
4158
4159 /**
4160 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4161 * @skb : the socket buffer
4162 * @dev : device pointer
4163 * Description: this is the transmit function that is called on TSO frames
4164 * (support available on GMAC4 and newer chips).
4165  * The diagram below shows the ring programming in case of TSO frames:
4166 *
4167 * First Descriptor
4168 * --------
4169 * | DES0 |---> buffer1 = L2/L3/L4 header
4170 * | DES1 |---> TCP Payload (can continue on next descr...)
4171 * | DES2 |---> buffer 1 and 2 len
4172 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4173 * --------
4174 * |
4175 * ...
4176 * |
4177 * --------
4178 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4179 * | DES1 | --|
4180 * | DES2 | --> buffer 1 and 2 len
4181 * | DES3 |
4182 * --------
4183 *
4184  * The MSS is fixed while TSO is enabled, so the TDES3 ctx field is not reprogrammed per frame.
4185 */
4186 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4187 {
4188 struct dma_desc *desc, *first, *mss_desc = NULL;
4189 struct stmmac_priv *priv = netdev_priv(dev);
4190 int tmp_pay_len = 0, first_tx, nfrags;
4191 unsigned int first_entry, tx_packets;
4192 struct stmmac_txq_stats *txq_stats;
4193 struct stmmac_tx_queue *tx_q;
4194 u32 pay_len, mss, queue;
4195 dma_addr_t tso_des, des;
4196 u8 proto_hdr_len, hdr;
4197 bool set_ic;
4198 int i;
4199
4200 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4201 	 *
4202 	 * Never let the HW insert the VLAN tag, since segments split by the
4203 	 * TSO engine would end up un-tagged by mistake.
4204 */
4205 if (skb_vlan_tag_present(skb)) {
4206 skb = __vlan_hwaccel_push_inside(skb);
4207 if (unlikely(!skb)) {
4208 priv->xstats.tx_dropped++;
4209 return NETDEV_TX_OK;
4210 }
4211 }
4212
4213 nfrags = skb_shinfo(skb)->nr_frags;
4214 queue = skb_get_queue_mapping(skb);
4215
4216 tx_q = &priv->dma_conf.tx_queue[queue];
4217 txq_stats = &priv->xstats.txq_stats[queue];
4218 first_tx = tx_q->cur_tx;
4219
4220 /* Compute header lengths */
4221 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4222 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4223 hdr = sizeof(struct udphdr);
4224 } else {
4225 proto_hdr_len = skb_tcp_all_headers(skb);
4226 hdr = tcp_hdrlen(skb);
4227 }
4228
4229 	/* Descriptor availability based on the threshold should be safe enough */
4230 if (unlikely(stmmac_tx_avail(priv, queue) <
4231 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4232 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4233 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4234 queue));
4235 /* This is a hard error, log it. */
4236 netdev_err(priv->dev,
4237 "%s: Tx Ring full when queue awake\n",
4238 __func__);
4239 }
4240 return NETDEV_TX_BUSY;
4241 }
4242
4243 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4244
4245 mss = skb_shinfo(skb)->gso_size;
4246
4247 /* set new MSS value if needed */
4248 if (mss != tx_q->mss) {
4249 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4250 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4251 else
4252 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4253
4254 stmmac_set_mss(priv, mss_desc, mss);
4255 tx_q->mss = mss;
4256 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4257 priv->dma_conf.dma_tx_size);
4258 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4259 }
4260
4261 if (netif_msg_tx_queued(priv)) {
4262 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4263 __func__, hdr, proto_hdr_len, pay_len, mss);
4264 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4265 skb->data_len);
4266 }
4267
4268 first_entry = tx_q->cur_tx;
4269 WARN_ON(tx_q->tx_skbuff[first_entry]);
4270
4271 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4272 desc = &tx_q->dma_entx[first_entry].basic;
4273 else
4274 desc = &tx_q->dma_tx[first_entry];
4275 first = desc;
4276
4277 /* first descriptor: fill Headers on Buf1 */
4278 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4279 DMA_TO_DEVICE);
4280 if (dma_mapping_error(priv->device, des))
4281 goto dma_map_err;
4282
4283 if (priv->dma_cap.addr64 <= 32) {
4284 first->des0 = cpu_to_le32(des);
4285
4286 /* Fill start of payload in buff2 of first descriptor */
4287 if (pay_len)
4288 first->des1 = cpu_to_le32(des + proto_hdr_len);
4289
4290 /* If needed take extra descriptors to fill the remaining payload */
4291 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4292 tso_des = des;
4293 } else {
4294 stmmac_set_desc_addr(priv, first, des);
4295 tmp_pay_len = pay_len;
4296 tso_des = des + proto_hdr_len;
4297 pay_len = 0;
4298 }
4299
4300 stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
4301
4302 /* In case two or more DMA transmit descriptors are allocated for this
4303 * non-paged SKB data, the DMA buffer address should be saved to
4304 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4305 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4306 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4307 * since the tail areas of the DMA buffer can be accessed by DMA engine
4308 * sooner or later.
4309 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4310 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4311 * this DMA buffer right after the DMA engine completely finishes the
4312 * full buffer transmission.
4313 */
4314 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4315 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4316 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4317 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4318
4319 /* Prepare fragments */
4320 for (i = 0; i < nfrags; i++) {
4321 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4322
4323 des = skb_frag_dma_map(priv->device, frag, 0,
4324 skb_frag_size(frag),
4325 DMA_TO_DEVICE);
4326 if (dma_mapping_error(priv->device, des))
4327 goto dma_map_err;
4328
4329 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4330 (i == nfrags - 1), queue);
4331
4332 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4333 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4334 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4335 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4336 }
4337
4338 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4339
4340 /* Only the last descriptor gets to point to the skb. */
4341 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4342 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4343
4344 /* Manage tx mitigation */
4345 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4346 tx_q->tx_count_frames += tx_packets;
4347
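	/* Decide whether this batch needs an interrupt on completion: always
	 * when a HW TX timestamp is requested, otherwise only roughly every
	 * tx_coal_frames packets, so completions stay coalesced and the TX
	 * timer cleans up whatever is left.
	 */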
4348 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4349 set_ic = true;
4350 else if (!priv->tx_coal_frames[queue])
4351 set_ic = false;
4352 else if (tx_packets > priv->tx_coal_frames[queue])
4353 set_ic = true;
4354 else if ((tx_q->tx_count_frames %
4355 priv->tx_coal_frames[queue]) < tx_packets)
4356 set_ic = true;
4357 else
4358 set_ic = false;
4359
4360 if (set_ic) {
4361 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4362 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4363 else
4364 desc = &tx_q->dma_tx[tx_q->cur_tx];
4365
4366 tx_q->tx_count_frames = 0;
4367 stmmac_set_tx_ic(priv, desc);
4368 }
4369
4370 /* We've used all descriptors we need for this skb, however,
4371 * advance cur_tx so that it references a fresh descriptor.
4372 * ndo_start_xmit will fill this descriptor the next time it's
4373 * called and stmmac_tx_clean may clean up to this descriptor.
4374 */
4375 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4376
4377 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4378 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4379 __func__);
4380 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4381 }
4382
4383 u64_stats_update_begin(&txq_stats->q_syncp);
4384 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4385 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4386 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4387 if (set_ic)
4388 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4389 u64_stats_update_end(&txq_stats->q_syncp);
4390
4391 if (priv->sarc_type)
4392 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4393
4394 skb_tx_timestamp(skb);
4395
4396 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4397 priv->hwts_tx_en)) {
4398 /* declare that device is doing timestamping */
4399 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4400 stmmac_enable_tx_timestamp(priv, first);
4401 }
4402
4403 /* Complete the first descriptor before granting the DMA */
4404 stmmac_prepare_tso_tx_desc(priv, first, 1,
4405 proto_hdr_len,
4406 pay_len,
4407 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4408 hdr / 4, (skb->len - proto_hdr_len));
4409
4410 /* If context desc is used to change MSS */
4411 if (mss_desc) {
4412 		/* Make sure that the first descriptor has been completely
4413 		 * written, including its OWN bit. This is because the MSS
4414 		 * descriptor actually sits before the first descriptor, so its
4415 		 * OWN bit must be the very last thing written.
4416 */
4417 dma_wmb();
4418 stmmac_set_tx_owner(priv, mss_desc);
4419 }
4420
4421 if (netif_msg_pktdata(priv)) {
4422 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4423 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4424 tx_q->cur_tx, first, nfrags);
4425 pr_info(">>> frame to be transmitted: ");
4426 print_pkt(skb->data, skb_headlen(skb));
4427 }
4428
4429 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4430
4431 stmmac_flush_tx_descriptors(priv, queue);
4432 stmmac_tx_timer_arm(priv, queue);
4433
4434 return NETDEV_TX_OK;
4435
4436 dma_map_err:
4437 dev_err(priv->device, "Tx dma map failed\n");
4438 dev_kfree_skb(skb);
4439 priv->xstats.tx_dropped++;
4440 return NETDEV_TX_OK;
4441 }
4442
4443 /**
4444 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4445 * @skb: socket buffer to check
4446 *
4447 * Check if a packet has an ethertype that will trigger the IP header checks
4448 * and IP/TCP checksum engine of the stmmac core.
4449 *
4450 * Return: true if the ethertype can trigger the checksum engine, false
4451 * otherwise
4452 */
4453 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4454 {
4455 int depth = 0;
4456 __be16 proto;
4457
4458 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4459 &depth);
4460
4461 return (depth <= ETH_HLEN) &&
4462 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4463 }
4464
4465 /**
4466 * stmmac_xmit - Tx entry point of the driver
4467 * @skb : the socket buffer
4468 * @dev : device pointer
4469 * Description : this is the tx entry point of the driver.
4470 * It programs the chain or the ring and supports oversized frames
4471  * and the SG feature.
4472 */
4473 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4474 {
4475 unsigned int first_entry, tx_packets, enh_desc;
4476 struct stmmac_priv *priv = netdev_priv(dev);
4477 unsigned int nopaged_len = skb_headlen(skb);
4478 int i, csum_insertion = 0, is_jumbo = 0;
4479 u32 queue = skb_get_queue_mapping(skb);
4480 int nfrags = skb_shinfo(skb)->nr_frags;
4481 int gso = skb_shinfo(skb)->gso_type;
4482 struct stmmac_txq_stats *txq_stats;
4483 struct dma_edesc *tbs_desc = NULL;
4484 struct dma_desc *desc, *first;
4485 struct stmmac_tx_queue *tx_q;
4486 bool has_vlan, set_ic;
4487 int entry, first_tx;
4488 dma_addr_t des;
4489
4490 tx_q = &priv->dma_conf.tx_queue[queue];
4491 txq_stats = &priv->xstats.txq_stats[queue];
4492 first_tx = tx_q->cur_tx;
4493
4494 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4495 stmmac_disable_eee_mode(priv);
4496
4497 /* Manage oversized TCP frames for GMAC4 device */
4498 if (skb_is_gso(skb) && priv->tso) {
4499 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4500 return stmmac_tso_xmit(skb, dev);
4501 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4502 return stmmac_tso_xmit(skb, dev);
4503 }
4504
4505 if (priv->est && priv->est->enable &&
4506 priv->est->max_sdu[queue] &&
4507 skb->len > priv->est->max_sdu[queue]){
4508 priv->xstats.max_sdu_txq_drop[queue]++;
4509 goto max_sdu_err;
4510 }
4511
4512 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4513 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4514 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4515 queue));
4516 /* This is a hard error, log it. */
4517 netdev_err(priv->dev,
4518 "%s: Tx Ring full when queue awake\n",
4519 __func__);
4520 }
4521 return NETDEV_TX_BUSY;
4522 }
4523
4524 /* Check if VLAN can be inserted by HW */
4525 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4526
4527 entry = tx_q->cur_tx;
4528 first_entry = entry;
4529 WARN_ON(tx_q->tx_skbuff[first_entry]);
4530
4531 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4532 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4533 * queues. In that case, checksum offloading for those queues that don't
4534 	 * support tx coe needs to fall back to software checksum calculation.
4535 *
4536 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4537 * also have to be checksummed in software.
4538 */
4539 if (csum_insertion &&
4540 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4541 !stmmac_has_ip_ethertype(skb))) {
4542 if (unlikely(skb_checksum_help(skb)))
4543 goto dma_map_err;
4544 csum_insertion = !csum_insertion;
4545 }
4546
4547 if (likely(priv->extend_desc))
4548 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4549 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4550 desc = &tx_q->dma_entx[entry].basic;
4551 else
4552 desc = tx_q->dma_tx + entry;
4553
4554 first = desc;
4555
4556 if (has_vlan)
4557 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4558
4559 enh_desc = priv->plat->enh_desc;
4560 /* To program the descriptors according to the size of the frame */
4561 if (enh_desc)
4562 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4563
4564 if (unlikely(is_jumbo)) {
4565 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4566 if (unlikely(entry < 0) && (entry != -EINVAL))
4567 goto dma_map_err;
4568 }
4569
4570 for (i = 0; i < nfrags; i++) {
4571 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4572 int len = skb_frag_size(frag);
4573 bool last_segment = (i == (nfrags - 1));
4574
4575 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4576 WARN_ON(tx_q->tx_skbuff[entry]);
4577
4578 if (likely(priv->extend_desc))
4579 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4580 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4581 desc = &tx_q->dma_entx[entry].basic;
4582 else
4583 desc = tx_q->dma_tx + entry;
4584
4585 des = skb_frag_dma_map(priv->device, frag, 0, len,
4586 DMA_TO_DEVICE);
4587 if (dma_mapping_error(priv->device, des))
4588 goto dma_map_err; /* should reuse desc w/o issues */
4589
4590 tx_q->tx_skbuff_dma[entry].buf = des;
4591
4592 stmmac_set_desc_addr(priv, desc, des);
4593
4594 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4595 tx_q->tx_skbuff_dma[entry].len = len;
4596 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4597 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4598
4599 /* Prepare the descriptor and set the own bit too */
4600 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4601 priv->mode, 1, last_segment, skb->len);
4602 }
4603
4604 /* Only the last descriptor gets to point to the skb. */
4605 tx_q->tx_skbuff[entry] = skb;
4606 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4607
4608 	/* According to the coalesce parameter, the IC bit for the latest
4609 	 * segment is reset and the timer is re-started to clean the TX status.
4610 	 * This approach takes care of the fragments: desc is the first
4611 	 * element in case of no SG.
4612 */
4613 tx_packets = (entry + 1) - first_tx;
4614 tx_q->tx_count_frames += tx_packets;
4615
4616 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4617 set_ic = true;
4618 else if (!priv->tx_coal_frames[queue])
4619 set_ic = false;
4620 else if (tx_packets > priv->tx_coal_frames[queue])
4621 set_ic = true;
4622 else if ((tx_q->tx_count_frames %
4623 priv->tx_coal_frames[queue]) < tx_packets)
4624 set_ic = true;
4625 else
4626 set_ic = false;
4627
4628 if (set_ic) {
4629 if (likely(priv->extend_desc))
4630 desc = &tx_q->dma_etx[entry].basic;
4631 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4632 desc = &tx_q->dma_entx[entry].basic;
4633 else
4634 desc = &tx_q->dma_tx[entry];
4635
4636 tx_q->tx_count_frames = 0;
4637 stmmac_set_tx_ic(priv, desc);
4638 }
4639
4640 /* We've used all descriptors we need for this skb, however,
4641 * advance cur_tx so that it references a fresh descriptor.
4642 * ndo_start_xmit will fill this descriptor the next time it's
4643 * called and stmmac_tx_clean may clean up to this descriptor.
4644 */
4645 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4646 tx_q->cur_tx = entry;
4647
4648 if (netif_msg_pktdata(priv)) {
4649 netdev_dbg(priv->dev,
4650 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4651 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4652 entry, first, nfrags);
4653
4654 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4655 print_pkt(skb->data, skb->len);
4656 }
4657
4658 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4659 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4660 __func__);
4661 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4662 }
4663
4664 u64_stats_update_begin(&txq_stats->q_syncp);
4665 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4666 if (set_ic)
4667 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4668 u64_stats_update_end(&txq_stats->q_syncp);
4669
4670 if (priv->sarc_type)
4671 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4672
4673 skb_tx_timestamp(skb);
4674
4675 /* Ready to fill the first descriptor and set the OWN bit w/o any
4676 * problems because all the descriptors are actually ready to be
4677 * passed to the DMA engine.
4678 */
4679 if (likely(!is_jumbo)) {
4680 bool last_segment = (nfrags == 0);
4681
4682 des = dma_map_single(priv->device, skb->data,
4683 nopaged_len, DMA_TO_DEVICE);
4684 if (dma_mapping_error(priv->device, des))
4685 goto dma_map_err;
4686
4687 tx_q->tx_skbuff_dma[first_entry].buf = des;
4688 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4689 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4690
4691 stmmac_set_desc_addr(priv, first, des);
4692
4693 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4694 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4695
4696 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4697 priv->hwts_tx_en)) {
4698 /* declare that device is doing timestamping */
4699 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4700 stmmac_enable_tx_timestamp(priv, first);
4701 }
4702
4703 /* Prepare the first descriptor setting the OWN bit too */
4704 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4705 csum_insertion, priv->mode, 0, last_segment,
4706 skb->len);
4707 }
4708
4709 if (tx_q->tbs & STMMAC_TBS_EN) {
4710 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4711
4712 tbs_desc = &tx_q->dma_entx[first_entry];
4713 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4714 }
4715
4716 stmmac_set_tx_owner(priv, first);
4717
4718 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4719
4720 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4721
4722 stmmac_flush_tx_descriptors(priv, queue);
4723 stmmac_tx_timer_arm(priv, queue);
4724
4725 return NETDEV_TX_OK;
4726
4727 dma_map_err:
4728 netdev_err(priv->dev, "Tx DMA map failed\n");
4729 max_sdu_err:
4730 dev_kfree_skb(skb);
4731 priv->xstats.tx_dropped++;
4732 return NETDEV_TX_OK;
4733 }
4734
4735 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4736 {
4737 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4738 __be16 vlan_proto = veth->h_vlan_proto;
4739 u16 vlanid;
4740
4741 if ((vlan_proto == htons(ETH_P_8021Q) &&
4742 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4743 (vlan_proto == htons(ETH_P_8021AD) &&
4744 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4745 /* pop the vlan tag */
4746 vlanid = ntohs(veth->h_vlan_TCI);
4747 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4748 skb_pull(skb, VLAN_HLEN);
4749 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4750 }
4751 }
4752
4753 /**
4754  * stmmac_rx_refill - refill the used RX buffers
4755  * @priv: driver private structure
4756  * @queue: RX queue index
4757  * Description : this is to reallocate the RX buffers for the reception
4758  * process that is based on zero-copy.
4759 */
4760 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4761 {
4762 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4763 int dirty = stmmac_rx_dirty(priv, queue);
4764 unsigned int entry = rx_q->dirty_rx;
4765 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4766
4767 if (priv->dma_cap.host_dma_width <= 32)
4768 gfp |= GFP_DMA32;
4769
4770 while (dirty-- > 0) {
4771 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4772 struct dma_desc *p;
4773 bool use_rx_wd;
4774
4775 if (priv->extend_desc)
4776 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4777 else
4778 p = rx_q->dma_rx + entry;
4779
4780 if (!buf->page) {
4781 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4782 if (!buf->page)
4783 break;
4784 }
4785
4786 if (priv->sph && !buf->sec_page) {
4787 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4788 if (!buf->sec_page)
4789 break;
4790
4791 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4792 }
4793
4794 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4795
4796 stmmac_set_desc_addr(priv, p, buf->addr);
4797 if (priv->sph)
4798 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4799 else
4800 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4801 stmmac_refill_desc3(priv, rx_q, p);
4802
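		/* RX interrupt mitigation: when frame-count coalescing and the
		 * RIWT watchdog are in use, most refilled descriptors are
		 * armed without interrupt-on-completion so that the watchdog
		 * timer, not every single frame, raises the RX interrupt.
		 */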
4803 rx_q->rx_count_frames++;
4804 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4805 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4806 rx_q->rx_count_frames = 0;
4807
4808 use_rx_wd = !priv->rx_coal_frames[queue];
4809 use_rx_wd |= rx_q->rx_count_frames > 0;
4810 if (!priv->use_riwt)
4811 use_rx_wd = false;
4812
4813 dma_wmb();
4814 stmmac_set_rx_owner(priv, p, use_rx_wd);
4815
4816 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4817 }
4818 rx_q->dirty_rx = entry;
4819 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4820 (rx_q->dirty_rx * sizeof(struct dma_desc));
4821 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4822 }
4823
4824 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4825 struct dma_desc *p,
4826 int status, unsigned int len)
4827 {
4828 unsigned int plen = 0, hlen = 0;
4829 int coe = priv->hw->rx_csum;
4830
4831 /* Not first descriptor, buffer is always zero */
4832 if (priv->sph && len)
4833 return 0;
4834
4835 /* First descriptor, get split header length */
4836 stmmac_get_rx_header_len(priv, p, &hlen);
4837 if (priv->sph && hlen) {
4838 priv->xstats.rx_split_hdr_pkt_n++;
4839 return hlen;
4840 }
4841
4842 /* First descriptor, not last descriptor and not split header */
4843 if (status & rx_not_ls)
4844 return priv->dma_conf.dma_buf_sz;
4845
4846 plen = stmmac_get_rx_frame_len(priv, p, coe);
4847
4848 /* First descriptor and last descriptor and not split header */
4849 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4850 }
4851
4852 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4853 struct dma_desc *p,
4854 int status, unsigned int len)
4855 {
4856 int coe = priv->hw->rx_csum;
4857 unsigned int plen = 0;
4858
4859 /* Not split header, buffer is not available */
4860 if (!priv->sph)
4861 return 0;
4862
4863 /* Not last descriptor */
4864 if (status & rx_not_ls)
4865 return priv->dma_conf.dma_buf_sz;
4866
4867 plen = stmmac_get_rx_frame_len(priv, p, coe);
4868
4869 /* Last descriptor */
4870 return plen - len;
4871 }
4872
4873 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4874 struct xdp_frame *xdpf, bool dma_map)
4875 {
4876 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4877 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4878 unsigned int entry = tx_q->cur_tx;
4879 struct dma_desc *tx_desc;
4880 dma_addr_t dma_addr;
4881 bool set_ic;
4882
4883 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4884 return STMMAC_XDP_CONSUMED;
4885
4886 if (priv->est && priv->est->enable &&
4887 priv->est->max_sdu[queue] &&
4888 xdpf->len > priv->est->max_sdu[queue]) {
4889 priv->xstats.max_sdu_txq_drop[queue]++;
4890 return STMMAC_XDP_CONSUMED;
4891 }
4892
4893 if (likely(priv->extend_desc))
4894 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4895 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4896 tx_desc = &tx_q->dma_entx[entry].basic;
4897 else
4898 tx_desc = tx_q->dma_tx + entry;
4899
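	/* Two buffer origins: frames coming through ndo_xdp_xmit (dma_map set)
	 * live outside the driver and must be freshly DMA-mapped, while
	 * XDP_TX frames reuse our own page_pool pages, which are already
	 * mapped and only need a sync for the device.
	 */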
4900 if (dma_map) {
4901 dma_addr = dma_map_single(priv->device, xdpf->data,
4902 xdpf->len, DMA_TO_DEVICE);
4903 if (dma_mapping_error(priv->device, dma_addr))
4904 return STMMAC_XDP_CONSUMED;
4905
4906 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4907 } else {
4908 struct page *page = virt_to_page(xdpf->data);
4909
4910 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4911 xdpf->headroom;
4912 dma_sync_single_for_device(priv->device, dma_addr,
4913 xdpf->len, DMA_BIDIRECTIONAL);
4914
4915 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4916 }
4917
4918 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4919 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4920 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4921 tx_q->tx_skbuff_dma[entry].last_segment = true;
4922 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4923
4924 tx_q->xdpf[entry] = xdpf;
4925
4926 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4927
4928 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4929 true, priv->mode, true, true,
4930 xdpf->len);
4931
4932 tx_q->tx_count_frames++;
4933
4934 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4935 set_ic = true;
4936 else
4937 set_ic = false;
4938
4939 if (set_ic) {
4940 tx_q->tx_count_frames = 0;
4941 stmmac_set_tx_ic(priv, tx_desc);
4942 u64_stats_update_begin(&txq_stats->q_syncp);
4943 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4944 u64_stats_update_end(&txq_stats->q_syncp);
4945 }
4946
4947 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4948
4949 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4950 tx_q->cur_tx = entry;
4951
4952 return STMMAC_XDP_TX;
4953 }
4954
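/* Map the current CPU to a TX queue for XDP transmission, wrapping around
 * when there are more CPUs than TX queues.
 */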
4955 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4956 int cpu)
4957 {
4958 int index = cpu;
4959
4960 if (unlikely(index < 0))
4961 index = 0;
4962
4963 while (index >= priv->plat->tx_queues_to_use)
4964 index -= priv->plat->tx_queues_to_use;
4965
4966 return index;
4967 }
4968
4969 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4970 struct xdp_buff *xdp)
4971 {
4972 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4973 int cpu = smp_processor_id();
4974 struct netdev_queue *nq;
4975 int queue;
4976 int res;
4977
4978 if (unlikely(!xdpf))
4979 return STMMAC_XDP_CONSUMED;
4980
4981 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4982 nq = netdev_get_tx_queue(priv->dev, queue);
4983
4984 __netif_tx_lock(nq, cpu);
4985 /* Avoids TX time-out as we are sharing with slow path */
4986 txq_trans_cond_update(nq);
4987
4988 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4989 if (res == STMMAC_XDP_TX)
4990 stmmac_flush_tx_descriptors(priv, queue);
4991
4992 __netif_tx_unlock(nq);
4993
4994 return res;
4995 }
4996
4997 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4998 struct bpf_prog *prog,
4999 struct xdp_buff *xdp)
5000 {
5001 u32 act;
5002 int res;
5003
5004 act = bpf_prog_run_xdp(prog, xdp);
5005 switch (act) {
5006 case XDP_PASS:
5007 res = STMMAC_XDP_PASS;
5008 break;
5009 case XDP_TX:
5010 res = stmmac_xdp_xmit_back(priv, xdp);
5011 break;
5012 case XDP_REDIRECT:
5013 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5014 res = STMMAC_XDP_CONSUMED;
5015 else
5016 res = STMMAC_XDP_REDIRECT;
5017 break;
5018 default:
5019 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5020 fallthrough;
5021 case XDP_ABORTED:
5022 trace_xdp_exception(priv->dev, prog, act);
5023 fallthrough;
5024 case XDP_DROP:
5025 res = STMMAC_XDP_CONSUMED;
5026 break;
5027 }
5028
5029 return res;
5030 }
5031
5032 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5033 struct xdp_buff *xdp)
5034 {
5035 struct bpf_prog *prog;
5036 int res;
5037
5038 prog = READ_ONCE(priv->xdp_prog);
5039 if (!prog) {
5040 res = STMMAC_XDP_PASS;
5041 goto out;
5042 }
5043
5044 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5045 out:
5046 return ERR_PTR(-res);
5047 }
5048
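/* After an RX pass, arm the TX timer if any frame was queued via XDP_TX and
 * flush any redirected frames batched by xdp_do_redirect().
 */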
5049 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5050 int xdp_status)
5051 {
5052 int cpu = smp_processor_id();
5053 int queue;
5054
5055 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5056
5057 if (xdp_status & STMMAC_XDP_TX)
5058 stmmac_tx_timer_arm(priv, queue);
5059
5060 if (xdp_status & STMMAC_XDP_REDIRECT)
5061 xdp_do_flush();
5062 }
5063
5064 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5065 struct xdp_buff *xdp)
5066 {
5067 unsigned int metasize = xdp->data - xdp->data_meta;
5068 unsigned int datasize = xdp->data_end - xdp->data;
5069 struct sk_buff *skb;
5070
5071 skb = napi_alloc_skb(&ch->rxtx_napi,
5072 xdp->data_end - xdp->data_hard_start);
5073 if (unlikely(!skb))
5074 return NULL;
5075
5076 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5077 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5078 if (metasize)
5079 skb_metadata_set(skb, metasize);
5080
5081 return skb;
5082 }
5083
5084 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5085 struct dma_desc *p, struct dma_desc *np,
5086 struct xdp_buff *xdp)
5087 {
5088 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5089 struct stmmac_channel *ch = &priv->channel[queue];
5090 unsigned int len = xdp->data_end - xdp->data;
5091 enum pkt_hash_types hash_type;
5092 int coe = priv->hw->rx_csum;
5093 struct sk_buff *skb;
5094 u32 hash;
5095
5096 skb = stmmac_construct_skb_zc(ch, xdp);
5097 if (!skb) {
5098 priv->xstats.rx_dropped++;
5099 return;
5100 }
5101
5102 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5103 if (priv->hw->hw_vlan_en)
5104 /* MAC level stripping. */
5105 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5106 else
5107 /* Driver level stripping. */
5108 stmmac_rx_vlan(priv->dev, skb);
5109 skb->protocol = eth_type_trans(skb, priv->dev);
5110
5111 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5112 skb_checksum_none_assert(skb);
5113 else
5114 skb->ip_summed = CHECKSUM_UNNECESSARY;
5115
5116 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5117 skb_set_hash(skb, hash, hash_type);
5118
5119 skb_record_rx_queue(skb, queue);
5120 napi_gro_receive(&ch->rxtx_napi, skb);
5121
5122 u64_stats_update_begin(&rxq_stats->napi_syncp);
5123 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5124 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5125 u64_stats_update_end(&rxq_stats->napi_syncp);
5126 }
5127
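/* Refill the zero-copy (XSK) RX ring with buffers from the XSK pool, up to
 * 'budget' entries, and publish the new descriptors by moving the RX tail
 * pointer. Returns false if the pool ran out of buffers.
 */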
5128 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5129 {
5130 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5131 unsigned int entry = rx_q->dirty_rx;
5132 struct dma_desc *rx_desc = NULL;
5133 bool ret = true;
5134
5135 budget = min(budget, stmmac_rx_dirty(priv, queue));
5136
5137 while (budget-- > 0 && entry != rx_q->cur_rx) {
5138 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5139 dma_addr_t dma_addr;
5140 bool use_rx_wd;
5141
5142 if (!buf->xdp) {
5143 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5144 if (!buf->xdp) {
5145 ret = false;
5146 break;
5147 }
5148 }
5149
5150 if (priv->extend_desc)
5151 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5152 else
5153 rx_desc = rx_q->dma_rx + entry;
5154
5155 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5156 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5157 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5158 stmmac_refill_desc3(priv, rx_q, rx_desc);
5159
5160 rx_q->rx_count_frames++;
5161 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5162 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5163 rx_q->rx_count_frames = 0;
5164
5165 use_rx_wd = !priv->rx_coal_frames[queue];
5166 use_rx_wd |= rx_q->rx_count_frames > 0;
5167 if (!priv->use_riwt)
5168 use_rx_wd = false;
5169
5170 dma_wmb();
5171 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5172
5173 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5174 }
5175
5176 if (rx_desc) {
5177 rx_q->dirty_rx = entry;
5178 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5179 (rx_q->dirty_rx * sizeof(struct dma_desc));
5180 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5181 }
5182
5183 return ret;
5184 }
5185
5186 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5187 {
5188 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5189 * to represent incoming packet, whereas cb field in the same structure
5190 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5191 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5192 */
5193 return (struct stmmac_xdp_buff *)xdp;
5194 }
5195
5196 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5197 {
5198 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5199 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5200 unsigned int count = 0, error = 0, len = 0;
5201 int dirty = stmmac_rx_dirty(priv, queue);
5202 unsigned int next_entry = rx_q->cur_rx;
5203 u32 rx_errors = 0, rx_dropped = 0;
5204 unsigned int desc_size;
5205 struct bpf_prog *prog;
5206 bool failure = false;
5207 int xdp_status = 0;
5208 int status = 0;
5209
5210 if (netif_msg_rx_status(priv)) {
5211 void *rx_head;
5212
5213 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5214 if (priv->extend_desc) {
5215 rx_head = (void *)rx_q->dma_erx;
5216 desc_size = sizeof(struct dma_extended_desc);
5217 } else {
5218 rx_head = (void *)rx_q->dma_rx;
5219 desc_size = sizeof(struct dma_desc);
5220 }
5221
5222 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5223 rx_q->dma_rx_phy, desc_size);
5224 }
5225 while (count < limit) {
5226 struct stmmac_rx_buffer *buf;
5227 struct stmmac_xdp_buff *ctx;
5228 unsigned int buf1_len = 0;
5229 struct dma_desc *np, *p;
5230 int entry;
5231 int res;
5232
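		/* A frame may span several descriptors and therefore several
		 * NAPI polls; restore the error/length state saved by the
		 * previous run.
		 */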
5233 if (!count && rx_q->state_saved) {
5234 error = rx_q->state.error;
5235 len = rx_q->state.len;
5236 } else {
5237 rx_q->state_saved = false;
5238 error = 0;
5239 len = 0;
5240 }
5241
5242 if (count >= limit)
5243 break;
5244
5245 read_again:
5246 buf1_len = 0;
5247 entry = next_entry;
5248 buf = &rx_q->buf_pool[entry];
5249
5250 if (dirty >= STMMAC_RX_FILL_BATCH) {
5251 failure = failure ||
5252 !stmmac_rx_refill_zc(priv, queue, dirty);
5253 dirty = 0;
5254 }
5255
5256 if (priv->extend_desc)
5257 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5258 else
5259 p = rx_q->dma_rx + entry;
5260
5261 /* read the status of the incoming frame */
5262 status = stmmac_rx_status(priv, &priv->xstats, p);
5263 /* check if managed by the DMA otherwise go ahead */
5264 if (unlikely(status & dma_own))
5265 break;
5266
5267 /* Prefetch the next RX descriptor */
5268 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5269 priv->dma_conf.dma_rx_size);
5270 next_entry = rx_q->cur_rx;
5271
5272 if (priv->extend_desc)
5273 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5274 else
5275 np = rx_q->dma_rx + next_entry;
5276
5277 prefetch(np);
5278
5279 /* Ensure a valid XSK buffer before proceeding */
5280 if (!buf->xdp)
5281 break;
5282
5283 if (priv->extend_desc)
5284 stmmac_rx_extended_status(priv, &priv->xstats,
5285 rx_q->dma_erx + entry);
5286 if (unlikely(status == discard_frame)) {
5287 xsk_buff_free(buf->xdp);
5288 buf->xdp = NULL;
5289 dirty++;
5290 error = 1;
5291 if (!priv->hwts_rx_en)
5292 rx_errors++;
5293 }
5294
5295 if (unlikely(error && (status & rx_not_ls)))
5296 goto read_again;
5297 if (unlikely(error)) {
5298 count++;
5299 continue;
5300 }
5301
5302 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5303 if (likely(status & rx_not_ls)) {
5304 xsk_buff_free(buf->xdp);
5305 buf->xdp = NULL;
5306 dirty++;
5307 count++;
5308 goto read_again;
5309 }
5310
5311 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5312 ctx->priv = priv;
5313 ctx->desc = p;
5314 ctx->ndesc = np;
5315
5316 /* XDP ZC frames only support primary buffers for now */
5317 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5318 len += buf1_len;
5319
5320 /* ACS is disabled; strip manually. */
5321 if (likely(!(status & rx_not_ls))) {
5322 buf1_len -= ETH_FCS_LEN;
5323 len -= ETH_FCS_LEN;
5324 }
5325
5326 /* RX buffer is good and fits into an XSK pool buffer */
5327 buf->xdp->data_end = buf->xdp->data + buf1_len;
5328 xsk_buff_dma_sync_for_cpu(buf->xdp);
5329
5330 prog = READ_ONCE(priv->xdp_prog);
5331 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5332
5333 switch (res) {
5334 case STMMAC_XDP_PASS:
5335 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5336 xsk_buff_free(buf->xdp);
5337 break;
5338 case STMMAC_XDP_CONSUMED:
5339 xsk_buff_free(buf->xdp);
5340 rx_dropped++;
5341 break;
5342 case STMMAC_XDP_TX:
5343 case STMMAC_XDP_REDIRECT:
5344 xdp_status |= res;
5345 break;
5346 }
5347
5348 buf->xdp = NULL;
5349 dirty++;
5350 count++;
5351 }
5352
5353 if (status & rx_not_ls) {
5354 rx_q->state_saved = true;
5355 rx_q->state.error = error;
5356 rx_q->state.len = len;
5357 }
5358
5359 stmmac_finalize_xdp_rx(priv, xdp_status);
5360
5361 u64_stats_update_begin(&rxq_stats->napi_syncp);
5362 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5363 u64_stats_update_end(&rxq_stats->napi_syncp);
5364
5365 priv->xstats.rx_dropped += rx_dropped;
5366 priv->xstats.rx_errors += rx_errors;
5367
5368 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5369 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5370 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5371 else
5372 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5373
5374 return (int)count;
5375 }
5376
5377 return failure ? limit : (int)count;
5378 }
5379
5380 /**
5381 * stmmac_rx - manage the receive process
5382 * @priv: driver private structure
5383  * @limit: napi budget
5384 * @queue: RX queue index.
5385  * Description: this is the function called by the napi poll method.
5386 * It gets all the frames inside the ring.
5387 */
5388 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5389 {
5390 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5391 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5392 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5393 struct stmmac_channel *ch = &priv->channel[queue];
5394 unsigned int count = 0, error = 0, len = 0;
5395 int status = 0, coe = priv->hw->rx_csum;
5396 unsigned int next_entry = rx_q->cur_rx;
5397 enum dma_data_direction dma_dir;
5398 unsigned int desc_size;
5399 struct sk_buff *skb = NULL;
5400 struct stmmac_xdp_buff ctx;
5401 int xdp_status = 0;
5402 int buf_sz;
5403
5404 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
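	/* Round the DMA buffer size up to a whole number of pages; this is
	 * the frame size advertised to the XDP buff below.
	 */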
5405 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5406 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5407
5408 if (netif_msg_rx_status(priv)) {
5409 void *rx_head;
5410
5411 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5412 if (priv->extend_desc) {
5413 rx_head = (void *)rx_q->dma_erx;
5414 desc_size = sizeof(struct dma_extended_desc);
5415 } else {
5416 rx_head = (void *)rx_q->dma_rx;
5417 desc_size = sizeof(struct dma_desc);
5418 }
5419
5420 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5421 rx_q->dma_rx_phy, desc_size);
5422 }
5423 while (count < limit) {
5424 unsigned int buf1_len = 0, buf2_len = 0;
5425 enum pkt_hash_types hash_type;
5426 struct stmmac_rx_buffer *buf;
5427 struct dma_desc *np, *p;
5428 int entry;
5429 u32 hash;
5430
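		/* Frames split over multiple descriptors may also be split
		 * across NAPI polls: resume from the skb/error/len saved by
		 * the previous run.
		 */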
5431 if (!count && rx_q->state_saved) {
5432 skb = rx_q->state.skb;
5433 error = rx_q->state.error;
5434 len = rx_q->state.len;
5435 } else {
5436 rx_q->state_saved = false;
5437 skb = NULL;
5438 error = 0;
5439 len = 0;
5440 }
5441
5442 read_again:
5443 if (count >= limit)
5444 break;
5445
5446 buf1_len = 0;
5447 buf2_len = 0;
5448 entry = next_entry;
5449 buf = &rx_q->buf_pool[entry];
5450
5451 if (priv->extend_desc)
5452 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5453 else
5454 p = rx_q->dma_rx + entry;
5455
5456 /* read the status of the incoming frame */
5457 status = stmmac_rx_status(priv, &priv->xstats, p);
5458 /* check if managed by the DMA otherwise go ahead */
5459 if (unlikely(status & dma_own))
5460 break;
5461
5462 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5463 priv->dma_conf.dma_rx_size);
5464 next_entry = rx_q->cur_rx;
5465
5466 if (priv->extend_desc)
5467 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5468 else
5469 np = rx_q->dma_rx + next_entry;
5470
5471 prefetch(np);
5472
5473 if (priv->extend_desc)
5474 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5475 if (unlikely(status == discard_frame)) {
5476 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5477 buf->page = NULL;
5478 error = 1;
5479 if (!priv->hwts_rx_en)
5480 rx_errors++;
5481 }
5482
5483 if (unlikely(error && (status & rx_not_ls)))
5484 goto read_again;
5485 if (unlikely(error)) {
5486 dev_kfree_skb(skb);
5487 skb = NULL;
5488 count++;
5489 continue;
5490 }
5491
5492 /* Buffer is good. Go on. */
5493
5494 prefetch(page_address(buf->page) + buf->page_offset);
5495 if (buf->sec_page)
5496 prefetch(page_address(buf->sec_page));
5497
5498 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5499 len += buf1_len;
5500 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5501 len += buf2_len;
5502
5503 /* ACS is disabled; strip manually. */
5504 if (likely(!(status & rx_not_ls))) {
5505 if (buf2_len) {
5506 buf2_len -= ETH_FCS_LEN;
5507 len -= ETH_FCS_LEN;
5508 } else if (buf1_len) {
5509 buf1_len -= ETH_FCS_LEN;
5510 len -= ETH_FCS_LEN;
5511 }
5512 }
5513
5514 if (!skb) {
5515 unsigned int pre_len, sync_len;
5516
5517 dma_sync_single_for_cpu(priv->device, buf->addr,
5518 buf1_len, dma_dir);
5519
5520 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5521 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5522 buf->page_offset, buf1_len, true);
5523
5524 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5525 buf->page_offset;
5526
5527 ctx.priv = priv;
5528 ctx.desc = p;
5529 ctx.ndesc = np;
5530
5531 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5532 /* Due to xdp_adjust_tail, the DMA sync for_device must
5533  * cover the maximum length the CPU touched.
5534  */
5535 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5536 buf->page_offset;
5537 sync_len = max(sync_len, pre_len);
5538
5539 /* For non-XDP_PASS verdicts */
5540 if (IS_ERR(skb)) {
5541 unsigned int xdp_res = -PTR_ERR(skb);
5542
5543 if (xdp_res & STMMAC_XDP_CONSUMED) {
5544 page_pool_put_page(rx_q->page_pool,
5545 virt_to_head_page(ctx.xdp.data),
5546 sync_len, true);
5547 buf->page = NULL;
5548 rx_dropped++;
5549
5550 /* Clear skb, as it was set to an
5551  * error-encoded verdict by the XDP program.
5552  */
5553 skb = NULL;
5554
5555 if (unlikely((status & rx_not_ls)))
5556 goto read_again;
5557
5558 count++;
5559 continue;
5560 } else if (xdp_res & (STMMAC_XDP_TX |
5561 STMMAC_XDP_REDIRECT)) {
5562 xdp_status |= xdp_res;
5563 buf->page = NULL;
5564 skb = NULL;
5565 count++;
5566 continue;
5567 }
5568 }
5569 }
5570
5571 if (!skb) {
5572 /* XDP program may expand or reduce tail */
5573 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5574
5575 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5576 if (!skb) {
5577 rx_dropped++;
5578 count++;
5579 goto drain_data;
5580 }
5581
5582 /* XDP program may adjust header */
5583 skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5584 skb_put(skb, buf1_len);
5585
5586 /* Data payload copied into SKB, page ready for recycle */
5587 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5588 buf->page = NULL;
5589 } else if (buf1_len) {
5590 dma_sync_single_for_cpu(priv->device, buf->addr,
5591 buf1_len, dma_dir);
5592 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5593 buf->page, buf->page_offset, buf1_len,
5594 priv->dma_conf.dma_buf_sz);
5595
5596 /* Data payload appended into SKB */
5597 skb_mark_for_recycle(skb);
5598 buf->page = NULL;
5599 }
5600
5601 if (buf2_len) {
5602 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5603 buf2_len, dma_dir);
5604 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5605 buf->sec_page, 0, buf2_len,
5606 priv->dma_conf.dma_buf_sz);
5607
5608 /* Data payload appended into SKB */
5609 skb_mark_for_recycle(skb);
5610 buf->sec_page = NULL;
5611 }
5612
5613 drain_data:
5614 if (likely(status & rx_not_ls))
5615 goto read_again;
5616 if (!skb)
5617 continue;
5618
5619 /* Got entire packet into SKB. Finish it. */
5620
5621 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5622
5623 if (priv->hw->hw_vlan_en)
5624 /* MAC level stripping. */
5625 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5626 else
5627 /* Driver level stripping. */
5628 stmmac_rx_vlan(priv->dev, skb);
5629
5630 skb->protocol = eth_type_trans(skb, priv->dev);
5631
5632 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5633 skb_checksum_none_assert(skb);
5634 else
5635 skb->ip_summed = CHECKSUM_UNNECESSARY;
5636
5637 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5638 skb_set_hash(skb, hash, hash_type);
5639
5640 skb_record_rx_queue(skb, queue);
5641 napi_gro_receive(&ch->rx_napi, skb);
5642 skb = NULL;
5643
5644 rx_packets++;
5645 rx_bytes += len;
5646 count++;
5647 }
5648
5649 if (status & rx_not_ls || skb) {
5650 rx_q->state_saved = true;
5651 rx_q->state.skb = skb;
5652 rx_q->state.error = error;
5653 rx_q->state.len = len;
5654 }
5655
5656 stmmac_finalize_xdp_rx(priv, xdp_status);
5657
5658 stmmac_rx_refill(priv, queue);
5659
5660 u64_stats_update_begin(&rxq_stats->napi_syncp);
5661 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5662 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5663 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5664 u64_stats_update_end(&rxq_stats->napi_syncp);
5665
5666 priv->xstats.rx_dropped += rx_dropped;
5667 priv->xstats.rx_errors += rx_errors;
5668
5669 return count;
5670 }
5671
5672 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5673 {
5674 struct stmmac_channel *ch =
5675 container_of(napi, struct stmmac_channel, rx_napi);
5676 struct stmmac_priv *priv = ch->priv_data;
5677 struct stmmac_rxq_stats *rxq_stats;
5678 u32 chan = ch->index;
5679 int work_done;
5680
5681 rxq_stats = &priv->xstats.rxq_stats[chan];
5682 u64_stats_update_begin(&rxq_stats->napi_syncp);
5683 u64_stats_inc(&rxq_stats->napi.poll);
5684 u64_stats_update_end(&rxq_stats->napi_syncp);
5685
5686 work_done = stmmac_rx(priv, budget, chan);
5687 if (work_done < budget && napi_complete_done(napi, work_done)) {
5688 unsigned long flags;
5689
5690 spin_lock_irqsave(&ch->lock, flags);
5691 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5692 spin_unlock_irqrestore(&ch->lock, flags);
5693 }
5694
5695 return work_done;
5696 }
5697
5698 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5699 {
5700 struct stmmac_channel *ch =
5701 container_of(napi, struct stmmac_channel, tx_napi);
5702 struct stmmac_priv *priv = ch->priv_data;
5703 struct stmmac_txq_stats *txq_stats;
5704 bool pending_packets = false;
5705 u32 chan = ch->index;
5706 int work_done;
5707
5708 txq_stats = &priv->xstats.txq_stats[chan];
5709 u64_stats_update_begin(&txq_stats->napi_syncp);
5710 u64_stats_inc(&txq_stats->napi.poll);
5711 u64_stats_update_end(&txq_stats->napi_syncp);
5712
5713 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5714 work_done = min(work_done, budget);
5715
5716 if (work_done < budget && napi_complete_done(napi, work_done)) {
5717 unsigned long flags;
5718
5719 spin_lock_irqsave(&ch->lock, flags);
5720 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5721 spin_unlock_irqrestore(&ch->lock, flags);
5722 }
5723
5724 /* TX still has packets to handle, check if we need to arm the tx timer */
5725 if (pending_packets)
5726 stmmac_tx_timer_arm(priv, chan);
5727
5728 return work_done;
5729 }
5730
5731 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5732 {
5733 struct stmmac_channel *ch =
5734 container_of(napi, struct stmmac_channel, rxtx_napi);
5735 struct stmmac_priv *priv = ch->priv_data;
5736 bool tx_pending_packets = false;
5737 int rx_done, tx_done, rxtx_done;
5738 struct stmmac_rxq_stats *rxq_stats;
5739 struct stmmac_txq_stats *txq_stats;
5740 u32 chan = ch->index;
5741
5742 rxq_stats = &priv->xstats.rxq_stats[chan];
5743 u64_stats_update_begin(&rxq_stats->napi_syncp);
5744 u64_stats_inc(&rxq_stats->napi.poll);
5745 u64_stats_update_end(&rxq_stats->napi_syncp);
5746
5747 txq_stats = &priv->xstats.txq_stats[chan];
5748 u64_stats_update_begin(&txq_stats->napi_syncp);
5749 u64_stats_inc(&txq_stats->napi.poll);
5750 u64_stats_update_end(&txq_stats->napi_syncp);
5751
5752 tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5753 tx_done = min(tx_done, budget);
5754
5755 rx_done = stmmac_rx_zc(priv, budget, chan);
5756
5757 rxtx_done = max(tx_done, rx_done);
5758
5759 /* If either TX or RX work is not complete, return budget
5760  * and keep polling
5761 */
5762 if (rxtx_done >= budget)
5763 return budget;
5764
5765 /* all work done, exit the polling mode */
5766 if (napi_complete_done(napi, rxtx_done)) {
5767 unsigned long flags;
5768
5769 spin_lock_irqsave(&ch->lock, flags);
5770 /* Both RX and TX work are complete,
5771 * so enable both RX & TX IRQs.
5772 */
5773 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5774 spin_unlock_irqrestore(&ch->lock, flags);
5775 }
5776
5777 /* TX still has packets to handle, check if we need to arm the tx timer */
5778 if (tx_pending_packets)
5779 stmmac_tx_timer_arm(priv, chan);
5780
5781 return min(rxtx_done, budget - 1);
5782 }
5783
5784 /**
5785 * stmmac_tx_timeout
5786 * @dev : Pointer to net device structure
5787 * @txqueue: the index of the hanging transmit queue
5788 * Description: this function is called when a packet transmission fails to
5789 * complete within a reasonable time. The driver will mark the error in the
5790 * netdev structure and arrange for the device to be reset to a sane state
5791 * in order to transmit a new packet.
5792 */
5793 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5794 {
5795 struct stmmac_priv *priv = netdev_priv(dev);
5796
5797 stmmac_global_err(priv);
5798 }
5799
5800 /**
5801 * stmmac_set_rx_mode - entry point for multicast addressing
5802 * @dev : pointer to the device structure
5803 * Description:
5804 * This function is a driver entry point which gets called by the kernel
5805 * whenever multicast addresses must be enabled/disabled.
5806 * Return value:
5807 * void.
5808 */
5809 static void stmmac_set_rx_mode(struct net_device *dev)
5810 {
5811 struct stmmac_priv *priv = netdev_priv(dev);
5812
5813 stmmac_set_filter(priv, priv->hw, dev);
5814 }
5815
5816 /**
5817 * stmmac_change_mtu - entry point to change MTU size for the device.
5818 * @dev : device pointer.
5819 * @new_mtu : the new MTU size for the device.
5820  * Description: the Maximum Transmission Unit (MTU) is used by the network layer
5821 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5822 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5823 * Return value:
5824 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5825 * file on failure.
5826 */
5827 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5828 {
5829 struct stmmac_priv *priv = netdev_priv(dev);
5830 int txfifosz = priv->plat->tx_fifo_size;
5831 struct stmmac_dma_conf *dma_conf;
5832 const int mtu = new_mtu;
5833 int ret;
5834
5835 if (txfifosz == 0)
5836 txfifosz = priv->dma_cap.tx_fifo_size;
5837
5838 txfifosz /= priv->plat->tx_queues_to_use;
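	/* The TX FIFO is shared between the TX queues: the per-queue share
	 * bounds the largest MTU that can be accepted below.
	 */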
5839
5840 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5841 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5842 return -EINVAL;
5843 }
5844
5845 new_mtu = STMMAC_ALIGN(new_mtu);
5846
5847 /* If condition true, FIFO is too small or MTU too large */
5848 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5849 return -EINVAL;
5850
5851 if (netif_running(dev)) {
5852 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5853 /* Try to allocate the new DMA conf with the new mtu */
5854 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5855 if (IS_ERR(dma_conf)) {
5856 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5857 mtu);
5858 return PTR_ERR(dma_conf);
5859 }
5860
5861 stmmac_release(dev);
5862
5863 ret = __stmmac_open(dev, dma_conf);
5864 if (ret) {
5865 free_dma_desc_resources(priv, dma_conf);
5866 kfree(dma_conf);
5867 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5868 return ret;
5869 }
5870
5871 kfree(dma_conf);
5872
5873 stmmac_set_rx_mode(dev);
5874 }
5875
5876 WRITE_ONCE(dev->mtu, mtu);
5877 netdev_update_features(dev);
5878
5879 return 0;
5880 }
5881
5882 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5883 netdev_features_t features)
5884 {
5885 struct stmmac_priv *priv = netdev_priv(dev);
5886
5887 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5888 features &= ~NETIF_F_RXCSUM;
5889
5890 if (!priv->plat->tx_coe)
5891 features &= ~NETIF_F_CSUM_MASK;
5892
5893 /* Some GMAC devices have a bugged Jumbo frame support that
5894 * needs to have the Tx COE disabled for oversized frames
5895  * (due to limited buffer sizes). In this case we disable
5896  * TX csum insertion in the TDES and do not use Store-and-Forward (SF).
5897 */
5898 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5899 features &= ~NETIF_F_CSUM_MASK;
5900
5901 /* Disable tso if asked by ethtool */
5902 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5903 if (features & NETIF_F_TSO)
5904 priv->tso = true;
5905 else
5906 priv->tso = false;
5907 }
5908
5909 return features;
5910 }
5911
5912 static int stmmac_set_features(struct net_device *netdev,
5913 netdev_features_t features)
5914 {
5915 struct stmmac_priv *priv = netdev_priv(netdev);
5916
5917 /* Keep the COE type when checksum offload is supported */
5918 if (features & NETIF_F_RXCSUM)
5919 priv->hw->rx_csum = priv->plat->rx_coe;
5920 else
5921 priv->hw->rx_csum = 0;
5922 /* No check needed because rx_coe has been set earlier and will be
5923  * corrected if there is an issue.
5924 */
5925 stmmac_rx_ipc(priv, priv->hw);
5926
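	/* Split Header depends on RX checksum offload, so re-evaluate it
	 * whenever RXCSUM is toggled.
	 */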
5927 if (priv->sph_cap) {
5928 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5929 u32 chan;
5930
5931 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5932 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5933 }
5934
5935 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5936 priv->hw->hw_vlan_en = true;
5937 else
5938 priv->hw->hw_vlan_en = false;
5939
5940 stmmac_set_hw_vlan_mode(priv, priv->hw);
5941
5942 return 0;
5943 }
5944
5945 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5946 {
5947 u32 rx_cnt = priv->plat->rx_queues_to_use;
5948 u32 tx_cnt = priv->plat->tx_queues_to_use;
5949 u32 queues_count;
5950 u32 queue;
5951 bool xmac;
5952
5953 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5954 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5955
5956 if (priv->irq_wake)
5957 pm_wakeup_event(priv->device, 0);
5958
5959 if (priv->dma_cap.estsel)
5960 stmmac_est_irq_status(priv, priv, priv->dev,
5961 &priv->xstats, tx_cnt);
5962
5963 if (stmmac_fpe_supported(priv))
5964 stmmac_fpe_irq_status(priv);
5965
5966 /* Handle the GMAC's own interrupts */
5967 if ((priv->plat->has_gmac) || xmac) {
5968 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5969
5970 if (unlikely(status)) {
5971 /* For LPI we need to save the tx status */
5972 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5973 priv->tx_path_in_lpi_mode = true;
5974 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5975 priv->tx_path_in_lpi_mode = false;
5976 }
5977
5978 for (queue = 0; queue < queues_count; queue++)
5979 stmmac_host_mtl_irq_status(priv, priv->hw, queue);
5980
5981 /* PCS link status */
5982 if (priv->hw->pcs &&
5983 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
5984 if (priv->xstats.pcs_link)
5985 netif_carrier_on(priv->dev);
5986 else
5987 netif_carrier_off(priv->dev);
5988 }
5989
5990 stmmac_timestamp_interrupt(priv, priv);
5991 }
5992 }
5993
5994 /**
5995 * stmmac_interrupt - main ISR
5996 * @irq: interrupt number.
5997 * @dev_id: to pass the net device pointer.
5998 * Description: this is the main driver interrupt service routine.
5999 * It can call:
6000 * o DMA service routine (to manage incoming frame reception and transmission
6001 * status)
6002 * o Core interrupts to manage: remote wake-up, management counter, LPI
6003 * interrupts.
6004 */
6005 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6006 {
6007 struct net_device *dev = (struct net_device *)dev_id;
6008 struct stmmac_priv *priv = netdev_priv(dev);
6009
6010 /* Check if adapter is up */
6011 if (test_bit(STMMAC_DOWN, &priv->state))
6012 return IRQ_HANDLED;
6013
6014 /* Check ASP error if it isn't delivered via an individual IRQ */
6015 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6016 return IRQ_HANDLED;
6017
6018 /* To handle Common interrupts */
6019 stmmac_common_interrupt(priv);
6020
6021 /* To handle DMA interrupts */
6022 stmmac_dma_interrupt(priv);
6023
6024 return IRQ_HANDLED;
6025 }
6026
6027 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6028 {
6029 struct net_device *dev = (struct net_device *)dev_id;
6030 struct stmmac_priv *priv = netdev_priv(dev);
6031
6032 /* Check if adapter is up */
6033 if (test_bit(STMMAC_DOWN, &priv->state))
6034 return IRQ_HANDLED;
6035
6036 /* To handle Common interrupts */
6037 stmmac_common_interrupt(priv);
6038
6039 return IRQ_HANDLED;
6040 }
6041
6042 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6043 {
6044 struct net_device *dev = (struct net_device *)dev_id;
6045 struct stmmac_priv *priv = netdev_priv(dev);
6046
6047 /* Check if adapter is up */
6048 if (test_bit(STMMAC_DOWN, &priv->state))
6049 return IRQ_HANDLED;
6050
6051 /* Check if a fatal error happened */
6052 stmmac_safety_feat_interrupt(priv);
6053
6054 return IRQ_HANDLED;
6055 }
6056
6057 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6058 {
6059 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6060 struct stmmac_dma_conf *dma_conf;
6061 int chan = tx_q->queue_index;
6062 struct stmmac_priv *priv;
6063 int status;
6064
6065 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6066 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6067
6068 /* Check if adapter is up */
6069 if (test_bit(STMMAC_DOWN, &priv->state))
6070 return IRQ_HANDLED;
6071
6072 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6073
6074 if (unlikely(status & tx_hard_error_bump_tc)) {
6075 /* Try to bump up the dma threshold on this failure */
6076 stmmac_bump_dma_threshold(priv, chan);
6077 } else if (unlikely(status == tx_hard_error)) {
6078 stmmac_tx_err(priv, chan);
6079 }
6080
6081 return IRQ_HANDLED;
6082 }
6083
6084 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6085 {
6086 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6087 struct stmmac_dma_conf *dma_conf;
6088 int chan = rx_q->queue_index;
6089 struct stmmac_priv *priv;
6090
6091 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6092 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6093
6094 /* Check if adapter is up */
6095 if (test_bit(STMMAC_DOWN, &priv->state))
6096 return IRQ_HANDLED;
6097
6098 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6099
6100 return IRQ_HANDLED;
6101 }
6102
6103 /**
6104 * stmmac_ioctl - Entry point for the Ioctl
6105 * @dev: Device pointer.
6106  * @rq: An IOCTL-specific structure that can contain a pointer to
6107 * a proprietary structure used to pass information to the driver.
6108 * @cmd: IOCTL command
6109 * Description:
6110 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6111 */
6112 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6113 {
6114 struct stmmac_priv *priv = netdev_priv(dev);
6115 int ret = -EOPNOTSUPP;
6116
6117 if (!netif_running(dev))
6118 return -EINVAL;
6119
6120 switch (cmd) {
6121 case SIOCGMIIPHY:
6122 case SIOCGMIIREG:
6123 case SIOCSMIIREG:
6124 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6125 break;
6126 case SIOCSHWTSTAMP:
6127 ret = stmmac_hwtstamp_set(dev, rq);
6128 break;
6129 case SIOCGHWTSTAMP:
6130 ret = stmmac_hwtstamp_get(dev, rq);
6131 break;
6132 default:
6133 break;
6134 }
6135
6136 return ret;
6137 }
6138
6139 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6140 void *cb_priv)
6141 {
6142 struct stmmac_priv *priv = cb_priv;
6143 int ret = -EOPNOTSUPP;
6144
6145 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6146 return ret;
6147
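	/* Quiesce NAPI while the classifier is installed or removed. */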
6148 __stmmac_disable_all_queues(priv);
6149
6150 switch (type) {
6151 case TC_SETUP_CLSU32:
6152 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6153 break;
6154 case TC_SETUP_CLSFLOWER:
6155 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6156 break;
6157 default:
6158 break;
6159 }
6160
6161 stmmac_enable_all_queues(priv);
6162 return ret;
6163 }
6164
6165 static LIST_HEAD(stmmac_block_cb_list);
6166
6167 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6168 void *type_data)
6169 {
6170 struct stmmac_priv *priv = netdev_priv(ndev);
6171
6172 switch (type) {
6173 case TC_QUERY_CAPS:
6174 return stmmac_tc_query_caps(priv, priv, type_data);
6175 case TC_SETUP_QDISC_MQPRIO:
6176 return stmmac_tc_setup_mqprio(priv, priv, type_data);
6177 case TC_SETUP_BLOCK:
6178 return flow_block_cb_setup_simple(type_data,
6179 &stmmac_block_cb_list,
6180 stmmac_setup_tc_block_cb,
6181 priv, priv, true);
6182 case TC_SETUP_QDISC_CBS:
6183 return stmmac_tc_setup_cbs(priv, priv, type_data);
6184 case TC_SETUP_QDISC_TAPRIO:
6185 return stmmac_tc_setup_taprio(priv, priv, type_data);
6186 case TC_SETUP_QDISC_ETF:
6187 return stmmac_tc_setup_etf(priv, priv, type_data);
6188 default:
6189 return -EOPNOTSUPP;
6190 }
6191 }
6192
6193 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6194 struct net_device *sb_dev)
6195 {
6196 int gso = skb_shinfo(skb)->gso_type;
6197
6198 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6199 /*
6200 * There is no way to determine the number of TSO/USO
6201  * capable queues. Always use queue 0,
6202 * because if TSO/USO is supported then at least this
6203 * one will be capable.
6204 */
6205 return 0;
6206 }
6207
6208 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6209 }
6210
6211 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6212 {
6213 struct stmmac_priv *priv = netdev_priv(ndev);
6214 int ret = 0;
6215
6216 ret = pm_runtime_resume_and_get(priv->device);
6217 if (ret < 0)
6218 return ret;
6219
6220 ret = eth_mac_addr(ndev, addr);
6221 if (ret)
6222 goto set_mac_error;
6223
6224 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6225
6226 set_mac_error:
6227 pm_runtime_put(priv->device);
6228
6229 return ret;
6230 }
6231
6232 #ifdef CONFIG_DEBUG_FS
6233 static struct dentry *stmmac_fs_dir;
6234
6235 static void sysfs_display_ring(void *head, int size, int extend_desc,
6236 struct seq_file *seq, dma_addr_t dma_phy_addr)
6237 {
6238 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6239 struct dma_desc *p = (struct dma_desc *)head;
6240 unsigned int desc_size;
6241 dma_addr_t dma_addr;
6242 int i;
6243
6244 desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6245 for (i = 0; i < size; i++) {
6246 dma_addr = dma_phy_addr + i * desc_size;
6247 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6248 i, &dma_addr,
6249 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6250 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6251 if (extend_desc)
6252 p = &(++ep)->basic;
6253 else
6254 p++;
6255 }
6256 }
6257
6258 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6259 {
6260 struct net_device *dev = seq->private;
6261 struct stmmac_priv *priv = netdev_priv(dev);
6262 u32 rx_count = priv->plat->rx_queues_to_use;
6263 u32 tx_count = priv->plat->tx_queues_to_use;
6264 u32 queue;
6265
6266 if ((dev->flags & IFF_UP) == 0)
6267 return 0;
6268
6269 for (queue = 0; queue < rx_count; queue++) {
6270 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6271
6272 seq_printf(seq, "RX Queue %d:\n", queue);
6273
6274 if (priv->extend_desc) {
6275 seq_printf(seq, "Extended descriptor ring:\n");
6276 sysfs_display_ring((void *)rx_q->dma_erx,
6277 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6278 } else {
6279 seq_printf(seq, "Descriptor ring:\n");
6280 sysfs_display_ring((void *)rx_q->dma_rx,
6281 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6282 }
6283 }
6284
6285 for (queue = 0; queue < tx_count; queue++) {
6286 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6287
6288 seq_printf(seq, "TX Queue %d:\n", queue);
6289
6290 if (priv->extend_desc) {
6291 seq_printf(seq, "Extended descriptor ring:\n");
6292 sysfs_display_ring((void *)tx_q->dma_etx,
6293 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6294 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6295 seq_printf(seq, "Descriptor ring:\n");
6296 sysfs_display_ring((void *)tx_q->dma_tx,
6297 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6298 }
6299 }
6300
6301 return 0;
6302 }
6303 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6304
6305 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6306 {
6307 static const char * const dwxgmac_timestamp_source[] = {
6308 "None",
6309 "Internal",
6310 "External",
6311 "Both",
6312 };
6313 static const char * const dwxgmac_safety_feature_desc[] = {
6314 "No",
6315 "All Safety Features with ECC and Parity",
6316 "All Safety Features without ECC or Parity",
6317 "All Safety Features with Parity Only",
6318 "ECC Only",
6319 "UNDEFINED",
6320 "UNDEFINED",
6321 "UNDEFINED",
6322 };
6323 struct net_device *dev = seq->private;
6324 struct stmmac_priv *priv = netdev_priv(dev);
6325
6326 if (!priv->hw_cap_support) {
6327 seq_printf(seq, "DMA HW features not supported\n");
6328 return 0;
6329 }
6330
6331 seq_printf(seq, "==============================\n");
6332 seq_printf(seq, "\tDMA HW features\n");
6333 seq_printf(seq, "==============================\n");
6334
6335 seq_printf(seq, "\t10/100 Mbps: %s\n",
6336 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6337 seq_printf(seq, "\t1000 Mbps: %s\n",
6338 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6339 seq_printf(seq, "\tHalf duplex: %s\n",
6340 (priv->dma_cap.half_duplex) ? "Y" : "N");
6341 if (priv->plat->has_xgmac) {
6342 seq_printf(seq,
6343 "\tNumber of Additional MAC address registers: %d\n",
6344 priv->dma_cap.multi_addr);
6345 } else {
6346 seq_printf(seq, "\tHash Filter: %s\n",
6347 (priv->dma_cap.hash_filter) ? "Y" : "N");
6348 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6349 (priv->dma_cap.multi_addr) ? "Y" : "N");
6350 }
6351 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6352 (priv->dma_cap.pcs) ? "Y" : "N");
6353 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6354 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6355 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6356 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6357 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6358 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6359 seq_printf(seq, "\tRMON module: %s\n",
6360 (priv->dma_cap.rmon) ? "Y" : "N");
6361 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6362 (priv->dma_cap.time_stamp) ? "Y" : "N");
6363 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6364 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6365 if (priv->plat->has_xgmac)
6366 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6367 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6368 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6369 (priv->dma_cap.eee) ? "Y" : "N");
6370 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6371 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6372 (priv->dma_cap.tx_coe) ? "Y" : "N");
6373 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6374 priv->plat->has_xgmac) {
6375 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6376 (priv->dma_cap.rx_coe) ? "Y" : "N");
6377 } else {
6378 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6379 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6380 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6381 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6382 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6383 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6384 }
6385 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6386 priv->dma_cap.number_rx_channel);
6387 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6388 priv->dma_cap.number_tx_channel);
6389 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6390 priv->dma_cap.number_rx_queues);
6391 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6392 priv->dma_cap.number_tx_queues);
6393 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6394 (priv->dma_cap.enh_desc) ? "Y" : "N");
6395 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6396 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6397 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6398 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6399 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6400 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6401 priv->dma_cap.pps_out_num);
6402 seq_printf(seq, "\tSafety Features: %s\n",
6403 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6404 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6405 priv->dma_cap.frpsel ? "Y" : "N");
6406 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6407 priv->dma_cap.host_dma_width);
6408 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6409 priv->dma_cap.rssen ? "Y" : "N");
6410 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6411 priv->dma_cap.vlhash ? "Y" : "N");
6412 seq_printf(seq, "\tSplit Header: %s\n",
6413 priv->dma_cap.sphen ? "Y" : "N");
6414 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6415 priv->dma_cap.vlins ? "Y" : "N");
6416 seq_printf(seq, "\tDouble VLAN: %s\n",
6417 priv->dma_cap.dvlan ? "Y" : "N");
6418 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6419 priv->dma_cap.l3l4fnum);
6420 seq_printf(seq, "\tARP Offloading: %s\n",
6421 priv->dma_cap.arpoffsel ? "Y" : "N");
6422 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6423 priv->dma_cap.estsel ? "Y" : "N");
6424 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6425 priv->dma_cap.fpesel ? "Y" : "N");
6426 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6427 priv->dma_cap.tbssel ? "Y" : "N");
6428 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6429 priv->dma_cap.tbs_ch_num);
6430 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6431 priv->dma_cap.sgfsel ? "Y" : "N");
6432 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6433 BIT(priv->dma_cap.ttsfd) >> 1);
6434 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6435 priv->dma_cap.numtc);
6436 seq_printf(seq, "\tDCB Feature: %s\n",
6437 priv->dma_cap.dcben ? "Y" : "N");
6438 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6439 priv->dma_cap.advthword ? "Y" : "N");
6440 seq_printf(seq, "\tPTP Offload: %s\n",
6441 priv->dma_cap.ptoen ? "Y" : "N");
6442 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6443 priv->dma_cap.osten ? "Y" : "N");
6444 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6445 priv->dma_cap.pfcen ? "Y" : "N");
6446 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6447 BIT(priv->dma_cap.frpes) << 6);
6448 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6449 BIT(priv->dma_cap.frpbs) << 6);
6450 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6451 priv->dma_cap.frppipe_num);
6452 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6453 priv->dma_cap.nrvf_num ?
6454 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6455 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6456 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6457 seq_printf(seq, "\tDepth of GCL: %lu\n",
6458 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6459 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6460 priv->dma_cap.cbtisel ? "Y" : "N");
6461 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6462 priv->dma_cap.aux_snapshot_n);
6463 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6464 priv->dma_cap.pou_ost_en ? "Y" : "N");
6465 seq_printf(seq, "\tEnhanced DMA: %s\n",
6466 priv->dma_cap.edma ? "Y" : "N");
6467 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6468 priv->dma_cap.ediffc ? "Y" : "N");
6469 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6470 priv->dma_cap.vxn ? "Y" : "N");
6471 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6472 priv->dma_cap.dbgmem ? "Y" : "N");
6473 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6474 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6475 return 0;
6476 }
6477 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6478
6479 /* Use network device events to rename debugfs file entries.
6480 */
6481 static int stmmac_device_event(struct notifier_block *unused,
6482 unsigned long event, void *ptr)
6483 {
6484 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6485 struct stmmac_priv *priv = netdev_priv(dev);
6486
6487 if (dev->netdev_ops != &stmmac_netdev_ops)
6488 goto done;
6489
6490 switch (event) {
6491 case NETDEV_CHANGENAME:
6492 if (priv->dbgfs_dir)
6493 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6494 priv->dbgfs_dir,
6495 stmmac_fs_dir,
6496 dev->name);
6497 break;
6498 }
6499 done:
6500 return NOTIFY_DONE;
6501 }
6502
6503 static struct notifier_block stmmac_notifier = {
6504 .notifier_call = stmmac_device_event,
6505 };
6506
6507 static void stmmac_init_fs(struct net_device *dev)
6508 {
6509 struct stmmac_priv *priv = netdev_priv(dev);
6510
6511 rtnl_lock();
6512
6513 /* Create per netdev entries */
6514 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6515
6516 /* Entry to report DMA RX/TX rings */
6517 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6518 &stmmac_rings_status_fops);
6519
6520 /* Entry to report the DMA HW features */
6521 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6522 &stmmac_dma_cap_fops);
6523
6524 rtnl_unlock();
6525 }
6526
6527 static void stmmac_exit_fs(struct net_device *dev)
6528 {
6529 struct stmmac_priv *priv = netdev_priv(dev);
6530
6531 debugfs_remove_recursive(priv->dbgfs_dir);
6532 }
6533 #endif /* CONFIG_DEBUG_FS */
6534
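/* Bitwise CRC-32 (polynomial 0xEDB88320, LSB first) over the 12 VLAN ID
 * bits of the little-endian VID.
 */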
6535 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6536 {
6537 unsigned char *data = (unsigned char *)&vid_le;
6538 unsigned char data_byte = 0;
6539 u32 crc = ~0x0;
6540 u32 temp = 0;
6541 int i, bits;
6542
6543 bits = get_bitmask_order(VLAN_VID_MASK);
6544 for (i = 0; i < bits; i++) {
6545 if ((i % 8) == 0)
6546 data_byte = data[i / 8];
6547
6548 temp = ((crc & 1) ^ data_byte) & 1;
6549 crc >>= 1;
6550 data_byte >>= 1;
6551
6552 if (temp)
6553 crc ^= 0xedb88320;
6554 }
6555
6556 return crc;
6557 }
6558
6559 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6560 {
6561 u32 crc, hash = 0;
6562 u16 pmatch = 0;
6563 int count = 0;
6564 u16 vid = 0;
6565
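	/* Build the 16-bit VLAN hash filter: every active VID sets the bit
	 * indexed by the top four bits of its bit-reversed CRC-32.
	 */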
6566 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6567 __le16 vid_le = cpu_to_le16(vid);
6568 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6569 hash |= (1 << crc);
6570 count++;
6571 }
6572
6573 if (!priv->dma_cap.vlhash) {
6574 if (count > 2) /* VID = 0 always passes filter */
6575 return -EOPNOTSUPP;
6576
6577 pmatch = vid;
6578 hash = 0;
6579 }
6580
6581 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6582 }
6583
6584 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6585 {
6586 struct stmmac_priv *priv = netdev_priv(ndev);
6587 bool is_double = false;
6588 int ret;
6589
6590 ret = pm_runtime_resume_and_get(priv->device);
6591 if (ret < 0)
6592 return ret;
6593
6594 if (be16_to_cpu(proto) == ETH_P_8021AD)
6595 is_double = true;
6596
6597 set_bit(vid, priv->active_vlans);
6598 ret = stmmac_vlan_update(priv, is_double);
6599 if (ret) {
6600 clear_bit(vid, priv->active_vlans);
6601 goto err_pm_put;
6602 }
6603
6604 if (priv->hw->num_vlan) {
6605 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6606 if (ret)
6607 goto err_pm_put;
6608 }
6609 err_pm_put:
6610 pm_runtime_put(priv->device);
6611
6612 return ret;
6613 }
6614
6615 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6616 {
6617 struct stmmac_priv *priv = netdev_priv(ndev);
6618 bool is_double = false;
6619 int ret;
6620
6621 ret = pm_runtime_resume_and_get(priv->device);
6622 if (ret < 0)
6623 return ret;
6624
6625 if (be16_to_cpu(proto) == ETH_P_8021AD)
6626 is_double = true;
6627
6628 clear_bit(vid, priv->active_vlans);
6629
6630 if (priv->hw->num_vlan) {
6631 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6632 if (ret)
6633 goto del_vlan_error;
6634 }
6635
6636 ret = stmmac_vlan_update(priv, is_double);
6637
6638 del_vlan_error:
6639 pm_runtime_put(priv->device);
6640
6641 return ret;
6642 }
6643
6644 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6645 {
6646 struct stmmac_priv *priv = netdev_priv(dev);
6647
6648 switch (bpf->command) {
6649 case XDP_SETUP_PROG:
6650 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6651 case XDP_SETUP_XSK_POOL:
6652 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6653 bpf->xsk.queue_id);
6654 default:
6655 return -EOPNOTSUPP;
6656 }
6657 }
6658
6659 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6660 struct xdp_frame **frames, u32 flags)
6661 {
6662 struct stmmac_priv *priv = netdev_priv(dev);
6663 int cpu = smp_processor_id();
6664 struct netdev_queue *nq;
6665 int i, nxmit = 0;
6666 int queue;
6667
6668 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6669 return -ENETDOWN;
6670
6671 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6672 return -EINVAL;
6673
6674 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6675 nq = netdev_get_tx_queue(priv->dev, queue);
6676
6677 __netif_tx_lock(nq, cpu);
6678 /* Avoids TX time-out as we are sharing with slow path */
6679 txq_trans_cond_update(nq);
6680
6681 for (i = 0; i < num_frames; i++) {
6682 int res;
6683
6684 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6685 if (res == STMMAC_XDP_CONSUMED)
6686 break;
6687
6688 nxmit++;
6689 }
6690
6691 if (flags & XDP_XMIT_FLUSH) {
6692 stmmac_flush_tx_descriptors(priv, queue);
6693 stmmac_tx_timer_arm(priv, queue);
6694 }
6695
6696 __netif_tx_unlock(nq);
6697
6698 return nxmit;
6699 }
6700
6701 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6702 {
6703 struct stmmac_channel *ch = &priv->channel[queue];
6704 unsigned long flags;
6705
6706 spin_lock_irqsave(&ch->lock, flags);
6707 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6708 spin_unlock_irqrestore(&ch->lock, flags);
6709
6710 stmmac_stop_rx_dma(priv, queue);
6711 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6712 }
6713
6714 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6715 {
6716 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6717 struct stmmac_channel *ch = &priv->channel[queue];
6718 unsigned long flags;
6719 u32 buf_size;
6720 int ret;
6721
6722 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6723 if (ret) {
6724 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6725 return;
6726 }
6727
6728 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6729 if (ret) {
6730 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6731 netdev_err(priv->dev, "Failed to init RX desc.\n");
6732 return;
6733 }
6734
6735 stmmac_reset_rx_queue(priv, queue);
6736 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6737
6738 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6739 rx_q->dma_rx_phy, rx_q->queue_index);
6740
6741 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6742 sizeof(struct dma_desc));
6743 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6744 rx_q->rx_tail_addr, rx_q->queue_index);
6745
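	/* With an XSK pool attached, program the DMA buffer size from the
	 * pool's frame size rather than the default buffer size.
	 */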
6746 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6747 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6748 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6749 buf_size,
6750 rx_q->queue_index);
6751 } else {
6752 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6753 priv->dma_conf.dma_buf_sz,
6754 rx_q->queue_index);
6755 }
6756
6757 stmmac_start_rx_dma(priv, queue);
6758
6759 spin_lock_irqsave(&ch->lock, flags);
6760 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6761 spin_unlock_irqrestore(&ch->lock, flags);
6762 }
6763
6764 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6765 {
6766 struct stmmac_channel *ch = &priv->channel[queue];
6767 unsigned long flags;
6768
6769 spin_lock_irqsave(&ch->lock, flags);
6770 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6771 spin_unlock_irqrestore(&ch->lock, flags);
6772
6773 stmmac_stop_tx_dma(priv, queue);
6774 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6775 }
6776
6777 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6778 {
6779 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6780 struct stmmac_channel *ch = &priv->channel[queue];
6781 unsigned long flags;
6782 int ret;
6783
6784 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6785 if (ret) {
6786 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6787 return;
6788 }
6789
6790 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6791 if (ret) {
6792 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6793 netdev_err(priv->dev, "Failed to init TX desc.\n");
6794 return;
6795 }
6796
6797 stmmac_reset_tx_queue(priv, queue);
6798 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6799
6800 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6801 tx_q->dma_tx_phy, tx_q->queue_index);
6802
6803 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6804 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6805
6806 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6807 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6808 tx_q->tx_tail_addr, tx_q->queue_index);
6809
6810 stmmac_start_tx_dma(priv, queue);
6811
6812 spin_lock_irqsave(&ch->lock, flags);
6813 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6814 spin_unlock_irqrestore(&ch->lock, flags);
6815 }
6816
6817 void stmmac_xdp_release(struct net_device *dev)
6818 {
6819 struct stmmac_priv *priv = netdev_priv(dev);
6820 u32 chan;
6821
6822 /* Ensure tx function is not running */
6823 netif_tx_disable(dev);
6824
6825 /* Disable NAPI process */
6826 stmmac_disable_all_queues(priv);
6827
6828 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6829 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6830
6831 /* Free the IRQ lines */
6832 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6833
6834 /* Stop TX/RX DMA channels */
6835 stmmac_stop_all_dma(priv);
6836
6837 /* Release and free the Rx/Tx resources */
6838 free_dma_desc_resources(priv, &priv->dma_conf);
6839
6840 /* Disable the MAC Rx/Tx */
6841 stmmac_mac_set(priv, priv->ioaddr, false);
6842
6843 /* set trans_start so we don't get spurious
6844 * watchdogs during reset
6845 */
6846 netif_trans_update(dev);
6847 netif_carrier_off(dev);
6848 }
6849
6850 int stmmac_xdp_open(struct net_device *dev)
6851 {
6852 struct stmmac_priv *priv = netdev_priv(dev);
6853 u32 rx_cnt = priv->plat->rx_queues_to_use;
6854 u32 tx_cnt = priv->plat->tx_queues_to_use;
6855 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6856 struct stmmac_rx_queue *rx_q;
6857 struct stmmac_tx_queue *tx_q;
6858 u32 buf_size;
6859 bool sph_en;
6860 u32 chan;
6861 int ret;
6862
6863 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6864 if (ret < 0) {
6865 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6866 __func__);
6867 goto dma_desc_error;
6868 }
6869
6870 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6871 if (ret < 0) {
6872 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6873 __func__);
6874 goto init_error;
6875 }
6876
6877 stmmac_reset_queues_param(priv);
6878
6879 /* DMA CSR Channel configuration */
6880 for (chan = 0; chan < dma_csr_ch; chan++) {
6881 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6882 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6883 }
6884
6885 /* Adjust Split header */
6886 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6887
6888 /* DMA RX Channel Configuration */
6889 for (chan = 0; chan < rx_cnt; chan++) {
6890 rx_q = &priv->dma_conf.rx_queue[chan];
6891
6892 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6893 rx_q->dma_rx_phy, chan);
6894
6895 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6896 (rx_q->buf_alloc_num *
6897 sizeof(struct dma_desc));
6898 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6899 rx_q->rx_tail_addr, chan);
6900
6901 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6902 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6903 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6904 buf_size,
6905 rx_q->queue_index);
6906 } else {
6907 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6908 priv->dma_conf.dma_buf_sz,
6909 rx_q->queue_index);
6910 }
6911
6912 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6913 }
6914
6915 /* DMA TX Channel Configuration */
6916 for (chan = 0; chan < tx_cnt; chan++) {
6917 tx_q = &priv->dma_conf.tx_queue[chan];
6918
6919 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6920 tx_q->dma_tx_phy, chan);
6921
6922 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6923 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6924 tx_q->tx_tail_addr, chan);
6925
6926 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6927 tx_q->txtimer.function = stmmac_tx_timer;
6928 }
6929
6930 /* Enable the MAC Rx/Tx */
6931 stmmac_mac_set(priv, priv->ioaddr, true);
6932
6933 /* Start Rx & Tx DMA Channels */
6934 stmmac_start_all_dma(priv);
6935
6936 ret = stmmac_request_irq(dev);
6937 if (ret)
6938 goto irq_error;
6939
6940 /* Enable NAPI process */
6941 stmmac_enable_all_queues(priv);
6942 netif_carrier_on(dev);
6943 netif_tx_start_all_queues(dev);
6944 stmmac_enable_all_dma_irq(priv);
6945
6946 return 0;
6947
6948 irq_error:
6949 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6950 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6951
6952 stmmac_hw_teardown(dev);
6953 init_error:
6954 free_dma_desc_resources(priv, &priv->dma_conf);
6955 dma_desc_error:
6956 return ret;
6957 }
6958
6959 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6960 {
6961 struct stmmac_priv *priv = netdev_priv(dev);
6962 struct stmmac_rx_queue *rx_q;
6963 struct stmmac_tx_queue *tx_q;
6964 struct stmmac_channel *ch;
6965
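/* Nothing to do if the interface is down or XDP is not enabled */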
6966 if (test_bit(STMMAC_DOWN, &priv->state) ||
6967 !netif_carrier_ok(priv->dev))
6968 return -ENETDOWN;
6969
6970 if (!stmmac_xdp_is_enabled(priv))
6971 return -EINVAL;
6972
6973 if (queue >= priv->plat->rx_queues_to_use ||
6974 queue >= priv->plat->tx_queues_to_use)
6975 return -EINVAL;
6976
6977 rx_q = &priv->dma_conf.rx_queue[queue];
6978 tx_q = &priv->dma_conf.tx_queue[queue];
6979 ch = &priv->channel[queue];
6980
6981 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6982 return -EINVAL;
6983
6984 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
6985 		/* EQoS does not have a per-DMA channel SW interrupt,
6986 		 * so we schedule the RX/TX NAPI straight away.
6987 		 */
6988 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6989 __napi_schedule(&ch->rxtx_napi);
6990 }
6991
6992 return 0;
6993 }
6994
6995 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6996 {
6997 struct stmmac_priv *priv = netdev_priv(dev);
6998 u32 tx_cnt = priv->plat->tx_queues_to_use;
6999 u32 rx_cnt = priv->plat->rx_queues_to_use;
7000 unsigned int start;
7001 int q;
7002
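/* Aggregate the per-queue TX counters; the u64_stats syncp gives
 * consistent 64-bit reads on 32-bit hosts.
 */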
7003 for (q = 0; q < tx_cnt; q++) {
7004 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7005 u64 tx_packets;
7006 u64 tx_bytes;
7007
7008 do {
7009 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7010 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
7011 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7012 do {
7013 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7014 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7015 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7016
7017 stats->tx_packets += tx_packets;
7018 stats->tx_bytes += tx_bytes;
7019 }
7020
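/* Aggregate the per-queue RX counters */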
7021 for (q = 0; q < rx_cnt; q++) {
7022 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7023 u64 rx_packets;
7024 u64 rx_bytes;
7025
7026 do {
7027 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7028 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7029 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
7030 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7031
7032 stats->rx_packets += rx_packets;
7033 stats->rx_bytes += rx_bytes;
7034 }
7035
7036 stats->rx_dropped = priv->xstats.rx_dropped;
7037 stats->rx_errors = priv->xstats.rx_errors;
7038 stats->tx_dropped = priv->xstats.tx_dropped;
7039 stats->tx_errors = priv->xstats.tx_errors;
7040 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7041 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7042 stats->rx_length_errors = priv->xstats.rx_length;
7043 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7044 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7045 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7046 }
7047
7048 static const struct net_device_ops stmmac_netdev_ops = {
7049 .ndo_open = stmmac_open,
7050 .ndo_start_xmit = stmmac_xmit,
7051 .ndo_stop = stmmac_release,
7052 .ndo_change_mtu = stmmac_change_mtu,
7053 .ndo_fix_features = stmmac_fix_features,
7054 .ndo_set_features = stmmac_set_features,
7055 .ndo_set_rx_mode = stmmac_set_rx_mode,
7056 .ndo_tx_timeout = stmmac_tx_timeout,
7057 .ndo_eth_ioctl = stmmac_ioctl,
7058 .ndo_get_stats64 = stmmac_get_stats64,
7059 .ndo_setup_tc = stmmac_setup_tc,
7060 .ndo_select_queue = stmmac_select_queue,
7061 .ndo_set_mac_address = stmmac_set_mac_address,
7062 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7063 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7064 .ndo_bpf = stmmac_bpf,
7065 .ndo_xdp_xmit = stmmac_xdp_xmit,
7066 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7067 };
7068
7069 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7070 {
7071 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7072 return;
7073 if (test_bit(STMMAC_DOWN, &priv->state))
7074 return;
7075
7076 netdev_err(priv->dev, "Reset adapter.\n");
7077
7078 rtnl_lock();
7079 netif_trans_update(priv->dev);
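/* Wait for any reset already in progress to finish before taking ownership */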
7080 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7081 usleep_range(1000, 2000);
7082
7083 set_bit(STMMAC_DOWN, &priv->state);
7084 dev_close(priv->dev);
7085 dev_open(priv->dev, NULL);
7086 clear_bit(STMMAC_DOWN, &priv->state);
7087 clear_bit(STMMAC_RESETING, &priv->state);
7088 rtnl_unlock();
7089 }
7090
7091 static void stmmac_service_task(struct work_struct *work)
7092 {
7093 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7094 service_task);
7095
7096 stmmac_reset_subtask(priv);
7097 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7098 }
7099
7100 /**
7101 * stmmac_hw_init - Init the MAC device
7102 * @priv: driver private structure
7103  * Description: this function configures the MAC device according to
7104  * some platform parameters or the HW capability register. It prepares the
7105  * driver to use either ring or chain mode and to set up either enhanced or
7106  * normal descriptors.
7107 */
7108 static int stmmac_hw_init(struct stmmac_priv *priv)
7109 {
7110 int ret;
7111
7112 	/* dwmac-sun8i only works in chain mode */
7113 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7114 chain_mode = 1;
7115 priv->chain_mode = chain_mode;
7116
7117 /* Initialize HW Interface */
7118 ret = stmmac_hwif_init(priv);
7119 if (ret)
7120 return ret;
7121
7122 /* Get the HW capability (new GMAC newer than 3.50a) */
7123 priv->hw_cap_support = stmmac_get_hw_features(priv);
7124 if (priv->hw_cap_support) {
7125 dev_info(priv->device, "DMA HW capability register supported\n");
7126
7127 		/* Some gmac/dma configuration fields (e.g. enh_desc, tx_coe)
7128 		 * that are passed through the platform data can be
7129 		 * overridden with the values from the HW capability
7130 		 * register (if supported).
7131 		 */
7132 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7133 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7134 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7135 priv->hw->pmt = priv->plat->pmt;
7136 if (priv->dma_cap.hash_tb_sz) {
7137 priv->hw->multicast_filter_bins =
7138 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7139 priv->hw->mcast_bits_log2 =
7140 ilog2(priv->hw->multicast_filter_bins);
7141 }
7142
7143 /* TXCOE doesn't work in thresh DMA mode */
7144 if (priv->plat->force_thresh_dma_mode)
7145 priv->plat->tx_coe = 0;
7146 else
7147 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7148
7149 /* In case of GMAC4 rx_coe is from HW cap register. */
7150 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7151
7152 if (priv->dma_cap.rx_coe_type2)
7153 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7154 else if (priv->dma_cap.rx_coe_type1)
7155 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7156
7157 } else {
7158 dev_info(priv->device, "No HW DMA feature register supported\n");
7159 }
7160
7161 if (priv->plat->rx_coe) {
7162 priv->hw->rx_csum = priv->plat->rx_coe;
7163 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7164 if (priv->synopsys_id < DWMAC_CORE_4_00)
7165 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7166 }
7167 if (priv->plat->tx_coe)
7168 dev_info(priv->device, "TX Checksum insertion supported\n");
7169
7170 if (priv->plat->pmt) {
7171 dev_info(priv->device, "Wake-Up On Lan supported\n");
7172 device_set_wakeup_capable(priv->device, 1);
7173 }
7174
7175 if (priv->dma_cap.tsoen)
7176 dev_info(priv->device, "TSO supported\n");
7177
7178 priv->hw->vlan_fail_q_en =
7179 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7180 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7181
7182 /* Run HW quirks, if any */
7183 if (priv->hwif_quirks) {
7184 ret = priv->hwif_quirks(priv);
7185 if (ret)
7186 return ret;
7187 }
7188
7189 	/* Rx Watchdog is available in cores newer than 3.40.
7190 	 * In some cases, for example on buggy HW, this feature
7191 	 * has to be disabled; this can be done by passing the
7192 	 * riwt_off field from the platform.
7193 	 */
7194 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7195 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7196 priv->use_riwt = 1;
7197 dev_info(priv->device,
7198 "Enable RX Mitigation via HW Watchdog Timer\n");
7199 }
7200
7201 return 0;
7202 }
7203
7204 static void stmmac_napi_add(struct net_device *dev)
7205 {
7206 struct stmmac_priv *priv = netdev_priv(dev);
7207 u32 queue, maxq;
7208
7209 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7210
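/* Each channel gets an RX and/or TX NAPI; channels that serve both RX and
 * TX also get a combined rxtx NAPI (used for AF_XDP zero-copy).
 */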
7211 for (queue = 0; queue < maxq; queue++) {
7212 struct stmmac_channel *ch = &priv->channel[queue];
7213
7214 ch->priv_data = priv;
7215 ch->index = queue;
7216 spin_lock_init(&ch->lock);
7217
7218 if (queue < priv->plat->rx_queues_to_use) {
7219 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7220 }
7221 if (queue < priv->plat->tx_queues_to_use) {
7222 netif_napi_add_tx(dev, &ch->tx_napi,
7223 stmmac_napi_poll_tx);
7224 }
7225 if (queue < priv->plat->rx_queues_to_use &&
7226 queue < priv->plat->tx_queues_to_use) {
7227 netif_napi_add(dev, &ch->rxtx_napi,
7228 stmmac_napi_poll_rxtx);
7229 }
7230 }
7231 }
7232
7233 static void stmmac_napi_del(struct net_device *dev)
7234 {
7235 struct stmmac_priv *priv = netdev_priv(dev);
7236 u32 queue, maxq;
7237
7238 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7239
7240 for (queue = 0; queue < maxq; queue++) {
7241 struct stmmac_channel *ch = &priv->channel[queue];
7242
7243 if (queue < priv->plat->rx_queues_to_use)
7244 netif_napi_del(&ch->rx_napi);
7245 if (queue < priv->plat->tx_queues_to_use)
7246 netif_napi_del(&ch->tx_napi);
7247 if (queue < priv->plat->rx_queues_to_use &&
7248 queue < priv->plat->tx_queues_to_use) {
7249 netif_napi_del(&ch->rxtx_napi);
7250 }
7251 }
7252 }
7253
7254 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7255 {
7256 struct stmmac_priv *priv = netdev_priv(dev);
7257 int ret = 0, i;
7258
7259 if (netif_running(dev))
7260 stmmac_release(dev);
7261
7262 stmmac_napi_del(dev);
7263
7264 priv->plat->rx_queues_to_use = rx_cnt;
7265 priv->plat->tx_queues_to_use = tx_cnt;
7266 if (!netif_is_rxfh_configured(dev))
7267 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7268 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7269 rx_cnt);
7270
7271 stmmac_napi_add(dev);
7272
7273 if (netif_running(dev))
7274 ret = stmmac_open(dev);
7275
7276 return ret;
7277 }
7278
7279 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7280 {
7281 struct stmmac_priv *priv = netdev_priv(dev);
7282 int ret = 0;
7283
7284 if (netif_running(dev))
7285 stmmac_release(dev);
7286
7287 priv->dma_conf.dma_rx_size = rx_size;
7288 priv->dma_conf.dma_tx_size = tx_size;
7289
7290 if (netif_running(dev))
7291 ret = stmmac_open(dev);
7292
7293 return ret;
7294 }
7295
7296 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7297 {
7298 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7299 struct dma_desc *desc_contains_ts = ctx->desc;
7300 struct stmmac_priv *priv = ctx->priv;
7301 struct dma_desc *ndesc = ctx->ndesc;
7302 struct dma_desc *desc = ctx->desc;
7303 u64 ns = 0;
7304
7305 if (!priv->hwts_rx_en)
7306 return -ENODATA;
7307
7308 /* For GMAC4, the valid timestamp is from CTX next desc. */
7309 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7310 desc_contains_ts = ndesc;
7311
7312 /* Check if timestamp is available */
7313 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7314 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7315 ns -= priv->plat->cdc_error_adj;
7316 *timestamp = ns_to_ktime(ns);
7317 return 0;
7318 }
7319
7320 return -ENODATA;
7321 }
7322
7323 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7324 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7325 };
7326
7327 /**
7328 * stmmac_dvr_probe
7329 * @device: device pointer
7330 * @plat_dat: platform data pointer
7331 * @res: stmmac resource pointer
7332  * Description: this is the main probe function used to
7333  * call alloc_etherdev and allocate the priv structure.
7334 * Return:
7335 * returns 0 on success, otherwise errno.
7336 */
7337 int stmmac_dvr_probe(struct device *device,
7338 struct plat_stmmacenet_data *plat_dat,
7339 struct stmmac_resources *res)
7340 {
7341 struct net_device *ndev = NULL;
7342 struct stmmac_priv *priv;
7343 u32 rxq;
7344 int i, ret = 0;
7345
7346 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7347 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7348 if (!ndev)
7349 return -ENOMEM;
7350
7351 SET_NETDEV_DEV(ndev, device);
7352
7353 priv = netdev_priv(ndev);
7354 priv->device = device;
7355 priv->dev = ndev;
7356
7357 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7358 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7359 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7360 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7361 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7362 }
7363
7364 priv->xstats.pcpu_stats =
7365 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7366 if (!priv->xstats.pcpu_stats)
7367 return -ENOMEM;
7368
7369 stmmac_set_ethtool_ops(ndev);
7370 priv->pause = pause;
7371 priv->plat = plat_dat;
7372 priv->ioaddr = res->addr;
7373 priv->dev->base_addr = (unsigned long)res->addr;
7374 priv->plat->dma_cfg->multi_msi_en =
7375 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7376
7377 priv->dev->irq = res->irq;
7378 priv->wol_irq = res->wol_irq;
7379 priv->lpi_irq = res->lpi_irq;
7380 priv->sfty_irq = res->sfty_irq;
7381 priv->sfty_ce_irq = res->sfty_ce_irq;
7382 priv->sfty_ue_irq = res->sfty_ue_irq;
7383 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7384 priv->rx_irq[i] = res->rx_irq[i];
7385 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7386 priv->tx_irq[i] = res->tx_irq[i];
7387
7388 if (!is_zero_ether_addr(res->mac))
7389 eth_hw_addr_set(priv->dev, res->mac);
7390
7391 dev_set_drvdata(device, priv->dev);
7392
7393 /* Verify driver arguments */
7394 stmmac_verify_args();
7395
7396 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7397 if (!priv->af_xdp_zc_qps)
7398 return -ENOMEM;
7399
7400 /* Allocate workqueue */
7401 priv->wq = create_singlethread_workqueue("stmmac_wq");
7402 if (!priv->wq) {
7403 dev_err(priv->device, "failed to create workqueue\n");
7404 ret = -ENOMEM;
7405 goto error_wq_init;
7406 }
7407
7408 INIT_WORK(&priv->service_task, stmmac_service_task);
7409
7410 /* Override with kernel parameters if supplied XXX CRS XXX
7411 * this needs to have multiple instances
7412 */
7413 if ((phyaddr >= 0) && (phyaddr <= 31))
7414 priv->plat->phy_addr = phyaddr;
7415
7416 if (priv->plat->stmmac_rst) {
7417 ret = reset_control_assert(priv->plat->stmmac_rst);
7418 reset_control_deassert(priv->plat->stmmac_rst);
7419 		/* Some reset controllers have only a reset callback instead
7420 		 * of an assert + deassert callback pair.
7421 		 */
7422 if (ret == -ENOTSUPP)
7423 reset_control_reset(priv->plat->stmmac_rst);
7424 }
7425
7426 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7427 if (ret == -ENOTSUPP)
7428 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7429 ERR_PTR(ret));
7430
7431 /* Wait a bit for the reset to take effect */
7432 udelay(10);
7433
7434 /* Init MAC and get the capabilities */
7435 ret = stmmac_hw_init(priv);
7436 if (ret)
7437 goto error_hw_init;
7438
7439 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7440 */
7441 if (priv->synopsys_id < DWMAC_CORE_5_20)
7442 priv->plat->dma_cfg->dche = false;
7443
7444 stmmac_check_ether_addr(priv);
7445
7446 ndev->netdev_ops = &stmmac_netdev_ops;
7447
7448 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7449 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7450
7451 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7452 NETIF_F_RXCSUM;
7453 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7454 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7455
7456 ret = stmmac_tc_init(priv, priv);
7457 if (!ret) {
7458 ndev->hw_features |= NETIF_F_HW_TC;
7459 }
7460
7461 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7462 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7463 if (priv->plat->has_gmac4)
7464 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7465 priv->tso = true;
7466 dev_info(priv->device, "TSO feature enabled\n");
7467 }
7468
7469 if (priv->dma_cap.sphen &&
7470 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7471 ndev->hw_features |= NETIF_F_GRO;
7472 priv->sph_cap = true;
7473 priv->sph = priv->sph_cap;
7474 dev_info(priv->device, "SPH feature enabled\n");
7475 }
7476
7477 /* Ideally our host DMA address width is the same as for the
7478 * device. However, it may differ and then we have to use our
7479 * host DMA width for allocation and the device DMA width for
7480 * register handling.
7481 */
7482 if (priv->plat->host_dma_width)
7483 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7484 else
7485 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7486
7487 if (priv->dma_cap.host_dma_width) {
7488 ret = dma_set_mask_and_coherent(device,
7489 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7490 if (!ret) {
7491 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7492 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7493
7494 /*
7495 * If more than 32 bits can be addressed, make sure to
7496 * enable enhanced addressing mode.
7497 */
7498 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7499 priv->plat->dma_cfg->eame = true;
7500 } else {
7501 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7502 if (ret) {
7503 dev_err(priv->device, "Failed to set DMA Mask\n");
7504 goto error_hw_init;
7505 }
7506
7507 priv->dma_cap.host_dma_width = 32;
7508 }
7509 }
7510
7511 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7512 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7513 #ifdef STMMAC_VLAN_TAG_USED
7514 /* Both mac100 and gmac support receive VLAN tag detection */
7515 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7516 if (priv->plat->has_gmac4) {
7517 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7518 priv->hw->hw_vlan_en = true;
7519 }
7520 if (priv->dma_cap.vlhash) {
7521 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7522 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7523 }
7524 if (priv->dma_cap.vlins) {
7525 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7526 if (priv->dma_cap.dvlan)
7527 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7528 }
7529 #endif
7530 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7531
7532 priv->xstats.threshold = tc;
7533
7534 /* Initialize RSS */
7535 rxq = priv->plat->rx_queues_to_use;
7536 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7537 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7538 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7539
7540 if (priv->dma_cap.rssen && priv->plat->rss_en)
7541 ndev->features |= NETIF_F_RXHASH;
7542
7543 ndev->vlan_features |= ndev->features;
7544
7545 /* MTU range: 46 - hw-specific max */
7546 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7547 if (priv->plat->has_xgmac)
7548 ndev->max_mtu = XGMAC_JUMBO_LEN;
7549 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7550 ndev->max_mtu = JUMBO_LEN;
7551 else
7552 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7553 	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7554 	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7555 	 */
7556 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7557 (priv->plat->maxmtu >= ndev->min_mtu))
7558 ndev->max_mtu = priv->plat->maxmtu;
7559 else if (priv->plat->maxmtu < ndev->min_mtu)
7560 dev_warn(priv->device,
7561 "%s: warning: maxmtu having invalid value (%d)\n",
7562 __func__, priv->plat->maxmtu);
7563
7564 if (flow_ctrl)
7565 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
7566
7567 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7568
7569 /* Setup channels NAPI */
7570 stmmac_napi_add(ndev);
7571
7572 mutex_init(&priv->lock);
7573
7574 stmmac_fpe_init(priv);
7575
7576 	/* If a specific clk_csr value is passed from the platform,
7577 	 * this means that the CSR Clock Range selection cannot be
7578 	 * changed at run-time and is fixed. Otherwise the driver
7579 	 * will try to set the MDC clock dynamically according to
7580 	 * the actual csr clock input.
7581 	 */
7582 if (priv->plat->clk_csr >= 0)
7583 priv->clk_csr = priv->plat->clk_csr;
7584 else
7585 stmmac_clk_csr_set(priv);
7586
7587 stmmac_check_pcs_mode(priv);
7588
7589 pm_runtime_get_noresume(device);
7590 pm_runtime_set_active(device);
7591 if (!pm_runtime_enabled(device))
7592 pm_runtime_enable(device);
7593
7594 ret = stmmac_mdio_register(ndev);
7595 if (ret < 0) {
7596 dev_err_probe(priv->device, ret,
7597 "MDIO bus (id: %d) registration failed\n",
7598 priv->plat->bus_id);
7599 goto error_mdio_register;
7600 }
7601
7602 if (priv->plat->speed_mode_2500)
7603 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7604
7605 ret = stmmac_pcs_setup(ndev);
7606 if (ret)
7607 goto error_pcs_setup;
7608
7609 ret = stmmac_phy_setup(priv);
7610 if (ret) {
7611 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7612 goto error_phy_setup;
7613 }
7614
7615 ret = register_netdev(ndev);
7616 if (ret) {
7617 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7618 __func__, ret);
7619 goto error_netdev_register;
7620 }
7621
7622 #ifdef CONFIG_DEBUG_FS
7623 stmmac_init_fs(ndev);
7624 #endif
7625
7626 if (priv->plat->dump_debug_regs)
7627 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7628
7629 /* Let pm_runtime_put() disable the clocks.
7630 * If CONFIG_PM is not enabled, the clocks will stay powered.
7631 */
7632 pm_runtime_put(device);
7633
7634 return ret;
7635
7636 error_netdev_register:
7637 phylink_destroy(priv->phylink);
7638 error_phy_setup:
7639 stmmac_pcs_clean(ndev);
7640 error_pcs_setup:
7641 stmmac_mdio_unregister(ndev);
7642 error_mdio_register:
7643 stmmac_napi_del(ndev);
7644 error_hw_init:
7645 destroy_workqueue(priv->wq);
7646 error_wq_init:
7647 bitmap_free(priv->af_xdp_zc_qps);
7648
7649 return ret;
7650 }
7651 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7652
7653 /**
7654 * stmmac_dvr_remove
7655 * @dev: device pointer
7656  * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7657  * changes the link status and releases the DMA descriptor rings.
7658 */
7659 void stmmac_dvr_remove(struct device *dev)
7660 {
7661 struct net_device *ndev = dev_get_drvdata(dev);
7662 struct stmmac_priv *priv = netdev_priv(ndev);
7663
7664 netdev_info(priv->dev, "%s: removing driver", __func__);
7665
7666 pm_runtime_get_sync(dev);
7667
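/* Stop the datapath before unregistering the netdev */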
7668 stmmac_stop_all_dma(priv);
7669 stmmac_mac_set(priv, priv->ioaddr, false);
7670 unregister_netdev(ndev);
7671
7672 #ifdef CONFIG_DEBUG_FS
7673 stmmac_exit_fs(ndev);
7674 #endif
7675 phylink_destroy(priv->phylink);
7676 if (priv->plat->stmmac_rst)
7677 reset_control_assert(priv->plat->stmmac_rst);
7678 reset_control_assert(priv->plat->stmmac_ahb_rst);
7679
7680 stmmac_pcs_clean(ndev);
7681 stmmac_mdio_unregister(ndev);
7682
7683 destroy_workqueue(priv->wq);
7684 mutex_destroy(&priv->lock);
7685 bitmap_free(priv->af_xdp_zc_qps);
7686
7687 pm_runtime_disable(dev);
7688 pm_runtime_put_noidle(dev);
7689 }
7690 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7691
7692 /**
7693 * stmmac_suspend - suspend callback
7694 * @dev: device pointer
7695  * Description: this function suspends the device; it is called by the
7696  * platform driver to stop the network queue, release the resources,
7697  * program the PMT register (for WoL) and clean up driver resources.
7698 */
7699 int stmmac_suspend(struct device *dev)
7700 {
7701 struct net_device *ndev = dev_get_drvdata(dev);
7702 struct stmmac_priv *priv = netdev_priv(ndev);
7703 u32 chan;
7704
7705 if (!ndev || !netif_running(ndev))
7706 return 0;
7707
7708 mutex_lock(&priv->lock);
7709
7710 netif_device_detach(ndev);
7711
7712 stmmac_disable_all_queues(priv);
7713
7714 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7715 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7716
7717 if (priv->eee_enabled) {
7718 priv->tx_path_in_lpi_mode = false;
7719 del_timer_sync(&priv->eee_ctrl_timer);
7720 }
7721
7722 /* Stop TX/RX DMA */
7723 stmmac_stop_all_dma(priv);
7724
7725 if (priv->plat->serdes_powerdown)
7726 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7727
7728 /* Enable Power down mode by programming the PMT regs */
7729 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7730 stmmac_pmt(priv, priv->hw, priv->wolopts);
7731 priv->irq_wake = 1;
7732 } else {
7733 stmmac_mac_set(priv, priv->ioaddr, false);
7734 pinctrl_pm_select_sleep_state(priv->device);
7735 }
7736
7737 mutex_unlock(&priv->lock);
7738
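/* Suspend phylink; when MAC-based WoL (PMT) is in use, tell phylink so
 * the link is kept configured for wake-up.
 */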
7739 rtnl_lock();
7740 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7741 phylink_suspend(priv->phylink, true);
7742 } else {
7743 if (device_may_wakeup(priv->device))
7744 phylink_speed_down(priv->phylink, false);
7745 phylink_suspend(priv->phylink, false);
7746 }
7747 rtnl_unlock();
7748
7749 if (stmmac_fpe_supported(priv))
7750 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7751
7752 priv->speed = SPEED_UNKNOWN;
7753 return 0;
7754 }
7755 EXPORT_SYMBOL_GPL(stmmac_suspend);
7756
7757 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7758 {
7759 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7760
7761 rx_q->cur_rx = 0;
7762 rx_q->dirty_rx = 0;
7763 }
7764
7765 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7766 {
7767 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7768
7769 tx_q->cur_tx = 0;
7770 tx_q->dirty_tx = 0;
7771 tx_q->mss = 0;
7772
7773 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7774 }
7775
7776 /**
7777 * stmmac_reset_queues_param - reset queue parameters
7778 * @priv: device pointer
7779 */
7780 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7781 {
7782 u32 rx_cnt = priv->plat->rx_queues_to_use;
7783 u32 tx_cnt = priv->plat->tx_queues_to_use;
7784 u32 queue;
7785
7786 for (queue = 0; queue < rx_cnt; queue++)
7787 stmmac_reset_rx_queue(priv, queue);
7788
7789 for (queue = 0; queue < tx_cnt; queue++)
7790 stmmac_reset_tx_queue(priv, queue);
7791 }
7792
7793 /**
7794 * stmmac_resume - resume callback
7795 * @dev: device pointer
7796  * Description: on resume this function is invoked to set up the DMA and CORE
7797  * in a usable state.
7798 */
7799 int stmmac_resume(struct device *dev)
7800 {
7801 struct net_device *ndev = dev_get_drvdata(dev);
7802 struct stmmac_priv *priv = netdev_priv(ndev);
7803 int ret;
7804
7805 if (!netif_running(ndev))
7806 return 0;
7807
7808 	/* The Power Down bit in the PM register is cleared
7809 	 * automatically as soon as a magic packet or a Wake-up frame
7810 	 * is received. Even so, it's better to manually clear
7811 	 * this bit because it can cause problems while resuming
7812 	 * from other devices (e.g. serial console).
7813 	 */
7814 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7815 mutex_lock(&priv->lock);
7816 stmmac_pmt(priv, priv->hw, 0);
7817 mutex_unlock(&priv->lock);
7818 priv->irq_wake = 0;
7819 } else {
7820 pinctrl_pm_select_default_state(priv->device);
7821 /* reset the phy so that it's ready */
7822 if (priv->mii)
7823 stmmac_mdio_reset(priv->mii);
7824 }
7825
7826 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7827 priv->plat->serdes_powerup) {
7828 ret = priv->plat->serdes_powerup(ndev,
7829 priv->plat->bsp_priv);
7830
7831 if (ret < 0)
7832 return ret;
7833 }
7834
7835 rtnl_lock();
7836 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7837 phylink_resume(priv->phylink);
7838 } else {
7839 phylink_resume(priv->phylink);
7840 if (device_may_wakeup(priv->device))
7841 phylink_speed_up(priv->phylink);
7842 }
7843 rtnl_unlock();
7844
7845 rtnl_lock();
7846 mutex_lock(&priv->lock);
7847
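/* Rebuild ring state and reprogram the hardware before re-enabling the queues */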
7848 stmmac_reset_queues_param(priv);
7849
7850 stmmac_free_tx_skbufs(priv);
7851 stmmac_clear_descriptors(priv, &priv->dma_conf);
7852
7853 stmmac_hw_setup(ndev, false);
7854 stmmac_init_coalesce(priv);
7855 stmmac_set_rx_mode(ndev);
7856
7857 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7858
7859 stmmac_enable_all_queues(priv);
7860 stmmac_enable_all_dma_irq(priv);
7861
7862 mutex_unlock(&priv->lock);
7863 rtnl_unlock();
7864
7865 netif_device_attach(ndev);
7866
7867 return 0;
7868 }
7869 EXPORT_SYMBOL_GPL(stmmac_resume);
7870
7871 #ifndef MODULE
7872 static int __init stmmac_cmdline_opt(char *str)
7873 {
7874 char *opt;
7875
7876 if (!str || !*str)
7877 return 1;
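/* Parse comma-separated "name:value" options from the stmmaceth=
 * command line, e.g. stmmaceth=debug:16,phyaddr:1,watchdog:4000
 */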
7878 while ((opt = strsep(&str, ",")) != NULL) {
7879 if (!strncmp(opt, "debug:", 6)) {
7880 if (kstrtoint(opt + 6, 0, &debug))
7881 goto err;
7882 } else if (!strncmp(opt, "phyaddr:", 8)) {
7883 if (kstrtoint(opt + 8, 0, &phyaddr))
7884 goto err;
7885 } else if (!strncmp(opt, "buf_sz:", 7)) {
7886 if (kstrtoint(opt + 7, 0, &buf_sz))
7887 goto err;
7888 } else if (!strncmp(opt, "tc:", 3)) {
7889 if (kstrtoint(opt + 3, 0, &tc))
7890 goto err;
7891 } else if (!strncmp(opt, "watchdog:", 9)) {
7892 if (kstrtoint(opt + 9, 0, &watchdog))
7893 goto err;
7894 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7895 if (kstrtoint(opt + 10, 0, &flow_ctrl))
7896 goto err;
7897 } else if (!strncmp(opt, "pause:", 6)) {
7898 if (kstrtoint(opt + 6, 0, &pause))
7899 goto err;
7900 } else if (!strncmp(opt, "eee_timer:", 10)) {
7901 if (kstrtoint(opt + 10, 0, &eee_timer))
7902 goto err;
7903 } else if (!strncmp(opt, "chain_mode:", 11)) {
7904 if (kstrtoint(opt + 11, 0, &chain_mode))
7905 goto err;
7906 }
7907 }
7908 return 1;
7909
7910 err:
7911 pr_err("%s: ERROR broken module parameter conversion", __func__);
7912 return 1;
7913 }
7914
7915 __setup("stmmaceth=", stmmac_cmdline_opt);
7916 #endif /* MODULE */
7917
7918 static int __init stmmac_init(void)
7919 {
7920 #ifdef CONFIG_DEBUG_FS
7921 /* Create debugfs main directory if it doesn't exist yet */
7922 if (!stmmac_fs_dir)
7923 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7924 register_netdevice_notifier(&stmmac_notifier);
7925 #endif
7926
7927 return 0;
7928 }
7929
7930 static void __exit stmmac_exit(void)
7931 {
7932 #ifdef CONFIG_DEBUG_FS
7933 unregister_netdevice_notifier(&stmmac_notifier);
7934 debugfs_remove_recursive(stmmac_fs_dir);
7935 #endif
7936 }
7937
7938 module_init(stmmac_init)
7939 module_exit(stmmac_exit)
7940
7941 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7942 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7943 MODULE_LICENSE("GPL");
7944