1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54
55 /* As long as the interface is active, we keep the timestamping counter enabled
56 * with fine resolution and binary rollover. This avoids non-monotonic behavior
57 * (clock jumps) when changing timestamping settings at runtime.
58 */
59 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 PTP_TCR_TSCTRLSSR)
61
62 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
64
65 /* Module parameters */
66 #define TX_TIMEO 5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78
79 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
85
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
90
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 #define DEFAULT_BUFSIZE 1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108
109 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
110 NETIF_MSG_LINK | NETIF_MSG_IFUP |
111 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
112
113 #define STMMAC_DEFAULT_LPI_TIMER 1000
114 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
115 module_param(eee_timer, uint, 0644);
116 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
117 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
118
119 /* By default the driver uses ring mode to manage tx and rx descriptors,
120 * but allows the user to force chain mode instead of ring mode.
121 */
122 static unsigned int chain_mode;
123 module_param(chain_mode, int, 0444);
124 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
125
126 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
127 /* For MSI interrupts handling */
128 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
129 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
131 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
132 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
133 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
135 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
137 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
138 u32 rxmode, u32 chan);
139
140 #ifdef CONFIG_DEBUG_FS
141 static const struct net_device_ops stmmac_netdev_ops;
142 static void stmmac_init_fs(struct net_device *dev);
143 static void stmmac_exit_fs(struct net_device *dev);
144 #endif
145
146 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
147
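/**
 * stmmac_bus_clks_config - enable or disable the platform bus clocks
 * @priv: driver private structure
 * @enabled: true to enable the clocks, false to disable them
 * Description: prepares/enables the stmmac and pclk clocks (plus any extra
 * platform clocks via plat->clks_config), or disables them in reverse order.
 * On an enable failure, clocks that were already enabled are rolled back.
 * Return: 0 on success, a negative errno otherwise.
 */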
148 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
149 {
150 int ret = 0;
151
152 if (enabled) {
153 ret = clk_prepare_enable(priv->plat->stmmac_clk);
154 if (ret)
155 return ret;
156 ret = clk_prepare_enable(priv->plat->pclk);
157 if (ret) {
158 clk_disable_unprepare(priv->plat->stmmac_clk);
159 return ret;
160 }
161 if (priv->plat->clks_config) {
162 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
163 if (ret) {
164 clk_disable_unprepare(priv->plat->stmmac_clk);
165 clk_disable_unprepare(priv->plat->pclk);
166 return ret;
167 }
168 }
169 } else {
170 clk_disable_unprepare(priv->plat->stmmac_clk);
171 clk_disable_unprepare(priv->plat->pclk);
172 if (priv->plat->clks_config)
173 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
174 }
175
176 return ret;
177 }
178 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
179
180 /**
181 * stmmac_verify_args - verify the driver parameters.
182 * Description: it checks the driver parameters and sets a default in case of
183 * errors.
184 */
185 static void stmmac_verify_args(void)
186 {
187 if (unlikely(watchdog < 0))
188 watchdog = TX_TIMEO;
189 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
190 buf_sz = DEFAULT_BUFSIZE;
191 if (unlikely(flow_ctrl > 1))
192 flow_ctrl = FLOW_AUTO;
193 else if (likely(flow_ctrl < 0))
194 flow_ctrl = FLOW_OFF;
195 if (unlikely((pause < 0) || (pause > 0xffff)))
196 pause = PAUSE_TIME;
197 }
198
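/**
 * __stmmac_disable_all_queues - disable NAPI on every RX/TX channel
 * @priv: driver private structure
 * Description: disables the rx/tx NAPI instances of all channels, or the
 * combined rxtx NAPI instance for AF_XDP zero-copy queues.
 */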
199 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
200 {
201 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
202 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
203 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
204 u32 queue;
205
206 for (queue = 0; queue < maxq; queue++) {
207 struct stmmac_channel *ch = &priv->channel[queue];
208
209 if (stmmac_xdp_is_enabled(priv) &&
210 test_bit(queue, priv->af_xdp_zc_qps)) {
211 napi_disable(&ch->rxtx_napi);
212 continue;
213 }
214
215 if (queue < rx_queues_cnt)
216 napi_disable(&ch->rx_napi);
217 if (queue < tx_queues_cnt)
218 napi_disable(&ch->tx_napi);
219 }
220 }
221
222 /**
223 * stmmac_disable_all_queues - Disable all queues
224 * @priv: driver private structure
225 */
226 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
227 {
228 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
229 struct stmmac_rx_queue *rx_q;
230 u32 queue;
231
232 /* synchronize_rcu() needed for pending XDP buffers to drain */
233 for (queue = 0; queue < rx_queues_cnt; queue++) {
234 rx_q = &priv->dma_conf.rx_queue[queue];
235 if (rx_q->xsk_pool) {
236 synchronize_rcu();
237 break;
238 }
239 }
240
241 __stmmac_disable_all_queues(priv);
242 }
243
244 /**
245 * stmmac_enable_all_queues - Enable all queues
246 * @priv: driver private structure
247 */
248 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
249 {
250 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
251 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
252 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
253 u32 queue;
254
255 for (queue = 0; queue < maxq; queue++) {
256 struct stmmac_channel *ch = &priv->channel[queue];
257
258 if (stmmac_xdp_is_enabled(priv) &&
259 test_bit(queue, priv->af_xdp_zc_qps)) {
260 napi_enable(&ch->rxtx_napi);
261 continue;
262 }
263
264 if (queue < rx_queues_cnt)
265 napi_enable(&ch->rx_napi);
266 if (queue < tx_queues_cnt)
267 napi_enable(&ch->tx_napi);
268 }
269 }
270
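/* Schedule the service task, unless the interface is going down or the task
 * is already pending.
 */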
271 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
272 {
273 if (!test_bit(STMMAC_DOWN, &priv->state) &&
274 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
275 queue_work(priv->wq, &priv->service_task);
276 }
277
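/* Handle a fatal error: take the carrier down and request a reset via the
 * service task.
 */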
278 static void stmmac_global_err(struct stmmac_priv *priv)
279 {
280 netif_carrier_off(priv->dev);
281 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
282 stmmac_service_event_schedule(priv);
283 }
284
285 /**
286 * stmmac_clk_csr_set - dynamically set the MDC clock
287 * @priv: driver private structure
288 * Description: this is to dynamically set the MDC clock according to the csr
289 * clock input.
290 * Note:
291 * If a specific clk_csr value is passed from the platform
292 * this means that the CSR Clock Range selection cannot be
293 * changed at run-time and it is fixed (as reported in the driver
294 * documentation). Otherwise, the driver will try to set the MDC
295 * clock dynamically according to the actual clock input.
296 */
297 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
298 {
299 unsigned long clk_rate;
300
301 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
302
303 /* Platform provided default clk_csr would be assumed valid
304 * for all other cases except for the below mentioned ones.
305 * For values higher than the IEEE 802.3 specified frequency
306 * we cannot estimate the proper divider as the frequency of
307 * clk_csr_i is not known. So we do not change the default
308 * divider.
309 */
310 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
311 if (clk_rate < CSR_F_35M)
312 priv->clk_csr = STMMAC_CSR_20_35M;
313 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
314 priv->clk_csr = STMMAC_CSR_35_60M;
315 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
316 priv->clk_csr = STMMAC_CSR_60_100M;
317 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
318 priv->clk_csr = STMMAC_CSR_100_150M;
319 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
320 priv->clk_csr = STMMAC_CSR_150_250M;
321 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
322 priv->clk_csr = STMMAC_CSR_250_300M;
323 else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
324 priv->clk_csr = STMMAC_CSR_300_500M;
325 else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
326 priv->clk_csr = STMMAC_CSR_500_800M;
327 }
328
329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 if (clk_rate > 160000000)
331 priv->clk_csr = 0x03;
332 else if (clk_rate > 80000000)
333 priv->clk_csr = 0x02;
334 else if (clk_rate > 40000000)
335 priv->clk_csr = 0x01;
336 else
337 priv->clk_csr = 0;
338 }
339
340 if (priv->plat->has_xgmac) {
341 if (clk_rate > 400000000)
342 priv->clk_csr = 0x5;
343 else if (clk_rate > 350000000)
344 priv->clk_csr = 0x4;
345 else if (clk_rate > 300000000)
346 priv->clk_csr = 0x3;
347 else if (clk_rate > 250000000)
348 priv->clk_csr = 0x2;
349 else if (clk_rate > 150000000)
350 priv->clk_csr = 0x1;
351 else
352 priv->clk_csr = 0x0;
353 }
354 }
355
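/* Dump the given packet buffer at debug message level. */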
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361
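/**
 * stmmac_tx_avail - get the number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: returns how many descriptors are still available in the ring,
 * keeping one slot unused to distinguish a full ring from an empty one.
 */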
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 u32 avail;
366
367 if (tx_q->dirty_tx > tx_q->cur_tx)
368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 else
370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371
372 return avail;
373 }
374
375 /**
376 * stmmac_rx_dirty - Get RX queue dirty
377 * @priv: driver private structure
378 * @queue: RX queue index
379 */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 u32 dirty;
384
385 if (rx_q->dirty_rx <= rx_q->cur_rx)
386 dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 else
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389
390 return dirty;
391 }
392
393 static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv)
394 {
395 stmmac_set_eee_lpi_timer(priv, priv->hw, 0);
396 }
397
398 static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv)
399 {
400 stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer);
401 }
402
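/* Return true if any TX queue still has descriptors pending completion. */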
403 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
404 {
405 u32 tx_cnt = priv->plat->tx_queues_to_use;
406 u32 queue;
407
408 /* check if all TX queues have the work finished */
409 for (queue = 0; queue < tx_cnt; queue++) {
410 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
411
412 if (tx_q->dirty_tx != tx_q->cur_tx)
413 return true; /* still unfinished work */
414 }
415
416 return false;
417 }
418
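/* Re-arm the software LPI timer to expire after tx_lpi_timer microseconds. */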
419 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
420 {
421 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
422 }
423
424 /**
425 * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
426 * @priv: driver private structure
427 * Description: this function checks whether the TX path is idle and, if so,
428 * enters LPI mode when EEE is enabled.
429 */
430 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
431 {
432 if (stmmac_eee_tx_busy(priv)) {
433 stmmac_restart_sw_lpi_timer(priv);
434 return;
435 }
436
437 /* Check and enter in LPI mode */
438 if (!priv->tx_path_in_lpi_mode)
439 stmmac_set_eee_mode(priv, priv->hw,
440 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
441 }
442
443 /**
444 * stmmac_stop_sw_lpi - stop transmitting LPI
445 * @priv: driver private structure
446 * Description: When using software-controlled LPI, stop transmitting LPI state.
447 */
448 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
449 {
450 stmmac_reset_eee_mode(priv, priv->hw);
451 del_timer_sync(&priv->eee_ctrl_timer);
452 priv->tx_path_in_lpi_mode = false;
453 }
454
455 /**
456 * stmmac_eee_ctrl_timer - EEE TX SW timer.
457 * @t: timer_list struct containing private info
458 * Description:
459 * if there is no data transfer and if we are not in LPI state,
460 * then the MAC transmitter can be moved to the LPI state.
461 */
462 static void stmmac_eee_ctrl_timer(struct timer_list *t)
463 {
464 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
465
466 stmmac_try_to_start_sw_lpi(priv);
467 }
468
469 /**
470 * stmmac_eee_init - init EEE
471 * @priv: driver private structure
472 * @active: indicates whether EEE should be enabled.
473 * Description:
474 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
475 * can also manage EEE, this function enables the LPI state and starts the
476 * related timer.
477 */
478 static void stmmac_eee_init(struct stmmac_priv *priv, bool active)
479 {
480 priv->eee_active = active;
481
482 /* Check if MAC core supports the EEE feature. */
483 if (!priv->dma_cap.eee) {
484 priv->eee_enabled = false;
485 return;
486 }
487
488 mutex_lock(&priv->lock);
489
490 /* Check if it needs to be deactivated */
491 if (!priv->eee_active) {
492 if (priv->eee_enabled) {
493 netdev_dbg(priv->dev, "disable EEE\n");
494 priv->eee_sw_timer_en = false;
495 stmmac_disable_hw_lpi_timer(priv);
496 del_timer_sync(&priv->eee_ctrl_timer);
497 stmmac_set_eee_timer(priv, priv->hw, 0,
498 STMMAC_DEFAULT_TWT_LS);
499 if (priv->hw->xpcs)
500 xpcs_config_eee(priv->hw->xpcs,
501 priv->plat->mult_fact_100ns,
502 false);
503 }
504 priv->eee_enabled = false;
505 mutex_unlock(&priv->lock);
506 return;
507 }
508
509 if (priv->eee_active && !priv->eee_enabled) {
510 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
511 STMMAC_DEFAULT_TWT_LS);
512 if (priv->hw->xpcs)
513 xpcs_config_eee(priv->hw->xpcs,
514 priv->plat->mult_fact_100ns,
515 true);
516 }
517
518 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
519 /* Use hardware LPI mode */
520 del_timer_sync(&priv->eee_ctrl_timer);
521 priv->tx_path_in_lpi_mode = false;
522 priv->eee_sw_timer_en = false;
523 stmmac_enable_hw_lpi_timer(priv);
524 } else {
525 /* Use software LPI mode */
526 priv->eee_sw_timer_en = true;
527 stmmac_disable_hw_lpi_timer(priv);
528 stmmac_restart_sw_lpi_timer(priv);
529 }
530
531 priv->eee_enabled = true;
532
533 mutex_unlock(&priv->lock);
534 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
535 }
536
537 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
538 * @priv: driver private structure
539 * @p : descriptor pointer
540 * @skb : the socket buffer
541 * Description :
542 * This function reads the timestamp from the descriptor, performs some
543 * sanity checks and passes it to the stack.
544 */
545 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
546 struct dma_desc *p, struct sk_buff *skb)
547 {
548 struct skb_shared_hwtstamps shhwtstamp;
549 bool found = false;
550 u64 ns = 0;
551
552 if (!priv->hwts_tx_en)
553 return;
554
555 /* exit if skb doesn't support hw tstamp */
556 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
557 return;
558
559 /* check tx tstamp status */
560 if (stmmac_get_tx_timestamp_status(priv, p)) {
561 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
562 found = true;
563 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
564 found = true;
565 }
566
567 if (found) {
568 ns -= priv->plat->cdc_error_adj;
569
570 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
571 shhwtstamp.hwtstamp = ns_to_ktime(ns);
572
573 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
574 /* pass tstamp to stack */
575 skb_tstamp_tx(skb, &shhwtstamp);
576 }
577 }
578
579 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
580 * @priv: driver private structure
581 * @p : descriptor pointer
582 * @np : next descriptor pointer
583 * @skb : the socket buffer
584 * Description :
585 * This function will read received packet's timestamp from the descriptor
586 * and pass it to the stack. It also performs some sanity checks.
587 */
588 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
589 struct dma_desc *np, struct sk_buff *skb)
590 {
591 struct skb_shared_hwtstamps *shhwtstamp = NULL;
592 struct dma_desc *desc = p;
593 u64 ns = 0;
594
595 if (!priv->hwts_rx_en)
596 return;
597 /* For GMAC4, the valid timestamp is from CTX next desc. */
598 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
599 desc = np;
600
601 /* Check if timestamp is available */
602 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
603 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
604
605 ns -= priv->plat->cdc_error_adj;
606
607 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
608 shhwtstamp = skb_hwtstamps(skb);
609 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
610 shhwtstamp->hwtstamp = ns_to_ktime(ns);
611 } else {
612 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
613 }
614 }
615
616 /**
617 * stmmac_hwtstamp_set - control hardware timestamping.
618 * @dev: device pointer.
619 * @ifr: An IOCTL specific structure, that can contain a pointer to
620 * a proprietary structure used to pass information to the driver.
621 * Description:
622 * This function configures the MAC to enable/disable hardware timestamping
623 * of both outgoing (TX) and incoming (RX) packets, based on user input.
624 * Return Value:
625 * 0 on success and an appropriate -ve integer on failure.
626 */
627 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
628 {
629 struct stmmac_priv *priv = netdev_priv(dev);
630 struct hwtstamp_config config;
631 u32 ptp_v2 = 0;
632 u32 tstamp_all = 0;
633 u32 ptp_over_ipv4_udp = 0;
634 u32 ptp_over_ipv6_udp = 0;
635 u32 ptp_over_ethernet = 0;
636 u32 snap_type_sel = 0;
637 u32 ts_master_en = 0;
638 u32 ts_event_en = 0;
639
640 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
641 netdev_alert(priv->dev, "No support for HW time stamping\n");
642 priv->hwts_tx_en = 0;
643 priv->hwts_rx_en = 0;
644
645 return -EOPNOTSUPP;
646 }
647
648 if (copy_from_user(&config, ifr->ifr_data,
649 sizeof(config)))
650 return -EFAULT;
651
652 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
653 __func__, config.flags, config.tx_type, config.rx_filter);
654
655 if (config.tx_type != HWTSTAMP_TX_OFF &&
656 config.tx_type != HWTSTAMP_TX_ON)
657 return -ERANGE;
658
659 if (priv->adv_ts) {
660 switch (config.rx_filter) {
661 case HWTSTAMP_FILTER_NONE:
662 /* time stamp no incoming packet at all */
663 config.rx_filter = HWTSTAMP_FILTER_NONE;
664 break;
665
666 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
667 /* PTP v1, UDP, any kind of event packet */
668 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
669 /* 'xmac' hardware can support Sync, Pdelay_Req and
670 * Pdelay_resp by setting bit14 and bits17/16 to 01.
671 * This leaves Delay_Req timestamps out.
672 * Enable all events *and* general purpose message
673 * timestamping
674 */
675 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
676 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678 break;
679
680 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
681 /* PTP v1, UDP, Sync packet */
682 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
683 /* take time stamp for SYNC messages only */
684 ts_event_en = PTP_TCR_TSEVNTENA;
685
686 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
687 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
688 break;
689
690 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
691 /* PTP v1, UDP, Delay_req packet */
692 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
693 /* take time stamp for Delay_Req messages only */
694 ts_master_en = PTP_TCR_TSMSTRENA;
695 ts_event_en = PTP_TCR_TSEVNTENA;
696
697 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
698 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
699 break;
700
701 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
702 /* PTP v2, UDP, any kind of event packet */
703 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
704 ptp_v2 = PTP_TCR_TSVER2ENA;
705 /* take time stamp for all event messages */
706 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
707
708 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
709 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
710 break;
711
712 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
713 /* PTP v2, UDP, Sync packet */
714 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
715 ptp_v2 = PTP_TCR_TSVER2ENA;
716 /* take time stamp for SYNC messages only */
717 ts_event_en = PTP_TCR_TSEVNTENA;
718
719 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
720 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
721 break;
722
723 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
724 /* PTP v2, UDP, Delay_req packet */
725 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
726 ptp_v2 = PTP_TCR_TSVER2ENA;
727 /* take time stamp for Delay_Req messages only */
728 ts_master_en = PTP_TCR_TSMSTRENA;
729 ts_event_en = PTP_TCR_TSEVNTENA;
730
731 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
732 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
733 break;
734
735 case HWTSTAMP_FILTER_PTP_V2_EVENT:
736 /* PTP v2/802.AS1 any layer, any kind of event packet */
737 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
738 ptp_v2 = PTP_TCR_TSVER2ENA;
739 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
740 if (priv->synopsys_id < DWMAC_CORE_4_10)
741 ts_event_en = PTP_TCR_TSEVNTENA;
742 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
743 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
744 ptp_over_ethernet = PTP_TCR_TSIPENA;
745 break;
746
747 case HWTSTAMP_FILTER_PTP_V2_SYNC:
748 /* PTP v2/802.AS1, any layer, Sync packet */
749 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
750 ptp_v2 = PTP_TCR_TSVER2ENA;
751 /* take time stamp for SYNC messages only */
752 ts_event_en = PTP_TCR_TSEVNTENA;
753
754 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
755 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
756 ptp_over_ethernet = PTP_TCR_TSIPENA;
757 break;
758
759 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
760 /* PTP v2/802.AS1, any layer, Delay_req packet */
761 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
762 ptp_v2 = PTP_TCR_TSVER2ENA;
763 /* take time stamp for Delay_Req messages only */
764 ts_master_en = PTP_TCR_TSMSTRENA;
765 ts_event_en = PTP_TCR_TSEVNTENA;
766
767 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
768 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
769 ptp_over_ethernet = PTP_TCR_TSIPENA;
770 break;
771
772 case HWTSTAMP_FILTER_NTP_ALL:
773 case HWTSTAMP_FILTER_ALL:
774 /* time stamp any incoming packet */
775 config.rx_filter = HWTSTAMP_FILTER_ALL;
776 tstamp_all = PTP_TCR_TSENALL;
777 break;
778
779 default:
780 return -ERANGE;
781 }
782 } else {
783 switch (config.rx_filter) {
784 case HWTSTAMP_FILTER_NONE:
785 config.rx_filter = HWTSTAMP_FILTER_NONE;
786 break;
787 default:
788 /* PTP v1, UDP, any kind of event packet */
789 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
790 break;
791 }
792 }
793 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
794 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
795
796 priv->systime_flags = STMMAC_HWTS_ACTIVE;
797
798 if (priv->hwts_tx_en || priv->hwts_rx_en) {
799 priv->systime_flags |= tstamp_all | ptp_v2 |
800 ptp_over_ethernet | ptp_over_ipv6_udp |
801 ptp_over_ipv4_udp | ts_event_en |
802 ts_master_en | snap_type_sel;
803 }
804
805 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
806
807 memcpy(&priv->tstamp_config, &config, sizeof(config));
808
809 return copy_to_user(ifr->ifr_data, &config,
810 sizeof(config)) ? -EFAULT : 0;
811 }
812
813 /**
814 * stmmac_hwtstamp_get - read hardware timestamping.
815 * @dev: device pointer.
816 * @ifr: An IOCTL specific structure, that can contain a pointer to
817 * a proprietary structure used to pass information to the driver.
818 * Description:
819 * This function obtains the current hardware timestamping settings
820 * as requested.
821 */
822 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
823 {
824 struct stmmac_priv *priv = netdev_priv(dev);
825 struct hwtstamp_config *config = &priv->tstamp_config;
826
827 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
828 return -EOPNOTSUPP;
829
830 return copy_to_user(ifr->ifr_data, config,
831 sizeof(*config)) ? -EFAULT : 0;
832 }
833
834 /**
835 * stmmac_init_tstamp_counter - init hardware timestamping counter
836 * @priv: driver private structure
837 * @systime_flags: timestamping flags
838 * Description:
839 * Initialize hardware counter for packet timestamping.
840 * This is valid as long as the interface is open and not suspended.
841 * It will be rerun after resuming from suspend, in which case the timestamping
842 * flags updated by stmmac_hwtstamp_set() also need to be restored.
843 */
844 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
845 {
846 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
847 struct timespec64 now;
848 u32 sec_inc = 0;
849 u64 temp = 0;
850
851 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
852 return -EOPNOTSUPP;
853
854 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
855 priv->systime_flags = systime_flags;
856
857 /* program Sub Second Increment reg */
858 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
859 priv->plat->clk_ptp_rate,
860 xmac, &sec_inc);
861 temp = div_u64(1000000000ULL, sec_inc);
862
863 /* Store sub second increment for later use */
864 priv->sub_second_inc = sec_inc;
865
866 /* calculate default added value:
867 * formula is :
868 * addend = (2^32)/freq_div_ratio;
869 * where, freq_div_ratio = 1e9ns/sec_inc
870 */
871 temp = (u64)(temp << 32);
872 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
873 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
874
875 /* initialize system time */
876 ktime_get_real_ts64(&now);
877
878 /* lower 32 bits of tv_sec are safe until y2106 */
879 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
880
881 return 0;
882 }
883 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
884
885 /**
886 * stmmac_init_ptp - init PTP
887 * @priv: driver private structure
888 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
889 * This is done by looking at the HW cap. register.
890 * This function also registers the ptp driver.
891 */
892 static int stmmac_init_ptp(struct stmmac_priv *priv)
893 {
894 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
895 int ret;
896
897 if (priv->plat->ptp_clk_freq_config)
898 priv->plat->ptp_clk_freq_config(priv);
899
900 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
901 if (ret)
902 return ret;
903
904 priv->adv_ts = 0;
905 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
906 if (xmac && priv->dma_cap.atime_stamp)
907 priv->adv_ts = 1;
908 /* Dwmac 3.x core with extend_desc can support adv_ts */
909 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
910 priv->adv_ts = 1;
911
912 if (priv->dma_cap.time_stamp)
913 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
914
915 if (priv->adv_ts)
916 netdev_info(priv->dev,
917 "IEEE 1588-2008 Advanced Timestamp supported\n");
918
919 priv->hwts_tx_en = 0;
920 priv->hwts_rx_en = 0;
921
922 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
923 stmmac_hwtstamp_correct_latency(priv, priv);
924
925 return 0;
926 }
927
928 static void stmmac_release_ptp(struct stmmac_priv *priv)
929 {
930 clk_disable_unprepare(priv->plat->clk_ptp_ref);
931 stmmac_ptp_unregister(priv);
932 }
933
934 /**
935 * stmmac_mac_flow_ctrl - Configure flow control in all queues
936 * @priv: driver private structure
937 * @duplex: duplex passed to the next function
938 * Description: It is used for configuring the flow control in all queues
939 */
940 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
941 {
942 u32 tx_cnt = priv->plat->tx_queues_to_use;
943
944 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
945 priv->pause, tx_cnt);
946 }
947
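/* phylink callback: report the MAC capabilities for the given interface,
 * refreshed from the hardware and clamped to the platform's max_speed.
 */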
948 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
949 phy_interface_t interface)
950 {
951 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
952
953 /* Refresh the MAC-specific capabilities */
954 stmmac_mac_update_caps(priv);
955
956 config->mac_capabilities = priv->hw->link.caps;
957
958 if (priv->plat->max_speed)
959 phylink_limit_mac_speed(config, priv->plat->max_speed);
960
961 return config->mac_capabilities;
962 }
963
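/* phylink callback: let the platform select a PCS for this interface, if any. */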
964 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
965 phy_interface_t interface)
966 {
967 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
968 struct phylink_pcs *pcs;
969
970 if (priv->plat->select_pcs) {
971 pcs = priv->plat->select_pcs(priv, interface);
972 if (!IS_ERR(pcs))
973 return pcs;
974 }
975
976 return NULL;
977 }
978
979 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
980 const struct phylink_link_state *state)
981 {
982 /* Nothing to do, xpcs_config() handles everything */
983 }
984
985 static void stmmac_mac_link_down(struct phylink_config *config,
986 unsigned int mode, phy_interface_t interface)
987 {
988 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
989
990 stmmac_mac_set(priv, priv->ioaddr, false);
991 if (priv->dma_cap.eee)
992 stmmac_set_eee_pls(priv, priv->hw, false);
993
994 if (stmmac_fpe_supported(priv))
995 stmmac_fpe_link_state_handle(priv, false);
996 }
997
998 static void stmmac_mac_link_up(struct phylink_config *config,
999 struct phy_device *phy,
1000 unsigned int mode, phy_interface_t interface,
1001 int speed, int duplex,
1002 bool tx_pause, bool rx_pause)
1003 {
1004 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1005 u32 old_ctrl, ctrl;
1006
1007 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1008 priv->plat->serdes_powerup)
1009 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1010
1011 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1012 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1013
1014 if (interface == PHY_INTERFACE_MODE_USXGMII) {
1015 switch (speed) {
1016 case SPEED_10000:
1017 ctrl |= priv->hw->link.xgmii.speed10000;
1018 break;
1019 case SPEED_5000:
1020 ctrl |= priv->hw->link.xgmii.speed5000;
1021 break;
1022 case SPEED_2500:
1023 ctrl |= priv->hw->link.xgmii.speed2500;
1024 break;
1025 default:
1026 return;
1027 }
1028 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1029 switch (speed) {
1030 case SPEED_100000:
1031 ctrl |= priv->hw->link.xlgmii.speed100000;
1032 break;
1033 case SPEED_50000:
1034 ctrl |= priv->hw->link.xlgmii.speed50000;
1035 break;
1036 case SPEED_40000:
1037 ctrl |= priv->hw->link.xlgmii.speed40000;
1038 break;
1039 case SPEED_25000:
1040 ctrl |= priv->hw->link.xlgmii.speed25000;
1041 break;
1042 case SPEED_10000:
1043 ctrl |= priv->hw->link.xgmii.speed10000;
1044 break;
1045 case SPEED_2500:
1046 ctrl |= priv->hw->link.speed2500;
1047 break;
1048 case SPEED_1000:
1049 ctrl |= priv->hw->link.speed1000;
1050 break;
1051 default:
1052 return;
1053 }
1054 } else {
1055 switch (speed) {
1056 case SPEED_2500:
1057 ctrl |= priv->hw->link.speed2500;
1058 break;
1059 case SPEED_1000:
1060 ctrl |= priv->hw->link.speed1000;
1061 break;
1062 case SPEED_100:
1063 ctrl |= priv->hw->link.speed100;
1064 break;
1065 case SPEED_10:
1066 ctrl |= priv->hw->link.speed10;
1067 break;
1068 default:
1069 return;
1070 }
1071 }
1072
1073 priv->speed = speed;
1074
1075 if (priv->plat->fix_mac_speed)
1076 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1077
1078 if (!duplex)
1079 ctrl &= ~priv->hw->link.duplex;
1080 else
1081 ctrl |= priv->hw->link.duplex;
1082
1083 /* Flow Control operation */
1084 if (rx_pause && tx_pause)
1085 priv->flow_ctrl = FLOW_AUTO;
1086 else if (rx_pause && !tx_pause)
1087 priv->flow_ctrl = FLOW_RX;
1088 else if (!rx_pause && tx_pause)
1089 priv->flow_ctrl = FLOW_TX;
1090 else
1091 priv->flow_ctrl = FLOW_OFF;
1092
1093 stmmac_mac_flow_ctrl(priv, duplex);
1094
1095 if (ctrl != old_ctrl)
1096 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1097
1098 stmmac_mac_set(priv, priv->ioaddr, true);
1099 if (priv->dma_cap.eee)
1100 stmmac_set_eee_pls(priv, priv->hw, true);
1101
1102 if (stmmac_fpe_supported(priv))
1103 stmmac_fpe_link_state_handle(priv, true);
1104
1105 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1106 stmmac_hwtstamp_correct_latency(priv, priv);
1107 }
1108
1109 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1110 {
1111 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1112
1113 stmmac_eee_init(priv, false);
1114 }
1115
1116 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1117 bool tx_clk_stop)
1118 {
1119 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1120
1121 priv->tx_lpi_timer = timer;
1122 stmmac_eee_init(priv, true);
1123
1124 return 0;
1125 }
1126
1127 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1128 .mac_get_caps = stmmac_mac_get_caps,
1129 .mac_select_pcs = stmmac_mac_select_pcs,
1130 .mac_config = stmmac_mac_config,
1131 .mac_link_down = stmmac_mac_link_down,
1132 .mac_link_up = stmmac_mac_link_up,
1133 .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1134 .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1135 };
1136
1137 /**
1138 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1139 * @priv: driver private structure
1140 * Description: this is to verify if the HW supports the Physical Coding
1141 * Sublayer (PCS) interface, which can be used when the MAC is configured
1142 * for the TBI, RTBI, or SGMII PHY interface.
1143 */
1144 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1145 {
1146 int interface = priv->plat->mac_interface;
1147
1148 if (priv->dma_cap.pcs) {
1149 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1150 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1151 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1152 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1153 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1154 priv->hw->pcs = STMMAC_PCS_RGMII;
1155 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1156 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1157 priv->hw->pcs = STMMAC_PCS_SGMII;
1158 }
1159 }
1160 }
1161
1162 /**
1163 * stmmac_init_phy - PHY initialization
1164 * @dev: net device structure
1165 * Description: it initializes the driver's PHY state, and attaches the PHY
1166 * to the mac driver.
1167 * Return value:
1168 * 0 on success
1169 */
1170 static int stmmac_init_phy(struct net_device *dev)
1171 {
1172 struct stmmac_priv *priv = netdev_priv(dev);
1173 struct fwnode_handle *phy_fwnode;
1174 struct fwnode_handle *fwnode;
1175 int ret;
1176
1177 if (!phylink_expects_phy(priv->phylink))
1178 return 0;
1179
1180 fwnode = priv->plat->port_node;
1181 if (!fwnode)
1182 fwnode = dev_fwnode(priv->device);
1183
1184 if (fwnode)
1185 phy_fwnode = fwnode_get_phy_node(fwnode);
1186 else
1187 phy_fwnode = NULL;
1188
1189 /* Some DT bindings do not set up the PHY handle. Let's try to
1190 * parse it manually.
1191 */
1192 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1193 int addr = priv->plat->phy_addr;
1194 struct phy_device *phydev;
1195
1196 if (addr < 0) {
1197 netdev_err(priv->dev, "no phy found\n");
1198 return -ENODEV;
1199 }
1200
1201 phydev = mdiobus_get_phy(priv->mii, addr);
1202 if (!phydev) {
1203 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1204 return -ENODEV;
1205 }
1206
1207 ret = phylink_connect_phy(priv->phylink, phydev);
1208 } else {
1209 fwnode_handle_put(phy_fwnode);
1210 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1211 }
1212
1213 if (ret == 0) {
1214 struct ethtool_keee eee;
1215
1216 /* Configure phylib's copy of the LPI timer. Normally,
1217 * phylink_config.lpi_timer_default would do this, but there is
1218 * a chance that userspace could change the eee_timer setting
1219 * via sysfs before the first open. Thus, preserve existing
1220 * behaviour.
1221 */
1222 if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1223 eee.tx_lpi_timer = priv->tx_lpi_timer;
1224 phylink_ethtool_set_eee(priv->phylink, &eee);
1225 }
1226 }
1227
1228 if (!priv->plat->pmt) {
1229 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1230
1231 phylink_ethtool_get_wol(priv->phylink, &wol);
1232 device_set_wakeup_capable(priv->device, !!wol.supported);
1233 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1234 }
1235
1236 return ret;
1237 }
1238
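/**
 * stmmac_phy_setup - create the phylink instance
 * @priv: driver private structure
 * Description: fills in phylink_config (supported interfaces, LPI/EEE
 * capabilities, inband AN defaults) and creates the phylink instance used by
 * the driver.
 * Return: 0 on success, a negative errno otherwise.
 */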
1239 static int stmmac_phy_setup(struct stmmac_priv *priv)
1240 {
1241 struct stmmac_mdio_bus_data *mdio_bus_data;
1242 int mode = priv->plat->phy_interface;
1243 struct fwnode_handle *fwnode;
1244 struct phylink_pcs *pcs;
1245 struct phylink *phylink;
1246
1247 priv->phylink_config.dev = &priv->dev->dev;
1248 priv->phylink_config.type = PHYLINK_NETDEV;
1249 priv->phylink_config.mac_managed_pm = true;
1250
1251 /* Stmmac always requires an RX clock for hardware initialization */
1252 priv->phylink_config.mac_requires_rxc = true;
1253
1254 if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1255 priv->phylink_config.eee_rx_clk_stop_enable = true;
1256
1257 mdio_bus_data = priv->plat->mdio_bus_data;
1258 if (mdio_bus_data)
1259 priv->phylink_config.default_an_inband =
1260 mdio_bus_data->default_an_inband;
1261
1262 /* Set the platform/firmware specified interface mode. Note, phylink
1263 * deals with the PHY interface mode, not the MAC interface mode.
1264 */
1265 __set_bit(mode, priv->phylink_config.supported_interfaces);
1266
1267 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1268 if (priv->hw->xpcs)
1269 pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1270 else
1271 pcs = priv->hw->phylink_pcs;
1272
1273 if (pcs)
1274 phy_interface_or(priv->phylink_config.supported_interfaces,
1275 priv->phylink_config.supported_interfaces,
1276 pcs->supported_interfaces);
1277
1278 if (priv->dma_cap.eee) {
1279 /* Assume all supported interfaces also support LPI */
1280 memcpy(priv->phylink_config.lpi_interfaces,
1281 priv->phylink_config.supported_interfaces,
1282 sizeof(priv->phylink_config.lpi_interfaces));
1283
1284 /* All full duplex speeds above 100Mbps are supported */
1285 priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) |
1286 MAC_100FD;
1287 priv->phylink_config.lpi_timer_default = eee_timer * 1000;
1288 priv->phylink_config.eee_enabled_default = true;
1289 }
1290
1291 fwnode = priv->plat->port_node;
1292 if (!fwnode)
1293 fwnode = dev_fwnode(priv->device);
1294
1295 phylink = phylink_create(&priv->phylink_config, fwnode,
1296 mode, &stmmac_phylink_mac_ops);
1297 if (IS_ERR(phylink))
1298 return PTR_ERR(phylink);
1299
1300 priv->phylink = phylink;
1301 return 0;
1302 }
1303
1304 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1305 struct stmmac_dma_conf *dma_conf)
1306 {
1307 u32 rx_cnt = priv->plat->rx_queues_to_use;
1308 unsigned int desc_size;
1309 void *head_rx;
1310 u32 queue;
1311
1312 /* Display RX rings */
1313 for (queue = 0; queue < rx_cnt; queue++) {
1314 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1315
1316 pr_info("\tRX Queue %u rings\n", queue);
1317
1318 if (priv->extend_desc) {
1319 head_rx = (void *)rx_q->dma_erx;
1320 desc_size = sizeof(struct dma_extended_desc);
1321 } else {
1322 head_rx = (void *)rx_q->dma_rx;
1323 desc_size = sizeof(struct dma_desc);
1324 }
1325
1326 /* Display RX ring */
1327 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1328 rx_q->dma_rx_phy, desc_size);
1329 }
1330 }
1331
1332 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1333 struct stmmac_dma_conf *dma_conf)
1334 {
1335 u32 tx_cnt = priv->plat->tx_queues_to_use;
1336 unsigned int desc_size;
1337 void *head_tx;
1338 u32 queue;
1339
1340 /* Display TX rings */
1341 for (queue = 0; queue < tx_cnt; queue++) {
1342 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1343
1344 pr_info("\tTX Queue %d rings\n", queue);
1345
1346 if (priv->extend_desc) {
1347 head_tx = (void *)tx_q->dma_etx;
1348 desc_size = sizeof(struct dma_extended_desc);
1349 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1350 head_tx = (void *)tx_q->dma_entx;
1351 desc_size = sizeof(struct dma_edesc);
1352 } else {
1353 head_tx = (void *)tx_q->dma_tx;
1354 desc_size = sizeof(struct dma_desc);
1355 }
1356
1357 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1358 tx_q->dma_tx_phy, desc_size);
1359 }
1360 }
1361
1362 static void stmmac_display_rings(struct stmmac_priv *priv,
1363 struct stmmac_dma_conf *dma_conf)
1364 {
1365 /* Display RX ring */
1366 stmmac_display_rx_rings(priv, dma_conf);
1367
1368 /* Display TX ring */
1369 stmmac_display_tx_rings(priv, dma_conf);
1370 }
1371
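/* Headroom reserved in front of each RX buffer: XDP packet headroom when an
 * XDP program is attached, NET_SKB_PAD otherwise.
 */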
1372 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1373 {
1374 if (stmmac_xdp_is_enabled(priv))
1375 return XDP_PACKET_HEADROOM;
1376
1377 return NET_SKB_PAD;
1378 }
1379
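/* Pick a DMA buffer size large enough for the given MTU. */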
1380 static int stmmac_set_bfsize(int mtu, int bufsize)
1381 {
1382 int ret = bufsize;
1383
1384 if (mtu >= BUF_SIZE_8KiB)
1385 ret = BUF_SIZE_16KiB;
1386 else if (mtu >= BUF_SIZE_4KiB)
1387 ret = BUF_SIZE_8KiB;
1388 else if (mtu >= BUF_SIZE_2KiB)
1389 ret = BUF_SIZE_4KiB;
1390 else if (mtu > DEFAULT_BUFSIZE)
1391 ret = BUF_SIZE_2KiB;
1392 else
1393 ret = DEFAULT_BUFSIZE;
1394
1395 return ret;
1396 }
1397
1398 /**
1399 * stmmac_clear_rx_descriptors - clear RX descriptors
1400 * @priv: driver private structure
1401 * @dma_conf: structure to take the dma data
1402 * @queue: RX queue index
1403 * Description: this function is called to clear the RX descriptors
1404 * whether basic or extended descriptors are in use.
1405 */
1406 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1407 struct stmmac_dma_conf *dma_conf,
1408 u32 queue)
1409 {
1410 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1411 int i;
1412
1413 /* Clear the RX descriptors */
1414 for (i = 0; i < dma_conf->dma_rx_size; i++)
1415 if (priv->extend_desc)
1416 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1417 priv->use_riwt, priv->mode,
1418 (i == dma_conf->dma_rx_size - 1),
1419 dma_conf->dma_buf_sz);
1420 else
1421 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1422 priv->use_riwt, priv->mode,
1423 (i == dma_conf->dma_rx_size - 1),
1424 dma_conf->dma_buf_sz);
1425 }
1426
1427 /**
1428 * stmmac_clear_tx_descriptors - clear tx descriptors
1429 * @priv: driver private structure
1430 * @dma_conf: structure to take the dma data
1431 * @queue: TX queue index.
1432 * Description: this function is called to clear the TX descriptors
1433 * whether basic or extended descriptors are in use.
1434 */
1435 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1436 struct stmmac_dma_conf *dma_conf,
1437 u32 queue)
1438 {
1439 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1440 int i;
1441
1442 /* Clear the TX descriptors */
1443 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1444 int last = (i == (dma_conf->dma_tx_size - 1));
1445 struct dma_desc *p;
1446
1447 if (priv->extend_desc)
1448 p = &tx_q->dma_etx[i].basic;
1449 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1450 p = &tx_q->dma_entx[i].basic;
1451 else
1452 p = &tx_q->dma_tx[i];
1453
1454 stmmac_init_tx_desc(priv, p, priv->mode, last);
1455 }
1456 }
1457
1458 /**
1459 * stmmac_clear_descriptors - clear descriptors
1460 * @priv: driver private structure
1461 * @dma_conf: structure to take the dma data
1462 * Description: this function is called to clear the TX and RX descriptors
1463 * whether basic or extended descriptors are in use.
1464 */
1465 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1466 struct stmmac_dma_conf *dma_conf)
1467 {
1468 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1469 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1470 u32 queue;
1471
1472 /* Clear the RX descriptors */
1473 for (queue = 0; queue < rx_queue_cnt; queue++)
1474 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1475
1476 /* Clear the TX descriptors */
1477 for (queue = 0; queue < tx_queue_cnt; queue++)
1478 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1479 }
1480
1481 /**
1482 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1483 * @priv: driver private structure
1484 * @dma_conf: structure to take the dma data
1485 * @p: descriptor pointer
1486 * @i: descriptor index
1487 * @flags: gfp flag
1488 * @queue: RX queue index
1489 * Description: this function is called to allocate a receive buffer, perform
1490 * the DMA mapping and init the descriptor.
1491 */
1492 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1493 struct stmmac_dma_conf *dma_conf,
1494 struct dma_desc *p,
1495 int i, gfp_t flags, u32 queue)
1496 {
1497 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1498 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1499 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1500
1501 if (priv->dma_cap.host_dma_width <= 32)
1502 gfp |= GFP_DMA32;
1503
1504 if (!buf->page) {
1505 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1506 if (!buf->page)
1507 return -ENOMEM;
1508 buf->page_offset = stmmac_rx_offset(priv);
1509 }
1510
1511 if (priv->sph && !buf->sec_page) {
1512 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1513 if (!buf->sec_page)
1514 return -ENOMEM;
1515
1516 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1517 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1518 } else {
1519 buf->sec_page = NULL;
1520 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1521 }
1522
1523 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1524
1525 stmmac_set_desc_addr(priv, p, buf->addr);
1526 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1527 stmmac_init_desc3(priv, p);
1528
1529 return 0;
1530 }
1531
1532 /**
1533 * stmmac_free_rx_buffer - free RX dma buffers
1534 * @priv: private structure
1535 * @rx_q: RX queue
1536 * @i: buffer index.
1537 */
1538 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1539 struct stmmac_rx_queue *rx_q,
1540 int i)
1541 {
1542 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1543
1544 if (buf->page)
1545 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1546 buf->page = NULL;
1547
1548 if (buf->sec_page)
1549 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1550 buf->sec_page = NULL;
1551 }
1552
1553 /**
1554 * stmmac_free_tx_buffer - free TX dma buffers
1555 * @priv: private structure
1556 * @dma_conf: structure to take the dma data
1557 * @queue: TX queue index
1558 * @i: buffer index.
1559 */
1560 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1561 struct stmmac_dma_conf *dma_conf,
1562 u32 queue, int i)
1563 {
1564 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1565
1566 if (tx_q->tx_skbuff_dma[i].buf &&
1567 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1568 if (tx_q->tx_skbuff_dma[i].map_as_page)
1569 dma_unmap_page(priv->device,
1570 tx_q->tx_skbuff_dma[i].buf,
1571 tx_q->tx_skbuff_dma[i].len,
1572 DMA_TO_DEVICE);
1573 else
1574 dma_unmap_single(priv->device,
1575 tx_q->tx_skbuff_dma[i].buf,
1576 tx_q->tx_skbuff_dma[i].len,
1577 DMA_TO_DEVICE);
1578 }
1579
1580 if (tx_q->xdpf[i] &&
1581 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1582 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1583 xdp_return_frame(tx_q->xdpf[i]);
1584 tx_q->xdpf[i] = NULL;
1585 }
1586
1587 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1588 tx_q->xsk_frames_done++;
1589
1590 if (tx_q->tx_skbuff[i] &&
1591 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1592 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1593 tx_q->tx_skbuff[i] = NULL;
1594 }
1595
1596 tx_q->tx_skbuff_dma[i].buf = 0;
1597 tx_q->tx_skbuff_dma[i].map_as_page = false;
1598 }
1599
1600 /**
1601 * dma_free_rx_skbufs - free RX dma buffers
1602 * @priv: private structure
1603 * @dma_conf: structure to take the dma data
1604 * @queue: RX queue index
1605 */
1606 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1607 struct stmmac_dma_conf *dma_conf,
1608 u32 queue)
1609 {
1610 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1611 int i;
1612
1613 for (i = 0; i < dma_conf->dma_rx_size; i++)
1614 stmmac_free_rx_buffer(priv, rx_q, i);
1615 }
1616
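/**
 * stmmac_alloc_rx_buffers - allocate page_pool RX buffers for a queue
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag
 * Description: allocates and maps a buffer for every RX descriptor of the
 * queue and programs the descriptor addresses.
 * Return: 0 on success, a negative errno if an allocation fails.
 */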
1617 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1618 struct stmmac_dma_conf *dma_conf,
1619 u32 queue, gfp_t flags)
1620 {
1621 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1622 int i;
1623
1624 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1625 struct dma_desc *p;
1626 int ret;
1627
1628 if (priv->extend_desc)
1629 p = &((rx_q->dma_erx + i)->basic);
1630 else
1631 p = rx_q->dma_rx + i;
1632
1633 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1634 queue);
1635 if (ret)
1636 return ret;
1637
1638 rx_q->buf_alloc_num++;
1639 }
1640
1641 return 0;
1642 }
1643
1644 /**
1645 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1646 * @priv: private structure
1647 * @dma_conf: structure to take the dma data
1648 * @queue: RX queue index
1649 */
1650 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1651 struct stmmac_dma_conf *dma_conf,
1652 u32 queue)
1653 {
1654 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1655 int i;
1656
1657 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1658 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1659
1660 if (!buf->xdp)
1661 continue;
1662
1663 xsk_buff_free(buf->xdp);
1664 buf->xdp = NULL;
1665 }
1666 }
1667
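/**
 * stmmac_alloc_rx_buffers_zc - allocate RX buffers from the XSK pool
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: fills the RX descriptors of the queue with zero-copy buffers
 * taken from the bound XSK buffer pool.
 * Return: 0 on success, -ENOMEM if the pool runs out of buffers.
 */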
1668 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1669 struct stmmac_dma_conf *dma_conf,
1670 u32 queue)
1671 {
1672 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1673 int i;
1674
1675 /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1676 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1677 * use this macro to make sure there are no size violations.
1678 */
1679 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1680
1681 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1682 struct stmmac_rx_buffer *buf;
1683 dma_addr_t dma_addr;
1684 struct dma_desc *p;
1685
1686 if (priv->extend_desc)
1687 p = (struct dma_desc *)(rx_q->dma_erx + i);
1688 else
1689 p = rx_q->dma_rx + i;
1690
1691 buf = &rx_q->buf_pool[i];
1692
1693 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1694 if (!buf->xdp)
1695 return -ENOMEM;
1696
1697 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1698 stmmac_set_desc_addr(priv, p, dma_addr);
1699 rx_q->buf_alloc_num++;
1700 }
1701
1702 return 0;
1703 }
1704
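/* Return the XSK buffer pool bound to @queue, or NULL when XDP zero-copy is
 * not enabled for that queue.
 */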
1705 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1706 {
1707 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1708 return NULL;
1709
1710 return xsk_get_pool_from_qid(priv->dev, queue);
1711 }
1712
1713 /**
1714 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1715 * @priv: driver private structure
1716 * @dma_conf: structure to take the dma data
1717 * @queue: RX queue index
1718 * @flags: gfp flag.
1719 * Description: this function initializes the DMA RX descriptors
1720 * and allocates the socket buffers. It supports the chained and ring
1721 * modes.
1722 */
1723 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1724 struct stmmac_dma_conf *dma_conf,
1725 u32 queue, gfp_t flags)
1726 {
1727 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1728 int ret;
1729
1730 netif_dbg(priv, probe, priv->dev,
1731 "(%s) dma_rx_phy=0x%08x\n", __func__,
1732 (u32)rx_q->dma_rx_phy);
1733
1734 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1735
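	/* Drop any previously registered memory model before choosing between
	 * the XSK buffer pool and the page pool below.
	 */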
1736 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1737
1738 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1739
1740 if (rx_q->xsk_pool) {
1741 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1742 MEM_TYPE_XSK_BUFF_POOL,
1743 NULL));
1744 netdev_info(priv->dev,
1745 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1746 rx_q->queue_index);
1747 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1748 } else {
1749 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1750 MEM_TYPE_PAGE_POOL,
1751 rx_q->page_pool));
1752 netdev_info(priv->dev,
1753 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1754 rx_q->queue_index);
1755 }
1756
1757 if (rx_q->xsk_pool) {
1758 /* RX XDP ZC buffer pool may not be populated, e.g.
1759 * xdpsock TX-only.
1760 */
1761 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1762 } else {
1763 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1764 if (ret < 0)
1765 return -ENOMEM;
1766 }
1767
1768 /* Setup the chained descriptor addresses */
1769 if (priv->mode == STMMAC_CHAIN_MODE) {
1770 if (priv->extend_desc)
1771 stmmac_mode_init(priv, rx_q->dma_erx,
1772 rx_q->dma_rx_phy,
1773 dma_conf->dma_rx_size, 1);
1774 else
1775 stmmac_mode_init(priv, rx_q->dma_rx,
1776 rx_q->dma_rx_phy,
1777 dma_conf->dma_rx_size, 0);
1778 }
1779
1780 return 0;
1781 }
1782
1783 static int init_dma_rx_desc_rings(struct net_device *dev,
1784 struct stmmac_dma_conf *dma_conf,
1785 gfp_t flags)
1786 {
1787 struct stmmac_priv *priv = netdev_priv(dev);
1788 u32 rx_count = priv->plat->rx_queues_to_use;
1789 int queue;
1790 int ret;
1791
1792 /* RX INITIALIZATION */
1793 netif_dbg(priv, probe, priv->dev,
1794 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1795
1796 for (queue = 0; queue < rx_count; queue++) {
1797 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1798 if (ret)
1799 goto err_init_rx_buffers;
1800 }
1801
1802 return 0;
1803
1804 err_init_rx_buffers:
1805 while (queue >= 0) {
1806 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1807
1808 if (rx_q->xsk_pool)
1809 dma_free_rx_xskbufs(priv, dma_conf, queue);
1810 else
1811 dma_free_rx_skbufs(priv, dma_conf, queue);
1812
1813 rx_q->buf_alloc_num = 0;
1814 rx_q->xsk_pool = NULL;
1815
1816 queue--;
1817 }
1818
1819 return ret;
1820 }
1821
1822 /**
1823 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1824 * @priv: driver private structure
1825 * @dma_conf: structure to take the dma data
1826 * @queue: TX queue index
1827  * Description: this function initializes the DMA TX descriptors
1828  * for the given queue. It supports the chained and ring
1829  * modes.
1830 */
1831 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1832 struct stmmac_dma_conf *dma_conf,
1833 u32 queue)
1834 {
1835 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1836 int i;
1837
1838 netif_dbg(priv, probe, priv->dev,
1839 "(%s) dma_tx_phy=0x%08x\n", __func__,
1840 (u32)tx_q->dma_tx_phy);
1841
1842 /* Setup the chained descriptor addresses */
1843 if (priv->mode == STMMAC_CHAIN_MODE) {
1844 if (priv->extend_desc)
1845 stmmac_mode_init(priv, tx_q->dma_etx,
1846 tx_q->dma_tx_phy,
1847 dma_conf->dma_tx_size, 1);
1848 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1849 stmmac_mode_init(priv, tx_q->dma_tx,
1850 tx_q->dma_tx_phy,
1851 dma_conf->dma_tx_size, 0);
1852 }
1853
1854 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1855
1856 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1857 struct dma_desc *p;
1858
1859 if (priv->extend_desc)
1860 p = &((tx_q->dma_etx + i)->basic);
1861 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1862 p = &((tx_q->dma_entx + i)->basic);
1863 else
1864 p = tx_q->dma_tx + i;
1865
1866 stmmac_clear_desc(priv, p);
1867
1868 tx_q->tx_skbuff_dma[i].buf = 0;
1869 tx_q->tx_skbuff_dma[i].map_as_page = false;
1870 tx_q->tx_skbuff_dma[i].len = 0;
1871 tx_q->tx_skbuff_dma[i].last_segment = false;
1872 tx_q->tx_skbuff[i] = NULL;
1873 }
1874
1875 return 0;
1876 }
1877
1878 static int init_dma_tx_desc_rings(struct net_device *dev,
1879 struct stmmac_dma_conf *dma_conf)
1880 {
1881 struct stmmac_priv *priv = netdev_priv(dev);
1882 u32 tx_queue_cnt;
1883 u32 queue;
1884
1885 tx_queue_cnt = priv->plat->tx_queues_to_use;
1886
1887 for (queue = 0; queue < tx_queue_cnt; queue++)
1888 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1889
1890 return 0;
1891 }
1892
1893 /**
1894 * init_dma_desc_rings - init the RX/TX descriptor rings
1895 * @dev: net device structure
1896 * @dma_conf: structure to take the dma data
1897 * @flags: gfp flag.
1898 * Description: this function initializes the DMA RX/TX descriptors
1899 * and allocates the socket buffers. It supports the chained and ring
1900 * modes.
1901 */
1902 static int init_dma_desc_rings(struct net_device *dev,
1903 struct stmmac_dma_conf *dma_conf,
1904 gfp_t flags)
1905 {
1906 struct stmmac_priv *priv = netdev_priv(dev);
1907 int ret;
1908
1909 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1910 if (ret)
1911 return ret;
1912
1913 ret = init_dma_tx_desc_rings(dev, dma_conf);
1914
1915 stmmac_clear_descriptors(priv, dma_conf);
1916
1917 if (netif_msg_hw(priv))
1918 stmmac_display_rings(priv, dma_conf);
1919
1920 return ret;
1921 }
1922
1923 /**
1924 * dma_free_tx_skbufs - free TX dma buffers
1925 * @priv: private structure
1926 * @dma_conf: structure to take the dma data
1927 * @queue: TX queue index
1928 */
1929 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1930 struct stmmac_dma_conf *dma_conf,
1931 u32 queue)
1932 {
1933 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1934 int i;
1935
1936 tx_q->xsk_frames_done = 0;
1937
1938 for (i = 0; i < dma_conf->dma_tx_size; i++)
1939 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1940
1941 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1942 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1943 tx_q->xsk_frames_done = 0;
1944 tx_q->xsk_pool = NULL;
1945 }
1946 }
1947
1948 /**
1949 * stmmac_free_tx_skbufs - free TX skb buffers
1950 * @priv: private structure
1951 */
1952 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1953 {
1954 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1955 u32 queue;
1956
1957 for (queue = 0; queue < tx_queue_cnt; queue++)
1958 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1959 }
1960
1961 /**
1962 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1963 * @priv: private structure
1964 * @dma_conf: structure to take the dma data
1965 * @queue: RX queue index
1966 */
1967 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1968 struct stmmac_dma_conf *dma_conf,
1969 u32 queue)
1970 {
1971 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1972
1973 /* Release the DMA RX socket buffers */
1974 if (rx_q->xsk_pool)
1975 dma_free_rx_xskbufs(priv, dma_conf, queue);
1976 else
1977 dma_free_rx_skbufs(priv, dma_conf, queue);
1978
1979 rx_q->buf_alloc_num = 0;
1980 rx_q->xsk_pool = NULL;
1981
1982 /* Free DMA regions of consistent memory previously allocated */
1983 if (!priv->extend_desc)
1984 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1985 sizeof(struct dma_desc),
1986 rx_q->dma_rx, rx_q->dma_rx_phy);
1987 else
1988 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1989 sizeof(struct dma_extended_desc),
1990 rx_q->dma_erx, rx_q->dma_rx_phy);
1991
1992 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1993 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1994
1995 kfree(rx_q->buf_pool);
1996 if (rx_q->page_pool)
1997 page_pool_destroy(rx_q->page_pool);
1998 }
1999
2000 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2001 struct stmmac_dma_conf *dma_conf)
2002 {
2003 u32 rx_count = priv->plat->rx_queues_to_use;
2004 u32 queue;
2005
2006 /* Free RX queue resources */
2007 for (queue = 0; queue < rx_count; queue++)
2008 __free_dma_rx_desc_resources(priv, dma_conf, queue);
2009 }
2010
2011 /**
2012 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2013 * @priv: private structure
2014 * @dma_conf: structure to take the dma data
2015 * @queue: TX queue index
2016 */
2017 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2018 struct stmmac_dma_conf *dma_conf,
2019 u32 queue)
2020 {
2021 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2022 size_t size;
2023 void *addr;
2024
2025 /* Release the DMA TX socket buffers */
2026 dma_free_tx_skbufs(priv, dma_conf, queue);
2027
2028 if (priv->extend_desc) {
2029 size = sizeof(struct dma_extended_desc);
2030 addr = tx_q->dma_etx;
2031 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2032 size = sizeof(struct dma_edesc);
2033 addr = tx_q->dma_entx;
2034 } else {
2035 size = sizeof(struct dma_desc);
2036 addr = tx_q->dma_tx;
2037 }
2038
2039 size *= dma_conf->dma_tx_size;
2040
2041 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2042
2043 kfree(tx_q->tx_skbuff_dma);
2044 kfree(tx_q->tx_skbuff);
2045 }
2046
2047 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2048 struct stmmac_dma_conf *dma_conf)
2049 {
2050 u32 tx_count = priv->plat->tx_queues_to_use;
2051 u32 queue;
2052
2053 /* Free TX queue resources */
2054 for (queue = 0; queue < tx_count; queue++)
2055 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2056 }
2057
2058 /**
2059 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2060 * @priv: private structure
2061 * @dma_conf: structure to take the dma data
2062 * @queue: RX queue index
2063  * Description: according to which descriptor type is in use (extended or
2064  * basic), this function allocates the RX resources for the given queue.
2065  * The RX buffers are pre-allocated in order to allow a zero-copy
2066  * mechanism.
2067 */
2068 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2069 struct stmmac_dma_conf *dma_conf,
2070 u32 queue)
2071 {
2072 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2073 struct stmmac_channel *ch = &priv->channel[queue];
2074 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2075 struct page_pool_params pp_params = { 0 };
2076 unsigned int dma_buf_sz_pad, num_pages;
2077 unsigned int napi_id;
2078 int ret;
2079
2080 dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2081 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2082 num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2083
2084 rx_q->queue_index = queue;
2085 rx_q->priv_data = priv;
2086 rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2087
2088 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2089 pp_params.pool_size = dma_conf->dma_rx_size;
2090 pp_params.order = order_base_2(num_pages);
2091 pp_params.nid = dev_to_node(priv->device);
2092 pp_params.dev = priv->device;
2093 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2094 pp_params.offset = stmmac_rx_offset(priv);
2095 pp_params.max_len = dma_conf->dma_buf_sz;
2096
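	/* With Split Header (SPH) enabled the header is DMA'd into a separate
	 * buffer, so no headroom offset is reserved here and the whole page
	 * pool buffer can hold payload.
	 */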
2097 if (priv->sph) {
2098 pp_params.offset = 0;
2099 pp_params.max_len += stmmac_rx_offset(priv);
2100 }
2101
2102 rx_q->page_pool = page_pool_create(&pp_params);
2103 if (IS_ERR(rx_q->page_pool)) {
2104 ret = PTR_ERR(rx_q->page_pool);
2105 rx_q->page_pool = NULL;
2106 return ret;
2107 }
2108
2109 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2110 sizeof(*rx_q->buf_pool),
2111 GFP_KERNEL);
2112 if (!rx_q->buf_pool)
2113 return -ENOMEM;
2114
2115 if (priv->extend_desc) {
2116 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2117 dma_conf->dma_rx_size *
2118 sizeof(struct dma_extended_desc),
2119 &rx_q->dma_rx_phy,
2120 GFP_KERNEL);
2121 if (!rx_q->dma_erx)
2122 return -ENOMEM;
2123
2124 } else {
2125 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2126 dma_conf->dma_rx_size *
2127 sizeof(struct dma_desc),
2128 &rx_q->dma_rx_phy,
2129 GFP_KERNEL);
2130 if (!rx_q->dma_rx)
2131 return -ENOMEM;
2132 }
2133
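	/* Zero-copy AF_XDP queues are serviced by the combined rx/tx NAPI
	 * instance; all other queues use the RX-only NAPI.
	 */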
2134 if (stmmac_xdp_is_enabled(priv) &&
2135 test_bit(queue, priv->af_xdp_zc_qps))
2136 napi_id = ch->rxtx_napi.napi_id;
2137 else
2138 napi_id = ch->rx_napi.napi_id;
2139
2140 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2141 rx_q->queue_index,
2142 napi_id);
2143 if (ret) {
2144 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2145 return -EINVAL;
2146 }
2147
2148 return 0;
2149 }
2150
2151 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2152 struct stmmac_dma_conf *dma_conf)
2153 {
2154 u32 rx_count = priv->plat->rx_queues_to_use;
2155 u32 queue;
2156 int ret;
2157
2158 /* RX queues buffers and DMA */
2159 for (queue = 0; queue < rx_count; queue++) {
2160 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2161 if (ret)
2162 goto err_dma;
2163 }
2164
2165 return 0;
2166
2167 err_dma:
2168 free_dma_rx_desc_resources(priv, dma_conf);
2169
2170 return ret;
2171 }
2172
2173 /**
2174 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2175 * @priv: private structure
2176 * @dma_conf: structure to take the dma data
2177 * @queue: TX queue index
2178  * Description: according to which descriptor type is in use (extended or
2179  * basic), this function allocates the TX resources for the given queue:
2180  * the descriptor ring and the bookkeeping arrays used to track the
2181  * queued buffers.
2182 */
2183 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2184 struct stmmac_dma_conf *dma_conf,
2185 u32 queue)
2186 {
2187 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2188 size_t size;
2189 void *addr;
2190
2191 tx_q->queue_index = queue;
2192 tx_q->priv_data = priv;
2193
2194 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2195 sizeof(*tx_q->tx_skbuff_dma),
2196 GFP_KERNEL);
2197 if (!tx_q->tx_skbuff_dma)
2198 return -ENOMEM;
2199
2200 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2201 sizeof(struct sk_buff *),
2202 GFP_KERNEL);
2203 if (!tx_q->tx_skbuff)
2204 return -ENOMEM;
2205
2206 if (priv->extend_desc)
2207 size = sizeof(struct dma_extended_desc);
2208 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2209 size = sizeof(struct dma_edesc);
2210 else
2211 size = sizeof(struct dma_desc);
2212
2213 size *= dma_conf->dma_tx_size;
2214
2215 addr = dma_alloc_coherent(priv->device, size,
2216 &tx_q->dma_tx_phy, GFP_KERNEL);
2217 if (!addr)
2218 return -ENOMEM;
2219
2220 if (priv->extend_desc)
2221 tx_q->dma_etx = addr;
2222 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2223 tx_q->dma_entx = addr;
2224 else
2225 tx_q->dma_tx = addr;
2226
2227 return 0;
2228 }
2229
2230 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2231 struct stmmac_dma_conf *dma_conf)
2232 {
2233 u32 tx_count = priv->plat->tx_queues_to_use;
2234 u32 queue;
2235 int ret;
2236
2237 /* TX queues buffers and DMA */
2238 for (queue = 0; queue < tx_count; queue++) {
2239 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2240 if (ret)
2241 goto err_dma;
2242 }
2243
2244 return 0;
2245
2246 err_dma:
2247 free_dma_tx_desc_resources(priv, dma_conf);
2248 return ret;
2249 }
2250
2251 /**
2252 * alloc_dma_desc_resources - alloc TX/RX resources.
2253 * @priv: private structure
2254 * @dma_conf: structure to take the dma data
2255  * Description: according to which descriptor type is in use (extended or
2256  * basic), this function allocates the resources for the TX and RX paths.
2257  * For reception it pre-allocates the RX buffers in order to allow a
2258  * zero-copy mechanism.
2259 */
2260 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2261 struct stmmac_dma_conf *dma_conf)
2262 {
2263 /* RX Allocation */
2264 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2265
2266 if (ret)
2267 return ret;
2268
2269 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2270
2271 return ret;
2272 }
2273
2274 /**
2275 * free_dma_desc_resources - free dma desc resources
2276 * @priv: private structure
2277 * @dma_conf: structure to take the dma data
2278 */
2279 static void free_dma_desc_resources(struct stmmac_priv *priv,
2280 struct stmmac_dma_conf *dma_conf)
2281 {
2282 /* Release the DMA TX socket buffers */
2283 free_dma_tx_desc_resources(priv, dma_conf);
2284
2285 /* Release the DMA RX socket buffers later
2286 * to ensure all pending XDP_TX buffers are returned.
2287 */
2288 free_dma_rx_desc_resources(priv, dma_conf);
2289 }
2290
2291 /**
2292 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2293 * @priv: driver private structure
2294 * Description: It is used for enabling the rx queues in the MAC
2295 */
2296 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2297 {
2298 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2299 int queue;
2300 u8 mode;
2301
2302 for (queue = 0; queue < rx_queues_count; queue++) {
2303 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2304 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2305 }
2306 }
2307
2308 /**
2309 * stmmac_start_rx_dma - start RX DMA channel
2310 * @priv: driver private structure
2311 * @chan: RX channel index
2312 * Description:
2313 * This starts a RX DMA channel
2314 */
2315 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2316 {
2317 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2318 stmmac_start_rx(priv, priv->ioaddr, chan);
2319 }
2320
2321 /**
2322 * stmmac_start_tx_dma - start TX DMA channel
2323 * @priv: driver private structure
2324 * @chan: TX channel index
2325 * Description:
2326 * This starts a TX DMA channel
2327 */
2328 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2329 {
2330 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2331 stmmac_start_tx(priv, priv->ioaddr, chan);
2332 }
2333
2334 /**
2335 * stmmac_stop_rx_dma - stop RX DMA channel
2336 * @priv: driver private structure
2337 * @chan: RX channel index
2338 * Description:
2339 * This stops a RX DMA channel
2340 */
2341 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2342 {
2343 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2344 stmmac_stop_rx(priv, priv->ioaddr, chan);
2345 }
2346
2347 /**
2348 * stmmac_stop_tx_dma - stop TX DMA channel
2349 * @priv: driver private structure
2350 * @chan: TX channel index
2351 * Description:
2352 * This stops a TX DMA channel
2353 */
2354 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2355 {
2356 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2357 stmmac_stop_tx(priv, priv->ioaddr, chan);
2358 }
2359
2360 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2361 {
2362 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2363 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2364 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2365 u32 chan;
2366
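	/* IRQ enablement is per DMA channel, and a channel may serve both an
	 * RX and a TX queue, hence the max() of the two queue counts.
	 */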
2367 for (chan = 0; chan < dma_csr_ch; chan++) {
2368 struct stmmac_channel *ch = &priv->channel[chan];
2369 unsigned long flags;
2370
2371 spin_lock_irqsave(&ch->lock, flags);
2372 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2373 spin_unlock_irqrestore(&ch->lock, flags);
2374 }
2375 }
2376
2377 /**
2378 * stmmac_start_all_dma - start all RX and TX DMA channels
2379 * @priv: driver private structure
2380 * Description:
2381 * This starts all the RX and TX DMA channels
2382 */
2383 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2384 {
2385 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2386 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2387 u32 chan = 0;
2388
2389 for (chan = 0; chan < rx_channels_count; chan++)
2390 stmmac_start_rx_dma(priv, chan);
2391
2392 for (chan = 0; chan < tx_channels_count; chan++)
2393 stmmac_start_tx_dma(priv, chan);
2394 }
2395
2396 /**
2397 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2398 * @priv: driver private structure
2399 * Description:
2400 * This stops the RX and TX DMA channels
2401 */
2402 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2403 {
2404 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2405 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2406 u32 chan = 0;
2407
2408 for (chan = 0; chan < rx_channels_count; chan++)
2409 stmmac_stop_rx_dma(priv, chan);
2410
2411 for (chan = 0; chan < tx_channels_count; chan++)
2412 stmmac_stop_tx_dma(priv, chan);
2413 }
2414
2415 /**
2416 * stmmac_dma_operation_mode - HW DMA operation mode
2417 * @priv: driver private structure
2418 * Description: it is used for configuring the DMA operation mode register in
2419 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2420 */
2421 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2422 {
2423 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2424 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2425 int rxfifosz = priv->plat->rx_fifo_size;
2426 int txfifosz = priv->plat->tx_fifo_size;
2427 u32 txmode = 0;
2428 u32 rxmode = 0;
2429 u32 chan = 0;
2430 u8 qmode = 0;
2431
2432 if (rxfifosz == 0)
2433 rxfifosz = priv->dma_cap.rx_fifo_size;
2434 if (txfifosz == 0)
2435 txfifosz = priv->dma_cap.tx_fifo_size;
2436
2437 /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2438 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2439 rxfifosz /= rx_channels_count;
2440 txfifosz /= tx_channels_count;
2441 }
2442
2443 if (priv->plat->force_thresh_dma_mode) {
2444 txmode = tc;
2445 rxmode = tc;
2446 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2447 /*
2448 * In case of GMAC, SF mode can be enabled
2449 * to perform the TX COE in HW. This depends on:
2450 		 * 1) TX COE being actually supported;
2451 		 * 2) there being no bugged Jumbo frame support
2452 		 * that requires not inserting the csum in the TDES.
2453 */
2454 txmode = SF_DMA_MODE;
2455 rxmode = SF_DMA_MODE;
2456 priv->xstats.threshold = SF_DMA_MODE;
2457 } else {
2458 txmode = tc;
2459 rxmode = SF_DMA_MODE;
2460 }
2461
2462 /* configure all channels */
2463 for (chan = 0; chan < rx_channels_count; chan++) {
2464 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2465 u32 buf_size;
2466
2467 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2468
2469 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2470 rxfifosz, qmode);
2471
2472 if (rx_q->xsk_pool) {
2473 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2474 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2475 buf_size,
2476 chan);
2477 } else {
2478 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2479 priv->dma_conf.dma_buf_sz,
2480 chan);
2481 }
2482 }
2483
2484 for (chan = 0; chan < tx_channels_count; chan++) {
2485 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2486
2487 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2488 txfifosz, qmode);
2489 }
2490 }
2491
2492 static void stmmac_xsk_request_timestamp(void *_priv)
2493 {
2494 struct stmmac_metadata_request *meta_req = _priv;
2495
2496 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2497 *meta_req->set_ic = true;
2498 }
2499
2500 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2501 {
2502 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2503 struct stmmac_priv *priv = tx_compl->priv;
2504 struct dma_desc *desc = tx_compl->desc;
2505 bool found = false;
2506 u64 ns = 0;
2507
2508 if (!priv->hwts_tx_en)
2509 return 0;
2510
2511 /* check tx tstamp status */
2512 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2513 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2514 found = true;
2515 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2516 found = true;
2517 }
2518
2519 if (found) {
2520 ns -= priv->plat->cdc_error_adj;
2521 return ns_to_ktime(ns);
2522 }
2523
2524 return 0;
2525 }
2526
2527 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2528 .tmo_request_timestamp = stmmac_xsk_request_timestamp,
2529 .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
2530 };
2531
2532 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2533 {
2534 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2535 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2536 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2537 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2538 unsigned int entry = tx_q->cur_tx;
2539 struct dma_desc *tx_desc = NULL;
2540 struct xdp_desc xdp_desc;
2541 bool work_done = true;
2542 u32 tx_set_ic_bit = 0;
2543
2544 	/* Avoid a TX time-out as we are sharing the queue with the slow path */
2545 txq_trans_cond_update(nq);
2546
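	/* Never queue more frames than there are free descriptors in the ring. */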
2547 budget = min(budget, stmmac_tx_avail(priv, queue));
2548
2549 while (budget-- > 0) {
2550 struct stmmac_metadata_request meta_req;
2551 struct xsk_tx_metadata *meta = NULL;
2552 dma_addr_t dma_addr;
2553 bool set_ic;
2554
2555 		/* We are sharing the ring with the slow path, so stop XSK TX desc
2556 		 * submission when the available TX ring space drops below the threshold.
2557 */
2558 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2559 !netif_carrier_ok(priv->dev)) {
2560 work_done = false;
2561 break;
2562 }
2563
2564 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2565 break;
2566
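		/* Enforce the per-queue max SDU limit programmed through EST
		 * (e.g. via tc taprio); oversized frames are dropped and counted.
		 */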
2567 if (priv->est && priv->est->enable &&
2568 priv->est->max_sdu[queue] &&
2569 xdp_desc.len > priv->est->max_sdu[queue]) {
2570 priv->xstats.max_sdu_txq_drop[queue]++;
2571 continue;
2572 }
2573
2574 if (likely(priv->extend_desc))
2575 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2576 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2577 tx_desc = &tx_q->dma_entx[entry].basic;
2578 else
2579 tx_desc = tx_q->dma_tx + entry;
2580
2581 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2582 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2583 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2584
2585 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2586
2587 		/* To return the XDP buffer to the XSK pool, we simply call
2588 * xsk_tx_completed(), so we don't need to fill up
2589 * 'buf' and 'xdpf'.
2590 */
2591 tx_q->tx_skbuff_dma[entry].buf = 0;
2592 tx_q->xdpf[entry] = NULL;
2593
2594 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2595 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2596 tx_q->tx_skbuff_dma[entry].last_segment = true;
2597 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2598
2599 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2600
2601 tx_q->tx_count_frames++;
2602
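		/* TX IRQ coalescing: request an interrupt-on-completion only
		 * once every tx_coal_frames descriptors (0 means never from
		 * this path; cleanup then relies on the coalesce timer).
		 */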
2603 if (!priv->tx_coal_frames[queue])
2604 set_ic = false;
2605 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2606 set_ic = true;
2607 else
2608 set_ic = false;
2609
2610 meta_req.priv = priv;
2611 meta_req.tx_desc = tx_desc;
2612 meta_req.set_ic = &set_ic;
2613 xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2614 &meta_req);
2615 if (set_ic) {
2616 tx_q->tx_count_frames = 0;
2617 stmmac_set_tx_ic(priv, tx_desc);
2618 tx_set_ic_bit++;
2619 }
2620
2621 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2622 true, priv->mode, true, true,
2623 xdp_desc.len);
2624
2625 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2626
2627 xsk_tx_metadata_to_compl(meta,
2628 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2629
2630 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2631 entry = tx_q->cur_tx;
2632 }
2633 u64_stats_update_begin(&txq_stats->napi_syncp);
2634 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2635 u64_stats_update_end(&txq_stats->napi_syncp);
2636
2637 if (tx_desc) {
2638 stmmac_flush_tx_descriptors(priv, queue);
2639 xsk_tx_release(pool);
2640 }
2641
2642 	/* Return true if both of the following conditions are met:
2643 * a) TX Budget is still available
2644 * b) work_done = true when XSK TX desc peek is empty (no more
2645 * pending XSK TX for transmission)
2646 */
2647 return !!budget && work_done;
2648 }
2649
2650 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2651 {
2652 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2653 tc += 64;
2654
2655 if (priv->plat->force_thresh_dma_mode)
2656 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2657 else
2658 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2659 chan);
2660
2661 priv->xstats.threshold = tc;
2662 }
2663 }
2664
2665 /**
2666 * stmmac_tx_clean - to manage the transmission completion
2667 * @priv: driver private structure
2668 * @budget: napi budget limiting this functions packet handling
2669 * @queue: TX queue index
2670 * @pending_packets: signal to arm the TX coal timer
2671 * Description: it reclaims the transmit resources after transmission completes.
2672  * If some packets still need to be handled, due to TX coalescing, set
2673 * pending_packets to true to make NAPI arm the TX coal timer.
2674 */
2675 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2676 bool *pending_packets)
2677 {
2678 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2679 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2680 unsigned int bytes_compl = 0, pkts_compl = 0;
2681 unsigned int entry, xmits = 0, count = 0;
2682 u32 tx_packets = 0, tx_errors = 0;
2683
2684 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2685
2686 tx_q->xsk_frames_done = 0;
2687
2688 entry = tx_q->dirty_tx;
2689
2690 	/* Try to clean all completed TX frames in one shot */
2691 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2692 struct xdp_frame *xdpf;
2693 struct sk_buff *skb;
2694 struct dma_desc *p;
2695 int status;
2696
2697 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2698 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2699 xdpf = tx_q->xdpf[entry];
2700 skb = NULL;
2701 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2702 xdpf = NULL;
2703 skb = tx_q->tx_skbuff[entry];
2704 } else {
2705 xdpf = NULL;
2706 skb = NULL;
2707 }
2708
2709 if (priv->extend_desc)
2710 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2711 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2712 p = &tx_q->dma_entx[entry].basic;
2713 else
2714 p = tx_q->dma_tx + entry;
2715
2716 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2717 /* Check if the descriptor is owned by the DMA */
2718 if (unlikely(status & tx_dma_own))
2719 break;
2720
2721 count++;
2722
2723 /* Make sure descriptor fields are read after reading
2724 * the own bit.
2725 */
2726 dma_rmb();
2727
2728 /* Just consider the last segment and ...*/
2729 if (likely(!(status & tx_not_ls))) {
2730 /* ... verify the status error condition */
2731 if (unlikely(status & tx_err)) {
2732 tx_errors++;
2733 if (unlikely(status & tx_err_bump_tc))
2734 stmmac_bump_dma_threshold(priv, queue);
2735 } else {
2736 tx_packets++;
2737 }
2738 if (skb) {
2739 stmmac_get_tx_hwtstamp(priv, p, skb);
2740 } else if (tx_q->xsk_pool &&
2741 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2742 struct stmmac_xsk_tx_complete tx_compl = {
2743 .priv = priv,
2744 .desc = p,
2745 };
2746
2747 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2748 &stmmac_xsk_tx_metadata_ops,
2749 &tx_compl);
2750 }
2751 }
2752
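		/* XDP_TX buffers come from the RX page_pool, which keeps
		 * ownership of their DMA mapping, so don't unmap them here.
		 */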
2753 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2754 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2755 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2756 dma_unmap_page(priv->device,
2757 tx_q->tx_skbuff_dma[entry].buf,
2758 tx_q->tx_skbuff_dma[entry].len,
2759 DMA_TO_DEVICE);
2760 else
2761 dma_unmap_single(priv->device,
2762 tx_q->tx_skbuff_dma[entry].buf,
2763 tx_q->tx_skbuff_dma[entry].len,
2764 DMA_TO_DEVICE);
2765 tx_q->tx_skbuff_dma[entry].buf = 0;
2766 tx_q->tx_skbuff_dma[entry].len = 0;
2767 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2768 }
2769
2770 stmmac_clean_desc3(priv, tx_q, p);
2771
2772 tx_q->tx_skbuff_dma[entry].last_segment = false;
2773 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2774
2775 if (xdpf &&
2776 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2777 xdp_return_frame_rx_napi(xdpf);
2778 tx_q->xdpf[entry] = NULL;
2779 }
2780
2781 if (xdpf &&
2782 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2783 xdp_return_frame(xdpf);
2784 tx_q->xdpf[entry] = NULL;
2785 }
2786
2787 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2788 tx_q->xsk_frames_done++;
2789
2790 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2791 if (likely(skb)) {
2792 pkts_compl++;
2793 bytes_compl += skb->len;
2794 dev_consume_skb_any(skb);
2795 tx_q->tx_skbuff[entry] = NULL;
2796 }
2797 }
2798
2799 stmmac_release_tx_desc(priv, p, priv->mode);
2800
2801 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2802 }
2803 tx_q->dirty_tx = entry;
2804
2805 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2806 pkts_compl, bytes_compl);
2807
2808 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2809 queue))) &&
2810 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2811
2812 netif_dbg(priv, tx_done, priv->dev,
2813 "%s: restart transmit\n", __func__);
2814 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2815 }
2816
2817 if (tx_q->xsk_pool) {
2818 bool work_done;
2819
2820 if (tx_q->xsk_frames_done)
2821 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2822
2823 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2824 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2825
2826 /* For XSK TX, we try to send as many as possible.
2827 * If XSK work done (XSK TX desc empty and budget still
2828 * available), return "budget - 1" to reenable TX IRQ.
2829 * Else, return "budget" to make NAPI continue polling.
2830 */
2831 work_done = stmmac_xdp_xmit_zc(priv, queue,
2832 STMMAC_XSK_TX_BUDGET_MAX);
2833 if (work_done)
2834 xmits = budget - 1;
2835 else
2836 xmits = budget;
2837 }
2838
2839 if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2840 stmmac_restart_sw_lpi_timer(priv);
2841
2842 /* We still have pending packets, let's call for a new scheduling */
2843 if (tx_q->dirty_tx != tx_q->cur_tx)
2844 *pending_packets = true;
2845
2846 u64_stats_update_begin(&txq_stats->napi_syncp);
2847 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2848 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2849 u64_stats_inc(&txq_stats->napi.tx_clean);
2850 u64_stats_update_end(&txq_stats->napi_syncp);
2851
2852 priv->xstats.tx_errors += tx_errors;
2853
2854 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2855
2856 /* Combine decisions from TX clean and XSK TX */
2857 return max(count, xmits);
2858 }
2859
2860 /**
2861 * stmmac_tx_err - to manage the tx error
2862 * @priv: driver private structure
2863 * @chan: channel index
2864 * Description: it cleans the descriptors and restarts the transmission
2865 * in case of transmission errors.
2866 */
2867 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2868 {
2869 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2870
2871 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2872
2873 stmmac_stop_tx_dma(priv, chan);
2874 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2875 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2876 stmmac_reset_tx_queue(priv, chan);
2877 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2878 tx_q->dma_tx_phy, chan);
2879 stmmac_start_tx_dma(priv, chan);
2880
2881 priv->xstats.tx_errors++;
2882 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2883 }
2884
2885 /**
2886 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2887 * @priv: driver private structure
2888 * @txmode: TX operating mode
2889 * @rxmode: RX operating mode
2890 * @chan: channel index
2891  * Description: it is used for configuring the DMA operation mode at
2892  * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2893  * mode.
2894 */
2895 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2896 u32 rxmode, u32 chan)
2897 {
2898 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2899 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2900 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2901 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2902 int rxfifosz = priv->plat->rx_fifo_size;
2903 int txfifosz = priv->plat->tx_fifo_size;
2904
2905 if (rxfifosz == 0)
2906 rxfifosz = priv->dma_cap.rx_fifo_size;
2907 if (txfifosz == 0)
2908 txfifosz = priv->dma_cap.tx_fifo_size;
2909
2910 /* Adjust for real per queue fifo size */
2911 rxfifosz /= rx_channels_count;
2912 txfifosz /= tx_channels_count;
2913
2914 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2915 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2916 }
2917
2918 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2919 {
2920 int ret;
2921
2922 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2923 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2924 if (ret && (ret != -EINVAL)) {
2925 stmmac_global_err(priv);
2926 return true;
2927 }
2928
2929 return false;
2930 }
2931
2932 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2933 {
2934 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2935 &priv->xstats, chan, dir);
2936 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2937 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2938 struct stmmac_channel *ch = &priv->channel[chan];
2939 struct napi_struct *rx_napi;
2940 struct napi_struct *tx_napi;
2941 unsigned long flags;
2942
2943 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2944 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2945
2946 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2947 if (napi_schedule_prep(rx_napi)) {
2948 spin_lock_irqsave(&ch->lock, flags);
2949 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2950 spin_unlock_irqrestore(&ch->lock, flags);
2951 __napi_schedule(rx_napi);
2952 }
2953 }
2954
2955 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2956 if (napi_schedule_prep(tx_napi)) {
2957 spin_lock_irqsave(&ch->lock, flags);
2958 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2959 spin_unlock_irqrestore(&ch->lock, flags);
2960 __napi_schedule(tx_napi);
2961 }
2962 }
2963
2964 return status;
2965 }
2966
2967 /**
2968 * stmmac_dma_interrupt - DMA ISR
2969 * @priv: driver private structure
2970 * Description: this is the DMA ISR. It is called by the main ISR.
2971  * It calls the dwmac dma routine and schedules the poll method in case
2972  * some work can be done.
2973 */
2974 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2975 {
2976 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2977 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2978 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2979 tx_channel_count : rx_channel_count;
2980 u32 chan;
2981 int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2982
2983 /* Make sure we never check beyond our status buffer. */
2984 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2985 channels_to_check = ARRAY_SIZE(status);
2986
2987 for (chan = 0; chan < channels_to_check; chan++)
2988 status[chan] = stmmac_napi_check(priv, chan,
2989 DMA_DIR_RXTX);
2990
2991 for (chan = 0; chan < tx_channel_count; chan++) {
2992 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2993 /* Try to bump up the dma threshold on this failure */
2994 stmmac_bump_dma_threshold(priv, chan);
2995 } else if (unlikely(status[chan] == tx_hard_error)) {
2996 stmmac_tx_err(priv, chan);
2997 }
2998 }
2999 }
3000
3001 /**
3002 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3003 * @priv: driver private structure
3004  * Description: this masks the MMC irq; the counters are managed in SW.
3005 */
3006 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3007 {
3008 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3009 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3010
3011 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3012
3013 if (priv->dma_cap.rmon) {
3014 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3015 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3016 } else
3017 netdev_info(priv->dev, "No MAC Management Counters available\n");
3018 }
3019
3020 /**
3021 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3022 * @priv: driver private structure
3023 * Description:
3024  * newer GMAC chip generations have a register that indicates the
3025  * presence of the optional features/functions.
3026  * This can also be used to override the value passed through the
3027  * platform, which is necessary for old MAC10/100 and GMAC chips.
3028 */
3029 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3030 {
3031 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3032 }
3033
3034 /**
3035 * stmmac_check_ether_addr - check if the MAC addr is valid
3036 * @priv: driver private structure
3037 * Description:
3038  * it verifies that the MAC address is valid; if it is not, a random MAC
3039  * address is generated
3040 */
3041 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3042 {
3043 u8 addr[ETH_ALEN];
3044
3045 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3046 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3047 if (is_valid_ether_addr(addr))
3048 eth_hw_addr_set(priv->dev, addr);
3049 else
3050 eth_hw_addr_random(priv->dev);
3051 dev_info(priv->device, "device MAC address %pM\n",
3052 priv->dev->dev_addr);
3053 }
3054 }
3055
3056 /**
3057 * stmmac_init_dma_engine - DMA init.
3058 * @priv: driver private structure
3059 * Description:
3060  * It initializes the DMA by invoking the specific MAC/GMAC callback.
3061  * Some DMA parameters can be passed from the platform;
3062  * if they are not passed, a default is kept for the MAC or GMAC.
3063 */
3064 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3065 {
3066 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3067 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3068 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3069 struct stmmac_rx_queue *rx_q;
3070 struct stmmac_tx_queue *tx_q;
3071 u32 chan = 0;
3072 int ret = 0;
3073
3074 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3075 dev_err(priv->device, "Invalid DMA configuration\n");
3076 return -EINVAL;
3077 }
3078
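	/* Extended descriptors in ring mode require the Alternate Descriptor
	 * Size (ATDS) bit so the DMA steps over the larger 8-word descriptors.
	 */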
3079 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3080 priv->plat->dma_cfg->atds = 1;
3081
3082 ret = stmmac_reset(priv, priv->ioaddr);
3083 if (ret) {
3084 dev_err(priv->device, "Failed to reset the dma\n");
3085 return ret;
3086 }
3087
3088 /* DMA Configuration */
3089 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3090
3091 if (priv->plat->axi)
3092 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3093
3094 /* DMA CSR Channel configuration */
3095 for (chan = 0; chan < dma_csr_ch; chan++) {
3096 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3097 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3098 }
3099
3100 /* DMA RX Channel Configuration */
3101 for (chan = 0; chan < rx_channels_count; chan++) {
3102 rx_q = &priv->dma_conf.rx_queue[chan];
3103
3104 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3105 rx_q->dma_rx_phy, chan);
3106
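		/* Program the RX tail pointer just past the last descriptor
		 * that actually received a buffer, so the DMA only uses the
		 * populated part of the ring.
		 */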
3107 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3108 (rx_q->buf_alloc_num *
3109 sizeof(struct dma_desc));
3110 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3111 rx_q->rx_tail_addr, chan);
3112 }
3113
3114 /* DMA TX Channel Configuration */
3115 for (chan = 0; chan < tx_channels_count; chan++) {
3116 tx_q = &priv->dma_conf.tx_queue[chan];
3117
3118 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3119 tx_q->dma_tx_phy, chan);
3120
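		/* The TX ring starts empty: the tail pointer begins at the
		 * ring base and is advanced as descriptors are queued.
		 */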
3121 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3122 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3123 tx_q->tx_tail_addr, chan);
3124 }
3125
3126 return ret;
3127 }
3128
3129 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3130 {
3131 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3132 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3133 struct stmmac_channel *ch;
3134 struct napi_struct *napi;
3135
3136 if (!tx_coal_timer)
3137 return;
3138
3139 ch = &priv->channel[tx_q->queue_index];
3140 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3141
3142 /* Arm timer only if napi is not already scheduled.
3143 * Try to cancel any timer if napi is scheduled, timer will be armed
3144 * again in the next scheduled napi.
3145 */
3146 if (unlikely(!napi_is_scheduled(napi)))
3147 hrtimer_start(&tx_q->txtimer,
3148 STMMAC_COAL_TIMER(tx_coal_timer),
3149 HRTIMER_MODE_REL);
3150 else
3151 hrtimer_try_to_cancel(&tx_q->txtimer);
3152 }
3153
3154 /**
3155 * stmmac_tx_timer - mitigation sw timer for tx.
3156 * @t: data pointer
3157 * Description:
3158 * This is the timer handler to directly invoke the stmmac_tx_clean.
3159 */
3160 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3161 {
3162 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3163 struct stmmac_priv *priv = tx_q->priv_data;
3164 struct stmmac_channel *ch;
3165 struct napi_struct *napi;
3166
3167 ch = &priv->channel[tx_q->queue_index];
3168 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3169
3170 if (likely(napi_schedule_prep(napi))) {
3171 unsigned long flags;
3172
3173 spin_lock_irqsave(&ch->lock, flags);
3174 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3175 spin_unlock_irqrestore(&ch->lock, flags);
3176 __napi_schedule(napi);
3177 }
3178
3179 return HRTIMER_NORESTART;
3180 }
3181
3182 /**
3183 * stmmac_init_coalesce - init mitigation options.
3184 * @priv: driver private structure
3185 * Description:
3186 * This inits the coalesce parameters: i.e. timer rate,
3187 * timer handler and default threshold used for enabling the
3188 * interrupt on completion bit.
3189 */
3190 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3191 {
3192 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3193 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3194 u32 chan;
3195
3196 for (chan = 0; chan < tx_channel_count; chan++) {
3197 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3198
3199 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3200 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3201
3202 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3203 tx_q->txtimer.function = stmmac_tx_timer;
3204 }
3205
3206 for (chan = 0; chan < rx_channel_count; chan++)
3207 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3208 }
3209
3210 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3211 {
3212 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3213 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3214 u32 chan;
3215
3216 /* set TX ring length */
3217 for (chan = 0; chan < tx_channels_count; chan++)
3218 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3219 (priv->dma_conf.dma_tx_size - 1), chan);
3220
3221 /* set RX ring length */
3222 for (chan = 0; chan < rx_channels_count; chan++)
3223 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3224 (priv->dma_conf.dma_rx_size - 1), chan);
3225 }
3226
3227 /**
3228 * stmmac_set_tx_queue_weight - Set TX queue weight
3229 * @priv: driver private structure
3230  * Description: It is used for setting the TX queue weights
3231 */
3232 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3233 {
3234 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3235 u32 weight;
3236 u32 queue;
3237
3238 for (queue = 0; queue < tx_queues_count; queue++) {
3239 weight = priv->plat->tx_queues_cfg[queue].weight;
3240 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3241 }
3242 }
3243
3244 /**
3245 * stmmac_configure_cbs - Configure CBS in TX queue
3246 * @priv: driver private structure
3247 * Description: It is used for configuring CBS in AVB TX queues
3248 */
3249 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3250 {
3251 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3252 u32 mode_to_use;
3253 u32 queue;
3254
3255 /* queue 0 is reserved for legacy traffic */
3256 for (queue = 1; queue < tx_queues_count; queue++) {
3257 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3258 if (mode_to_use == MTL_QUEUE_DCB)
3259 continue;
3260
3261 stmmac_config_cbs(priv, priv->hw,
3262 priv->plat->tx_queues_cfg[queue].send_slope,
3263 priv->plat->tx_queues_cfg[queue].idle_slope,
3264 priv->plat->tx_queues_cfg[queue].high_credit,
3265 priv->plat->tx_queues_cfg[queue].low_credit,
3266 queue);
3267 }
3268 }
3269
3270 /**
3271 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3272 * @priv: driver private structure
3273 * Description: It is used for mapping RX queues to RX dma channels
3274 */
3275 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3276 {
3277 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3278 u32 queue;
3279 u32 chan;
3280
3281 for (queue = 0; queue < rx_queues_count; queue++) {
3282 chan = priv->plat->rx_queues_cfg[queue].chan;
3283 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3284 }
3285 }
3286
3287 /**
3288 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3289 * @priv: driver private structure
3290 * Description: It is used for configuring the RX Queue Priority
3291 */
3292 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3293 {
3294 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3295 u32 queue;
3296 u32 prio;
3297
3298 for (queue = 0; queue < rx_queues_count; queue++) {
3299 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3300 continue;
3301
3302 prio = priv->plat->rx_queues_cfg[queue].prio;
3303 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3304 }
3305 }
3306
3307 /**
3308 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3309 * @priv: driver private structure
3310 * Description: It is used for configuring the TX Queue Priority
3311 */
3312 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3313 {
3314 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3315 u32 queue;
3316 u32 prio;
3317
3318 for (queue = 0; queue < tx_queues_count; queue++) {
3319 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3320 continue;
3321
3322 prio = priv->plat->tx_queues_cfg[queue].prio;
3323 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3324 }
3325 }
3326
3327 /**
3328 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3329 * @priv: driver private structure
3330 * Description: It is used for configuring the RX queue routing
3331 */
3332 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3333 {
3334 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3335 u32 queue;
3336 u8 packet;
3337
3338 for (queue = 0; queue < rx_queues_count; queue++) {
3339 /* no specific packet type routing specified for the queue */
3340 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3341 continue;
3342
3343 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3344 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3345 }
3346 }
3347
3348 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3349 {
3350 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3351 priv->rss.enable = false;
3352 return;
3353 }
3354
3355 if (priv->dev->features & NETIF_F_RXHASH)
3356 priv->rss.enable = true;
3357 else
3358 priv->rss.enable = false;
3359
3360 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3361 priv->plat->rx_queues_to_use);
3362 }
3363
3364 /**
3365 * stmmac_mtl_configuration - Configure MTL
3366 * @priv: driver private structure
3367  * Description: It is used for configuring MTL
3368 */
3369 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3370 {
3371 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3372 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3373
3374 if (tx_queues_count > 1)
3375 stmmac_set_tx_queue_weight(priv);
3376
3377 /* Configure MTL RX algorithms */
3378 if (rx_queues_count > 1)
3379 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3380 priv->plat->rx_sched_algorithm);
3381
3382 /* Configure MTL TX algorithms */
3383 if (tx_queues_count > 1)
3384 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3385 priv->plat->tx_sched_algorithm);
3386
3387 /* Configure CBS in AVB TX queues */
3388 if (tx_queues_count > 1)
3389 stmmac_configure_cbs(priv);
3390
3391 /* Map RX MTL to DMA channels */
3392 stmmac_rx_queue_dma_chan_map(priv);
3393
3394 /* Enable MAC RX Queues */
3395 stmmac_mac_enable_rx_queues(priv);
3396
3397 /* Set RX priorities */
3398 if (rx_queues_count > 1)
3399 stmmac_mac_config_rx_queues_prio(priv);
3400
3401 /* Set TX priorities */
3402 if (tx_queues_count > 1)
3403 stmmac_mac_config_tx_queues_prio(priv);
3404
3405 /* Set RX routing */
3406 if (rx_queues_count > 1)
3407 stmmac_mac_config_rx_queues_routing(priv);
3408
3409 /* Receive Side Scaling */
3410 if (rx_queues_count > 1)
3411 stmmac_mac_config_rss(priv);
3412 }
3413
3414 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3415 {
3416 if (priv->dma_cap.asp) {
3417 netdev_info(priv->dev, "Enabling Safety Features\n");
3418 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3419 priv->plat->safety_feat_cfg);
3420 } else {
3421 netdev_info(priv->dev, "No Safety Features support found\n");
3422 }
3423 }
3424
3425 /**
3426 * stmmac_hw_setup - setup mac in a usable state.
3427 * @dev : pointer to the device structure.
3428 * @ptp_register: register PTP if set
3429 * Description:
3430  * this is the main function to set up the HW in a usable state: the DMA
3431  * engine is reset and the core registers are configured (e.g. AXI,
3432  * checksum features, timers). On return, the DMA is ready to start
3433  * receiving and transmitting.
3434 * Return value:
3435 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3436 * file on failure.
3437 */
3438 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3439 {
3440 struct stmmac_priv *priv = netdev_priv(dev);
3441 u32 rx_cnt = priv->plat->rx_queues_to_use;
3442 u32 tx_cnt = priv->plat->tx_queues_to_use;
3443 bool sph_en;
3444 u32 chan;
3445 int ret;
3446
3447 /* Make sure RX clock is enabled */
3448 if (priv->hw->phylink_pcs)
3449 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3450
3451 /* DMA initialization and SW reset */
3452 ret = stmmac_init_dma_engine(priv);
3453 if (ret < 0) {
3454 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3455 __func__);
3456 return ret;
3457 }
3458
3459 /* Copy the MAC addr into the HW */
3460 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3461
3462 /* PS and related bits will be programmed according to the speed */
3463 if (priv->hw->pcs) {
3464 int speed = priv->plat->mac_port_sel_speed;
3465
3466 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3467 (speed == SPEED_1000)) {
3468 priv->hw->ps = speed;
3469 } else {
3470 dev_warn(priv->device, "invalid port speed\n");
3471 priv->hw->ps = 0;
3472 }
3473 }
3474
3475 /* Initialize the MAC Core */
3476 stmmac_core_init(priv, priv->hw, dev);
3477
3478 	/* Initialize MTL */
3479 stmmac_mtl_configuration(priv);
3480
3481 /* Initialize Safety Features */
3482 stmmac_safety_feat_configuration(priv);
3483
3484 ret = stmmac_rx_ipc(priv, priv->hw);
3485 if (!ret) {
3486 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3487 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3488 priv->hw->rx_csum = 0;
3489 }
3490
3491 /* Enable the MAC Rx/Tx */
3492 stmmac_mac_set(priv, priv->ioaddr, true);
3493
3494 /* Set the HW DMA mode and the COE */
3495 stmmac_dma_operation_mode(priv);
3496
3497 stmmac_mmc_setup(priv);
3498
3499 if (ptp_register) {
3500 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3501 if (ret < 0)
3502 netdev_warn(priv->dev,
3503 "failed to enable PTP reference clock: %pe\n",
3504 ERR_PTR(ret));
3505 }
3506
3507 ret = stmmac_init_ptp(priv);
3508 if (ret == -EOPNOTSUPP)
3509 netdev_info(priv->dev, "PTP not supported by HW\n");
3510 else if (ret)
3511 netdev_warn(priv->dev, "PTP init failed\n");
3512 else if (ptp_register)
3513 stmmac_ptp_register(priv);
3514
3515 if (priv->use_riwt) {
3516 u32 queue;
3517
3518 for (queue = 0; queue < rx_cnt; queue++) {
3519 if (!priv->rx_riwt[queue])
3520 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3521
3522 stmmac_rx_watchdog(priv, priv->ioaddr,
3523 priv->rx_riwt[queue], queue);
3524 }
3525 }
3526
3527 if (priv->hw->pcs)
3528 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3529
3530 /* set TX and RX rings length */
3531 stmmac_set_rings_length(priv);
3532
3533 /* Enable TSO */
3534 if (priv->tso) {
3535 for (chan = 0; chan < tx_cnt; chan++) {
3536 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3537
3538 /* TSO and TBS cannot co-exist */
3539 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3540 continue;
3541
3542 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3543 }
3544 }
3545
3546 /* Enable Split Header */
3547 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3548 for (chan = 0; chan < rx_cnt; chan++)
3549 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3550
3552 /* VLAN Tag Insertion */
3553 if (priv->dma_cap.vlins)
3554 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3555
3556 /* TBS */
3557 for (chan = 0; chan < tx_cnt; chan++) {
3558 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3559 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3560
3561 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3562 }
3563
3564 /* Configure real RX and TX queues */
3565 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3566 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3567
3568 /* Start the ball rolling... */
3569 stmmac_start_all_dma(priv);
3570
3571 stmmac_set_hw_vlan_mode(priv, priv->hw);
3572
3573 return 0;
3574 }
3575
3576 static void stmmac_hw_teardown(struct net_device *dev)
3577 {
3578 struct stmmac_priv *priv = netdev_priv(dev);
3579
3580 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3581 }
3582
3583 static void stmmac_free_irq(struct net_device *dev,
3584 enum request_irq_err irq_err, int irq_idx)
3585 {
3586 struct stmmac_priv *priv = netdev_priv(dev);
3587 int j;
3588
3589 switch (irq_err) {
3590 case REQ_IRQ_ERR_ALL:
3591 irq_idx = priv->plat->tx_queues_to_use;
3592 fallthrough;
3593 case REQ_IRQ_ERR_TX:
3594 for (j = irq_idx - 1; j >= 0; j--) {
3595 if (priv->tx_irq[j] > 0) {
3596 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3597 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3598 }
3599 }
3600 irq_idx = priv->plat->rx_queues_to_use;
3601 fallthrough;
3602 case REQ_IRQ_ERR_RX:
3603 for (j = irq_idx - 1; j >= 0; j--) {
3604 if (priv->rx_irq[j] > 0) {
3605 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3606 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3607 }
3608 }
3609
3610 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3611 free_irq(priv->sfty_ue_irq, dev);
3612 fallthrough;
3613 case REQ_IRQ_ERR_SFTY_UE:
3614 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3615 free_irq(priv->sfty_ce_irq, dev);
3616 fallthrough;
3617 case REQ_IRQ_ERR_SFTY_CE:
3618 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3619 free_irq(priv->lpi_irq, dev);
3620 fallthrough;
3621 case REQ_IRQ_ERR_LPI:
3622 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3623 free_irq(priv->wol_irq, dev);
3624 fallthrough;
3625 case REQ_IRQ_ERR_SFTY:
3626 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3627 free_irq(priv->sfty_irq, dev);
3628 fallthrough;
3629 case REQ_IRQ_ERR_WOL:
3630 free_irq(dev->irq, dev);
3631 fallthrough;
3632 case REQ_IRQ_ERR_MAC:
3633 case REQ_IRQ_ERR_NO:
3634 		/* If the MAC IRQ request failed, there are no more IRQs to free */
3635 break;
3636 }
3637 }
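/*
 * The switch above implements a reverse unwind: the request_irq_err value
 * names the request step that failed, and the matching case frees whatever
 * that step had partially acquired before falling through to release every
 * IRQ acquired by the earlier steps. A minimal sketch of the same pattern
 * (hypothetical steps and resources, not driver code):
 *
 *	switch (failed_step) {
 *	case ERR_STEP3:			// step 3 failed, 1 and 2 succeeded
 *		free_resource(res2);
 *		fallthrough;
 *	case ERR_STEP2:			// step 2 failed, only 1 succeeded
 *		free_resource(res1);
 *		break;
 *	}
 */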
3638
3639 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3640 {
3641 struct stmmac_priv *priv = netdev_priv(dev);
3642 enum request_irq_err irq_err;
3643 cpumask_t cpu_mask;
3644 int irq_idx = 0;
3645 char *int_name;
3646 int ret;
3647 int i;
3648
3649 /* For common interrupt */
3650 int_name = priv->int_name_mac;
3651 sprintf(int_name, "%s:%s", dev->name, "mac");
3652 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3653 0, int_name, dev);
3654 if (unlikely(ret < 0)) {
3655 netdev_err(priv->dev,
3656 "%s: alloc mac MSI %d (error: %d)\n",
3657 __func__, dev->irq, ret);
3658 irq_err = REQ_IRQ_ERR_MAC;
3659 goto irq_error;
3660 }
3661
3662 	/* Request the Wake IRQ in case another line
3663 * is used for WoL
3664 */
3665 priv->wol_irq_disabled = true;
3666 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3667 int_name = priv->int_name_wol;
3668 sprintf(int_name, "%s:%s", dev->name, "wol");
3669 ret = request_irq(priv->wol_irq,
3670 stmmac_mac_interrupt,
3671 0, int_name, dev);
3672 if (unlikely(ret < 0)) {
3673 netdev_err(priv->dev,
3674 "%s: alloc wol MSI %d (error: %d)\n",
3675 __func__, priv->wol_irq, ret);
3676 irq_err = REQ_IRQ_ERR_WOL;
3677 goto irq_error;
3678 }
3679 }
3680
3681 	/* Request the LPI IRQ in case another line
3682 * is used for LPI
3683 */
3684 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3685 int_name = priv->int_name_lpi;
3686 sprintf(int_name, "%s:%s", dev->name, "lpi");
3687 ret = request_irq(priv->lpi_irq,
3688 stmmac_mac_interrupt,
3689 0, int_name, dev);
3690 if (unlikely(ret < 0)) {
3691 netdev_err(priv->dev,
3692 "%s: alloc lpi MSI %d (error: %d)\n",
3693 __func__, priv->lpi_irq, ret);
3694 irq_err = REQ_IRQ_ERR_LPI;
3695 goto irq_error;
3696 }
3697 }
3698
3699 	/* Request the common Safety Feature Correctable/Uncorrectable
3700 	 * Error line in case another line is used
3701 */
3702 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3703 int_name = priv->int_name_sfty;
3704 sprintf(int_name, "%s:%s", dev->name, "safety");
3705 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3706 0, int_name, dev);
3707 if (unlikely(ret < 0)) {
3708 netdev_err(priv->dev,
3709 "%s: alloc sfty MSI %d (error: %d)\n",
3710 __func__, priv->sfty_irq, ret);
3711 irq_err = REQ_IRQ_ERR_SFTY;
3712 goto irq_error;
3713 }
3714 }
3715
3716 	/* Request the Safety Feature Correctable Error line in
3717 	 * case another line is used
3718 */
3719 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3720 int_name = priv->int_name_sfty_ce;
3721 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3722 ret = request_irq(priv->sfty_ce_irq,
3723 stmmac_safety_interrupt,
3724 0, int_name, dev);
3725 if (unlikely(ret < 0)) {
3726 netdev_err(priv->dev,
3727 "%s: alloc sfty ce MSI %d (error: %d)\n",
3728 __func__, priv->sfty_ce_irq, ret);
3729 irq_err = REQ_IRQ_ERR_SFTY_CE;
3730 goto irq_error;
3731 }
3732 }
3733
3734 	/* Request the Safety Feature Uncorrectable Error line in
3735 	 * case another line is used
3736 */
3737 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3738 int_name = priv->int_name_sfty_ue;
3739 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3740 ret = request_irq(priv->sfty_ue_irq,
3741 stmmac_safety_interrupt,
3742 0, int_name, dev);
3743 if (unlikely(ret < 0)) {
3744 netdev_err(priv->dev,
3745 "%s: alloc sfty ue MSI %d (error: %d)\n",
3746 __func__, priv->sfty_ue_irq, ret);
3747 irq_err = REQ_IRQ_ERR_SFTY_UE;
3748 goto irq_error;
3749 }
3750 }
3751
3752 /* Request Rx MSI irq */
3753 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3754 if (i >= MTL_MAX_RX_QUEUES)
3755 break;
3756 if (priv->rx_irq[i] == 0)
3757 continue;
3758
3759 int_name = priv->int_name_rx_irq[i];
3760 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3761 ret = request_irq(priv->rx_irq[i],
3762 stmmac_msi_intr_rx,
3763 0, int_name, &priv->dma_conf.rx_queue[i]);
3764 if (unlikely(ret < 0)) {
3765 netdev_err(priv->dev,
3766 "%s: alloc rx-%d MSI %d (error: %d)\n",
3767 __func__, i, priv->rx_irq[i], ret);
3768 irq_err = REQ_IRQ_ERR_RX;
3769 irq_idx = i;
3770 goto irq_error;
3771 }
3772 cpumask_clear(&cpu_mask);
3773 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3774 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3775 }
3776
3777 /* Request Tx MSI irq */
3778 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3779 if (i >= MTL_MAX_TX_QUEUES)
3780 break;
3781 if (priv->tx_irq[i] == 0)
3782 continue;
3783
3784 int_name = priv->int_name_tx_irq[i];
3785 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3786 ret = request_irq(priv->tx_irq[i],
3787 stmmac_msi_intr_tx,
3788 0, int_name, &priv->dma_conf.tx_queue[i]);
3789 if (unlikely(ret < 0)) {
3790 netdev_err(priv->dev,
3791 "%s: alloc tx-%d MSI %d (error: %d)\n",
3792 __func__, i, priv->tx_irq[i], ret);
3793 irq_err = REQ_IRQ_ERR_TX;
3794 irq_idx = i;
3795 goto irq_error;
3796 }
3797 cpumask_clear(&cpu_mask);
3798 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3799 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3800 }
3801
3802 return 0;
3803
3804 irq_error:
3805 stmmac_free_irq(dev, irq_err, irq_idx);
3806 return ret;
3807 }
3808
3809 static int stmmac_request_irq_single(struct net_device *dev)
3810 {
3811 struct stmmac_priv *priv = netdev_priv(dev);
3812 enum request_irq_err irq_err;
3813 int ret;
3814
3815 ret = request_irq(dev->irq, stmmac_interrupt,
3816 IRQF_SHARED, dev->name, dev);
3817 if (unlikely(ret < 0)) {
3818 netdev_err(priv->dev,
3819 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3820 __func__, dev->irq, ret);
3821 irq_err = REQ_IRQ_ERR_MAC;
3822 goto irq_error;
3823 }
3824
3825 	/* Request the Wake IRQ in case another line
3826 * is used for WoL
3827 */
3828 priv->wol_irq_disabled = true;
3829 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3830 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3831 IRQF_SHARED, dev->name, dev);
3832 if (unlikely(ret < 0)) {
3833 netdev_err(priv->dev,
3834 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3835 __func__, priv->wol_irq, ret);
3836 irq_err = REQ_IRQ_ERR_WOL;
3837 goto irq_error;
3838 }
3839 }
3840
3841 /* Request the IRQ lines */
3842 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3843 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3844 IRQF_SHARED, dev->name, dev);
3845 if (unlikely(ret < 0)) {
3846 netdev_err(priv->dev,
3847 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3848 __func__, priv->lpi_irq, ret);
3849 irq_err = REQ_IRQ_ERR_LPI;
3850 goto irq_error;
3851 }
3852 }
3853
3854 	/* Request the common Safety Feature Correctable/Uncorrectable
3855 	 * Error line in case another line is used
3856 */
3857 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3858 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3859 IRQF_SHARED, dev->name, dev);
3860 if (unlikely(ret < 0)) {
3861 netdev_err(priv->dev,
3862 "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3863 __func__, priv->sfty_irq, ret);
3864 irq_err = REQ_IRQ_ERR_SFTY;
3865 goto irq_error;
3866 }
3867 }
3868
3869 return 0;
3870
3871 irq_error:
3872 stmmac_free_irq(dev, irq_err, 0);
3873 return ret;
3874 }
3875
3876 static int stmmac_request_irq(struct net_device *dev)
3877 {
3878 struct stmmac_priv *priv = netdev_priv(dev);
3879 int ret;
3880
3881 /* Request the IRQ lines */
3882 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3883 ret = stmmac_request_irq_multi_msi(dev);
3884 else
3885 ret = stmmac_request_irq_single(dev);
3886
3887 return ret;
3888 }
3889
3890 /**
3891 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3892 * @priv: driver private structure
3893 * @mtu: MTU to setup the dma queue and buf with
3894 * Description: Allocate and generate a dma_conf based on the provided MTU.
3895  * Allocate the Tx/Rx DMA queues and init them.
3896  * Return value:
3897  *	the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
3898 */
3899 static struct stmmac_dma_conf *
3900 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3901 {
3902 struct stmmac_dma_conf *dma_conf;
3903 int chan, bfsize, ret;
3904
3905 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3906 if (!dma_conf) {
3907 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3908 __func__);
3909 return ERR_PTR(-ENOMEM);
3910 }
3911
3912 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3913 if (bfsize < 0)
3914 bfsize = 0;
3915
3916 if (bfsize < BUF_SIZE_16KiB)
3917 bfsize = stmmac_set_bfsize(mtu, 0);
3918
3919 dma_conf->dma_buf_sz = bfsize;
3920 	/* Choose the tx/rx size from the one already defined in the
3921 	 * priv struct (if defined).
3922 */
3923 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3924 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3925
3926 if (!dma_conf->dma_tx_size)
3927 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3928 if (!dma_conf->dma_rx_size)
3929 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3930
3931 /* Earlier check for TBS */
3932 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3933 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3934 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3935
3936 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3937 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3938 }
3939
3940 ret = alloc_dma_desc_resources(priv, dma_conf);
3941 if (ret < 0) {
3942 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3943 __func__);
3944 goto alloc_error;
3945 }
3946
3947 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3948 if (ret < 0) {
3949 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3950 __func__);
3951 goto init_error;
3952 }
3953
3954 return dma_conf;
3955
3956 init_error:
3957 free_dma_desc_resources(priv, dma_conf);
3958 alloc_error:
3959 kfree(dma_conf);
3960 return ERR_PTR(ret);
3961 }
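/*
 * A minimal usage sketch (this is effectively what stmmac_open() below does
 * for the current MTU):
 *
 *	dma_conf = stmmac_setup_dma_desc(priv, new_mtu);
 *	if (IS_ERR(dma_conf))
 *		return PTR_ERR(dma_conf);
 *	ret = __stmmac_open(dev, dma_conf);	// copied into priv->dma_conf
 *	kfree(dma_conf);			// only the container is freed
 */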
3962
3963 /**
3964 * __stmmac_open - open entry point of the driver
3965 * @dev : pointer to the device structure.
3966 * @dma_conf : structure to take the dma data
3967 * Description:
3968 * This function is the open entry point of the driver.
3969 * Return value:
3970 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3971 * file on failure.
3972 */
3973 static int __stmmac_open(struct net_device *dev,
3974 struct stmmac_dma_conf *dma_conf)
3975 {
3976 struct stmmac_priv *priv = netdev_priv(dev);
3977 int mode = priv->plat->phy_interface;
3978 u32 chan;
3979 int ret;
3980
3981 /* Initialise the tx lpi timer, converting from msec to usec */
3982 if (!priv->tx_lpi_timer)
3983 priv->tx_lpi_timer = eee_timer * 1000;
3984
3985 ret = pm_runtime_resume_and_get(priv->device);
3986 if (ret < 0)
3987 return ret;
3988
3989 if ((!priv->hw->xpcs ||
3990 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3991 ret = stmmac_init_phy(dev);
3992 if (ret) {
3993 netdev_err(priv->dev,
3994 "%s: Cannot attach to PHY (error: %d)\n",
3995 __func__, ret);
3996 goto init_phy_error;
3997 }
3998 }
3999
4000 buf_sz = dma_conf->dma_buf_sz;
4001 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
4002 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4003 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4004 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4005
4006 stmmac_reset_queues_param(priv);
4007
4008 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4009 priv->plat->serdes_powerup) {
4010 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4011 if (ret < 0) {
4012 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4013 __func__);
4014 goto init_error;
4015 }
4016 }
4017
4018 ret = stmmac_hw_setup(dev, true);
4019 if (ret < 0) {
4020 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4021 goto init_error;
4022 }
4023
4024 stmmac_init_coalesce(priv);
4025
4026 phylink_start(priv->phylink);
4027 /* We may have called phylink_speed_down before */
4028 phylink_speed_up(priv->phylink);
4029
4030 ret = stmmac_request_irq(dev);
4031 if (ret)
4032 goto irq_error;
4033
4034 stmmac_enable_all_queues(priv);
4035 netif_tx_start_all_queues(priv->dev);
4036 stmmac_enable_all_dma_irq(priv);
4037
4038 return 0;
4039
4040 irq_error:
4041 phylink_stop(priv->phylink);
4042
4043 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4044 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4045
4046 stmmac_hw_teardown(dev);
4047 init_error:
4048 phylink_disconnect_phy(priv->phylink);
4049 init_phy_error:
4050 pm_runtime_put(priv->device);
4051 return ret;
4052 }
4053
4054 static int stmmac_open(struct net_device *dev)
4055 {
4056 struct stmmac_priv *priv = netdev_priv(dev);
4057 struct stmmac_dma_conf *dma_conf;
4058 int ret;
4059
4060 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4061 if (IS_ERR(dma_conf))
4062 return PTR_ERR(dma_conf);
4063
4064 ret = __stmmac_open(dev, dma_conf);
4065 if (ret)
4066 free_dma_desc_resources(priv, dma_conf);
4067
4068 kfree(dma_conf);
4069 return ret;
4070 }
4071
4072 /**
4073 * stmmac_release - close entry point of the driver
4074 * @dev : device pointer.
4075 * Description:
4076 * This is the stop entry point of the driver.
4077 */
4078 static int stmmac_release(struct net_device *dev)
4079 {
4080 struct stmmac_priv *priv = netdev_priv(dev);
4081 u32 chan;
4082
4083 if (device_may_wakeup(priv->device))
4084 phylink_speed_down(priv->phylink, false);
4085 /* Stop and disconnect the PHY */
4086 phylink_stop(priv->phylink);
4087 phylink_disconnect_phy(priv->phylink);
4088
4089 stmmac_disable_all_queues(priv);
4090
4091 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4092 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4093
4094 netif_tx_disable(dev);
4095
4096 /* Free the IRQ lines */
4097 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4098
4099 /* Stop TX/RX DMA and clear the descriptors */
4100 stmmac_stop_all_dma(priv);
4101
4102 /* Release and free the Rx/Tx resources */
4103 free_dma_desc_resources(priv, &priv->dma_conf);
4104
4105 /* Disable the MAC Rx/Tx */
4106 stmmac_mac_set(priv, priv->ioaddr, false);
4107
4108 	/* Power down the SerDes if present */
4109 if (priv->plat->serdes_powerdown)
4110 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4111
4112 stmmac_release_ptp(priv);
4113
4114 if (stmmac_fpe_supported(priv))
4115 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4116
4117 pm_runtime_put(priv->device);
4118
4119 return 0;
4120 }
4121
4122 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4123 struct stmmac_tx_queue *tx_q)
4124 {
4125 u16 tag = 0x0, inner_tag = 0x0;
4126 u32 inner_type = 0x0;
4127 struct dma_desc *p;
4128
4129 if (!priv->dma_cap.vlins)
4130 return false;
4131 if (!skb_vlan_tag_present(skb))
4132 return false;
4133 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4134 inner_tag = skb_vlan_tag_get(skb);
4135 inner_type = STMMAC_VLAN_INSERT;
4136 }
4137
4138 tag = skb_vlan_tag_get(skb);
4139
4140 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4141 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4142 else
4143 p = &tx_q->dma_tx[tx_q->cur_tx];
4144
4145 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4146 return false;
4147
4148 stmmac_set_tx_owner(priv, p);
4149 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4150 return true;
4151 }
4152
4153 /**
4154  * stmmac_tso_allocator - fill TX descriptors for a TSO buffer
4155 * @priv: driver private structure
4156 * @des: buffer start address
4157 * @total_len: total length to fill in descriptors
4158 * @last_segment: condition for the last descriptor
4159 * @queue: TX queue index
4160 * Description:
4161  * This function fills descriptors and requests new ones according to the
4162  * buffer length to fill.
4163 */
4164 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4165 int total_len, bool last_segment, u32 queue)
4166 {
4167 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4168 struct dma_desc *desc;
4169 u32 buff_size;
4170 int tmp_len;
4171
4172 tmp_len = total_len;
4173
4174 while (tmp_len > 0) {
4175 dma_addr_t curr_addr;
4176
4177 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4178 priv->dma_conf.dma_tx_size);
4179 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4180
4181 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4182 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4183 else
4184 desc = &tx_q->dma_tx[tx_q->cur_tx];
4185
4186 curr_addr = des + (total_len - tmp_len);
4187 stmmac_set_desc_addr(priv, desc, curr_addr);
4188 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4189 TSO_MAX_BUFF_SIZE : tmp_len;
4190
4191 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4192 0, 1,
4193 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4194 0, 0);
4195
4196 tmp_len -= TSO_MAX_BUFF_SIZE;
4197 }
4198 }
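/*
 * Worked example of the split above: with TSO_MAX_BUFF_SIZE = 16383, a
 * 40000-byte payload is spread across three descriptors carrying 16383,
 * 16383 and 7234 bytes; only the final descriptor gets the last-segment
 * bit, and only when the caller passed last_segment = true.
 */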
4199
4200 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4201 {
4202 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4203 int desc_size;
4204
4205 if (likely(priv->extend_desc))
4206 desc_size = sizeof(struct dma_extended_desc);
4207 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4208 desc_size = sizeof(struct dma_edesc);
4209 else
4210 desc_size = sizeof(struct dma_desc);
4211
4212 	/* The own bit must be the last thing written when preparing the
4213 	 * descriptor, and a barrier is needed to make sure everything is
4214 	 * coherent before handing the descriptors over to the DMA engine.
4215 */
4216 wmb();
4217
4218 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4219 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4220 }
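/*
 * Worked example for the tail pointer math above: with the standard 16-byte
 * struct dma_desc and cur_tx == 5, the value written to the channel tail
 * pointer is dma_tx_phy + 5 * 16, i.e. the address of the next free entry;
 * the DMA engine fetches descriptors up to (but not including) that address.
 */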
4221
4222 /**
4223 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4224 * @skb : the socket buffer
4225 * @dev : device pointer
4226 * Description: this is the transmit function that is called on TSO frames
4227 * (support available on GMAC4 and newer chips).
4228  * The diagram below shows the ring programming in the case of TSO frames:
4229 *
4230 * First Descriptor
4231 * --------
4232 * | DES0 |---> buffer1 = L2/L3/L4 header
4233 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4234 * | | width is 32-bit, but we never use it.
4235 * | | Also can be used as the most-significant 8-bits or 16-bits of
4236 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4237 * | | or 48-bit, and we always use it.
4238 * | DES2 |---> buffer1 len
4239 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4240 * --------
4241 * --------
4242 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4243 * | DES1 |---> same as the First Descriptor
4244 * | DES2 |---> buffer1 len
4245 * | DES3 |
4246 * --------
4247 * |
4248 * ...
4249 * |
4250 * --------
4251 * | DES0 |---> buffer1 = Split TCP Payload
4252 * | DES1 |---> same as the First Descriptor
4253 * | DES2 |---> buffer1 len
4254 * | DES3 |
4255 * --------
4256 *
4257  * The MSS is fixed while TSO is enabled, so the TDES3 context field is not reprogrammed for each frame.
4258 */
4259 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4260 {
4261 struct dma_desc *desc, *first, *mss_desc = NULL;
4262 struct stmmac_priv *priv = netdev_priv(dev);
4263 unsigned int first_entry, tx_packets;
4264 struct stmmac_txq_stats *txq_stats;
4265 struct stmmac_tx_queue *tx_q;
4266 u32 pay_len, mss, queue;
4267 int i, first_tx, nfrags;
4268 u8 proto_hdr_len, hdr;
4269 dma_addr_t des;
4270 bool set_ic;
4271
4272 	/* Always insert the VLAN tag into the SKB payload for TSO frames.
4273 	 *
4274 	 * Never let the HW insert the VLAN tag, since segments split by the
4275 	 * TSO engine would otherwise be left untagged by mistake.
4276 */
4277 if (skb_vlan_tag_present(skb)) {
4278 skb = __vlan_hwaccel_push_inside(skb);
4279 if (unlikely(!skb)) {
4280 priv->xstats.tx_dropped++;
4281 return NETDEV_TX_OK;
4282 }
4283 }
4284
4285 nfrags = skb_shinfo(skb)->nr_frags;
4286 queue = skb_get_queue_mapping(skb);
4287
4288 tx_q = &priv->dma_conf.tx_queue[queue];
4289 txq_stats = &priv->xstats.txq_stats[queue];
4290 first_tx = tx_q->cur_tx;
4291
4292 /* Compute header lengths */
4293 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4294 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4295 hdr = sizeof(struct udphdr);
4296 } else {
4297 proto_hdr_len = skb_tcp_all_headers(skb);
4298 hdr = tcp_hdrlen(skb);
4299 }
4300
4301 	/* Desc availability based on the threshold should be safe enough */
4302 if (unlikely(stmmac_tx_avail(priv, queue) <
4303 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4304 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4305 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4306 queue));
4307 /* This is a hard error, log it. */
4308 netdev_err(priv->dev,
4309 "%s: Tx Ring full when queue awake\n",
4310 __func__);
4311 }
4312 return NETDEV_TX_BUSY;
4313 }
4314
4315 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4316
4317 mss = skb_shinfo(skb)->gso_size;
4318
4319 /* set new MSS value if needed */
4320 if (mss != tx_q->mss) {
4321 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4322 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4323 else
4324 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4325
4326 stmmac_set_mss(priv, mss_desc, mss);
4327 tx_q->mss = mss;
4328 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4329 priv->dma_conf.dma_tx_size);
4330 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4331 }
4332
4333 if (netif_msg_tx_queued(priv)) {
4334 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4335 __func__, hdr, proto_hdr_len, pay_len, mss);
4336 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4337 skb->data_len);
4338 }
4339
4340 first_entry = tx_q->cur_tx;
4341 WARN_ON(tx_q->tx_skbuff[first_entry]);
4342
4343 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4344 desc = &tx_q->dma_entx[first_entry].basic;
4345 else
4346 desc = &tx_q->dma_tx[first_entry];
4347 first = desc;
4348
4349 /* first descriptor: fill Headers on Buf1 */
4350 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4351 DMA_TO_DEVICE);
4352 if (dma_mapping_error(priv->device, des))
4353 goto dma_map_err;
4354
4355 stmmac_set_desc_addr(priv, first, des);
4356 stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4357 (nfrags == 0), queue);
4358
4359 /* In case two or more DMA transmit descriptors are allocated for this
4360 * non-paged SKB data, the DMA buffer address should be saved to
4361 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4362 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4363 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4364 * since the tail areas of the DMA buffer can be accessed by DMA engine
4365 * sooner or later.
4366 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4367 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4368 * this DMA buffer right after the DMA engine completely finishes the
4369 * full buffer transmission.
4370 */
4371 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4372 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4373 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4374 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4375
4376 /* Prepare fragments */
4377 for (i = 0; i < nfrags; i++) {
4378 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4379
4380 des = skb_frag_dma_map(priv->device, frag, 0,
4381 skb_frag_size(frag),
4382 DMA_TO_DEVICE);
4383 if (dma_mapping_error(priv->device, des))
4384 goto dma_map_err;
4385
4386 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4387 (i == nfrags - 1), queue);
4388
4389 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4390 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4391 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4392 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4393 }
4394
4395 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4396
4397 /* Only the last descriptor gets to point to the skb. */
4398 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4399 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4400
4401 /* Manage tx mitigation */
4402 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4403 tx_q->tx_count_frames += tx_packets;
4404
4405 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4406 set_ic = true;
4407 else if (!priv->tx_coal_frames[queue])
4408 set_ic = false;
4409 else if (tx_packets > priv->tx_coal_frames[queue])
4410 set_ic = true;
4411 else if ((tx_q->tx_count_frames %
4412 priv->tx_coal_frames[queue]) < tx_packets)
4413 set_ic = true;
4414 else
4415 set_ic = false;
4416
4417 if (set_ic) {
4418 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4419 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4420 else
4421 desc = &tx_q->dma_tx[tx_q->cur_tx];
4422
4423 tx_q->tx_count_frames = 0;
4424 stmmac_set_tx_ic(priv, desc);
4425 }
4426
4427 /* We've used all descriptors we need for this skb, however,
4428 * advance cur_tx so that it references a fresh descriptor.
4429 * ndo_start_xmit will fill this descriptor the next time it's
4430 * called and stmmac_tx_clean may clean up to this descriptor.
4431 */
4432 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4433
4434 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4435 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4436 __func__);
4437 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4438 }
4439
4440 u64_stats_update_begin(&txq_stats->q_syncp);
4441 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4442 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4443 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4444 if (set_ic)
4445 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4446 u64_stats_update_end(&txq_stats->q_syncp);
4447
4448 if (priv->sarc_type)
4449 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4450
4451 skb_tx_timestamp(skb);
4452
4453 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4454 priv->hwts_tx_en)) {
4455 /* declare that device is doing timestamping */
4456 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4457 stmmac_enable_tx_timestamp(priv, first);
4458 }
4459
4460 /* Complete the first descriptor before granting the DMA */
4461 stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4462 tx_q->tx_skbuff_dma[first_entry].last_segment,
4463 hdr / 4, (skb->len - proto_hdr_len));
4464
4465 /* If context desc is used to change MSS */
4466 if (mss_desc) {
4467 /* Make sure that first descriptor has been completely
4468 * written, including its own bit. This is because MSS is
4469 * actually before first descriptor, so we need to make
4470 * sure that MSS's own bit is the last thing written.
4471 */
4472 dma_wmb();
4473 stmmac_set_tx_owner(priv, mss_desc);
4474 }
4475
4476 if (netif_msg_pktdata(priv)) {
4477 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4478 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4479 tx_q->cur_tx, first, nfrags);
4480 pr_info(">>> frame to be transmitted: ");
4481 print_pkt(skb->data, skb_headlen(skb));
4482 }
4483
4484 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4485
4486 stmmac_flush_tx_descriptors(priv, queue);
4487 stmmac_tx_timer_arm(priv, queue);
4488
4489 return NETDEV_TX_OK;
4490
4491 dma_map_err:
4492 dev_err(priv->device, "Tx dma map failed\n");
4493 dev_kfree_skb(skb);
4494 priv->xstats.tx_dropped++;
4495 return NETDEV_TX_OK;
4496 }
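/*
 * Worked example of the set_ic decision above (the same logic is used in
 * stmmac_xmit()), assuming tx_coal_frames[queue] == 25: if tx_count_frames
 * was 24 and this skb consumed 3 descriptors, the counter becomes 27 and
 * (27 % 25) == 2 < 3, so the IC bit is set on the last descriptor and the
 * counter is reset; otherwise completion relies on the coalescing timer
 * armed by stmmac_tx_timer_arm().
 */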
4497
4498 /**
4499 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4500 * @skb: socket buffer to check
4501 *
4502 * Check if a packet has an ethertype that will trigger the IP header checks
4503 * and IP/TCP checksum engine of the stmmac core.
4504 *
4505 * Return: true if the ethertype can trigger the checksum engine, false
4506 * otherwise
4507 */
4508 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4509 {
4510 int depth = 0;
4511 __be16 proto;
4512
4513 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4514 &depth);
4515
4516 return (depth <= ETH_HLEN) &&
4517 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4518 }
4519
4520 /**
4521 * stmmac_xmit - Tx entry point of the driver
4522 * @skb : the socket buffer
4523 * @dev : device pointer
4524 * Description : this is the tx entry point of the driver.
4525 * It programs the chain or the ring and supports oversized frames
4526 * and SG feature.
4527 */
4528 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4529 {
4530 unsigned int first_entry, tx_packets, enh_desc;
4531 struct stmmac_priv *priv = netdev_priv(dev);
4532 unsigned int nopaged_len = skb_headlen(skb);
4533 int i, csum_insertion = 0, is_jumbo = 0;
4534 u32 queue = skb_get_queue_mapping(skb);
4535 int nfrags = skb_shinfo(skb)->nr_frags;
4536 int gso = skb_shinfo(skb)->gso_type;
4537 struct stmmac_txq_stats *txq_stats;
4538 struct dma_edesc *tbs_desc = NULL;
4539 struct dma_desc *desc, *first;
4540 struct stmmac_tx_queue *tx_q;
4541 bool has_vlan, set_ic;
4542 int entry, first_tx;
4543 dma_addr_t des;
4544
4545 tx_q = &priv->dma_conf.tx_queue[queue];
4546 txq_stats = &priv->xstats.txq_stats[queue];
4547 first_tx = tx_q->cur_tx;
4548
4549 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4550 stmmac_stop_sw_lpi(priv);
4551
4552 /* Manage oversized TCP frames for GMAC4 device */
4553 if (skb_is_gso(skb) && priv->tso) {
4554 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4555 return stmmac_tso_xmit(skb, dev);
4556 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4557 return stmmac_tso_xmit(skb, dev);
4558 }
4559
4560 if (priv->est && priv->est->enable &&
4561 priv->est->max_sdu[queue] &&
4562 	    skb->len > priv->est->max_sdu[queue]) {
4563 priv->xstats.max_sdu_txq_drop[queue]++;
4564 goto max_sdu_err;
4565 }
4566
4567 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4568 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4569 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4570 queue));
4571 /* This is a hard error, log it. */
4572 netdev_err(priv->dev,
4573 "%s: Tx Ring full when queue awake\n",
4574 __func__);
4575 }
4576 return NETDEV_TX_BUSY;
4577 }
4578
4579 /* Check if VLAN can be inserted by HW */
4580 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4581
4582 entry = tx_q->cur_tx;
4583 first_entry = entry;
4584 WARN_ON(tx_q->tx_skbuff[first_entry]);
4585
4586 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4587 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4588 * queues. In that case, checksum offloading for those queues that don't
4589 	 * support tx coe needs to fall back to software checksum calculation.
4590 *
4591 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4592 * also have to be checksummed in software.
4593 */
4594 if (csum_insertion &&
4595 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4596 !stmmac_has_ip_ethertype(skb))) {
4597 if (unlikely(skb_checksum_help(skb)))
4598 goto dma_map_err;
4599 csum_insertion = !csum_insertion;
4600 }
4601
4602 if (likely(priv->extend_desc))
4603 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4604 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4605 desc = &tx_q->dma_entx[entry].basic;
4606 else
4607 desc = tx_q->dma_tx + entry;
4608
4609 first = desc;
4610
4611 if (has_vlan)
4612 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4613
4614 enh_desc = priv->plat->enh_desc;
4615 /* To program the descriptors according to the size of the frame */
4616 if (enh_desc)
4617 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4618
4619 if (unlikely(is_jumbo)) {
4620 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4621 if (unlikely(entry < 0) && (entry != -EINVAL))
4622 goto dma_map_err;
4623 }
4624
4625 for (i = 0; i < nfrags; i++) {
4626 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4627 int len = skb_frag_size(frag);
4628 bool last_segment = (i == (nfrags - 1));
4629
4630 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4631 WARN_ON(tx_q->tx_skbuff[entry]);
4632
4633 if (likely(priv->extend_desc))
4634 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4635 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4636 desc = &tx_q->dma_entx[entry].basic;
4637 else
4638 desc = tx_q->dma_tx + entry;
4639
4640 des = skb_frag_dma_map(priv->device, frag, 0, len,
4641 DMA_TO_DEVICE);
4642 if (dma_mapping_error(priv->device, des))
4643 goto dma_map_err; /* should reuse desc w/o issues */
4644
4645 tx_q->tx_skbuff_dma[entry].buf = des;
4646
4647 stmmac_set_desc_addr(priv, desc, des);
4648
4649 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4650 tx_q->tx_skbuff_dma[entry].len = len;
4651 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4652 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4653
4654 /* Prepare the descriptor and set the own bit too */
4655 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4656 priv->mode, 1, last_segment, skb->len);
4657 }
4658
4659 /* Only the last descriptor gets to point to the skb. */
4660 tx_q->tx_skbuff[entry] = skb;
4661 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4662
4663 /* According to the coalesce parameter the IC bit for the latest
4664 * segment is reset and the timer re-started to clean the tx status.
4665 	 * This approach takes care of the fragments: desc is the first
4666 * element in case of no SG.
4667 */
4668 tx_packets = (entry + 1) - first_tx;
4669 tx_q->tx_count_frames += tx_packets;
4670
4671 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4672 set_ic = true;
4673 else if (!priv->tx_coal_frames[queue])
4674 set_ic = false;
4675 else if (tx_packets > priv->tx_coal_frames[queue])
4676 set_ic = true;
4677 else if ((tx_q->tx_count_frames %
4678 priv->tx_coal_frames[queue]) < tx_packets)
4679 set_ic = true;
4680 else
4681 set_ic = false;
4682
4683 if (set_ic) {
4684 if (likely(priv->extend_desc))
4685 desc = &tx_q->dma_etx[entry].basic;
4686 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4687 desc = &tx_q->dma_entx[entry].basic;
4688 else
4689 desc = &tx_q->dma_tx[entry];
4690
4691 tx_q->tx_count_frames = 0;
4692 stmmac_set_tx_ic(priv, desc);
4693 }
4694
4695 /* We've used all descriptors we need for this skb, however,
4696 * advance cur_tx so that it references a fresh descriptor.
4697 * ndo_start_xmit will fill this descriptor the next time it's
4698 * called and stmmac_tx_clean may clean up to this descriptor.
4699 */
4700 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4701 tx_q->cur_tx = entry;
4702
4703 if (netif_msg_pktdata(priv)) {
4704 netdev_dbg(priv->dev,
4705 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4706 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4707 entry, first, nfrags);
4708
4709 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4710 print_pkt(skb->data, skb->len);
4711 }
4712
4713 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4714 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4715 __func__);
4716 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4717 }
4718
4719 u64_stats_update_begin(&txq_stats->q_syncp);
4720 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4721 if (set_ic)
4722 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4723 u64_stats_update_end(&txq_stats->q_syncp);
4724
4725 if (priv->sarc_type)
4726 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4727
4728 skb_tx_timestamp(skb);
4729
4730 /* Ready to fill the first descriptor and set the OWN bit w/o any
4731 * problems because all the descriptors are actually ready to be
4732 * passed to the DMA engine.
4733 */
4734 if (likely(!is_jumbo)) {
4735 bool last_segment = (nfrags == 0);
4736
4737 des = dma_map_single(priv->device, skb->data,
4738 nopaged_len, DMA_TO_DEVICE);
4739 if (dma_mapping_error(priv->device, des))
4740 goto dma_map_err;
4741
4742 tx_q->tx_skbuff_dma[first_entry].buf = des;
4743 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4744 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4745
4746 stmmac_set_desc_addr(priv, first, des);
4747
4748 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4749 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4750
4751 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4752 priv->hwts_tx_en)) {
4753 /* declare that device is doing timestamping */
4754 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4755 stmmac_enable_tx_timestamp(priv, first);
4756 }
4757
4758 /* Prepare the first descriptor setting the OWN bit too */
4759 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4760 csum_insertion, priv->mode, 0, last_segment,
4761 skb->len);
4762 }
4763
4764 if (tx_q->tbs & STMMAC_TBS_EN) {
4765 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4766
4767 tbs_desc = &tx_q->dma_entx[first_entry];
4768 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4769 }
4770
4771 stmmac_set_tx_owner(priv, first);
4772
4773 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4774
4775 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4776
4777 stmmac_flush_tx_descriptors(priv, queue);
4778 stmmac_tx_timer_arm(priv, queue);
4779
4780 return NETDEV_TX_OK;
4781
4782 dma_map_err:
4783 netdev_err(priv->dev, "Tx DMA map failed\n");
4784 max_sdu_err:
4785 dev_kfree_skb(skb);
4786 priv->xstats.tx_dropped++;
4787 return NETDEV_TX_OK;
4788 }
4789
4790 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4791 {
4792 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4793 __be16 vlan_proto = veth->h_vlan_proto;
4794 u16 vlanid;
4795
4796 if ((vlan_proto == htons(ETH_P_8021Q) &&
4797 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4798 (vlan_proto == htons(ETH_P_8021AD) &&
4799 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4800 /* pop the vlan tag */
4801 vlanid = ntohs(veth->h_vlan_TCI);
4802 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4803 skb_pull(skb, VLAN_HLEN);
4804 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4805 }
4806 }
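/*
 * Layout of the software strip above: the TCI is read first, then the
 * 12 bytes of destination and source MAC addresses are moved 4 bytes
 * forward so they overwrite the 802.1Q tag, and skb_pull() drops the 4
 * stale leading bytes; the frame ends up untagged, with the VLAN ID
 * carried in skb metadata via __vlan_hwaccel_put_tag().
 */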
4807
4808 /**
4809 * stmmac_rx_refill - refill used skb preallocated buffers
4810 * @priv: driver private structure
4811 * @queue: RX queue index
4812  * Description : this is to refill the RX buffers used by the reception
4813  * process, which is based on zero-copy.
4814 */
4815 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4816 {
4817 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4818 int dirty = stmmac_rx_dirty(priv, queue);
4819 unsigned int entry = rx_q->dirty_rx;
4820 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4821
4822 if (priv->dma_cap.host_dma_width <= 32)
4823 gfp |= GFP_DMA32;
4824
4825 while (dirty-- > 0) {
4826 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4827 struct dma_desc *p;
4828 bool use_rx_wd;
4829
4830 if (priv->extend_desc)
4831 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4832 else
4833 p = rx_q->dma_rx + entry;
4834
4835 if (!buf->page) {
4836 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4837 if (!buf->page)
4838 break;
4839 }
4840
4841 if (priv->sph && !buf->sec_page) {
4842 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4843 if (!buf->sec_page)
4844 break;
4845
4846 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4847 }
4848
4849 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4850
4851 stmmac_set_desc_addr(priv, p, buf->addr);
4852 if (priv->sph)
4853 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4854 else
4855 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4856 stmmac_refill_desc3(priv, rx_q, p);
4857
4858 rx_q->rx_count_frames++;
4859 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4860 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4861 rx_q->rx_count_frames = 0;
4862
4863 use_rx_wd = !priv->rx_coal_frames[queue];
4864 use_rx_wd |= rx_q->rx_count_frames > 0;
4865 if (!priv->use_riwt)
4866 use_rx_wd = false;
4867
4868 dma_wmb();
4869 stmmac_set_rx_owner(priv, p, use_rx_wd);
4870
4871 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4872 }
4873 rx_q->dirty_rx = entry;
4874 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4875 (rx_q->dirty_rx * sizeof(struct dma_desc));
4876 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4877 }
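/*
 * The ring indices above wrap with STMMAC_GET_ENTRY(), which assumes the
 * ring size is a power of two. For example, with dma_rx_size == 512 and
 * dirty_rx == 511, refilling one more entry advances the index to
 * (511 + 1) & (512 - 1) == 0.
 */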
4878
4879 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4880 struct dma_desc *p,
4881 int status, unsigned int len)
4882 {
4883 unsigned int plen = 0, hlen = 0;
4884 int coe = priv->hw->rx_csum;
4885
4886 /* Not first descriptor, buffer is always zero */
4887 if (priv->sph && len)
4888 return 0;
4889
4890 /* First descriptor, get split header length */
4891 stmmac_get_rx_header_len(priv, p, &hlen);
4892 if (priv->sph && hlen) {
4893 priv->xstats.rx_split_hdr_pkt_n++;
4894 return hlen;
4895 }
4896
4897 /* First descriptor, not last descriptor and not split header */
4898 if (status & rx_not_ls)
4899 return priv->dma_conf.dma_buf_sz;
4900
4901 plen = stmmac_get_rx_frame_len(priv, p, coe);
4902
4903 /* First descriptor and last descriptor and not split header */
4904 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4905 }
4906
4907 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4908 struct dma_desc *p,
4909 int status, unsigned int len)
4910 {
4911 int coe = priv->hw->rx_csum;
4912 unsigned int plen = 0;
4913
4914 /* Not split header, buffer is not available */
4915 if (!priv->sph)
4916 return 0;
4917
4918 /* Not last descriptor */
4919 if (status & rx_not_ls)
4920 return priv->dma_conf.dma_buf_sz;
4921
4922 plen = stmmac_get_rx_frame_len(priv, p, coe);
4923
4924 /* Last descriptor */
4925 return plen - len;
4926 }
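/*
 * Worked example with Split Header enabled: for a frame that fits in a
 * single descriptor, reports a total length of 1400 bytes and carries
 * 54 bytes of L2/L3/L4 headers, stmmac_rx_buf1_len() returns hlen = 54
 * (the header landed in buffer 1) and stmmac_rx_buf2_len() then returns
 * 1400 - 54 = 1346 for the payload in buffer 2.
 */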
4927
4928 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4929 struct xdp_frame *xdpf, bool dma_map)
4930 {
4931 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4932 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4933 unsigned int entry = tx_q->cur_tx;
4934 struct dma_desc *tx_desc;
4935 dma_addr_t dma_addr;
4936 bool set_ic;
4937
4938 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4939 return STMMAC_XDP_CONSUMED;
4940
4941 if (priv->est && priv->est->enable &&
4942 priv->est->max_sdu[queue] &&
4943 xdpf->len > priv->est->max_sdu[queue]) {
4944 priv->xstats.max_sdu_txq_drop[queue]++;
4945 return STMMAC_XDP_CONSUMED;
4946 }
4947
4948 if (likely(priv->extend_desc))
4949 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4950 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4951 tx_desc = &tx_q->dma_entx[entry].basic;
4952 else
4953 tx_desc = tx_q->dma_tx + entry;
4954
4955 if (dma_map) {
4956 dma_addr = dma_map_single(priv->device, xdpf->data,
4957 xdpf->len, DMA_TO_DEVICE);
4958 if (dma_mapping_error(priv->device, dma_addr))
4959 return STMMAC_XDP_CONSUMED;
4960
4961 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4962 } else {
4963 struct page *page = virt_to_page(xdpf->data);
4964
4965 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4966 xdpf->headroom;
4967 dma_sync_single_for_device(priv->device, dma_addr,
4968 xdpf->len, DMA_BIDIRECTIONAL);
4969
4970 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4971 }
4972
4973 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4974 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4975 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4976 tx_q->tx_skbuff_dma[entry].last_segment = true;
4977 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4978
4979 tx_q->xdpf[entry] = xdpf;
4980
4981 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4982
4983 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4984 true, priv->mode, true, true,
4985 xdpf->len);
4986
4987 tx_q->tx_count_frames++;
4988
4989 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4990 set_ic = true;
4991 else
4992 set_ic = false;
4993
4994 if (set_ic) {
4995 tx_q->tx_count_frames = 0;
4996 stmmac_set_tx_ic(priv, tx_desc);
4997 u64_stats_update_begin(&txq_stats->q_syncp);
4998 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4999 u64_stats_update_end(&txq_stats->q_syncp);
5000 }
5001
5002 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5003
5004 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5005 tx_q->cur_tx = entry;
5006
5007 return STMMAC_XDP_TX;
5008 }
5009
5010 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5011 int cpu)
5012 {
5013 int index = cpu;
5014
5015 if (unlikely(index < 0))
5016 index = 0;
5017
5018 while (index >= priv->plat->tx_queues_to_use)
5019 index -= priv->plat->tx_queues_to_use;
5020
5021 return index;
5022 }
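/*
 * Example: with tx_queues_to_use == 4, XDP transmissions issued from CPU 6
 * use queue 6 - 4 = 2, spreading CPUs across the available TX queues.
 */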
5023
5024 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5025 struct xdp_buff *xdp)
5026 {
5027 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5028 int cpu = smp_processor_id();
5029 struct netdev_queue *nq;
5030 int queue;
5031 int res;
5032
5033 if (unlikely(!xdpf))
5034 return STMMAC_XDP_CONSUMED;
5035
5036 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5037 nq = netdev_get_tx_queue(priv->dev, queue);
5038
5039 __netif_tx_lock(nq, cpu);
5040 /* Avoids TX time-out as we are sharing with slow path */
5041 txq_trans_cond_update(nq);
5042
5043 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5044 if (res == STMMAC_XDP_TX)
5045 stmmac_flush_tx_descriptors(priv, queue);
5046
5047 __netif_tx_unlock(nq);
5048
5049 return res;
5050 }
5051
5052 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5053 struct bpf_prog *prog,
5054 struct xdp_buff *xdp)
5055 {
5056 u32 act;
5057 int res;
5058
5059 act = bpf_prog_run_xdp(prog, xdp);
5060 switch (act) {
5061 case XDP_PASS:
5062 res = STMMAC_XDP_PASS;
5063 break;
5064 case XDP_TX:
5065 res = stmmac_xdp_xmit_back(priv, xdp);
5066 break;
5067 case XDP_REDIRECT:
5068 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5069 res = STMMAC_XDP_CONSUMED;
5070 else
5071 res = STMMAC_XDP_REDIRECT;
5072 break;
5073 default:
5074 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5075 fallthrough;
5076 case XDP_ABORTED:
5077 trace_xdp_exception(priv->dev, prog, act);
5078 fallthrough;
5079 case XDP_DROP:
5080 res = STMMAC_XDP_CONSUMED;
5081 break;
5082 }
5083
5084 return res;
5085 }
5086
5087 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5088 struct xdp_buff *xdp)
5089 {
5090 struct bpf_prog *prog;
5091 int res;
5092
5093 prog = READ_ONCE(priv->xdp_prog);
5094 if (!prog) {
5095 res = STMMAC_XDP_PASS;
5096 goto out;
5097 }
5098
5099 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5100 out:
5101 return ERR_PTR(-res);
5102 }
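/*
 * Note the return convention above: the XDP verdict is folded into an error
 * pointer so the RX path can carry either a real skb or a verdict in the
 * same return value; e.g. STMMAC_XDP_CONSUMED (0x1) is returned as
 * ERR_PTR(-1) and the caller recovers the verdict with -PTR_ERR().
 */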
5103
5104 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5105 int xdp_status)
5106 {
5107 int cpu = smp_processor_id();
5108 int queue;
5109
5110 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5111
5112 if (xdp_status & STMMAC_XDP_TX)
5113 stmmac_tx_timer_arm(priv, queue);
5114
5115 if (xdp_status & STMMAC_XDP_REDIRECT)
5116 xdp_do_flush();
5117 }
5118
5119 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5120 struct xdp_buff *xdp)
5121 {
5122 unsigned int metasize = xdp->data - xdp->data_meta;
5123 unsigned int datasize = xdp->data_end - xdp->data;
5124 struct sk_buff *skb;
5125
5126 skb = napi_alloc_skb(&ch->rxtx_napi,
5127 xdp->data_end - xdp->data_hard_start);
5128 if (unlikely(!skb))
5129 return NULL;
5130
5131 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5132 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5133 if (metasize)
5134 skb_metadata_set(skb, metasize);
5135
5136 return skb;
5137 }
5138
5139 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5140 struct dma_desc *p, struct dma_desc *np,
5141 struct xdp_buff *xdp)
5142 {
5143 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5144 struct stmmac_channel *ch = &priv->channel[queue];
5145 unsigned int len = xdp->data_end - xdp->data;
5146 enum pkt_hash_types hash_type;
5147 int coe = priv->hw->rx_csum;
5148 struct sk_buff *skb;
5149 u32 hash;
5150
5151 skb = stmmac_construct_skb_zc(ch, xdp);
5152 if (!skb) {
5153 priv->xstats.rx_dropped++;
5154 return;
5155 }
5156
5157 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5158 if (priv->hw->hw_vlan_en)
5159 /* MAC level stripping. */
5160 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5161 else
5162 /* Driver level stripping. */
5163 stmmac_rx_vlan(priv->dev, skb);
5164 skb->protocol = eth_type_trans(skb, priv->dev);
5165
5166 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5167 skb_checksum_none_assert(skb);
5168 else
5169 skb->ip_summed = CHECKSUM_UNNECESSARY;
5170
5171 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5172 skb_set_hash(skb, hash, hash_type);
5173
5174 skb_record_rx_queue(skb, queue);
5175 napi_gro_receive(&ch->rxtx_napi, skb);
5176
5177 u64_stats_update_begin(&rxq_stats->napi_syncp);
5178 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5179 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5180 u64_stats_update_end(&rxq_stats->napi_syncp);
5181 }
5182
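/* Refill up to @budget RX descriptors from the XSK pool: allocate an XSK
 * buffer for each dirty entry, program its DMA address into the
 * descriptor, decide whether the RX watchdog should fire based on the
 * frame coalescing counters, and finally move the RX tail pointer.
 * Returns false if the pool ran out of buffers.
 */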
5183 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5184 {
5185 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5186 unsigned int entry = rx_q->dirty_rx;
5187 struct dma_desc *rx_desc = NULL;
5188 bool ret = true;
5189
5190 budget = min(budget, stmmac_rx_dirty(priv, queue));
5191
5192 while (budget-- > 0 && entry != rx_q->cur_rx) {
5193 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5194 dma_addr_t dma_addr;
5195 bool use_rx_wd;
5196
5197 if (!buf->xdp) {
5198 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5199 if (!buf->xdp) {
5200 ret = false;
5201 break;
5202 }
5203 }
5204
5205 if (priv->extend_desc)
5206 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5207 else
5208 rx_desc = rx_q->dma_rx + entry;
5209
5210 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5211 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5212 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5213 stmmac_refill_desc3(priv, rx_q, rx_desc);
5214
5215 rx_q->rx_count_frames++;
5216 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5217 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5218 rx_q->rx_count_frames = 0;
5219
5220 use_rx_wd = !priv->rx_coal_frames[queue];
5221 use_rx_wd |= rx_q->rx_count_frames > 0;
5222 if (!priv->use_riwt)
5223 use_rx_wd = false;
5224
5225 dma_wmb();
5226 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5227
5228 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5229 }
5230
5231 if (rx_desc) {
5232 rx_q->dirty_rx = entry;
5233 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5234 (rx_q->dirty_rx * sizeof(struct dma_desc));
5235 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5236 }
5237
5238 return ret;
5239 }
5240
5241 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5242 {
5243 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5244 * to represent incoming packet, whereas cb field in the same structure
5245 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5246 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5247 */
5248 return (struct stmmac_xdp_buff *)xdp;
5249 }
5250
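/* Zero-copy (XSK) receive path: walk the RX ring, run the XDP program
 * directly on the XSK buffers and either recycle them (drop), forward
 * them (XDP_TX/XDP_REDIRECT) or copy them into an skb for the stack
 * (XDP_PASS). The state of a frame spanning several descriptors is kept
 * in rx_q->state across NAPI polls.
 */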
5251 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5252 {
5253 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5254 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5255 unsigned int count = 0, error = 0, len = 0;
5256 int dirty = stmmac_rx_dirty(priv, queue);
5257 unsigned int next_entry = rx_q->cur_rx;
5258 u32 rx_errors = 0, rx_dropped = 0;
5259 unsigned int desc_size;
5260 struct bpf_prog *prog;
5261 bool failure = false;
5262 int xdp_status = 0;
5263 int status = 0;
5264
5265 if (netif_msg_rx_status(priv)) {
5266 void *rx_head;
5267
5268 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5269 if (priv->extend_desc) {
5270 rx_head = (void *)rx_q->dma_erx;
5271 desc_size = sizeof(struct dma_extended_desc);
5272 } else {
5273 rx_head = (void *)rx_q->dma_rx;
5274 desc_size = sizeof(struct dma_desc);
5275 }
5276
5277 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5278 rx_q->dma_rx_phy, desc_size);
5279 }
5280 while (count < limit) {
5281 struct stmmac_rx_buffer *buf;
5282 struct stmmac_xdp_buff *ctx;
5283 unsigned int buf1_len = 0;
5284 struct dma_desc *np, *p;
5285 int entry;
5286 int res;
5287
5288 if (!count && rx_q->state_saved) {
5289 error = rx_q->state.error;
5290 len = rx_q->state.len;
5291 } else {
5292 rx_q->state_saved = false;
5293 error = 0;
5294 len = 0;
5295 }
5296
5297 if (count >= limit)
5298 break;
5299
5300 read_again:
5301 buf1_len = 0;
5302 entry = next_entry;
5303 buf = &rx_q->buf_pool[entry];
5304
5305 if (dirty >= STMMAC_RX_FILL_BATCH) {
5306 failure = failure ||
5307 !stmmac_rx_refill_zc(priv, queue, dirty);
5308 dirty = 0;
5309 }
5310
5311 if (priv->extend_desc)
5312 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5313 else
5314 p = rx_q->dma_rx + entry;
5315
5316 /* read the status of the incoming frame */
5317 status = stmmac_rx_status(priv, &priv->xstats, p);
5318 /* check if managed by the DMA otherwise go ahead */
5319 if (unlikely(status & dma_own))
5320 break;
5321
5322 /* Prefetch the next RX descriptor */
5323 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5324 priv->dma_conf.dma_rx_size);
5325 next_entry = rx_q->cur_rx;
5326
5327 if (priv->extend_desc)
5328 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5329 else
5330 np = rx_q->dma_rx + next_entry;
5331
5332 prefetch(np);
5333
5334 		/* Ensure a valid XSK buffer before proceeding */
5335 if (!buf->xdp)
5336 break;
5337
5338 if (priv->extend_desc)
5339 stmmac_rx_extended_status(priv, &priv->xstats,
5340 rx_q->dma_erx + entry);
5341 if (unlikely(status == discard_frame)) {
5342 xsk_buff_free(buf->xdp);
5343 buf->xdp = NULL;
5344 dirty++;
5345 error = 1;
5346 if (!priv->hwts_rx_en)
5347 rx_errors++;
5348 }
5349
5350 if (unlikely(error && (status & rx_not_ls)))
5351 goto read_again;
5352 if (unlikely(error)) {
5353 count++;
5354 continue;
5355 }
5356
5357 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5358 if (likely(status & rx_not_ls)) {
5359 xsk_buff_free(buf->xdp);
5360 buf->xdp = NULL;
5361 dirty++;
5362 count++;
5363 goto read_again;
5364 }
5365
5366 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5367 ctx->priv = priv;
5368 ctx->desc = p;
5369 ctx->ndesc = np;
5370
5371 		/* XDP ZC frames only support primary buffers for now */
5372 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5373 len += buf1_len;
5374
5375 /* ACS is disabled; strip manually. */
5376 if (likely(!(status & rx_not_ls))) {
5377 buf1_len -= ETH_FCS_LEN;
5378 len -= ETH_FCS_LEN;
5379 }
5380
5381 		/* RX buffer is good and fits into an XSK pool buffer */
5382 buf->xdp->data_end = buf->xdp->data + buf1_len;
5383 xsk_buff_dma_sync_for_cpu(buf->xdp);
5384
5385 prog = READ_ONCE(priv->xdp_prog);
5386 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5387
5388 switch (res) {
5389 case STMMAC_XDP_PASS:
5390 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5391 xsk_buff_free(buf->xdp);
5392 break;
5393 case STMMAC_XDP_CONSUMED:
5394 xsk_buff_free(buf->xdp);
5395 rx_dropped++;
5396 break;
5397 case STMMAC_XDP_TX:
5398 case STMMAC_XDP_REDIRECT:
5399 xdp_status |= res;
5400 break;
5401 }
5402
5403 buf->xdp = NULL;
5404 dirty++;
5405 count++;
5406 }
5407
5408 if (status & rx_not_ls) {
5409 rx_q->state_saved = true;
5410 rx_q->state.error = error;
5411 rx_q->state.len = len;
5412 }
5413
5414 stmmac_finalize_xdp_rx(priv, xdp_status);
5415
5416 u64_stats_update_begin(&rxq_stats->napi_syncp);
5417 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5418 u64_stats_update_end(&rxq_stats->napi_syncp);
5419
5420 priv->xstats.rx_dropped += rx_dropped;
5421 priv->xstats.rx_errors += rx_errors;
5422
5423 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5424 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5425 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5426 else
5427 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5428
5429 return (int)count;
5430 }
5431
5432 return failure ? limit : (int)count;
5433 }
5434
5435 /**
5436 * stmmac_rx - manage the receive process
5437 * @priv: driver private structure
5438 * @limit: napi budget
5439 * @queue: RX queue index.
5440 * Description: this is the function called by the napi poll method.
5441 * It gets all the frames inside the ring.
5442 */
5443 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5444 {
5445 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5446 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5447 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5448 struct stmmac_channel *ch = &priv->channel[queue];
5449 unsigned int count = 0, error = 0, len = 0;
5450 int status = 0, coe = priv->hw->rx_csum;
5451 unsigned int next_entry = rx_q->cur_rx;
5452 enum dma_data_direction dma_dir;
5453 unsigned int desc_size;
5454 struct sk_buff *skb = NULL;
5455 struct stmmac_xdp_buff ctx;
5456 int xdp_status = 0;
5457 int buf_sz;
5458
5459 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5460 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5461 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5462
5463 if (netif_msg_rx_status(priv)) {
5464 void *rx_head;
5465
5466 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5467 if (priv->extend_desc) {
5468 rx_head = (void *)rx_q->dma_erx;
5469 desc_size = sizeof(struct dma_extended_desc);
5470 } else {
5471 rx_head = (void *)rx_q->dma_rx;
5472 desc_size = sizeof(struct dma_desc);
5473 }
5474
5475 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5476 rx_q->dma_rx_phy, desc_size);
5477 }
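	/* A frame can span several descriptors; if the previous NAPI poll
	 * stopped in the middle of one, resume from the saved skb/len/error
	 * state instead of starting a new frame.
	 */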
5478 while (count < limit) {
5479 unsigned int buf1_len = 0, buf2_len = 0;
5480 enum pkt_hash_types hash_type;
5481 struct stmmac_rx_buffer *buf;
5482 struct dma_desc *np, *p;
5483 int entry;
5484 u32 hash;
5485
5486 if (!count && rx_q->state_saved) {
5487 skb = rx_q->state.skb;
5488 error = rx_q->state.error;
5489 len = rx_q->state.len;
5490 } else {
5491 rx_q->state_saved = false;
5492 skb = NULL;
5493 error = 0;
5494 len = 0;
5495 }
5496
5497 read_again:
5498 if (count >= limit)
5499 break;
5500
5501 buf1_len = 0;
5502 buf2_len = 0;
5503 entry = next_entry;
5504 buf = &rx_q->buf_pool[entry];
5505
5506 if (priv->extend_desc)
5507 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5508 else
5509 p = rx_q->dma_rx + entry;
5510
5511 /* read the status of the incoming frame */
5512 status = stmmac_rx_status(priv, &priv->xstats, p);
5513 /* check if managed by the DMA otherwise go ahead */
5514 if (unlikely(status & dma_own))
5515 break;
5516
5517 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5518 priv->dma_conf.dma_rx_size);
5519 next_entry = rx_q->cur_rx;
5520
5521 if (priv->extend_desc)
5522 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5523 else
5524 np = rx_q->dma_rx + next_entry;
5525
5526 prefetch(np);
5527
5528 if (priv->extend_desc)
5529 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5530 if (unlikely(status == discard_frame)) {
5531 page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5532 buf->page = NULL;
5533 error = 1;
5534 if (!priv->hwts_rx_en)
5535 rx_errors++;
5536 }
5537
5538 if (unlikely(error && (status & rx_not_ls)))
5539 goto read_again;
5540 if (unlikely(error)) {
5541 dev_kfree_skb(skb);
5542 skb = NULL;
5543 count++;
5544 continue;
5545 }
5546
5547 /* Buffer is good. Go on. */
5548
5549 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5550 len += buf1_len;
5551 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5552 len += buf2_len;
5553
5554 /* ACS is disabled; strip manually. */
5555 if (likely(!(status & rx_not_ls))) {
5556 if (buf2_len) {
5557 buf2_len -= ETH_FCS_LEN;
5558 len -= ETH_FCS_LEN;
5559 } else if (buf1_len) {
5560 buf1_len -= ETH_FCS_LEN;
5561 len -= ETH_FCS_LEN;
5562 }
5563 }
5564
5565 if (!skb) {
5566 unsigned int pre_len, sync_len;
5567
5568 dma_sync_single_for_cpu(priv->device, buf->addr,
5569 buf1_len, dma_dir);
5570 net_prefetch(page_address(buf->page) +
5571 buf->page_offset);
5572
5573 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5574 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5575 buf->page_offset, buf1_len, true);
5576
5577 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5578 buf->page_offset;
5579
5580 ctx.priv = priv;
5581 ctx.desc = p;
5582 ctx.ndesc = np;
5583
5584 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5585 			/* Due to xdp_adjust_tail: the DMA sync for_device must
5586 			 * cover the maximum length the CPU touched
5587 			 */
5588 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5589 buf->page_offset;
5590 sync_len = max(sync_len, pre_len);
5591
5592 			/* For verdicts other than XDP_PASS */
5593 if (IS_ERR(skb)) {
5594 unsigned int xdp_res = -PTR_ERR(skb);
5595
5596 if (xdp_res & STMMAC_XDP_CONSUMED) {
5597 page_pool_put_page(rx_q->page_pool,
5598 virt_to_head_page(ctx.xdp.data),
5599 sync_len, true);
5600 buf->page = NULL;
5601 rx_dropped++;
5602
5603 				/* Clear skb, as it was only used to carry
5604 				 * the XDP verdict status.
5605 				 */
5606 skb = NULL;
5607
5608 if (unlikely((status & rx_not_ls)))
5609 goto read_again;
5610
5611 count++;
5612 continue;
5613 } else if (xdp_res & (STMMAC_XDP_TX |
5614 STMMAC_XDP_REDIRECT)) {
5615 xdp_status |= xdp_res;
5616 buf->page = NULL;
5617 skb = NULL;
5618 count++;
5619 continue;
5620 }
5621 }
5622 }
5623
5624 if (!skb) {
5625 unsigned int head_pad_len;
5626
5627 /* XDP program may expand or reduce tail */
5628 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5629
5630 skb = napi_build_skb(page_address(buf->page),
5631 rx_q->napi_skb_frag_size);
5632 if (!skb) {
5633 page_pool_recycle_direct(rx_q->page_pool,
5634 buf->page);
5635 rx_dropped++;
5636 count++;
5637 goto drain_data;
5638 }
5639
5640 /* XDP program may adjust header */
5641 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5642 skb_reserve(skb, head_pad_len);
5643 skb_put(skb, buf1_len);
5644 skb_mark_for_recycle(skb);
5645 buf->page = NULL;
5646 } else if (buf1_len) {
5647 dma_sync_single_for_cpu(priv->device, buf->addr,
5648 buf1_len, dma_dir);
5649 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5650 buf->page, buf->page_offset, buf1_len,
5651 priv->dma_conf.dma_buf_sz);
5652 buf->page = NULL;
5653 }
5654
5655 if (buf2_len) {
5656 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5657 buf2_len, dma_dir);
5658 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5659 buf->sec_page, 0, buf2_len,
5660 priv->dma_conf.dma_buf_sz);
5661 buf->sec_page = NULL;
5662 }
5663
5664 drain_data:
5665 if (likely(status & rx_not_ls))
5666 goto read_again;
5667 if (!skb)
5668 continue;
5669
5670 /* Got entire packet into SKB. Finish it. */
5671
5672 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5673
5674 if (priv->hw->hw_vlan_en)
5675 /* MAC level stripping. */
5676 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5677 else
5678 /* Driver level stripping. */
5679 stmmac_rx_vlan(priv->dev, skb);
5680
5681 skb->protocol = eth_type_trans(skb, priv->dev);
5682
5683 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5684 skb_checksum_none_assert(skb);
5685 else
5686 skb->ip_summed = CHECKSUM_UNNECESSARY;
5687
5688 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5689 skb_set_hash(skb, hash, hash_type);
5690
5691 skb_record_rx_queue(skb, queue);
5692 napi_gro_receive(&ch->rx_napi, skb);
5693 skb = NULL;
5694
5695 rx_packets++;
5696 rx_bytes += len;
5697 count++;
5698 }
5699
5700 if (status & rx_not_ls || skb) {
5701 rx_q->state_saved = true;
5702 rx_q->state.skb = skb;
5703 rx_q->state.error = error;
5704 rx_q->state.len = len;
5705 }
5706
5707 stmmac_finalize_xdp_rx(priv, xdp_status);
5708
5709 stmmac_rx_refill(priv, queue);
5710
5711 u64_stats_update_begin(&rxq_stats->napi_syncp);
5712 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5713 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5714 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5715 u64_stats_update_end(&rxq_stats->napi_syncp);
5716
5717 priv->xstats.rx_dropped += rx_dropped;
5718 priv->xstats.rx_errors += rx_errors;
5719
5720 return count;
5721 }
5722
5723 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5724 {
5725 struct stmmac_channel *ch =
5726 container_of(napi, struct stmmac_channel, rx_napi);
5727 struct stmmac_priv *priv = ch->priv_data;
5728 struct stmmac_rxq_stats *rxq_stats;
5729 u32 chan = ch->index;
5730 int work_done;
5731
5732 rxq_stats = &priv->xstats.rxq_stats[chan];
5733 u64_stats_update_begin(&rxq_stats->napi_syncp);
5734 u64_stats_inc(&rxq_stats->napi.poll);
5735 u64_stats_update_end(&rxq_stats->napi_syncp);
5736
5737 work_done = stmmac_rx(priv, budget, chan);
5738 if (work_done < budget && napi_complete_done(napi, work_done)) {
5739 unsigned long flags;
5740
5741 spin_lock_irqsave(&ch->lock, flags);
5742 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5743 spin_unlock_irqrestore(&ch->lock, flags);
5744 }
5745
5746 return work_done;
5747 }
5748
5749 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5750 {
5751 struct stmmac_channel *ch =
5752 container_of(napi, struct stmmac_channel, tx_napi);
5753 struct stmmac_priv *priv = ch->priv_data;
5754 struct stmmac_txq_stats *txq_stats;
5755 bool pending_packets = false;
5756 u32 chan = ch->index;
5757 int work_done;
5758
5759 txq_stats = &priv->xstats.txq_stats[chan];
5760 u64_stats_update_begin(&txq_stats->napi_syncp);
5761 u64_stats_inc(&txq_stats->napi.poll);
5762 u64_stats_update_end(&txq_stats->napi_syncp);
5763
5764 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5765 work_done = min(work_done, budget);
5766
5767 if (work_done < budget && napi_complete_done(napi, work_done)) {
5768 unsigned long flags;
5769
5770 spin_lock_irqsave(&ch->lock, flags);
5771 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5772 spin_unlock_irqrestore(&ch->lock, flags);
5773 }
5774
5775 	/* TX still has packets to handle, check if we need to arm the tx timer */
5776 if (pending_packets)
5777 stmmac_tx_timer_arm(priv, chan);
5778
5779 return work_done;
5780 }
5781
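/* Combined RX/TX NAPI handler used for channels running the XDP
 * zero-copy (XSK) path: TX completion and zero-copy RX are serviced
 * from the same rxtx_napi context.
 */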
5782 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5783 {
5784 struct stmmac_channel *ch =
5785 container_of(napi, struct stmmac_channel, rxtx_napi);
5786 struct stmmac_priv *priv = ch->priv_data;
5787 bool tx_pending_packets = false;
5788 int rx_done, tx_done, rxtx_done;
5789 struct stmmac_rxq_stats *rxq_stats;
5790 struct stmmac_txq_stats *txq_stats;
5791 u32 chan = ch->index;
5792
5793 rxq_stats = &priv->xstats.rxq_stats[chan];
5794 u64_stats_update_begin(&rxq_stats->napi_syncp);
5795 u64_stats_inc(&rxq_stats->napi.poll);
5796 u64_stats_update_end(&rxq_stats->napi_syncp);
5797
5798 txq_stats = &priv->xstats.txq_stats[chan];
5799 u64_stats_update_begin(&txq_stats->napi_syncp);
5800 u64_stats_inc(&txq_stats->napi.poll);
5801 u64_stats_update_end(&txq_stats->napi_syncp);
5802
5803 tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5804 tx_done = min(tx_done, budget);
5805
5806 rx_done = stmmac_rx_zc(priv, budget, chan);
5807
5808 rxtx_done = max(tx_done, rx_done);
5809
5810 /* If either TX or RX work is not complete, return budget
5811 	 * and keep polling
5812 */
5813 if (rxtx_done >= budget)
5814 return budget;
5815
5816 /* all work done, exit the polling mode */
5817 if (napi_complete_done(napi, rxtx_done)) {
5818 unsigned long flags;
5819
5820 spin_lock_irqsave(&ch->lock, flags);
5821 		/* Both RX and TX work are complete,
5822 * so enable both RX & TX IRQs.
5823 */
5824 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5825 spin_unlock_irqrestore(&ch->lock, flags);
5826 }
5827
5828 	/* TX still has packets to handle, check if we need to arm the tx timer */
5829 if (tx_pending_packets)
5830 stmmac_tx_timer_arm(priv, chan);
5831
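	/* We only get here when both RX and TX are below budget, so never
	 * return the full budget: a value below budget tells the NAPI core
	 * that this poll is complete.
	 */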
5832 return min(rxtx_done, budget - 1);
5833 }
5834
5835 /**
5836 * stmmac_tx_timeout
5837 * @dev : Pointer to net device structure
5838 * @txqueue: the index of the hanging transmit queue
5839 * Description: this function is called when a packet transmission fails to
5840 * complete within a reasonable time. The driver will mark the error in the
5841 * netdev structure and arrange for the device to be reset to a sane state
5842 * in order to transmit a new packet.
5843 */
5844 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5845 {
5846 struct stmmac_priv *priv = netdev_priv(dev);
5847
5848 stmmac_global_err(priv);
5849 }
5850
5851 /**
5852 * stmmac_set_rx_mode - entry point for multicast addressing
5853 * @dev : pointer to the device structure
5854 * Description:
5855 * This function is a driver entry point which gets called by the kernel
5856 * whenever multicast addresses must be enabled/disabled.
5857 * Return value:
5858 * void.
5859 */
5860 static void stmmac_set_rx_mode(struct net_device *dev)
5861 {
5862 struct stmmac_priv *priv = netdev_priv(dev);
5863
5864 stmmac_set_filter(priv, priv->hw, dev);
5865 }
5866
5867 /**
5868 * stmmac_change_mtu - entry point to change MTU size for the device.
5869 * @dev : device pointer.
5870 * @new_mtu : the new MTU size for the device.
5871 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5872 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5873 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5874 * Return value:
5875 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5876 * file on failure.
5877 */
5878 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5879 {
5880 struct stmmac_priv *priv = netdev_priv(dev);
5881 int txfifosz = priv->plat->tx_fifo_size;
5882 struct stmmac_dma_conf *dma_conf;
5883 const int mtu = new_mtu;
5884 int ret;
5885
5886 if (txfifosz == 0)
5887 txfifosz = priv->dma_cap.tx_fifo_size;
5888
5889 txfifosz /= priv->plat->tx_queues_to_use;
5890
5891 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5892 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5893 return -EINVAL;
5894 }
5895
5896 new_mtu = STMMAC_ALIGN(new_mtu);
5897
5898 /* If condition true, FIFO is too small or MTU too large */
5899 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5900 return -EINVAL;
5901
5902 if (netif_running(dev)) {
5903 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5904 /* Try to allocate the new DMA conf with the new mtu */
5905 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5906 if (IS_ERR(dma_conf)) {
5907 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5908 mtu);
5909 return PTR_ERR(dma_conf);
5910 }
5911
5912 stmmac_release(dev);
5913
5914 ret = __stmmac_open(dev, dma_conf);
5915 if (ret) {
5916 free_dma_desc_resources(priv, dma_conf);
5917 kfree(dma_conf);
5918 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5919 return ret;
5920 }
5921
5922 kfree(dma_conf);
5923
5924 stmmac_set_rx_mode(dev);
5925 }
5926
5927 WRITE_ONCE(dev->mtu, mtu);
5928 netdev_update_features(dev);
5929
5930 return 0;
5931 }
5932
5933 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5934 netdev_features_t features)
5935 {
5936 struct stmmac_priv *priv = netdev_priv(dev);
5937
5938 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5939 features &= ~NETIF_F_RXCSUM;
5940
5941 if (!priv->plat->tx_coe)
5942 features &= ~NETIF_F_CSUM_MASK;
5943
5944 /* Some GMAC devices have a bugged Jumbo frame support that
5945 * needs to have the Tx COE disabled for oversized frames
5946 * (due to limited buffer sizes). In this case we disable
5947 	 * the TX csum insertion in the TDES and do not use SF.
5948 */
5949 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5950 features &= ~NETIF_F_CSUM_MASK;
5951
5952 /* Disable tso if asked by ethtool */
5953 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5954 if (features & NETIF_F_TSO)
5955 priv->tso = true;
5956 else
5957 priv->tso = false;
5958 }
5959
5960 return features;
5961 }
5962
5963 static int stmmac_set_features(struct net_device *netdev,
5964 netdev_features_t features)
5965 {
5966 struct stmmac_priv *priv = netdev_priv(netdev);
5967
5968 	/* Keep the COE type if RX checksum offload is supported */
5969 if (features & NETIF_F_RXCSUM)
5970 priv->hw->rx_csum = priv->plat->rx_coe;
5971 else
5972 priv->hw->rx_csum = 0;
5973 /* No check needed because rx_coe has been set before and it will be
5974 * fixed in case of issue.
5975 */
5976 stmmac_rx_ipc(priv, priv->hw);
5977
5978 if (priv->sph_cap) {
5979 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5980 u32 chan;
5981
5982 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5983 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5984 }
5985
5986 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5987 priv->hw->hw_vlan_en = true;
5988 else
5989 priv->hw->hw_vlan_en = false;
5990
5991 stmmac_set_hw_vlan_mode(priv, priv->hw);
5992
5993 return 0;
5994 }
5995
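/* Handle the MAC-level (non-DMA) interrupt sources shared by all IRQ
 * flavours: wake-up events, EST and FPE status, LPI entry/exit, per-queue
 * MTL status, PCS link changes and timestamp interrupts.
 */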
5996 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5997 {
5998 u32 rx_cnt = priv->plat->rx_queues_to_use;
5999 u32 tx_cnt = priv->plat->tx_queues_to_use;
6000 u32 queues_count;
6001 u32 queue;
6002 bool xmac;
6003
6004 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6005 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6006
6007 if (priv->irq_wake)
6008 pm_wakeup_event(priv->device, 0);
6009
6010 if (priv->dma_cap.estsel)
6011 stmmac_est_irq_status(priv, priv, priv->dev,
6012 &priv->xstats, tx_cnt);
6013
6014 if (stmmac_fpe_supported(priv))
6015 stmmac_fpe_irq_status(priv);
6016
6017 /* To handle GMAC own interrupts */
6018 if ((priv->plat->has_gmac) || xmac) {
6019 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6020
6021 if (unlikely(status)) {
6022 /* For LPI we need to save the tx status */
6023 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6024 priv->tx_path_in_lpi_mode = true;
6025 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6026 priv->tx_path_in_lpi_mode = false;
6027 }
6028
6029 for (queue = 0; queue < queues_count; queue++)
6030 stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6031
6032 /* PCS link status */
6033 if (priv->hw->pcs &&
6034 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6035 if (priv->xstats.pcs_link)
6036 netif_carrier_on(priv->dev);
6037 else
6038 netif_carrier_off(priv->dev);
6039 }
6040
6041 stmmac_timestamp_interrupt(priv, priv);
6042 }
6043 }
6044
6045 /**
6046 * stmmac_interrupt - main ISR
6047 * @irq: interrupt number.
6048 * @dev_id: to pass the net device pointer.
6049 * Description: this is the main driver interrupt service routine.
6050 * It can call:
6051 * o DMA service routine (to manage incoming frame reception and transmission
6052 * status)
6053 * o Core interrupts to manage: remote wake-up, management counter, LPI
6054 * interrupts.
6055 */
6056 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6057 {
6058 struct net_device *dev = (struct net_device *)dev_id;
6059 struct stmmac_priv *priv = netdev_priv(dev);
6060
6061 /* Check if adapter is up */
6062 if (test_bit(STMMAC_DOWN, &priv->state))
6063 return IRQ_HANDLED;
6064
6065 /* Check ASP error if it isn't delivered via an individual IRQ */
6066 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6067 return IRQ_HANDLED;
6068
6069 /* To handle Common interrupts */
6070 stmmac_common_interrupt(priv);
6071
6072 /* To handle DMA interrupts */
6073 stmmac_dma_interrupt(priv);
6074
6075 return IRQ_HANDLED;
6076 }
6077
6078 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6079 {
6080 struct net_device *dev = (struct net_device *)dev_id;
6081 struct stmmac_priv *priv = netdev_priv(dev);
6082
6083 /* Check if adapter is up */
6084 if (test_bit(STMMAC_DOWN, &priv->state))
6085 return IRQ_HANDLED;
6086
6087 /* To handle Common interrupts */
6088 stmmac_common_interrupt(priv);
6089
6090 return IRQ_HANDLED;
6091 }
6092
6093 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6094 {
6095 struct net_device *dev = (struct net_device *)dev_id;
6096 struct stmmac_priv *priv = netdev_priv(dev);
6097
6098 /* Check if adapter is up */
6099 if (test_bit(STMMAC_DOWN, &priv->state))
6100 return IRQ_HANDLED;
6101
6102 /* Check if a fatal error happened */
6103 stmmac_safety_feat_interrupt(priv);
6104
6105 return IRQ_HANDLED;
6106 }
6107
6108 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6109 {
6110 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6111 struct stmmac_dma_conf *dma_conf;
6112 int chan = tx_q->queue_index;
6113 struct stmmac_priv *priv;
6114 int status;
6115
6116 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6117 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6118
6119 /* Check if adapter is up */
6120 if (test_bit(STMMAC_DOWN, &priv->state))
6121 return IRQ_HANDLED;
6122
6123 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6124
6125 if (unlikely(status & tx_hard_error_bump_tc)) {
6126 /* Try to bump up the dma threshold on this failure */
6127 stmmac_bump_dma_threshold(priv, chan);
6128 } else if (unlikely(status == tx_hard_error)) {
6129 stmmac_tx_err(priv, chan);
6130 }
6131
6132 return IRQ_HANDLED;
6133 }
6134
6135 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6136 {
6137 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6138 struct stmmac_dma_conf *dma_conf;
6139 int chan = rx_q->queue_index;
6140 struct stmmac_priv *priv;
6141
6142 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6143 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6144
6145 /* Check if adapter is up */
6146 if (test_bit(STMMAC_DOWN, &priv->state))
6147 return IRQ_HANDLED;
6148
6149 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6150
6151 return IRQ_HANDLED;
6152 }
6153
6154 /**
6155 * stmmac_ioctl - Entry point for the Ioctl
6156 * @dev: Device pointer.
6157  * @rq: An IOCTL-specific structure that can contain a pointer to
6158 * a proprietary structure used to pass information to the driver.
6159 * @cmd: IOCTL command
6160 * Description:
6161 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6162 */
6163 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6164 {
6165 	struct stmmac_priv *priv = netdev_priv(dev);
6166 int ret = -EOPNOTSUPP;
6167
6168 if (!netif_running(dev))
6169 return -EINVAL;
6170
6171 switch (cmd) {
6172 case SIOCGMIIPHY:
6173 case SIOCGMIIREG:
6174 case SIOCSMIIREG:
6175 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6176 break;
6177 case SIOCSHWTSTAMP:
6178 ret = stmmac_hwtstamp_set(dev, rq);
6179 break;
6180 case SIOCGHWTSTAMP:
6181 ret = stmmac_hwtstamp_get(dev, rq);
6182 break;
6183 default:
6184 break;
6185 }
6186
6187 return ret;
6188 }
6189
6190 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6191 void *cb_priv)
6192 {
6193 struct stmmac_priv *priv = cb_priv;
6194 int ret = -EOPNOTSUPP;
6195
6196 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6197 return ret;
6198
6199 __stmmac_disable_all_queues(priv);
6200
6201 switch (type) {
6202 case TC_SETUP_CLSU32:
6203 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6204 break;
6205 case TC_SETUP_CLSFLOWER:
6206 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6207 break;
6208 default:
6209 break;
6210 }
6211
6212 stmmac_enable_all_queues(priv);
6213 return ret;
6214 }
6215
6216 static LIST_HEAD(stmmac_block_cb_list);
6217
6218 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6219 void *type_data)
6220 {
6221 struct stmmac_priv *priv = netdev_priv(ndev);
6222
6223 switch (type) {
6224 case TC_QUERY_CAPS:
6225 return stmmac_tc_query_caps(priv, priv, type_data);
6226 case TC_SETUP_QDISC_MQPRIO:
6227 return stmmac_tc_setup_mqprio(priv, priv, type_data);
6228 case TC_SETUP_BLOCK:
6229 return flow_block_cb_setup_simple(type_data,
6230 &stmmac_block_cb_list,
6231 stmmac_setup_tc_block_cb,
6232 priv, priv, true);
6233 case TC_SETUP_QDISC_CBS:
6234 return stmmac_tc_setup_cbs(priv, priv, type_data);
6235 case TC_SETUP_QDISC_TAPRIO:
6236 return stmmac_tc_setup_taprio(priv, priv, type_data);
6237 case TC_SETUP_QDISC_ETF:
6238 return stmmac_tc_setup_etf(priv, priv, type_data);
6239 default:
6240 return -EOPNOTSUPP;
6241 }
6242 }
6243
6244 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6245 struct net_device *sb_dev)
6246 {
6247 int gso = skb_shinfo(skb)->gso_type;
6248
6249 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6250 /*
6251 * There is no way to determine the number of TSO/USO
6252 	 * capable Queues. Let's always use Queue 0
6253 * because if TSO/USO is supported then at least this
6254 * one will be capable.
6255 */
6256 return 0;
6257 }
6258
6259 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6260 }
6261
6262 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6263 {
6264 struct stmmac_priv *priv = netdev_priv(ndev);
6265 int ret = 0;
6266
6267 ret = pm_runtime_resume_and_get(priv->device);
6268 if (ret < 0)
6269 return ret;
6270
6271 ret = eth_mac_addr(ndev, addr);
6272 if (ret)
6273 goto set_mac_error;
6274
6275 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6276
6277 set_mac_error:
6278 pm_runtime_put(priv->device);
6279
6280 return ret;
6281 }
6282
6283 #ifdef CONFIG_DEBUG_FS
6284 static struct dentry *stmmac_fs_dir;
6285
6286 static void sysfs_display_ring(void *head, int size, int extend_desc,
6287 struct seq_file *seq, dma_addr_t dma_phy_addr)
6288 {
6289 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6290 struct dma_desc *p = (struct dma_desc *)head;
6291 unsigned int desc_size;
6292 dma_addr_t dma_addr;
6293 int i;
6294
6295 desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6296 for (i = 0; i < size; i++) {
6297 dma_addr = dma_phy_addr + i * desc_size;
6298 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6299 i, &dma_addr,
6300 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6301 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6302 if (extend_desc)
6303 p = &(++ep)->basic;
6304 else
6305 p++;
6306 }
6307 }
6308
6309 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6310 {
6311 struct net_device *dev = seq->private;
6312 struct stmmac_priv *priv = netdev_priv(dev);
6313 u32 rx_count = priv->plat->rx_queues_to_use;
6314 u32 tx_count = priv->plat->tx_queues_to_use;
6315 u32 queue;
6316
6317 if ((dev->flags & IFF_UP) == 0)
6318 return 0;
6319
6320 for (queue = 0; queue < rx_count; queue++) {
6321 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6322
6323 seq_printf(seq, "RX Queue %d:\n", queue);
6324
6325 if (priv->extend_desc) {
6326 seq_printf(seq, "Extended descriptor ring:\n");
6327 sysfs_display_ring((void *)rx_q->dma_erx,
6328 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6329 } else {
6330 seq_printf(seq, "Descriptor ring:\n");
6331 sysfs_display_ring((void *)rx_q->dma_rx,
6332 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6333 }
6334 }
6335
6336 for (queue = 0; queue < tx_count; queue++) {
6337 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6338
6339 seq_printf(seq, "TX Queue %d:\n", queue);
6340
6341 if (priv->extend_desc) {
6342 seq_printf(seq, "Extended descriptor ring:\n");
6343 sysfs_display_ring((void *)tx_q->dma_etx,
6344 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6345 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6346 seq_printf(seq, "Descriptor ring:\n");
6347 sysfs_display_ring((void *)tx_q->dma_tx,
6348 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6349 }
6350 }
6351
6352 return 0;
6353 }
6354 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6355
6356 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6357 {
6358 static const char * const dwxgmac_timestamp_source[] = {
6359 "None",
6360 "Internal",
6361 "External",
6362 "Both",
6363 };
6364 static const char * const dwxgmac_safety_feature_desc[] = {
6365 "No",
6366 "All Safety Features with ECC and Parity",
6367 "All Safety Features without ECC or Parity",
6368 "All Safety Features with Parity Only",
6369 "ECC Only",
6370 "UNDEFINED",
6371 "UNDEFINED",
6372 "UNDEFINED",
6373 };
6374 struct net_device *dev = seq->private;
6375 struct stmmac_priv *priv = netdev_priv(dev);
6376
6377 if (!priv->hw_cap_support) {
6378 seq_printf(seq, "DMA HW features not supported\n");
6379 return 0;
6380 }
6381
6382 seq_printf(seq, "==============================\n");
6383 seq_printf(seq, "\tDMA HW features\n");
6384 seq_printf(seq, "==============================\n");
6385
6386 seq_printf(seq, "\t10/100 Mbps: %s\n",
6387 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6388 seq_printf(seq, "\t1000 Mbps: %s\n",
6389 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6390 seq_printf(seq, "\tHalf duplex: %s\n",
6391 (priv->dma_cap.half_duplex) ? "Y" : "N");
6392 if (priv->plat->has_xgmac) {
6393 seq_printf(seq,
6394 "\tNumber of Additional MAC address registers: %d\n",
6395 priv->dma_cap.multi_addr);
6396 } else {
6397 seq_printf(seq, "\tHash Filter: %s\n",
6398 (priv->dma_cap.hash_filter) ? "Y" : "N");
6399 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6400 (priv->dma_cap.multi_addr) ? "Y" : "N");
6401 }
6402 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6403 (priv->dma_cap.pcs) ? "Y" : "N");
6404 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6405 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6406 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6407 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6408 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6409 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6410 seq_printf(seq, "\tRMON module: %s\n",
6411 (priv->dma_cap.rmon) ? "Y" : "N");
6412 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6413 (priv->dma_cap.time_stamp) ? "Y" : "N");
6414 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6415 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6416 if (priv->plat->has_xgmac)
6417 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6418 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6419 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6420 (priv->dma_cap.eee) ? "Y" : "N");
6421 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6422 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6423 (priv->dma_cap.tx_coe) ? "Y" : "N");
6424 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6425 priv->plat->has_xgmac) {
6426 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6427 (priv->dma_cap.rx_coe) ? "Y" : "N");
6428 } else {
6429 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6430 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6431 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6432 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6433 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6434 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6435 }
6436 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6437 priv->dma_cap.number_rx_channel);
6438 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6439 priv->dma_cap.number_tx_channel);
6440 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6441 priv->dma_cap.number_rx_queues);
6442 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6443 priv->dma_cap.number_tx_queues);
6444 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6445 (priv->dma_cap.enh_desc) ? "Y" : "N");
6446 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6447 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6448 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6449 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6450 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6451 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6452 priv->dma_cap.pps_out_num);
6453 seq_printf(seq, "\tSafety Features: %s\n",
6454 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6455 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6456 priv->dma_cap.frpsel ? "Y" : "N");
6457 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6458 priv->dma_cap.host_dma_width);
6459 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6460 priv->dma_cap.rssen ? "Y" : "N");
6461 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6462 priv->dma_cap.vlhash ? "Y" : "N");
6463 seq_printf(seq, "\tSplit Header: %s\n",
6464 priv->dma_cap.sphen ? "Y" : "N");
6465 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6466 priv->dma_cap.vlins ? "Y" : "N");
6467 seq_printf(seq, "\tDouble VLAN: %s\n",
6468 priv->dma_cap.dvlan ? "Y" : "N");
6469 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6470 priv->dma_cap.l3l4fnum);
6471 seq_printf(seq, "\tARP Offloading: %s\n",
6472 priv->dma_cap.arpoffsel ? "Y" : "N");
6473 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6474 priv->dma_cap.estsel ? "Y" : "N");
6475 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6476 priv->dma_cap.fpesel ? "Y" : "N");
6477 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6478 priv->dma_cap.tbssel ? "Y" : "N");
6479 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6480 priv->dma_cap.tbs_ch_num);
6481 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6482 priv->dma_cap.sgfsel ? "Y" : "N");
6483 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6484 BIT(priv->dma_cap.ttsfd) >> 1);
6485 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6486 priv->dma_cap.numtc);
6487 seq_printf(seq, "\tDCB Feature: %s\n",
6488 priv->dma_cap.dcben ? "Y" : "N");
6489 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6490 priv->dma_cap.advthword ? "Y" : "N");
6491 seq_printf(seq, "\tPTP Offload: %s\n",
6492 priv->dma_cap.ptoen ? "Y" : "N");
6493 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6494 priv->dma_cap.osten ? "Y" : "N");
6495 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6496 priv->dma_cap.pfcen ? "Y" : "N");
6497 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6498 BIT(priv->dma_cap.frpes) << 6);
6499 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6500 BIT(priv->dma_cap.frpbs) << 6);
6501 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6502 priv->dma_cap.frppipe_num);
6503 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6504 priv->dma_cap.nrvf_num ?
6505 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6506 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6507 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6508 seq_printf(seq, "\tDepth of GCL: %lu\n",
6509 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6510 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6511 priv->dma_cap.cbtisel ? "Y" : "N");
6512 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6513 priv->dma_cap.aux_snapshot_n);
6514 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6515 priv->dma_cap.pou_ost_en ? "Y" : "N");
6516 seq_printf(seq, "\tEnhanced DMA: %s\n",
6517 priv->dma_cap.edma ? "Y" : "N");
6518 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6519 priv->dma_cap.ediffc ? "Y" : "N");
6520 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6521 priv->dma_cap.vxn ? "Y" : "N");
6522 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6523 priv->dma_cap.dbgmem ? "Y" : "N");
6524 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6525 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6526 return 0;
6527 }
6528 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6529
6530 /* Use network device events to rename debugfs file entries.
6531 */
6532 static int stmmac_device_event(struct notifier_block *unused,
6533 unsigned long event, void *ptr)
6534 {
6535 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6536 struct stmmac_priv *priv = netdev_priv(dev);
6537
6538 if (dev->netdev_ops != &stmmac_netdev_ops)
6539 goto done;
6540
6541 switch (event) {
6542 case NETDEV_CHANGENAME:
6543 debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6544 break;
6545 }
6546 done:
6547 return NOTIFY_DONE;
6548 }
6549
6550 static struct notifier_block stmmac_notifier = {
6551 .notifier_call = stmmac_device_event,
6552 };
6553
6554 static void stmmac_init_fs(struct net_device *dev)
6555 {
6556 struct stmmac_priv *priv = netdev_priv(dev);
6557
6558 rtnl_lock();
6559
6560 /* Create per netdev entries */
6561 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6562
6563 /* Entry to report DMA RX/TX rings */
6564 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6565 &stmmac_rings_status_fops);
6566
6567 /* Entry to report the DMA HW features */
6568 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6569 &stmmac_dma_cap_fops);
6570
6571 rtnl_unlock();
6572 }
6573
6574 static void stmmac_exit_fs(struct net_device *dev)
6575 {
6576 struct stmmac_priv *priv = netdev_priv(dev);
6577
6578 debugfs_remove_recursive(priv->dbgfs_dir);
6579 }
6580 #endif /* CONFIG_DEBUG_FS */
6581
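/* Bit-by-bit CRC-32 (reflected polynomial 0xedb88320) over the 12 valid
 * bits of a little-endian VLAN ID, used by the caller to index the VLAN
 * hash filter.
 */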
6582 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6583 {
6584 unsigned char *data = (unsigned char *)&vid_le;
6585 unsigned char data_byte = 0;
6586 u32 crc = ~0x0;
6587 u32 temp = 0;
6588 int i, bits;
6589
6590 bits = get_bitmask_order(VLAN_VID_MASK);
6591 for (i = 0; i < bits; i++) {
6592 if ((i % 8) == 0)
6593 data_byte = data[i / 8];
6594
6595 temp = ((crc & 1) ^ data_byte) & 1;
6596 crc >>= 1;
6597 data_byte >>= 1;
6598
6599 if (temp)
6600 crc ^= 0xedb88320;
6601 }
6602
6603 return crc;
6604 }
6605
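/* Rebuild the VLAN filter from the active_vlans bitmap: the upper four
 * bits of the bit-reversed CRC of each VID select a bit in the 16-bit
 * hash. If the hardware has no VLAN hash support, fall back to a single
 * perfect-match entry (VID 0 always passes the filter).
 */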
6606 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6607 {
6608 u32 crc, hash = 0;
6609 u16 pmatch = 0;
6610 int count = 0;
6611 u16 vid = 0;
6612
6613 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6614 __le16 vid_le = cpu_to_le16(vid);
6615 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6616 hash |= (1 << crc);
6617 count++;
6618 }
6619
6620 if (!priv->dma_cap.vlhash) {
6621 if (count > 2) /* VID = 0 always passes filter */
6622 return -EOPNOTSUPP;
6623
6624 pmatch = vid;
6625 hash = 0;
6626 }
6627
6628 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6629 }
6630
6631 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6632 {
6633 struct stmmac_priv *priv = netdev_priv(ndev);
6634 bool is_double = false;
6635 int ret;
6636
6637 ret = pm_runtime_resume_and_get(priv->device);
6638 if (ret < 0)
6639 return ret;
6640
6641 if (be16_to_cpu(proto) == ETH_P_8021AD)
6642 is_double = true;
6643
6644 set_bit(vid, priv->active_vlans);
6645 ret = stmmac_vlan_update(priv, is_double);
6646 if (ret) {
6647 clear_bit(vid, priv->active_vlans);
6648 goto err_pm_put;
6649 }
6650
6651 if (priv->hw->num_vlan) {
6652 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6653 if (ret)
6654 goto err_pm_put;
6655 }
6656 err_pm_put:
6657 pm_runtime_put(priv->device);
6658
6659 return ret;
6660 }
6661
6662 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6663 {
6664 struct stmmac_priv *priv = netdev_priv(ndev);
6665 bool is_double = false;
6666 int ret;
6667
6668 ret = pm_runtime_resume_and_get(priv->device);
6669 if (ret < 0)
6670 return ret;
6671
6672 if (be16_to_cpu(proto) == ETH_P_8021AD)
6673 is_double = true;
6674
6675 clear_bit(vid, priv->active_vlans);
6676
6677 if (priv->hw->num_vlan) {
6678 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6679 if (ret)
6680 goto del_vlan_error;
6681 }
6682
6683 ret = stmmac_vlan_update(priv, is_double);
6684
6685 del_vlan_error:
6686 pm_runtime_put(priv->device);
6687
6688 return ret;
6689 }
6690
6691 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6692 {
6693 struct stmmac_priv *priv = netdev_priv(dev);
6694
6695 switch (bpf->command) {
6696 case XDP_SETUP_PROG:
6697 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6698 case XDP_SETUP_XSK_POOL:
6699 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6700 bpf->xsk.queue_id);
6701 default:
6702 return -EOPNOTSUPP;
6703 }
6704 }
6705
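/* .ndo_xdp_xmit callback: transmit a batch of XDP frames on the per-CPU
 * XDP TX queue, which is shared with the regular transmit path and is
 * therefore taken under the netdev TX queue lock. Returns the number of
 * frames actually queued.
 */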
6706 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6707 struct xdp_frame **frames, u32 flags)
6708 {
6709 struct stmmac_priv *priv = netdev_priv(dev);
6710 int cpu = smp_processor_id();
6711 struct netdev_queue *nq;
6712 int i, nxmit = 0;
6713 int queue;
6714
6715 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6716 return -ENETDOWN;
6717
6718 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6719 return -EINVAL;
6720
6721 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6722 nq = netdev_get_tx_queue(priv->dev, queue);
6723
6724 __netif_tx_lock(nq, cpu);
6725 /* Avoids TX time-out as we are sharing with slow path */
6726 txq_trans_cond_update(nq);
6727
6728 for (i = 0; i < num_frames; i++) {
6729 int res;
6730
6731 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6732 if (res == STMMAC_XDP_CONSUMED)
6733 break;
6734
6735 nxmit++;
6736 }
6737
6738 if (flags & XDP_XMIT_FLUSH) {
6739 stmmac_flush_tx_descriptors(priv, queue);
6740 stmmac_tx_timer_arm(priv, queue);
6741 }
6742
6743 __netif_tx_unlock(nq);
6744
6745 return nxmit;
6746 }
6747
6748 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6749 {
6750 struct stmmac_channel *ch = &priv->channel[queue];
6751 unsigned long flags;
6752
6753 spin_lock_irqsave(&ch->lock, flags);
6754 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6755 spin_unlock_irqrestore(&ch->lock, flags);
6756
6757 stmmac_stop_rx_dma(priv, queue);
6758 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6759 }
6760
6761 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6762 {
6763 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6764 struct stmmac_channel *ch = &priv->channel[queue];
6765 unsigned long flags;
6766 u32 buf_size;
6767 int ret;
6768
6769 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6770 if (ret) {
6771 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6772 return;
6773 }
6774
6775 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6776 if (ret) {
6777 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6778 netdev_err(priv->dev, "Failed to init RX desc.\n");
6779 return;
6780 }
6781
6782 stmmac_reset_rx_queue(priv, queue);
6783 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6784
6785 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6786 rx_q->dma_rx_phy, rx_q->queue_index);
6787
6788 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6789 sizeof(struct dma_desc));
6790 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6791 rx_q->rx_tail_addr, rx_q->queue_index);
6792
6793 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6794 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6795 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6796 buf_size,
6797 rx_q->queue_index);
6798 } else {
6799 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6800 priv->dma_conf.dma_buf_sz,
6801 rx_q->queue_index);
6802 }
6803
6804 stmmac_start_rx_dma(priv, queue);
6805
6806 spin_lock_irqsave(&ch->lock, flags);
6807 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6808 spin_unlock_irqrestore(&ch->lock, flags);
6809 }
6810
6811 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6812 {
6813 struct stmmac_channel *ch = &priv->channel[queue];
6814 unsigned long flags;
6815
6816 spin_lock_irqsave(&ch->lock, flags);
6817 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6818 spin_unlock_irqrestore(&ch->lock, flags);
6819
6820 stmmac_stop_tx_dma(priv, queue);
6821 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6822 }
6823
6824 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6825 {
6826 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6827 struct stmmac_channel *ch = &priv->channel[queue];
6828 unsigned long flags;
6829 int ret;
6830
6831 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6832 if (ret) {
6833 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6834 return;
6835 }
6836
6837 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6838 if (ret) {
6839 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6840 netdev_err(priv->dev, "Failed to init TX desc.\n");
6841 return;
6842 }
6843
6844 stmmac_reset_tx_queue(priv, queue);
6845 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6846
6847 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6848 tx_q->dma_tx_phy, tx_q->queue_index);
6849
6850 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6851 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6852
6853 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6854 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6855 tx_q->tx_tail_addr, tx_q->queue_index);
6856
6857 stmmac_start_tx_dma(priv, queue);
6858
6859 spin_lock_irqsave(&ch->lock, flags);
6860 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6861 spin_unlock_irqrestore(&ch->lock, flags);
6862 }
6863
6864 void stmmac_xdp_release(struct net_device *dev)
6865 {
6866 struct stmmac_priv *priv = netdev_priv(dev);
6867 u32 chan;
6868
6869 /* Ensure tx function is not running */
6870 netif_tx_disable(dev);
6871
6872 /* Disable NAPI process */
6873 stmmac_disable_all_queues(priv);
6874
6875 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6876 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6877
6878 /* Free the IRQ lines */
6879 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6880
6881 /* Stop TX/RX DMA channels */
6882 stmmac_stop_all_dma(priv);
6883
6884 /* Release and free the Rx/Tx resources */
6885 free_dma_desc_resources(priv, &priv->dma_conf);
6886
6887 /* Disable the MAC Rx/Tx */
6888 stmmac_mac_set(priv, priv->ioaddr, false);
6889
6890 /* set trans_start so we don't get spurious
6891 * watchdogs during reset
6892 */
6893 netif_trans_update(dev);
6894 netif_carrier_off(dev);
6895 }
6896
6897 int stmmac_xdp_open(struct net_device *dev)
6898 {
6899 struct stmmac_priv *priv = netdev_priv(dev);
6900 u32 rx_cnt = priv->plat->rx_queues_to_use;
6901 u32 tx_cnt = priv->plat->tx_queues_to_use;
6902 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6903 struct stmmac_rx_queue *rx_q;
6904 struct stmmac_tx_queue *tx_q;
6905 u32 buf_size;
6906 bool sph_en;
6907 u32 chan;
6908 int ret;
6909
6910 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6911 if (ret < 0) {
6912 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6913 __func__);
6914 goto dma_desc_error;
6915 }
6916
6917 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6918 if (ret < 0) {
6919 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6920 __func__);
6921 goto init_error;
6922 }
6923
6924 stmmac_reset_queues_param(priv);
6925
6926 /* DMA CSR Channel configuration */
6927 for (chan = 0; chan < dma_csr_ch; chan++) {
6928 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6929 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6930 }
6931
6932 /* Adjust Split header */
6933 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6934
6935 /* DMA RX Channel Configuration */
6936 for (chan = 0; chan < rx_cnt; chan++) {
6937 rx_q = &priv->dma_conf.rx_queue[chan];
6938
6939 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6940 rx_q->dma_rx_phy, chan);
6941
6942 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6943 (rx_q->buf_alloc_num *
6944 sizeof(struct dma_desc));
6945 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6946 rx_q->rx_tail_addr, chan);
6947
6948 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6949 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6950 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6951 buf_size,
6952 rx_q->queue_index);
6953 } else {
6954 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6955 priv->dma_conf.dma_buf_sz,
6956 rx_q->queue_index);
6957 }
6958
6959 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6960 }
6961
6962 /* DMA TX Channel Configuration */
6963 for (chan = 0; chan < tx_cnt; chan++) {
6964 tx_q = &priv->dma_conf.tx_queue[chan];
6965
6966 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6967 tx_q->dma_tx_phy, chan);
6968
6969 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6970 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6971 tx_q->tx_tail_addr, chan);
6972
6973 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6974 tx_q->txtimer.function = stmmac_tx_timer;
6975 }
6976
6977 /* Enable the MAC Rx/Tx */
6978 stmmac_mac_set(priv, priv->ioaddr, true);
6979
6980 /* Start Rx & Tx DMA Channels */
6981 stmmac_start_all_dma(priv);
6982
6983 ret = stmmac_request_irq(dev);
6984 if (ret)
6985 goto irq_error;
6986
6987 /* Enable NAPI process*/
6988 stmmac_enable_all_queues(priv);
6989 netif_carrier_on(dev);
6990 netif_tx_start_all_queues(dev);
6991 stmmac_enable_all_dma_irq(priv);
6992
6993 return 0;
6994
6995 irq_error:
6996 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6997 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6998
6999 stmmac_hw_teardown(dev);
7000 init_error:
7001 free_dma_desc_resources(priv, &priv->dma_conf);
7002 dma_desc_error:
7003 return ret;
7004 }
7005
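/**
 * stmmac_xsk_wakeup - ndo_xsk_wakeup callback for AF_XDP sockets
 * @dev: net device pointer
 * @queue: queue index to wake up
 * @flags: XDP_WAKEUP_* flags (not used by this driver)
 * Description: schedule the RX/TX NAPI of the channel backing an XSK pool
 * so that pending work is processed.
 * Return: 0 on success, -ENETDOWN or -EINVAL on invalid state or arguments.
 */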
7006 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7007 {
7008 struct stmmac_priv *priv = netdev_priv(dev);
7009 struct stmmac_rx_queue *rx_q;
7010 struct stmmac_tx_queue *tx_q;
7011 struct stmmac_channel *ch;
7012
7013 if (test_bit(STMMAC_DOWN, &priv->state) ||
7014 !netif_carrier_ok(priv->dev))
7015 return -ENETDOWN;
7016
7017 if (!stmmac_xdp_is_enabled(priv))
7018 return -EINVAL;
7019
7020 if (queue >= priv->plat->rx_queues_to_use ||
7021 queue >= priv->plat->tx_queues_to_use)
7022 return -EINVAL;
7023
7024 rx_q = &priv->dma_conf.rx_queue[queue];
7025 tx_q = &priv->dma_conf.tx_queue[queue];
7026 ch = &priv->channel[queue];
7027
7028 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7029 return -EINVAL;
7030
7031 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7032 /* EQoS does not have a per-DMA channel SW interrupt,
7033 * so we schedule the RX/TX NAPI straight away.
7034 */
7035 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7036 __napi_schedule(&ch->rxtx_napi);
7037 }
7038
7039 return 0;
7040 }
7041
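/**
 * stmmac_get_stats64 - ndo_get_stats64 callback
 * @dev: net device pointer
 * @stats: structure to fill with the accumulated statistics
 * Description: sum the per-queue packet and byte counters, reading them
 * under their u64_stats sync points, and copy the error counters from
 * priv->xstats.
 */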
7042 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7043 {
7044 struct stmmac_priv *priv = netdev_priv(dev);
7045 u32 tx_cnt = priv->plat->tx_queues_to_use;
7046 u32 rx_cnt = priv->plat->rx_queues_to_use;
7047 unsigned int start;
7048 int q;
7049
7050 for (q = 0; q < tx_cnt; q++) {
7051 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7052 u64 tx_packets;
7053 u64 tx_bytes;
7054
7055 do {
7056 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7057 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
7058 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7059 do {
7060 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7061 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7062 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7063
7064 stats->tx_packets += tx_packets;
7065 stats->tx_bytes += tx_bytes;
7066 }
7067
7068 for (q = 0; q < rx_cnt; q++) {
7069 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7070 u64 rx_packets;
7071 u64 rx_bytes;
7072
7073 do {
7074 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7075 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7076 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
7077 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7078
7079 stats->rx_packets += rx_packets;
7080 stats->rx_bytes += rx_bytes;
7081 }
7082
7083 stats->rx_dropped = priv->xstats.rx_dropped;
7084 stats->rx_errors = priv->xstats.rx_errors;
7085 stats->tx_dropped = priv->xstats.tx_dropped;
7086 stats->tx_errors = priv->xstats.tx_errors;
7087 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7088 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7089 stats->rx_length_errors = priv->xstats.rx_length;
7090 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7091 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7092 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7093 }
7094
7095 static const struct net_device_ops stmmac_netdev_ops = {
7096 .ndo_open = stmmac_open,
7097 .ndo_start_xmit = stmmac_xmit,
7098 .ndo_stop = stmmac_release,
7099 .ndo_change_mtu = stmmac_change_mtu,
7100 .ndo_fix_features = stmmac_fix_features,
7101 .ndo_set_features = stmmac_set_features,
7102 .ndo_set_rx_mode = stmmac_set_rx_mode,
7103 .ndo_tx_timeout = stmmac_tx_timeout,
7104 .ndo_eth_ioctl = stmmac_ioctl,
7105 .ndo_get_stats64 = stmmac_get_stats64,
7106 .ndo_setup_tc = stmmac_setup_tc,
7107 .ndo_select_queue = stmmac_select_queue,
7108 .ndo_set_mac_address = stmmac_set_mac_address,
7109 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7110 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7111 .ndo_bpf = stmmac_bpf,
7112 .ndo_xdp_xmit = stmmac_xdp_xmit,
7113 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7114 };
7115
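/**
 * stmmac_reset_subtask - restart the interface after a fatal error
 * @priv: driver private structure
 * Description: if a reset was requested and the interface is not already
 * down, close and re-open the device under the rtnl lock while holding the
 * STMMAC_RESETING/STMMAC_DOWN state bits.
 */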
7116 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7117 {
7118 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7119 return;
7120 if (test_bit(STMMAC_DOWN, &priv->state))
7121 return;
7122
7123 netdev_err(priv->dev, "Reset adapter.\n");
7124
7125 rtnl_lock();
7126 netif_trans_update(priv->dev);
7127 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7128 usleep_range(1000, 2000);
7129
7130 set_bit(STMMAC_DOWN, &priv->state);
7131 dev_close(priv->dev);
7132 dev_open(priv->dev, NULL);
7133 clear_bit(STMMAC_DOWN, &priv->state);
7134 clear_bit(STMMAC_RESETING, &priv->state);
7135 rtnl_unlock();
7136 }
7137
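/**
 * stmmac_service_task - run deferred driver work
 * @work: the work_struct embedded in the driver private structure
 * Description: execute the reset subtask and then clear the
 * STMMAC_SERVICE_SCHED flag so the task can be scheduled again.
 */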
7138 static void stmmac_service_task(struct work_struct *work)
7139 {
7140 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7141 service_task);
7142
7143 stmmac_reset_subtask(priv);
7144 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7145 }
7146
7147 /**
7148 * stmmac_hw_init - Init the MAC device
7149 * @priv: driver private structure
7150 * Description: this function configures the MAC device according to
7151 * some platform parameters or the HW capability register. It prepares the
7152 * driver to use either ring or chain mode and to set up either enhanced or
7153 * normal descriptors.
7154 */
7155 static int stmmac_hw_init(struct stmmac_priv *priv)
7156 {
7157 int ret;
7158
7159 /* dwmac-sun8i only works in chain mode */
7160 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7161 chain_mode = 1;
7162 priv->chain_mode = chain_mode;
7163
7164 /* Initialize HW Interface */
7165 ret = stmmac_hwif_init(priv);
7166 if (ret)
7167 return ret;
7168
7169 /* Get the HW capability (GMAC cores newer than 3.50a) */
7170 priv->hw_cap_support = stmmac_get_hw_features(priv);
7171 if (priv->hw_cap_support) {
7172 dev_info(priv->device, "DMA HW capability register supported\n");
7173
7174 /* We can override some gmac/dma configuration fields, e.g.
7175 * enh_desc and tx_coe (normally passed through the
7176 * platform), with the values from the HW capability
7177 * register (if supported).
7178 */
7179 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7180 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7181 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7182 priv->hw->pmt = priv->plat->pmt;
7183 if (priv->dma_cap.hash_tb_sz) {
7184 priv->hw->multicast_filter_bins =
7185 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7186 priv->hw->mcast_bits_log2 =
7187 ilog2(priv->hw->multicast_filter_bins);
7188 }
7189
7190 /* TXCOE doesn't work in thresh DMA mode */
7191 if (priv->plat->force_thresh_dma_mode)
7192 priv->plat->tx_coe = 0;
7193 else
7194 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7195
7196 /* In case of GMAC4 rx_coe is from HW cap register. */
7197 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7198
7199 if (priv->dma_cap.rx_coe_type2)
7200 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7201 else if (priv->dma_cap.rx_coe_type1)
7202 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7203
7204 } else {
7205 dev_info(priv->device, "No HW DMA feature register supported\n");
7206 }
7207
7208 if (priv->plat->rx_coe) {
7209 priv->hw->rx_csum = priv->plat->rx_coe;
7210 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7211 if (priv->synopsys_id < DWMAC_CORE_4_00)
7212 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7213 }
7214 if (priv->plat->tx_coe)
7215 dev_info(priv->device, "TX Checksum insertion supported\n");
7216
7217 if (priv->plat->pmt) {
7218 dev_info(priv->device, "Wake-Up On Lan supported\n");
7219 device_set_wakeup_capable(priv->device, 1);
7220 }
7221
7222 if (priv->dma_cap.tsoen)
7223 dev_info(priv->device, "TSO supported\n");
7224
7225 if (priv->dma_cap.number_rx_queues &&
7226 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7227 dev_warn(priv->device,
7228 "Number of Rx queues (%u) exceeds dma capability\n",
7229 priv->plat->rx_queues_to_use);
7230 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7231 }
7232 if (priv->dma_cap.number_tx_queues &&
7233 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7234 dev_warn(priv->device,
7235 "Number of Tx queues (%u) exceeds dma capability\n",
7236 priv->plat->tx_queues_to_use);
7237 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7238 }
7239
7240 if (priv->dma_cap.rx_fifo_size &&
7241 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7242 dev_warn(priv->device,
7243 "Rx FIFO size (%u) exceeds dma capability\n",
7244 priv->plat->rx_fifo_size);
7245 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7246 }
7247 if (priv->dma_cap.tx_fifo_size &&
7248 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7249 dev_warn(priv->device,
7250 "Tx FIFO size (%u) exceeds dma capability\n",
7251 priv->plat->tx_fifo_size);
7252 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7253 }
7254
7255 priv->hw->vlan_fail_q_en =
7256 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7257 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7258
7259 /* Run HW quirks, if any */
7260 if (priv->hwif_quirks) {
7261 ret = priv->hwif_quirks(priv);
7262 if (ret)
7263 return ret;
7264 }
7265
7266 /* Rx Watchdog is available in cores newer than 3.40.
7267 * In some cases, for example on buggy HW, this feature
7268 * has to be disabled; this can be done by passing the
7269 * riwt_off field from the platform.
7270 */
7271 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7272 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7273 priv->use_riwt = 1;
7274 dev_info(priv->device,
7275 "Enable RX Mitigation via HW Watchdog Timer\n");
7276 }
7277
7278 return 0;
7279 }
7280
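/**
 * stmmac_napi_add - register the NAPI contexts for all channels
 * @dev: net device pointer
 * Description: for each DMA channel register the RX and/or TX NAPI
 * instances and, when the channel has both an RX and a TX queue, the
 * combined RX/TX NAPI instance as well.
 */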
7281 static void stmmac_napi_add(struct net_device *dev)
7282 {
7283 struct stmmac_priv *priv = netdev_priv(dev);
7284 u32 queue, maxq;
7285
7286 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7287
7288 for (queue = 0; queue < maxq; queue++) {
7289 struct stmmac_channel *ch = &priv->channel[queue];
7290
7291 ch->priv_data = priv;
7292 ch->index = queue;
7293 spin_lock_init(&ch->lock);
7294
7295 if (queue < priv->plat->rx_queues_to_use) {
7296 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7297 }
7298 if (queue < priv->plat->tx_queues_to_use) {
7299 netif_napi_add_tx(dev, &ch->tx_napi,
7300 stmmac_napi_poll_tx);
7301 }
7302 if (queue < priv->plat->rx_queues_to_use &&
7303 queue < priv->plat->tx_queues_to_use) {
7304 netif_napi_add(dev, &ch->rxtx_napi,
7305 stmmac_napi_poll_rxtx);
7306 }
7307 }
7308 }
7309
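/**
 * stmmac_napi_del - unregister the NAPI contexts for all channels
 * @dev: net device pointer
 * Description: counterpart of stmmac_napi_add(), removing the RX, TX and
 * combined RX/TX NAPI instances of each channel.
 */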
7310 static void stmmac_napi_del(struct net_device *dev)
7311 {
7312 struct stmmac_priv *priv = netdev_priv(dev);
7313 u32 queue, maxq;
7314
7315 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7316
7317 for (queue = 0; queue < maxq; queue++) {
7318 struct stmmac_channel *ch = &priv->channel[queue];
7319
7320 if (queue < priv->plat->rx_queues_to_use)
7321 netif_napi_del(&ch->rx_napi);
7322 if (queue < priv->plat->tx_queues_to_use)
7323 netif_napi_del(&ch->tx_napi);
7324 if (queue < priv->plat->rx_queues_to_use &&
7325 queue < priv->plat->tx_queues_to_use) {
7326 netif_napi_del(&ch->rxtx_napi);
7327 }
7328 }
7329 }
7330
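/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: net device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: close the interface if it is running, re-register the NAPI
 * contexts for the new counts, refresh the default RSS table (unless it was
 * user-configured) and re-open the interface.
 * Return: 0 on success, otherwise the error returned by stmmac_open().
 */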
7331 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7332 {
7333 struct stmmac_priv *priv = netdev_priv(dev);
7334 int ret = 0, i;
7335
7336 if (netif_running(dev))
7337 stmmac_release(dev);
7338
7339 stmmac_napi_del(dev);
7340
7341 priv->plat->rx_queues_to_use = rx_cnt;
7342 priv->plat->tx_queues_to_use = tx_cnt;
7343 if (!netif_is_rxfh_configured(dev))
7344 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7345 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7346 rx_cnt);
7347
7348 stmmac_napi_add(dev);
7349
7350 if (netif_running(dev))
7351 ret = stmmac_open(dev);
7352
7353 return ret;
7354 }
7355
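/**
 * stmmac_reinit_ringparam - change the RX/TX descriptor ring sizes
 * @dev: net device pointer
 * @rx_size: new number of RX descriptors per ring
 * @tx_size: new number of TX descriptors per ring
 * Description: close the interface if it is running, update the DMA ring
 * sizes and re-open it so the rings are re-allocated with the new sizes.
 * Return: 0 on success, otherwise the error returned by stmmac_open().
 */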
7356 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7357 {
7358 struct stmmac_priv *priv = netdev_priv(dev);
7359 int ret = 0;
7360
7361 if (netif_running(dev))
7362 stmmac_release(dev);
7363
7364 priv->dma_conf.dma_rx_size = rx_size;
7365 priv->dma_conf.dma_tx_size = tx_size;
7366
7367 if (netif_running(dev))
7368 ret = stmmac_open(dev);
7369
7370 return ret;
7371 }
7372
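/**
 * stmmac_xdp_rx_timestamp - XDP metadata callback for the RX HW timestamp
 * @_ctx: XDP metadata context (cast to struct stmmac_xdp_buff)
 * @timestamp: where the CDC-corrected hardware timestamp is stored
 * Description: on GMAC4/XGMAC the timestamp is read from the context (next)
 * descriptor, otherwise from the descriptor itself.
 * Return: 0 on success, -ENODATA if RX timestamping is disabled or no
 * timestamp is available.
 */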
7373 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7374 {
7375 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7376 struct dma_desc *desc_contains_ts = ctx->desc;
7377 struct stmmac_priv *priv = ctx->priv;
7378 struct dma_desc *ndesc = ctx->ndesc;
7379 struct dma_desc *desc = ctx->desc;
7380 u64 ns = 0;
7381
7382 if (!priv->hwts_rx_en)
7383 return -ENODATA;
7384
7385 /* For GMAC4, the valid timestamp is from CTX next desc. */
7386 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7387 desc_contains_ts = ndesc;
7388
7389 /* Check if timestamp is available */
7390 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7391 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7392 ns -= priv->plat->cdc_error_adj;
7393 *timestamp = ns_to_ktime(ns);
7394 return 0;
7395 }
7396
7397 return -ENODATA;
7398 }
7399
7400 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7401 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7402 };
7403
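/* Typical call sequence from a glue driver, as an illustrative sketch only
 * (plat_dat setup, clocks and error handling omitted; "base", "irq" and
 * "pdev" are placeholders for the glue driver's own resources):
 *
 *	struct stmmac_resources res = {};
 *
 *	res.addr = base;	// ioremapped MAC register base
 *	res.irq = irq;		// main MAC/DMA interrupt
 *	return stmmac_dvr_probe(&pdev->dev, plat_dat, &res);
 */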
7404 /**
7405 * stmmac_dvr_probe
7406 * @device: device pointer
7407 * @plat_dat: platform data pointer
7408 * @res: stmmac resource pointer
7409 * Description: this is the main probe function; it calls alloc_etherdev,
7410 * allocates the private structure and sets up the device.
7411 * Return:
7412 * 0 on success, otherwise a negative errno.
7413 */
7414 int stmmac_dvr_probe(struct device *device,
7415 struct plat_stmmacenet_data *plat_dat,
7416 struct stmmac_resources *res)
7417 {
7418 struct net_device *ndev = NULL;
7419 struct stmmac_priv *priv;
7420 u32 rxq;
7421 int i, ret = 0;
7422
7423 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7424 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7425 if (!ndev)
7426 return -ENOMEM;
7427
7428 SET_NETDEV_DEV(ndev, device);
7429
7430 priv = netdev_priv(ndev);
7431 priv->device = device;
7432 priv->dev = ndev;
7433
7434 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7435 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7436 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7437 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7438 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7439 }
7440
7441 priv->xstats.pcpu_stats =
7442 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7443 if (!priv->xstats.pcpu_stats)
7444 return -ENOMEM;
7445
7446 stmmac_set_ethtool_ops(ndev);
7447 priv->pause = pause;
7448 priv->plat = plat_dat;
7449 priv->ioaddr = res->addr;
7450 priv->dev->base_addr = (unsigned long)res->addr;
7451 priv->plat->dma_cfg->multi_msi_en =
7452 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7453
7454 priv->dev->irq = res->irq;
7455 priv->wol_irq = res->wol_irq;
7456 priv->lpi_irq = res->lpi_irq;
7457 priv->sfty_irq = res->sfty_irq;
7458 priv->sfty_ce_irq = res->sfty_ce_irq;
7459 priv->sfty_ue_irq = res->sfty_ue_irq;
7460 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7461 priv->rx_irq[i] = res->rx_irq[i];
7462 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7463 priv->tx_irq[i] = res->tx_irq[i];
7464
7465 if (!is_zero_ether_addr(res->mac))
7466 eth_hw_addr_set(priv->dev, res->mac);
7467
7468 dev_set_drvdata(device, priv->dev);
7469
7470 /* Verify driver arguments */
7471 stmmac_verify_args();
7472
7473 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7474 if (!priv->af_xdp_zc_qps)
7475 return -ENOMEM;
7476
7477 /* Allocate workqueue */
7478 priv->wq = create_singlethread_workqueue("stmmac_wq");
7479 if (!priv->wq) {
7480 dev_err(priv->device, "failed to create workqueue\n");
7481 ret = -ENOMEM;
7482 goto error_wq_init;
7483 }
7484
7485 INIT_WORK(&priv->service_task, stmmac_service_task);
7486
7487 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7488
7489 /* Override with kernel parameters if supplied XXX CRS XXX
7490 * this needs to have multiple instances
7491 */
7492 if ((phyaddr >= 0) && (phyaddr <= 31))
7493 priv->plat->phy_addr = phyaddr;
7494
7495 if (priv->plat->stmmac_rst) {
7496 ret = reset_control_assert(priv->plat->stmmac_rst);
7497 reset_control_deassert(priv->plat->stmmac_rst);
7498 /* Some reset controllers have only a reset callback instead of
7499 * an assert + deassert callback pair.
7500 */
7501 if (ret == -ENOTSUPP)
7502 reset_control_reset(priv->plat->stmmac_rst);
7503 }
7504
7505 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7506 if (ret == -ENOTSUPP)
7507 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7508 ERR_PTR(ret));
7509
7510 /* Wait a bit for the reset to take effect */
7511 udelay(10);
7512
7513 /* Init MAC and get the capabilities */
7514 ret = stmmac_hw_init(priv);
7515 if (ret)
7516 goto error_hw_init;
7517
7518 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7519 */
7520 if (priv->synopsys_id < DWMAC_CORE_5_20)
7521 priv->plat->dma_cfg->dche = false;
7522
7523 stmmac_check_ether_addr(priv);
7524
7525 ndev->netdev_ops = &stmmac_netdev_ops;
7526
7527 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7528 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7529
7530 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7531 NETIF_F_RXCSUM;
7532 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7533 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7534
7535 ret = stmmac_tc_init(priv, priv);
7536 if (!ret) {
7537 ndev->hw_features |= NETIF_F_HW_TC;
7538 }
7539
7540 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7541 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7542 if (priv->plat->has_gmac4)
7543 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7544 priv->tso = true;
7545 dev_info(priv->device, "TSO feature enabled\n");
7546 }
7547
7548 if (priv->dma_cap.sphen &&
7549 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7550 ndev->hw_features |= NETIF_F_GRO;
7551 priv->sph_cap = true;
7552 priv->sph = priv->sph_cap;
7553 dev_info(priv->device, "SPH feature enabled\n");
7554 }
7555
7556 /* Ideally our host DMA address width is the same as for the
7557 * device. However, it may differ and then we have to use our
7558 * host DMA width for allocation and the device DMA width for
7559 * register handling.
7560 */
7561 if (priv->plat->host_dma_width)
7562 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7563 else
7564 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7565
7566 if (priv->dma_cap.host_dma_width) {
7567 ret = dma_set_mask_and_coherent(device,
7568 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7569 if (!ret) {
7570 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7571 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7572
7573 /*
7574 * If more than 32 bits can be addressed, make sure to
7575 * enable enhanced addressing mode.
7576 */
7577 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7578 priv->plat->dma_cfg->eame = true;
7579 } else {
7580 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7581 if (ret) {
7582 dev_err(priv->device, "Failed to set DMA Mask\n");
7583 goto error_hw_init;
7584 }
7585
7586 priv->dma_cap.host_dma_width = 32;
7587 }
7588 }
7589
7590 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7591 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7592 #ifdef STMMAC_VLAN_TAG_USED
7593 /* Both mac100 and gmac support receive VLAN tag detection */
7594 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7595 if (priv->plat->has_gmac4) {
7596 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7597 priv->hw->hw_vlan_en = true;
7598 }
7599 if (priv->dma_cap.vlhash) {
7600 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7601 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7602 }
7603 if (priv->dma_cap.vlins) {
7604 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7605 if (priv->dma_cap.dvlan)
7606 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7607 }
7608 #endif
7609 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7610
7611 priv->xstats.threshold = tc;
7612
7613 /* Initialize RSS */
7614 rxq = priv->plat->rx_queues_to_use;
7615 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7616 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7617 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7618
7619 if (priv->dma_cap.rssen && priv->plat->rss_en)
7620 ndev->features |= NETIF_F_RXHASH;
7621
7622 ndev->vlan_features |= ndev->features;
7623
7624 /* MTU range: 46 - hw-specific max */
7625 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7626 if (priv->plat->has_xgmac)
7627 ndev->max_mtu = XGMAC_JUMBO_LEN;
7628 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7629 ndev->max_mtu = JUMBO_LEN;
7630 else
7631 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7632 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7633 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7634 */
7635 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7636 (priv->plat->maxmtu >= ndev->min_mtu))
7637 ndev->max_mtu = priv->plat->maxmtu;
7638 else if (priv->plat->maxmtu < ndev->min_mtu)
7639 dev_warn(priv->device,
7640 "%s: warning: maxmtu having invalid value (%d)\n",
7641 __func__, priv->plat->maxmtu);
7642
7643 if (flow_ctrl)
7644 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
7645
7646 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7647
7648 /* Setup channels NAPI */
7649 stmmac_napi_add(ndev);
7650
7651 mutex_init(&priv->lock);
7652
7653 stmmac_fpe_init(priv);
7654
7655 /* If a specific clk_csr value is passed from the platform,
7656 * the CSR Clock Range selection cannot be changed at run-time
7657 * and is fixed. Otherwise the driver will try to set the
7658 * MDC clock dynamically according to the actual csr
7659 * clock input.
7660 */
7661 if (priv->plat->clk_csr >= 0)
7662 priv->clk_csr = priv->plat->clk_csr;
7663 else
7664 stmmac_clk_csr_set(priv);
7665
7666 stmmac_check_pcs_mode(priv);
7667
7668 pm_runtime_get_noresume(device);
7669 pm_runtime_set_active(device);
7670 if (!pm_runtime_enabled(device))
7671 pm_runtime_enable(device);
7672
7673 ret = stmmac_mdio_register(ndev);
7674 if (ret < 0) {
7675 dev_err_probe(priv->device, ret,
7676 "MDIO bus (id: %d) registration failed\n",
7677 priv->plat->bus_id);
7678 goto error_mdio_register;
7679 }
7680
7681 if (priv->plat->speed_mode_2500)
7682 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7683
7684 ret = stmmac_pcs_setup(ndev);
7685 if (ret)
7686 goto error_pcs_setup;
7687
7688 ret = stmmac_phy_setup(priv);
7689 if (ret) {
7690 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7691 goto error_phy_setup;
7692 }
7693
7694 ret = register_netdev(ndev);
7695 if (ret) {
7696 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7697 __func__, ret);
7698 goto error_netdev_register;
7699 }
7700
7701 #ifdef CONFIG_DEBUG_FS
7702 stmmac_init_fs(ndev);
7703 #endif
7704
7705 if (priv->plat->dump_debug_regs)
7706 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7707
7708 /* Let pm_runtime_put() disable the clocks.
7709 * If CONFIG_PM is not enabled, the clocks will stay powered.
7710 */
7711 pm_runtime_put(device);
7712
7713 return ret;
7714
7715 error_netdev_register:
7716 phylink_destroy(priv->phylink);
7717 error_phy_setup:
7718 stmmac_pcs_clean(ndev);
7719 error_pcs_setup:
7720 stmmac_mdio_unregister(ndev);
7721 error_mdio_register:
7722 stmmac_napi_del(ndev);
7723 error_hw_init:
7724 destroy_workqueue(priv->wq);
7725 error_wq_init:
7726 bitmap_free(priv->af_xdp_zc_qps);
7727
7728 return ret;
7729 }
7730 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7731
7732 /**
7733 * stmmac_dvr_remove
7734 * @dev: device pointer
7735 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7736 * changes the link status and releases the DMA descriptor rings.
7737 */
7738 void stmmac_dvr_remove(struct device *dev)
7739 {
7740 struct net_device *ndev = dev_get_drvdata(dev);
7741 struct stmmac_priv *priv = netdev_priv(ndev);
7742
7743 netdev_info(priv->dev, "%s: removing driver", __func__);
7744
7745 pm_runtime_get_sync(dev);
7746
7747 stmmac_stop_all_dma(priv);
7748 stmmac_mac_set(priv, priv->ioaddr, false);
7749 unregister_netdev(ndev);
7750
7751 #ifdef CONFIG_DEBUG_FS
7752 stmmac_exit_fs(ndev);
7753 #endif
7754 phylink_destroy(priv->phylink);
7755 if (priv->plat->stmmac_rst)
7756 reset_control_assert(priv->plat->stmmac_rst);
7757 reset_control_assert(priv->plat->stmmac_ahb_rst);
7758
7759 stmmac_pcs_clean(ndev);
7760 stmmac_mdio_unregister(ndev);
7761
7762 destroy_workqueue(priv->wq);
7763 mutex_destroy(&priv->lock);
7764 bitmap_free(priv->af_xdp_zc_qps);
7765
7766 pm_runtime_disable(dev);
7767 pm_runtime_put_noidle(dev);
7768 }
7769 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7770
7771 /**
7772 * stmmac_suspend - suspend callback
7773 * @dev: device pointer
7774 * Description: this function suspends the device; it is called by the
7775 * platform driver to stop the network queue, release the resources,
7776 * program the PMT register (for WoL) and clean up the driver resources.
7777 */
7778 int stmmac_suspend(struct device *dev)
7779 {
7780 struct net_device *ndev = dev_get_drvdata(dev);
7781 struct stmmac_priv *priv = netdev_priv(ndev);
7782 u32 chan;
7783
7784 if (!ndev || !netif_running(ndev))
7785 return 0;
7786
7787 mutex_lock(&priv->lock);
7788
7789 netif_device_detach(ndev);
7790
7791 stmmac_disable_all_queues(priv);
7792
7793 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7794 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7795
7796 if (priv->eee_sw_timer_en) {
7797 priv->tx_path_in_lpi_mode = false;
7798 del_timer_sync(&priv->eee_ctrl_timer);
7799 }
7800
7801 /* Stop TX/RX DMA */
7802 stmmac_stop_all_dma(priv);
7803
7804 if (priv->plat->serdes_powerdown)
7805 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7806
7807 /* Enable Power down mode by programming the PMT regs */
7808 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7809 stmmac_pmt(priv, priv->hw, priv->wolopts);
7810 priv->irq_wake = 1;
7811 } else {
7812 stmmac_mac_set(priv, priv->ioaddr, false);
7813 pinctrl_pm_select_sleep_state(priv->device);
7814 }
7815
7816 mutex_unlock(&priv->lock);
7817
7818 rtnl_lock();
7819 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7820 phylink_suspend(priv->phylink, true);
7821 } else {
7822 if (device_may_wakeup(priv->device))
7823 phylink_speed_down(priv->phylink, false);
7824 phylink_suspend(priv->phylink, false);
7825 }
7826 rtnl_unlock();
7827
7828 if (stmmac_fpe_supported(priv))
7829 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7830
7831 priv->speed = SPEED_UNKNOWN;
7832 return 0;
7833 }
7834 EXPORT_SYMBOL_GPL(stmmac_suspend);
7835
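/**
 * stmmac_reset_rx_queue - reset the SW state of an RX queue
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: reset the current and dirty RX descriptor indices.
 */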
7836 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7837 {
7838 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7839
7840 rx_q->cur_rx = 0;
7841 rx_q->dirty_rx = 0;
7842 }
7843
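/**
 * stmmac_reset_tx_queue - reset the SW state of a TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: reset the descriptor indices and the cached TSO MSS, and
 * clear the BQL state of the corresponding netdev TX queue.
 */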
7844 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7845 {
7846 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7847
7848 tx_q->cur_tx = 0;
7849 tx_q->dirty_tx = 0;
7850 tx_q->mss = 0;
7851
7852 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7853 }
7854
7855 /**
7856 * stmmac_reset_queues_param - reset queue parameters
7857 * @priv: driver private structure
7858 */
7859 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7860 {
7861 u32 rx_cnt = priv->plat->rx_queues_to_use;
7862 u32 tx_cnt = priv->plat->tx_queues_to_use;
7863 u32 queue;
7864
7865 for (queue = 0; queue < rx_cnt; queue++)
7866 stmmac_reset_rx_queue(priv, queue);
7867
7868 for (queue = 0; queue < tx_cnt; queue++)
7869 stmmac_reset_tx_queue(priv, queue);
7870 }
7871
7872 /**
7873 * stmmac_resume - resume callback
7874 * @dev: device pointer
7875 * Description: on resume this function is invoked to set up the DMA and CORE
7876 * in a usable state.
7877 */
7878 int stmmac_resume(struct device *dev)
7879 {
7880 struct net_device *ndev = dev_get_drvdata(dev);
7881 struct stmmac_priv *priv = netdev_priv(ndev);
7882 int ret;
7883
7884 if (!netif_running(ndev))
7885 return 0;
7886
7887 /* The Power Down bit in the PM register is cleared
7888 * automatically as soon as a magic packet or a Wake-up frame
7889 * is received. Anyway, it's better to manually clear
7890 * this bit because it can generate problems while resuming
7891 * from other devices (e.g. serial console).
7892 */
7893 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7894 mutex_lock(&priv->lock);
7895 stmmac_pmt(priv, priv->hw, 0);
7896 mutex_unlock(&priv->lock);
7897 priv->irq_wake = 0;
7898 } else {
7899 pinctrl_pm_select_default_state(priv->device);
7900 /* reset the phy so that it's ready */
7901 if (priv->mii)
7902 stmmac_mdio_reset(priv->mii);
7903 }
7904
7905 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7906 priv->plat->serdes_powerup) {
7907 ret = priv->plat->serdes_powerup(ndev,
7908 priv->plat->bsp_priv);
7909
7910 if (ret < 0)
7911 return ret;
7912 }
7913
7914 rtnl_lock();
7915 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7916 phylink_resume(priv->phylink);
7917 } else {
7918 phylink_resume(priv->phylink);
7919 if (device_may_wakeup(priv->device))
7920 phylink_speed_up(priv->phylink);
7921 }
7922 rtnl_unlock();
7923
7924 rtnl_lock();
7925 mutex_lock(&priv->lock);
7926
7927 stmmac_reset_queues_param(priv);
7928
7929 stmmac_free_tx_skbufs(priv);
7930 stmmac_clear_descriptors(priv, &priv->dma_conf);
7931
7932 stmmac_hw_setup(ndev, false);
7933 stmmac_init_coalesce(priv);
7934 stmmac_set_rx_mode(ndev);
7935
7936 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7937
7938 stmmac_enable_all_queues(priv);
7939 stmmac_enable_all_dma_irq(priv);
7940
7941 mutex_unlock(&priv->lock);
7942 rtnl_unlock();
7943
7944 netif_device_attach(ndev);
7945
7946 return 0;
7947 }
7948 EXPORT_SYMBOL_GPL(stmmac_resume);
7949
7950 #ifndef MODULE
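/* Parse the "stmmaceth=" boot option. Illustrative example only (the values
 * are arbitrary, the keys match the parameters handled below):
 *
 *	stmmaceth=watchdog:4000,debug:16,phyaddr:1,pause:65535,chain_mode:1
 */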
7951 static int __init stmmac_cmdline_opt(char *str)
7952 {
7953 char *opt;
7954
7955 if (!str || !*str)
7956 return 1;
7957 while ((opt = strsep(&str, ",")) != NULL) {
7958 if (!strncmp(opt, "debug:", 6)) {
7959 if (kstrtoint(opt + 6, 0, &debug))
7960 goto err;
7961 } else if (!strncmp(opt, "phyaddr:", 8)) {
7962 if (kstrtoint(opt + 8, 0, &phyaddr))
7963 goto err;
7964 } else if (!strncmp(opt, "buf_sz:", 7)) {
7965 if (kstrtoint(opt + 7, 0, &buf_sz))
7966 goto err;
7967 } else if (!strncmp(opt, "tc:", 3)) {
7968 if (kstrtoint(opt + 3, 0, &tc))
7969 goto err;
7970 } else if (!strncmp(opt, "watchdog:", 9)) {
7971 if (kstrtoint(opt + 9, 0, &watchdog))
7972 goto err;
7973 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7974 if (kstrtoint(opt + 10, 0, &flow_ctrl))
7975 goto err;
7976 } else if (!strncmp(opt, "pause:", 6)) {
7977 if (kstrtoint(opt + 6, 0, &pause))
7978 goto err;
7979 } else if (!strncmp(opt, "eee_timer:", 10)) {
7980 if (kstrtoint(opt + 10, 0, &eee_timer))
7981 goto err;
7982 } else if (!strncmp(opt, "chain_mode:", 11)) {
7983 if (kstrtoint(opt + 11, 0, &chain_mode))
7984 goto err;
7985 }
7986 }
7987 return 1;
7988
7989 err:
7990 pr_err("%s: ERROR broken module parameter conversion", __func__);
7991 return 1;
7992 }
7993
7994 __setup("stmmaceth=", stmmac_cmdline_opt);
7995 #endif /* MODULE */
7996
7997 static int __init stmmac_init(void)
7998 {
7999 #ifdef CONFIG_DEBUG_FS
8000 /* Create debugfs main directory if it doesn't exist yet */
8001 if (!stmmac_fs_dir)
8002 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8003 register_netdevice_notifier(&stmmac_notifier);
8004 #endif
8005
8006 return 0;
8007 }
8008
8009 static void __exit stmmac_exit(void)
8010 {
8011 #ifdef CONFIG_DEBUG_FS
8012 unregister_netdevice_notifier(&stmmac_notifier);
8013 debugfs_remove_recursive(stmmac_fs_dir);
8014 #endif
8015 }
8016
8017 module_init(stmmac_init)
8018 module_exit(stmmac_exit)
8019
8020 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8021 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8022 MODULE_LICENSE("GPL");
8023