1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54
55 /* As long as the interface is active, we keep the timestamping counter enabled
56 * with fine resolution and binary rollover. This avoids non-monotonic behavior
57 * (clock jumps) when changing timestamping settings at runtime.
58 */
59 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 PTP_TCR_TSCTRLSSR)
61
62 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
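/* Illustrative only (assuming SMP_CACHE_BYTES == 64): STMMAC_ALIGN(1500)
 * first rounds up to the 1536-byte cache-line boundary and then to a
 * 16-byte boundary, so lengths handed to the DMA engine stay cache aligned.
 */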
64
65 /* Module parameters */
66 #define TX_TIMEO 5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78
79 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
85
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
90
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 #define DEFAULT_BUFSIZE 1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108
109 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
110 NETIF_MSG_LINK | NETIF_MSG_IFUP |
111 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
112
113 #define STMMAC_DEFAULT_LPI_TIMER 1000
114 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
115 module_param(eee_timer, uint, 0644);
116 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
117 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
118
119 /* By default the driver uses ring mode to manage tx and rx descriptors,
120 * but allows the user to force chain mode instead of ring mode.
121 */
122 static unsigned int chain_mode;
123 module_param(chain_mode, int, 0444);
124 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
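/* Illustrative usage of the parameters above (module name assumed to be
 * "stmmac"): at load time, e.g.
 *	modprobe stmmac buf_sz=4096 eee_timer=2000 chain_mode=1
 * or, when the driver is built in, on the kernel command line as
 *	stmmac.buf_sz=4096 stmmac.eee_timer=2000
 */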
125
126 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
127 /* For MSI interrupts handling */
128 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
129 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
131 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
132 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
133 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
135 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
137 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
138 u32 rxmode, u32 chan);
139
140 #ifdef CONFIG_DEBUG_FS
141 static const struct net_device_ops stmmac_netdev_ops;
142 static void stmmac_init_fs(struct net_device *dev);
143 static void stmmac_exit_fs(struct net_device *dev);
144 #endif
145
146 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
147
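/**
 * stmmac_bus_clks_config - enable or disable the bus clocks
 * @priv: driver private structure
 * @enabled: true to enable the clocks, false to disable them
 * Description: gates the main stmmac clock, the APB pclk and any
 * platform-specific clocks (plat->clks_config) together, rolling back
 * already-enabled clocks if a later step fails.
 */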
148 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
149 {
150 int ret = 0;
151
152 if (enabled) {
153 ret = clk_prepare_enable(priv->plat->stmmac_clk);
154 if (ret)
155 return ret;
156 ret = clk_prepare_enable(priv->plat->pclk);
157 if (ret) {
158 clk_disable_unprepare(priv->plat->stmmac_clk);
159 return ret;
160 }
161 if (priv->plat->clks_config) {
162 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
163 if (ret) {
164 clk_disable_unprepare(priv->plat->stmmac_clk);
165 clk_disable_unprepare(priv->plat->pclk);
166 return ret;
167 }
168 }
169 } else {
170 clk_disable_unprepare(priv->plat->stmmac_clk);
171 clk_disable_unprepare(priv->plat->pclk);
172 if (priv->plat->clks_config)
173 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
174 }
175
176 return ret;
177 }
178 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
179
180 /**
181 * stmmac_verify_args - verify the driver parameters.
182 * Description: it checks the driver parameters and sets a default in case of
183 * errors.
184 */
185 static void stmmac_verify_args(void)
186 {
187 if (unlikely(watchdog < 0))
188 watchdog = TX_TIMEO;
189 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
190 buf_sz = DEFAULT_BUFSIZE;
191 if (unlikely(flow_ctrl > 1))
192 flow_ctrl = FLOW_AUTO;
193 else if (likely(flow_ctrl < 0))
194 flow_ctrl = FLOW_OFF;
195 if (unlikely((pause < 0) || (pause > 0xffff)))
196 pause = PAUSE_TIME;
197 }
198
199 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
200 {
201 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
202 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
203 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
204 u32 queue;
205
206 for (queue = 0; queue < maxq; queue++) {
207 struct stmmac_channel *ch = &priv->channel[queue];
208
209 if (stmmac_xdp_is_enabled(priv) &&
210 test_bit(queue, priv->af_xdp_zc_qps)) {
211 napi_disable(&ch->rxtx_napi);
212 continue;
213 }
214
215 if (queue < rx_queues_cnt)
216 napi_disable(&ch->rx_napi);
217 if (queue < tx_queues_cnt)
218 napi_disable(&ch->tx_napi);
219 }
220 }
221
222 /**
223 * stmmac_disable_all_queues - Disable all queues
224 * @priv: driver private structure
225 */
226 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
227 {
228 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
229 struct stmmac_rx_queue *rx_q;
230 u32 queue;
231
232 /* synchronize_rcu() needed for pending XDP buffers to drain */
233 for (queue = 0; queue < rx_queues_cnt; queue++) {
234 rx_q = &priv->dma_conf.rx_queue[queue];
235 if (rx_q->xsk_pool) {
236 synchronize_rcu();
237 break;
238 }
239 }
240
241 __stmmac_disable_all_queues(priv);
242 }
243
244 /**
245 * stmmac_enable_all_queues - Enable all queues
246 * @priv: driver private structure
247 */
248 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
249 {
250 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
251 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
252 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
253 u32 queue;
254
255 for (queue = 0; queue < maxq; queue++) {
256 struct stmmac_channel *ch = &priv->channel[queue];
257
258 if (stmmac_xdp_is_enabled(priv) &&
259 test_bit(queue, priv->af_xdp_zc_qps)) {
260 napi_enable(&ch->rxtx_napi);
261 continue;
262 }
263
264 if (queue < rx_queues_cnt)
265 napi_enable(&ch->rx_napi);
266 if (queue < tx_queues_cnt)
267 napi_enable(&ch->tx_napi);
268 }
269 }
270
271 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
272 {
273 if (!test_bit(STMMAC_DOWN, &priv->state) &&
274 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
275 queue_work(priv->wq, &priv->service_task);
276 }
277
278 static void stmmac_global_err(struct stmmac_priv *priv)
279 {
280 netif_carrier_off(priv->dev);
281 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
282 stmmac_service_event_schedule(priv);
283 }
284
285 /**
286 * stmmac_clk_csr_set - dynamically set the MDC clock
287 * @priv: driver private structure
288 * Description: this is to dynamically set the MDC clock according to the csr
289 * clock input.
290 * Note:
291 * If a specific clk_csr value is passed from the platform
292 * this means that the CSR Clock Range selection cannot be
293 * changed at run-time and it is fixed (as reported in the driver
294 * documentation). Otherwise, the driver will try to set the MDC
295 * clock dynamically according to the actual clock input.
296 */
297 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
298 {
299 unsigned long clk_rate;
300
301 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
302
303 /* The platform-provided default clk_csr is assumed valid for
304 * all cases other than the ones mentioned below.
305 * For values higher than the IEEE 802.3 specified frequency
306 * we cannot estimate the proper divider, as the frequency of
307 * clk_csr_i is not known. So we do not change the default
308 * divider.
309 */
310 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
311 if (clk_rate < CSR_F_35M)
312 priv->clk_csr = STMMAC_CSR_20_35M;
313 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
314 priv->clk_csr = STMMAC_CSR_35_60M;
315 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
316 priv->clk_csr = STMMAC_CSR_60_100M;
317 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
318 priv->clk_csr = STMMAC_CSR_100_150M;
319 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
320 priv->clk_csr = STMMAC_CSR_150_250M;
321 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
322 priv->clk_csr = STMMAC_CSR_250_300M;
323 else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
324 priv->clk_csr = STMMAC_CSR_300_500M;
325 else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
326 priv->clk_csr = STMMAC_CSR_500_800M;
327 }
328
329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 if (clk_rate > 160000000)
331 priv->clk_csr = 0x03;
332 else if (clk_rate > 80000000)
333 priv->clk_csr = 0x02;
334 else if (clk_rate > 40000000)
335 priv->clk_csr = 0x01;
336 else
337 priv->clk_csr = 0;
338 }
339
340 if (priv->plat->has_xgmac) {
341 if (clk_rate > 400000000)
342 priv->clk_csr = 0x5;
343 else if (clk_rate > 350000000)
344 priv->clk_csr = 0x4;
345 else if (clk_rate > 300000000)
346 priv->clk_csr = 0x3;
347 else if (clk_rate > 250000000)
348 priv->clk_csr = 0x2;
349 else if (clk_rate > 150000000)
350 priv->clk_csr = 0x1;
351 else
352 priv->clk_csr = 0x0;
353 }
354 }
355
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361
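/**
 * stmmac_tx_avail - Get number of free TX descriptors
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: returns how many descriptors are still free in the TX ring,
 * keeping one slot unused so that a full ring can be told apart from an
 * empty one.
 */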
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 u32 avail;
366
367 if (tx_q->dirty_tx > tx_q->cur_tx)
368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 else
370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
371
372 return avail;
373 }
374
375 /**
376 * stmmac_rx_dirty - Get RX queue dirty
377 * @priv: driver private structure
378 * @queue: RX queue index
379 */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 u32 dirty;
384
385 if (rx_q->dirty_rx <= rx_q->cur_rx)
386 dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 else
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389
390 return dirty;
391 }
392
393 static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv)
394 {
395 stmmac_set_eee_lpi_timer(priv, priv->hw, 0);
396 }
397
398 static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv)
399 {
400 stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer);
401 }
402
403 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
404 {
405 u32 tx_cnt = priv->plat->tx_queues_to_use;
406 u32 queue;
407
408 /* check if all TX queues have the work finished */
409 for (queue = 0; queue < tx_cnt; queue++) {
410 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
411
412 if (tx_q->dirty_tx != tx_q->cur_tx)
413 return true; /* still unfinished work */
414 }
415
416 return false;
417 }
418
419 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
420 {
421 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
422 }
423
424 /**
425 * stmmac_try_to_start_sw_lpi - check and enter LPI mode
426 * @priv: driver private structure
427 * Description: this function verifies and, if possible, enters LPI mode
428 * in the case of EEE.
429 */
430 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
431 {
432 if (stmmac_eee_tx_busy(priv)) {
433 stmmac_restart_sw_lpi_timer(priv);
434 return;
435 }
436
437 /* Check and enter in LPI mode */
438 if (!priv->tx_path_in_lpi_mode)
439 stmmac_set_eee_mode(priv, priv->hw,
440 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
441 }
442
443 /**
444 * stmmac_stop_sw_lpi - stop transmitting LPI
445 * @priv: driver private structure
446 * Description: When using software-controlled LPI, stop transmitting LPI state.
447 */
448 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
449 {
450 stmmac_reset_eee_mode(priv, priv->hw);
451 del_timer_sync(&priv->eee_ctrl_timer);
452 priv->tx_path_in_lpi_mode = false;
453 }
454
455 /**
456 * stmmac_eee_ctrl_timer - EEE TX SW timer.
457 * @t: timer_list struct containing private info
458 * Description:
459 * if there is no data transfer and if we are not in LPI state,
460 * then the MAC transmitter can be moved to the LPI state.
461 */
462 static void stmmac_eee_ctrl_timer(struct timer_list *t)
463 {
464 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
465
466 stmmac_try_to_start_sw_lpi(priv);
467 }
468
469 /**
470 * stmmac_eee_init - init EEE
471 * @priv: driver private structure
472 * @active: indicates whether EEE should be enabled.
473 * Description:
474 * if the GMAC supports EEE (from the HW cap reg) and the phy device
475 * can also manage EEE, this function enables the LPI state and starts the
476 * related timer.
477 */
478 static void stmmac_eee_init(struct stmmac_priv *priv, bool active)
479 {
480 priv->eee_active = active;
481
482 /* Check if MAC core supports the EEE feature. */
483 if (!priv->dma_cap.eee) {
484 priv->eee_enabled = false;
485 return;
486 }
487
488 mutex_lock(&priv->lock);
489
490 /* Check if it needs to be deactivated */
491 if (!priv->eee_active) {
492 if (priv->eee_enabled) {
493 netdev_dbg(priv->dev, "disable EEE\n");
494 priv->eee_sw_timer_en = false;
495 stmmac_disable_hw_lpi_timer(priv);
496 del_timer_sync(&priv->eee_ctrl_timer);
497 stmmac_set_eee_timer(priv, priv->hw, 0,
498 STMMAC_DEFAULT_TWT_LS);
499 if (priv->hw->xpcs)
500 xpcs_config_eee(priv->hw->xpcs,
501 priv->plat->mult_fact_100ns,
502 false);
503 }
504 priv->eee_enabled = false;
505 mutex_unlock(&priv->lock);
506 return;
507 }
508
509 if (priv->eee_active && !priv->eee_enabled) {
510 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
511 STMMAC_DEFAULT_TWT_LS);
512 if (priv->hw->xpcs)
513 xpcs_config_eee(priv->hw->xpcs,
514 priv->plat->mult_fact_100ns,
515 true);
516 }
517
518 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
519 /* Use hardware LPI mode */
520 del_timer_sync(&priv->eee_ctrl_timer);
521 priv->tx_path_in_lpi_mode = false;
522 priv->eee_sw_timer_en = false;
523 stmmac_enable_hw_lpi_timer(priv);
524 } else {
525 /* Use software LPI mode */
526 priv->eee_sw_timer_en = true;
527 stmmac_disable_hw_lpi_timer(priv);
528 stmmac_restart_sw_lpi_timer(priv);
529 }
530
531 priv->eee_enabled = true;
532
533 mutex_unlock(&priv->lock);
534 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
535 }
536
537 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
538 * @priv: driver private structure
539 * @p : descriptor pointer
540 * @skb : the socket buffer
541 * Description :
542 * This function reads the timestamp from the descriptor, performs some
543 * sanity checks and then passes it to the stack.
544 */
545 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
546 struct dma_desc *p, struct sk_buff *skb)
547 {
548 struct skb_shared_hwtstamps shhwtstamp;
549 bool found = false;
550 u64 ns = 0;
551
552 if (!priv->hwts_tx_en)
553 return;
554
555 /* exit if skb doesn't support hw tstamp */
556 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
557 return;
558
559 /* check tx tstamp status */
560 if (stmmac_get_tx_timestamp_status(priv, p)) {
561 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
562 found = true;
563 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
564 found = true;
565 }
566
567 if (found) {
568 ns -= priv->plat->cdc_error_adj;
569
570 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
571 shhwtstamp.hwtstamp = ns_to_ktime(ns);
572
573 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
574 /* pass tstamp to stack */
575 skb_tstamp_tx(skb, &shhwtstamp);
576 }
577 }
578
579 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
580 * @priv: driver private structure
581 * @p : descriptor pointer
582 * @np : next descriptor pointer
583 * @skb : the socket buffer
584 * Description :
585 * This function reads the received packet's timestamp from the descriptor
586 * and passes it to the stack. It also performs some sanity checks.
587 */
588 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
589 struct dma_desc *np, struct sk_buff *skb)
590 {
591 struct skb_shared_hwtstamps *shhwtstamp = NULL;
592 struct dma_desc *desc = p;
593 u64 ns = 0;
594
595 if (!priv->hwts_rx_en)
596 return;
597 /* For GMAC4, the valid timestamp is from CTX next desc. */
598 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
599 desc = np;
600
601 /* Check if timestamp is available */
602 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
603 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
604
605 ns -= priv->plat->cdc_error_adj;
606
607 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
608 shhwtstamp = skb_hwtstamps(skb);
609 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
610 shhwtstamp->hwtstamp = ns_to_ktime(ns);
611 } else {
612 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
613 }
614 }
615
616 /**
617 * stmmac_hwtstamp_set - control hardware timestamping.
618 * @dev: device pointer.
619 * @ifr: An IOCTL specific structure, that can contain a pointer to
620 * a proprietary structure used to pass information to the driver.
621 * Description:
622 * This function configures the MAC to enable/disable both outgoing (TX)
623 * and incoming (RX) packet timestamping based on user input.
624 * Return Value:
625 * 0 on success and an appropriate -ve integer on failure.
626 */
627 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
628 {
629 struct stmmac_priv *priv = netdev_priv(dev);
630 struct hwtstamp_config config;
631 u32 ptp_v2 = 0;
632 u32 tstamp_all = 0;
633 u32 ptp_over_ipv4_udp = 0;
634 u32 ptp_over_ipv6_udp = 0;
635 u32 ptp_over_ethernet = 0;
636 u32 snap_type_sel = 0;
637 u32 ts_master_en = 0;
638 u32 ts_event_en = 0;
639
640 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
641 netdev_alert(priv->dev, "No support for HW time stamping\n");
642 priv->hwts_tx_en = 0;
643 priv->hwts_rx_en = 0;
644
645 return -EOPNOTSUPP;
646 }
647
648 if (copy_from_user(&config, ifr->ifr_data,
649 sizeof(config)))
650 return -EFAULT;
651
652 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
653 __func__, config.flags, config.tx_type, config.rx_filter);
654
655 if (config.tx_type != HWTSTAMP_TX_OFF &&
656 config.tx_type != HWTSTAMP_TX_ON)
657 return -ERANGE;
658
659 if (priv->adv_ts) {
660 switch (config.rx_filter) {
661 case HWTSTAMP_FILTER_NONE:
662 /* time stamp no incoming packet at all */
663 config.rx_filter = HWTSTAMP_FILTER_NONE;
664 break;
665
666 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
667 /* PTP v1, UDP, any kind of event packet */
668 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
669 /* 'xmac' hardware can support Sync, Pdelay_Req and
670 * Pdelay_resp by setting bit14 and bits17/16 to 01.
671 * This leaves Delay_Req timestamps out.
672 * Enable all events *and* general purpose message
673 * timestamping
674 */
675 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
676 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
677 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
678 break;
679
680 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
681 /* PTP v1, UDP, Sync packet */
682 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
683 /* take time stamp for SYNC messages only */
684 ts_event_en = PTP_TCR_TSEVNTENA;
685
686 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
687 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
688 break;
689
690 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
691 /* PTP v1, UDP, Delay_req packet */
692 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
693 /* take time stamp for Delay_Req messages only */
694 ts_master_en = PTP_TCR_TSMSTRENA;
695 ts_event_en = PTP_TCR_TSEVNTENA;
696
697 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
698 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
699 break;
700
701 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
702 /* PTP v2, UDP, any kind of event packet */
703 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
704 ptp_v2 = PTP_TCR_TSVER2ENA;
705 /* take time stamp for all event messages */
706 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
707
708 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
709 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
710 break;
711
712 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
713 /* PTP v2, UDP, Sync packet */
714 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
715 ptp_v2 = PTP_TCR_TSVER2ENA;
716 /* take time stamp for SYNC messages only */
717 ts_event_en = PTP_TCR_TSEVNTENA;
718
719 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
720 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
721 break;
722
723 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
724 /* PTP v2, UDP, Delay_req packet */
725 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
726 ptp_v2 = PTP_TCR_TSVER2ENA;
727 /* take time stamp for Delay_Req messages only */
728 ts_master_en = PTP_TCR_TSMSTRENA;
729 ts_event_en = PTP_TCR_TSEVNTENA;
730
731 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
732 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
733 break;
734
735 case HWTSTAMP_FILTER_PTP_V2_EVENT:
736 /* PTP v2/802.1AS, any layer, any kind of event packet */
737 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
738 ptp_v2 = PTP_TCR_TSVER2ENA;
739 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
740 if (priv->synopsys_id < DWMAC_CORE_4_10)
741 ts_event_en = PTP_TCR_TSEVNTENA;
742 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
743 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
744 ptp_over_ethernet = PTP_TCR_TSIPENA;
745 break;
746
747 case HWTSTAMP_FILTER_PTP_V2_SYNC:
748 /* PTP v2/802.1AS, any layer, Sync packet */
749 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
750 ptp_v2 = PTP_TCR_TSVER2ENA;
751 /* take time stamp for SYNC messages only */
752 ts_event_en = PTP_TCR_TSEVNTENA;
753
754 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
755 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
756 ptp_over_ethernet = PTP_TCR_TSIPENA;
757 break;
758
759 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
760 /* PTP v2/802.1AS, any layer, Delay_req packet */
761 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
762 ptp_v2 = PTP_TCR_TSVER2ENA;
763 /* take time stamp for Delay_Req messages only */
764 ts_master_en = PTP_TCR_TSMSTRENA;
765 ts_event_en = PTP_TCR_TSEVNTENA;
766
767 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
768 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
769 ptp_over_ethernet = PTP_TCR_TSIPENA;
770 break;
771
772 case HWTSTAMP_FILTER_NTP_ALL:
773 case HWTSTAMP_FILTER_ALL:
774 /* time stamp any incoming packet */
775 config.rx_filter = HWTSTAMP_FILTER_ALL;
776 tstamp_all = PTP_TCR_TSENALL;
777 break;
778
779 default:
780 return -ERANGE;
781 }
782 } else {
783 switch (config.rx_filter) {
784 case HWTSTAMP_FILTER_NONE:
785 config.rx_filter = HWTSTAMP_FILTER_NONE;
786 break;
787 default:
788 /* PTP v1, UDP, any kind of event packet */
789 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
790 break;
791 }
792 }
793 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
794 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
795
796 priv->systime_flags = STMMAC_HWTS_ACTIVE;
797
798 if (priv->hwts_tx_en || priv->hwts_rx_en) {
799 priv->systime_flags |= tstamp_all | ptp_v2 |
800 ptp_over_ethernet | ptp_over_ipv6_udp |
801 ptp_over_ipv4_udp | ts_event_en |
802 ts_master_en | snap_type_sel;
803 }
804
805 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
806
807 memcpy(&priv->tstamp_config, &config, sizeof(config));
808
809 return copy_to_user(ifr->ifr_data, &config,
810 sizeof(config)) ? -EFAULT : 0;
811 }
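/* A sketch of how userspace typically reaches the handler above, via the
 * standard SIOCSHWTSTAMP ioctl (illustrative only; the socket fd and the
 * interface name "eth0" are assumptions, error handling omitted):
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	ifr.ifr_data = (char *)&cfg;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */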
812
813 /**
814 * stmmac_hwtstamp_get - read hardware timestamping.
815 * @dev: device pointer.
816 * @ifr: An IOCTL specific structure, that can contain a pointer to
817 * a proprietary structure used to pass information to the driver.
818 * Description:
819 * This function obtains the current hardware timestamping settings
820 * as requested.
821 */
822 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
823 {
824 struct stmmac_priv *priv = netdev_priv(dev);
825 struct hwtstamp_config *config = &priv->tstamp_config;
826
827 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
828 return -EOPNOTSUPP;
829
830 return copy_to_user(ifr->ifr_data, config,
831 sizeof(*config)) ? -EFAULT : 0;
832 }
833
834 /**
835 * stmmac_init_tstamp_counter - init hardware timestamping counter
836 * @priv: driver private structure
837 * @systime_flags: timestamping flags
838 * Description:
839 * Initialize hardware counter for packet timestamping.
840 * This is valid as long as the interface is open and not suspended.
841 * It will be rerun after resuming from suspend, in which case the timestamping
842 * flags updated by stmmac_hwtstamp_set() also need to be restored.
843 */
844 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
845 {
846 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
847 struct timespec64 now;
848 u32 sec_inc = 0;
849 u64 temp = 0;
850
851 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
852 return -EOPNOTSUPP;
853
854 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
855 priv->systime_flags = systime_flags;
856
857 /* program Sub Second Increment reg */
858 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
859 priv->plat->clk_ptp_rate,
860 xmac, &sec_inc);
861 temp = div_u64(1000000000ULL, sec_inc);
862
863 /* Store sub second increment for later use */
864 priv->sub_second_inc = sec_inc;
865
866 /* calculate the default addend value:
867 * the formula is:
868 * addend = (2^32)/freq_div_ratio;
869 * where freq_div_ratio = clk_ptp_rate/(1e9ns/sec_inc)
870 */
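/* Worked example (illustrative, assuming fine-update mode where sec_inc is
 * twice the PTP clock period): with clk_ptp_rate at 50 MHz, sec_inc is
 * 40 ns, temp is 1e9 / 40 = 25,000,000 and the addend below evaluates to
 * (25,000,000 << 32) / 50,000,000 = 2^31 (mid-range).
 */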
871 temp = (u64)(temp << 32);
872 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
873 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
874
875 /* initialize system time */
876 ktime_get_real_ts64(&now);
877
878 /* lower 32 bits of tv_sec are safe until y2106 */
879 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
880
881 return 0;
882 }
883 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
884
885 /**
886 * stmmac_init_ptp - init PTP
887 * @priv: driver private structure
888 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
889 * This is done by looking at the HW cap. register.
890 * This function also registers the ptp driver.
891 */
892 static int stmmac_init_ptp(struct stmmac_priv *priv)
893 {
894 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
895 int ret;
896
897 if (priv->plat->ptp_clk_freq_config)
898 priv->plat->ptp_clk_freq_config(priv);
899
900 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
901 if (ret)
902 return ret;
903
904 priv->adv_ts = 0;
905 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
906 if (xmac && priv->dma_cap.atime_stamp)
907 priv->adv_ts = 1;
908 /* Dwmac 3.x core with extend_desc can support adv_ts */
909 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
910 priv->adv_ts = 1;
911
912 if (priv->dma_cap.time_stamp)
913 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
914
915 if (priv->adv_ts)
916 netdev_info(priv->dev,
917 "IEEE 1588-2008 Advanced Timestamp supported\n");
918
919 priv->hwts_tx_en = 0;
920 priv->hwts_rx_en = 0;
921
922 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
923 stmmac_hwtstamp_correct_latency(priv, priv);
924
925 return 0;
926 }
927
928 static void stmmac_release_ptp(struct stmmac_priv *priv)
929 {
930 clk_disable_unprepare(priv->plat->clk_ptp_ref);
931 stmmac_ptp_unregister(priv);
932 }
933
934 /**
935 * stmmac_mac_flow_ctrl - Configure flow control in all queues
936 * @priv: driver private structure
937 * @duplex: duplex passed to the next function
938 * Description: It is used for configuring the flow control in all queues
939 */
940 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
941 {
942 u32 tx_cnt = priv->plat->tx_queues_to_use;
943
944 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
945 priv->pause, tx_cnt);
946 }
947
948 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
949 phy_interface_t interface)
950 {
951 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
952
953 /* Refresh the MAC-specific capabilities */
954 stmmac_mac_update_caps(priv);
955
956 config->mac_capabilities = priv->hw->link.caps;
957
958 if (priv->plat->max_speed)
959 phylink_limit_mac_speed(config, priv->plat->max_speed);
960
961 return config->mac_capabilities;
962 }
963
964 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
965 phy_interface_t interface)
966 {
967 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
968 struct phylink_pcs *pcs;
969
970 if (priv->plat->select_pcs) {
971 pcs = priv->plat->select_pcs(priv, interface);
972 if (!IS_ERR(pcs))
973 return pcs;
974 }
975
976 return NULL;
977 }
978
979 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
980 const struct phylink_link_state *state)
981 {
982 /* Nothing to do, xpcs_config() handles everything */
983 }
984
985 static void stmmac_mac_link_down(struct phylink_config *config,
986 unsigned int mode, phy_interface_t interface)
987 {
988 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
989
990 stmmac_mac_set(priv, priv->ioaddr, false);
991 if (priv->dma_cap.eee)
992 stmmac_set_eee_pls(priv, priv->hw, false);
993
994 if (stmmac_fpe_supported(priv))
995 stmmac_fpe_link_state_handle(priv, false);
996 }
997
998 static void stmmac_mac_link_up(struct phylink_config *config,
999 struct phy_device *phy,
1000 unsigned int mode, phy_interface_t interface,
1001 int speed, int duplex,
1002 bool tx_pause, bool rx_pause)
1003 {
1004 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1005 u32 old_ctrl, ctrl;
1006
1007 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1008 priv->plat->serdes_powerup)
1009 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1010
1011 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1012 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1013
1014 if (interface == PHY_INTERFACE_MODE_USXGMII) {
1015 switch (speed) {
1016 case SPEED_10000:
1017 ctrl |= priv->hw->link.xgmii.speed10000;
1018 break;
1019 case SPEED_5000:
1020 ctrl |= priv->hw->link.xgmii.speed5000;
1021 break;
1022 case SPEED_2500:
1023 ctrl |= priv->hw->link.xgmii.speed2500;
1024 break;
1025 default:
1026 return;
1027 }
1028 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1029 switch (speed) {
1030 case SPEED_100000:
1031 ctrl |= priv->hw->link.xlgmii.speed100000;
1032 break;
1033 case SPEED_50000:
1034 ctrl |= priv->hw->link.xlgmii.speed50000;
1035 break;
1036 case SPEED_40000:
1037 ctrl |= priv->hw->link.xlgmii.speed40000;
1038 break;
1039 case SPEED_25000:
1040 ctrl |= priv->hw->link.xlgmii.speed25000;
1041 break;
1042 case SPEED_10000:
1043 ctrl |= priv->hw->link.xgmii.speed10000;
1044 break;
1045 case SPEED_2500:
1046 ctrl |= priv->hw->link.speed2500;
1047 break;
1048 case SPEED_1000:
1049 ctrl |= priv->hw->link.speed1000;
1050 break;
1051 default:
1052 return;
1053 }
1054 } else {
1055 switch (speed) {
1056 case SPEED_2500:
1057 ctrl |= priv->hw->link.speed2500;
1058 break;
1059 case SPEED_1000:
1060 ctrl |= priv->hw->link.speed1000;
1061 break;
1062 case SPEED_100:
1063 ctrl |= priv->hw->link.speed100;
1064 break;
1065 case SPEED_10:
1066 ctrl |= priv->hw->link.speed10;
1067 break;
1068 default:
1069 return;
1070 }
1071 }
1072
1073 priv->speed = speed;
1074
1075 if (priv->plat->fix_mac_speed)
1076 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1077
1078 if (!duplex)
1079 ctrl &= ~priv->hw->link.duplex;
1080 else
1081 ctrl |= priv->hw->link.duplex;
1082
1083 /* Flow Control operation */
1084 if (rx_pause && tx_pause)
1085 priv->flow_ctrl = FLOW_AUTO;
1086 else if (rx_pause && !tx_pause)
1087 priv->flow_ctrl = FLOW_RX;
1088 else if (!rx_pause && tx_pause)
1089 priv->flow_ctrl = FLOW_TX;
1090 else
1091 priv->flow_ctrl = FLOW_OFF;
1092
1093 stmmac_mac_flow_ctrl(priv, duplex);
1094
1095 if (ctrl != old_ctrl)
1096 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1097
1098 stmmac_mac_set(priv, priv->ioaddr, true);
1099 if (priv->dma_cap.eee)
1100 stmmac_set_eee_pls(priv, priv->hw, true);
1101
1102 if (stmmac_fpe_supported(priv))
1103 stmmac_fpe_link_state_handle(priv, true);
1104
1105 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1106 stmmac_hwtstamp_correct_latency(priv, priv);
1107 }
1108
1109 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1110 {
1111 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1112
1113 stmmac_eee_init(priv, false);
1114 }
1115
1116 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1117 bool tx_clk_stop)
1118 {
1119 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1120
1121 priv->tx_lpi_timer = timer;
1122 stmmac_eee_init(priv, true);
1123
1124 return 0;
1125 }
1126
1127 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1128 .mac_get_caps = stmmac_mac_get_caps,
1129 .mac_select_pcs = stmmac_mac_select_pcs,
1130 .mac_config = stmmac_mac_config,
1131 .mac_link_down = stmmac_mac_link_down,
1132 .mac_link_up = stmmac_mac_link_up,
1133 .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1134 .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1135 };
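/* These callbacks are registered with phylink via phylink_create() in
 * stmmac_phy_setup() below; phylink then drives MAC configuration,
 * link up/down and LPI handling through them.
 */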
1136
1137 /**
1138 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1139 * @priv: driver private structure
1140 * Description: this is to verify if the HW supports the PCS, i.e. the
1141 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1142 * configured for the TBI, RTBI, or SGMII PHY interface.
1143 */
1144 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1145 {
1146 int interface = priv->plat->mac_interface;
1147
1148 if (priv->dma_cap.pcs) {
1149 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1150 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1151 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1152 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1153 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1154 priv->hw->pcs = STMMAC_PCS_RGMII;
1155 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1156 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1157 priv->hw->pcs = STMMAC_PCS_SGMII;
1158 }
1159 }
1160 }
1161
1162 /**
1163 * stmmac_init_phy - PHY initialization
1164 * @dev: net device structure
1165 * Description: it initializes the driver's PHY state, and attaches the PHY
1166 * to the mac driver.
1167 * Return value:
1168 * 0 on success
1169 */
1170 static int stmmac_init_phy(struct net_device *dev)
1171 {
1172 struct stmmac_priv *priv = netdev_priv(dev);
1173 struct fwnode_handle *phy_fwnode;
1174 struct fwnode_handle *fwnode;
1175 int ret;
1176
1177 if (!phylink_expects_phy(priv->phylink))
1178 return 0;
1179
1180 fwnode = priv->plat->port_node;
1181 if (!fwnode)
1182 fwnode = dev_fwnode(priv->device);
1183
1184 if (fwnode)
1185 phy_fwnode = fwnode_get_phy_node(fwnode);
1186 else
1187 phy_fwnode = NULL;
1188
1189 /* Some DT bindings do not set up the PHY handle. Let's try to
1190 * manually parse it
1191 */
1192 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1193 int addr = priv->plat->phy_addr;
1194 struct phy_device *phydev;
1195
1196 if (addr < 0) {
1197 netdev_err(priv->dev, "no phy found\n");
1198 return -ENODEV;
1199 }
1200
1201 phydev = mdiobus_get_phy(priv->mii, addr);
1202 if (!phydev) {
1203 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1204 return -ENODEV;
1205 }
1206
1207 ret = phylink_connect_phy(priv->phylink, phydev);
1208 } else {
1209 fwnode_handle_put(phy_fwnode);
1210 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1211 }
1212
1213 if (ret == 0) {
1214 struct ethtool_keee eee;
1215
1216 /* Configure phylib's copy of the LPI timer. Normally,
1217 * phylink_config.lpi_timer_default would do this, but there is
1218 * a chance that userspace could change the eee_timer setting
1219 * via sysfs before the first open. Thus, preserve existing
1220 * behaviour.
1221 */
1222 if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1223 eee.tx_lpi_timer = priv->tx_lpi_timer;
1224 phylink_ethtool_set_eee(priv->phylink, &eee);
1225 }
1226 }
1227
1228 if (!priv->plat->pmt) {
1229 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1230
1231 phylink_ethtool_get_wol(priv->phylink, &wol);
1232 device_set_wakeup_capable(priv->device, !!wol.supported);
1233 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1234 }
1235
1236 return ret;
1237 }
1238
1239 static int stmmac_phy_setup(struct stmmac_priv *priv)
1240 {
1241 struct stmmac_mdio_bus_data *mdio_bus_data;
1242 int mode = priv->plat->phy_interface;
1243 struct fwnode_handle *fwnode;
1244 struct phylink_pcs *pcs;
1245 struct phylink *phylink;
1246
1247 priv->phylink_config.dev = &priv->dev->dev;
1248 priv->phylink_config.type = PHYLINK_NETDEV;
1249 priv->phylink_config.mac_managed_pm = true;
1250
1251 /* Stmmac always requires an RX clock for hardware initialization */
1252 priv->phylink_config.mac_requires_rxc = true;
1253
1254 if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1255 priv->phylink_config.eee_rx_clk_stop_enable = true;
1256
1257 mdio_bus_data = priv->plat->mdio_bus_data;
1258 if (mdio_bus_data)
1259 priv->phylink_config.default_an_inband =
1260 mdio_bus_data->default_an_inband;
1261
1262 /* Set the platform/firmware specified interface mode. Note, phylink
1263 * deals with the PHY interface mode, not the MAC interface mode.
1264 */
1265 __set_bit(mode, priv->phylink_config.supported_interfaces);
1266
1267 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1268 if (priv->hw->xpcs)
1269 pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1270 else
1271 pcs = priv->hw->phylink_pcs;
1272
1273 if (pcs)
1274 phy_interface_or(priv->phylink_config.supported_interfaces,
1275 priv->phylink_config.supported_interfaces,
1276 pcs->supported_interfaces);
1277
1278 if (priv->dma_cap.eee) {
1279 /* Assume all supported interfaces also support LPI */
1280 memcpy(priv->phylink_config.lpi_interfaces,
1281 priv->phylink_config.supported_interfaces,
1282 sizeof(priv->phylink_config.lpi_interfaces));
1283
1284 /* All full duplex speeds above 100Mbps are supported */
1285 priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) |
1286 MAC_100FD;
1287 priv->phylink_config.lpi_timer_default = eee_timer * 1000;
1288 priv->phylink_config.eee_enabled_default = true;
1289 }
1290
1291 fwnode = priv->plat->port_node;
1292 if (!fwnode)
1293 fwnode = dev_fwnode(priv->device);
1294
1295 phylink = phylink_create(&priv->phylink_config, fwnode,
1296 mode, &stmmac_phylink_mac_ops);
1297 if (IS_ERR(phylink))
1298 return PTR_ERR(phylink);
1299
1300 priv->phylink = phylink;
1301 return 0;
1302 }
1303
1304 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1305 struct stmmac_dma_conf *dma_conf)
1306 {
1307 u32 rx_cnt = priv->plat->rx_queues_to_use;
1308 unsigned int desc_size;
1309 void *head_rx;
1310 u32 queue;
1311
1312 /* Display RX rings */
1313 for (queue = 0; queue < rx_cnt; queue++) {
1314 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1315
1316 pr_info("\tRX Queue %u rings\n", queue);
1317
1318 if (priv->extend_desc) {
1319 head_rx = (void *)rx_q->dma_erx;
1320 desc_size = sizeof(struct dma_extended_desc);
1321 } else {
1322 head_rx = (void *)rx_q->dma_rx;
1323 desc_size = sizeof(struct dma_desc);
1324 }
1325
1326 /* Display RX ring */
1327 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1328 rx_q->dma_rx_phy, desc_size);
1329 }
1330 }
1331
1332 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1333 struct stmmac_dma_conf *dma_conf)
1334 {
1335 u32 tx_cnt = priv->plat->tx_queues_to_use;
1336 unsigned int desc_size;
1337 void *head_tx;
1338 u32 queue;
1339
1340 /* Display TX rings */
1341 for (queue = 0; queue < tx_cnt; queue++) {
1342 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1343
1344 pr_info("\tTX Queue %d rings\n", queue);
1345
1346 if (priv->extend_desc) {
1347 head_tx = (void *)tx_q->dma_etx;
1348 desc_size = sizeof(struct dma_extended_desc);
1349 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1350 head_tx = (void *)tx_q->dma_entx;
1351 desc_size = sizeof(struct dma_edesc);
1352 } else {
1353 head_tx = (void *)tx_q->dma_tx;
1354 desc_size = sizeof(struct dma_desc);
1355 }
1356
1357 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1358 tx_q->dma_tx_phy, desc_size);
1359 }
1360 }
1361
1362 static void stmmac_display_rings(struct stmmac_priv *priv,
1363 struct stmmac_dma_conf *dma_conf)
1364 {
1365 /* Display RX ring */
1366 stmmac_display_rx_rings(priv, dma_conf);
1367
1368 /* Display TX ring */
1369 stmmac_display_tx_rings(priv, dma_conf);
1370 }
1371
1372 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1373 {
1374 if (stmmac_xdp_is_enabled(priv))
1375 return XDP_PACKET_HEADROOM;
1376
1377 return NET_SKB_PAD;
1378 }
1379
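/* Picks the DMA buffer size bucket for a given MTU; e.g. a standard
 * 1500-byte MTU keeps the default 1536-byte buffer, while a 3000-byte MTU
 * selects BUF_SIZE_4KiB (values shown are illustrative).
 */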
1380 static int stmmac_set_bfsize(int mtu, int bufsize)
1381 {
1382 int ret = bufsize;
1383
1384 if (mtu >= BUF_SIZE_8KiB)
1385 ret = BUF_SIZE_16KiB;
1386 else if (mtu >= BUF_SIZE_4KiB)
1387 ret = BUF_SIZE_8KiB;
1388 else if (mtu >= BUF_SIZE_2KiB)
1389 ret = BUF_SIZE_4KiB;
1390 else if (mtu > DEFAULT_BUFSIZE)
1391 ret = BUF_SIZE_2KiB;
1392 else
1393 ret = DEFAULT_BUFSIZE;
1394
1395 return ret;
1396 }
1397
1398 /**
1399 * stmmac_clear_rx_descriptors - clear RX descriptors
1400 * @priv: driver private structure
1401 * @dma_conf: structure to take the dma data
1402 * @queue: RX queue index
1403 * Description: this function is called to clear the RX descriptors
1404 * whether basic or extended descriptors are in use.
1405 */
1406 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1407 struct stmmac_dma_conf *dma_conf,
1408 u32 queue)
1409 {
1410 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1411 int i;
1412
1413 /* Clear the RX descriptors */
1414 for (i = 0; i < dma_conf->dma_rx_size; i++)
1415 if (priv->extend_desc)
1416 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1417 priv->use_riwt, priv->mode,
1418 (i == dma_conf->dma_rx_size - 1),
1419 dma_conf->dma_buf_sz);
1420 else
1421 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1422 priv->use_riwt, priv->mode,
1423 (i == dma_conf->dma_rx_size - 1),
1424 dma_conf->dma_buf_sz);
1425 }
1426
1427 /**
1428 * stmmac_clear_tx_descriptors - clear tx descriptors
1429 * @priv: driver private structure
1430 * @dma_conf: structure to take the dma data
1431 * @queue: TX queue index.
1432 * Description: this function is called to clear the TX descriptors
1433 * whether basic or extended descriptors are in use.
1434 */
1435 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1436 struct stmmac_dma_conf *dma_conf,
1437 u32 queue)
1438 {
1439 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1440 int i;
1441
1442 /* Clear the TX descriptors */
1443 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1444 int last = (i == (dma_conf->dma_tx_size - 1));
1445 struct dma_desc *p;
1446
1447 if (priv->extend_desc)
1448 p = &tx_q->dma_etx[i].basic;
1449 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1450 p = &tx_q->dma_entx[i].basic;
1451 else
1452 p = &tx_q->dma_tx[i];
1453
1454 stmmac_init_tx_desc(priv, p, priv->mode, last);
1455 }
1456 }
1457
1458 /**
1459 * stmmac_clear_descriptors - clear descriptors
1460 * @priv: driver private structure
1461 * @dma_conf: structure to take the dma data
1462 * Description: this function is called to clear the TX and RX descriptors
1463 * whether basic or extended descriptors are in use.
1464 */
1465 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1466 struct stmmac_dma_conf *dma_conf)
1467 {
1468 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1469 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1470 u32 queue;
1471
1472 /* Clear the RX descriptors */
1473 for (queue = 0; queue < rx_queue_cnt; queue++)
1474 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1475
1476 /* Clear the TX descriptors */
1477 for (queue = 0; queue < tx_queue_cnt; queue++)
1478 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1479 }
1480
1481 /**
1482 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1483 * @priv: driver private structure
1484 * @dma_conf: structure to take the dma data
1485 * @p: descriptor pointer
1486 * @i: descriptor index
1487 * @flags: gfp flag
1488 * @queue: RX queue index
1489 * Description: this function is called to allocate a receive buffer, perform
1490 * the DMA mapping and init the descriptor.
1491 */
1492 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1493 struct stmmac_dma_conf *dma_conf,
1494 struct dma_desc *p,
1495 int i, gfp_t flags, u32 queue)
1496 {
1497 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1498 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1499 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1500
1501 if (priv->dma_cap.host_dma_width <= 32)
1502 gfp |= GFP_DMA32;
1503
1504 if (!buf->page) {
1505 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1506 if (!buf->page)
1507 return -ENOMEM;
1508 buf->page_offset = stmmac_rx_offset(priv);
1509 }
1510
1511 if (priv->sph && !buf->sec_page) {
1512 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1513 if (!buf->sec_page)
1514 return -ENOMEM;
1515
1516 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1517 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1518 } else {
1519 buf->sec_page = NULL;
1520 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1521 }
1522
1523 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1524
1525 stmmac_set_desc_addr(priv, p, buf->addr);
1526 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1527 stmmac_init_desc3(priv, p);
1528
1529 return 0;
1530 }
1531
1532 /**
1533 * stmmac_free_rx_buffer - free RX dma buffers
1534 * @priv: private structure
1535 * @rx_q: RX queue
1536 * @i: buffer index.
1537 */
1538 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1539 struct stmmac_rx_queue *rx_q,
1540 int i)
1541 {
1542 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1543
1544 if (buf->page)
1545 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1546 buf->page = NULL;
1547
1548 if (buf->sec_page)
1549 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1550 buf->sec_page = NULL;
1551 }
1552
1553 /**
1554 * stmmac_free_tx_buffer - free TX dma buffers
1555 * @priv: private structure
1556 * @dma_conf: structure to take the dma data
1557 * @queue: TX queue index
1558 * @i: buffer index.
1559 */
1560 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1561 struct stmmac_dma_conf *dma_conf,
1562 u32 queue, int i)
1563 {
1564 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1565
1566 if (tx_q->tx_skbuff_dma[i].buf &&
1567 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1568 if (tx_q->tx_skbuff_dma[i].map_as_page)
1569 dma_unmap_page(priv->device,
1570 tx_q->tx_skbuff_dma[i].buf,
1571 tx_q->tx_skbuff_dma[i].len,
1572 DMA_TO_DEVICE);
1573 else
1574 dma_unmap_single(priv->device,
1575 tx_q->tx_skbuff_dma[i].buf,
1576 tx_q->tx_skbuff_dma[i].len,
1577 DMA_TO_DEVICE);
1578 }
1579
1580 if (tx_q->xdpf[i] &&
1581 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1582 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1583 xdp_return_frame(tx_q->xdpf[i]);
1584 tx_q->xdpf[i] = NULL;
1585 }
1586
1587 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1588 tx_q->xsk_frames_done++;
1589
1590 if (tx_q->tx_skbuff[i] &&
1591 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1592 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1593 tx_q->tx_skbuff[i] = NULL;
1594 }
1595
1596 tx_q->tx_skbuff_dma[i].buf = 0;
1597 tx_q->tx_skbuff_dma[i].map_as_page = false;
1598 }
1599
1600 /**
1601 * dma_free_rx_skbufs - free RX dma buffers
1602 * @priv: private structure
1603 * @dma_conf: structure to take the dma data
1604 * @queue: RX queue index
1605 */
1606 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1607 struct stmmac_dma_conf *dma_conf,
1608 u32 queue)
1609 {
1610 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1611 int i;
1612
1613 for (i = 0; i < dma_conf->dma_rx_size; i++)
1614 stmmac_free_rx_buffer(priv, rx_q, i);
1615 }
1616
1617 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1618 struct stmmac_dma_conf *dma_conf,
1619 u32 queue, gfp_t flags)
1620 {
1621 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1622 int i;
1623
1624 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1625 struct dma_desc *p;
1626 int ret;
1627
1628 if (priv->extend_desc)
1629 p = &((rx_q->dma_erx + i)->basic);
1630 else
1631 p = rx_q->dma_rx + i;
1632
1633 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1634 queue);
1635 if (ret)
1636 return ret;
1637
1638 rx_q->buf_alloc_num++;
1639 }
1640
1641 return 0;
1642 }
1643
1644 /**
1645 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1646 * @priv: private structure
1647 * @dma_conf: structure to take the dma data
1648 * @queue: RX queue index
1649 */
1650 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1651 struct stmmac_dma_conf *dma_conf,
1652 u32 queue)
1653 {
1654 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1655 int i;
1656
1657 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1658 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1659
1660 if (!buf->xdp)
1661 continue;
1662
1663 xsk_buff_free(buf->xdp);
1664 buf->xdp = NULL;
1665 }
1666 }
1667
1668 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1669 struct stmmac_dma_conf *dma_conf,
1670 u32 queue)
1671 {
1672 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1673 int i;
1674
1675 /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1676 * in struct xdp_buff_xsk to stash driver specific information. Thus,
1677 * use this macro to make sure there are no size violations.
1678 */
1679 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1680
1681 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1682 struct stmmac_rx_buffer *buf;
1683 dma_addr_t dma_addr;
1684 struct dma_desc *p;
1685
1686 if (priv->extend_desc)
1687 p = (struct dma_desc *)(rx_q->dma_erx + i);
1688 else
1689 p = rx_q->dma_rx + i;
1690
1691 buf = &rx_q->buf_pool[i];
1692
1693 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1694 if (!buf->xdp)
1695 return -ENOMEM;
1696
1697 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1698 stmmac_set_desc_addr(priv, p, dma_addr);
1699 rx_q->buf_alloc_num++;
1700 }
1701
1702 return 0;
1703 }
1704
1705 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1706 {
1707 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1708 return NULL;
1709
1710 return xsk_get_pool_from_qid(priv->dev, queue);
1711 }
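
/* Usage note (added for clarity): callers treat a NULL return as "use the
 * regular page_pool path"; a non-NULL pool switches the queue to the AF_XDP
 * zero-copy buffer handling seen in __init_dma_rx_desc_rings() below.
 */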
1712
1713 /**
1714 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1715 * @priv: driver private structure
1716 * @dma_conf: structure to take the dma data
1717 * @queue: RX queue index
1718 * @flags: gfp flag.
1719 * Description: this function initializes the DMA RX descriptors
1720 * and allocates the socket buffers. It supports the chained and ring
1721 * modes.
1722 */
1723 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1724 struct stmmac_dma_conf *dma_conf,
1725 u32 queue, gfp_t flags)
1726 {
1727 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1728 int ret;
1729
1730 netif_dbg(priv, probe, priv->dev,
1731 "(%s) dma_rx_phy=0x%08x\n", __func__,
1732 (u32)rx_q->dma_rx_phy);
1733
1734 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1735
1736 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1737
1738 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1739
1740 if (rx_q->xsk_pool) {
1741 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1742 MEM_TYPE_XSK_BUFF_POOL,
1743 NULL));
1744 netdev_info(priv->dev,
1745 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1746 rx_q->queue_index);
1747 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1748 } else {
1749 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1750 MEM_TYPE_PAGE_POOL,
1751 rx_q->page_pool));
1752 netdev_info(priv->dev,
1753 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1754 rx_q->queue_index);
1755 }
1756
1757 if (rx_q->xsk_pool) {
1758 /* RX XDP ZC buffer pool may not be populated, e.g.
1759 * xdpsock TX-only.
1760 */
1761 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1762 } else {
1763 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1764 if (ret < 0)
1765 return -ENOMEM;
1766 }
1767
1768 /* Setup the chained descriptor addresses */
1769 if (priv->mode == STMMAC_CHAIN_MODE) {
1770 if (priv->extend_desc)
1771 stmmac_mode_init(priv, rx_q->dma_erx,
1772 rx_q->dma_rx_phy,
1773 dma_conf->dma_rx_size, 1);
1774 else
1775 stmmac_mode_init(priv, rx_q->dma_rx,
1776 rx_q->dma_rx_phy,
1777 dma_conf->dma_rx_size, 0);
1778 }
1779
1780 return 0;
1781 }
1782
1783 static int init_dma_rx_desc_rings(struct net_device *dev,
1784 struct stmmac_dma_conf *dma_conf,
1785 gfp_t flags)
1786 {
1787 struct stmmac_priv *priv = netdev_priv(dev);
1788 u32 rx_count = priv->plat->rx_queues_to_use;
1789 int queue;
1790 int ret;
1791
1792 /* RX INITIALIZATION */
1793 netif_dbg(priv, probe, priv->dev,
1794 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1795
1796 for (queue = 0; queue < rx_count; queue++) {
1797 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1798 if (ret)
1799 goto err_init_rx_buffers;
1800 }
1801
1802 return 0;
1803
1804 err_init_rx_buffers:
1805 while (queue >= 0) {
1806 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1807
1808 if (rx_q->xsk_pool)
1809 dma_free_rx_xskbufs(priv, dma_conf, queue);
1810 else
1811 dma_free_rx_skbufs(priv, dma_conf, queue);
1812
1813 rx_q->buf_alloc_num = 0;
1814 rx_q->xsk_pool = NULL;
1815
1816 queue--;
1817 }
1818
1819 return ret;
1820 }
1821
1822 /**
1823 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1824 * @priv: driver private structure
1825 * @dma_conf: structure to take the dma data
1826 * @queue: TX queue index
1827 * Description: this function initializes the DMA TX descriptors
1828 * and allocates the socket buffers. It supports the chained and ring
1829 * modes.
1830 */
1831 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1832 struct stmmac_dma_conf *dma_conf,
1833 u32 queue)
1834 {
1835 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1836 int i;
1837
1838 netif_dbg(priv, probe, priv->dev,
1839 "(%s) dma_tx_phy=0x%08x\n", __func__,
1840 (u32)tx_q->dma_tx_phy);
1841
1842 /* Setup the chained descriptor addresses */
1843 if (priv->mode == STMMAC_CHAIN_MODE) {
1844 if (priv->extend_desc)
1845 stmmac_mode_init(priv, tx_q->dma_etx,
1846 tx_q->dma_tx_phy,
1847 dma_conf->dma_tx_size, 1);
1848 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1849 stmmac_mode_init(priv, tx_q->dma_tx,
1850 tx_q->dma_tx_phy,
1851 dma_conf->dma_tx_size, 0);
1852 }
1853
1854 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1855
1856 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1857 struct dma_desc *p;
1858
1859 if (priv->extend_desc)
1860 p = &((tx_q->dma_etx + i)->basic);
1861 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1862 p = &((tx_q->dma_entx + i)->basic);
1863 else
1864 p = tx_q->dma_tx + i;
1865
1866 stmmac_clear_desc(priv, p);
1867
1868 tx_q->tx_skbuff_dma[i].buf = 0;
1869 tx_q->tx_skbuff_dma[i].map_as_page = false;
1870 tx_q->tx_skbuff_dma[i].len = 0;
1871 tx_q->tx_skbuff_dma[i].last_segment = false;
1872 tx_q->tx_skbuff[i] = NULL;
1873 }
1874
1875 return 0;
1876 }
1877
1878 static int init_dma_tx_desc_rings(struct net_device *dev,
1879 struct stmmac_dma_conf *dma_conf)
1880 {
1881 struct stmmac_priv *priv = netdev_priv(dev);
1882 u32 tx_queue_cnt;
1883 u32 queue;
1884
1885 tx_queue_cnt = priv->plat->tx_queues_to_use;
1886
1887 for (queue = 0; queue < tx_queue_cnt; queue++)
1888 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1889
1890 return 0;
1891 }
1892
1893 /**
1894 * init_dma_desc_rings - init the RX/TX descriptor rings
1895 * @dev: net device structure
1896 * @dma_conf: structure to take the dma data
1897 * @flags: gfp flag.
1898 * Description: this function initializes the DMA RX/TX descriptors
1899 * and allocates the socket buffers. It supports the chained and ring
1900 * modes.
1901 */
1902 static int init_dma_desc_rings(struct net_device *dev,
1903 struct stmmac_dma_conf *dma_conf,
1904 gfp_t flags)
1905 {
1906 struct stmmac_priv *priv = netdev_priv(dev);
1907 int ret;
1908
1909 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1910 if (ret)
1911 return ret;
1912
1913 ret = init_dma_tx_desc_rings(dev, dma_conf);
1914
1915 stmmac_clear_descriptors(priv, dma_conf);
1916
1917 if (netif_msg_hw(priv))
1918 stmmac_display_rings(priv, dma_conf);
1919
1920 return ret;
1921 }
1922
1923 /**
1924 * dma_free_tx_skbufs - free TX dma buffers
1925 * @priv: private structure
1926 * @dma_conf: structure to take the dma data
1927 * @queue: TX queue index
1928 */
1929 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1930 struct stmmac_dma_conf *dma_conf,
1931 u32 queue)
1932 {
1933 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1934 int i;
1935
1936 tx_q->xsk_frames_done = 0;
1937
1938 for (i = 0; i < dma_conf->dma_tx_size; i++)
1939 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1940
1941 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1942 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1943 tx_q->xsk_frames_done = 0;
1944 tx_q->xsk_pool = NULL;
1945 }
1946 }
1947
1948 /**
1949 * stmmac_free_tx_skbufs - free TX skb buffers
1950 * @priv: private structure
1951 */
1952 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1953 {
1954 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1955 u32 queue;
1956
1957 for (queue = 0; queue < tx_queue_cnt; queue++)
1958 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1959 }
1960
1961 /**
1962 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1963 * @priv: private structure
1964 * @dma_conf: structure to take the dma data
1965 * @queue: RX queue index
1966 */
1967 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1968 struct stmmac_dma_conf *dma_conf,
1969 u32 queue)
1970 {
1971 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1972
1973 /* Release the DMA RX socket buffers */
1974 if (rx_q->xsk_pool)
1975 dma_free_rx_xskbufs(priv, dma_conf, queue);
1976 else
1977 dma_free_rx_skbufs(priv, dma_conf, queue);
1978
1979 rx_q->buf_alloc_num = 0;
1980 rx_q->xsk_pool = NULL;
1981
1982 /* Free DMA regions of consistent memory previously allocated */
1983 if (!priv->extend_desc)
1984 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1985 sizeof(struct dma_desc),
1986 rx_q->dma_rx, rx_q->dma_rx_phy);
1987 else
1988 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1989 sizeof(struct dma_extended_desc),
1990 rx_q->dma_erx, rx_q->dma_rx_phy);
1991
1992 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1993 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1994
1995 kfree(rx_q->buf_pool);
1996 if (rx_q->page_pool)
1997 page_pool_destroy(rx_q->page_pool);
1998 }
1999
2000 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2001 struct stmmac_dma_conf *dma_conf)
2002 {
2003 u32 rx_count = priv->plat->rx_queues_to_use;
2004 u32 queue;
2005
2006 /* Free RX queue resources */
2007 for (queue = 0; queue < rx_count; queue++)
2008 __free_dma_rx_desc_resources(priv, dma_conf, queue);
2009 }
2010
2011 /**
2012 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2013 * @priv: private structure
2014 * @dma_conf: structure to take the dma data
2015 * @queue: TX queue index
2016 */
2017 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2018 struct stmmac_dma_conf *dma_conf,
2019 u32 queue)
2020 {
2021 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2022 size_t size;
2023 void *addr;
2024
2025 /* Release the DMA TX socket buffers */
2026 dma_free_tx_skbufs(priv, dma_conf, queue);
2027
2028 if (priv->extend_desc) {
2029 size = sizeof(struct dma_extended_desc);
2030 addr = tx_q->dma_etx;
2031 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2032 size = sizeof(struct dma_edesc);
2033 addr = tx_q->dma_entx;
2034 } else {
2035 size = sizeof(struct dma_desc);
2036 addr = tx_q->dma_tx;
2037 }
2038
2039 size *= dma_conf->dma_tx_size;
2040
2041 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2042
2043 kfree(tx_q->tx_skbuff_dma);
2044 kfree(tx_q->tx_skbuff);
2045 }
2046
2047 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2048 struct stmmac_dma_conf *dma_conf)
2049 {
2050 u32 tx_count = priv->plat->tx_queues_to_use;
2051 u32 queue;
2052
2053 /* Free TX queue resources */
2054 for (queue = 0; queue < tx_count; queue++)
2055 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2056 }
2057
2058 /**
2059 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2060 * @priv: private structure
2061 * @dma_conf: structure to take the dma data
2062 * @queue: RX queue index
2063 * Description: according to which descriptor can be used (extended or basic)
2064 * this function allocates the resources for the TX and RX paths. In case of
2065 * reception, for example, it pre-allocates the RX socket buffers in order to
2066 * allow the zero-copy mechanism.
2067 */
2068 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2069 struct stmmac_dma_conf *dma_conf,
2070 u32 queue)
2071 {
2072 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2073 struct stmmac_channel *ch = &priv->channel[queue];
2074 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2075 struct page_pool_params pp_params = { 0 };
2076 unsigned int dma_buf_sz_pad, num_pages;
2077 unsigned int napi_id;
2078 int ret;
2079
2080 dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2081 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2082 num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2083
2084 rx_q->queue_index = queue;
2085 rx_q->priv_data = priv;
2086 rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2087
2088 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2089 pp_params.pool_size = dma_conf->dma_rx_size;
2090 pp_params.order = order_base_2(num_pages);
2091 pp_params.nid = dev_to_node(priv->device);
2092 pp_params.dev = priv->device;
2093 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2094 pp_params.offset = stmmac_rx_offset(priv);
2095 pp_params.max_len = dma_conf->dma_buf_sz;
2096
2097 rx_q->page_pool = page_pool_create(&pp_params);
2098 if (IS_ERR(rx_q->page_pool)) {
2099 ret = PTR_ERR(rx_q->page_pool);
2100 rx_q->page_pool = NULL;
2101 return ret;
2102 }
2103
2104 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2105 sizeof(*rx_q->buf_pool),
2106 GFP_KERNEL);
2107 if (!rx_q->buf_pool)
2108 return -ENOMEM;
2109
2110 if (priv->extend_desc) {
2111 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2112 dma_conf->dma_rx_size *
2113 sizeof(struct dma_extended_desc),
2114 &rx_q->dma_rx_phy,
2115 GFP_KERNEL);
2116 if (!rx_q->dma_erx)
2117 return -ENOMEM;
2118
2119 } else {
2120 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2121 dma_conf->dma_rx_size *
2122 sizeof(struct dma_desc),
2123 &rx_q->dma_rx_phy,
2124 GFP_KERNEL);
2125 if (!rx_q->dma_rx)
2126 return -ENOMEM;
2127 }
2128
2129 if (stmmac_xdp_is_enabled(priv) &&
2130 test_bit(queue, priv->af_xdp_zc_qps))
2131 napi_id = ch->rxtx_napi.napi_id;
2132 else
2133 napi_id = ch->rx_napi.napi_id;
2134
2135 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2136 rx_q->queue_index,
2137 napi_id);
2138 if (ret) {
2139 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2140 return -EINVAL;
2141 }
2142
2143 return 0;
2144 }
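
/* Illustrative sketch only (not driver code), showing how the page pool
 * order above is derived. Assuming a hypothetical 1536 byte dma_buf_sz,
 * a few hundred bytes of RX headroom from stmmac_rx_offset() and 4 KiB
 * pages:
 *
 *	dma_buf_sz_pad = offset + 1536 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	num_pages      = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);	/* 1 */
 *	order          = order_base_2(num_pages);			/* 0, single page */
 *
 * Larger buffers (e.g. jumbo MTUs) push num_pages above one, so the pool
 * hands out higher-order pages instead.
 */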
2145
2146 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2147 struct stmmac_dma_conf *dma_conf)
2148 {
2149 u32 rx_count = priv->plat->rx_queues_to_use;
2150 u32 queue;
2151 int ret;
2152
2153 /* RX queues buffers and DMA */
2154 for (queue = 0; queue < rx_count; queue++) {
2155 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2156 if (ret)
2157 goto err_dma;
2158 }
2159
2160 return 0;
2161
2162 err_dma:
2163 free_dma_rx_desc_resources(priv, dma_conf);
2164
2165 return ret;
2166 }
2167
2168 /**
2169 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2170 * @priv: private structure
2171 * @dma_conf: structure to take the dma data
2172 * @queue: TX queue index
2173 * Description: according to which descriptor can be used (extended or basic)
2174 * this function allocates the resources for the TX and RX paths. In case of
2175 * reception, for example, it pre-allocates the RX socket buffers in order to
2176 * allow the zero-copy mechanism.
2177 */
2178 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2179 struct stmmac_dma_conf *dma_conf,
2180 u32 queue)
2181 {
2182 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2183 size_t size;
2184 void *addr;
2185
2186 tx_q->queue_index = queue;
2187 tx_q->priv_data = priv;
2188
2189 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2190 sizeof(*tx_q->tx_skbuff_dma),
2191 GFP_KERNEL);
2192 if (!tx_q->tx_skbuff_dma)
2193 return -ENOMEM;
2194
2195 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2196 sizeof(struct sk_buff *),
2197 GFP_KERNEL);
2198 if (!tx_q->tx_skbuff)
2199 return -ENOMEM;
2200
2201 if (priv->extend_desc)
2202 size = sizeof(struct dma_extended_desc);
2203 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2204 size = sizeof(struct dma_edesc);
2205 else
2206 size = sizeof(struct dma_desc);
2207
2208 size *= dma_conf->dma_tx_size;
2209
2210 addr = dma_alloc_coherent(priv->device, size,
2211 &tx_q->dma_tx_phy, GFP_KERNEL);
2212 if (!addr)
2213 return -ENOMEM;
2214
2215 if (priv->extend_desc)
2216 tx_q->dma_etx = addr;
2217 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2218 tx_q->dma_entx = addr;
2219 else
2220 tx_q->dma_tx = addr;
2221
2222 return 0;
2223 }
2224
2225 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2226 struct stmmac_dma_conf *dma_conf)
2227 {
2228 u32 tx_count = priv->plat->tx_queues_to_use;
2229 u32 queue;
2230 int ret;
2231
2232 /* TX queues buffers and DMA */
2233 for (queue = 0; queue < tx_count; queue++) {
2234 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2235 if (ret)
2236 goto err_dma;
2237 }
2238
2239 return 0;
2240
2241 err_dma:
2242 free_dma_tx_desc_resources(priv, dma_conf);
2243 return ret;
2244 }
2245
2246 /**
2247 * alloc_dma_desc_resources - alloc TX/RX resources.
2248 * @priv: private structure
2249 * @dma_conf: structure to take the dma data
2250 * Description: according to which descriptor can be used (extended or basic)
2251 * this function allocates the resources for the TX and RX paths. In case of
2252 * reception, for example, it pre-allocates the RX socket buffers in order to
2253 * allow the zero-copy mechanism.
2254 */
2255 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2256 struct stmmac_dma_conf *dma_conf)
2257 {
2258 /* RX Allocation */
2259 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2260
2261 if (ret)
2262 return ret;
2263
2264 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2265
2266 return ret;
2267 }
2268
2269 /**
2270 * free_dma_desc_resources - free dma desc resources
2271 * @priv: private structure
2272 * @dma_conf: structure to take the dma data
2273 */
2274 static void free_dma_desc_resources(struct stmmac_priv *priv,
2275 struct stmmac_dma_conf *dma_conf)
2276 {
2277 /* Release the DMA TX socket buffers */
2278 free_dma_tx_desc_resources(priv, dma_conf);
2279
2280 /* Release the DMA RX socket buffers later
2281 * to ensure all pending XDP_TX buffers are returned.
2282 */
2283 free_dma_rx_desc_resources(priv, dma_conf);
2284 }
2285
2286 /**
2287 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2288 * @priv: driver private structure
2289 * Description: It is used for enabling the rx queues in the MAC
2290 */
2291 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2292 {
2293 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2294 int queue;
2295 u8 mode;
2296
2297 for (queue = 0; queue < rx_queues_count; queue++) {
2298 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2299 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2300 }
2301 }
2302
2303 /**
2304 * stmmac_start_rx_dma - start RX DMA channel
2305 * @priv: driver private structure
2306 * @chan: RX channel index
2307 * Description:
2308 * This starts a RX DMA channel
2309 */
2310 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2311 {
2312 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2313 stmmac_start_rx(priv, priv->ioaddr, chan);
2314 }
2315
2316 /**
2317 * stmmac_start_tx_dma - start TX DMA channel
2318 * @priv: driver private structure
2319 * @chan: TX channel index
2320 * Description:
2321 * This starts a TX DMA channel
2322 */
2323 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2324 {
2325 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2326 stmmac_start_tx(priv, priv->ioaddr, chan);
2327 }
2328
2329 /**
2330 * stmmac_stop_rx_dma - stop RX DMA channel
2331 * @priv: driver private structure
2332 * @chan: RX channel index
2333 * Description:
2334 * This stops a RX DMA channel
2335 */
2336 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2337 {
2338 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2339 stmmac_stop_rx(priv, priv->ioaddr, chan);
2340 }
2341
2342 /**
2343 * stmmac_stop_tx_dma - stop TX DMA channel
2344 * @priv: driver private structure
2345 * @chan: TX channel index
2346 * Description:
2347 * This stops a TX DMA channel
2348 */
2349 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2350 {
2351 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2352 stmmac_stop_tx(priv, priv->ioaddr, chan);
2353 }
2354
2355 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2356 {
2357 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2358 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2359 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2360 u32 chan;
2361
2362 for (chan = 0; chan < dma_csr_ch; chan++) {
2363 struct stmmac_channel *ch = &priv->channel[chan];
2364 unsigned long flags;
2365
2366 spin_lock_irqsave(&ch->lock, flags);
2367 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2368 spin_unlock_irqrestore(&ch->lock, flags);
2369 }
2370 }
2371
2372 /**
2373 * stmmac_start_all_dma - start all RX and TX DMA channels
2374 * @priv: driver private structure
2375 * Description:
2376 * This starts all the RX and TX DMA channels
2377 */
2378 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2379 {
2380 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2381 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2382 u32 chan = 0;
2383
2384 for (chan = 0; chan < rx_channels_count; chan++)
2385 stmmac_start_rx_dma(priv, chan);
2386
2387 for (chan = 0; chan < tx_channels_count; chan++)
2388 stmmac_start_tx_dma(priv, chan);
2389 }
2390
2391 /**
2392 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2393 * @priv: driver private structure
2394 * Description:
2395 * This stops the RX and TX DMA channels
2396 */
2397 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2398 {
2399 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2400 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2401 u32 chan = 0;
2402
2403 for (chan = 0; chan < rx_channels_count; chan++)
2404 stmmac_stop_rx_dma(priv, chan);
2405
2406 for (chan = 0; chan < tx_channels_count; chan++)
2407 stmmac_stop_tx_dma(priv, chan);
2408 }
2409
2410 /**
2411 * stmmac_dma_operation_mode - HW DMA operation mode
2412 * @priv: driver private structure
2413 * Description: it is used for configuring the DMA operation mode register in
2414 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2415 */
2416 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2417 {
2418 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2419 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2420 int rxfifosz = priv->plat->rx_fifo_size;
2421 int txfifosz = priv->plat->tx_fifo_size;
2422 u32 txmode = 0;
2423 u32 rxmode = 0;
2424 u32 chan = 0;
2425 u8 qmode = 0;
2426
2427 /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2428 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2429 rxfifosz /= rx_channels_count;
2430 txfifosz /= tx_channels_count;
2431 }
2432
2433 if (priv->plat->force_thresh_dma_mode) {
2434 txmode = tc;
2435 rxmode = tc;
2436 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2437 /*
2438 * In case of GMAC, SF mode can be enabled
2439 * to perform the TX COE in HW. This depends on:
2440 * 1) TX COE being actually supported
2441 * 2) there being no bugged Jumbo frame support
2442 * that requires not inserting the csum in the TDES.
2443 */
2444 txmode = SF_DMA_MODE;
2445 rxmode = SF_DMA_MODE;
2446 priv->xstats.threshold = SF_DMA_MODE;
2447 } else {
2448 txmode = tc;
2449 rxmode = SF_DMA_MODE;
2450 }
2451
2452 /* configure all channels */
2453 for (chan = 0; chan < rx_channels_count; chan++) {
2454 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2455 u32 buf_size;
2456
2457 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2458
2459 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2460 rxfifosz, qmode);
2461
2462 if (rx_q->xsk_pool) {
2463 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2464 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2465 buf_size,
2466 chan);
2467 } else {
2468 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2469 priv->dma_conf.dma_buf_sz,
2470 chan);
2471 }
2472 }
2473
2474 for (chan = 0; chan < tx_channels_count; chan++) {
2475 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2476
2477 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2478 txfifosz, qmode);
2479 }
2480 }
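
/* Worked example for the FIFO split above (values purely illustrative):
 * on a GMAC4/XGMAC core with a hypothetical 16384 byte shared TX FIFO and
 * tx_channels_count == 4, each channel is programmed with 16384 / 4 = 4096
 * bytes. Cores that do not share the FIFO keep the full platform value.
 */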
2481
2482 static void stmmac_xsk_request_timestamp(void *_priv)
2483 {
2484 struct stmmac_metadata_request *meta_req = _priv;
2485
2486 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2487 *meta_req->set_ic = true;
2488 }
2489
2490 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2491 {
2492 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2493 struct stmmac_priv *priv = tx_compl->priv;
2494 struct dma_desc *desc = tx_compl->desc;
2495 bool found = false;
2496 u64 ns = 0;
2497
2498 if (!priv->hwts_tx_en)
2499 return 0;
2500
2501 /* check tx tstamp status */
2502 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2503 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2504 found = true;
2505 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2506 found = true;
2507 }
2508
2509 if (found) {
2510 ns -= priv->plat->cdc_error_adj;
2511 return ns_to_ktime(ns);
2512 }
2513
2514 return 0;
2515 }
2516
2517 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2518 .tmo_request_timestamp = stmmac_xsk_request_timestamp,
2519 .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
2520 };
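
/* Rough call-flow sketch, assuming an AF_XDP application that requested TX
 * timestamps via the XSK TX metadata API: xsk_tx_metadata_request() in the
 * zero-copy xmit path below invokes .tmo_request_timestamp() to arm the
 * descriptor timestamp, and xsk_tx_metadata_complete() in the TX clean path
 * later calls .tmo_fill_timestamp() to report the hardware timestamp back
 * to the application.
 */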
2521
2522 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2523 {
2524 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2525 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2526 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2527 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2528 unsigned int entry = tx_q->cur_tx;
2529 struct dma_desc *tx_desc = NULL;
2530 struct xdp_desc xdp_desc;
2531 bool work_done = true;
2532 u32 tx_set_ic_bit = 0;
2533
2534 /* Avoids TX time-out as we are sharing with slow path */
2535 txq_trans_cond_update(nq);
2536
2537 budget = min(budget, stmmac_tx_avail(priv, queue));
2538
2539 while (budget-- > 0) {
2540 struct stmmac_metadata_request meta_req;
2541 struct xsk_tx_metadata *meta = NULL;
2542 dma_addr_t dma_addr;
2543 bool set_ic;
2544
2545 /* Since we share the ring with the slow path, stop XSK TX desc
2546 * submission when the available TX ring space is below the threshold.
2547 */
2548 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2549 !netif_carrier_ok(priv->dev)) {
2550 work_done = false;
2551 break;
2552 }
2553
2554 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2555 break;
2556
2557 if (priv->est && priv->est->enable &&
2558 priv->est->max_sdu[queue] &&
2559 xdp_desc.len > priv->est->max_sdu[queue]) {
2560 priv->xstats.max_sdu_txq_drop[queue]++;
2561 continue;
2562 }
2563
2564 if (likely(priv->extend_desc))
2565 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2566 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2567 tx_desc = &tx_q->dma_entx[entry].basic;
2568 else
2569 tx_desc = tx_q->dma_tx + entry;
2570
2571 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2572 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2573 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2574
2575 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2576
2577 /* To return the XDP buffer to the XSK pool, we simply call
2578 * xsk_tx_completed(), so we don't need to fill up
2579 * 'buf' and 'xdpf'.
2580 */
2581 tx_q->tx_skbuff_dma[entry].buf = 0;
2582 tx_q->xdpf[entry] = NULL;
2583
2584 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2585 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2586 tx_q->tx_skbuff_dma[entry].last_segment = true;
2587 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2588
2589 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2590
2591 tx_q->tx_count_frames++;
2592
2593 if (!priv->tx_coal_frames[queue])
2594 set_ic = false;
2595 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2596 set_ic = true;
2597 else
2598 set_ic = false;
2599
2600 meta_req.priv = priv;
2601 meta_req.tx_desc = tx_desc;
2602 meta_req.set_ic = &set_ic;
2603 xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2604 &meta_req);
2605 if (set_ic) {
2606 tx_q->tx_count_frames = 0;
2607 stmmac_set_tx_ic(priv, tx_desc);
2608 tx_set_ic_bit++;
2609 }
2610
2611 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2612 true, priv->mode, true, true,
2613 xdp_desc.len);
2614
2615 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2616
2617 xsk_tx_metadata_to_compl(meta,
2618 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2619
2620 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2621 entry = tx_q->cur_tx;
2622 }
2623 u64_stats_update_begin(&txq_stats->napi_syncp);
2624 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2625 u64_stats_update_end(&txq_stats->napi_syncp);
2626
2627 if (tx_desc) {
2628 stmmac_flush_tx_descriptors(priv, queue);
2629 xsk_tx_release(pool);
2630 }
2631
2632 /* Return true only if both of the following conditions are met:
2633 * a) TX budget is still available
2634 * b) work_done = true, i.e. the XSK TX desc peek ran empty (no more
2635 * pending XSK TX for transmission)
2636 */
2637 return !!budget && work_done;
2638 }
2639
2640 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2641 {
2642 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2643 tc += 64;
2644
2645 if (priv->plat->force_thresh_dma_mode)
2646 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2647 else
2648 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2649 chan);
2650
2651 priv->xstats.threshold = tc;
2652 }
2653 }
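
/* Escalation sketch, assuming the default module parameter tc = 64: each
 * bump adds 64, giving 128, 192, 256 and finally 320; once tc exceeds 256
 * the guard above stops further increases. Queues already running in
 * SF_DMA_MODE are never bumped.
 */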
2654
2655 /**
2656 * stmmac_tx_clean - to manage the transmission completion
2657 * @priv: driver private structure
2658 * @budget: napi budget limiting this functions packet handling
2659 * @queue: TX queue index
2660 * @pending_packets: signal to arm the TX coal timer
2661 * Description: it reclaims the transmit resources after transmission completes.
2662 * If some packets still need to be handled, due to TX coalescing, set
2663 * pending_packets to true to make NAPI arm the TX coal timer.
2664 */
2665 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2666 bool *pending_packets)
2667 {
2668 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2669 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2670 unsigned int bytes_compl = 0, pkts_compl = 0;
2671 unsigned int entry, xmits = 0, count = 0;
2672 u32 tx_packets = 0, tx_errors = 0;
2673
2674 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2675
2676 tx_q->xsk_frames_done = 0;
2677
2678 entry = tx_q->dirty_tx;
2679
2680 /* Try to clean all TX complete frame in 1 shot */
2681 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2682 struct xdp_frame *xdpf;
2683 struct sk_buff *skb;
2684 struct dma_desc *p;
2685 int status;
2686
2687 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2688 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2689 xdpf = tx_q->xdpf[entry];
2690 skb = NULL;
2691 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2692 xdpf = NULL;
2693 skb = tx_q->tx_skbuff[entry];
2694 } else {
2695 xdpf = NULL;
2696 skb = NULL;
2697 }
2698
2699 if (priv->extend_desc)
2700 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2701 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2702 p = &tx_q->dma_entx[entry].basic;
2703 else
2704 p = tx_q->dma_tx + entry;
2705
2706 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2707 /* Check if the descriptor is owned by the DMA */
2708 if (unlikely(status & tx_dma_own))
2709 break;
2710
2711 count++;
2712
2713 /* Make sure descriptor fields are read after reading
2714 * the own bit.
2715 */
2716 dma_rmb();
2717
2718 /* Just consider the last segment and ...*/
2719 if (likely(!(status & tx_not_ls))) {
2720 /* ... verify the status error condition */
2721 if (unlikely(status & tx_err)) {
2722 tx_errors++;
2723 if (unlikely(status & tx_err_bump_tc))
2724 stmmac_bump_dma_threshold(priv, queue);
2725 } else {
2726 tx_packets++;
2727 }
2728 if (skb) {
2729 stmmac_get_tx_hwtstamp(priv, p, skb);
2730 } else if (tx_q->xsk_pool &&
2731 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2732 struct stmmac_xsk_tx_complete tx_compl = {
2733 .priv = priv,
2734 .desc = p,
2735 };
2736
2737 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2738 &stmmac_xsk_tx_metadata_ops,
2739 &tx_compl);
2740 }
2741 }
2742
2743 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2744 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2745 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2746 dma_unmap_page(priv->device,
2747 tx_q->tx_skbuff_dma[entry].buf,
2748 tx_q->tx_skbuff_dma[entry].len,
2749 DMA_TO_DEVICE);
2750 else
2751 dma_unmap_single(priv->device,
2752 tx_q->tx_skbuff_dma[entry].buf,
2753 tx_q->tx_skbuff_dma[entry].len,
2754 DMA_TO_DEVICE);
2755 tx_q->tx_skbuff_dma[entry].buf = 0;
2756 tx_q->tx_skbuff_dma[entry].len = 0;
2757 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2758 }
2759
2760 stmmac_clean_desc3(priv, tx_q, p);
2761
2762 tx_q->tx_skbuff_dma[entry].last_segment = false;
2763 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2764
2765 if (xdpf &&
2766 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2767 xdp_return_frame_rx_napi(xdpf);
2768 tx_q->xdpf[entry] = NULL;
2769 }
2770
2771 if (xdpf &&
2772 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2773 xdp_return_frame(xdpf);
2774 tx_q->xdpf[entry] = NULL;
2775 }
2776
2777 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2778 tx_q->xsk_frames_done++;
2779
2780 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2781 if (likely(skb)) {
2782 pkts_compl++;
2783 bytes_compl += skb->len;
2784 dev_consume_skb_any(skb);
2785 tx_q->tx_skbuff[entry] = NULL;
2786 }
2787 }
2788
2789 stmmac_release_tx_desc(priv, p, priv->mode);
2790
2791 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2792 }
2793 tx_q->dirty_tx = entry;
2794
2795 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2796 pkts_compl, bytes_compl);
2797
2798 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2799 queue))) &&
2800 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2801
2802 netif_dbg(priv, tx_done, priv->dev,
2803 "%s: restart transmit\n", __func__);
2804 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2805 }
2806
2807 if (tx_q->xsk_pool) {
2808 bool work_done;
2809
2810 if (tx_q->xsk_frames_done)
2811 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2812
2813 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2814 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2815
2816 /* For XSK TX, we try to send as many as possible.
2817 * If XSK work done (XSK TX desc empty and budget still
2818 * available), return "budget - 1" to reenable TX IRQ.
2819 * Else, return "budget" to make NAPI continue polling.
2820 */
2821 work_done = stmmac_xdp_xmit_zc(priv, queue,
2822 STMMAC_XSK_TX_BUDGET_MAX);
2823 if (work_done)
2824 xmits = budget - 1;
2825 else
2826 xmits = budget;
2827 }
2828
2829 if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2830 stmmac_restart_sw_lpi_timer(priv);
2831
2832 /* We still have pending packets, let's call for a new scheduling */
2833 if (tx_q->dirty_tx != tx_q->cur_tx)
2834 *pending_packets = true;
2835
2836 u64_stats_update_begin(&txq_stats->napi_syncp);
2837 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2838 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2839 u64_stats_inc(&txq_stats->napi.tx_clean);
2840 u64_stats_update_end(&txq_stats->napi_syncp);
2841
2842 priv->xstats.tx_errors += tx_errors;
2843
2844 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2845
2846 /* Combine decisions from TX clean and XSK TX */
2847 return max(count, xmits);
2848 }
2849
2850 /**
2851 * stmmac_tx_err - to manage the tx error
2852 * @priv: driver private structure
2853 * @chan: channel index
2854 * Description: it cleans the descriptors and restarts the transmission
2855 * in case of transmission errors.
2856 */
2857 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2858 {
2859 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2860
2861 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2862
2863 stmmac_stop_tx_dma(priv, chan);
2864 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2865 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2866 stmmac_reset_tx_queue(priv, chan);
2867 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2868 tx_q->dma_tx_phy, chan);
2869 stmmac_start_tx_dma(priv, chan);
2870
2871 priv->xstats.tx_errors++;
2872 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2873 }
2874
2875 /**
2876 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2877 * @priv: driver private structure
2878 * @txmode: TX operating mode
2879 * @rxmode: RX operating mode
2880 * @chan: channel index
2881 * Description: it is used for configuring the DMA operation mode at
2882 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2883 * mode.
2884 */
2885 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2886 u32 rxmode, u32 chan)
2887 {
2888 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2889 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2890 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2891 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2892 int rxfifosz = priv->plat->rx_fifo_size;
2893 int txfifosz = priv->plat->tx_fifo_size;
2894
2895 /* Adjust for real per queue fifo size */
2896 rxfifosz /= rx_channels_count;
2897 txfifosz /= tx_channels_count;
2898
2899 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2900 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2901 }
2902
2903 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2904 {
2905 int ret;
2906
2907 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2908 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2909 if (ret && (ret != -EINVAL)) {
2910 stmmac_global_err(priv);
2911 return true;
2912 }
2913
2914 return false;
2915 }
2916
2917 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2918 {
2919 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2920 &priv->xstats, chan, dir);
2921 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2922 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2923 struct stmmac_channel *ch = &priv->channel[chan];
2924 struct napi_struct *rx_napi;
2925 struct napi_struct *tx_napi;
2926 unsigned long flags;
2927
2928 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2929 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2930
2931 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2932 if (napi_schedule_prep(rx_napi)) {
2933 spin_lock_irqsave(&ch->lock, flags);
2934 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2935 spin_unlock_irqrestore(&ch->lock, flags);
2936 __napi_schedule(rx_napi);
2937 }
2938 }
2939
2940 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2941 if (napi_schedule_prep(tx_napi)) {
2942 spin_lock_irqsave(&ch->lock, flags);
2943 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2944 spin_unlock_irqrestore(&ch->lock, flags);
2945 __napi_schedule(tx_napi);
2946 }
2947 }
2948
2949 return status;
2950 }
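
/* Note: when a queue runs in AF_XDP zero-copy mode both directions are
 * served by the combined rxtx_napi instance, so a single NAPI is scheduled
 * per channel in that case.
 */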
2951
2952 /**
2953 * stmmac_dma_interrupt - DMA ISR
2954 * @priv: driver private structure
2955 * Description: this is the DMA ISR. It is called by the main ISR.
2956 * It calls the dwmac dma routine and schedules the poll method when some
2957 * work can be done.
2958 */
2959 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2960 {
2961 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2962 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2963 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2964 tx_channel_count : rx_channel_count;
2965 u32 chan;
2966 int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2967
2968 /* Make sure we never check beyond our status buffer. */
2969 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2970 channels_to_check = ARRAY_SIZE(status);
2971
2972 for (chan = 0; chan < channels_to_check; chan++)
2973 status[chan] = stmmac_napi_check(priv, chan,
2974 DMA_DIR_RXTX);
2975
2976 for (chan = 0; chan < tx_channel_count; chan++) {
2977 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2978 /* Try to bump up the dma threshold on this failure */
2979 stmmac_bump_dma_threshold(priv, chan);
2980 } else if (unlikely(status[chan] == tx_hard_error)) {
2981 stmmac_tx_err(priv, chan);
2982 }
2983 }
2984 }
2985
2986 /**
2987 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2988 * @priv: driver private structure
2989 * Description: this masks the MMC irq; the counters are in fact managed in SW.
2990 */
2991 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2992 {
2993 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2994 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2995
2996 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2997
2998 if (priv->dma_cap.rmon) {
2999 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3000 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3001 } else
3002 netdev_info(priv->dev, "No MAC Management Counters available\n");
3003 }
3004
3005 /**
3006 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3007 * @priv: driver private structure
3008 * Description:
3009 * newer GMAC chip generations have a register that indicates the
3010 * presence of the optional features/functions.
3011 * This can also be used to override the value passed through the
3012 * platform, which is necessary for old MAC10/100 and GMAC chips.
3013 */
3014 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3015 {
3016 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3017 }
3018
3019 /**
3020 * stmmac_check_ether_addr - check if the MAC addr is valid
3021 * @priv: driver private structure
3022 * Description:
3023 * it verifies that the MAC address is valid; if it is not, it
3024 * generates a random MAC address
3025 */
3026 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3027 {
3028 u8 addr[ETH_ALEN];
3029
3030 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3031 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3032 if (is_valid_ether_addr(addr))
3033 eth_hw_addr_set(priv->dev, addr);
3034 else
3035 eth_hw_addr_random(priv->dev);
3036 dev_info(priv->device, "device MAC address %pM\n",
3037 priv->dev->dev_addr);
3038 }
3039 }
3040
3041 /**
3042 * stmmac_init_dma_engine - DMA init.
3043 * @priv: driver private structure
3044 * Description:
3045 * It inits the DMA by invoking the specific MAC/GMAC callback.
3046 * Some DMA parameters can be passed from the platform;
3047 * if they are not passed, a default is kept for the MAC or GMAC.
3048 */
3049 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3050 {
3051 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3052 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3053 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3054 struct stmmac_rx_queue *rx_q;
3055 struct stmmac_tx_queue *tx_q;
3056 u32 chan = 0;
3057 int ret = 0;
3058
3059 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3060 dev_err(priv->device, "Invalid DMA configuration\n");
3061 return -EINVAL;
3062 }
3063
3064 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3065 priv->plat->dma_cfg->atds = 1;
3066
3067 ret = stmmac_reset(priv, priv->ioaddr);
3068 if (ret) {
3069 dev_err(priv->device, "Failed to reset the dma\n");
3070 return ret;
3071 }
3072
3073 /* DMA Configuration */
3074 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3075
3076 if (priv->plat->axi)
3077 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3078
3079 /* DMA CSR Channel configuration */
3080 for (chan = 0; chan < dma_csr_ch; chan++) {
3081 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3082 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3083 }
3084
3085 /* DMA RX Channel Configuration */
3086 for (chan = 0; chan < rx_channels_count; chan++) {
3087 rx_q = &priv->dma_conf.rx_queue[chan];
3088
3089 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3090 rx_q->dma_rx_phy, chan);
3091
3092 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3093 (rx_q->buf_alloc_num *
3094 sizeof(struct dma_desc));
3095 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3096 rx_q->rx_tail_addr, chan);
3097 }
3098
3099 /* DMA TX Channel Configuration */
3100 for (chan = 0; chan < tx_channels_count; chan++) {
3101 tx_q = &priv->dma_conf.tx_queue[chan];
3102
3103 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3104 tx_q->dma_tx_phy, chan);
3105
3106 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3107 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3108 tx_q->tx_tail_addr, chan);
3109 }
3110
3111 return ret;
3112 }
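
/* Illustrative tail-pointer arithmetic for the RX setup above, assuming a
 * hypothetical ring of 512 fully pre-filled buffers and 16 byte normal
 * descriptors:
 *
 *	rx_tail_addr = dma_rx_phy + 512 * sizeof(struct dma_desc);
 *
 * i.e. the address just past the last filled descriptor, so the DMA may
 * consume the whole ring before it has to wait for a refill.
 */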
3113
3114 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3115 {
3116 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3117 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3118 struct stmmac_channel *ch;
3119 struct napi_struct *napi;
3120
3121 if (!tx_coal_timer)
3122 return;
3123
3124 ch = &priv->channel[tx_q->queue_index];
3125 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3126
3127 /* Arm timer only if napi is not already scheduled.
3128 * Try to cancel any timer if napi is scheduled; the timer will be armed
3129 * again in the next scheduled napi.
3130 */
3131 if (unlikely(!napi_is_scheduled(napi)))
3132 hrtimer_start(&tx_q->txtimer,
3133 STMMAC_COAL_TIMER(tx_coal_timer),
3134 HRTIMER_MODE_REL);
3135 else
3136 hrtimer_try_to_cancel(&tx_q->txtimer);
3137 }
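
/* Coalescing behaviour sketch, assuming the defaults programmed in
 * stmmac_init_coalesce() below: TX completions are signalled either when
 * the per-queue frame counter reaches tx_coal_frames (IC bit set on that
 * descriptor) or, under light traffic, when this hrtimer expires and
 * schedules the TX NAPI to reap the remaining descriptors.
 */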
3138
3139 /**
3140 * stmmac_tx_timer - mitigation sw timer for tx.
3141 * @t: pointer to the hrtimer embedded in the TX queue
3142 * Description:
3143 * This is the timer handler to directly invoke the stmmac_tx_clean.
3144 */
3145 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3146 {
3147 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3148 struct stmmac_priv *priv = tx_q->priv_data;
3149 struct stmmac_channel *ch;
3150 struct napi_struct *napi;
3151
3152 ch = &priv->channel[tx_q->queue_index];
3153 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3154
3155 if (likely(napi_schedule_prep(napi))) {
3156 unsigned long flags;
3157
3158 spin_lock_irqsave(&ch->lock, flags);
3159 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3160 spin_unlock_irqrestore(&ch->lock, flags);
3161 __napi_schedule(napi);
3162 }
3163
3164 return HRTIMER_NORESTART;
3165 }
3166
3167 /**
3168 * stmmac_init_coalesce - init mitigation options.
3169 * @priv: driver private structure
3170 * Description:
3171 * This inits the coalesce parameters: i.e. timer rate,
3172 * timer handler and default threshold used for enabling the
3173 * interrupt on completion bit.
3174 */
3175 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3176 {
3177 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3178 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3179 u32 chan;
3180
3181 for (chan = 0; chan < tx_channel_count; chan++) {
3182 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3183
3184 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3185 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3186
3187 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3188 tx_q->txtimer.function = stmmac_tx_timer;
3189 }
3190
3191 for (chan = 0; chan < rx_channel_count; chan++)
3192 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3193 }
3194
3195 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3196 {
3197 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3198 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3199 u32 chan;
3200
3201 /* set TX ring length */
3202 for (chan = 0; chan < tx_channels_count; chan++)
3203 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3204 (priv->dma_conf.dma_tx_size - 1), chan);
3205
3206 /* set RX ring length */
3207 for (chan = 0; chan < rx_channels_count; chan++)
3208 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3209 (priv->dma_conf.dma_rx_size - 1), chan);
3210 }
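
/* Note on the "- 1" above: the ring-length registers take the highest
 * descriptor index rather than the ring size, so e.g. a 512-entry ring is
 * programmed as 511.
 */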
3211
3212 /**
3213 * stmmac_set_tx_queue_weight - Set TX queue weight
3214 * @priv: driver private structure
3215 * Description: It is used for setting TX queues weight
3216 */
3217 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3218 {
3219 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3220 u32 weight;
3221 u32 queue;
3222
3223 for (queue = 0; queue < tx_queues_count; queue++) {
3224 weight = priv->plat->tx_queues_cfg[queue].weight;
3225 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3226 }
3227 }
3228
3229 /**
3230 * stmmac_configure_cbs - Configure CBS in TX queue
3231 * @priv: driver private structure
3232 * Description: It is used for configuring CBS in AVB TX queues
3233 */
3234 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3235 {
3236 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3237 u32 mode_to_use;
3238 u32 queue;
3239
3240 /* queue 0 is reserved for legacy traffic */
3241 for (queue = 1; queue < tx_queues_count; queue++) {
3242 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3243 if (mode_to_use == MTL_QUEUE_DCB)
3244 continue;
3245
3246 stmmac_config_cbs(priv, priv->hw,
3247 priv->plat->tx_queues_cfg[queue].send_slope,
3248 priv->plat->tx_queues_cfg[queue].idle_slope,
3249 priv->plat->tx_queues_cfg[queue].high_credit,
3250 priv->plat->tx_queues_cfg[queue].low_credit,
3251 queue);
3252 }
3253 }
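
/* Hypothetical CBS example (values are illustrative, not defaults): a
 * platform reserving bandwidth for an AVB class on queue 1 would set
 * mode_to_use = MTL_QUEUE_AVB and provide send_slope/idle_slope and
 * high_credit/low_credit values sized for the reserved rate; queue 0 is
 * always skipped here because it carries legacy best-effort traffic.
 */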
3254
3255 /**
3256 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3257 * @priv: driver private structure
3258 * Description: It is used for mapping RX queues to RX dma channels
3259 */
3260 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3261 {
3262 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3263 u32 queue;
3264 u32 chan;
3265
3266 for (queue = 0; queue < rx_queues_count; queue++) {
3267 chan = priv->plat->rx_queues_cfg[queue].chan;
3268 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3269 }
3270 }
3271
3272 /**
3273 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3274 * @priv: driver private structure
3275 * Description: It is used for configuring the RX Queue Priority
3276 */
3277 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3278 {
3279 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3280 u32 queue;
3281 u32 prio;
3282
3283 for (queue = 0; queue < rx_queues_count; queue++) {
3284 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3285 continue;
3286
3287 prio = priv->plat->rx_queues_cfg[queue].prio;
3288 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3289 }
3290 }
3291
3292 /**
3293 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3294 * @priv: driver private structure
3295 * Description: It is used for configuring the TX Queue Priority
3296 */
3297 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3298 {
3299 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3300 u32 queue;
3301 u32 prio;
3302
3303 for (queue = 0; queue < tx_queues_count; queue++) {
3304 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3305 continue;
3306
3307 prio = priv->plat->tx_queues_cfg[queue].prio;
3308 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3309 }
3310 }
3311
3312 /**
3313 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3314 * @priv: driver private structure
3315 * Description: It is used for configuring the RX queue routing
3316 */
3317 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3318 {
3319 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3320 u32 queue;
3321 u8 packet;
3322
3323 for (queue = 0; queue < rx_queues_count; queue++) {
3324 /* no specific packet type routing specified for the queue */
3325 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3326 continue;
3327
3328 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3329 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3330 }
3331 }
3332
3333 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3334 {
3335 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3336 priv->rss.enable = false;
3337 return;
3338 }
3339
3340 if (priv->dev->features & NETIF_F_RXHASH)
3341 priv->rss.enable = true;
3342 else
3343 priv->rss.enable = false;
3344
3345 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3346 priv->plat->rx_queues_to_use);
3347 }
3348
3349 /**
3350 * stmmac_mtl_configuration - Configure MTL
3351 * @priv: driver private structure
3352 * Description: It is used for configuring MTL
3353 */
3354 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3355 {
3356 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3357 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3358
3359 if (tx_queues_count > 1)
3360 stmmac_set_tx_queue_weight(priv);
3361
3362 /* Configure MTL RX algorithms */
3363 if (rx_queues_count > 1)
3364 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3365 priv->plat->rx_sched_algorithm);
3366
3367 /* Configure MTL TX algorithms */
3368 if (tx_queues_count > 1)
3369 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3370 priv->plat->tx_sched_algorithm);
3371
3372 /* Configure CBS in AVB TX queues */
3373 if (tx_queues_count > 1)
3374 stmmac_configure_cbs(priv);
3375
3376 /* Map RX MTL to DMA channels */
3377 stmmac_rx_queue_dma_chan_map(priv);
3378
3379 /* Enable MAC RX Queues */
3380 stmmac_mac_enable_rx_queues(priv);
3381
3382 /* Set RX priorities */
3383 if (rx_queues_count > 1)
3384 stmmac_mac_config_rx_queues_prio(priv);
3385
3386 /* Set TX priorities */
3387 if (tx_queues_count > 1)
3388 stmmac_mac_config_tx_queues_prio(priv);
3389
3390 /* Set RX routing */
3391 if (rx_queues_count > 1)
3392 stmmac_mac_config_rx_queues_routing(priv);
3393
3394 /* Receive Side Scaling */
3395 if (rx_queues_count > 1)
3396 stmmac_mac_config_rss(priv);
3397 }
3398
3399 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3400 {
3401 if (priv->dma_cap.asp) {
3402 netdev_info(priv->dev, "Enabling Safety Features\n");
3403 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3404 priv->plat->safety_feat_cfg);
3405 } else {
3406 netdev_info(priv->dev, "No Safety Features support found\n");
3407 }
3408 }
3409
3410 /**
3411 * stmmac_hw_setup - setup mac in a usable state.
3412 * @dev : pointer to the device structure.
3413 * @ptp_register: register PTP if set
3414 * Description:
3415  * this is the main function to set up the HW in a usable state: the DMA
3416  * engine is reset, the core registers are configured (e.g. AXI,
3417  * checksum features, timers) and the DMA is ready to start receiving and
3418  * transmitting.
3419 * Return value:
3420 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3421 * file on failure.
3422 */
3423 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3424 {
3425 struct stmmac_priv *priv = netdev_priv(dev);
3426 u32 rx_cnt = priv->plat->rx_queues_to_use;
3427 u32 tx_cnt = priv->plat->tx_queues_to_use;
3428 bool sph_en;
3429 u32 chan;
3430 int ret;
3431
3432 /* Make sure RX clock is enabled */
3433 if (priv->hw->phylink_pcs)
3434 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3435
3436 /* DMA initialization and SW reset */
3437 ret = stmmac_init_dma_engine(priv);
3438 if (ret < 0) {
3439 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3440 __func__);
3441 return ret;
3442 }
3443
3444 /* Copy the MAC addr into the HW */
3445 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3446
3447 /* PS and related bits will be programmed according to the speed */
3448 if (priv->hw->pcs) {
3449 int speed = priv->plat->mac_port_sel_speed;
3450
3451 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3452 (speed == SPEED_1000)) {
3453 priv->hw->ps = speed;
3454 } else {
3455 dev_warn(priv->device, "invalid port speed\n");
3456 priv->hw->ps = 0;
3457 }
3458 }
3459
3460 /* Initialize the MAC Core */
3461 stmmac_core_init(priv, priv->hw, dev);
3462
3463 	/* Initialize MTL */
3464 stmmac_mtl_configuration(priv);
3465
3466 /* Initialize Safety Features */
3467 stmmac_safety_feat_configuration(priv);
3468
3469 ret = stmmac_rx_ipc(priv, priv->hw);
3470 if (!ret) {
3471 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3472 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3473 priv->hw->rx_csum = 0;
3474 }
3475
3476 /* Enable the MAC Rx/Tx */
3477 stmmac_mac_set(priv, priv->ioaddr, true);
3478
3479 /* Set the HW DMA mode and the COE */
3480 stmmac_dma_operation_mode(priv);
3481
3482 stmmac_mmc_setup(priv);
3483
3484 if (ptp_register) {
3485 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3486 if (ret < 0)
3487 netdev_warn(priv->dev,
3488 "failed to enable PTP reference clock: %pe\n",
3489 ERR_PTR(ret));
3490 }
3491
3492 ret = stmmac_init_ptp(priv);
3493 if (ret == -EOPNOTSUPP)
3494 netdev_info(priv->dev, "PTP not supported by HW\n");
3495 else if (ret)
3496 netdev_warn(priv->dev, "PTP init failed\n");
3497 else if (ptp_register)
3498 stmmac_ptp_register(priv);
3499
3500 if (priv->use_riwt) {
3501 u32 queue;
3502
3503 for (queue = 0; queue < rx_cnt; queue++) {
3504 if (!priv->rx_riwt[queue])
3505 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3506
3507 stmmac_rx_watchdog(priv, priv->ioaddr,
3508 priv->rx_riwt[queue], queue);
3509 }
3510 }
3511
3512 if (priv->hw->pcs)
3513 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3514
3515 /* set TX and RX rings length */
3516 stmmac_set_rings_length(priv);
3517
3518 /* Enable TSO */
3519 if (priv->tso) {
3520 for (chan = 0; chan < tx_cnt; chan++) {
3521 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3522
3523 /* TSO and TBS cannot co-exist */
3524 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3525 continue;
3526
3527 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3528 }
3529 }
3530
3531 /* Enable Split Header */
3532 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3533 for (chan = 0; chan < rx_cnt; chan++)
3534 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3535
3536
3537 /* VLAN Tag Insertion */
3538 if (priv->dma_cap.vlins)
3539 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3540
3541 /* TBS */
3542 for (chan = 0; chan < tx_cnt; chan++) {
3543 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3544 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3545
3546 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3547 }
3548
3549 /* Configure real RX and TX queues */
3550 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3551 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3552
3553 /* Start the ball rolling... */
3554 stmmac_start_all_dma(priv);
3555
3556 stmmac_set_hw_vlan_mode(priv, priv->hw);
3557
3558 return 0;
3559 }
3560
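/**
 * stmmac_hw_teardown - undo stmmac_hw_setup
 * @dev: device pointer
 * Description: disable the PTP reference clock that was enabled during
 * HW setup.
 */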
3561 static void stmmac_hw_teardown(struct net_device *dev)
3562 {
3563 struct stmmac_priv *priv = netdev_priv(dev);
3564
3565 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3566 }
3567
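/**
 * stmmac_free_irq - free the IRQ lines requested by the driver
 * @dev: device pointer
 * @irq_err: point in the request sequence where an error occurred
 * @irq_idx: number of per-queue TX/RX vectors already requested (for
 *	     partial unwinding)
 * Description: release the IRQ lines in reverse order of allocation. The
 * switch cases fall through so that a failure at any stage of
 * stmmac_request_irq() frees exactly the lines that were already requested.
 */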
3568 static void stmmac_free_irq(struct net_device *dev,
3569 enum request_irq_err irq_err, int irq_idx)
3570 {
3571 struct stmmac_priv *priv = netdev_priv(dev);
3572 int j;
3573
3574 switch (irq_err) {
3575 case REQ_IRQ_ERR_ALL:
3576 irq_idx = priv->plat->tx_queues_to_use;
3577 fallthrough;
3578 case REQ_IRQ_ERR_TX:
3579 for (j = irq_idx - 1; j >= 0; j--) {
3580 if (priv->tx_irq[j] > 0) {
3581 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3582 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3583 }
3584 }
3585 irq_idx = priv->plat->rx_queues_to_use;
3586 fallthrough;
3587 case REQ_IRQ_ERR_RX:
3588 for (j = irq_idx - 1; j >= 0; j--) {
3589 if (priv->rx_irq[j] > 0) {
3590 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3591 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3592 }
3593 }
3594
3595 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3596 free_irq(priv->sfty_ue_irq, dev);
3597 fallthrough;
3598 case REQ_IRQ_ERR_SFTY_UE:
3599 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3600 free_irq(priv->sfty_ce_irq, dev);
3601 fallthrough;
3602 case REQ_IRQ_ERR_SFTY_CE:
3603 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3604 free_irq(priv->lpi_irq, dev);
3605 fallthrough;
3606 case REQ_IRQ_ERR_LPI:
3607 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3608 free_irq(priv->wol_irq, dev);
3609 fallthrough;
3610 case REQ_IRQ_ERR_SFTY:
3611 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3612 free_irq(priv->sfty_irq, dev);
3613 fallthrough;
3614 case REQ_IRQ_ERR_WOL:
3615 free_irq(dev->irq, dev);
3616 fallthrough;
3617 case REQ_IRQ_ERR_MAC:
3618 case REQ_IRQ_ERR_NO:
3619 /* If MAC IRQ request error, no more IRQ to free */
3620 break;
3621 }
3622 }
3623
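/**
 * stmmac_request_irq_multi_msi - request per-vector MSI IRQ lines
 * @dev: device pointer
 * Description: request the MAC, WoL, LPI and safety IRQ lines plus one MSI
 * vector per RX and TX queue, spreading the per-queue vectors across the
 * online CPUs with affinity hints. On failure, the lines already requested
 * are released through stmmac_free_irq().
 */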
3624 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3625 {
3626 struct stmmac_priv *priv = netdev_priv(dev);
3627 enum request_irq_err irq_err;
3628 cpumask_t cpu_mask;
3629 int irq_idx = 0;
3630 char *int_name;
3631 int ret;
3632 int i;
3633
3634 /* For common interrupt */
3635 int_name = priv->int_name_mac;
3636 sprintf(int_name, "%s:%s", dev->name, "mac");
3637 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3638 0, int_name, dev);
3639 if (unlikely(ret < 0)) {
3640 netdev_err(priv->dev,
3641 "%s: alloc mac MSI %d (error: %d)\n",
3642 __func__, dev->irq, ret);
3643 irq_err = REQ_IRQ_ERR_MAC;
3644 goto irq_error;
3645 }
3646
3647 	/* Request the Wake IRQ in case another line
3648 * is used for WoL
3649 */
3650 priv->wol_irq_disabled = true;
3651 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3652 int_name = priv->int_name_wol;
3653 sprintf(int_name, "%s:%s", dev->name, "wol");
3654 ret = request_irq(priv->wol_irq,
3655 stmmac_mac_interrupt,
3656 0, int_name, dev);
3657 if (unlikely(ret < 0)) {
3658 netdev_err(priv->dev,
3659 "%s: alloc wol MSI %d (error: %d)\n",
3660 __func__, priv->wol_irq, ret);
3661 irq_err = REQ_IRQ_ERR_WOL;
3662 goto irq_error;
3663 }
3664 }
3665
3666 	/* Request the LPI IRQ in case another line
3667 * is used for LPI
3668 */
3669 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3670 int_name = priv->int_name_lpi;
3671 sprintf(int_name, "%s:%s", dev->name, "lpi");
3672 ret = request_irq(priv->lpi_irq,
3673 stmmac_mac_interrupt,
3674 0, int_name, dev);
3675 if (unlikely(ret < 0)) {
3676 netdev_err(priv->dev,
3677 "%s: alloc lpi MSI %d (error: %d)\n",
3678 __func__, priv->lpi_irq, ret);
3679 irq_err = REQ_IRQ_ERR_LPI;
3680 goto irq_error;
3681 }
3682 }
3683
3684 /* Request the common Safety Feature Correctible/Uncorrectible
3685 	 * Error line in case another line is used
3686 */
3687 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3688 int_name = priv->int_name_sfty;
3689 sprintf(int_name, "%s:%s", dev->name, "safety");
3690 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3691 0, int_name, dev);
3692 if (unlikely(ret < 0)) {
3693 netdev_err(priv->dev,
3694 "%s: alloc sfty MSI %d (error: %d)\n",
3695 __func__, priv->sfty_irq, ret);
3696 irq_err = REQ_IRQ_ERR_SFTY;
3697 goto irq_error;
3698 }
3699 }
3700
3701 /* Request the Safety Feature Correctible Error line in
3702 	 * case another line is used
3703 */
3704 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3705 int_name = priv->int_name_sfty_ce;
3706 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3707 ret = request_irq(priv->sfty_ce_irq,
3708 stmmac_safety_interrupt,
3709 0, int_name, dev);
3710 if (unlikely(ret < 0)) {
3711 netdev_err(priv->dev,
3712 "%s: alloc sfty ce MSI %d (error: %d)\n",
3713 __func__, priv->sfty_ce_irq, ret);
3714 irq_err = REQ_IRQ_ERR_SFTY_CE;
3715 goto irq_error;
3716 }
3717 }
3718
3719 /* Request the Safety Feature Uncorrectible Error line in
3720 	 * case another line is used
3721 */
3722 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3723 int_name = priv->int_name_sfty_ue;
3724 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3725 ret = request_irq(priv->sfty_ue_irq,
3726 stmmac_safety_interrupt,
3727 0, int_name, dev);
3728 if (unlikely(ret < 0)) {
3729 netdev_err(priv->dev,
3730 "%s: alloc sfty ue MSI %d (error: %d)\n",
3731 __func__, priv->sfty_ue_irq, ret);
3732 irq_err = REQ_IRQ_ERR_SFTY_UE;
3733 goto irq_error;
3734 }
3735 }
3736
3737 /* Request Rx MSI irq */
3738 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3739 if (i >= MTL_MAX_RX_QUEUES)
3740 break;
3741 if (priv->rx_irq[i] == 0)
3742 continue;
3743
3744 int_name = priv->int_name_rx_irq[i];
3745 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3746 ret = request_irq(priv->rx_irq[i],
3747 stmmac_msi_intr_rx,
3748 0, int_name, &priv->dma_conf.rx_queue[i]);
3749 if (unlikely(ret < 0)) {
3750 netdev_err(priv->dev,
3751 "%s: alloc rx-%d MSI %d (error: %d)\n",
3752 __func__, i, priv->rx_irq[i], ret);
3753 irq_err = REQ_IRQ_ERR_RX;
3754 irq_idx = i;
3755 goto irq_error;
3756 }
3757 cpumask_clear(&cpu_mask);
3758 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3759 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3760 }
3761
3762 /* Request Tx MSI irq */
3763 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3764 if (i >= MTL_MAX_TX_QUEUES)
3765 break;
3766 if (priv->tx_irq[i] == 0)
3767 continue;
3768
3769 int_name = priv->int_name_tx_irq[i];
3770 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3771 ret = request_irq(priv->tx_irq[i],
3772 stmmac_msi_intr_tx,
3773 0, int_name, &priv->dma_conf.tx_queue[i]);
3774 if (unlikely(ret < 0)) {
3775 netdev_err(priv->dev,
3776 "%s: alloc tx-%d MSI %d (error: %d)\n",
3777 __func__, i, priv->tx_irq[i], ret);
3778 irq_err = REQ_IRQ_ERR_TX;
3779 irq_idx = i;
3780 goto irq_error;
3781 }
3782 cpumask_clear(&cpu_mask);
3783 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3784 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3785 }
3786
3787 return 0;
3788
3789 irq_error:
3790 stmmac_free_irq(dev, irq_err, irq_idx);
3791 return ret;
3792 }
3793
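/**
 * stmmac_request_irq_single - request the shared IRQ lines
 * @dev: device pointer
 * Description: legacy/shared interrupt path: request the main device IRQ
 * and, when they are routed to separate lines, the WoL, LPI and safety IRQs.
 */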
3794 static int stmmac_request_irq_single(struct net_device *dev)
3795 {
3796 struct stmmac_priv *priv = netdev_priv(dev);
3797 enum request_irq_err irq_err;
3798 int ret;
3799
3800 ret = request_irq(dev->irq, stmmac_interrupt,
3801 IRQF_SHARED, dev->name, dev);
3802 if (unlikely(ret < 0)) {
3803 netdev_err(priv->dev,
3804 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3805 __func__, dev->irq, ret);
3806 irq_err = REQ_IRQ_ERR_MAC;
3807 goto irq_error;
3808 }
3809
3810 	/* Request the Wake IRQ in case another line
3811 * is used for WoL
3812 */
3813 priv->wol_irq_disabled = true;
3814 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3815 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3816 IRQF_SHARED, dev->name, dev);
3817 if (unlikely(ret < 0)) {
3818 netdev_err(priv->dev,
3819 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3820 __func__, priv->wol_irq, ret);
3821 irq_err = REQ_IRQ_ERR_WOL;
3822 goto irq_error;
3823 }
3824 }
3825
3826 /* Request the IRQ lines */
3827 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3828 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3829 IRQF_SHARED, dev->name, dev);
3830 if (unlikely(ret < 0)) {
3831 netdev_err(priv->dev,
3832 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3833 __func__, priv->lpi_irq, ret);
3834 irq_err = REQ_IRQ_ERR_LPI;
3835 goto irq_error;
3836 }
3837 }
3838
3839 /* Request the common Safety Feature Correctible/Uncorrectible
3840 	 * Error line in case another line is used
3841 */
3842 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3843 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3844 IRQF_SHARED, dev->name, dev);
3845 if (unlikely(ret < 0)) {
3846 netdev_err(priv->dev,
3847 "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3848 __func__, priv->sfty_irq, ret);
3849 irq_err = REQ_IRQ_ERR_SFTY;
3850 goto irq_error;
3851 }
3852 }
3853
3854 return 0;
3855
3856 irq_error:
3857 stmmac_free_irq(dev, irq_err, 0);
3858 return ret;
3859 }
3860
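/**
 * stmmac_request_irq - request the IRQ lines for the device
 * @dev: device pointer
 * Description: dispatch to the per-vector MSI request path when the platform
 * sets STMMAC_FLAG_MULTI_MSI_EN, otherwise request a single shared IRQ line.
 */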
3861 static int stmmac_request_irq(struct net_device *dev)
3862 {
3863 struct stmmac_priv *priv = netdev_priv(dev);
3864 int ret;
3865
3866 /* Request the IRQ lines */
3867 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3868 ret = stmmac_request_irq_multi_msi(dev);
3869 else
3870 ret = stmmac_request_irq_single(dev);
3871
3872 return ret;
3873 }
3874
3875 /**
3876 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3877 * @priv: driver private structure
3878  * @mtu: MTU to set up the DMA queues and buffers with
3879  * Description: Allocate and populate a dma_conf based on the provided MTU.
3880  * Allocate the Tx/Rx DMA queues and init them.
3881 * Return value:
3882 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3883 */
3884 static struct stmmac_dma_conf *
3885 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3886 {
3887 struct stmmac_dma_conf *dma_conf;
3888 int chan, bfsize, ret;
3889
3890 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3891 if (!dma_conf) {
3892 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3893 __func__);
3894 return ERR_PTR(-ENOMEM);
3895 }
3896
3897 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3898 if (bfsize < 0)
3899 bfsize = 0;
3900
3901 if (bfsize < BUF_SIZE_16KiB)
3902 bfsize = stmmac_set_bfsize(mtu, 0);
3903
3904 dma_conf->dma_buf_sz = bfsize;
3905 	/* Choose the Tx/Rx ring sizes from the ones already defined in
3906 	 * the priv struct, if set.
3907 */
3908 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3909 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3910
3911 if (!dma_conf->dma_tx_size)
3912 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3913 if (!dma_conf->dma_rx_size)
3914 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3915
3916 /* Earlier check for TBS */
3917 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3918 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3919 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3920
3921 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3922 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3923 }
3924
3925 ret = alloc_dma_desc_resources(priv, dma_conf);
3926 if (ret < 0) {
3927 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3928 __func__);
3929 goto alloc_error;
3930 }
3931
3932 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3933 if (ret < 0) {
3934 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3935 __func__);
3936 goto init_error;
3937 }
3938
3939 return dma_conf;
3940
3941 init_error:
3942 free_dma_desc_resources(priv, dma_conf);
3943 alloc_error:
3944 kfree(dma_conf);
3945 return ERR_PTR(ret);
3946 }
3947
3948 /**
3949 * __stmmac_open - open entry point of the driver
3950 * @dev : pointer to the device structure.
3951 * @dma_conf : structure to take the dma data
3952 * Description:
3953 * This function is the open entry point of the driver.
3954 * Return value:
3955 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3956 * file on failure.
3957 */
3958 static int __stmmac_open(struct net_device *dev,
3959 struct stmmac_dma_conf *dma_conf)
3960 {
3961 struct stmmac_priv *priv = netdev_priv(dev);
3962 int mode = priv->plat->phy_interface;
3963 u32 chan;
3964 int ret;
3965
3966 /* Initialise the tx lpi timer, converting from msec to usec */
3967 if (!priv->tx_lpi_timer)
3968 priv->tx_lpi_timer = eee_timer * 1000;
3969
3970 ret = pm_runtime_resume_and_get(priv->device);
3971 if (ret < 0)
3972 return ret;
3973
3974 if ((!priv->hw->xpcs ||
3975 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
3976 ret = stmmac_init_phy(dev);
3977 if (ret) {
3978 netdev_err(priv->dev,
3979 "%s: Cannot attach to PHY (error: %d)\n",
3980 __func__, ret);
3981 goto init_phy_error;
3982 }
3983 }
3984
3985 buf_sz = dma_conf->dma_buf_sz;
3986 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3987 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3988 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3989 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3990
3991 stmmac_reset_queues_param(priv);
3992
3993 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3994 priv->plat->serdes_powerup) {
3995 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3996 if (ret < 0) {
3997 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3998 __func__);
3999 goto init_error;
4000 }
4001 }
4002
4003 ret = stmmac_hw_setup(dev, true);
4004 if (ret < 0) {
4005 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4006 goto init_error;
4007 }
4008
4009 stmmac_init_coalesce(priv);
4010
4011 phylink_start(priv->phylink);
4012 /* We may have called phylink_speed_down before */
4013 phylink_speed_up(priv->phylink);
4014
4015 ret = stmmac_request_irq(dev);
4016 if (ret)
4017 goto irq_error;
4018
4019 stmmac_enable_all_queues(priv);
4020 netif_tx_start_all_queues(priv->dev);
4021 stmmac_enable_all_dma_irq(priv);
4022
4023 return 0;
4024
4025 irq_error:
4026 phylink_stop(priv->phylink);
4027
4028 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4029 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4030
4031 stmmac_hw_teardown(dev);
4032 init_error:
4033 phylink_disconnect_phy(priv->phylink);
4034 init_phy_error:
4035 pm_runtime_put(priv->device);
4036 return ret;
4037 }
4038
4039 static int stmmac_open(struct net_device *dev)
4040 {
4041 struct stmmac_priv *priv = netdev_priv(dev);
4042 struct stmmac_dma_conf *dma_conf;
4043 int ret;
4044
4045 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4046 if (IS_ERR(dma_conf))
4047 return PTR_ERR(dma_conf);
4048
4049 ret = __stmmac_open(dev, dma_conf);
4050 if (ret)
4051 free_dma_desc_resources(priv, dma_conf);
4052
4053 kfree(dma_conf);
4054 return ret;
4055 }
4056
4057 /**
4058 * stmmac_release - close entry point of the driver
4059 * @dev : device pointer.
4060 * Description:
4061 * This is the stop entry point of the driver.
4062 */
4063 static int stmmac_release(struct net_device *dev)
4064 {
4065 struct stmmac_priv *priv = netdev_priv(dev);
4066 u32 chan;
4067
4068 if (device_may_wakeup(priv->device))
4069 phylink_speed_down(priv->phylink, false);
4070 /* Stop and disconnect the PHY */
4071 phylink_stop(priv->phylink);
4072 phylink_disconnect_phy(priv->phylink);
4073
4074 stmmac_disable_all_queues(priv);
4075
4076 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4077 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4078
4079 netif_tx_disable(dev);
4080
4081 /* Free the IRQ lines */
4082 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4083
4084 /* Stop TX/RX DMA and clear the descriptors */
4085 stmmac_stop_all_dma(priv);
4086
4087 /* Release and free the Rx/Tx resources */
4088 free_dma_desc_resources(priv, &priv->dma_conf);
4089
4090 /* Disable the MAC Rx/Tx */
4091 stmmac_mac_set(priv, priv->ioaddr, false);
4092
4093 	/* Power down the SerDes, if present */
4094 if (priv->plat->serdes_powerdown)
4095 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4096
4097 stmmac_release_ptp(priv);
4098
4099 if (stmmac_fpe_supported(priv))
4100 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4101
4102 pm_runtime_put(priv->device);
4103
4104 return 0;
4105 }
4106
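/**
 * stmmac_vlan_insert - request HW VLAN tag insertion through a descriptor
 * @priv: driver private structure
 * @skb: socket buffer carrying the VLAN tag
 * @tx_q: TX queue to use
 * Description: when the core supports VLAN insertion and the skb carries a
 * VLAN tag, fill a descriptor with the tag(s) to be inserted by HW and
 * advance cur_tx. Returns true if a descriptor was consumed, false if the
 * tag must be handled in software instead.
 */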
4107 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4108 struct stmmac_tx_queue *tx_q)
4109 {
4110 u16 tag = 0x0, inner_tag = 0x0;
4111 u32 inner_type = 0x0;
4112 struct dma_desc *p;
4113
4114 if (!priv->dma_cap.vlins)
4115 return false;
4116 if (!skb_vlan_tag_present(skb))
4117 return false;
4118 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4119 inner_tag = skb_vlan_tag_get(skb);
4120 inner_type = STMMAC_VLAN_INSERT;
4121 }
4122
4123 tag = skb_vlan_tag_get(skb);
4124
4125 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4126 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4127 else
4128 p = &tx_q->dma_tx[tx_q->cur_tx];
4129
4130 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4131 return false;
4132
4133 stmmac_set_tx_owner(priv, p);
4134 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4135 return true;
4136 }
4137
4138 /**
4139  * stmmac_tso_allocator - allocate and fill TSO descriptors
4140 * @priv: driver private structure
4141 * @des: buffer start address
4142 * @total_len: total length to fill in descriptors
4143 * @last_segment: condition for the last descriptor
4144 * @queue: TX queue index
4145 * Description:
4146  * This function fills descriptors and requests new descriptors according
4147  * to the buffer length to fill
4148 */
4149 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4150 int total_len, bool last_segment, u32 queue)
4151 {
4152 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4153 struct dma_desc *desc;
4154 u32 buff_size;
4155 int tmp_len;
4156
4157 tmp_len = total_len;
4158
4159 while (tmp_len > 0) {
4160 dma_addr_t curr_addr;
4161
4162 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4163 priv->dma_conf.dma_tx_size);
4164 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4165
4166 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4167 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4168 else
4169 desc = &tx_q->dma_tx[tx_q->cur_tx];
4170
4171 curr_addr = des + (total_len - tmp_len);
4172 stmmac_set_desc_addr(priv, desc, curr_addr);
4173 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4174 TSO_MAX_BUFF_SIZE : tmp_len;
4175
4176 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4177 0, 1,
4178 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4179 0, 0);
4180
4181 tmp_len -= TSO_MAX_BUFF_SIZE;
4182 }
4183 }
4184
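/**
 * stmmac_flush_tx_descriptors - update the TX tail pointer
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: make sure all descriptor writes are visible to the device,
 * then move the TX tail pointer to the current descriptor so the DMA engine
 * starts (or continues) fetching the newly prepared descriptors.
 */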
4185 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4186 {
4187 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4188 int desc_size;
4189
4190 if (likely(priv->extend_desc))
4191 desc_size = sizeof(struct dma_extended_desc);
4192 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4193 desc_size = sizeof(struct dma_edesc);
4194 else
4195 desc_size = sizeof(struct dma_desc);
4196
4197 	/* The own bit must be the latest setting done when preparing the
4198 	 * descriptor, and a barrier is needed to make sure that
4199 	 * everything is coherent before granting the DMA engine.
4200 */
4201 wmb();
4202
4203 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4204 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4205 }
4206
4207 /**
4208 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4209 * @skb : the socket buffer
4210 * @dev : device pointer
4211 * Description: this is the transmit function that is called on TSO frames
4212 * (support available on GMAC4 and newer chips).
4213 * Diagram below show the ring programming in case of TSO frames:
4214 *
4215 * First Descriptor
4216 * --------
4217 * | DES0 |---> buffer1 = L2/L3/L4 header
4218 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4219 * | | width is 32-bit, but we never use it.
4220 * | | Also can be used as the most-significant 8-bits or 16-bits of
4221 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4222 * | | or 48-bit, and we always use it.
4223 * | DES2 |---> buffer1 len
4224 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4225 * --------
4226 * --------
4227 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4228 * | DES1 |---> same as the First Descriptor
4229 * | DES2 |---> buffer1 len
4230 * | DES3 |
4231 * --------
4232 * |
4233 * ...
4234 * |
4235 * --------
4236 * | DES0 |---> buffer1 = Split TCP Payload
4237 * | DES1 |---> same as the First Descriptor
4238 * | DES2 |---> buffer1 len
4239 * | DES3 |
4240 * --------
4241 *
4242  * The MSS is fixed when TSO is enabled, so the TDES3 ctx field does not need to be programmed per frame.
4243 */
4244 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4245 {
4246 struct dma_desc *desc, *first, *mss_desc = NULL;
4247 struct stmmac_priv *priv = netdev_priv(dev);
4248 unsigned int first_entry, tx_packets;
4249 struct stmmac_txq_stats *txq_stats;
4250 struct stmmac_tx_queue *tx_q;
4251 u32 pay_len, mss, queue;
4252 int i, first_tx, nfrags;
4253 u8 proto_hdr_len, hdr;
4254 dma_addr_t des;
4255 bool set_ic;
4256
4257 /* Always insert VLAN tag to SKB payload for TSO frames.
4258 *
4259 	 * Never insert the VLAN tag by HW, since segments split by the
4260 	 * TSO engine would be un-tagged by mistake.
4261 */
4262 if (skb_vlan_tag_present(skb)) {
4263 skb = __vlan_hwaccel_push_inside(skb);
4264 if (unlikely(!skb)) {
4265 priv->xstats.tx_dropped++;
4266 return NETDEV_TX_OK;
4267 }
4268 }
4269
4270 nfrags = skb_shinfo(skb)->nr_frags;
4271 queue = skb_get_queue_mapping(skb);
4272
4273 tx_q = &priv->dma_conf.tx_queue[queue];
4274 txq_stats = &priv->xstats.txq_stats[queue];
4275 first_tx = tx_q->cur_tx;
4276
4277 /* Compute header lengths */
4278 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4279 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4280 hdr = sizeof(struct udphdr);
4281 } else {
4282 proto_hdr_len = skb_tcp_all_headers(skb);
4283 hdr = tcp_hdrlen(skb);
4284 }
4285
4286 	/* Desc availability based on threshold should be safe enough */
4287 if (unlikely(stmmac_tx_avail(priv, queue) <
4288 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4289 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4290 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4291 queue));
4292 /* This is a hard error, log it. */
4293 netdev_err(priv->dev,
4294 "%s: Tx Ring full when queue awake\n",
4295 __func__);
4296 }
4297 return NETDEV_TX_BUSY;
4298 }
4299
4300 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4301
4302 mss = skb_shinfo(skb)->gso_size;
4303
4304 /* set new MSS value if needed */
4305 if (mss != tx_q->mss) {
4306 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4307 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4308 else
4309 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4310
4311 stmmac_set_mss(priv, mss_desc, mss);
4312 tx_q->mss = mss;
4313 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4314 priv->dma_conf.dma_tx_size);
4315 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4316 }
4317
4318 if (netif_msg_tx_queued(priv)) {
4319 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4320 __func__, hdr, proto_hdr_len, pay_len, mss);
4321 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4322 skb->data_len);
4323 }
4324
4325 first_entry = tx_q->cur_tx;
4326 WARN_ON(tx_q->tx_skbuff[first_entry]);
4327
4328 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4329 desc = &tx_q->dma_entx[first_entry].basic;
4330 else
4331 desc = &tx_q->dma_tx[first_entry];
4332 first = desc;
4333
4334 /* first descriptor: fill Headers on Buf1 */
4335 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4336 DMA_TO_DEVICE);
4337 if (dma_mapping_error(priv->device, des))
4338 goto dma_map_err;
4339
4340 stmmac_set_desc_addr(priv, first, des);
4341 stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4342 (nfrags == 0), queue);
4343
4344 /* In case two or more DMA transmit descriptors are allocated for this
4345 * non-paged SKB data, the DMA buffer address should be saved to
4346 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4347 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4348 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4349 * since the tail areas of the DMA buffer can be accessed by DMA engine
4350 * sooner or later.
4351 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4352 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4353 * this DMA buffer right after the DMA engine completely finishes the
4354 * full buffer transmission.
4355 */
4356 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4357 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4358 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4359 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4360
4361 /* Prepare fragments */
4362 for (i = 0; i < nfrags; i++) {
4363 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4364
4365 des = skb_frag_dma_map(priv->device, frag, 0,
4366 skb_frag_size(frag),
4367 DMA_TO_DEVICE);
4368 if (dma_mapping_error(priv->device, des))
4369 goto dma_map_err;
4370
4371 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4372 (i == nfrags - 1), queue);
4373
4374 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4375 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4376 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4377 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4378 }
4379
4380 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4381
4382 /* Only the last descriptor gets to point to the skb. */
4383 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4384 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4385
4386 /* Manage tx mitigation */
4387 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4388 tx_q->tx_count_frames += tx_packets;
4389
4390 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4391 set_ic = true;
4392 else if (!priv->tx_coal_frames[queue])
4393 set_ic = false;
4394 else if (tx_packets > priv->tx_coal_frames[queue])
4395 set_ic = true;
4396 else if ((tx_q->tx_count_frames %
4397 priv->tx_coal_frames[queue]) < tx_packets)
4398 set_ic = true;
4399 else
4400 set_ic = false;
4401
4402 if (set_ic) {
4403 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4404 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4405 else
4406 desc = &tx_q->dma_tx[tx_q->cur_tx];
4407
4408 tx_q->tx_count_frames = 0;
4409 stmmac_set_tx_ic(priv, desc);
4410 }
4411
4412 /* We've used all descriptors we need for this skb, however,
4413 * advance cur_tx so that it references a fresh descriptor.
4414 * ndo_start_xmit will fill this descriptor the next time it's
4415 * called and stmmac_tx_clean may clean up to this descriptor.
4416 */
4417 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4418
4419 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4420 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4421 __func__);
4422 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4423 }
4424
4425 u64_stats_update_begin(&txq_stats->q_syncp);
4426 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4427 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4428 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4429 if (set_ic)
4430 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4431 u64_stats_update_end(&txq_stats->q_syncp);
4432
4433 if (priv->sarc_type)
4434 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4435
4436 skb_tx_timestamp(skb);
4437
4438 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4439 priv->hwts_tx_en)) {
4440 /* declare that device is doing timestamping */
4441 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4442 stmmac_enable_tx_timestamp(priv, first);
4443 }
4444
4445 /* Complete the first descriptor before granting the DMA */
4446 stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4447 tx_q->tx_skbuff_dma[first_entry].last_segment,
4448 hdr / 4, (skb->len - proto_hdr_len));
4449
4450 /* If context desc is used to change MSS */
4451 if (mss_desc) {
4452 /* Make sure that first descriptor has been completely
4453 * written, including its own bit. This is because MSS is
4454 * actually before first descriptor, so we need to make
4455 * sure that MSS's own bit is the last thing written.
4456 */
4457 dma_wmb();
4458 stmmac_set_tx_owner(priv, mss_desc);
4459 }
4460
4461 if (netif_msg_pktdata(priv)) {
4462 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4463 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4464 tx_q->cur_tx, first, nfrags);
4465 pr_info(">>> frame to be transmitted: ");
4466 print_pkt(skb->data, skb_headlen(skb));
4467 }
4468
4469 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4470
4471 stmmac_flush_tx_descriptors(priv, queue);
4472 stmmac_tx_timer_arm(priv, queue);
4473
4474 return NETDEV_TX_OK;
4475
4476 dma_map_err:
4477 dev_err(priv->device, "Tx dma map failed\n");
4478 dev_kfree_skb(skb);
4479 priv->xstats.tx_dropped++;
4480 return NETDEV_TX_OK;
4481 }
4482
4483 /**
4484 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4485 * @skb: socket buffer to check
4486 *
4487 * Check if a packet has an ethertype that will trigger the IP header checks
4488 * and IP/TCP checksum engine of the stmmac core.
4489 *
4490 * Return: true if the ethertype can trigger the checksum engine, false
4491 * otherwise
4492 */
4493 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4494 {
4495 int depth = 0;
4496 __be16 proto;
4497
4498 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4499 &depth);
4500
4501 return (depth <= ETH_HLEN) &&
4502 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4503 }
4504
4505 /**
4506 * stmmac_xmit - Tx entry point of the driver
4507 * @skb : the socket buffer
4508 * @dev : device pointer
4509 * Description : this is the tx entry point of the driver.
4510 * It programs the chain or the ring and supports oversized frames
4511 * and SG feature.
4512 */
4513 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4514 {
4515 unsigned int first_entry, tx_packets, enh_desc;
4516 struct stmmac_priv *priv = netdev_priv(dev);
4517 unsigned int nopaged_len = skb_headlen(skb);
4518 int i, csum_insertion = 0, is_jumbo = 0;
4519 u32 queue = skb_get_queue_mapping(skb);
4520 int nfrags = skb_shinfo(skb)->nr_frags;
4521 int gso = skb_shinfo(skb)->gso_type;
4522 struct stmmac_txq_stats *txq_stats;
4523 struct dma_edesc *tbs_desc = NULL;
4524 struct dma_desc *desc, *first;
4525 struct stmmac_tx_queue *tx_q;
4526 bool has_vlan, set_ic;
4527 int entry, first_tx;
4528 dma_addr_t des;
4529
4530 tx_q = &priv->dma_conf.tx_queue[queue];
4531 txq_stats = &priv->xstats.txq_stats[queue];
4532 first_tx = tx_q->cur_tx;
4533
4534 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4535 stmmac_stop_sw_lpi(priv);
4536
4537 /* Manage oversized TCP frames for GMAC4 device */
4538 if (skb_is_gso(skb) && priv->tso) {
4539 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4540 return stmmac_tso_xmit(skb, dev);
4541 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4542 return stmmac_tso_xmit(skb, dev);
4543 }
4544
4545 if (priv->est && priv->est->enable &&
4546 priv->est->max_sdu[queue] &&
4547 	    skb->len > priv->est->max_sdu[queue]) {
4548 priv->xstats.max_sdu_txq_drop[queue]++;
4549 goto max_sdu_err;
4550 }
4551
4552 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4553 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4554 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4555 queue));
4556 /* This is a hard error, log it. */
4557 netdev_err(priv->dev,
4558 "%s: Tx Ring full when queue awake\n",
4559 __func__);
4560 }
4561 return NETDEV_TX_BUSY;
4562 }
4563
4564 /* Check if VLAN can be inserted by HW */
4565 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4566
4567 entry = tx_q->cur_tx;
4568 first_entry = entry;
4569 WARN_ON(tx_q->tx_skbuff[first_entry]);
4570
4571 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4572 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4573 * queues. In that case, checksum offloading for those queues that don't
4574 	 * support tx coe needs to fall back to software checksum calculation.
4575 *
4576 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4577 * also have to be checksummed in software.
4578 */
4579 if (csum_insertion &&
4580 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4581 !stmmac_has_ip_ethertype(skb))) {
4582 if (unlikely(skb_checksum_help(skb)))
4583 goto dma_map_err;
4584 csum_insertion = !csum_insertion;
4585 }
4586
4587 if (likely(priv->extend_desc))
4588 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4589 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4590 desc = &tx_q->dma_entx[entry].basic;
4591 else
4592 desc = tx_q->dma_tx + entry;
4593
4594 first = desc;
4595
4596 if (has_vlan)
4597 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4598
4599 enh_desc = priv->plat->enh_desc;
4600 /* To program the descriptors according to the size of the frame */
4601 if (enh_desc)
4602 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4603
4604 if (unlikely(is_jumbo)) {
4605 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4606 if (unlikely(entry < 0) && (entry != -EINVAL))
4607 goto dma_map_err;
4608 }
4609
4610 for (i = 0; i < nfrags; i++) {
4611 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4612 int len = skb_frag_size(frag);
4613 bool last_segment = (i == (nfrags - 1));
4614
4615 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4616 WARN_ON(tx_q->tx_skbuff[entry]);
4617
4618 if (likely(priv->extend_desc))
4619 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4620 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4621 desc = &tx_q->dma_entx[entry].basic;
4622 else
4623 desc = tx_q->dma_tx + entry;
4624
4625 des = skb_frag_dma_map(priv->device, frag, 0, len,
4626 DMA_TO_DEVICE);
4627 if (dma_mapping_error(priv->device, des))
4628 goto dma_map_err; /* should reuse desc w/o issues */
4629
4630 tx_q->tx_skbuff_dma[entry].buf = des;
4631
4632 stmmac_set_desc_addr(priv, desc, des);
4633
4634 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4635 tx_q->tx_skbuff_dma[entry].len = len;
4636 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4637 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4638
4639 /* Prepare the descriptor and set the own bit too */
4640 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4641 priv->mode, 1, last_segment, skb->len);
4642 }
4643
4644 /* Only the last descriptor gets to point to the skb. */
4645 tx_q->tx_skbuff[entry] = skb;
4646 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4647
4648 /* According to the coalesce parameter the IC bit for the latest
4649 * segment is reset and the timer re-started to clean the tx status.
4650 	 * This approach takes care of the fragments: desc is the first
4651 * element in case of no SG.
4652 */
4653 tx_packets = (entry + 1) - first_tx;
4654 tx_q->tx_count_frames += tx_packets;
4655
4656 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4657 set_ic = true;
4658 else if (!priv->tx_coal_frames[queue])
4659 set_ic = false;
4660 else if (tx_packets > priv->tx_coal_frames[queue])
4661 set_ic = true;
4662 else if ((tx_q->tx_count_frames %
4663 priv->tx_coal_frames[queue]) < tx_packets)
4664 set_ic = true;
4665 else
4666 set_ic = false;
4667
4668 if (set_ic) {
4669 if (likely(priv->extend_desc))
4670 desc = &tx_q->dma_etx[entry].basic;
4671 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4672 desc = &tx_q->dma_entx[entry].basic;
4673 else
4674 desc = &tx_q->dma_tx[entry];
4675
4676 tx_q->tx_count_frames = 0;
4677 stmmac_set_tx_ic(priv, desc);
4678 }
4679
4680 /* We've used all descriptors we need for this skb, however,
4681 * advance cur_tx so that it references a fresh descriptor.
4682 * ndo_start_xmit will fill this descriptor the next time it's
4683 * called and stmmac_tx_clean may clean up to this descriptor.
4684 */
4685 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4686 tx_q->cur_tx = entry;
4687
4688 if (netif_msg_pktdata(priv)) {
4689 netdev_dbg(priv->dev,
4690 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4691 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4692 entry, first, nfrags);
4693
4694 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4695 print_pkt(skb->data, skb->len);
4696 }
4697
4698 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4699 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4700 __func__);
4701 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4702 }
4703
4704 u64_stats_update_begin(&txq_stats->q_syncp);
4705 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4706 if (set_ic)
4707 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4708 u64_stats_update_end(&txq_stats->q_syncp);
4709
4710 if (priv->sarc_type)
4711 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4712
4713 skb_tx_timestamp(skb);
4714
4715 /* Ready to fill the first descriptor and set the OWN bit w/o any
4716 * problems because all the descriptors are actually ready to be
4717 * passed to the DMA engine.
4718 */
4719 if (likely(!is_jumbo)) {
4720 bool last_segment = (nfrags == 0);
4721
4722 des = dma_map_single(priv->device, skb->data,
4723 nopaged_len, DMA_TO_DEVICE);
4724 if (dma_mapping_error(priv->device, des))
4725 goto dma_map_err;
4726
4727 tx_q->tx_skbuff_dma[first_entry].buf = des;
4728 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4729 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4730
4731 stmmac_set_desc_addr(priv, first, des);
4732
4733 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4734 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4735
4736 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4737 priv->hwts_tx_en)) {
4738 /* declare that device is doing timestamping */
4739 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4740 stmmac_enable_tx_timestamp(priv, first);
4741 }
4742
4743 /* Prepare the first descriptor setting the OWN bit too */
4744 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4745 csum_insertion, priv->mode, 0, last_segment,
4746 skb->len);
4747 }
4748
4749 if (tx_q->tbs & STMMAC_TBS_EN) {
4750 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4751
4752 tbs_desc = &tx_q->dma_entx[first_entry];
4753 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4754 }
4755
4756 stmmac_set_tx_owner(priv, first);
4757
4758 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4759
4760 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4761
4762 stmmac_flush_tx_descriptors(priv, queue);
4763 stmmac_tx_timer_arm(priv, queue);
4764
4765 return NETDEV_TX_OK;
4766
4767 dma_map_err:
4768 netdev_err(priv->dev, "Tx DMA map failed\n");
4769 max_sdu_err:
4770 dev_kfree_skb(skb);
4771 priv->xstats.tx_dropped++;
4772 return NETDEV_TX_OK;
4773 }
4774
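/**
 * stmmac_rx_vlan - strip the VLAN tag at driver level
 * @dev: device pointer
 * @skb: received socket buffer
 * Description: if the netdev has the corresponding CTAG/STAG RX offload
 * enabled, pop the 802.1Q/802.1AD tag from the packet data and record it
 * in the skb with __vlan_hwaccel_put_tag().
 */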
4775 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4776 {
4777 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4778 __be16 vlan_proto = veth->h_vlan_proto;
4779 u16 vlanid;
4780
4781 if ((vlan_proto == htons(ETH_P_8021Q) &&
4782 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4783 (vlan_proto == htons(ETH_P_8021AD) &&
4784 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4785 /* pop the vlan tag */
4786 vlanid = ntohs(veth->h_vlan_TCI);
4787 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4788 skb_pull(skb, VLAN_HLEN);
4789 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4790 }
4791 }
4792
4793 /**
4794  * stmmac_rx_refill - refill the used RX buffers
4795  * @priv: driver private structure
4796  * @queue: RX queue index
4797  * Description : this is to refill the used receive buffers with new pages
4798  * from the page pool and re-arm the descriptors for the DMA engine.
4799 */
4800 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4801 {
4802 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4803 int dirty = stmmac_rx_dirty(priv, queue);
4804 unsigned int entry = rx_q->dirty_rx;
4805 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4806
4807 if (priv->dma_cap.host_dma_width <= 32)
4808 gfp |= GFP_DMA32;
4809
4810 while (dirty-- > 0) {
4811 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4812 struct dma_desc *p;
4813 bool use_rx_wd;
4814
4815 if (priv->extend_desc)
4816 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4817 else
4818 p = rx_q->dma_rx + entry;
4819
4820 if (!buf->page) {
4821 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4822 if (!buf->page)
4823 break;
4824 }
4825
4826 if (priv->sph && !buf->sec_page) {
4827 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4828 if (!buf->sec_page)
4829 break;
4830
4831 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4832 }
4833
4834 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4835
4836 stmmac_set_desc_addr(priv, p, buf->addr);
4837 if (priv->sph)
4838 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4839 else
4840 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4841 stmmac_refill_desc3(priv, rx_q, p);
4842
4843 rx_q->rx_count_frames++;
4844 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4845 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4846 rx_q->rx_count_frames = 0;
4847
4848 use_rx_wd = !priv->rx_coal_frames[queue];
4849 use_rx_wd |= rx_q->rx_count_frames > 0;
4850 if (!priv->use_riwt)
4851 use_rx_wd = false;
4852
4853 dma_wmb();
4854 stmmac_set_rx_owner(priv, p, use_rx_wd);
4855
4856 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4857 }
4858 rx_q->dirty_rx = entry;
4859 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4860 (rx_q->dirty_rx * sizeof(struct dma_desc));
4861 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4862 }
4863
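/**
 * stmmac_rx_buf1_len - compute the length of data held in RX buffer 1
 * @priv: driver private structure
 * @p: RX descriptor
 * @status: RX descriptor status
 * @len: length of the data already accumulated for this frame
 * Description: compute how many bytes of the frame landed in buffer 1 of
 * this descriptor, taking Split Header mode and last-descriptor status into
 * account.
 */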
4864 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4865 struct dma_desc *p,
4866 int status, unsigned int len)
4867 {
4868 unsigned int plen = 0, hlen = 0;
4869 int coe = priv->hw->rx_csum;
4870
4871 /* Not first descriptor, buffer is always zero */
4872 if (priv->sph && len)
4873 return 0;
4874
4875 /* First descriptor, get split header length */
4876 stmmac_get_rx_header_len(priv, p, &hlen);
4877 if (priv->sph && hlen) {
4878 priv->xstats.rx_split_hdr_pkt_n++;
4879 return hlen;
4880 }
4881
4882 /* First descriptor, not last descriptor and not split header */
4883 if (status & rx_not_ls)
4884 return priv->dma_conf.dma_buf_sz;
4885
4886 plen = stmmac_get_rx_frame_len(priv, p, coe);
4887
4888 /* First descriptor and last descriptor and not split header */
4889 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4890 }
4891
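/**
 * stmmac_rx_buf2_len - compute the length of data held in RX buffer 2
 * @priv: driver private structure
 * @p: RX descriptor
 * @status: RX descriptor status
 * @len: length of the data already accumulated for this frame
 * Description: buffer 2 is only used in Split Header mode; return the full
 * buffer size for intermediate descriptors or the remaining frame length
 * for the last descriptor.
 */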
4892 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4893 struct dma_desc *p,
4894 int status, unsigned int len)
4895 {
4896 int coe = priv->hw->rx_csum;
4897 unsigned int plen = 0;
4898
4899 /* Not split header, buffer is not available */
4900 if (!priv->sph)
4901 return 0;
4902
4903 /* Not last descriptor */
4904 if (status & rx_not_ls)
4905 return priv->dma_conf.dma_buf_sz;
4906
4907 plen = stmmac_get_rx_frame_len(priv, p, coe);
4908
4909 /* Last descriptor */
4910 return plen - len;
4911 }
4912
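/**
 * stmmac_xdp_xmit_xdpf - transmit a single XDP frame
 * @priv: driver private structure
 * @queue: TX queue index
 * @xdpf: XDP frame to send
 * @dma_map: map the frame with dma_map_single() (ndo_xdp_xmit path) instead
 *	     of reusing the page pool DMA address (XDP_TX path)
 * Description: fill one TX descriptor with the frame, set the IC bit
 * according to the TX coalesce counter and kick the DMA. Returns
 * STMMAC_XDP_TX on success or STMMAC_XDP_CONSUMED when the frame is dropped.
 */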
4913 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4914 struct xdp_frame *xdpf, bool dma_map)
4915 {
4916 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4917 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4918 unsigned int entry = tx_q->cur_tx;
4919 struct dma_desc *tx_desc;
4920 dma_addr_t dma_addr;
4921 bool set_ic;
4922
4923 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4924 return STMMAC_XDP_CONSUMED;
4925
4926 if (priv->est && priv->est->enable &&
4927 priv->est->max_sdu[queue] &&
4928 xdpf->len > priv->est->max_sdu[queue]) {
4929 priv->xstats.max_sdu_txq_drop[queue]++;
4930 return STMMAC_XDP_CONSUMED;
4931 }
4932
4933 if (likely(priv->extend_desc))
4934 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4935 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4936 tx_desc = &tx_q->dma_entx[entry].basic;
4937 else
4938 tx_desc = tx_q->dma_tx + entry;
4939
4940 if (dma_map) {
4941 dma_addr = dma_map_single(priv->device, xdpf->data,
4942 xdpf->len, DMA_TO_DEVICE);
4943 if (dma_mapping_error(priv->device, dma_addr))
4944 return STMMAC_XDP_CONSUMED;
4945
4946 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4947 } else {
4948 struct page *page = virt_to_page(xdpf->data);
4949
4950 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4951 xdpf->headroom;
4952 dma_sync_single_for_device(priv->device, dma_addr,
4953 xdpf->len, DMA_BIDIRECTIONAL);
4954
4955 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4956 }
4957
4958 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4959 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4960 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4961 tx_q->tx_skbuff_dma[entry].last_segment = true;
4962 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4963
4964 tx_q->xdpf[entry] = xdpf;
4965
4966 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4967
4968 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4969 true, priv->mode, true, true,
4970 xdpf->len);
4971
4972 tx_q->tx_count_frames++;
4973
4974 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4975 set_ic = true;
4976 else
4977 set_ic = false;
4978
4979 if (set_ic) {
4980 tx_q->tx_count_frames = 0;
4981 stmmac_set_tx_ic(priv, tx_desc);
4982 u64_stats_update_begin(&txq_stats->q_syncp);
4983 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4984 u64_stats_update_end(&txq_stats->q_syncp);
4985 }
4986
4987 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4988
4989 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4990 tx_q->cur_tx = entry;
4991
4992 return STMMAC_XDP_TX;
4993 }
4994
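/**
 * stmmac_xdp_get_tx_queue - pick a TX queue for XDP transmission
 * @priv: driver private structure
 * @cpu: current CPU id
 * Description: map the CPU id onto the range of TX queues in use so that
 * XDP transmissions from different CPUs are spread across the queues.
 */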
4995 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4996 int cpu)
4997 {
4998 int index = cpu;
4999
5000 if (unlikely(index < 0))
5001 index = 0;
5002
5003 while (index >= priv->plat->tx_queues_to_use)
5004 index -= priv->plat->tx_queues_to_use;
5005
5006 return index;
5007 }
5008
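/**
 * stmmac_xdp_xmit_back - transmit an XDP buffer back out of the interface
 * @priv: driver private structure
 * @xdp: XDP buffer to send (XDP_TX verdict)
 * Description: convert the buffer to an xdp_frame, pick a TX queue based on
 * the current CPU, and send it while holding the netdev TX queue lock so
 * the XDP path can coexist with the regular transmit path.
 */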
5009 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5010 struct xdp_buff *xdp)
5011 {
5012 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5013 int cpu = smp_processor_id();
5014 struct netdev_queue *nq;
5015 int queue;
5016 int res;
5017
5018 if (unlikely(!xdpf))
5019 return STMMAC_XDP_CONSUMED;
5020
5021 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5022 nq = netdev_get_tx_queue(priv->dev, queue);
5023
5024 __netif_tx_lock(nq, cpu);
5025 /* Avoids TX time-out as we are sharing with slow path */
5026 txq_trans_cond_update(nq);
5027
5028 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5029 if (res == STMMAC_XDP_TX)
5030 stmmac_flush_tx_descriptors(priv, queue);
5031
5032 __netif_tx_unlock(nq);
5033
5034 return res;
5035 }
5036
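/**
 * __stmmac_xdp_run_prog - run an XDP program on a received buffer
 * @priv: driver private structure
 * @prog: BPF program to run
 * @xdp: XDP buffer
 * Description: map the XDP verdict (PASS/TX/REDIRECT/ABORTED/DROP) onto the
 * driver's STMMAC_XDP_* action codes.
 */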
5037 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5038 struct bpf_prog *prog,
5039 struct xdp_buff *xdp)
5040 {
5041 u32 act;
5042 int res;
5043
5044 act = bpf_prog_run_xdp(prog, xdp);
5045 switch (act) {
5046 case XDP_PASS:
5047 res = STMMAC_XDP_PASS;
5048 break;
5049 case XDP_TX:
5050 res = stmmac_xdp_xmit_back(priv, xdp);
5051 break;
5052 case XDP_REDIRECT:
5053 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5054 res = STMMAC_XDP_CONSUMED;
5055 else
5056 res = STMMAC_XDP_REDIRECT;
5057 break;
5058 default:
5059 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5060 fallthrough;
5061 case XDP_ABORTED:
5062 trace_xdp_exception(priv->dev, prog, act);
5063 fallthrough;
5064 case XDP_DROP:
5065 res = STMMAC_XDP_CONSUMED;
5066 break;
5067 }
5068
5069 return res;
5070 }
5071
5072 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5073 struct xdp_buff *xdp)
5074 {
5075 struct bpf_prog *prog;
5076 int res;
5077
5078 prog = READ_ONCE(priv->xdp_prog);
5079 if (!prog) {
5080 res = STMMAC_XDP_PASS;
5081 goto out;
5082 }
5083
5084 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5085 out:
5086 return ERR_PTR(-res);
5087 }
5088
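/**
 * stmmac_finalize_xdp_rx - finish XDP work at the end of a NAPI RX cycle
 * @priv: driver private structure
 * @xdp_status: OR-ed mask of the XDP actions taken during this cycle
 * Description: arm the TX timer if frames were queued via XDP_TX and flush
 * pending redirects if any XDP_REDIRECT verdict was returned.
 */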
5089 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5090 int xdp_status)
5091 {
5092 int cpu = smp_processor_id();
5093 int queue;
5094
5095 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5096
5097 if (xdp_status & STMMAC_XDP_TX)
5098 stmmac_tx_timer_arm(priv, queue);
5099
5100 if (xdp_status & STMMAC_XDP_REDIRECT)
5101 xdp_do_flush();
5102 }
5103
5104 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5105 struct xdp_buff *xdp)
5106 {
5107 unsigned int metasize = xdp->data - xdp->data_meta;
5108 unsigned int datasize = xdp->data_end - xdp->data;
5109 struct sk_buff *skb;
5110
5111 skb = napi_alloc_skb(&ch->rxtx_napi,
5112 xdp->data_end - xdp->data_hard_start);
5113 if (unlikely(!skb))
5114 return NULL;
5115
5116 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5117 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5118 if (metasize)
5119 skb_metadata_set(skb, metasize);
5120
5121 return skb;
5122 }
5123
5124 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5125 struct dma_desc *p, struct dma_desc *np,
5126 struct xdp_buff *xdp)
5127 {
5128 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5129 struct stmmac_channel *ch = &priv->channel[queue];
5130 unsigned int len = xdp->data_end - xdp->data;
5131 enum pkt_hash_types hash_type;
5132 int coe = priv->hw->rx_csum;
5133 struct sk_buff *skb;
5134 u32 hash;
5135
5136 skb = stmmac_construct_skb_zc(ch, xdp);
5137 if (!skb) {
5138 priv->xstats.rx_dropped++;
5139 return;
5140 }
5141
5142 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5143 if (priv->hw->hw_vlan_en)
5144 /* MAC level stripping. */
5145 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5146 else
5147 /* Driver level stripping. */
5148 stmmac_rx_vlan(priv->dev, skb);
5149 skb->protocol = eth_type_trans(skb, priv->dev);
5150
5151 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5152 skb_checksum_none_assert(skb);
5153 else
5154 skb->ip_summed = CHECKSUM_UNNECESSARY;
5155
5156 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5157 skb_set_hash(skb, hash, hash_type);
5158
5159 skb_record_rx_queue(skb, queue);
5160 napi_gro_receive(&ch->rxtx_napi, skb);
5161
5162 u64_stats_update_begin(&rxq_stats->napi_syncp);
5163 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5164 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5165 u64_stats_update_end(&rxq_stats->napi_syncp);
5166 }
5167
5168 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5169 {
5170 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5171 unsigned int entry = rx_q->dirty_rx;
5172 struct dma_desc *rx_desc = NULL;
5173 bool ret = true;
5174
5175 budget = min(budget, stmmac_rx_dirty(priv, queue));
5176
5177 while (budget-- > 0 && entry != rx_q->cur_rx) {
5178 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5179 dma_addr_t dma_addr;
5180 bool use_rx_wd;
5181
5182 if (!buf->xdp) {
5183 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5184 if (!buf->xdp) {
5185 ret = false;
5186 break;
5187 }
5188 }
5189
5190 if (priv->extend_desc)
5191 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5192 else
5193 rx_desc = rx_q->dma_rx + entry;
5194
5195 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5196 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5197 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5198 stmmac_refill_desc3(priv, rx_q, rx_desc);
5199
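		/* Decide whether to rely on the RX interrupt watchdog (RIWT)
		 * instead of a per-descriptor completion interrupt, based on
		 * the configured interrupt coalescing.
		 */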
5200 rx_q->rx_count_frames++;
5201 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5202 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5203 rx_q->rx_count_frames = 0;
5204
5205 use_rx_wd = !priv->rx_coal_frames[queue];
5206 use_rx_wd |= rx_q->rx_count_frames > 0;
5207 if (!priv->use_riwt)
5208 use_rx_wd = false;
5209
5210 dma_wmb();
5211 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5212
5213 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5214 }
5215
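	/* If any descriptor was refilled, publish the new tail pointer so the
	 * DMA engine can start using the freshly prepared entries.
	 */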
5216 if (rx_desc) {
5217 rx_q->dirty_rx = entry;
5218 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5219 (rx_q->dirty_rx * sizeof(struct dma_desc));
5220 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5221 }
5222
5223 return ret;
5224 }
5225
5226 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5227 {
5228 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5229 * to represent incoming packet, whereas cb field in the same structure
5230 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5231 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5232 */
5233 return (struct stmmac_xdp_buff *)xdp;
5234 }
5235
5236 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5237 {
5238 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5239 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5240 unsigned int count = 0, error = 0, len = 0;
5241 int dirty = stmmac_rx_dirty(priv, queue);
5242 unsigned int next_entry = rx_q->cur_rx;
5243 u32 rx_errors = 0, rx_dropped = 0;
5244 unsigned int desc_size;
5245 struct bpf_prog *prog;
5246 bool failure = false;
5247 int xdp_status = 0;
5248 int status = 0;
5249
5250 if (netif_msg_rx_status(priv)) {
5251 void *rx_head;
5252
5253 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5254 if (priv->extend_desc) {
5255 rx_head = (void *)rx_q->dma_erx;
5256 desc_size = sizeof(struct dma_extended_desc);
5257 } else {
5258 rx_head = (void *)rx_q->dma_rx;
5259 desc_size = sizeof(struct dma_desc);
5260 }
5261
5262 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5263 rx_q->dma_rx_phy, desc_size);
5264 }
5265 while (count < limit) {
5266 struct stmmac_rx_buffer *buf;
5267 struct stmmac_xdp_buff *ctx;
5268 unsigned int buf1_len = 0;
5269 struct dma_desc *np, *p;
5270 int entry;
5271 int res;
5272
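		/* Resume the state of a frame whose processing spanned the
		 * previous NAPI run, if any was saved.
		 */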
5273 if (!count && rx_q->state_saved) {
5274 error = rx_q->state.error;
5275 len = rx_q->state.len;
5276 } else {
5277 rx_q->state_saved = false;
5278 error = 0;
5279 len = 0;
5280 }
5281
5282 if (count >= limit)
5283 break;
5284
5285 read_again:
5286 buf1_len = 0;
5287 entry = next_entry;
5288 buf = &rx_q->buf_pool[entry];
5289
5290 if (dirty >= STMMAC_RX_FILL_BATCH) {
5291 failure = failure ||
5292 !stmmac_rx_refill_zc(priv, queue, dirty);
5293 dirty = 0;
5294 }
5295
5296 if (priv->extend_desc)
5297 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5298 else
5299 p = rx_q->dma_rx + entry;
5300
5301 /* read the status of the incoming frame */
5302 status = stmmac_rx_status(priv, &priv->xstats, p);
5303 /* check if managed by the DMA otherwise go ahead */
5304 if (unlikely(status & dma_own))
5305 break;
5306
5307 /* Prefetch the next RX descriptor */
5308 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5309 priv->dma_conf.dma_rx_size);
5310 next_entry = rx_q->cur_rx;
5311
5312 if (priv->extend_desc)
5313 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5314 else
5315 np = rx_q->dma_rx + next_entry;
5316
5317 prefetch(np);
5318
5319 /* Ensure a valid XSK buffer before proceeding */
5320 if (!buf->xdp)
5321 break;
5322
5323 if (priv->extend_desc)
5324 stmmac_rx_extended_status(priv, &priv->xstats,
5325 rx_q->dma_erx + entry);
5326 if (unlikely(status == discard_frame)) {
5327 xsk_buff_free(buf->xdp);
5328 buf->xdp = NULL;
5329 dirty++;
5330 error = 1;
5331 if (!priv->hwts_rx_en)
5332 rx_errors++;
5333 }
5334
5335 if (unlikely(error && (status & rx_not_ls)))
5336 goto read_again;
5337 if (unlikely(error)) {
5338 count++;
5339 continue;
5340 }
5341
5342 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5343 if (likely(status & rx_not_ls)) {
5344 xsk_buff_free(buf->xdp);
5345 buf->xdp = NULL;
5346 dirty++;
5347 count++;
5348 goto read_again;
5349 }
5350
5351 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5352 ctx->priv = priv;
5353 ctx->desc = p;
5354 ctx->ndesc = np;
5355
5356 /* XDP ZC frames only support primary buffers for now */
5357 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5358 len += buf1_len;
5359
5360 /* ACS is disabled; strip manually. */
5361 if (likely(!(status & rx_not_ls))) {
5362 buf1_len -= ETH_FCS_LEN;
5363 len -= ETH_FCS_LEN;
5364 }
5365
5366 /* RX buffer is good and fits into an XSK pool buffer */
5367 buf->xdp->data_end = buf->xdp->data + buf1_len;
5368 xsk_buff_dma_sync_for_cpu(buf->xdp);
5369
5370 prog = READ_ONCE(priv->xdp_prog);
5371 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5372
5373 switch (res) {
5374 case STMMAC_XDP_PASS:
5375 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5376 xsk_buff_free(buf->xdp);
5377 break;
5378 case STMMAC_XDP_CONSUMED:
5379 xsk_buff_free(buf->xdp);
5380 rx_dropped++;
5381 break;
5382 case STMMAC_XDP_TX:
5383 case STMMAC_XDP_REDIRECT:
5384 xdp_status |= res;
5385 break;
5386 }
5387
5388 buf->xdp = NULL;
5389 dirty++;
5390 count++;
5391 }
5392
5393 if (status & rx_not_ls) {
5394 rx_q->state_saved = true;
5395 rx_q->state.error = error;
5396 rx_q->state.len = len;
5397 }
5398
5399 stmmac_finalize_xdp_rx(priv, xdp_status);
5400
5401 u64_stats_update_begin(&rxq_stats->napi_syncp);
5402 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5403 u64_stats_update_end(&rxq_stats->napi_syncp);
5404
5405 priv->xstats.rx_dropped += rx_dropped;
5406 priv->xstats.rx_errors += rx_errors;
5407
5408 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5409 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5410 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5411 else
5412 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5413
5414 return (int)count;
5415 }
5416
5417 return failure ? limit : (int)count;
5418 }
5419
5420 /**
5421 * stmmac_rx - manage the receive process
5422 * @priv: driver private structure
5423  * @limit: napi budget
5424  * @queue: RX queue index.
5425  * Description: this is the function called by the napi poll method.
5426 * It gets all the frames inside the ring.
5427 */
5428 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5429 {
5430 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5431 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5432 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5433 struct stmmac_channel *ch = &priv->channel[queue];
5434 unsigned int count = 0, error = 0, len = 0;
5435 int status = 0, coe = priv->hw->rx_csum;
5436 unsigned int next_entry = rx_q->cur_rx;
5437 enum dma_data_direction dma_dir;
5438 unsigned int desc_size;
5439 struct sk_buff *skb = NULL;
5440 struct stmmac_xdp_buff ctx;
5441 int xdp_status = 0;
5442 int buf_sz;
5443
5444 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5445 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5446 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5447
5448 if (netif_msg_rx_status(priv)) {
5449 void *rx_head;
5450
5451 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5452 if (priv->extend_desc) {
5453 rx_head = (void *)rx_q->dma_erx;
5454 desc_size = sizeof(struct dma_extended_desc);
5455 } else {
5456 rx_head = (void *)rx_q->dma_rx;
5457 desc_size = sizeof(struct dma_desc);
5458 }
5459
5460 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5461 rx_q->dma_rx_phy, desc_size);
5462 }
5463 while (count < limit) {
5464 unsigned int buf1_len = 0, buf2_len = 0;
5465 enum pkt_hash_types hash_type;
5466 struct stmmac_rx_buffer *buf;
5467 struct dma_desc *np, *p;
5468 int entry;
5469 u32 hash;
5470
5471 if (!count && rx_q->state_saved) {
5472 skb = rx_q->state.skb;
5473 error = rx_q->state.error;
5474 len = rx_q->state.len;
5475 } else {
5476 rx_q->state_saved = false;
5477 skb = NULL;
5478 error = 0;
5479 len = 0;
5480 }
5481
5482 read_again:
5483 if (count >= limit)
5484 break;
5485
5486 buf1_len = 0;
5487 buf2_len = 0;
5488 entry = next_entry;
5489 buf = &rx_q->buf_pool[entry];
5490
5491 if (priv->extend_desc)
5492 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5493 else
5494 p = rx_q->dma_rx + entry;
5495
5496 /* read the status of the incoming frame */
5497 status = stmmac_rx_status(priv, &priv->xstats, p);
5498 /* check if managed by the DMA otherwise go ahead */
5499 if (unlikely(status & dma_own))
5500 break;
5501
5502 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5503 priv->dma_conf.dma_rx_size);
5504 next_entry = rx_q->cur_rx;
5505
5506 if (priv->extend_desc)
5507 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5508 else
5509 np = rx_q->dma_rx + next_entry;
5510
5511 prefetch(np);
5512
5513 if (priv->extend_desc)
5514 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5515 if (unlikely(status == discard_frame)) {
5516 page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5517 buf->page = NULL;
5518 error = 1;
5519 if (!priv->hwts_rx_en)
5520 rx_errors++;
5521 }
5522
5523 if (unlikely(error && (status & rx_not_ls)))
5524 goto read_again;
5525 if (unlikely(error)) {
5526 dev_kfree_skb(skb);
5527 skb = NULL;
5528 count++;
5529 continue;
5530 }
5531
5532 /* Buffer is good. Go on. */
5533
5534 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5535 len += buf1_len;
5536 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5537 len += buf2_len;
5538
5539 /* ACS is disabled; strip manually. */
5540 if (likely(!(status & rx_not_ls))) {
5541 if (buf2_len) {
5542 buf2_len -= ETH_FCS_LEN;
5543 len -= ETH_FCS_LEN;
5544 } else if (buf1_len) {
5545 buf1_len -= ETH_FCS_LEN;
5546 len -= ETH_FCS_LEN;
5547 }
5548 }
5549
5550 if (!skb) {
5551 unsigned int pre_len, sync_len;
5552
5553 dma_sync_single_for_cpu(priv->device, buf->addr,
5554 buf1_len, dma_dir);
5555 net_prefetch(page_address(buf->page) +
5556 buf->page_offset);
5557
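			/* Build an xdp_buff over the page pool buffer so the
			 * frame can be handed to the XDP program in place.
			 */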
5558 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5559 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5560 buf->page_offset, buf1_len, true);
5561
5562 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5563 buf->page_offset;
5564
5565 ctx.priv = priv;
5566 ctx.desc = p;
5567 ctx.ndesc = np;
5568
5569 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5570 /* Due to xdp_adjust_tail: the DMA sync for_device
5571  * must cover the max length the CPU touched
5572  */
5573 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5574 buf->page_offset;
5575 sync_len = max(sync_len, pre_len);
5576
5577 /* For non-XDP_PASS verdicts */
5578 if (IS_ERR(skb)) {
5579 unsigned int xdp_res = -PTR_ERR(skb);
5580
5581 if (xdp_res & STMMAC_XDP_CONSUMED) {
5582 page_pool_put_page(rx_q->page_pool,
5583 virt_to_head_page(ctx.xdp.data),
5584 sync_len, true);
5585 buf->page = NULL;
5586 rx_dropped++;
5587
5588 /* Clear skb: it carried the encoded
5589  * XDP verdict, not a real buffer.
5590  */
5591 skb = NULL;
5592
5593 if (unlikely((status & rx_not_ls)))
5594 goto read_again;
5595
5596 count++;
5597 continue;
5598 } else if (xdp_res & (STMMAC_XDP_TX |
5599 STMMAC_XDP_REDIRECT)) {
5600 xdp_status |= xdp_res;
5601 buf->page = NULL;
5602 skb = NULL;
5603 count++;
5604 continue;
5605 }
5606 }
5607 }
5608
5609 if (!skb) {
5610 unsigned int head_pad_len;
5611
5612 /* XDP program may expand or reduce tail */
5613 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5614
5615 skb = napi_build_skb(page_address(buf->page),
5616 rx_q->napi_skb_frag_size);
5617 if (!skb) {
5618 page_pool_recycle_direct(rx_q->page_pool,
5619 buf->page);
5620 rx_dropped++;
5621 count++;
5622 goto drain_data;
5623 }
5624
5625 /* XDP program may adjust header */
5626 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5627 skb_reserve(skb, head_pad_len);
5628 skb_put(skb, buf1_len);
5629 skb_mark_for_recycle(skb);
5630 buf->page = NULL;
5631 } else if (buf1_len) {
5632 dma_sync_single_for_cpu(priv->device, buf->addr,
5633 buf1_len, dma_dir);
5634 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5635 buf->page, buf->page_offset, buf1_len,
5636 priv->dma_conf.dma_buf_sz);
5637 buf->page = NULL;
5638 }
5639
5640 if (buf2_len) {
5641 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5642 buf2_len, dma_dir);
5643 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5644 buf->sec_page, 0, buf2_len,
5645 priv->dma_conf.dma_buf_sz);
5646 buf->sec_page = NULL;
5647 }
5648
5649 drain_data:
5650 if (likely(status & rx_not_ls))
5651 goto read_again;
5652 if (!skb)
5653 continue;
5654
5655 /* Got entire packet into SKB. Finish it. */
5656
5657 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5658
5659 if (priv->hw->hw_vlan_en)
5660 /* MAC level stripping. */
5661 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5662 else
5663 /* Driver level stripping. */
5664 stmmac_rx_vlan(priv->dev, skb);
5665
5666 skb->protocol = eth_type_trans(skb, priv->dev);
5667
5668 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5669 skb_checksum_none_assert(skb);
5670 else
5671 skb->ip_summed = CHECKSUM_UNNECESSARY;
5672
5673 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5674 skb_set_hash(skb, hash, hash_type);
5675
5676 skb_record_rx_queue(skb, queue);
5677 napi_gro_receive(&ch->rx_napi, skb);
5678 skb = NULL;
5679
5680 rx_packets++;
5681 rx_bytes += len;
5682 count++;
5683 }
5684
5685 if (status & rx_not_ls || skb) {
5686 rx_q->state_saved = true;
5687 rx_q->state.skb = skb;
5688 rx_q->state.error = error;
5689 rx_q->state.len = len;
5690 }
5691
5692 stmmac_finalize_xdp_rx(priv, xdp_status);
5693
5694 stmmac_rx_refill(priv, queue);
5695
5696 u64_stats_update_begin(&rxq_stats->napi_syncp);
5697 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5698 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5699 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5700 u64_stats_update_end(&rxq_stats->napi_syncp);
5701
5702 priv->xstats.rx_dropped += rx_dropped;
5703 priv->xstats.rx_errors += rx_errors;
5704
5705 return count;
5706 }
5707
5708 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5709 {
5710 struct stmmac_channel *ch =
5711 container_of(napi, struct stmmac_channel, rx_napi);
5712 struct stmmac_priv *priv = ch->priv_data;
5713 struct stmmac_rxq_stats *rxq_stats;
5714 u32 chan = ch->index;
5715 int work_done;
5716
5717 rxq_stats = &priv->xstats.rxq_stats[chan];
5718 u64_stats_update_begin(&rxq_stats->napi_syncp);
5719 u64_stats_inc(&rxq_stats->napi.poll);
5720 u64_stats_update_end(&rxq_stats->napi_syncp);
5721
5722 work_done = stmmac_rx(priv, budget, chan);
5723 if (work_done < budget && napi_complete_done(napi, work_done)) {
5724 unsigned long flags;
5725
5726 spin_lock_irqsave(&ch->lock, flags);
5727 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5728 spin_unlock_irqrestore(&ch->lock, flags);
5729 }
5730
5731 return work_done;
5732 }
5733
5734 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5735 {
5736 struct stmmac_channel *ch =
5737 container_of(napi, struct stmmac_channel, tx_napi);
5738 struct stmmac_priv *priv = ch->priv_data;
5739 struct stmmac_txq_stats *txq_stats;
5740 bool pending_packets = false;
5741 u32 chan = ch->index;
5742 int work_done;
5743
5744 txq_stats = &priv->xstats.txq_stats[chan];
5745 u64_stats_update_begin(&txq_stats->napi_syncp);
5746 u64_stats_inc(&txq_stats->napi.poll);
5747 u64_stats_update_end(&txq_stats->napi_syncp);
5748
5749 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
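	/* Clamp the reported work: the TX clean loop may free more
	 * descriptors than the NAPI budget allows us to report.
	 */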
5750 work_done = min(work_done, budget);
5751
5752 if (work_done < budget && napi_complete_done(napi, work_done)) {
5753 unsigned long flags;
5754
5755 spin_lock_irqsave(&ch->lock, flags);
5756 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5757 spin_unlock_irqrestore(&ch->lock, flags);
5758 }
5759
5760 /* TX still has packets to handle; check if we need to arm the tx timer */
5761 if (pending_packets)
5762 stmmac_tx_timer_arm(priv, chan);
5763
5764 return work_done;
5765 }
5766
5767 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5768 {
5769 struct stmmac_channel *ch =
5770 container_of(napi, struct stmmac_channel, rxtx_napi);
5771 struct stmmac_priv *priv = ch->priv_data;
5772 bool tx_pending_packets = false;
5773 int rx_done, tx_done, rxtx_done;
5774 struct stmmac_rxq_stats *rxq_stats;
5775 struct stmmac_txq_stats *txq_stats;
5776 u32 chan = ch->index;
5777
5778 rxq_stats = &priv->xstats.rxq_stats[chan];
5779 u64_stats_update_begin(&rxq_stats->napi_syncp);
5780 u64_stats_inc(&rxq_stats->napi.poll);
5781 u64_stats_update_end(&rxq_stats->napi_syncp);
5782
5783 txq_stats = &priv->xstats.txq_stats[chan];
5784 u64_stats_update_begin(&txq_stats->napi_syncp);
5785 u64_stats_inc(&txq_stats->napi.poll);
5786 u64_stats_update_end(&txq_stats->napi_syncp);
5787
5788 tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5789 tx_done = min(tx_done, budget);
5790
5791 rx_done = stmmac_rx_zc(priv, budget, chan);
5792
5793 rxtx_done = max(tx_done, rx_done);
5794
5795 /* If either TX or RX work is not complete, return budget
5796  * and keep polling
5797  */
5798 if (rxtx_done >= budget)
5799 return budget;
5800
5801 /* all work done, exit the polling mode */
5802 if (napi_complete_done(napi, rxtx_done)) {
5803 unsigned long flags;
5804
5805 spin_lock_irqsave(&ch->lock, flags);
5806 /* Both RX and TX work are complete,
5807  * so enable both RX & TX IRQs.
5808  */
5809 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5810 spin_unlock_irqrestore(&ch->lock, flags);
5811 }
5812
5813 /* TX still has packets to handle; check if we need to arm the tx timer */
5814 if (tx_pending_packets)
5815 stmmac_tx_timer_arm(priv, chan);
5816
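	/* Never report the full budget here: napi_complete_done() may already
	 * have been called above, so return at most budget - 1.
	 */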
5817 return min(rxtx_done, budget - 1);
5818 }
5819
5820 /**
5821 * stmmac_tx_timeout
5822 * @dev : Pointer to net device structure
5823 * @txqueue: the index of the hanging transmit queue
5824 * Description: this function is called when a packet transmission fails to
5825 * complete within a reasonable time. The driver will mark the error in the
5826 * netdev structure and arrange for the device to be reset to a sane state
5827 * in order to transmit a new packet.
5828 */
5829 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5830 {
5831 struct stmmac_priv *priv = netdev_priv(dev);
5832
5833 stmmac_global_err(priv);
5834 }
5835
5836 /**
5837 * stmmac_set_rx_mode - entry point for multicast addressing
5838 * @dev : pointer to the device structure
5839 * Description:
5840 * This function is a driver entry point which gets called by the kernel
5841 * whenever multicast addresses must be enabled/disabled.
5842 * Return value:
5843 * void.
5844 */
5845 static void stmmac_set_rx_mode(struct net_device *dev)
5846 {
5847 struct stmmac_priv *priv = netdev_priv(dev);
5848
5849 stmmac_set_filter(priv, priv->hw, dev);
5850 }
5851
5852 /**
5853 * stmmac_change_mtu - entry point to change MTU size for the device.
5854 * @dev : device pointer.
5855 * @new_mtu : the new MTU size for the device.
5856 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5857 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5858 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5859 * Return value:
5860  * 0 on success and an appropriate negative errno value as defined in
5861  * errno.h on failure.
5862 */
5863 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5864 {
5865 struct stmmac_priv *priv = netdev_priv(dev);
5866 int txfifosz = priv->plat->tx_fifo_size;
5867 struct stmmac_dma_conf *dma_conf;
5868 const int mtu = new_mtu;
5869 int ret;
5870
5871 txfifosz /= priv->plat->tx_queues_to_use;
5872
5873 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5874 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5875 return -EINVAL;
5876 }
5877
5878 new_mtu = STMMAC_ALIGN(new_mtu);
5879
5880 /* If condition true, FIFO is too small or MTU too large */
5881 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5882 return -EINVAL;
5883
5884 if (netif_running(dev)) {
5885 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5886 /* Try to allocate the new DMA conf with the new mtu */
5887 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5888 if (IS_ERR(dma_conf)) {
5889 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5890 mtu);
5891 return PTR_ERR(dma_conf);
5892 }
5893
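		/* The new DMA configuration is ready; only now tear down the
		 * running interface and bring it back up with the new MTU.
		 */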
5894 stmmac_release(dev);
5895
5896 ret = __stmmac_open(dev, dma_conf);
5897 if (ret) {
5898 free_dma_desc_resources(priv, dma_conf);
5899 kfree(dma_conf);
5900 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5901 return ret;
5902 }
5903
5904 kfree(dma_conf);
5905
5906 stmmac_set_rx_mode(dev);
5907 }
5908
5909 WRITE_ONCE(dev->mtu, mtu);
5910 netdev_update_features(dev);
5911
5912 return 0;
5913 }
5914
5915 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5916 netdev_features_t features)
5917 {
5918 struct stmmac_priv *priv = netdev_priv(dev);
5919
5920 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5921 features &= ~NETIF_F_RXCSUM;
5922
5923 if (!priv->plat->tx_coe)
5924 features &= ~NETIF_F_CSUM_MASK;
5925
5926 /* Some GMAC devices have a bugged Jumbo frame support that
5927 * needs to have the Tx COE disabled for oversized frames
5928 * (due to limited buffer sizes). In this case we disable
5929 * the TX csum insertion in the TDES and not use SF.
5930 */
5931 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5932 features &= ~NETIF_F_CSUM_MASK;
5933
5934 /* Enable or disable TSO according to the ethtool request */
5935 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5936 if (features & NETIF_F_TSO)
5937 priv->tso = true;
5938 else
5939 priv->tso = false;
5940 }
5941
5942 return features;
5943 }
5944
5945 static int stmmac_set_features(struct net_device *netdev,
5946 netdev_features_t features)
5947 {
5948 struct stmmac_priv *priv = netdev_priv(netdev);
5949
5950 /* Keep the COE type when RX checksum offload is being enabled */
5951 if (features & NETIF_F_RXCSUM)
5952 priv->hw->rx_csum = priv->plat->rx_coe;
5953 else
5954 priv->hw->rx_csum = 0;
5955 /* No check needed because rx_coe has been set before and will be
5956  * fixed up in case of issue.
5957  */
5958 stmmac_rx_ipc(priv, priv->hw);
5959
5960 if (priv->sph_cap) {
5961 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5962 u32 chan;
5963
5964 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5965 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5966 }
5967
5968 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5969 priv->hw->hw_vlan_en = true;
5970 else
5971 priv->hw->hw_vlan_en = false;
5972
5973 stmmac_set_hw_vlan_mode(priv, priv->hw);
5974
5975 return 0;
5976 }
5977
5978 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5979 {
5980 u32 rx_cnt = priv->plat->rx_queues_to_use;
5981 u32 tx_cnt = priv->plat->tx_queues_to_use;
5982 u32 queues_count;
5983 u32 queue;
5984 bool xmac;
5985
5986 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5987 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5988
5989 if (priv->irq_wake)
5990 pm_wakeup_event(priv->device, 0);
5991
5992 if (priv->dma_cap.estsel)
5993 stmmac_est_irq_status(priv, priv, priv->dev,
5994 &priv->xstats, tx_cnt);
5995
5996 if (stmmac_fpe_supported(priv))
5997 stmmac_fpe_irq_status(priv);
5998
5999 /* To handle GMAC own interrupts */
6000 if ((priv->plat->has_gmac) || xmac) {
6001 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6002
6003 if (unlikely(status)) {
6004 /* For LPI we need to save the tx status */
6005 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6006 priv->tx_path_in_lpi_mode = true;
6007 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6008 priv->tx_path_in_lpi_mode = false;
6009 }
6010
6011 for (queue = 0; queue < queues_count; queue++)
6012 stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6013
6014 /* PCS link status */
6015 if (priv->hw->pcs &&
6016 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6017 if (priv->xstats.pcs_link)
6018 netif_carrier_on(priv->dev);
6019 else
6020 netif_carrier_off(priv->dev);
6021 }
6022
6023 stmmac_timestamp_interrupt(priv, priv);
6024 }
6025 }
6026
6027 /**
6028 * stmmac_interrupt - main ISR
6029 * @irq: interrupt number.
6030 * @dev_id: to pass the net device pointer.
6031 * Description: this is the main driver interrupt service routine.
6032 * It can call:
6033 * o DMA service routine (to manage incoming frame reception and transmission
6034 * status)
6035 * o Core interrupts to manage: remote wake-up, management counter, LPI
6036 * interrupts.
6037 */
6038 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6039 {
6040 struct net_device *dev = (struct net_device *)dev_id;
6041 struct stmmac_priv *priv = netdev_priv(dev);
6042
6043 /* Check if adapter is up */
6044 if (test_bit(STMMAC_DOWN, &priv->state))
6045 return IRQ_HANDLED;
6046
6047 /* Check ASP error if it isn't delivered via an individual IRQ */
6048 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6049 return IRQ_HANDLED;
6050
6051 /* To handle Common interrupts */
6052 stmmac_common_interrupt(priv);
6053
6054 /* To handle DMA interrupts */
6055 stmmac_dma_interrupt(priv);
6056
6057 return IRQ_HANDLED;
6058 }
6059
6060 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6061 {
6062 struct net_device *dev = (struct net_device *)dev_id;
6063 struct stmmac_priv *priv = netdev_priv(dev);
6064
6065 /* Check if adapter is up */
6066 if (test_bit(STMMAC_DOWN, &priv->state))
6067 return IRQ_HANDLED;
6068
6069 /* To handle Common interrupts */
6070 stmmac_common_interrupt(priv);
6071
6072 return IRQ_HANDLED;
6073 }
6074
6075 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6076 {
6077 struct net_device *dev = (struct net_device *)dev_id;
6078 struct stmmac_priv *priv = netdev_priv(dev);
6079
6080 /* Check if adapter is up */
6081 if (test_bit(STMMAC_DOWN, &priv->state))
6082 return IRQ_HANDLED;
6083
6084 /* Check if a fatal error happened */
6085 stmmac_safety_feat_interrupt(priv);
6086
6087 return IRQ_HANDLED;
6088 }
6089
6090 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6091 {
6092 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6093 struct stmmac_dma_conf *dma_conf;
6094 int chan = tx_q->queue_index;
6095 struct stmmac_priv *priv;
6096 int status;
6097
6098 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6099 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6100
6101 /* Check if adapter is up */
6102 if (test_bit(STMMAC_DOWN, &priv->state))
6103 return IRQ_HANDLED;
6104
6105 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6106
6107 if (unlikely(status & tx_hard_error_bump_tc)) {
6108 /* Try to bump up the dma threshold on this failure */
6109 stmmac_bump_dma_threshold(priv, chan);
6110 } else if (unlikely(status == tx_hard_error)) {
6111 stmmac_tx_err(priv, chan);
6112 }
6113
6114 return IRQ_HANDLED;
6115 }
6116
6117 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6118 {
6119 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6120 struct stmmac_dma_conf *dma_conf;
6121 int chan = rx_q->queue_index;
6122 struct stmmac_priv *priv;
6123
6124 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6125 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6126
6127 /* Check if adapter is up */
6128 if (test_bit(STMMAC_DOWN, &priv->state))
6129 return IRQ_HANDLED;
6130
6131 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6132
6133 return IRQ_HANDLED;
6134 }
6135
6136 /**
6137 * stmmac_ioctl - Entry point for the Ioctl
6138 * @dev: Device pointer.
6139  * @rq: An IOCTL-specific structure that can contain a pointer to
6140 * a proprietary structure used to pass information to the driver.
6141 * @cmd: IOCTL command
6142 * Description:
6143 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6144 */
6145 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6146 {
6147 struct stmmac_priv *priv = netdev_priv(dev);
6148 int ret = -EOPNOTSUPP;
6149
6150 if (!netif_running(dev))
6151 return -EINVAL;
6152
6153 switch (cmd) {
6154 case SIOCGMIIPHY:
6155 case SIOCGMIIREG:
6156 case SIOCSMIIREG:
6157 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6158 break;
6159 case SIOCSHWTSTAMP:
6160 ret = stmmac_hwtstamp_set(dev, rq);
6161 break;
6162 case SIOCGHWTSTAMP:
6163 ret = stmmac_hwtstamp_get(dev, rq);
6164 break;
6165 default:
6166 break;
6167 }
6168
6169 return ret;
6170 }
6171
6172 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6173 void *cb_priv)
6174 {
6175 struct stmmac_priv *priv = cb_priv;
6176 int ret = -EOPNOTSUPP;
6177
6178 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6179 return ret;
6180
6181 __stmmac_disable_all_queues(priv);
6182
6183 switch (type) {
6184 case TC_SETUP_CLSU32:
6185 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6186 break;
6187 case TC_SETUP_CLSFLOWER:
6188 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6189 break;
6190 default:
6191 break;
6192 }
6193
6194 stmmac_enable_all_queues(priv);
6195 return ret;
6196 }
6197
6198 static LIST_HEAD(stmmac_block_cb_list);
6199
6200 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6201 void *type_data)
6202 {
6203 struct stmmac_priv *priv = netdev_priv(ndev);
6204
6205 switch (type) {
6206 case TC_QUERY_CAPS:
6207 return stmmac_tc_query_caps(priv, priv, type_data);
6208 case TC_SETUP_QDISC_MQPRIO:
6209 return stmmac_tc_setup_mqprio(priv, priv, type_data);
6210 case TC_SETUP_BLOCK:
6211 return flow_block_cb_setup_simple(type_data,
6212 &stmmac_block_cb_list,
6213 stmmac_setup_tc_block_cb,
6214 priv, priv, true);
6215 case TC_SETUP_QDISC_CBS:
6216 return stmmac_tc_setup_cbs(priv, priv, type_data);
6217 case TC_SETUP_QDISC_TAPRIO:
6218 return stmmac_tc_setup_taprio(priv, priv, type_data);
6219 case TC_SETUP_QDISC_ETF:
6220 return stmmac_tc_setup_etf(priv, priv, type_data);
6221 default:
6222 return -EOPNOTSUPP;
6223 }
6224 }
6225
6226 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6227 struct net_device *sb_dev)
6228 {
6229 int gso = skb_shinfo(skb)->gso_type;
6230
6231 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6232 /*
6233 * There is no way to determine the number of TSO/USO
6234  * capable Queues. Let's always use Queue 0
6235 * because if TSO/USO is supported then at least this
6236 * one will be capable.
6237 */
6238 return 0;
6239 }
6240
6241 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6242 }
6243
6244 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6245 {
6246 struct stmmac_priv *priv = netdev_priv(ndev);
6247 int ret = 0;
6248
6249 ret = pm_runtime_resume_and_get(priv->device);
6250 if (ret < 0)
6251 return ret;
6252
6253 ret = eth_mac_addr(ndev, addr);
6254 if (ret)
6255 goto set_mac_error;
6256
6257 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6258
6259 set_mac_error:
6260 pm_runtime_put(priv->device);
6261
6262 return ret;
6263 }
6264
6265 #ifdef CONFIG_DEBUG_FS
6266 static struct dentry *stmmac_fs_dir;
6267
6268 static void sysfs_display_ring(void *head, int size, int extend_desc,
6269 struct seq_file *seq, dma_addr_t dma_phy_addr)
6270 {
6271 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6272 struct dma_desc *p = (struct dma_desc *)head;
6273 unsigned int desc_size;
6274 dma_addr_t dma_addr;
6275 int i;
6276
6277 desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6278 for (i = 0; i < size; i++) {
6279 dma_addr = dma_phy_addr + i * desc_size;
6280 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6281 i, &dma_addr,
6282 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6283 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6284 if (extend_desc)
6285 p = &(++ep)->basic;
6286 else
6287 p++;
6288 }
6289 }
6290
6291 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6292 {
6293 struct net_device *dev = seq->private;
6294 struct stmmac_priv *priv = netdev_priv(dev);
6295 u32 rx_count = priv->plat->rx_queues_to_use;
6296 u32 tx_count = priv->plat->tx_queues_to_use;
6297 u32 queue;
6298
6299 if ((dev->flags & IFF_UP) == 0)
6300 return 0;
6301
6302 for (queue = 0; queue < rx_count; queue++) {
6303 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6304
6305 seq_printf(seq, "RX Queue %d:\n", queue);
6306
6307 if (priv->extend_desc) {
6308 seq_printf(seq, "Extended descriptor ring:\n");
6309 sysfs_display_ring((void *)rx_q->dma_erx,
6310 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6311 } else {
6312 seq_printf(seq, "Descriptor ring:\n");
6313 sysfs_display_ring((void *)rx_q->dma_rx,
6314 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6315 }
6316 }
6317
6318 for (queue = 0; queue < tx_count; queue++) {
6319 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6320
6321 seq_printf(seq, "TX Queue %d:\n", queue);
6322
6323 if (priv->extend_desc) {
6324 seq_printf(seq, "Extended descriptor ring:\n");
6325 sysfs_display_ring((void *)tx_q->dma_etx,
6326 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6327 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6328 seq_printf(seq, "Descriptor ring:\n");
6329 sysfs_display_ring((void *)tx_q->dma_tx,
6330 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6331 }
6332 }
6333
6334 return 0;
6335 }
6336 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6337
6338 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6339 {
6340 static const char * const dwxgmac_timestamp_source[] = {
6341 "None",
6342 "Internal",
6343 "External",
6344 "Both",
6345 };
6346 static const char * const dwxgmac_safety_feature_desc[] = {
6347 "No",
6348 "All Safety Features with ECC and Parity",
6349 "All Safety Features without ECC or Parity",
6350 "All Safety Features with Parity Only",
6351 "ECC Only",
6352 "UNDEFINED",
6353 "UNDEFINED",
6354 "UNDEFINED",
6355 };
6356 struct net_device *dev = seq->private;
6357 struct stmmac_priv *priv = netdev_priv(dev);
6358
6359 if (!priv->hw_cap_support) {
6360 seq_printf(seq, "DMA HW features not supported\n");
6361 return 0;
6362 }
6363
6364 seq_printf(seq, "==============================\n");
6365 seq_printf(seq, "\tDMA HW features\n");
6366 seq_printf(seq, "==============================\n");
6367
6368 seq_printf(seq, "\t10/100 Mbps: %s\n",
6369 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6370 seq_printf(seq, "\t1000 Mbps: %s\n",
6371 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6372 seq_printf(seq, "\tHalf duplex: %s\n",
6373 (priv->dma_cap.half_duplex) ? "Y" : "N");
6374 if (priv->plat->has_xgmac) {
6375 seq_printf(seq,
6376 "\tNumber of Additional MAC address registers: %d\n",
6377 priv->dma_cap.multi_addr);
6378 } else {
6379 seq_printf(seq, "\tHash Filter: %s\n",
6380 (priv->dma_cap.hash_filter) ? "Y" : "N");
6381 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6382 (priv->dma_cap.multi_addr) ? "Y" : "N");
6383 }
6384 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6385 (priv->dma_cap.pcs) ? "Y" : "N");
6386 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6387 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6388 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6389 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6390 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6391 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6392 seq_printf(seq, "\tRMON module: %s\n",
6393 (priv->dma_cap.rmon) ? "Y" : "N");
6394 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6395 (priv->dma_cap.time_stamp) ? "Y" : "N");
6396 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6397 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6398 if (priv->plat->has_xgmac)
6399 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6400 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6401 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6402 (priv->dma_cap.eee) ? "Y" : "N");
6403 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6404 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6405 (priv->dma_cap.tx_coe) ? "Y" : "N");
6406 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6407 priv->plat->has_xgmac) {
6408 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6409 (priv->dma_cap.rx_coe) ? "Y" : "N");
6410 } else {
6411 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6412 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6413 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6414 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6415 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6416 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6417 }
6418 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6419 priv->dma_cap.number_rx_channel);
6420 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6421 priv->dma_cap.number_tx_channel);
6422 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6423 priv->dma_cap.number_rx_queues);
6424 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6425 priv->dma_cap.number_tx_queues);
6426 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6427 (priv->dma_cap.enh_desc) ? "Y" : "N");
6428 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6429 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6430 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6431 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6432 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6433 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6434 priv->dma_cap.pps_out_num);
6435 seq_printf(seq, "\tSafety Features: %s\n",
6436 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6437 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6438 priv->dma_cap.frpsel ? "Y" : "N");
6439 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6440 priv->dma_cap.host_dma_width);
6441 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6442 priv->dma_cap.rssen ? "Y" : "N");
6443 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6444 priv->dma_cap.vlhash ? "Y" : "N");
6445 seq_printf(seq, "\tSplit Header: %s\n",
6446 priv->dma_cap.sphen ? "Y" : "N");
6447 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6448 priv->dma_cap.vlins ? "Y" : "N");
6449 seq_printf(seq, "\tDouble VLAN: %s\n",
6450 priv->dma_cap.dvlan ? "Y" : "N");
6451 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6452 priv->dma_cap.l3l4fnum);
6453 seq_printf(seq, "\tARP Offloading: %s\n",
6454 priv->dma_cap.arpoffsel ? "Y" : "N");
6455 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6456 priv->dma_cap.estsel ? "Y" : "N");
6457 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6458 priv->dma_cap.fpesel ? "Y" : "N");
6459 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6460 priv->dma_cap.tbssel ? "Y" : "N");
6461 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6462 priv->dma_cap.tbs_ch_num);
6463 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6464 priv->dma_cap.sgfsel ? "Y" : "N");
6465 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6466 BIT(priv->dma_cap.ttsfd) >> 1);
6467 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6468 priv->dma_cap.numtc);
6469 seq_printf(seq, "\tDCB Feature: %s\n",
6470 priv->dma_cap.dcben ? "Y" : "N");
6471 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6472 priv->dma_cap.advthword ? "Y" : "N");
6473 seq_printf(seq, "\tPTP Offload: %s\n",
6474 priv->dma_cap.ptoen ? "Y" : "N");
6475 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6476 priv->dma_cap.osten ? "Y" : "N");
6477 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6478 priv->dma_cap.pfcen ? "Y" : "N");
6479 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6480 BIT(priv->dma_cap.frpes) << 6);
6481 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6482 BIT(priv->dma_cap.frpbs) << 6);
6483 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6484 priv->dma_cap.frppipe_num);
6485 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6486 priv->dma_cap.nrvf_num ?
6487 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6488 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6489 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6490 seq_printf(seq, "\tDepth of GCL: %lu\n",
6491 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6492 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6493 priv->dma_cap.cbtisel ? "Y" : "N");
6494 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6495 priv->dma_cap.aux_snapshot_n);
6496 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6497 priv->dma_cap.pou_ost_en ? "Y" : "N");
6498 seq_printf(seq, "\tEnhanced DMA: %s\n",
6499 priv->dma_cap.edma ? "Y" : "N");
6500 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6501 priv->dma_cap.ediffc ? "Y" : "N");
6502 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6503 priv->dma_cap.vxn ? "Y" : "N");
6504 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6505 priv->dma_cap.dbgmem ? "Y" : "N");
6506 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6507 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6508 return 0;
6509 }
6510 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6511
6512 /* Use network device events to rename debugfs file entries.
6513 */
6514 static int stmmac_device_event(struct notifier_block *unused,
6515 unsigned long event, void *ptr)
6516 {
6517 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6518 struct stmmac_priv *priv = netdev_priv(dev);
6519
6520 if (dev->netdev_ops != &stmmac_netdev_ops)
6521 goto done;
6522
6523 switch (event) {
6524 case NETDEV_CHANGENAME:
6525 debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6526 break;
6527 }
6528 done:
6529 return NOTIFY_DONE;
6530 }
6531
6532 static struct notifier_block stmmac_notifier = {
6533 .notifier_call = stmmac_device_event,
6534 };
6535
6536 static void stmmac_init_fs(struct net_device *dev)
6537 {
6538 struct stmmac_priv *priv = netdev_priv(dev);
6539
6540 rtnl_lock();
6541
6542 /* Create per netdev entries */
6543 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6544
6545 /* Entry to report DMA RX/TX rings */
6546 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6547 &stmmac_rings_status_fops);
6548
6549 /* Entry to report the DMA HW features */
6550 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6551 &stmmac_dma_cap_fops);
6552
6553 rtnl_unlock();
6554 }
6555
6556 static void stmmac_exit_fs(struct net_device *dev)
6557 {
6558 struct stmmac_priv *priv = netdev_priv(dev);
6559
6560 debugfs_remove_recursive(priv->dbgfs_dir);
6561 }
6562 #endif /* CONFIG_DEBUG_FS */
6563
6564 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6565 {
6566 unsigned char *data = (unsigned char *)&vid_le;
6567 unsigned char data_byte = 0;
6568 u32 crc = ~0x0;
6569 u32 temp = 0;
6570 int i, bits;
6571
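	/* Bit-serial CRC-32 (reflected, polynomial 0xEDB88320) over the
	 * 12-bit VLAN ID, used for the VLAN hash filter below.
	 */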
6572 bits = get_bitmask_order(VLAN_VID_MASK);
6573 for (i = 0; i < bits; i++) {
6574 if ((i % 8) == 0)
6575 data_byte = data[i / 8];
6576
6577 temp = ((crc & 1) ^ data_byte) & 1;
6578 crc >>= 1;
6579 data_byte >>= 1;
6580
6581 if (temp)
6582 crc ^= 0xedb88320;
6583 }
6584
6585 return crc;
6586 }
6587
6588 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6589 {
6590 u32 crc, hash = 0;
6591 u16 pmatch = 0;
6592 int count = 0;
6593 u16 vid = 0;
6594
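	/* Fold the CRC of each active VLAN ID into a 16-bit hash bitmap
	 * (one bit per 4-bit bucket) for the hardware VLAN hash filter.
	 */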
6595 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6596 __le16 vid_le = cpu_to_le16(vid);
6597 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6598 hash |= (1 << crc);
6599 count++;
6600 }
6601
6602 if (!priv->dma_cap.vlhash) {
6603 if (count > 2) /* VID = 0 always passes filter */
6604 return -EOPNOTSUPP;
6605
6606 pmatch = vid;
6607 hash = 0;
6608 }
6609
6610 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6611 }
6612
6613 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6614 {
6615 struct stmmac_priv *priv = netdev_priv(ndev);
6616 bool is_double = false;
6617 int ret;
6618
6619 ret = pm_runtime_resume_and_get(priv->device);
6620 if (ret < 0)
6621 return ret;
6622
6623 if (be16_to_cpu(proto) == ETH_P_8021AD)
6624 is_double = true;
6625
6626 set_bit(vid, priv->active_vlans);
6627 ret = stmmac_vlan_update(priv, is_double);
6628 if (ret) {
6629 clear_bit(vid, priv->active_vlans);
6630 goto err_pm_put;
6631 }
6632
6633 if (priv->hw->num_vlan) {
6634 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6635 if (ret)
6636 goto err_pm_put;
6637 }
6638 err_pm_put:
6639 pm_runtime_put(priv->device);
6640
6641 return ret;
6642 }
6643
6644 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6645 {
6646 struct stmmac_priv *priv = netdev_priv(ndev);
6647 bool is_double = false;
6648 int ret;
6649
6650 ret = pm_runtime_resume_and_get(priv->device);
6651 if (ret < 0)
6652 return ret;
6653
6654 if (be16_to_cpu(proto) == ETH_P_8021AD)
6655 is_double = true;
6656
6657 clear_bit(vid, priv->active_vlans);
6658
6659 if (priv->hw->num_vlan) {
6660 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6661 if (ret)
6662 goto del_vlan_error;
6663 }
6664
6665 ret = stmmac_vlan_update(priv, is_double);
6666
6667 del_vlan_error:
6668 pm_runtime_put(priv->device);
6669
6670 return ret;
6671 }
6672
6673 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6674 {
6675 struct stmmac_priv *priv = netdev_priv(dev);
6676
6677 switch (bpf->command) {
6678 case XDP_SETUP_PROG:
6679 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6680 case XDP_SETUP_XSK_POOL:
6681 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6682 bpf->xsk.queue_id);
6683 default:
6684 return -EOPNOTSUPP;
6685 }
6686 }
6687
6688 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6689 struct xdp_frame **frames, u32 flags)
6690 {
6691 struct stmmac_priv *priv = netdev_priv(dev);
6692 int cpu = smp_processor_id();
6693 struct netdev_queue *nq;
6694 int i, nxmit = 0;
6695 int queue;
6696
6697 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6698 return -ENETDOWN;
6699
6700 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6701 return -EINVAL;
6702
6703 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6704 nq = netdev_get_tx_queue(priv->dev, queue);
6705
6706 __netif_tx_lock(nq, cpu);
6707 /* Avoids TX time-out as we are sharing with slow path */
6708 txq_trans_cond_update(nq);
6709
6710 for (i = 0; i < num_frames; i++) {
6711 int res;
6712
6713 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6714 if (res == STMMAC_XDP_CONSUMED)
6715 break;
6716
6717 nxmit++;
6718 }
6719
6720 if (flags & XDP_XMIT_FLUSH) {
6721 stmmac_flush_tx_descriptors(priv, queue);
6722 stmmac_tx_timer_arm(priv, queue);
6723 }
6724
6725 __netif_tx_unlock(nq);
6726
6727 return nxmit;
6728 }
6729
6730 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6731 {
6732 struct stmmac_channel *ch = &priv->channel[queue];
6733 unsigned long flags;
6734
6735 spin_lock_irqsave(&ch->lock, flags);
6736 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6737 spin_unlock_irqrestore(&ch->lock, flags);
6738
6739 stmmac_stop_rx_dma(priv, queue);
6740 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6741 }
6742
6743 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6744 {
6745 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6746 struct stmmac_channel *ch = &priv->channel[queue];
6747 unsigned long flags;
6748 u32 buf_size;
6749 int ret;
6750
6751 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6752 if (ret) {
6753 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6754 return;
6755 }
6756
6757 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6758 if (ret) {
6759 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6760 netdev_err(priv->dev, "Failed to init RX desc.\n");
6761 return;
6762 }
6763
6764 stmmac_reset_rx_queue(priv, queue);
6765 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6766
6767 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6768 rx_q->dma_rx_phy, rx_q->queue_index);
6769
6770 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6771 sizeof(struct dma_desc));
6772 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6773 rx_q->rx_tail_addr, rx_q->queue_index);
6774
6775 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6776 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6777 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6778 buf_size,
6779 rx_q->queue_index);
6780 } else {
6781 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6782 priv->dma_conf.dma_buf_sz,
6783 rx_q->queue_index);
6784 }
6785
6786 stmmac_start_rx_dma(priv, queue);
6787
6788 spin_lock_irqsave(&ch->lock, flags);
6789 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6790 spin_unlock_irqrestore(&ch->lock, flags);
6791 }
6792
6793 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6794 {
6795 struct stmmac_channel *ch = &priv->channel[queue];
6796 unsigned long flags;
6797
6798 spin_lock_irqsave(&ch->lock, flags);
6799 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6800 spin_unlock_irqrestore(&ch->lock, flags);
6801
6802 stmmac_stop_tx_dma(priv, queue);
6803 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6804 }
6805
6806 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6807 {
6808 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6809 struct stmmac_channel *ch = &priv->channel[queue];
6810 unsigned long flags;
6811 int ret;
6812
6813 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6814 if (ret) {
6815 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6816 return;
6817 }
6818
6819 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6820 if (ret) {
6821 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6822 netdev_err(priv->dev, "Failed to init TX desc.\n");
6823 return;
6824 }
6825
6826 stmmac_reset_tx_queue(priv, queue);
6827 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6828
6829 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6830 tx_q->dma_tx_phy, tx_q->queue_index);
6831
6832 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6833 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6834
6835 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6836 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6837 tx_q->tx_tail_addr, tx_q->queue_index);
6838
6839 stmmac_start_tx_dma(priv, queue);
6840
6841 spin_lock_irqsave(&ch->lock, flags);
6842 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6843 spin_unlock_irqrestore(&ch->lock, flags);
6844 }
6845
6846 void stmmac_xdp_release(struct net_device *dev)
6847 {
6848 struct stmmac_priv *priv = netdev_priv(dev);
6849 u32 chan;
6850
6851 /* Ensure tx function is not running */
6852 netif_tx_disable(dev);
6853
6854 /* Disable NAPI process */
6855 stmmac_disable_all_queues(priv);
6856
6857 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6858 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6859
6860 /* Free the IRQ lines */
6861 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6862
6863 /* Stop TX/RX DMA channels */
6864 stmmac_stop_all_dma(priv);
6865
6866 /* Release and free the Rx/Tx resources */
6867 free_dma_desc_resources(priv, &priv->dma_conf);
6868
6869 /* Disable the MAC Rx/Tx */
6870 stmmac_mac_set(priv, priv->ioaddr, false);
6871
6872 /* set trans_start so we don't get spurious
6873 * watchdogs during reset
6874 */
6875 netif_trans_update(dev);
6876 netif_carrier_off(dev);
6877 }
6878
6879 int stmmac_xdp_open(struct net_device *dev)
6880 {
6881 struct stmmac_priv *priv = netdev_priv(dev);
6882 u32 rx_cnt = priv->plat->rx_queues_to_use;
6883 u32 tx_cnt = priv->plat->tx_queues_to_use;
6884 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6885 struct stmmac_rx_queue *rx_q;
6886 struct stmmac_tx_queue *tx_q;
6887 u32 buf_size;
6888 bool sph_en;
6889 u32 chan;
6890 int ret;
6891
6892 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6893 if (ret < 0) {
6894 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6895 __func__);
6896 goto dma_desc_error;
6897 }
6898
6899 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6900 if (ret < 0) {
6901 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6902 __func__);
6903 goto init_error;
6904 }
6905
6906 stmmac_reset_queues_param(priv);
6907
6908 /* DMA CSR Channel configuration */
6909 for (chan = 0; chan < dma_csr_ch; chan++) {
6910 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6911 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6912 }
6913
6914 /* Adjust Split header */
6915 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6916
6917 /* DMA RX Channel Configuration */
6918 for (chan = 0; chan < rx_cnt; chan++) {
6919 rx_q = &priv->dma_conf.rx_queue[chan];
6920
6921 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6922 rx_q->dma_rx_phy, chan);
6923
6924 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6925 (rx_q->buf_alloc_num *
6926 sizeof(struct dma_desc));
6927 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6928 rx_q->rx_tail_addr, chan);
6929
6930 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6931 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6932 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6933 buf_size,
6934 rx_q->queue_index);
6935 } else {
6936 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6937 priv->dma_conf.dma_buf_sz,
6938 rx_q->queue_index);
6939 }
6940
6941 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6942 }
6943
6944 /* DMA TX Channel Configuration */
6945 for (chan = 0; chan < tx_cnt; chan++) {
6946 tx_q = &priv->dma_conf.tx_queue[chan];
6947
6948 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6949 tx_q->dma_tx_phy, chan);
6950
6951 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6952 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6953 tx_q->tx_tail_addr, chan);
6954
6955 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6956 tx_q->txtimer.function = stmmac_tx_timer;
6957 }
6958
6959 /* Enable the MAC Rx/Tx */
6960 stmmac_mac_set(priv, priv->ioaddr, true);
6961
6962 /* Start Rx & Tx DMA Channels */
6963 stmmac_start_all_dma(priv);
6964
6965 ret = stmmac_request_irq(dev);
6966 if (ret)
6967 goto irq_error;
6968
6969 /* Enable NAPI process */
6970 stmmac_enable_all_queues(priv);
6971 netif_carrier_on(dev);
6972 netif_tx_start_all_queues(dev);
6973 stmmac_enable_all_dma_irq(priv);
6974
6975 return 0;
6976
6977 irq_error:
6978 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6979 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6980
6981 stmmac_hw_teardown(dev);
6982 init_error:
6983 free_dma_desc_resources(priv, &priv->dma_conf);
6984 dma_desc_error:
6985 return ret;
6986 }
6987
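/**
 * stmmac_xsk_wakeup - ndo_xsk_wakeup callback
 * @dev: device pointer
 * @queue: queue index
 * @flags: XDP_WAKEUP_RX/XDP_WAKEUP_TX (unused here)
 * Description: kick the RX/TX NAPI of the channel backing an AF_XDP
 * socket so that pending descriptors are processed. Returns -ENETDOWN
 * if the interface is down and -EINVAL if XDP is not enabled, the queue
 * index is out of range or the queue has no XSK pool attached.
 */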
6988 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6989 {
6990 struct stmmac_priv *priv = netdev_priv(dev);
6991 struct stmmac_rx_queue *rx_q;
6992 struct stmmac_tx_queue *tx_q;
6993 struct stmmac_channel *ch;
6994
6995 if (test_bit(STMMAC_DOWN, &priv->state) ||
6996 !netif_carrier_ok(priv->dev))
6997 return -ENETDOWN;
6998
6999 if (!stmmac_xdp_is_enabled(priv))
7000 return -EINVAL;
7001
7002 if (queue >= priv->plat->rx_queues_to_use ||
7003 queue >= priv->plat->tx_queues_to_use)
7004 return -EINVAL;
7005
7006 rx_q = &priv->dma_conf.rx_queue[queue];
7007 tx_q = &priv->dma_conf.tx_queue[queue];
7008 ch = &priv->channel[queue];
7009
7010 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7011 return -EINVAL;
7012
7013 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7014 /* EQoS does not have per-DMA channel SW interrupt,
7015 * so we schedule the RX NAPI straight away.
7016 */
7017 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7018 __napi_schedule(&ch->rxtx_napi);
7019 }
7020
7021 return 0;
7022 }
7023
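/**
 * stmmac_get_stats64 - ndo_get_stats64 callback
 * @dev: device pointer
 * @stats: structure to be filled with the accumulated statistics
 * Description: sum the per-queue TX/RX packet and byte counters under
 * their u64_stats sync points and copy the remaining error counters
 * from priv->xstats.
 */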
7024 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7025 {
7026 struct stmmac_priv *priv = netdev_priv(dev);
7027 u32 tx_cnt = priv->plat->tx_queues_to_use;
7028 u32 rx_cnt = priv->plat->rx_queues_to_use;
7029 unsigned int start;
7030 int q;
7031
7032 for (q = 0; q < tx_cnt; q++) {
7033 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7034 u64 tx_packets;
7035 u64 tx_bytes;
7036
7037 do {
7038 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7039 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
7040 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7041 do {
7042 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7043 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7044 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7045
7046 stats->tx_packets += tx_packets;
7047 stats->tx_bytes += tx_bytes;
7048 }
7049
7050 for (q = 0; q < rx_cnt; q++) {
7051 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7052 u64 rx_packets;
7053 u64 rx_bytes;
7054
7055 do {
7056 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7057 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7058 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
7059 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7060
7061 stats->rx_packets += rx_packets;
7062 stats->rx_bytes += rx_bytes;
7063 }
7064
7065 stats->rx_dropped = priv->xstats.rx_dropped;
7066 stats->rx_errors = priv->xstats.rx_errors;
7067 stats->tx_dropped = priv->xstats.tx_dropped;
7068 stats->tx_errors = priv->xstats.tx_errors;
7069 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7070 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7071 stats->rx_length_errors = priv->xstats.rx_length;
7072 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7073 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7074 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7075 }
7076
7077 static const struct net_device_ops stmmac_netdev_ops = {
7078 .ndo_open = stmmac_open,
7079 .ndo_start_xmit = stmmac_xmit,
7080 .ndo_stop = stmmac_release,
7081 .ndo_change_mtu = stmmac_change_mtu,
7082 .ndo_fix_features = stmmac_fix_features,
7083 .ndo_set_features = stmmac_set_features,
7084 .ndo_set_rx_mode = stmmac_set_rx_mode,
7085 .ndo_tx_timeout = stmmac_tx_timeout,
7086 .ndo_eth_ioctl = stmmac_ioctl,
7087 .ndo_get_stats64 = stmmac_get_stats64,
7088 .ndo_setup_tc = stmmac_setup_tc,
7089 .ndo_select_queue = stmmac_select_queue,
7090 .ndo_set_mac_address = stmmac_set_mac_address,
7091 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7092 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7093 .ndo_bpf = stmmac_bpf,
7094 .ndo_xdp_xmit = stmmac_xdp_xmit,
7095 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7096 };
7097
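/**
 * stmmac_reset_subtask - reset the device from the service task
 * @priv: driver private structure
 * Description: if a reset has been requested (STMMAC_RESET_REQUESTED)
 * and the interface is not already down, close and re-open the device
 * under the rtnl lock to recover from a fatal error.
 */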
7098 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7099 {
7100 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7101 return;
7102 if (test_bit(STMMAC_DOWN, &priv->state))
7103 return;
7104
7105 netdev_err(priv->dev, "Reset adapter.\n");
7106
7107 rtnl_lock();
7108 netif_trans_update(priv->dev);
7109 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7110 usleep_range(1000, 2000);
7111
7112 set_bit(STMMAC_DOWN, &priv->state);
7113 dev_close(priv->dev);
7114 dev_open(priv->dev, NULL);
7115 clear_bit(STMMAC_DOWN, &priv->state);
7116 clear_bit(STMMAC_RESETING, &priv->state);
7117 rtnl_unlock();
7118 }
7119
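/**
 * stmmac_service_task - deferred work handler
 * @work: work_struct embedded in the driver private structure
 * Description: run the reset subtask and clear the service-scheduled flag.
 */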
7120 static void stmmac_service_task(struct work_struct *work)
7121 {
7122 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7123 service_task);
7124
7125 stmmac_reset_subtask(priv);
7126 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7127 }
7128
7129 /**
7130 * stmmac_hw_init - Init the MAC device
7131 * @priv: driver private structure
7132 * Description: this function configures the MAC device according to
7133 * some platform parameters or the HW capability register. It prepares the
7134 * driver to use either ring or chain mode and to set up either enhanced or
7135 * normal descriptors.
7136 */
7137 static int stmmac_hw_init(struct stmmac_priv *priv)
7138 {
7139 int ret;
7140
7141 /* dwmac-sun8i only works in chain mode */
7142 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7143 chain_mode = 1;
7144 priv->chain_mode = chain_mode;
7145
7146 /* Initialize HW Interface */
7147 ret = stmmac_hwif_init(priv);
7148 if (ret)
7149 return ret;
7150
7151 /* Get the HW capability (new GMAC newer than 3.50a) */
7152 priv->hw_cap_support = stmmac_get_hw_features(priv);
7153 if (priv->hw_cap_support) {
7154 dev_info(priv->device, "DMA HW capability register supported\n");
7155
7156 /* We can override some gmac/dma configuration fields (e.g.
7157 * enh_desc, tx_coe) that are passed through the
7158 * platform with the values from the HW capability
7159 * register (if supported).
7160 */
7161 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7162 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7163 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7164 priv->hw->pmt = priv->plat->pmt;
7165 if (priv->dma_cap.hash_tb_sz) {
7166 priv->hw->multicast_filter_bins =
7167 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7168 priv->hw->mcast_bits_log2 =
7169 ilog2(priv->hw->multicast_filter_bins);
7170 }
7171
7172 /* TXCOE doesn't work in thresh DMA mode */
7173 if (priv->plat->force_thresh_dma_mode)
7174 priv->plat->tx_coe = 0;
7175 else
7176 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7177
7178 /* In case of GMAC4 rx_coe is from HW cap register. */
7179 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7180
7181 if (priv->dma_cap.rx_coe_type2)
7182 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7183 else if (priv->dma_cap.rx_coe_type1)
7184 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7185
7186 } else {
7187 dev_info(priv->device, "No HW DMA feature register supported\n");
7188 }
7189
7190 if (priv->plat->rx_coe) {
7191 priv->hw->rx_csum = priv->plat->rx_coe;
7192 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7193 if (priv->synopsys_id < DWMAC_CORE_4_00)
7194 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7195 }
7196 if (priv->plat->tx_coe)
7197 dev_info(priv->device, "TX Checksum insertion supported\n");
7198
7199 if (priv->plat->pmt) {
7200 dev_info(priv->device, "Wake-Up On LAN supported\n");
7201 device_set_wakeup_capable(priv->device, 1);
7202 }
7203
7204 if (priv->dma_cap.tsoen)
7205 dev_info(priv->device, "TSO supported\n");
7206
7207 if (priv->dma_cap.number_rx_queues &&
7208 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7209 dev_warn(priv->device,
7210 "Number of Rx queues (%u) exceeds dma capability\n",
7211 priv->plat->rx_queues_to_use);
7212 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7213 }
7214 if (priv->dma_cap.number_tx_queues &&
7215 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7216 dev_warn(priv->device,
7217 "Number of Tx queues (%u) exceeds dma capability\n",
7218 priv->plat->tx_queues_to_use);
7219 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7220 }
7221
7222 if (!priv->plat->rx_fifo_size) {
7223 if (priv->dma_cap.rx_fifo_size) {
7224 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7225 } else {
7226 dev_err(priv->device, "Can't specify Rx FIFO size\n");
7227 return -ENODEV;
7228 }
7229 } else if (priv->dma_cap.rx_fifo_size &&
7230 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7231 dev_warn(priv->device,
7232 "Rx FIFO size (%u) exceeds dma capability\n",
7233 priv->plat->rx_fifo_size);
7234 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7235 }
7236 if (!priv->plat->tx_fifo_size) {
7237 if (priv->dma_cap.tx_fifo_size) {
7238 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7239 } else {
7240 dev_err(priv->device, "Can't specify Tx FIFO size\n");
7241 return -ENODEV;
7242 }
7243 } else if (priv->dma_cap.tx_fifo_size &&
7244 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7245 dev_warn(priv->device,
7246 "Tx FIFO size (%u) exceeds dma capability\n",
7247 priv->plat->tx_fifo_size);
7248 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7249 }
7250
7251 priv->hw->vlan_fail_q_en =
7252 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7253 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7254
7255 /* Run HW quirks, if any */
7256 if (priv->hwif_quirks) {
7257 ret = priv->hwif_quirks(priv);
7258 if (ret)
7259 return ret;
7260 }
7261
7262 /* Rx Watchdog is available in the COREs newer than the 3.40.
7263 * In some cases, for example on buggy HW, this feature
7264 * has to be disabled and this can be done by passing the
7265 * riwt_off field from the platform.
7266 */
7267 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7268 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7269 priv->use_riwt = 1;
7270 dev_info(priv->device,
7271 "Enable RX Mitigation via HW Watchdog Timer\n");
7272 }
7273
7274 return 0;
7275 }
7276
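/**
 * stmmac_napi_add - register the per-channel NAPI instances
 * @dev: device pointer
 * Description: for each channel, register the RX and TX NAPI and, where
 * both an RX and a TX queue exist, the combined RX/TX NAPI used on
 * channels with an XSK pool attached.
 */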
7277 static void stmmac_napi_add(struct net_device *dev)
7278 {
7279 struct stmmac_priv *priv = netdev_priv(dev);
7280 u32 queue, maxq;
7281
7282 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7283
7284 for (queue = 0; queue < maxq; queue++) {
7285 struct stmmac_channel *ch = &priv->channel[queue];
7286
7287 ch->priv_data = priv;
7288 ch->index = queue;
7289 spin_lock_init(&ch->lock);
7290
7291 if (queue < priv->plat->rx_queues_to_use) {
7292 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7293 }
7294 if (queue < priv->plat->tx_queues_to_use) {
7295 netif_napi_add_tx(dev, &ch->tx_napi,
7296 stmmac_napi_poll_tx);
7297 }
7298 if (queue < priv->plat->rx_queues_to_use &&
7299 queue < priv->plat->tx_queues_to_use) {
7300 netif_napi_add(dev, &ch->rxtx_napi,
7301 stmmac_napi_poll_rxtx);
7302 }
7303 }
7304 }
7305
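/**
 * stmmac_napi_del - unregister the per-channel NAPI instances
 * @dev: device pointer
 * Description: counterpart of stmmac_napi_add().
 */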
7306 static void stmmac_napi_del(struct net_device *dev)
7307 {
7308 struct stmmac_priv *priv = netdev_priv(dev);
7309 u32 queue, maxq;
7310
7311 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7312
7313 for (queue = 0; queue < maxq; queue++) {
7314 struct stmmac_channel *ch = &priv->channel[queue];
7315
7316 if (queue < priv->plat->rx_queues_to_use)
7317 netif_napi_del(&ch->rx_napi);
7318 if (queue < priv->plat->tx_queues_to_use)
7319 netif_napi_del(&ch->tx_napi);
7320 if (queue < priv->plat->rx_queues_to_use &&
7321 queue < priv->plat->tx_queues_to_use) {
7322 netif_napi_del(&ch->rxtx_napi);
7323 }
7324 }
7325 }
7326
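/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: stop the interface if it is running, re-register the NAPI
 * instances for the new queue counts, refresh the default RSS table and
 * re-open the interface.
 */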
7327 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7328 {
7329 struct stmmac_priv *priv = netdev_priv(dev);
7330 int ret = 0, i;
7331
7332 if (netif_running(dev))
7333 stmmac_release(dev);
7334
7335 stmmac_napi_del(dev);
7336
7337 priv->plat->rx_queues_to_use = rx_cnt;
7338 priv->plat->tx_queues_to_use = tx_cnt;
7339 if (!netif_is_rxfh_configured(dev))
7340 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7341 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7342 rx_cnt);
7343
7344 stmmac_napi_add(dev);
7345
7346 if (netif_running(dev))
7347 ret = stmmac_open(dev);
7348
7349 return ret;
7350 }
7351
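/**
 * stmmac_reinit_ringparam - change the RX/TX descriptor ring sizes
 * @dev: device pointer
 * @rx_size: new number of RX descriptors per ring
 * @tx_size: new number of TX descriptors per ring
 * Description: stop the interface if it is running, update the ring
 * sizes and re-open it.
 */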
7352 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7353 {
7354 struct stmmac_priv *priv = netdev_priv(dev);
7355 int ret = 0;
7356
7357 if (netif_running(dev))
7358 stmmac_release(dev);
7359
7360 priv->dma_conf.dma_rx_size = rx_size;
7361 priv->dma_conf.dma_tx_size = tx_size;
7362
7363 if (netif_running(dev))
7364 ret = stmmac_open(dev);
7365
7366 return ret;
7367 }
7368
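/**
 * stmmac_xdp_rx_timestamp - xmo_rx_timestamp metadata callback
 * @_ctx: XDP metadata context (struct stmmac_xdp_buff)
 * @timestamp: where to store the RX hardware timestamp
 * Description: extract the RX timestamp from the descriptor (or, on
 * GMAC4/XGMAC, from the context descriptor that follows it), apply the
 * CDC error adjustment and return 0, or -ENODATA if RX timestamping is
 * disabled or no timestamp is available.
 */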
7369 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7370 {
7371 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7372 struct dma_desc *desc_contains_ts = ctx->desc;
7373 struct stmmac_priv *priv = ctx->priv;
7374 struct dma_desc *ndesc = ctx->ndesc;
7375 struct dma_desc *desc = ctx->desc;
7376 u64 ns = 0;
7377
7378 if (!priv->hwts_rx_en)
7379 return -ENODATA;
7380
7381 /* For GMAC4, the valid timestamp is from CTX next desc. */
7382 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7383 desc_contains_ts = ndesc;
7384
7385 /* Check if timestamp is available */
7386 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7387 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7388 ns -= priv->plat->cdc_error_adj;
7389 *timestamp = ns_to_ktime(ns);
7390 return 0;
7391 }
7392
7393 return -ENODATA;
7394 }
7395
7396 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7397 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7398 };
7399
7400 /**
7401 * stmmac_dvr_probe
7402 * @device: device pointer
7403 * @plat_dat: platform data pointer
7404 * @res: stmmac resource pointer
7405 * Description: this is the main probe function used to
7406 * call alloc_etherdev and allocate the priv structure.
7407 * Return:
7408 * 0 on success, a negative errno otherwise.
7409 */
7410 int stmmac_dvr_probe(struct device *device,
7411 struct plat_stmmacenet_data *plat_dat,
7412 struct stmmac_resources *res)
7413 {
7414 struct net_device *ndev = NULL;
7415 struct stmmac_priv *priv;
7416 u32 rxq;
7417 int i, ret = 0;
7418
7419 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7420 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7421 if (!ndev)
7422 return -ENOMEM;
7423
7424 SET_NETDEV_DEV(ndev, device);
7425
7426 priv = netdev_priv(ndev);
7427 priv->device = device;
7428 priv->dev = ndev;
7429
7430 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7431 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7432 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7433 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7434 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7435 }
7436
7437 priv->xstats.pcpu_stats =
7438 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7439 if (!priv->xstats.pcpu_stats)
7440 return -ENOMEM;
7441
7442 stmmac_set_ethtool_ops(ndev);
7443 priv->pause = pause;
7444 priv->plat = plat_dat;
7445 priv->ioaddr = res->addr;
7446 priv->dev->base_addr = (unsigned long)res->addr;
7447 priv->plat->dma_cfg->multi_msi_en =
7448 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7449
7450 priv->dev->irq = res->irq;
7451 priv->wol_irq = res->wol_irq;
7452 priv->lpi_irq = res->lpi_irq;
7453 priv->sfty_irq = res->sfty_irq;
7454 priv->sfty_ce_irq = res->sfty_ce_irq;
7455 priv->sfty_ue_irq = res->sfty_ue_irq;
7456 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7457 priv->rx_irq[i] = res->rx_irq[i];
7458 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7459 priv->tx_irq[i] = res->tx_irq[i];
7460
7461 if (!is_zero_ether_addr(res->mac))
7462 eth_hw_addr_set(priv->dev, res->mac);
7463
7464 dev_set_drvdata(device, priv->dev);
7465
7466 /* Verify driver arguments */
7467 stmmac_verify_args();
7468
7469 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7470 if (!priv->af_xdp_zc_qps)
7471 return -ENOMEM;
7472
7473 /* Allocate workqueue */
7474 priv->wq = create_singlethread_workqueue("stmmac_wq");
7475 if (!priv->wq) {
7476 dev_err(priv->device, "failed to create workqueue\n");
7477 ret = -ENOMEM;
7478 goto error_wq_init;
7479 }
7480
7481 INIT_WORK(&priv->service_task, stmmac_service_task);
7482
7483 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7484
7485 /* Override with kernel parameters if supplied XXX CRS XXX
7486 * this needs to have multiple instances
7487 */
7488 if ((phyaddr >= 0) && (phyaddr <= 31))
7489 priv->plat->phy_addr = phyaddr;
7490
7491 if (priv->plat->stmmac_rst) {
7492 ret = reset_control_assert(priv->plat->stmmac_rst);
7493 reset_control_deassert(priv->plat->stmmac_rst);
7494 /* Some reset controllers have only a reset callback instead of
7495 * an assert + deassert callback pair.
7496 */
7497 if (ret == -ENOTSUPP)
7498 reset_control_reset(priv->plat->stmmac_rst);
7499 }
7500
7501 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7502 if (ret == -ENOTSUPP)
7503 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7504 ERR_PTR(ret));
7505
7506 /* Wait a bit for the reset to take effect */
7507 udelay(10);
7508
7509 /* Init MAC and get the capabilities */
7510 ret = stmmac_hw_init(priv);
7511 if (ret)
7512 goto error_hw_init;
7513
7514 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7515 */
7516 if (priv->synopsys_id < DWMAC_CORE_5_20)
7517 priv->plat->dma_cfg->dche = false;
7518
7519 stmmac_check_ether_addr(priv);
7520
7521 ndev->netdev_ops = &stmmac_netdev_ops;
7522
7523 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7524 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7525
7526 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7527 NETIF_F_RXCSUM;
7528 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7529 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7530
7531 ret = stmmac_tc_init(priv, priv);
7532 if (!ret) {
7533 ndev->hw_features |= NETIF_F_HW_TC;
7534 }
7535
7536 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7537 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7538 if (priv->plat->has_gmac4)
7539 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7540 priv->tso = true;
7541 dev_info(priv->device, "TSO feature enabled\n");
7542 }
7543
7544 if (priv->dma_cap.sphen &&
7545 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7546 ndev->hw_features |= NETIF_F_GRO;
7547 priv->sph_cap = true;
7548 priv->sph = priv->sph_cap;
7549 dev_info(priv->device, "SPH feature enabled\n");
7550 }
7551
7552 /* Ideally our host DMA address width is the same as for the
7553 * device. However, it may differ and then we have to use our
7554 * host DMA width for allocation and the device DMA width for
7555 * register handling.
7556 */
7557 if (priv->plat->host_dma_width)
7558 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7559 else
7560 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7561
7562 if (priv->dma_cap.host_dma_width) {
7563 ret = dma_set_mask_and_coherent(device,
7564 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7565 if (!ret) {
7566 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7567 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7568
7569 /*
7570 * If more than 32 bits can be addressed, make sure to
7571 * enable enhanced addressing mode.
7572 */
7573 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7574 priv->plat->dma_cfg->eame = true;
7575 } else {
7576 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7577 if (ret) {
7578 dev_err(priv->device, "Failed to set DMA Mask\n");
7579 goto error_hw_init;
7580 }
7581
7582 priv->dma_cap.host_dma_width = 32;
7583 }
7584 }
7585
7586 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7587 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7588 #ifdef STMMAC_VLAN_TAG_USED
7589 /* Both mac100 and gmac support receive VLAN tag detection */
7590 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7591 if (priv->plat->has_gmac4) {
7592 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7593 priv->hw->hw_vlan_en = true;
7594 }
7595 if (priv->dma_cap.vlhash) {
7596 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7597 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7598 }
7599 if (priv->dma_cap.vlins) {
7600 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7601 if (priv->dma_cap.dvlan)
7602 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7603 }
7604 #endif
7605 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7606
7607 priv->xstats.threshold = tc;
7608
7609 /* Initialize RSS */
7610 rxq = priv->plat->rx_queues_to_use;
7611 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7612 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7613 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7614
7615 if (priv->dma_cap.rssen && priv->plat->rss_en)
7616 ndev->features |= NETIF_F_RXHASH;
7617
7618 ndev->vlan_features |= ndev->features;
7619
7620 /* MTU range: 46 - hw-specific max */
7621 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7622 if (priv->plat->has_xgmac)
7623 ndev->max_mtu = XGMAC_JUMBO_LEN;
7624 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7625 ndev->max_mtu = JUMBO_LEN;
7626 else
7627 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7628 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7629 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7630 */
7631 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7632 (priv->plat->maxmtu >= ndev->min_mtu))
7633 ndev->max_mtu = priv->plat->maxmtu;
7634 else if (priv->plat->maxmtu < ndev->min_mtu)
7635 dev_warn(priv->device,
7636 "%s: warning: maxmtu having invalid value (%d)\n",
7637 __func__, priv->plat->maxmtu);
7638
7639 if (flow_ctrl)
7640 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
7641
7642 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7643
7644 /* Setup channels NAPI */
7645 stmmac_napi_add(ndev);
7646
7647 mutex_init(&priv->lock);
7648
7649 stmmac_fpe_init(priv);
7650
7651 /* If a specific clk_csr value is passed from the platform
7652 * this means that the CSR Clock Range selection cannot be
7653 * changed at run-time and it is fixed. Otherwise the driver will try to
7654 * set the MDC clock dynamically according to the actual csr
7655 * clock input.
7656 */
7657 if (priv->plat->clk_csr >= 0)
7658 priv->clk_csr = priv->plat->clk_csr;
7659 else
7660 stmmac_clk_csr_set(priv);
7661
7662 stmmac_check_pcs_mode(priv);
7663
7664 pm_runtime_get_noresume(device);
7665 pm_runtime_set_active(device);
7666 if (!pm_runtime_enabled(device))
7667 pm_runtime_enable(device);
7668
7669 ret = stmmac_mdio_register(ndev);
7670 if (ret < 0) {
7671 dev_err_probe(priv->device, ret,
7672 "MDIO bus (id: %d) registration failed\n",
7673 priv->plat->bus_id);
7674 goto error_mdio_register;
7675 }
7676
7677 if (priv->plat->speed_mode_2500)
7678 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7679
7680 ret = stmmac_pcs_setup(ndev);
7681 if (ret)
7682 goto error_pcs_setup;
7683
7684 ret = stmmac_phy_setup(priv);
7685 if (ret) {
7686 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7687 goto error_phy_setup;
7688 }
7689
7690 ret = register_netdev(ndev);
7691 if (ret) {
7692 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7693 __func__, ret);
7694 goto error_netdev_register;
7695 }
7696
7697 #ifdef CONFIG_DEBUG_FS
7698 stmmac_init_fs(ndev);
7699 #endif
7700
7701 if (priv->plat->dump_debug_regs)
7702 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7703
7704 /* Let pm_runtime_put() disable the clocks.
7705 * If CONFIG_PM is not enabled, the clocks will stay powered.
7706 */
7707 pm_runtime_put(device);
7708
7709 return ret;
7710
7711 error_netdev_register:
7712 phylink_destroy(priv->phylink);
7713 error_phy_setup:
7714 stmmac_pcs_clean(ndev);
7715 error_pcs_setup:
7716 stmmac_mdio_unregister(ndev);
7717 error_mdio_register:
7718 stmmac_napi_del(ndev);
7719 error_hw_init:
7720 destroy_workqueue(priv->wq);
7721 error_wq_init:
7722 bitmap_free(priv->af_xdp_zc_qps);
7723
7724 return ret;
7725 }
7726 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7727
7728 /**
7729 * stmmac_dvr_remove
7730 * @dev: device pointer
7731 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7732 * changes the link status and releases the DMA descriptor rings.
7733 */
7734 void stmmac_dvr_remove(struct device *dev)
7735 {
7736 struct net_device *ndev = dev_get_drvdata(dev);
7737 struct stmmac_priv *priv = netdev_priv(ndev);
7738
7739 netdev_info(priv->dev, "%s: removing driver", __func__);
7740
7741 pm_runtime_get_sync(dev);
7742
7743 stmmac_stop_all_dma(priv);
7744 stmmac_mac_set(priv, priv->ioaddr, false);
7745 unregister_netdev(ndev);
7746
7747 #ifdef CONFIG_DEBUG_FS
7748 stmmac_exit_fs(ndev);
7749 #endif
7750 phylink_destroy(priv->phylink);
7751 if (priv->plat->stmmac_rst)
7752 reset_control_assert(priv->plat->stmmac_rst);
7753 reset_control_assert(priv->plat->stmmac_ahb_rst);
7754
7755 stmmac_pcs_clean(ndev);
7756 stmmac_mdio_unregister(ndev);
7757
7758 destroy_workqueue(priv->wq);
7759 mutex_destroy(&priv->lock);
7760 bitmap_free(priv->af_xdp_zc_qps);
7761
7762 pm_runtime_disable(dev);
7763 pm_runtime_put_noidle(dev);
7764 }
7765 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7766
7767 /**
7768 * stmmac_suspend - suspend callback
7769 * @dev: device pointer
7770 * Description: this is the function to suspend the device and it is called
7771 * by the platform driver to stop the network queue, release the resources,
7772 * program the PMT register (for WoL), clean and release driver resources.
7773 */
7774 int stmmac_suspend(struct device *dev)
7775 {
7776 struct net_device *ndev = dev_get_drvdata(dev);
7777 struct stmmac_priv *priv = netdev_priv(ndev);
7778 u32 chan;
7779
7780 if (!ndev || !netif_running(ndev))
7781 return 0;
7782
7783 mutex_lock(&priv->lock);
7784
7785 netif_device_detach(ndev);
7786
7787 stmmac_disable_all_queues(priv);
7788
7789 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7790 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7791
7792 if (priv->eee_sw_timer_en) {
7793 priv->tx_path_in_lpi_mode = false;
7794 del_timer_sync(&priv->eee_ctrl_timer);
7795 }
7796
7797 /* Stop TX/RX DMA */
7798 stmmac_stop_all_dma(priv);
7799
7800 if (priv->plat->serdes_powerdown)
7801 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7802
7803 /* Enable Power down mode by programming the PMT regs */
7804 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7805 stmmac_pmt(priv, priv->hw, priv->wolopts);
7806 priv->irq_wake = 1;
7807 } else {
7808 stmmac_mac_set(priv, priv->ioaddr, false);
7809 pinctrl_pm_select_sleep_state(priv->device);
7810 }
7811
7812 mutex_unlock(&priv->lock);
7813
7814 rtnl_lock();
7815 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7816 phylink_suspend(priv->phylink, true);
7817 } else {
7818 if (device_may_wakeup(priv->device))
7819 phylink_speed_down(priv->phylink, false);
7820 phylink_suspend(priv->phylink, false);
7821 }
7822 rtnl_unlock();
7823
7824 if (stmmac_fpe_supported(priv))
7825 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7826
7827 priv->speed = SPEED_UNKNOWN;
7828 return 0;
7829 }
7830 EXPORT_SYMBOL_GPL(stmmac_suspend);
7831
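/**
 * stmmac_reset_rx_queue - reset the RX queue indexes
 * @priv: driver private structure
 * @queue: RX queue index
 */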
7832 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7833 {
7834 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7835
7836 rx_q->cur_rx = 0;
7837 rx_q->dirty_rx = 0;
7838 }
7839
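/**
 * stmmac_reset_tx_queue - reset the TX queue indexes, MSS and BQL state
 * @priv: driver private structure
 * @queue: TX queue index
 */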
7840 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7841 {
7842 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7843
7844 tx_q->cur_tx = 0;
7845 tx_q->dirty_tx = 0;
7846 tx_q->mss = 0;
7847
7848 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7849 }
7850
7851 /**
7852 * stmmac_reset_queues_param - reset queue parameters
7853 * @priv: driver private structure
7854 */
7855 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7856 {
7857 u32 rx_cnt = priv->plat->rx_queues_to_use;
7858 u32 tx_cnt = priv->plat->tx_queues_to_use;
7859 u32 queue;
7860
7861 for (queue = 0; queue < rx_cnt; queue++)
7862 stmmac_reset_rx_queue(priv, queue);
7863
7864 for (queue = 0; queue < tx_cnt; queue++)
7865 stmmac_reset_tx_queue(priv, queue);
7866 }
7867
7868 /**
7869 * stmmac_resume - resume callback
7870 * @dev: device pointer
7871 * Description: when resuming, this function is invoked to set up the DMA and CORE
7872 * in a usable state.
7873 */
7874 int stmmac_resume(struct device *dev)
7875 {
7876 struct net_device *ndev = dev_get_drvdata(dev);
7877 struct stmmac_priv *priv = netdev_priv(ndev);
7878 int ret;
7879
7880 if (!netif_running(ndev))
7881 return 0;
7882
7883 /* The Power Down bit in the PM register is cleared
7884 * automatically as soon as a magic packet or a Wake-up frame
7885 * is received. Anyway, it's better to manually clear
7886 * this bit because it can generate problems while resuming
7887 * from other devices (e.g. serial console).
7888 */
7889 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7890 mutex_lock(&priv->lock);
7891 stmmac_pmt(priv, priv->hw, 0);
7892 mutex_unlock(&priv->lock);
7893 priv->irq_wake = 0;
7894 } else {
7895 pinctrl_pm_select_default_state(priv->device);
7896 /* reset the phy so that it's ready */
7897 if (priv->mii)
7898 stmmac_mdio_reset(priv->mii);
7899 }
7900
7901 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7902 priv->plat->serdes_powerup) {
7903 ret = priv->plat->serdes_powerup(ndev,
7904 priv->plat->bsp_priv);
7905
7906 if (ret < 0)
7907 return ret;
7908 }
7909
7910 rtnl_lock();
7911 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7912 phylink_resume(priv->phylink);
7913 } else {
7914 phylink_resume(priv->phylink);
7915 if (device_may_wakeup(priv->device))
7916 phylink_speed_up(priv->phylink);
7917 }
7918 rtnl_unlock();
7919
7920 rtnl_lock();
7921 mutex_lock(&priv->lock);
7922
7923 stmmac_reset_queues_param(priv);
7924
7925 stmmac_free_tx_skbufs(priv);
7926 stmmac_clear_descriptors(priv, &priv->dma_conf);
7927
7928 stmmac_hw_setup(ndev, false);
7929 stmmac_init_coalesce(priv);
7930 stmmac_set_rx_mode(ndev);
7931
7932 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7933
7934 stmmac_enable_all_queues(priv);
7935 stmmac_enable_all_dma_irq(priv);
7936
7937 mutex_unlock(&priv->lock);
7938 rtnl_unlock();
7939
7940 netif_device_attach(ndev);
7941
7942 return 0;
7943 }
7944 EXPORT_SYMBOL_GPL(stmmac_resume);
7945
7946 #ifndef MODULE
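/**
 * stmmac_cmdline_opt - parse the "stmmaceth=" kernel command line options
 * @str: comma-separated list of option:value pairs, e.g.
 * "debug:16,phyaddr:1,watchdog:4000"
 * Description: built-in (non-module) counterpart of the module parameters
 * above; each recognised option overrides the corresponding variable.
 */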
7947 static int __init stmmac_cmdline_opt(char *str)
7948 {
7949 char *opt;
7950
7951 if (!str || !*str)
7952 return 1;
7953 while ((opt = strsep(&str, ",")) != NULL) {
7954 if (!strncmp(opt, "debug:", 6)) {
7955 if (kstrtoint(opt + 6, 0, &debug))
7956 goto err;
7957 } else if (!strncmp(opt, "phyaddr:", 8)) {
7958 if (kstrtoint(opt + 8, 0, &phyaddr))
7959 goto err;
7960 } else if (!strncmp(opt, "buf_sz:", 7)) {
7961 if (kstrtoint(opt + 7, 0, &buf_sz))
7962 goto err;
7963 } else if (!strncmp(opt, "tc:", 3)) {
7964 if (kstrtoint(opt + 3, 0, &tc))
7965 goto err;
7966 } else if (!strncmp(opt, "watchdog:", 9)) {
7967 if (kstrtoint(opt + 9, 0, &watchdog))
7968 goto err;
7969 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7970 if (kstrtoint(opt + 10, 0, &flow_ctrl))
7971 goto err;
7972 } else if (!strncmp(opt, "pause:", 6)) {
7973 if (kstrtoint(opt + 6, 0, &pause))
7974 goto err;
7975 } else if (!strncmp(opt, "eee_timer:", 10)) {
7976 if (kstrtoint(opt + 10, 0, &eee_timer))
7977 goto err;
7978 } else if (!strncmp(opt, "chain_mode:", 11)) {
7979 if (kstrtoint(opt + 11, 0, &chain_mode))
7980 goto err;
7981 }
7982 }
7983 return 1;
7984
7985 err:
7986 pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7987 return 1;
7988 }
7989
7990 __setup("stmmaceth=", stmmac_cmdline_opt);
7991 #endif /* MODULE */
7992
7993 static int __init stmmac_init(void)
7994 {
7995 #ifdef CONFIG_DEBUG_FS
7996 /* Create debugfs main directory if it doesn't exist yet */
7997 if (!stmmac_fs_dir)
7998 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7999 register_netdevice_notifier(&stmmac_notifier);
8000 #endif
8001
8002 return 0;
8003 }
8004
8005 static void __exit stmmac_exit(void)
8006 {
8007 #ifdef CONFIG_DEBUG_FS
8008 unregister_netdevice_notifier(&stmmac_notifier);
8009 debugfs_remove_recursive(stmmac_fs_dir);
8010 #endif
8011 }
8012
8013 module_init(stmmac_init)
8014 module_exit(stmmac_exit)
8015
8016 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8017 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8018 MODULE_LICENSE("GPL");
8019