1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54
55 /* As long as the interface is active, we keep the timestamping counter enabled
56 * with fine resolution and binary rollover. This avoids non-monotonic behavior
57 * (clock jumps) when changing timestamping settings at runtime.
58 */
59 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 PTP_TCR_TSCTRLSSR)
61
62 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
64
65 /* Module parameters */
66 #define TX_TIMEO 5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78
79 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
85
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
90
91 static int flow_ctrl = 0xdead;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 /* This is unused */
105 #define DEFAULT_BUFSIZE 1536
106 static int buf_sz = DEFAULT_BUFSIZE;
107 module_param(buf_sz, int, 0644);
108 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
109
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111 NETIF_MSG_LINK | NETIF_MSG_IFUP |
112 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113
114 #define STMMAC_DEFAULT_LPI_TIMER 1000
115 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, uint, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119
120 /* By default the driver uses ring mode to manage the tx and rx descriptors,
121 * but the user can force use of chain mode instead of ring mode.
122 */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
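/* Usage note (illustrative, not part of the driver): these parameters are
 * typically given at module load time, e.g.
 *   modprobe stmmac eee_timer=2000 chain_mode=1
 * or on the kernel command line as stmmac.eee_timer=2000. Parameters
 * registered with 0644 permissions can also be changed at runtime via
 * /sys/module/stmmac/parameters/<name>, assuming the driver is built as
 * the "stmmac" module.
 */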
126
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
139 u32 rxmode, u32 chan);
140
141 #ifdef CONFIG_DEBUG_FS
142 static const struct net_device_ops stmmac_netdev_ops;
143 static void stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(struct net_device *dev);
145 #endif
146
147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
148
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
150 {
151 int ret = 0;
152
153 if (enabled) {
154 ret = clk_prepare_enable(priv->plat->stmmac_clk);
155 if (ret)
156 return ret;
157 ret = clk_prepare_enable(priv->plat->pclk);
158 if (ret) {
159 clk_disable_unprepare(priv->plat->stmmac_clk);
160 return ret;
161 }
162 if (priv->plat->clks_config) {
163 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164 if (ret) {
165 clk_disable_unprepare(priv->plat->stmmac_clk);
166 clk_disable_unprepare(priv->plat->pclk);
167 return ret;
168 }
169 }
170 } else {
171 clk_disable_unprepare(priv->plat->stmmac_clk);
172 clk_disable_unprepare(priv->plat->pclk);
173 if (priv->plat->clks_config)
174 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
175 }
176
177 return ret;
178 }
179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
180
181 /**
182 * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
183 * @bsp_priv: BSP private data structure (unused)
184 * @clk_tx_i: the transmit clock
185 * @interface: the selected interface mode
186 * @speed: the speed that the MAC will be operating at
187 *
188 * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
189 * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
190 * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
191 * the plat_data->set_clk_tx_rate method directly, call it via their own
192 * implementation, or implement their own method should they have more
193 * complex requirements. It is intended to only be used in this method.
194 *
195 * plat_data->clk_tx_i must be filled in.
196 */
197 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
198 phy_interface_t interface, int speed)
199 {
200 long rate = rgmii_clock(speed);
201
202 /* Silently ignore unsupported speeds as rgmii_clock() only
203 * supports 10, 100 and 1000Mbps. We do not want to spit
204 * errors for 2500 and higher speeds here.
205 */
206 if (rate < 0)
207 return 0;
208
209 return clk_set_rate(clk_tx_i, rate);
210 }
211 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
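/* Illustrative sketch (not a real platform driver): as the kernel-doc above
 * notes, a platform glue layer can wire this helper up from its probe path
 * once it has obtained the transmit clock. The "tx" clock name and the pdev
 * variable are assumptions; the plat_dat field names match those used
 * elsewhere in this file.
 *
 *	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
 *	if (IS_ERR(plat_dat->clk_tx_i))
 *		return PTR_ERR(plat_dat->clk_tx_i);
 *	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
 */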
212
213 /**
214 * stmmac_verify_args - verify the driver parameters.
215 * Description: it checks the driver parameters and sets a default in case of
216 * errors.
217 */
218 static void stmmac_verify_args(void)
219 {
220 if (unlikely(watchdog < 0))
221 watchdog = TX_TIMEO;
222 if (unlikely((pause < 0) || (pause > 0xffff)))
223 pause = PAUSE_TIME;
224
225 if (flow_ctrl != 0xdead)
226 pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
227 }
228
229 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
230 {
231 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
233 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
234 u32 queue;
235
236 for (queue = 0; queue < maxq; queue++) {
237 struct stmmac_channel *ch = &priv->channel[queue];
238
239 if (stmmac_xdp_is_enabled(priv) &&
240 test_bit(queue, priv->af_xdp_zc_qps)) {
241 napi_disable(&ch->rxtx_napi);
242 continue;
243 }
244
245 if (queue < rx_queues_cnt)
246 napi_disable(&ch->rx_napi);
247 if (queue < tx_queues_cnt)
248 napi_disable(&ch->tx_napi);
249 }
250 }
251
252 /**
253 * stmmac_disable_all_queues - Disable all queues
254 * @priv: driver private structure
255 */
256 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
257 {
258 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
259 struct stmmac_rx_queue *rx_q;
260 u32 queue;
261
262 /* synchronize_rcu() needed for pending XDP buffers to drain */
263 for (queue = 0; queue < rx_queues_cnt; queue++) {
264 rx_q = &priv->dma_conf.rx_queue[queue];
265 if (rx_q->xsk_pool) {
266 synchronize_rcu();
267 break;
268 }
269 }
270
271 __stmmac_disable_all_queues(priv);
272 }
273
274 /**
275 * stmmac_enable_all_queues - Enable all queues
276 * @priv: driver private structure
277 */
278 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
279 {
280 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
281 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
282 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
283 u32 queue;
284
285 for (queue = 0; queue < maxq; queue++) {
286 struct stmmac_channel *ch = &priv->channel[queue];
287
288 if (stmmac_xdp_is_enabled(priv) &&
289 test_bit(queue, priv->af_xdp_zc_qps)) {
290 napi_enable(&ch->rxtx_napi);
291 continue;
292 }
293
294 if (queue < rx_queues_cnt)
295 napi_enable(&ch->rx_napi);
296 if (queue < tx_queues_cnt)
297 napi_enable(&ch->tx_napi);
298 }
299 }
300
301 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
302 {
303 if (!test_bit(STMMAC_DOWN, &priv->state) &&
304 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
305 queue_work(priv->wq, &priv->service_task);
306 }
307
308 static void stmmac_global_err(struct stmmac_priv *priv)
309 {
310 netif_carrier_off(priv->dev);
311 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
312 stmmac_service_event_schedule(priv);
313 }
314
315 /**
316 * stmmac_clk_csr_set - dynamically set the MDC clock
317 * @priv: driver private structure
318 * Description: this is to dynamically set the MDC clock according to the csr
319 * clock input.
320 * Note:
321 * If a specific clk_csr value is passed from the platform,
322 * the CSR Clock Range selection cannot be changed at run-time
323 * and is fixed (as reported in the driver documentation).
324 * Otherwise, the driver will try to set the MDC clock
325 * dynamically according to the actual clock input.
326 */
327 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
328 {
329 unsigned long clk_rate;
330
331 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
332
333 /* The platform-provided default clk_csr is assumed valid in all
334 * cases except the ones handled below.
335 * For clock rates higher than the IEEE 802.3 specified frequency
336 * range we cannot estimate the proper divider, as the frequency
337 * of clk_csr_i is not known. So we do not change the default
338 * divider.
339 */
340 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
341 if (clk_rate < CSR_F_35M)
342 priv->clk_csr = STMMAC_CSR_20_35M;
343 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
344 priv->clk_csr = STMMAC_CSR_35_60M;
345 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
346 priv->clk_csr = STMMAC_CSR_60_100M;
347 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
348 priv->clk_csr = STMMAC_CSR_100_150M;
349 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
350 priv->clk_csr = STMMAC_CSR_150_250M;
351 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
352 priv->clk_csr = STMMAC_CSR_250_300M;
353 else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
354 priv->clk_csr = STMMAC_CSR_300_500M;
355 else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
356 priv->clk_csr = STMMAC_CSR_500_800M;
357 }
358
359 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
360 if (clk_rate > 160000000)
361 priv->clk_csr = 0x03;
362 else if (clk_rate > 80000000)
363 priv->clk_csr = 0x02;
364 else if (clk_rate > 40000000)
365 priv->clk_csr = 0x01;
366 else
367 priv->clk_csr = 0;
368 }
369
370 if (priv->plat->has_xgmac) {
371 if (clk_rate > 400000000)
372 priv->clk_csr = 0x5;
373 else if (clk_rate > 350000000)
374 priv->clk_csr = 0x4;
375 else if (clk_rate > 300000000)
376 priv->clk_csr = 0x3;
377 else if (clk_rate > 250000000)
378 priv->clk_csr = 0x2;
379 else if (clk_rate > 150000000)
380 priv->clk_csr = 0x1;
381 else
382 priv->clk_csr = 0x0;
383 }
384 }
385
386 static void print_pkt(unsigned char *buf, int len)
387 {
388 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
389 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
390 }
391
392 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
393 {
394 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
395 u32 avail;
396
397 if (tx_q->dirty_tx > tx_q->cur_tx)
398 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
399 else
400 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
401
402 return avail;
403 }
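/* Worked example (illustrative numbers only): with dma_tx_size = 512,
 * cur_tx = 10 and dirty_tx = 5, dirty_tx is not ahead of cur_tx, so the
 * else branch applies: avail = 512 - 10 + 5 - 1 = 506 free descriptors.
 * The "- 1" keeps one slot unused so that a completely full ring can be
 * distinguished from an empty one.
 */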
404
405 /**
406 * stmmac_rx_dirty - Get RX queue dirty descriptor count
407 * @priv: driver private structure
408 * @queue: RX queue index
409 */
410 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
411 {
412 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
413 u32 dirty;
414
415 if (rx_q->dirty_rx <= rx_q->cur_rx)
416 dirty = rx_q->cur_rx - rx_q->dirty_rx;
417 else
418 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
419
420 return dirty;
421 }
422
423 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
424 {
425 u32 tx_cnt = priv->plat->tx_queues_to_use;
426 u32 queue;
427
428 /* check if all TX queues have the work finished */
429 for (queue = 0; queue < tx_cnt; queue++) {
430 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
431
432 if (tx_q->dirty_tx != tx_q->cur_tx)
433 return true; /* still unfinished work */
434 }
435
436 return false;
437 }
438
439 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
440 {
441 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
442 }
443
444 /**
445 * stmmac_try_to_start_sw_lpi - check and enter LPI mode
446 * @priv: driver private structure
447 * Description: this function checks whether the TX path is idle and, if so,
448 * enters LPI mode when EEE is enabled.
449 */
450 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
451 {
452 if (stmmac_eee_tx_busy(priv)) {
453 stmmac_restart_sw_lpi_timer(priv);
454 return;
455 }
456
457 /* Check and enter in LPI mode */
458 if (!priv->tx_path_in_lpi_mode)
459 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
460 priv->tx_lpi_clk_stop, 0);
461 }
462
463 /**
464 * stmmac_stop_sw_lpi - stop transmitting LPI
465 * @priv: driver private structure
466 * Description: When using software-controlled LPI, stop transmitting LPI state.
467 */
468 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
469 {
470 del_timer_sync(&priv->eee_ctrl_timer);
471 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
472 priv->tx_path_in_lpi_mode = false;
473 }
474
475 /**
476 * stmmac_eee_ctrl_timer - EEE TX SW timer.
477 * @t: timer_list struct containing private info
478 * Description:
479 * if there is no data transfer and we are not already in LPI state,
480 * then the MAC transmitter can be moved to LPI state.
481 */
482 static void stmmac_eee_ctrl_timer(struct timer_list *t)
483 {
484 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
485
486 stmmac_try_to_start_sw_lpi(priv);
487 }
488
489 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
490 * @priv: driver private structure
491 * @p : descriptor pointer
492 * @skb : the socket buffer
493 * Description :
494 * This function reads the timestamp from the descriptor, performs some
495 * sanity checks and passes it to the stack.
496 */
497 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
498 struct dma_desc *p, struct sk_buff *skb)
499 {
500 struct skb_shared_hwtstamps shhwtstamp;
501 bool found = false;
502 u64 ns = 0;
503
504 if (!priv->hwts_tx_en)
505 return;
506
507 /* exit if skb doesn't support hw tstamp */
508 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
509 return;
510
511 /* check tx tstamp status */
512 if (stmmac_get_tx_timestamp_status(priv, p)) {
513 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
514 found = true;
515 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
516 found = true;
517 }
518
519 if (found) {
520 ns -= priv->plat->cdc_error_adj;
521
522 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
523 shhwtstamp.hwtstamp = ns_to_ktime(ns);
524
525 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
526 /* pass tstamp to stack */
527 skb_tstamp_tx(skb, &shhwtstamp);
528 }
529 }
530
531 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
532 * @priv: driver private structure
533 * @p : descriptor pointer
534 * @np : next descriptor pointer
535 * @skb : the socket buffer
536 * Description :
537 * This function reads the received packet's timestamp from the descriptor,
538 * performs some sanity checks and passes it to the stack.
539 */
540 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
541 struct dma_desc *np, struct sk_buff *skb)
542 {
543 struct skb_shared_hwtstamps *shhwtstamp = NULL;
544 struct dma_desc *desc = p;
545 u64 ns = 0;
546
547 if (!priv->hwts_rx_en)
548 return;
549 /* For GMAC4, the valid timestamp is from CTX next desc. */
550 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
551 desc = np;
552
553 /* Check if timestamp is available */
554 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
555 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
556
557 ns -= priv->plat->cdc_error_adj;
558
559 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
560 shhwtstamp = skb_hwtstamps(skb);
561 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 shhwtstamp->hwtstamp = ns_to_ktime(ns);
563 } else {
564 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
565 }
566 }
567
568 /**
569 * stmmac_hwtstamp_set - control hardware timestamping.
570 * @dev: device pointer.
571 * @ifr: An IOCTL specific structure, that can contain a pointer to
572 * a proprietary structure used to pass information to the driver.
573 * Description:
574 * This function configures the MAC to enable/disable both outgoing(TX)
575 * and incoming(RX) packets time stamping based on user input.
576 * Return Value:
577 * 0 on success and an appropriate negative error code on failure.
578 */
579 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
580 {
581 struct stmmac_priv *priv = netdev_priv(dev);
582 struct hwtstamp_config config;
583 u32 ptp_v2 = 0;
584 u32 tstamp_all = 0;
585 u32 ptp_over_ipv4_udp = 0;
586 u32 ptp_over_ipv6_udp = 0;
587 u32 ptp_over_ethernet = 0;
588 u32 snap_type_sel = 0;
589 u32 ts_master_en = 0;
590 u32 ts_event_en = 0;
591
592 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
593 netdev_alert(priv->dev, "No support for HW time stamping\n");
594 priv->hwts_tx_en = 0;
595 priv->hwts_rx_en = 0;
596
597 return -EOPNOTSUPP;
598 }
599
600 if (copy_from_user(&config, ifr->ifr_data,
601 sizeof(config)))
602 return -EFAULT;
603
604 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
605 __func__, config.flags, config.tx_type, config.rx_filter);
606
607 if (config.tx_type != HWTSTAMP_TX_OFF &&
608 config.tx_type != HWTSTAMP_TX_ON)
609 return -ERANGE;
610
611 if (priv->adv_ts) {
612 switch (config.rx_filter) {
613 case HWTSTAMP_FILTER_NONE:
614 /* do not time stamp any incoming packet */
615 config.rx_filter = HWTSTAMP_FILTER_NONE;
616 break;
617
618 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
619 /* PTP v1, UDP, any kind of event packet */
620 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
621 /* 'xmac' hardware can support Sync, Pdelay_Req and
622 * Pdelay_resp by setting bit14 and bits17/16 to 01.
623 * This leaves Delay_Req timestamps out.
624 * Enable all events *and* general purpose message
625 * timestamping.
626 */
627 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
628 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
629 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
630 break;
631
632 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
633 /* PTP v1, UDP, Sync packet */
634 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
635 /* take time stamp for SYNC messages only */
636 ts_event_en = PTP_TCR_TSEVNTENA;
637
638 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
639 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
640 break;
641
642 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
643 /* PTP v1, UDP, Delay_req packet */
644 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
645 /* take time stamp for Delay_Req messages only */
646 ts_master_en = PTP_TCR_TSMSTRENA;
647 ts_event_en = PTP_TCR_TSEVNTENA;
648
649 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
650 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
651 break;
652
653 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
654 /* PTP v2, UDP, any kind of event packet */
655 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
656 ptp_v2 = PTP_TCR_TSVER2ENA;
657 /* take time stamp for all event messages */
658 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
659
660 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
661 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
662 break;
663
664 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
665 /* PTP v2, UDP, Sync packet */
666 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
667 ptp_v2 = PTP_TCR_TSVER2ENA;
668 /* take time stamp for SYNC messages only */
669 ts_event_en = PTP_TCR_TSEVNTENA;
670
671 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
672 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
673 break;
674
675 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
676 /* PTP v2, UDP, Delay_req packet */
677 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
678 ptp_v2 = PTP_TCR_TSVER2ENA;
679 /* take time stamp for Delay_Req messages only */
680 ts_master_en = PTP_TCR_TSMSTRENA;
681 ts_event_en = PTP_TCR_TSEVNTENA;
682
683 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
684 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
685 break;
686
687 case HWTSTAMP_FILTER_PTP_V2_EVENT:
688 /* PTP v2/802.AS1 any layer, any kind of event packet */
689 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
690 ptp_v2 = PTP_TCR_TSVER2ENA;
691 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
692 if (priv->synopsys_id < DWMAC_CORE_4_10)
693 ts_event_en = PTP_TCR_TSEVNTENA;
694 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
695 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
696 ptp_over_ethernet = PTP_TCR_TSIPENA;
697 break;
698
699 case HWTSTAMP_FILTER_PTP_V2_SYNC:
700 /* PTP v2/802.AS1, any layer, Sync packet */
701 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
702 ptp_v2 = PTP_TCR_TSVER2ENA;
703 /* take time stamp for SYNC messages only */
704 ts_event_en = PTP_TCR_TSEVNTENA;
705
706 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
707 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
708 ptp_over_ethernet = PTP_TCR_TSIPENA;
709 break;
710
711 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
712 /* PTP v2/802.AS1, any layer, Delay_req packet */
713 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
714 ptp_v2 = PTP_TCR_TSVER2ENA;
715 /* take time stamp for Delay_Req messages only */
716 ts_master_en = PTP_TCR_TSMSTRENA;
717 ts_event_en = PTP_TCR_TSEVNTENA;
718
719 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
720 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
721 ptp_over_ethernet = PTP_TCR_TSIPENA;
722 break;
723
724 case HWTSTAMP_FILTER_NTP_ALL:
725 case HWTSTAMP_FILTER_ALL:
726 /* time stamp any incoming packet */
727 config.rx_filter = HWTSTAMP_FILTER_ALL;
728 tstamp_all = PTP_TCR_TSENALL;
729 break;
730
731 default:
732 return -ERANGE;
733 }
734 } else {
735 switch (config.rx_filter) {
736 case HWTSTAMP_FILTER_NONE:
737 config.rx_filter = HWTSTAMP_FILTER_NONE;
738 break;
739 default:
740 /* PTP v1, UDP, any kind of event packet */
741 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
742 break;
743 }
744 }
745 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
746 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
747
748 priv->systime_flags = STMMAC_HWTS_ACTIVE;
749
750 if (priv->hwts_tx_en || priv->hwts_rx_en) {
751 priv->systime_flags |= tstamp_all | ptp_v2 |
752 ptp_over_ethernet | ptp_over_ipv6_udp |
753 ptp_over_ipv4_udp | ts_event_en |
754 ts_master_en | snap_type_sel;
755 }
756
757 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
758
759 memcpy(&priv->tstamp_config, &config, sizeof(config));
760
761 return copy_to_user(ifr->ifr_data, &config,
762 sizeof(config)) ? -EFAULT : 0;
763 }
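/* Illustrative userspace sketch (not part of the driver): hardware
 * timestamping is requested through the standard SIOCSHWTSTAMP ioctl,
 * which lands in the handler above. The interface name is an example and
 * sockfd is assumed to be an already-open socket (e.g. AF_INET/SOCK_DGRAM).
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sockfd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */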
764
765 /**
766 * stmmac_hwtstamp_get - read hardware timestamping.
767 * @dev: device pointer.
768 * @ifr: An IOCTL specific structure, that can contain a pointer to
769 * a proprietary structure used to pass information to the driver.
770 * Description:
771 * This function obtains the current hardware timestamping settings
772 * as requested.
773 */
774 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
775 {
776 struct stmmac_priv *priv = netdev_priv(dev);
777 struct hwtstamp_config *config = &priv->tstamp_config;
778
779 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
780 return -EOPNOTSUPP;
781
782 return copy_to_user(ifr->ifr_data, config,
783 sizeof(*config)) ? -EFAULT : 0;
784 }
785
786 /**
787 * stmmac_init_tstamp_counter - init hardware timestamping counter
788 * @priv: driver private structure
789 * @systime_flags: timestamping flags
790 * Description:
791 * Initialize hardware counter for packet timestamping.
792 * This is valid as long as the interface is open and not suspended.
793 * It is rerun after resuming from suspend, in which case the timestamping
794 * flags updated by stmmac_hwtstamp_set() also need to be restored.
795 */
796 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
797 {
798 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
799 struct timespec64 now;
800 u32 sec_inc = 0;
801 u64 temp = 0;
802
803 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
804 return -EOPNOTSUPP;
805
806 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
807 priv->systime_flags = systime_flags;
808
809 /* program Sub Second Increment reg */
810 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
811 priv->plat->clk_ptp_rate,
812 xmac, &sec_inc);
813 temp = div_u64(1000000000ULL, sec_inc);
814
815 /* Store sub second increment for later use */
816 priv->sub_second_inc = sec_inc;
817
818 /* Calculate the default addend value:
819 * addend = (2^32) / freq_div_ratio,
820 * where freq_div_ratio = clk_ptp_rate / (1e9ns / sec_inc), i.e. the
821 * ratio between the PTP reference clock and the rate implied by sec_inc.
822 */
823 temp = (u64)(temp << 32);
824 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
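	/* Worked example (illustrative, assuming the sub-second increment
	 * helper picked sec_inc = 40 ns for a 50 MHz clk_ptp_rate in
	 * fine-update mode): temp = 1e9 / 40 = 25,000,000, so
	 * default_addend = (25,000,000 << 32) / 50,000,000 = 2^31
	 * = 0x80000000, i.e. roughly half scale, which leaves headroom for
	 * frequency adjustment in both directions.
	 */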
825 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
826
827 /* initialize system time */
828 ktime_get_real_ts64(&now);
829
830 /* lower 32 bits of tv_sec are safe until y2106 */
831 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
832
833 return 0;
834 }
835 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
836
837 /**
838 * stmmac_init_ptp - init PTP
839 * @priv: driver private structure
840 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
841 * This is done by looking at the HW cap. register.
842 * This function also registers the ptp driver.
843 */
844 static int stmmac_init_ptp(struct stmmac_priv *priv)
845 {
846 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
847 int ret;
848
849 if (priv->plat->ptp_clk_freq_config)
850 priv->plat->ptp_clk_freq_config(priv);
851
852 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
853 if (ret)
854 return ret;
855
856 priv->adv_ts = 0;
857 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
858 if (xmac && priv->dma_cap.atime_stamp)
859 priv->adv_ts = 1;
860 /* Dwmac 3.x core with extend_desc can support adv_ts */
861 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
862 priv->adv_ts = 1;
863
864 if (priv->dma_cap.time_stamp)
865 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
866
867 if (priv->adv_ts)
868 netdev_info(priv->dev,
869 "IEEE 1588-2008 Advanced Timestamp supported\n");
870
871 priv->hwts_tx_en = 0;
872 priv->hwts_rx_en = 0;
873
874 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
875 stmmac_hwtstamp_correct_latency(priv, priv);
876
877 return 0;
878 }
879
880 static void stmmac_release_ptp(struct stmmac_priv *priv)
881 {
882 clk_disable_unprepare(priv->plat->clk_ptp_ref);
883 stmmac_ptp_unregister(priv);
884 }
885
886 /**
887 * stmmac_mac_flow_ctrl - Configure flow control in all queues
888 * @priv: driver private structure
889 * @duplex: duplex passed to the next function
890 * @flow_ctrl: desired flow control modes
891 * Description: It is used for configuring the flow control in all queues
892 */
893 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
894 unsigned int flow_ctrl)
895 {
896 u32 tx_cnt = priv->plat->tx_queues_to_use;
897
898 stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
899 tx_cnt);
900 }
901
902 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
903 phy_interface_t interface)
904 {
905 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
906
907 /* Refresh the MAC-specific capabilities */
908 stmmac_mac_update_caps(priv);
909
910 config->mac_capabilities = priv->hw->link.caps;
911
912 if (priv->plat->max_speed)
913 phylink_limit_mac_speed(config, priv->plat->max_speed);
914
915 return config->mac_capabilities;
916 }
917
918 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
919 phy_interface_t interface)
920 {
921 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
922 struct phylink_pcs *pcs;
923
924 if (priv->plat->select_pcs) {
925 pcs = priv->plat->select_pcs(priv, interface);
926 if (!IS_ERR(pcs))
927 return pcs;
928 }
929
930 return NULL;
931 }
932
933 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
934 const struct phylink_link_state *state)
935 {
936 /* Nothing to do, xpcs_config() handles everything */
937 }
938
939 static void stmmac_mac_link_down(struct phylink_config *config,
940 unsigned int mode, phy_interface_t interface)
941 {
942 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943
944 stmmac_mac_set(priv, priv->ioaddr, false);
945 if (priv->dma_cap.eee)
946 stmmac_set_eee_pls(priv, priv->hw, false);
947
948 if (stmmac_fpe_supported(priv))
949 stmmac_fpe_link_state_handle(priv, false);
950 }
951
952 static void stmmac_mac_link_up(struct phylink_config *config,
953 struct phy_device *phy,
954 unsigned int mode, phy_interface_t interface,
955 int speed, int duplex,
956 bool tx_pause, bool rx_pause)
957 {
958 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
959 unsigned int flow_ctrl;
960 u32 old_ctrl, ctrl;
961 int ret;
962
963 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
964 priv->plat->serdes_powerup)
965 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
966
967 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
968 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
969
970 if (interface == PHY_INTERFACE_MODE_USXGMII) {
971 switch (speed) {
972 case SPEED_10000:
973 ctrl |= priv->hw->link.xgmii.speed10000;
974 break;
975 case SPEED_5000:
976 ctrl |= priv->hw->link.xgmii.speed5000;
977 break;
978 case SPEED_2500:
979 ctrl |= priv->hw->link.xgmii.speed2500;
980 break;
981 default:
982 return;
983 }
984 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
985 switch (speed) {
986 case SPEED_100000:
987 ctrl |= priv->hw->link.xlgmii.speed100000;
988 break;
989 case SPEED_50000:
990 ctrl |= priv->hw->link.xlgmii.speed50000;
991 break;
992 case SPEED_40000:
993 ctrl |= priv->hw->link.xlgmii.speed40000;
994 break;
995 case SPEED_25000:
996 ctrl |= priv->hw->link.xlgmii.speed25000;
997 break;
998 case SPEED_10000:
999 ctrl |= priv->hw->link.xgmii.speed10000;
1000 break;
1001 case SPEED_2500:
1002 ctrl |= priv->hw->link.speed2500;
1003 break;
1004 case SPEED_1000:
1005 ctrl |= priv->hw->link.speed1000;
1006 break;
1007 default:
1008 return;
1009 }
1010 } else {
1011 switch (speed) {
1012 case SPEED_2500:
1013 ctrl |= priv->hw->link.speed2500;
1014 break;
1015 case SPEED_1000:
1016 ctrl |= priv->hw->link.speed1000;
1017 break;
1018 case SPEED_100:
1019 ctrl |= priv->hw->link.speed100;
1020 break;
1021 case SPEED_10:
1022 ctrl |= priv->hw->link.speed10;
1023 break;
1024 default:
1025 return;
1026 }
1027 }
1028
1029 if (priv->plat->fix_mac_speed)
1030 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1031
1032 if (!duplex)
1033 ctrl &= ~priv->hw->link.duplex;
1034 else
1035 ctrl |= priv->hw->link.duplex;
1036
1037 /* Flow Control operation */
1038 if (rx_pause && tx_pause)
1039 flow_ctrl = FLOW_AUTO;
1040 else if (rx_pause && !tx_pause)
1041 flow_ctrl = FLOW_RX;
1042 else if (!rx_pause && tx_pause)
1043 flow_ctrl = FLOW_TX;
1044 else
1045 flow_ctrl = FLOW_OFF;
1046
1047 stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
1048
1049 if (ctrl != old_ctrl)
1050 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1051
1052 if (priv->plat->set_clk_tx_rate) {
1053 ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
1054 priv->plat->clk_tx_i,
1055 interface, speed);
1056 if (ret < 0)
1057 netdev_err(priv->dev,
1058 "failed to configure transmit clock for %dMbps: %pe\n",
1059 speed, ERR_PTR(ret));
1060 }
1061
1062 stmmac_mac_set(priv, priv->ioaddr, true);
1063 if (priv->dma_cap.eee)
1064 stmmac_set_eee_pls(priv, priv->hw, true);
1065
1066 if (stmmac_fpe_supported(priv))
1067 stmmac_fpe_link_state_handle(priv, true);
1068
1069 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1070 stmmac_hwtstamp_correct_latency(priv, priv);
1071 }
1072
1073 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1074 {
1075 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1076
1077 priv->eee_active = false;
1078
1079 mutex_lock(&priv->lock);
1080
1081 priv->eee_enabled = false;
1082
1083 netdev_dbg(priv->dev, "disable EEE\n");
1084 priv->eee_sw_timer_en = false;
1085 del_timer_sync(&priv->eee_ctrl_timer);
1086 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1087 priv->tx_path_in_lpi_mode = false;
1088
1089 stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1090 mutex_unlock(&priv->lock);
1091 }
1092
1093 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1094 bool tx_clk_stop)
1095 {
1096 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1097 int ret;
1098
1099 priv->tx_lpi_timer = timer;
1100 priv->eee_active = true;
1101
1102 mutex_lock(&priv->lock);
1103
1104 priv->eee_enabled = true;
1105
1106 /* Update the transmit clock stop according to PHY capability if
1107 * the platform allows
1108 */
1109 if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1110 priv->tx_lpi_clk_stop = tx_clk_stop;
1111
1112 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1113 STMMAC_DEFAULT_TWT_LS);
1114
1115 /* Try to configure the hardware timer. */
1116 ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1117 priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1118
1119 if (ret) {
1120 /* Hardware timer mode not supported, or value out of range.
1121 * Fall back to using software LPI mode
1122 */
1123 priv->eee_sw_timer_en = true;
1124 stmmac_restart_sw_lpi_timer(priv);
1125 }
1126
1127 mutex_unlock(&priv->lock);
1128 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1129
1130 return 0;
1131 }
1132
1133 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1134 phy_interface_t interface)
1135 {
1136 struct net_device *ndev = to_net_dev(config->dev);
1137 struct stmmac_priv *priv = netdev_priv(ndev);
1138
1139 if (priv->plat->mac_finish)
1140 priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1141
1142 return 0;
1143 }
1144
1145 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1146 .mac_get_caps = stmmac_mac_get_caps,
1147 .mac_select_pcs = stmmac_mac_select_pcs,
1148 .mac_config = stmmac_mac_config,
1149 .mac_link_down = stmmac_mac_link_down,
1150 .mac_link_up = stmmac_mac_link_up,
1151 .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1152 .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1153 .mac_finish = stmmac_mac_finish,
1154 };
1155
1156 /**
1157 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1158 * @priv: driver private structure
1159 * Description: this is to verify if the HW supports the Physical Coding
1160 * Sublayer (PCS) interface, which can be used when the MAC is
1161 * configured for the TBI, RTBI, or SGMII PHY interface.
1162 */
1163 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1164 {
1165 int interface = priv->plat->mac_interface;
1166
1167 if (priv->dma_cap.pcs) {
1168 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1169 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1170 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1171 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1172 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1173 priv->hw->pcs = STMMAC_PCS_RGMII;
1174 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1175 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1176 priv->hw->pcs = STMMAC_PCS_SGMII;
1177 }
1178 }
1179 }
1180
1181 /**
1182 * stmmac_init_phy - PHY initialization
1183 * @dev: net device structure
1184 * Description: it initializes the driver's PHY state, and attaches the PHY
1185 * to the mac driver.
1186 * Return value:
1187 * 0 on success
1188 */
1189 static int stmmac_init_phy(struct net_device *dev)
1190 {
1191 struct stmmac_priv *priv = netdev_priv(dev);
1192 struct fwnode_handle *phy_fwnode;
1193 struct fwnode_handle *fwnode;
1194 int ret;
1195
1196 if (!phylink_expects_phy(priv->phylink))
1197 return 0;
1198
1199 fwnode = priv->plat->port_node;
1200 if (!fwnode)
1201 fwnode = dev_fwnode(priv->device);
1202
1203 if (fwnode)
1204 phy_fwnode = fwnode_get_phy_node(fwnode);
1205 else
1206 phy_fwnode = NULL;
1207
1208 /* Some DT bindings do not set up the PHY handle. Let's try to
1209 * parse it manually
1210 */
1211 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1212 int addr = priv->plat->phy_addr;
1213 struct phy_device *phydev;
1214
1215 if (addr < 0) {
1216 netdev_err(priv->dev, "no phy found\n");
1217 return -ENODEV;
1218 }
1219
1220 phydev = mdiobus_get_phy(priv->mii, addr);
1221 if (!phydev) {
1222 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1223 return -ENODEV;
1224 }
1225
1226 ret = phylink_connect_phy(priv->phylink, phydev);
1227 } else {
1228 fwnode_handle_put(phy_fwnode);
1229 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1230 }
1231
1232 if (ret == 0) {
1233 struct ethtool_keee eee;
1234
1235 /* Configure phylib's copy of the LPI timer. Normally,
1236 * phylink_config.lpi_timer_default would do this, but there is
1237 * a chance that userspace could change the eee_timer setting
1238 * via sysfs before the first open. Thus, preserve existing
1239 * behaviour.
1240 */
1241 if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1242 eee.tx_lpi_timer = priv->tx_lpi_timer;
1243 phylink_ethtool_set_eee(priv->phylink, &eee);
1244 }
1245 }
1246
1247 if (!priv->plat->pmt) {
1248 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1249
1250 phylink_ethtool_get_wol(priv->phylink, &wol);
1251 device_set_wakeup_capable(priv->device, !!wol.supported);
1252 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1253 }
1254
1255 return ret;
1256 }
1257
1258 static int stmmac_phy_setup(struct stmmac_priv *priv)
1259 {
1260 struct stmmac_mdio_bus_data *mdio_bus_data;
1261 int mode = priv->plat->phy_interface;
1262 struct fwnode_handle *fwnode;
1263 struct phylink_pcs *pcs;
1264 struct phylink *phylink;
1265
1266 priv->phylink_config.dev = &priv->dev->dev;
1267 priv->phylink_config.type = PHYLINK_NETDEV;
1268 priv->phylink_config.mac_managed_pm = true;
1269
1270 /* Stmmac always requires an RX clock for hardware initialization */
1271 priv->phylink_config.mac_requires_rxc = true;
1272
1273 if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1274 priv->phylink_config.eee_rx_clk_stop_enable = true;
1275
1276 /* Set the default transmit clock stop bit based on the platform glue */
1277 priv->tx_lpi_clk_stop = priv->plat->flags &
1278 STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1279
1280 mdio_bus_data = priv->plat->mdio_bus_data;
1281 if (mdio_bus_data)
1282 priv->phylink_config.default_an_inband =
1283 mdio_bus_data->default_an_inband;
1284
1285 /* Set the platform/firmware specified interface mode. Note, phylink
1286 * deals with the PHY interface mode, not the MAC interface mode.
1287 */
1288 __set_bit(mode, priv->phylink_config.supported_interfaces);
1289
1290 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1291 if (priv->hw->xpcs)
1292 pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1293 else
1294 pcs = priv->hw->phylink_pcs;
1295
1296 if (pcs)
1297 phy_interface_or(priv->phylink_config.supported_interfaces,
1298 priv->phylink_config.supported_interfaces,
1299 pcs->supported_interfaces);
1300
1301 if (priv->dma_cap.eee) {
1302 /* Assume all supported interfaces also support LPI */
1303 memcpy(priv->phylink_config.lpi_interfaces,
1304 priv->phylink_config.supported_interfaces,
1305 sizeof(priv->phylink_config.lpi_interfaces));
1306
1307 /* Full duplex speeds of 100Mbps and above are supported */
1308 priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) |
1309 MAC_100FD;
1310 priv->phylink_config.lpi_timer_default = eee_timer * 1000;
1311 priv->phylink_config.eee_enabled_default = true;
1312 }
1313
1314 fwnode = priv->plat->port_node;
1315 if (!fwnode)
1316 fwnode = dev_fwnode(priv->device);
1317
1318 phylink = phylink_create(&priv->phylink_config, fwnode,
1319 mode, &stmmac_phylink_mac_ops);
1320 if (IS_ERR(phylink))
1321 return PTR_ERR(phylink);
1322
1323 priv->phylink = phylink;
1324 return 0;
1325 }
1326
1327 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1328 struct stmmac_dma_conf *dma_conf)
1329 {
1330 u32 rx_cnt = priv->plat->rx_queues_to_use;
1331 unsigned int desc_size;
1332 void *head_rx;
1333 u32 queue;
1334
1335 /* Display RX rings */
1336 for (queue = 0; queue < rx_cnt; queue++) {
1337 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1338
1339 pr_info("\tRX Queue %u rings\n", queue);
1340
1341 if (priv->extend_desc) {
1342 head_rx = (void *)rx_q->dma_erx;
1343 desc_size = sizeof(struct dma_extended_desc);
1344 } else {
1345 head_rx = (void *)rx_q->dma_rx;
1346 desc_size = sizeof(struct dma_desc);
1347 }
1348
1349 /* Display RX ring */
1350 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1351 rx_q->dma_rx_phy, desc_size);
1352 }
1353 }
1354
1355 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1356 struct stmmac_dma_conf *dma_conf)
1357 {
1358 u32 tx_cnt = priv->plat->tx_queues_to_use;
1359 unsigned int desc_size;
1360 void *head_tx;
1361 u32 queue;
1362
1363 /* Display TX rings */
1364 for (queue = 0; queue < tx_cnt; queue++) {
1365 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1366
1367 pr_info("\tTX Queue %d rings\n", queue);
1368
1369 if (priv->extend_desc) {
1370 head_tx = (void *)tx_q->dma_etx;
1371 desc_size = sizeof(struct dma_extended_desc);
1372 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1373 head_tx = (void *)tx_q->dma_entx;
1374 desc_size = sizeof(struct dma_edesc);
1375 } else {
1376 head_tx = (void *)tx_q->dma_tx;
1377 desc_size = sizeof(struct dma_desc);
1378 }
1379
1380 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1381 tx_q->dma_tx_phy, desc_size);
1382 }
1383 }
1384
1385 static void stmmac_display_rings(struct stmmac_priv *priv,
1386 struct stmmac_dma_conf *dma_conf)
1387 {
1388 /* Display RX ring */
1389 stmmac_display_rx_rings(priv, dma_conf);
1390
1391 /* Display TX ring */
1392 stmmac_display_tx_rings(priv, dma_conf);
1393 }
1394
1395 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1396 {
1397 if (stmmac_xdp_is_enabled(priv))
1398 return XDP_PACKET_HEADROOM;
1399
1400 return NET_SKB_PAD;
1401 }
1402
1403 static int stmmac_set_bfsize(int mtu, int bufsize)
1404 {
1405 int ret = bufsize;
1406
1407 if (mtu >= BUF_SIZE_8KiB)
1408 ret = BUF_SIZE_16KiB;
1409 else if (mtu >= BUF_SIZE_4KiB)
1410 ret = BUF_SIZE_8KiB;
1411 else if (mtu >= BUF_SIZE_2KiB)
1412 ret = BUF_SIZE_4KiB;
1413 else if (mtu > DEFAULT_BUFSIZE)
1414 ret = BUF_SIZE_2KiB;
1415 else
1416 ret = DEFAULT_BUFSIZE;
1417
1418 return ret;
1419 }
1420
1421 /**
1422 * stmmac_clear_rx_descriptors - clear RX descriptors
1423 * @priv: driver private structure
1424 * @dma_conf: structure to take the dma data
1425 * @queue: RX queue index
1426 * Description: this function is called to clear the RX descriptors,
1427 * whether basic or extended descriptors are used.
1428 */
1429 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1430 struct stmmac_dma_conf *dma_conf,
1431 u32 queue)
1432 {
1433 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1434 int i;
1435
1436 /* Clear the RX descriptors */
1437 for (i = 0; i < dma_conf->dma_rx_size; i++)
1438 if (priv->extend_desc)
1439 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1440 priv->use_riwt, priv->mode,
1441 (i == dma_conf->dma_rx_size - 1),
1442 dma_conf->dma_buf_sz);
1443 else
1444 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1445 priv->use_riwt, priv->mode,
1446 (i == dma_conf->dma_rx_size - 1),
1447 dma_conf->dma_buf_sz);
1448 }
1449
1450 /**
1451 * stmmac_clear_tx_descriptors - clear tx descriptors
1452 * @priv: driver private structure
1453 * @dma_conf: structure to take the dma data
1454 * @queue: TX queue index.
1455 * Description: this function is called to clear the TX descriptors,
1456 * whether basic or extended descriptors are used.
1457 */
1458 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1459 struct stmmac_dma_conf *dma_conf,
1460 u32 queue)
1461 {
1462 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1463 int i;
1464
1465 /* Clear the TX descriptors */
1466 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1467 int last = (i == (dma_conf->dma_tx_size - 1));
1468 struct dma_desc *p;
1469
1470 if (priv->extend_desc)
1471 p = &tx_q->dma_etx[i].basic;
1472 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1473 p = &tx_q->dma_entx[i].basic;
1474 else
1475 p = &tx_q->dma_tx[i];
1476
1477 stmmac_init_tx_desc(priv, p, priv->mode, last);
1478 }
1479 }
1480
1481 /**
1482 * stmmac_clear_descriptors - clear descriptors
1483 * @priv: driver private structure
1484 * @dma_conf: structure to take the dma data
1485 * Description: this function is called to clear the TX and RX descriptors,
1486 * whether basic or extended descriptors are used.
1487 */
1488 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1489 struct stmmac_dma_conf *dma_conf)
1490 {
1491 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1492 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1493 u32 queue;
1494
1495 /* Clear the RX descriptors */
1496 for (queue = 0; queue < rx_queue_cnt; queue++)
1497 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1498
1499 /* Clear the TX descriptors */
1500 for (queue = 0; queue < tx_queue_cnt; queue++)
1501 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1502 }
1503
1504 /**
1505 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1506 * @priv: driver private structure
1507 * @dma_conf: structure to take the dma data
1508 * @p: descriptor pointer
1509 * @i: descriptor index
1510 * @flags: gfp flag
1511 * @queue: RX queue index
1512 * Description: this function is called to allocate a receive buffer, perform
1513 * the DMA mapping and init the descriptor.
1514 */
1515 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1516 struct stmmac_dma_conf *dma_conf,
1517 struct dma_desc *p,
1518 int i, gfp_t flags, u32 queue)
1519 {
1520 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1521 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1522 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1523
1524 if (priv->dma_cap.host_dma_width <= 32)
1525 gfp |= GFP_DMA32;
1526
1527 if (!buf->page) {
1528 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1529 if (!buf->page)
1530 return -ENOMEM;
1531 buf->page_offset = stmmac_rx_offset(priv);
1532 }
1533
1534 if (priv->sph && !buf->sec_page) {
1535 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1536 if (!buf->sec_page)
1537 return -ENOMEM;
1538
1539 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1540 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1541 } else {
1542 buf->sec_page = NULL;
1543 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1544 }
1545
1546 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1547
1548 stmmac_set_desc_addr(priv, p, buf->addr);
1549 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1550 stmmac_init_desc3(priv, p);
1551
1552 return 0;
1553 }
1554
1555 /**
1556 * stmmac_free_rx_buffer - free RX dma buffers
1557 * @priv: private structure
1558 * @rx_q: RX queue
1559 * @i: buffer index.
1560 */
1561 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1562 struct stmmac_rx_queue *rx_q,
1563 int i)
1564 {
1565 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1566
1567 if (buf->page)
1568 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1569 buf->page = NULL;
1570
1571 if (buf->sec_page)
1572 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1573 buf->sec_page = NULL;
1574 }
1575
1576 /**
1577 * stmmac_free_tx_buffer - free TX dma buffers
1578 * @priv: private structure
1579 * @dma_conf: structure to take the dma data
1580 * @queue: TX queue index
1581 * @i: buffer index.
1582 */
1583 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1584 struct stmmac_dma_conf *dma_conf,
1585 u32 queue, int i)
1586 {
1587 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1588
1589 if (tx_q->tx_skbuff_dma[i].buf &&
1590 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1591 if (tx_q->tx_skbuff_dma[i].map_as_page)
1592 dma_unmap_page(priv->device,
1593 tx_q->tx_skbuff_dma[i].buf,
1594 tx_q->tx_skbuff_dma[i].len,
1595 DMA_TO_DEVICE);
1596 else
1597 dma_unmap_single(priv->device,
1598 tx_q->tx_skbuff_dma[i].buf,
1599 tx_q->tx_skbuff_dma[i].len,
1600 DMA_TO_DEVICE);
1601 }
1602
1603 if (tx_q->xdpf[i] &&
1604 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1605 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1606 xdp_return_frame(tx_q->xdpf[i]);
1607 tx_q->xdpf[i] = NULL;
1608 }
1609
1610 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1611 tx_q->xsk_frames_done++;
1612
1613 if (tx_q->tx_skbuff[i] &&
1614 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1615 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1616 tx_q->tx_skbuff[i] = NULL;
1617 }
1618
1619 tx_q->tx_skbuff_dma[i].buf = 0;
1620 tx_q->tx_skbuff_dma[i].map_as_page = false;
1621 }
1622
1623 /**
1624 * dma_free_rx_skbufs - free RX dma buffers
1625 * @priv: private structure
1626 * @dma_conf: structure to take the dma data
1627 * @queue: RX queue index
1628 */
1629 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1630 struct stmmac_dma_conf *dma_conf,
1631 u32 queue)
1632 {
1633 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1634 int i;
1635
1636 for (i = 0; i < dma_conf->dma_rx_size; i++)
1637 stmmac_free_rx_buffer(priv, rx_q, i);
1638 }
1639
1640 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1641 struct stmmac_dma_conf *dma_conf,
1642 u32 queue, gfp_t flags)
1643 {
1644 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1645 int i;
1646
1647 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1648 struct dma_desc *p;
1649 int ret;
1650
1651 if (priv->extend_desc)
1652 p = &((rx_q->dma_erx + i)->basic);
1653 else
1654 p = rx_q->dma_rx + i;
1655
1656 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1657 queue);
1658 if (ret)
1659 return ret;
1660
1661 rx_q->buf_alloc_num++;
1662 }
1663
1664 return 0;
1665 }
1666
1667 /**
1668 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1669 * @priv: private structure
1670 * @dma_conf: structure to take the dma data
1671 * @queue: RX queue index
1672 */
1673 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1674 struct stmmac_dma_conf *dma_conf,
1675 u32 queue)
1676 {
1677 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1678 int i;
1679
1680 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1681 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1682
1683 if (!buf->xdp)
1684 continue;
1685
1686 xsk_buff_free(buf->xdp);
1687 buf->xdp = NULL;
1688 }
1689 }
1690
1691 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1692 struct stmmac_dma_conf *dma_conf,
1693 u32 queue)
1694 {
1695 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1696 int i;
1697
/* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
 * in struct xdp_buff_xsk to stash driver-specific information, so
 * use this macro to make sure there are no size violations.
 */
1702 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1703
1704 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1705 struct stmmac_rx_buffer *buf;
1706 dma_addr_t dma_addr;
1707 struct dma_desc *p;
1708
1709 if (priv->extend_desc)
1710 p = (struct dma_desc *)(rx_q->dma_erx + i);
1711 else
1712 p = rx_q->dma_rx + i;
1713
1714 buf = &rx_q->buf_pool[i];
1715
1716 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1717 if (!buf->xdp)
1718 return -ENOMEM;
1719
1720 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1721 stmmac_set_desc_addr(priv, p, dma_addr);
1722 rx_q->buf_alloc_num++;
1723 }
1724
1725 return 0;
1726 }
1727
static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1729 {
1730 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1731 return NULL;
1732
1733 return xsk_get_pool_from_qid(priv->dev, queue);
1734 }
1735
1736 /**
1737 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1738 * @priv: driver private structure
1739 * @dma_conf: structure to take the dma data
1740 * @queue: RX queue index
1741 * @flags: gfp flag.
1742 * Description: this function initializes the DMA RX descriptors
1743 * and allocates the socket buffers. It supports the chained and ring
1744 * modes.
1745 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1747 struct stmmac_dma_conf *dma_conf,
1748 u32 queue, gfp_t flags)
1749 {
1750 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1751 int ret;
1752
1753 netif_dbg(priv, probe, priv->dev,
1754 "(%s) dma_rx_phy=0x%08x\n", __func__,
1755 (u32)rx_q->dma_rx_phy);
1756
1757 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1758
1759 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1760
1761 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1762
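/* Register the memory model that matches where this queue's buffers come
 * from: the XSK buffer pool when AF_XDP zero-copy is enabled on the queue,
 * the page_pool otherwise.
 */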
1763 if (rx_q->xsk_pool) {
1764 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1765 MEM_TYPE_XSK_BUFF_POOL,
1766 NULL));
1767 netdev_info(priv->dev,
1768 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1769 rx_q->queue_index);
1770 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1771 } else {
1772 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1773 MEM_TYPE_PAGE_POOL,
1774 rx_q->page_pool));
1775 netdev_info(priv->dev,
1776 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1777 rx_q->queue_index);
1778 }
1779
1780 if (rx_q->xsk_pool) {
1781 /* RX XDP ZC buffer pool may not be populated, e.g.
1782 * xdpsock TX-only.
1783 */
1784 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1785 } else {
1786 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1787 if (ret < 0)
1788 return -ENOMEM;
1789 }
1790
1791 /* Setup the chained descriptor addresses */
1792 if (priv->mode == STMMAC_CHAIN_MODE) {
1793 if (priv->extend_desc)
1794 stmmac_mode_init(priv, rx_q->dma_erx,
1795 rx_q->dma_rx_phy,
1796 dma_conf->dma_rx_size, 1);
1797 else
1798 stmmac_mode_init(priv, rx_q->dma_rx,
1799 rx_q->dma_rx_phy,
1800 dma_conf->dma_rx_size, 0);
1801 }
1802
1803 return 0;
1804 }
1805
static int init_dma_rx_desc_rings(struct net_device *dev,
1807 struct stmmac_dma_conf *dma_conf,
1808 gfp_t flags)
1809 {
1810 struct stmmac_priv *priv = netdev_priv(dev);
1811 u32 rx_count = priv->plat->rx_queues_to_use;
1812 int queue;
1813 int ret;
1814
1815 /* RX INITIALIZATION */
1816 netif_dbg(priv, probe, priv->dev,
1817 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1818
1819 for (queue = 0; queue < rx_count; queue++) {
1820 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1821 if (ret)
1822 goto err_init_rx_buffers;
1823 }
1824
1825 return 0;
1826
1827 err_init_rx_buffers:
1828 while (queue >= 0) {
1829 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1830
1831 if (rx_q->xsk_pool)
1832 dma_free_rx_xskbufs(priv, dma_conf, queue);
1833 else
1834 dma_free_rx_skbufs(priv, dma_conf, queue);
1835
1836 rx_q->buf_alloc_num = 0;
1837 rx_q->xsk_pool = NULL;
1838
1839 queue--;
1840 }
1841
1842 return ret;
1843 }
1844
1845 /**
1846 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1847 * @priv: driver private structure
1848 * @dma_conf: structure to take the dma data
1849 * @queue: TX queue index
1850 * Description: this function initializes the DMA TX descriptors
1851 * and allocates the socket buffers. It supports the chained and ring
1852 * modes.
1853 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1855 struct stmmac_dma_conf *dma_conf,
1856 u32 queue)
1857 {
1858 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1859 int i;
1860
1861 netif_dbg(priv, probe, priv->dev,
1862 "(%s) dma_tx_phy=0x%08x\n", __func__,
1863 (u32)tx_q->dma_tx_phy);
1864
1865 /* Setup the chained descriptor addresses */
1866 if (priv->mode == STMMAC_CHAIN_MODE) {
1867 if (priv->extend_desc)
1868 stmmac_mode_init(priv, tx_q->dma_etx,
1869 tx_q->dma_tx_phy,
1870 dma_conf->dma_tx_size, 1);
1871 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1872 stmmac_mode_init(priv, tx_q->dma_tx,
1873 tx_q->dma_tx_phy,
1874 dma_conf->dma_tx_size, 0);
1875 }
1876
1877 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1878
1879 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1880 struct dma_desc *p;
1881
1882 if (priv->extend_desc)
1883 p = &((tx_q->dma_etx + i)->basic);
1884 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1885 p = &((tx_q->dma_entx + i)->basic);
1886 else
1887 p = tx_q->dma_tx + i;
1888
1889 stmmac_clear_desc(priv, p);
1890
1891 tx_q->tx_skbuff_dma[i].buf = 0;
1892 tx_q->tx_skbuff_dma[i].map_as_page = false;
1893 tx_q->tx_skbuff_dma[i].len = 0;
1894 tx_q->tx_skbuff_dma[i].last_segment = false;
1895 tx_q->tx_skbuff[i] = NULL;
1896 }
1897
1898 return 0;
1899 }
1900
static int init_dma_tx_desc_rings(struct net_device *dev,
1902 struct stmmac_dma_conf *dma_conf)
1903 {
1904 struct stmmac_priv *priv = netdev_priv(dev);
1905 u32 tx_queue_cnt;
1906 u32 queue;
1907
1908 tx_queue_cnt = priv->plat->tx_queues_to_use;
1909
1910 for (queue = 0; queue < tx_queue_cnt; queue++)
1911 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1912
1913 return 0;
1914 }
1915
1916 /**
1917 * init_dma_desc_rings - init the RX/TX descriptor rings
1918 * @dev: net device structure
1919 * @dma_conf: structure to take the dma data
1920 * @flags: gfp flag.
1921 * Description: this function initializes the DMA RX/TX descriptors
1922 * and allocates the socket buffers. It supports the chained and ring
1923 * modes.
1924 */
static int init_dma_desc_rings(struct net_device *dev,
1926 struct stmmac_dma_conf *dma_conf,
1927 gfp_t flags)
1928 {
1929 struct stmmac_priv *priv = netdev_priv(dev);
1930 int ret;
1931
1932 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1933 if (ret)
1934 return ret;
1935
1936 ret = init_dma_tx_desc_rings(dev, dma_conf);
1937
1938 stmmac_clear_descriptors(priv, dma_conf);
1939
1940 if (netif_msg_hw(priv))
1941 stmmac_display_rings(priv, dma_conf);
1942
1943 return ret;
1944 }
1945
1946 /**
1947 * dma_free_tx_skbufs - free TX dma buffers
1948 * @priv: private structure
1949 * @dma_conf: structure to take the dma data
1950 * @queue: TX queue index
1951 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1953 struct stmmac_dma_conf *dma_conf,
1954 u32 queue)
1955 {
1956 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1957 int i;
1958
1959 tx_q->xsk_frames_done = 0;
1960
1961 for (i = 0; i < dma_conf->dma_tx_size; i++)
1962 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1963
1964 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1965 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1966 tx_q->xsk_frames_done = 0;
1967 tx_q->xsk_pool = NULL;
1968 }
1969 }
1970
1971 /**
1972 * stmmac_free_tx_skbufs - free TX skb buffers
1973 * @priv: private structure
1974 */
static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1976 {
1977 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1978 u32 queue;
1979
1980 for (queue = 0; queue < tx_queue_cnt; queue++)
1981 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1982 }
1983
1984 /**
1985 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1986 * @priv: private structure
1987 * @dma_conf: structure to take the dma data
1988 * @queue: RX queue index
1989 */
static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1991 struct stmmac_dma_conf *dma_conf,
1992 u32 queue)
1993 {
1994 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1995
1996 /* Release the DMA RX socket buffers */
1997 if (rx_q->xsk_pool)
1998 dma_free_rx_xskbufs(priv, dma_conf, queue);
1999 else
2000 dma_free_rx_skbufs(priv, dma_conf, queue);
2001
2002 rx_q->buf_alloc_num = 0;
2003 rx_q->xsk_pool = NULL;
2004
2005 /* Free DMA regions of consistent memory previously allocated */
2006 if (!priv->extend_desc)
2007 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2008 sizeof(struct dma_desc),
2009 rx_q->dma_rx, rx_q->dma_rx_phy);
2010 else
2011 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2012 sizeof(struct dma_extended_desc),
2013 rx_q->dma_erx, rx_q->dma_rx_phy);
2014
2015 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
2016 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
2017
2018 kfree(rx_q->buf_pool);
2019 if (rx_q->page_pool)
2020 page_pool_destroy(rx_q->page_pool);
2021 }
2022
static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2024 struct stmmac_dma_conf *dma_conf)
2025 {
2026 u32 rx_count = priv->plat->rx_queues_to_use;
2027 u32 queue;
2028
2029 /* Free RX queue resources */
2030 for (queue = 0; queue < rx_count; queue++)
2031 __free_dma_rx_desc_resources(priv, dma_conf, queue);
2032 }
2033
2034 /**
2035 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2036 * @priv: private structure
2037 * @dma_conf: structure to take the dma data
2038 * @queue: TX queue index
2039 */
static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2041 struct stmmac_dma_conf *dma_conf,
2042 u32 queue)
2043 {
2044 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2045 size_t size;
2046 void *addr;
2047
2048 /* Release the DMA TX socket buffers */
2049 dma_free_tx_skbufs(priv, dma_conf, queue);
2050
2051 if (priv->extend_desc) {
2052 size = sizeof(struct dma_extended_desc);
2053 addr = tx_q->dma_etx;
2054 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2055 size = sizeof(struct dma_edesc);
2056 addr = tx_q->dma_entx;
2057 } else {
2058 size = sizeof(struct dma_desc);
2059 addr = tx_q->dma_tx;
2060 }
2061
2062 size *= dma_conf->dma_tx_size;
2063
2064 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2065
2066 kfree(tx_q->tx_skbuff_dma);
2067 kfree(tx_q->tx_skbuff);
2068 }
2069
static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2071 struct stmmac_dma_conf *dma_conf)
2072 {
2073 u32 tx_count = priv->plat->tx_queues_to_use;
2074 u32 queue;
2075
2076 /* Free TX queue resources */
2077 for (queue = 0; queue < tx_count; queue++)
2078 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2079 }
2080
2081 /**
2082 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2083 * @priv: private structure
2084 * @dma_conf: structure to take the dma data
2085 * @queue: RX queue index
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the RX resources for a single queue: the page pool
 * used to pre-allocate the RX buffers (allowing a zero-copy mechanism), the
 * buffer bookkeeping array and the descriptor ring.
2090 */
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2092 struct stmmac_dma_conf *dma_conf,
2093 u32 queue)
2094 {
2095 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2096 struct stmmac_channel *ch = &priv->channel[queue];
2097 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2098 struct page_pool_params pp_params = { 0 };
2099 unsigned int dma_buf_sz_pad, num_pages;
2100 unsigned int napi_id;
2101 int ret;
2102
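/* Each RX buffer must hold the headroom returned by stmmac_rx_offset(),
 * the payload (dma_buf_sz) and the skb_shared_info tailroom; round the
 * total up to whole pages so the page_pool order below covers it.
 */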
2103 dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2104 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2105 num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2106
2107 rx_q->queue_index = queue;
2108 rx_q->priv_data = priv;
2109 rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2110
2111 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2112 pp_params.pool_size = dma_conf->dma_rx_size;
2113 pp_params.order = order_base_2(num_pages);
2114 pp_params.nid = dev_to_node(priv->device);
2115 pp_params.dev = priv->device;
2116 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2117 pp_params.offset = stmmac_rx_offset(priv);
2118 pp_params.max_len = dma_conf->dma_buf_sz;
2119
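/* With Split Header enabled no headroom offset is reserved in the pool
 * and the whole offset + dma_buf_sz area is usable for received data.
 */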
2120 if (priv->sph) {
2121 pp_params.offset = 0;
2122 pp_params.max_len += stmmac_rx_offset(priv);
2123 }
2124
2125 rx_q->page_pool = page_pool_create(&pp_params);
2126 if (IS_ERR(rx_q->page_pool)) {
2127 ret = PTR_ERR(rx_q->page_pool);
2128 rx_q->page_pool = NULL;
2129 return ret;
2130 }
2131
2132 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2133 sizeof(*rx_q->buf_pool),
2134 GFP_KERNEL);
2135 if (!rx_q->buf_pool)
2136 return -ENOMEM;
2137
2138 if (priv->extend_desc) {
2139 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2140 dma_conf->dma_rx_size *
2141 sizeof(struct dma_extended_desc),
2142 &rx_q->dma_rx_phy,
2143 GFP_KERNEL);
2144 if (!rx_q->dma_erx)
2145 return -ENOMEM;
2146
2147 } else {
2148 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2149 dma_conf->dma_rx_size *
2150 sizeof(struct dma_desc),
2151 &rx_q->dma_rx_phy,
2152 GFP_KERNEL);
2153 if (!rx_q->dma_rx)
2154 return -ENOMEM;
2155 }
2156
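/* Register the RX queue against the NAPI instance that will actually
 * service it: the combined rx/tx NAPI for AF_XDP zero-copy queues, the
 * plain RX NAPI otherwise.
 */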
2157 if (stmmac_xdp_is_enabled(priv) &&
2158 test_bit(queue, priv->af_xdp_zc_qps))
2159 napi_id = ch->rxtx_napi.napi_id;
2160 else
2161 napi_id = ch->rx_napi.napi_id;
2162
2163 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2164 rx_q->queue_index,
2165 napi_id);
2166 if (ret) {
2167 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2168 return -EINVAL;
2169 }
2170
2171 return 0;
2172 }
2173
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2175 struct stmmac_dma_conf *dma_conf)
2176 {
2177 u32 rx_count = priv->plat->rx_queues_to_use;
2178 u32 queue;
2179 int ret;
2180
2181 /* RX queues buffers and DMA */
2182 for (queue = 0; queue < rx_count; queue++) {
2183 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2184 if (ret)
2185 goto err_dma;
2186 }
2187
2188 return 0;
2189
2190 err_dma:
2191 free_dma_rx_desc_resources(priv, dma_conf);
2192
2193 return ret;
2194 }
2195
2196 /**
2197 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2198 * @priv: private structure
2199 * @dma_conf: structure to take the dma data
2200 * @queue: TX queue index
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the TX resources for a single queue: the
 * descriptor ring and the per-descriptor bookkeeping arrays.
2205 */
static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2207 struct stmmac_dma_conf *dma_conf,
2208 u32 queue)
2209 {
2210 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2211 size_t size;
2212 void *addr;
2213
2214 tx_q->queue_index = queue;
2215 tx_q->priv_data = priv;
2216
2217 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2218 sizeof(*tx_q->tx_skbuff_dma),
2219 GFP_KERNEL);
2220 if (!tx_q->tx_skbuff_dma)
2221 return -ENOMEM;
2222
2223 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2224 sizeof(struct sk_buff *),
2225 GFP_KERNEL);
2226 if (!tx_q->tx_skbuff)
2227 return -ENOMEM;
2228
2229 if (priv->extend_desc)
2230 size = sizeof(struct dma_extended_desc);
2231 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2232 size = sizeof(struct dma_edesc);
2233 else
2234 size = sizeof(struct dma_desc);
2235
2236 size *= dma_conf->dma_tx_size;
2237
2238 addr = dma_alloc_coherent(priv->device, size,
2239 &tx_q->dma_tx_phy, GFP_KERNEL);
2240 if (!addr)
2241 return -ENOMEM;
2242
2243 if (priv->extend_desc)
2244 tx_q->dma_etx = addr;
2245 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2246 tx_q->dma_entx = addr;
2247 else
2248 tx_q->dma_tx = addr;
2249
2250 return 0;
2251 }
2252
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2254 struct stmmac_dma_conf *dma_conf)
2255 {
2256 u32 tx_count = priv->plat->tx_queues_to_use;
2257 u32 queue;
2258 int ret;
2259
2260 /* TX queues buffers and DMA */
2261 for (queue = 0; queue < tx_count; queue++) {
2262 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2263 if (ret)
2264 goto err_dma;
2265 }
2266
2267 return 0;
2268
2269 err_dma:
2270 free_dma_tx_desc_resources(priv, dma_conf);
2271 return ret;
2272 }
2273
2274 /**
2275 * alloc_dma_desc_resources - alloc TX/RX resources.
2276 * @priv: private structure
2277 * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extended or basic)
 * this function allocates the resources for the TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow a zero-copy mechanism.
2282 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2284 struct stmmac_dma_conf *dma_conf)
2285 {
2286 /* RX Allocation */
2287 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2288
2289 if (ret)
2290 return ret;
2291
2292 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2293
2294 return ret;
2295 }
2296
2297 /**
2298 * free_dma_desc_resources - free dma desc resources
2299 * @priv: private structure
2300 * @dma_conf: structure to take the dma data
2301 */
static void free_dma_desc_resources(struct stmmac_priv *priv,
2303 struct stmmac_dma_conf *dma_conf)
2304 {
2305 /* Release the DMA TX socket buffers */
2306 free_dma_tx_desc_resources(priv, dma_conf);
2307
2308 /* Release the DMA RX socket buffers later
2309 * to ensure all pending XDP_TX buffers are returned.
2310 */
2311 free_dma_rx_desc_resources(priv, dma_conf);
2312 }
2313
2314 /**
2315 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2316 * @priv: driver private structure
2317 * Description: It is used for enabling the rx queues in the MAC
2318 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2320 {
2321 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2322 int queue;
2323 u8 mode;
2324
2325 for (queue = 0; queue < rx_queues_count; queue++) {
2326 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2327 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2328 }
2329 }
2330
2331 /**
2332 * stmmac_start_rx_dma - start RX DMA channel
2333 * @priv: driver private structure
2334 * @chan: RX channel index
2335 * Description:
2336 * This starts a RX DMA channel
2337 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2339 {
2340 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2341 stmmac_start_rx(priv, priv->ioaddr, chan);
2342 }
2343
2344 /**
2345 * stmmac_start_tx_dma - start TX DMA channel
2346 * @priv: driver private structure
2347 * @chan: TX channel index
2348 * Description:
2349 * This starts a TX DMA channel
2350 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2352 {
2353 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2354 stmmac_start_tx(priv, priv->ioaddr, chan);
2355 }
2356
2357 /**
2358 * stmmac_stop_rx_dma - stop RX DMA channel
2359 * @priv: driver private structure
2360 * @chan: RX channel index
2361 * Description:
2362 * This stops a RX DMA channel
2363 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2365 {
2366 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2367 stmmac_stop_rx(priv, priv->ioaddr, chan);
2368 }
2369
2370 /**
2371 * stmmac_stop_tx_dma - stop TX DMA channel
2372 * @priv: driver private structure
2373 * @chan: TX channel index
2374 * Description:
2375 * This stops a TX DMA channel
2376 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2378 {
2379 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2380 stmmac_stop_tx(priv, priv->ioaddr, chan);
2381 }
2382
static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2384 {
2385 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2386 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2387 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2388 u32 chan;
2389
2390 for (chan = 0; chan < dma_csr_ch; chan++) {
2391 struct stmmac_channel *ch = &priv->channel[chan];
2392 unsigned long flags;
2393
2394 spin_lock_irqsave(&ch->lock, flags);
2395 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2396 spin_unlock_irqrestore(&ch->lock, flags);
2397 }
2398 }
2399
2400 /**
2401 * stmmac_start_all_dma - start all RX and TX DMA channels
2402 * @priv: driver private structure
2403 * Description:
2404 * This starts all the RX and TX DMA channels
2405 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
2407 {
2408 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2409 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2410 u32 chan = 0;
2411
2412 for (chan = 0; chan < rx_channels_count; chan++)
2413 stmmac_start_rx_dma(priv, chan);
2414
2415 for (chan = 0; chan < tx_channels_count; chan++)
2416 stmmac_start_tx_dma(priv, chan);
2417 }
2418
2419 /**
2420 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2421 * @priv: driver private structure
2422 * Description:
2423 * This stops the RX and TX DMA channels
2424 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2426 {
2427 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2428 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2429 u32 chan = 0;
2430
2431 for (chan = 0; chan < rx_channels_count; chan++)
2432 stmmac_stop_rx_dma(priv, chan);
2433
2434 for (chan = 0; chan < tx_channels_count; chan++)
2435 stmmac_stop_tx_dma(priv, chan);
2436 }
2437
2438 /**
2439 * stmmac_dma_operation_mode - HW DMA operation mode
2440 * @priv: driver private structure
2441 * Description: it is used for configuring the DMA operation mode register in
2442 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2443 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2445 {
2446 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2447 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2448 int rxfifosz = priv->plat->rx_fifo_size;
2449 int txfifosz = priv->plat->tx_fifo_size;
2450 u32 txmode = 0;
2451 u32 rxmode = 0;
2452 u32 chan = 0;
2453 u8 qmode = 0;
2454
2455 if (rxfifosz == 0)
2456 rxfifosz = priv->dma_cap.rx_fifo_size;
2457 if (txfifosz == 0)
2458 txfifosz = priv->dma_cap.tx_fifo_size;
2459
2460 /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2461 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2462 rxfifosz /= rx_channels_count;
2463 txfifosz /= tx_channels_count;
2464 }
2465
2466 if (priv->plat->force_thresh_dma_mode) {
2467 txmode = tc;
2468 rxmode = tc;
2469 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
/*
 * In case of GMAC, SF mode can be enabled
 * to perform the TX COE in HW. This depends on:
 * 1) TX COE being actually supported;
 * 2) there being no buggy Jumbo frame support
 *    that requires not inserting the csum in the TDES.
 */
2477 txmode = SF_DMA_MODE;
2478 rxmode = SF_DMA_MODE;
2479 priv->xstats.threshold = SF_DMA_MODE;
2480 } else {
2481 txmode = tc;
2482 rxmode = SF_DMA_MODE;
2483 }
2484
2485 /* configure all channels */
2486 for (chan = 0; chan < rx_channels_count; chan++) {
2487 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2488 u32 buf_size;
2489
2490 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2491
2492 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2493 rxfifosz, qmode);
2494
2495 if (rx_q->xsk_pool) {
2496 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2497 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2498 buf_size,
2499 chan);
2500 } else {
2501 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2502 priv->dma_conf.dma_buf_sz,
2503 chan);
2504 }
2505 }
2506
2507 for (chan = 0; chan < tx_channels_count; chan++) {
2508 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2509
2510 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2511 txfifosz, qmode);
2512 }
2513 }
2514
static void stmmac_xsk_request_timestamp(void *_priv)
2516 {
2517 struct stmmac_metadata_request *meta_req = _priv;
2518
2519 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2520 *meta_req->set_ic = true;
2521 }
2522
static u64 stmmac_xsk_fill_timestamp(void *_priv)
2524 {
2525 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2526 struct stmmac_priv *priv = tx_compl->priv;
2527 struct dma_desc *desc = tx_compl->desc;
2528 bool found = false;
2529 u64 ns = 0;
2530
2531 if (!priv->hwts_tx_en)
2532 return 0;
2533
2534 /* check tx tstamp status */
2535 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2536 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2537 found = true;
2538 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2539 found = true;
2540 }
2541
2542 if (found) {
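/* Compensate for the Clock Domain Crossing (CDC) error, mirroring the
 * regular TX timestamp path.
 */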
2543 ns -= priv->plat->cdc_error_adj;
2544 return ns_to_ktime(ns);
2545 }
2546
2547 return 0;
2548 }
2549
static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2551 {
2552 struct timespec64 ts = ns_to_timespec64(launch_time);
2553 struct stmmac_metadata_request *meta_req = _priv;
2554
2555 if (meta_req->tbs & STMMAC_TBS_EN)
2556 stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2557 ts.tv_nsec);
2558 }
2559
2560 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2561 .tmo_request_timestamp = stmmac_xsk_request_timestamp,
2562 .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
2563 .tmo_request_launch_time = stmmac_xsk_request_launch_time,
2564 };
2565
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2567 {
2568 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2569 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2570 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2571 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2572 unsigned int entry = tx_q->cur_tx;
2573 struct dma_desc *tx_desc = NULL;
2574 struct xdp_desc xdp_desc;
2575 bool work_done = true;
2576 u32 tx_set_ic_bit = 0;
2577
2578 /* Avoids TX time-out as we are sharing with slow path */
2579 txq_trans_cond_update(nq);
2580
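/* Never queue more frames than there are free descriptors in the ring. */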
2581 budget = min(budget, stmmac_tx_avail(priv, queue));
2582
2583 while (budget-- > 0) {
2584 struct stmmac_metadata_request meta_req;
2585 struct xsk_tx_metadata *meta = NULL;
2586 dma_addr_t dma_addr;
2587 bool set_ic;
2588
/* We are sharing the ring with the slow path, so stop XSK TX descriptor
 * submission when the available TX ring space drops below the threshold.
 */
2592 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2593 !netif_carrier_ok(priv->dev)) {
2594 work_done = false;
2595 break;
2596 }
2597
2598 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2599 break;
2600
2601 if (priv->est && priv->est->enable &&
2602 priv->est->max_sdu[queue] &&
2603 xdp_desc.len > priv->est->max_sdu[queue]) {
2604 priv->xstats.max_sdu_txq_drop[queue]++;
2605 continue;
2606 }
2607
2608 if (likely(priv->extend_desc))
2609 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2610 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2611 tx_desc = &tx_q->dma_entx[entry].basic;
2612 else
2613 tx_desc = tx_q->dma_tx + entry;
2614
2615 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2616 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2617 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2618
2619 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2620
/* To return the XDP buffer to the XSK pool, we simply call
 * xsk_tx_completed(), so we don't need to fill up
 * 'buf' and 'xdpf'.
 */
2625 tx_q->tx_skbuff_dma[entry].buf = 0;
2626 tx_q->xdpf[entry] = NULL;
2627
2628 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2629 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2630 tx_q->tx_skbuff_dma[entry].last_segment = true;
2631 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2632
2633 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2634
2635 tx_q->tx_count_frames++;
2636
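/* Request a TX completion interrupt only once every tx_coal_frames
 * descriptors to mitigate the interrupt rate; xsk_tx_metadata_request()
 * below may also force it via meta_req.set_ic.
 */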
2637 if (!priv->tx_coal_frames[queue])
2638 set_ic = false;
2639 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2640 set_ic = true;
2641 else
2642 set_ic = false;
2643
2644 meta_req.priv = priv;
2645 meta_req.tx_desc = tx_desc;
2646 meta_req.set_ic = &set_ic;
2647 meta_req.tbs = tx_q->tbs;
2648 meta_req.edesc = &tx_q->dma_entx[entry];
2649 xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2650 &meta_req);
2651 if (set_ic) {
2652 tx_q->tx_count_frames = 0;
2653 stmmac_set_tx_ic(priv, tx_desc);
2654 tx_set_ic_bit++;
2655 }
2656
2657 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2658 true, priv->mode, true, true,
2659 xdp_desc.len);
2660
2661 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2662
2663 xsk_tx_metadata_to_compl(meta,
2664 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2665
2666 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2667 entry = tx_q->cur_tx;
2668 }
2669 u64_stats_update_begin(&txq_stats->napi_syncp);
2670 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2671 u64_stats_update_end(&txq_stats->napi_syncp);
2672
2673 if (tx_desc) {
2674 stmmac_flush_tx_descriptors(priv, queue);
2675 xsk_tx_release(pool);
2676 }
2677
/* Return true only if both of the following conditions are met:
 * a) TX budget is still available;
 * b) work_done == true, i.e. the XSK TX desc peek found nothing
 *    (no more pending XSK TX for transmission).
 */
2683 return !!budget && work_done;
2684 }
2685
static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2687 {
2688 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2689 tc += 64;
2690
2691 if (priv->plat->force_thresh_dma_mode)
2692 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2693 else
2694 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2695 chan);
2696
2697 priv->xstats.threshold = tc;
2698 }
2699 }
2700
2701 /**
2702 * stmmac_tx_clean - to manage the transmission completion
2703 * @priv: driver private structure
2704 * @budget: napi budget limiting this functions packet handling
2705 * @queue: TX queue index
2706 * @pending_packets: signal to arm the TX coal timer
2707 * Description: it reclaims the transmit resources after transmission completes.
 * If some packets still need to be handled, due to TX coalescing, set
2709 * pending_packets to true to make NAPI arm the TX coal timer.
2710 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2712 bool *pending_packets)
2713 {
2714 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2715 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2716 unsigned int bytes_compl = 0, pkts_compl = 0;
2717 unsigned int entry, xmits = 0, count = 0;
2718 u32 tx_packets = 0, tx_errors = 0;
2719
2720 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2721
2722 tx_q->xsk_frames_done = 0;
2723
2724 entry = tx_q->dirty_tx;
2725
2726 /* Try to clean all TX complete frame in 1 shot */
2727 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2728 struct xdp_frame *xdpf;
2729 struct sk_buff *skb;
2730 struct dma_desc *p;
2731 int status;
2732
2733 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2734 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2735 xdpf = tx_q->xdpf[entry];
2736 skb = NULL;
2737 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2738 xdpf = NULL;
2739 skb = tx_q->tx_skbuff[entry];
2740 } else {
2741 xdpf = NULL;
2742 skb = NULL;
2743 }
2744
2745 if (priv->extend_desc)
2746 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2747 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2748 p = &tx_q->dma_entx[entry].basic;
2749 else
2750 p = tx_q->dma_tx + entry;
2751
2752 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2753 /* Check if the descriptor is owned by the DMA */
2754 if (unlikely(status & tx_dma_own))
2755 break;
2756
2757 count++;
2758
2759 /* Make sure descriptor fields are read after reading
2760 * the own bit.
2761 */
2762 dma_rmb();
2763
2764 /* Just consider the last segment and ...*/
2765 if (likely(!(status & tx_not_ls))) {
2766 /* ... verify the status error condition */
2767 if (unlikely(status & tx_err)) {
2768 tx_errors++;
2769 if (unlikely(status & tx_err_bump_tc))
2770 stmmac_bump_dma_threshold(priv, queue);
2771 } else {
2772 tx_packets++;
2773 }
2774 if (skb) {
2775 stmmac_get_tx_hwtstamp(priv, p, skb);
2776 } else if (tx_q->xsk_pool &&
2777 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2778 struct stmmac_xsk_tx_complete tx_compl = {
2779 .priv = priv,
2780 .desc = p,
2781 };
2782
2783 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2784 &stmmac_xsk_tx_metadata_ops,
2785 &tx_compl);
2786 }
2787 }
2788
2789 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2790 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2791 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2792 dma_unmap_page(priv->device,
2793 tx_q->tx_skbuff_dma[entry].buf,
2794 tx_q->tx_skbuff_dma[entry].len,
2795 DMA_TO_DEVICE);
2796 else
2797 dma_unmap_single(priv->device,
2798 tx_q->tx_skbuff_dma[entry].buf,
2799 tx_q->tx_skbuff_dma[entry].len,
2800 DMA_TO_DEVICE);
2801 tx_q->tx_skbuff_dma[entry].buf = 0;
2802 tx_q->tx_skbuff_dma[entry].len = 0;
2803 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2804 }
2805
2806 stmmac_clean_desc3(priv, tx_q, p);
2807
2808 tx_q->tx_skbuff_dma[entry].last_segment = false;
2809 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2810
2811 if (xdpf &&
2812 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2813 xdp_return_frame_rx_napi(xdpf);
2814 tx_q->xdpf[entry] = NULL;
2815 }
2816
2817 if (xdpf &&
2818 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2819 xdp_return_frame(xdpf);
2820 tx_q->xdpf[entry] = NULL;
2821 }
2822
2823 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2824 tx_q->xsk_frames_done++;
2825
2826 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2827 if (likely(skb)) {
2828 pkts_compl++;
2829 bytes_compl += skb->len;
2830 dev_consume_skb_any(skb);
2831 tx_q->tx_skbuff[entry] = NULL;
2832 }
2833 }
2834
2835 stmmac_release_tx_desc(priv, p, priv->mode);
2836
2837 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2838 }
2839 tx_q->dirty_tx = entry;
2840
2841 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2842 pkts_compl, bytes_compl);
2843
2844 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2845 queue))) &&
2846 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2847
2848 netif_dbg(priv, tx_done, priv->dev,
2849 "%s: restart transmit\n", __func__);
2850 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2851 }
2852
2853 if (tx_q->xsk_pool) {
2854 bool work_done;
2855
2856 if (tx_q->xsk_frames_done)
2857 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2858
2859 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2860 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2861
2862 /* For XSK TX, we try to send as many as possible.
2863 * If XSK work done (XSK TX desc empty and budget still
2864 * available), return "budget - 1" to reenable TX IRQ.
2865 * Else, return "budget" to make NAPI continue polling.
2866 */
2867 work_done = stmmac_xdp_xmit_zc(priv, queue,
2868 STMMAC_XSK_TX_BUDGET_MAX);
2869 if (work_done)
2870 xmits = budget - 1;
2871 else
2872 xmits = budget;
2873 }
2874
2875 if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2876 stmmac_restart_sw_lpi_timer(priv);
2877
2878 /* We still have pending packets, let's call for a new scheduling */
2879 if (tx_q->dirty_tx != tx_q->cur_tx)
2880 *pending_packets = true;
2881
2882 u64_stats_update_begin(&txq_stats->napi_syncp);
2883 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2884 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2885 u64_stats_inc(&txq_stats->napi.tx_clean);
2886 u64_stats_update_end(&txq_stats->napi_syncp);
2887
2888 priv->xstats.tx_errors += tx_errors;
2889
2890 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2891
2892 /* Combine decisions from TX clean and XSK TX */
2893 return max(count, xmits);
2894 }
2895
2896 /**
2897 * stmmac_tx_err - to manage the tx error
2898 * @priv: driver private structure
2899 * @chan: channel index
2900 * Description: it cleans the descriptors and restarts the transmission
2901 * in case of transmission errors.
2902 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2904 {
2905 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2906
2907 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2908
2909 stmmac_stop_tx_dma(priv, chan);
2910 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2911 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2912 stmmac_reset_tx_queue(priv, chan);
2913 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2914 tx_q->dma_tx_phy, chan);
2915 stmmac_start_tx_dma(priv, chan);
2916
2917 priv->xstats.tx_errors++;
2918 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2919 }
2920
2921 /**
2922 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2923 * @priv: driver private structure
2924 * @txmode: TX operating mode
2925 * @rxmode: RX operating mode
2926 * @chan: channel index
 * Description: it is used for configuring the DMA operation mode at
 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2929 * mode.
2930 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2932 u32 rxmode, u32 chan)
2933 {
2934 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2935 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2936 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2937 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2938 int rxfifosz = priv->plat->rx_fifo_size;
2939 int txfifosz = priv->plat->tx_fifo_size;
2940
2941 if (rxfifosz == 0)
2942 rxfifosz = priv->dma_cap.rx_fifo_size;
2943 if (txfifosz == 0)
2944 txfifosz = priv->dma_cap.tx_fifo_size;
2945
2946 /* Adjust for real per queue fifo size */
2947 rxfifosz /= rx_channels_count;
2948 txfifosz /= tx_channels_count;
2949
2950 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2951 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2952 }
2953
static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2955 {
2956 int ret;
2957
2958 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2959 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2960 if (ret && (ret != -EINVAL)) {
2961 stmmac_global_err(priv);
2962 return true;
2963 }
2964
2965 return false;
2966 }
2967
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2969 {
2970 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2971 &priv->xstats, chan, dir);
2972 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2973 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2974 struct stmmac_channel *ch = &priv->channel[chan];
2975 struct napi_struct *rx_napi;
2976 struct napi_struct *tx_napi;
2977 unsigned long flags;
2978
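/* Queues running in AF_XDP zero-copy mode are serviced by the combined
 * rx/tx NAPI instance instead of the separate rx and tx ones.
 */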
2979 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2980 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2981
2982 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2983 if (napi_schedule_prep(rx_napi)) {
2984 spin_lock_irqsave(&ch->lock, flags);
2985 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2986 spin_unlock_irqrestore(&ch->lock, flags);
2987 __napi_schedule(rx_napi);
2988 }
2989 }
2990
2991 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2992 if (napi_schedule_prep(tx_napi)) {
2993 spin_lock_irqsave(&ch->lock, flags);
2994 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2995 spin_unlock_irqrestore(&ch->lock, flags);
2996 __napi_schedule(tx_napi);
2997 }
2998 }
2999
3000 return status;
3001 }
3002
3003 /**
3004 * stmmac_dma_interrupt - DMA ISR
3005 * @priv: driver private structure
3006 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case some
 * work can be done.
3009 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3011 {
3012 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3013 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3014 u32 channels_to_check = tx_channel_count > rx_channel_count ?
3015 tx_channel_count : rx_channel_count;
3016 u32 chan;
3017 int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3018
3019 /* Make sure we never check beyond our status buffer. */
3020 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3021 channels_to_check = ARRAY_SIZE(status);
3022
3023 for (chan = 0; chan < channels_to_check; chan++)
3024 status[chan] = stmmac_napi_check(priv, chan,
3025 DMA_DIR_RXTX);
3026
3027 for (chan = 0; chan < tx_channel_count; chan++) {
3028 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3029 /* Try to bump up the dma threshold on this failure */
3030 stmmac_bump_dma_threshold(priv, chan);
3031 } else if (unlikely(status[chan] == tx_hard_error)) {
3032 stmmac_tx_err(priv, chan);
3033 }
3034 }
3035 }
3036
3037 /**
3038 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3039 * @priv: driver private structure
 * Description: this masks the MMC irq since the counters are managed in SW.
3041 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
3043 {
3044 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3045 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3046
3047 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3048
3049 if (priv->dma_cap.rmon) {
3050 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3051 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3052 } else
3053 netdev_info(priv->dev, "No MAC Management Counters available\n");
3054 }
3055
3056 /**
3057 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3058 * @priv: driver private structure
3059 * Description:
 * newer GMAC chip generations have a register to indicate the
 * presence of the optional features/functions.
 * This can also be used to override the value passed through the
 * platform, which is necessary for old MAC10/100 and GMAC chips.
3064 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
3066 {
3067 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3068 }
3069
3070 /**
3071 * stmmac_check_ether_addr - check if the MAC addr is valid
3072 * @priv: driver private structure
3073 * Description:
 * it verifies that the MAC address is valid; in case of failure it
 * generates a random MAC address.
3076 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3078 {
3079 u8 addr[ETH_ALEN];
3080
3081 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3082 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3083 if (is_valid_ether_addr(addr))
3084 eth_hw_addr_set(priv->dev, addr);
3085 else
3086 eth_hw_addr_random(priv->dev);
3087 dev_info(priv->device, "device MAC address %pM\n",
3088 priv->dev->dev_addr);
3089 }
3090 }
3091
3092 /**
3093 * stmmac_init_dma_engine - DMA init.
3094 * @priv: driver private structure
3095 * Description:
3096 * It inits the DMA invoking the specific MAC/GMAC callback.
3097 * Some DMA parameters can be passed from the platform;
 * if these are not passed, a default is kept for the MAC or GMAC.
3099 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3101 {
3102 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3103 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3104 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3105 struct stmmac_rx_queue *rx_q;
3106 struct stmmac_tx_queue *tx_q;
3107 u32 chan = 0;
3108 int ret = 0;
3109
3110 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3111 netdev_err(priv->dev, "Invalid DMA configuration\n");
3112 return -EINVAL;
3113 }
3114
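/* Extended descriptors in ring mode require the Alternate Descriptor
 * Size (ATDS) bit to be set in the DMA configuration.
 */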
3115 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3116 priv->plat->dma_cfg->atds = 1;
3117
3118 ret = stmmac_reset(priv, priv->ioaddr);
3119 if (ret) {
3120 netdev_err(priv->dev, "Failed to reset the dma\n");
3121 return ret;
3122 }
3123
3124 /* DMA Configuration */
3125 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3126
3127 if (priv->plat->axi)
3128 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3129
3130 /* DMA CSR Channel configuration */
3131 for (chan = 0; chan < dma_csr_ch; chan++) {
3132 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3133 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3134 }
3135
3136 /* DMA RX Channel Configuration */
3137 for (chan = 0; chan < rx_channels_count; chan++) {
3138 rx_q = &priv->dma_conf.rx_queue[chan];
3139
3140 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3141 rx_q->dma_rx_phy, chan);
3142
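/* Point the RX tail just past the descriptors that actually got a
 * buffer; for XSK queues buf_alloc_num may be smaller than the ring
 * size if the pool was not fully populated.
 */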
3143 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3144 (rx_q->buf_alloc_num *
3145 sizeof(struct dma_desc));
3146 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3147 rx_q->rx_tail_addr, chan);
3148 }
3149
3150 /* DMA TX Channel Configuration */
3151 for (chan = 0; chan < tx_channels_count; chan++) {
3152 tx_q = &priv->dma_conf.tx_queue[chan];
3153
3154 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3155 tx_q->dma_tx_phy, chan);
3156
3157 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3158 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3159 tx_q->tx_tail_addr, chan);
3160 }
3161
3162 return ret;
3163 }
3164
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3166 {
3167 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3168 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3169 struct stmmac_channel *ch;
3170 struct napi_struct *napi;
3171
3172 if (!tx_coal_timer)
3173 return;
3174
3175 ch = &priv->channel[tx_q->queue_index];
3176 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3177
/* Arm the timer only if napi is not already scheduled.
 * If napi is scheduled, try to cancel any pending timer; the timer
 * will be armed again in the next scheduled napi.
 */
3182 if (unlikely(!napi_is_scheduled(napi)))
3183 hrtimer_start(&tx_q->txtimer,
3184 STMMAC_COAL_TIMER(tx_coal_timer),
3185 HRTIMER_MODE_REL);
3186 else
3187 hrtimer_try_to_cancel(&tx_q->txtimer);
3188 }
3189
3190 /**
3191 * stmmac_tx_timer - mitigation sw timer for tx.
3192 * @t: data pointer
3193 * Description:
3194 * This is the timer handler to directly invoke the stmmac_tx_clean.
3195 */
static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3197 {
3198 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3199 struct stmmac_priv *priv = tx_q->priv_data;
3200 struct stmmac_channel *ch;
3201 struct napi_struct *napi;
3202
3203 ch = &priv->channel[tx_q->queue_index];
3204 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3205
3206 if (likely(napi_schedule_prep(napi))) {
3207 unsigned long flags;
3208
3209 spin_lock_irqsave(&ch->lock, flags);
3210 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3211 spin_unlock_irqrestore(&ch->lock, flags);
3212 __napi_schedule(napi);
3213 }
3214
3215 return HRTIMER_NORESTART;
3216 }
3217
3218 /**
3219 * stmmac_init_coalesce - init mitigation options.
3220 * @priv: driver private structure
3221 * Description:
3222 * This inits the coalesce parameters: i.e. timer rate,
3223 * timer handler and default threshold used for enabling the
3224 * interrupt on completion bit.
3225 */
static void stmmac_init_coalesce(struct stmmac_priv *priv)
3227 {
3228 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3229 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3230 u32 chan;
3231
3232 for (chan = 0; chan < tx_channel_count; chan++) {
3233 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3234
3235 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3236 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3237
3238 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3239 }
3240
3241 for (chan = 0; chan < rx_channel_count; chan++)
3242 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3243 }
3244
static void stmmac_set_rings_length(struct stmmac_priv *priv)
3246 {
3247 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3248 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3249 u32 chan;
3250
3251 /* set TX ring length */
3252 for (chan = 0; chan < tx_channels_count; chan++)
3253 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3254 (priv->dma_conf.dma_tx_size - 1), chan);
3255
3256 /* set RX ring length */
3257 for (chan = 0; chan < rx_channels_count; chan++)
3258 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3259 (priv->dma_conf.dma_rx_size - 1), chan);
3260 }
3261
3262 /**
3263 * stmmac_set_tx_queue_weight - Set TX queue weight
3264 * @priv: driver private structure
3265 * Description: It is used for setting TX queues weight
3266 */
static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3268 {
3269 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3270 u32 weight;
3271 u32 queue;
3272
3273 for (queue = 0; queue < tx_queues_count; queue++) {
3274 weight = priv->plat->tx_queues_cfg[queue].weight;
3275 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3276 }
3277 }
3278
3279 /**
3280 * stmmac_configure_cbs - Configure CBS in TX queue
3281 * @priv: driver private structure
3282 * Description: It is used for configuring CBS in AVB TX queues
3283 */
static void stmmac_configure_cbs(struct stmmac_priv *priv)
3285 {
3286 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3287 u32 mode_to_use;
3288 u32 queue;
3289
3290 /* queue 0 is reserved for legacy traffic */
3291 for (queue = 1; queue < tx_queues_count; queue++) {
3292 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3293 if (mode_to_use == MTL_QUEUE_DCB)
3294 continue;
3295
3296 stmmac_config_cbs(priv, priv->hw,
3297 priv->plat->tx_queues_cfg[queue].send_slope,
3298 priv->plat->tx_queues_cfg[queue].idle_slope,
3299 priv->plat->tx_queues_cfg[queue].high_credit,
3300 priv->plat->tx_queues_cfg[queue].low_credit,
3301 queue);
3302 }
3303 }
3304
3305 /**
3306 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3307 * @priv: driver private structure
3308 * Description: It is used for mapping RX queues to RX dma channels
3309 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3311 {
3312 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3313 u32 queue;
3314 u32 chan;
3315
3316 for (queue = 0; queue < rx_queues_count; queue++) {
3317 chan = priv->plat->rx_queues_cfg[queue].chan;
3318 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3319 }
3320 }
3321
3322 /**
3323 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3324 * @priv: driver private structure
3325 * Description: It is used for configuring the RX Queue Priority
3326 */
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3328 {
3329 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3330 u32 queue;
3331 u32 prio;
3332
3333 for (queue = 0; queue < rx_queues_count; queue++) {
3334 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3335 continue;
3336
3337 prio = priv->plat->rx_queues_cfg[queue].prio;
3338 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3339 }
3340 }
3341
3342 /**
3343 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3344 * @priv: driver private structure
3345 * Description: It is used for configuring the TX Queue Priority
3346 */
static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3348 {
3349 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3350 u32 queue;
3351 u32 prio;
3352
3353 for (queue = 0; queue < tx_queues_count; queue++) {
3354 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3355 continue;
3356
3357 prio = priv->plat->tx_queues_cfg[queue].prio;
3358 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3359 }
3360 }
3361
3362 /**
3363 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3364 * @priv: driver private structure
3365 * Description: It is used for configuring the RX queue routing
3366 */
static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3368 {
3369 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3370 u32 queue;
3371 u8 packet;
3372
3373 for (queue = 0; queue < rx_queues_count; queue++) {
3374 /* no specific packet type routing specified for the queue */
3375 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3376 continue;
3377
3378 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3379 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3380 }
3381 }
3382
static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3384 {
3385 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3386 priv->rss.enable = false;
3387 return;
3388 }
3389
3390 if (priv->dev->features & NETIF_F_RXHASH)
3391 priv->rss.enable = true;
3392 else
3393 priv->rss.enable = false;
3394
3395 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3396 priv->plat->rx_queues_to_use);
3397 }
3398
3399 /**
3400 * stmmac_mtl_configuration - Configure MTL
3401 * @priv: driver private structure
3402 * Description: It is used for configuring the MTL
3403 */
3404 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3405 {
3406 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3407 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3408
3409 if (tx_queues_count > 1)
3410 stmmac_set_tx_queue_weight(priv);
3411
3412 /* Configure MTL RX algorithms */
3413 if (rx_queues_count > 1)
3414 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3415 priv->plat->rx_sched_algorithm);
3416
3417 /* Configure MTL TX algorithms */
3418 if (tx_queues_count > 1)
3419 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3420 priv->plat->tx_sched_algorithm);
3421
3422 /* Configure CBS in AVB TX queues */
3423 if (tx_queues_count > 1)
3424 stmmac_configure_cbs(priv);
3425
3426 /* Map RX MTL to DMA channels */
3427 stmmac_rx_queue_dma_chan_map(priv);
3428
3429 /* Enable MAC RX Queues */
3430 stmmac_mac_enable_rx_queues(priv);
3431
3432 /* Set RX priorities */
3433 if (rx_queues_count > 1)
3434 stmmac_mac_config_rx_queues_prio(priv);
3435
3436 /* Set TX priorities */
3437 if (tx_queues_count > 1)
3438 stmmac_mac_config_tx_queues_prio(priv);
3439
3440 /* Set RX routing */
3441 if (rx_queues_count > 1)
3442 stmmac_mac_config_rx_queues_routing(priv);
3443
3444 /* Receive Side Scaling */
3445 if (rx_queues_count > 1)
3446 stmmac_mac_config_rss(priv);
3447 }
3448
3449 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3450 {
3451 if (priv->dma_cap.asp) {
3452 netdev_info(priv->dev, "Enabling Safety Features\n");
3453 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3454 priv->plat->safety_feat_cfg);
3455 } else {
3456 netdev_info(priv->dev, "No Safety Features support found\n");
3457 }
3458 }
3459
3460 /**
3461 * stmmac_hw_setup - setup mac in a usable state.
3462 * @dev : pointer to the device structure.
3463 * @ptp_register: register PTP if set
3464 * Description:
3465 * this is the main function to set up the HW in a usable state: the
3466 * DMA engine is reset, the core registers are configured (e.g. AXI,
3467 * checksum features, timers) and the DMA is ready to start receiving
3468 * and transmitting.
3469 * Return value:
3470 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3471 * file on failure.
3472 */
3473 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3474 {
3475 struct stmmac_priv *priv = netdev_priv(dev);
3476 u32 rx_cnt = priv->plat->rx_queues_to_use;
3477 u32 tx_cnt = priv->plat->tx_queues_to_use;
3478 bool sph_en;
3479 u32 chan;
3480 int ret;
3481
3482 /* Make sure RX clock is enabled */
3483 if (priv->hw->phylink_pcs)
3484 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3485
3486 /* Note that clk_rx_i must be running for reset to complete. This
3487 * clock may also be required when setting the MAC address.
3488 *
3489 * Block the receive clock stop for LPI mode at the PHY in case
3490 * the link is established with EEE mode active.
3491 */
3492 phylink_rx_clk_stop_block(priv->phylink);
3493
3494 /* DMA initialization and SW reset */
3495 ret = stmmac_init_dma_engine(priv);
3496 if (ret < 0) {
3497 phylink_rx_clk_stop_unblock(priv->phylink);
3498 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3499 __func__);
3500 return ret;
3501 }
3502
3503 /* Copy the MAC addr into the HW */
3504 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3505 phylink_rx_clk_stop_unblock(priv->phylink);
3506
3507 /* PS and related bits will be programmed according to the speed */
3508 if (priv->hw->pcs) {
3509 int speed = priv->plat->mac_port_sel_speed;
3510
3511 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3512 (speed == SPEED_1000)) {
3513 priv->hw->ps = speed;
3514 } else {
3515 dev_warn(priv->device, "invalid port speed\n");
3516 priv->hw->ps = 0;
3517 }
3518 }
3519
3520 /* Initialize the MAC Core */
3521 stmmac_core_init(priv, priv->hw, dev);
3522
3523 /* Initialize MTL */
3524 stmmac_mtl_configuration(priv);
3525
3526 /* Initialize Safety Features */
3527 stmmac_safety_feat_configuration(priv);
3528
3529 ret = stmmac_rx_ipc(priv, priv->hw);
3530 if (!ret) {
3531 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3532 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3533 priv->hw->rx_csum = 0;
3534 }
3535
3536 /* Enable the MAC Rx/Tx */
3537 stmmac_mac_set(priv, priv->ioaddr, true);
3538
3539 /* Set the HW DMA mode and the COE */
3540 stmmac_dma_operation_mode(priv);
3541
3542 stmmac_mmc_setup(priv);
3543
3544 if (ptp_register) {
3545 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3546 if (ret < 0)
3547 netdev_warn(priv->dev,
3548 "failed to enable PTP reference clock: %pe\n",
3549 ERR_PTR(ret));
3550 }
3551
3552 ret = stmmac_init_ptp(priv);
3553 if (ret == -EOPNOTSUPP)
3554 netdev_info(priv->dev, "PTP not supported by HW\n");
3555 else if (ret)
3556 netdev_warn(priv->dev, "PTP init failed\n");
3557 else if (ptp_register)
3558 stmmac_ptp_register(priv);
3559
3560 if (priv->use_riwt) {
3561 u32 queue;
3562
3563 for (queue = 0; queue < rx_cnt; queue++) {
3564 if (!priv->rx_riwt[queue])
3565 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3566
3567 stmmac_rx_watchdog(priv, priv->ioaddr,
3568 priv->rx_riwt[queue], queue);
3569 }
3570 }
3571
3572 if (priv->hw->pcs)
3573 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3574
3575 /* set TX and RX rings length */
3576 stmmac_set_rings_length(priv);
3577
3578 /* Enable TSO */
3579 if (priv->tso) {
3580 for (chan = 0; chan < tx_cnt; chan++) {
3581 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3582
3583 /* TSO and TBS cannot co-exist */
3584 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3585 continue;
3586
3587 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3588 }
3589 }
3590
3591 /* Enable Split Header */
3592 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3593 for (chan = 0; chan < rx_cnt; chan++)
3594 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3595
3596
3597 /* VLAN Tag Insertion */
3598 if (priv->dma_cap.vlins)
3599 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3600
3601 /* TBS */
3602 for (chan = 0; chan < tx_cnt; chan++) {
3603 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3604 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3605
3606 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3607 }
3608
3609 /* Configure real RX and TX queues */
3610 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3611 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3612
3613 /* Start the ball rolling... */
3614 stmmac_start_all_dma(priv);
3615
3616 phylink_rx_clk_stop_block(priv->phylink);
3617 stmmac_set_hw_vlan_mode(priv, priv->hw);
3618 phylink_rx_clk_stop_unblock(priv->phylink);
3619
3620 return 0;
3621 }
3622
3623 static void stmmac_hw_teardown(struct net_device *dev)
3624 {
3625 struct stmmac_priv *priv = netdev_priv(dev);
3626
3627 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3628 }
3629
3630 static void stmmac_free_irq(struct net_device *dev,
3631 enum request_irq_err irq_err, int irq_idx)
3632 {
3633 struct stmmac_priv *priv = netdev_priv(dev);
3634 int j;
3635
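	/* Each case falls through, so a failure at any request stage frees every
	 * IRQ that was already requested; REQ_IRQ_ERR_ALL frees them all.
	 */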
3636 switch (irq_err) {
3637 case REQ_IRQ_ERR_ALL:
3638 irq_idx = priv->plat->tx_queues_to_use;
3639 fallthrough;
3640 case REQ_IRQ_ERR_TX:
3641 for (j = irq_idx - 1; j >= 0; j--) {
3642 if (priv->tx_irq[j] > 0) {
3643 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3644 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3645 }
3646 }
3647 irq_idx = priv->plat->rx_queues_to_use;
3648 fallthrough;
3649 case REQ_IRQ_ERR_RX:
3650 for (j = irq_idx - 1; j >= 0; j--) {
3651 if (priv->rx_irq[j] > 0) {
3652 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3653 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3654 }
3655 }
3656
3657 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3658 free_irq(priv->sfty_ue_irq, dev);
3659 fallthrough;
3660 case REQ_IRQ_ERR_SFTY_UE:
3661 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3662 free_irq(priv->sfty_ce_irq, dev);
3663 fallthrough;
3664 case REQ_IRQ_ERR_SFTY_CE:
3665 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3666 free_irq(priv->lpi_irq, dev);
3667 fallthrough;
3668 case REQ_IRQ_ERR_LPI:
3669 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3670 free_irq(priv->wol_irq, dev);
3671 fallthrough;
3672 case REQ_IRQ_ERR_SFTY:
3673 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3674 free_irq(priv->sfty_irq, dev);
3675 fallthrough;
3676 case REQ_IRQ_ERR_WOL:
3677 free_irq(dev->irq, dev);
3678 fallthrough;
3679 case REQ_IRQ_ERR_MAC:
3680 case REQ_IRQ_ERR_NO:
3681 /* If MAC IRQ request error, no more IRQ to free */
3682 break;
3683 }
3684 }
3685
3686 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3687 {
3688 struct stmmac_priv *priv = netdev_priv(dev);
3689 enum request_irq_err irq_err;
3690 int irq_idx = 0;
3691 char *int_name;
3692 int ret;
3693 int i;
3694
3695 /* For common interrupt */
3696 int_name = priv->int_name_mac;
3697 sprintf(int_name, "%s:%s", dev->name, "mac");
3698 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3699 0, int_name, dev);
3700 if (unlikely(ret < 0)) {
3701 netdev_err(priv->dev,
3702 "%s: alloc mac MSI %d (error: %d)\n",
3703 __func__, dev->irq, ret);
3704 irq_err = REQ_IRQ_ERR_MAC;
3705 goto irq_error;
3706 }
3707
3708 /* Request the Wake IRQ in case another line
3709 * is used for WoL
3710 */
3711 priv->wol_irq_disabled = true;
3712 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3713 int_name = priv->int_name_wol;
3714 sprintf(int_name, "%s:%s", dev->name, "wol");
3715 ret = request_irq(priv->wol_irq,
3716 stmmac_mac_interrupt,
3717 0, int_name, dev);
3718 if (unlikely(ret < 0)) {
3719 netdev_err(priv->dev,
3720 "%s: alloc wol MSI %d (error: %d)\n",
3721 __func__, priv->wol_irq, ret);
3722 irq_err = REQ_IRQ_ERR_WOL;
3723 goto irq_error;
3724 }
3725 }
3726
3727 /* Request the LPI IRQ in case another line
3728 * is used for LPI
3729 */
3730 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3731 int_name = priv->int_name_lpi;
3732 sprintf(int_name, "%s:%s", dev->name, "lpi");
3733 ret = request_irq(priv->lpi_irq,
3734 stmmac_mac_interrupt,
3735 0, int_name, dev);
3736 if (unlikely(ret < 0)) {
3737 netdev_err(priv->dev,
3738 "%s: alloc lpi MSI %d (error: %d)\n",
3739 __func__, priv->lpi_irq, ret);
3740 irq_err = REQ_IRQ_ERR_LPI;
3741 goto irq_error;
3742 }
3743 }
3744
3745 /* Request the common Safety Feature Correctable/Uncorrectable
3746 * Error line in case another line is used
3747 */
3748 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3749 int_name = priv->int_name_sfty;
3750 sprintf(int_name, "%s:%s", dev->name, "safety");
3751 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3752 0, int_name, dev);
3753 if (unlikely(ret < 0)) {
3754 netdev_err(priv->dev,
3755 "%s: alloc sfty MSI %d (error: %d)\n",
3756 __func__, priv->sfty_irq, ret);
3757 irq_err = REQ_IRQ_ERR_SFTY;
3758 goto irq_error;
3759 }
3760 }
3761
3762 /* Request the Safety Feature Correctable Error line in
3763 * case another line is used
3764 */
3765 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3766 int_name = priv->int_name_sfty_ce;
3767 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3768 ret = request_irq(priv->sfty_ce_irq,
3769 stmmac_safety_interrupt,
3770 0, int_name, dev);
3771 if (unlikely(ret < 0)) {
3772 netdev_err(priv->dev,
3773 "%s: alloc sfty ce MSI %d (error: %d)\n",
3774 __func__, priv->sfty_ce_irq, ret);
3775 irq_err = REQ_IRQ_ERR_SFTY_CE;
3776 goto irq_error;
3777 }
3778 }
3779
3780 /* Request the Safety Feature Uncorrectable Error line in
3781 * case another line is used
3782 */
3783 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3784 int_name = priv->int_name_sfty_ue;
3785 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3786 ret = request_irq(priv->sfty_ue_irq,
3787 stmmac_safety_interrupt,
3788 0, int_name, dev);
3789 if (unlikely(ret < 0)) {
3790 netdev_err(priv->dev,
3791 "%s: alloc sfty ue MSI %d (error: %d)\n",
3792 __func__, priv->sfty_ue_irq, ret);
3793 irq_err = REQ_IRQ_ERR_SFTY_UE;
3794 goto irq_error;
3795 }
3796 }
3797
3798 /* Request Rx MSI irq */
3799 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3800 if (i >= MTL_MAX_RX_QUEUES)
3801 break;
3802 if (priv->rx_irq[i] == 0)
3803 continue;
3804
3805 int_name = priv->int_name_rx_irq[i];
3806 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3807 ret = request_irq(priv->rx_irq[i],
3808 stmmac_msi_intr_rx,
3809 0, int_name, &priv->dma_conf.rx_queue[i]);
3810 if (unlikely(ret < 0)) {
3811 netdev_err(priv->dev,
3812 "%s: alloc rx-%d MSI %d (error: %d)\n",
3813 __func__, i, priv->rx_irq[i], ret);
3814 irq_err = REQ_IRQ_ERR_RX;
3815 irq_idx = i;
3816 goto irq_error;
3817 }
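		/* Spread per-queue RX interrupts across the online CPUs
		 * (round-robin affinity hint).
		 */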
3818 irq_set_affinity_hint(priv->rx_irq[i],
3819 cpumask_of(i % num_online_cpus()));
3820 }
3821
3822 /* Request Tx MSI irq */
3823 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3824 if (i >= MTL_MAX_TX_QUEUES)
3825 break;
3826 if (priv->tx_irq[i] == 0)
3827 continue;
3828
3829 int_name = priv->int_name_tx_irq[i];
3830 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3831 ret = request_irq(priv->tx_irq[i],
3832 stmmac_msi_intr_tx,
3833 0, int_name, &priv->dma_conf.tx_queue[i]);
3834 if (unlikely(ret < 0)) {
3835 netdev_err(priv->dev,
3836 "%s: alloc tx-%d MSI %d (error: %d)\n",
3837 __func__, i, priv->tx_irq[i], ret);
3838 irq_err = REQ_IRQ_ERR_TX;
3839 irq_idx = i;
3840 goto irq_error;
3841 }
3842 irq_set_affinity_hint(priv->tx_irq[i],
3843 cpumask_of(i % num_online_cpus()));
3844 }
3845
3846 return 0;
3847
3848 irq_error:
3849 stmmac_free_irq(dev, irq_err, irq_idx);
3850 return ret;
3851 }
3852
3853 static int stmmac_request_irq_single(struct net_device *dev)
3854 {
3855 struct stmmac_priv *priv = netdev_priv(dev);
3856 enum request_irq_err irq_err;
3857 int ret;
3858
3859 ret = request_irq(dev->irq, stmmac_interrupt,
3860 IRQF_SHARED, dev->name, dev);
3861 if (unlikely(ret < 0)) {
3862 netdev_err(priv->dev,
3863 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3864 __func__, dev->irq, ret);
3865 irq_err = REQ_IRQ_ERR_MAC;
3866 goto irq_error;
3867 }
3868
3869 /* Request the Wake IRQ in case another line
3870 * is used for WoL
3871 */
3872 priv->wol_irq_disabled = true;
3873 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3874 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3875 IRQF_SHARED, dev->name, dev);
3876 if (unlikely(ret < 0)) {
3877 netdev_err(priv->dev,
3878 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3879 __func__, priv->wol_irq, ret);
3880 irq_err = REQ_IRQ_ERR_WOL;
3881 goto irq_error;
3882 }
3883 }
3884
3885 /* Request the IRQ lines */
3886 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3887 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3888 IRQF_SHARED, dev->name, dev);
3889 if (unlikely(ret < 0)) {
3890 netdev_err(priv->dev,
3891 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3892 __func__, priv->lpi_irq, ret);
3893 irq_err = REQ_IRQ_ERR_LPI;
3894 goto irq_error;
3895 }
3896 }
3897
3898 /* Request the common Safety Feature Correctable/Uncorrectable
3899 * Error line in case another line is used
3900 */
3901 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3902 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3903 IRQF_SHARED, dev->name, dev);
3904 if (unlikely(ret < 0)) {
3905 netdev_err(priv->dev,
3906 "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3907 __func__, priv->sfty_irq, ret);
3908 irq_err = REQ_IRQ_ERR_SFTY;
3909 goto irq_error;
3910 }
3911 }
3912
3913 return 0;
3914
3915 irq_error:
3916 stmmac_free_irq(dev, irq_err, 0);
3917 return ret;
3918 }
3919
3920 static int stmmac_request_irq(struct net_device *dev)
3921 {
3922 struct stmmac_priv *priv = netdev_priv(dev);
3923 int ret;
3924
3925 /* Request the IRQ lines */
3926 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3927 ret = stmmac_request_irq_multi_msi(dev);
3928 else
3929 ret = stmmac_request_irq_single(dev);
3930
3931 return ret;
3932 }
3933
3934 /**
3935 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3936 * @priv: driver private structure
3937 * @mtu: MTU to setup the dma queue and buf with
3938 * Description: Allocate and generate a dma_conf based on the provided MTU.
3939 * Allocate the Tx/Rx DMA queue and init them.
3940 * Return value:
3941 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3942 */
3943 static struct stmmac_dma_conf *
3944 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3945 {
3946 struct stmmac_dma_conf *dma_conf;
3947 int chan, bfsize, ret;
3948
3949 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3950 if (!dma_conf) {
3951 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3952 __func__);
3953 return ERR_PTR(-ENOMEM);
3954 }
3955
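	/* Pick the DMA buffer size: 16KiB when the ring layout calls for it at
	 * this MTU, otherwise a size derived from the MTU.
	 */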
3956 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3957 if (bfsize < 0)
3958 bfsize = 0;
3959
3960 if (bfsize < BUF_SIZE_16KiB)
3961 bfsize = stmmac_set_bfsize(mtu, 0);
3962
3963 dma_conf->dma_buf_sz = bfsize;
3964 /* Choose the tx/rx ring sizes from the ones already defined in the
3965 * priv struct (if defined).
3966 */
3967 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3968 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3969
3970 if (!dma_conf->dma_tx_size)
3971 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3972 if (!dma_conf->dma_rx_size)
3973 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3974
3975 /* Earlier check for TBS */
3976 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3977 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3978 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3979
3980 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3981 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3982 }
3983
3984 ret = alloc_dma_desc_resources(priv, dma_conf);
3985 if (ret < 0) {
3986 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3987 __func__);
3988 goto alloc_error;
3989 }
3990
3991 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3992 if (ret < 0) {
3993 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3994 __func__);
3995 goto init_error;
3996 }
3997
3998 return dma_conf;
3999
4000 init_error:
4001 free_dma_desc_resources(priv, dma_conf);
4002 alloc_error:
4003 kfree(dma_conf);
4004 return ERR_PTR(ret);
4005 }
4006
4007 /**
4008 * __stmmac_open - open entry point of the driver
4009 * @dev : pointer to the device structure.
4010 * @dma_conf : structure to take the dma data
4011 * Description:
4012 * This function is the open entry point of the driver.
4013 * Return value:
4014 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4015 * file on failure.
4016 */
4017 static int __stmmac_open(struct net_device *dev,
4018 struct stmmac_dma_conf *dma_conf)
4019 {
4020 struct stmmac_priv *priv = netdev_priv(dev);
4021 int mode = priv->plat->phy_interface;
4022 u32 chan;
4023 int ret;
4024
4025 /* Initialise the tx lpi timer, converting from msec to usec */
4026 if (!priv->tx_lpi_timer)
4027 priv->tx_lpi_timer = eee_timer * 1000;
4028
4029 ret = pm_runtime_resume_and_get(priv->device);
4030 if (ret < 0)
4031 return ret;
4032
4033 if ((!priv->hw->xpcs ||
4034 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
4035 ret = stmmac_init_phy(dev);
4036 if (ret) {
4037 netdev_err(priv->dev,
4038 "%s: Cannot attach to PHY (error: %d)\n",
4039 __func__, ret);
4040 goto init_phy_error;
4041 }
4042 }
4043
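	/* Carry the per-queue TBS enable state over into the new DMA
	 * configuration so that ETF offload is not silently dropped.
	 */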
4044 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
4045 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4046 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4047 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4048
4049 stmmac_reset_queues_param(priv);
4050
4051 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4052 priv->plat->serdes_powerup) {
4053 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4054 if (ret < 0) {
4055 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4056 __func__);
4057 goto init_error;
4058 }
4059 }
4060
4061 ret = stmmac_hw_setup(dev, true);
4062 if (ret < 0) {
4063 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4064 goto init_error;
4065 }
4066
4067 stmmac_init_coalesce(priv);
4068
4069 phylink_start(priv->phylink);
4070 /* We may have called phylink_speed_down before */
4071 phylink_speed_up(priv->phylink);
4072
4073 ret = stmmac_request_irq(dev);
4074 if (ret)
4075 goto irq_error;
4076
4077 stmmac_enable_all_queues(priv);
4078 netif_tx_start_all_queues(priv->dev);
4079 stmmac_enable_all_dma_irq(priv);
4080
4081 return 0;
4082
4083 irq_error:
4084 phylink_stop(priv->phylink);
4085
4086 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4087 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4088
4089 stmmac_hw_teardown(dev);
4090 init_error:
4091 phylink_disconnect_phy(priv->phylink);
4092 init_phy_error:
4093 pm_runtime_put(priv->device);
4094 return ret;
4095 }
4096
4097 static int stmmac_open(struct net_device *dev)
4098 {
4099 struct stmmac_priv *priv = netdev_priv(dev);
4100 struct stmmac_dma_conf *dma_conf;
4101 int ret;
4102
4103 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4104 if (IS_ERR(dma_conf))
4105 return PTR_ERR(dma_conf);
4106
4107 ret = __stmmac_open(dev, dma_conf);
4108 if (ret)
4109 free_dma_desc_resources(priv, dma_conf);
4110
4111 kfree(dma_conf);
4112 return ret;
4113 }
4114
4115 /**
4116 * stmmac_release - close entry point of the driver
4117 * @dev : device pointer.
4118 * Description:
4119 * This is the stop entry point of the driver.
4120 */
4121 static int stmmac_release(struct net_device *dev)
4122 {
4123 struct stmmac_priv *priv = netdev_priv(dev);
4124 u32 chan;
4125
4126 if (device_may_wakeup(priv->device))
4127 phylink_speed_down(priv->phylink, false);
4128 /* Stop and disconnect the PHY */
4129 phylink_stop(priv->phylink);
4130 phylink_disconnect_phy(priv->phylink);
4131
4132 stmmac_disable_all_queues(priv);
4133
4134 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4135 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4136
4137 netif_tx_disable(dev);
4138
4139 /* Free the IRQ lines */
4140 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4141
4142 /* Stop TX/RX DMA and clear the descriptors */
4143 stmmac_stop_all_dma(priv);
4144
4145 /* Release and free the Rx/Tx resources */
4146 free_dma_desc_resources(priv, &priv->dma_conf);
4147
4148 /* Powerdown Serdes if there is */
4149 if (priv->plat->serdes_powerdown)
4150 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4151
4152 stmmac_release_ptp(priv);
4153
4154 if (stmmac_fpe_supported(priv))
4155 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
4156
4157 pm_runtime_put(priv->device);
4158
4159 return 0;
4160 }
4161
4162 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4163 struct stmmac_tx_queue *tx_q)
4164 {
4165 u16 tag = 0x0, inner_tag = 0x0;
4166 u32 inner_type = 0x0;
4167 struct dma_desc *p;
4168
4169 if (!priv->dma_cap.vlins)
4170 return false;
4171 if (!skb_vlan_tag_present(skb))
4172 return false;
4173 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4174 inner_tag = skb_vlan_tag_get(skb);
4175 inner_type = STMMAC_VLAN_INSERT;
4176 }
4177
4178 tag = skb_vlan_tag_get(skb);
4179
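	/* The tag(s) are programmed into a context descriptor that precedes the
	 * data descriptors of this frame.
	 */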
4180 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4181 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4182 else
4183 p = &tx_q->dma_tx[tx_q->cur_tx];
4184
4185 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4186 return false;
4187
4188 stmmac_set_tx_owner(priv, p);
4189 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4190 return true;
4191 }
4192
4193 /**
4194 * stmmac_tso_allocator - Allocate and fill descriptors for a TSO buffer
4195 * @priv: driver private structure
4196 * @des: buffer start address
4197 * @total_len: total length to fill in descriptors
4198 * @last_segment: condition for the last descriptor
4199 * @queue: TX queue index
4200 * Description:
4201 * This function fills descriptors, requesting new ones as needed, according
4202 * to the buffer length to fill.
4203 */
4204 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4205 int total_len, bool last_segment, u32 queue)
4206 {
4207 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4208 struct dma_desc *desc;
4209 u32 buff_size;
4210 int tmp_len;
4211
4212 tmp_len = total_len;
4213
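	/* Chop the buffer into chunks of at most TSO_MAX_BUFF_SIZE bytes,
	 * one descriptor per chunk.
	 */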
4214 while (tmp_len > 0) {
4215 dma_addr_t curr_addr;
4216
4217 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4218 priv->dma_conf.dma_tx_size);
4219 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4220
4221 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4222 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4223 else
4224 desc = &tx_q->dma_tx[tx_q->cur_tx];
4225
4226 curr_addr = des + (total_len - tmp_len);
4227 stmmac_set_desc_addr(priv, desc, curr_addr);
4228 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4229 TSO_MAX_BUFF_SIZE : tmp_len;
4230
4231 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4232 0, 1,
4233 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4234 0, 0);
4235
4236 tmp_len -= TSO_MAX_BUFF_SIZE;
4237 }
4238 }
4239
4240 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4241 {
4242 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4243 int desc_size;
4244
4245 if (likely(priv->extend_desc))
4246 desc_size = sizeof(struct dma_extended_desc);
4247 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4248 desc_size = sizeof(struct dma_edesc);
4249 else
4250 desc_size = sizeof(struct dma_desc);
4251
4252 /* The own bit must be the latest setting done when prepare the
4253 * descriptor and then barrier is needed to make sure that
4254 * all is coherent before granting the DMA engine.
4255 */
4256 wmb();
4257
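	/* Advancing the tail pointer tells the DMA engine that new descriptors
	 * are ready to be fetched.
	 */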
4258 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4259 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4260 }
4261
4262 /**
4263 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4264 * @skb : the socket buffer
4265 * @dev : device pointer
4266 * Description: this is the transmit function that is called on TSO frames
4267 * (support available on GMAC4 and newer chips).
4268 * The diagram below shows the ring programming in case of TSO frames:
4269 *
4270 * First Descriptor
4271 * --------
4272 * | DES0 |---> buffer1 = L2/L3/L4 header
4273 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4274 * | | width is 32-bit, but we never use it.
4275 * | | Also can be used as the most-significant 8-bits or 16-bits of
4276 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4277 * | | or 48-bit, and we always use it.
4278 * | DES2 |---> buffer1 len
4279 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4280 * --------
4281 * --------
4282 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4283 * | DES1 |---> same as the First Descriptor
4284 * | DES2 |---> buffer1 len
4285 * | DES3 |
4286 * --------
4287 * |
4288 * ...
4289 * |
4290 * --------
4291 * | DES0 |---> buffer1 = Split TCP Payload
4292 * | DES1 |---> same as the First Descriptor
4293 * | DES2 |---> buffer1 len
4294 * | DES3 |
4295 * --------
4296 *
4297 * MSS is fixed while TSO is enabled, so the TDES3 ctx field need not be reprogrammed per frame.
4298 */
4299 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4300 {
4301 struct dma_desc *desc, *first, *mss_desc = NULL;
4302 struct stmmac_priv *priv = netdev_priv(dev);
4303 unsigned int first_entry, tx_packets;
4304 struct stmmac_txq_stats *txq_stats;
4305 struct stmmac_tx_queue *tx_q;
4306 u32 pay_len, mss, queue;
4307 int i, first_tx, nfrags;
4308 u8 proto_hdr_len, hdr;
4309 dma_addr_t des;
4310 bool set_ic;
4311
4312 /* Always insert VLAN tag to SKB payload for TSO frames.
4313 *
4314 * Never insert the VLAN tag by HW, since segments split by the
4315 * TSO engine would otherwise be un-tagged by mistake.
4316 */
4317 if (skb_vlan_tag_present(skb)) {
4318 skb = __vlan_hwaccel_push_inside(skb);
4319 if (unlikely(!skb)) {
4320 priv->xstats.tx_dropped++;
4321 return NETDEV_TX_OK;
4322 }
4323 }
4324
4325 nfrags = skb_shinfo(skb)->nr_frags;
4326 queue = skb_get_queue_mapping(skb);
4327
4328 tx_q = &priv->dma_conf.tx_queue[queue];
4329 txq_stats = &priv->xstats.txq_stats[queue];
4330 first_tx = tx_q->cur_tx;
4331
4332 /* Compute header lengths */
4333 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4334 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4335 hdr = sizeof(struct udphdr);
4336 } else {
4337 proto_hdr_len = skb_tcp_all_headers(skb);
4338 hdr = tcp_hdrlen(skb);
4339 }
4340
4341 /* Desc availability based on threshold should be enough safe */
4342 if (unlikely(stmmac_tx_avail(priv, queue) <
4343 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4344 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4345 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4346 queue));
4347 /* This is a hard error, log it. */
4348 netdev_err(priv->dev,
4349 "%s: Tx Ring full when queue awake\n",
4350 __func__);
4351 }
4352 return NETDEV_TX_BUSY;
4353 }
4354
4355 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4356
4357 mss = skb_shinfo(skb)->gso_size;
4358
4359 /* set new MSS value if needed */
4360 if (mss != tx_q->mss) {
4361 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4362 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4363 else
4364 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4365
4366 stmmac_set_mss(priv, mss_desc, mss);
4367 tx_q->mss = mss;
4368 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4369 priv->dma_conf.dma_tx_size);
4370 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4371 }
4372
4373 if (netif_msg_tx_queued(priv)) {
4374 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4375 __func__, hdr, proto_hdr_len, pay_len, mss);
4376 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4377 skb->data_len);
4378 }
4379
4380 first_entry = tx_q->cur_tx;
4381 WARN_ON(tx_q->tx_skbuff[first_entry]);
4382
4383 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4384 desc = &tx_q->dma_entx[first_entry].basic;
4385 else
4386 desc = &tx_q->dma_tx[first_entry];
4387 first = desc;
4388
4389 /* first descriptor: fill Headers on Buf1 */
4390 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4391 DMA_TO_DEVICE);
4392 if (dma_mapping_error(priv->device, des))
4393 goto dma_map_err;
4394
4395 stmmac_set_desc_addr(priv, first, des);
4396 stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4397 (nfrags == 0), queue);
4398
4399 /* In case two or more DMA transmit descriptors are allocated for this
4400 * non-paged SKB data, the DMA buffer address should be saved to
4401 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4402 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4403 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4404 * since the tail areas of the DMA buffer can be accessed by DMA engine
4405 * sooner or later.
4406 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4407 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4408 * this DMA buffer right after the DMA engine completely finishes the
4409 * full buffer transmission.
4410 */
4411 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4412 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4413 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4414 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4415
4416 /* Prepare fragments */
4417 for (i = 0; i < nfrags; i++) {
4418 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4419
4420 des = skb_frag_dma_map(priv->device, frag, 0,
4421 skb_frag_size(frag),
4422 DMA_TO_DEVICE);
4423 if (dma_mapping_error(priv->device, des))
4424 goto dma_map_err;
4425
4426 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4427 (i == nfrags - 1), queue);
4428
4429 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4430 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4431 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4432 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4433 }
4434
4435 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4436
4437 /* Only the last descriptor gets to point to the skb. */
4438 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4439 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4440
4441 /* Manage tx mitigation */
4442 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4443 tx_q->tx_count_frames += tx_packets;
4444
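	/* Raise a completion interrupt for HW-timestamped frames, or once the
	 * TX frame coalescing threshold is crossed; otherwise completion is
	 * left to the TX coalescing timer.
	 */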
4445 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4446 set_ic = true;
4447 else if (!priv->tx_coal_frames[queue])
4448 set_ic = false;
4449 else if (tx_packets > priv->tx_coal_frames[queue])
4450 set_ic = true;
4451 else if ((tx_q->tx_count_frames %
4452 priv->tx_coal_frames[queue]) < tx_packets)
4453 set_ic = true;
4454 else
4455 set_ic = false;
4456
4457 if (set_ic) {
4458 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4459 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4460 else
4461 desc = &tx_q->dma_tx[tx_q->cur_tx];
4462
4463 tx_q->tx_count_frames = 0;
4464 stmmac_set_tx_ic(priv, desc);
4465 }
4466
4467 /* We've used all descriptors we need for this skb, however,
4468 * advance cur_tx so that it references a fresh descriptor.
4469 * ndo_start_xmit will fill this descriptor the next time it's
4470 * called and stmmac_tx_clean may clean up to this descriptor.
4471 */
4472 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4473
4474 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4475 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4476 __func__);
4477 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4478 }
4479
4480 u64_stats_update_begin(&txq_stats->q_syncp);
4481 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4482 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4483 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4484 if (set_ic)
4485 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4486 u64_stats_update_end(&txq_stats->q_syncp);
4487
4488 if (priv->sarc_type)
4489 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4490
4491 skb_tx_timestamp(skb);
4492
4493 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4494 priv->hwts_tx_en)) {
4495 /* declare that device is doing timestamping */
4496 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4497 stmmac_enable_tx_timestamp(priv, first);
4498 }
4499
4500 /* Complete the first descriptor before granting the DMA */
4501 stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4502 tx_q->tx_skbuff_dma[first_entry].last_segment,
4503 hdr / 4, (skb->len - proto_hdr_len));
4504
4505 /* If context desc is used to change MSS */
4506 if (mss_desc) {
4507 /* Make sure that first descriptor has been completely
4508 * written, including its own bit. This is because MSS is
4509 * actually before first descriptor, so we need to make
4510 * sure that MSS's own bit is the last thing written.
4511 */
4512 dma_wmb();
4513 stmmac_set_tx_owner(priv, mss_desc);
4514 }
4515
4516 if (netif_msg_pktdata(priv)) {
4517 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4518 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4519 tx_q->cur_tx, first, nfrags);
4520 pr_info(">>> frame to be transmitted: ");
4521 print_pkt(skb->data, skb_headlen(skb));
4522 }
4523
4524 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4525
4526 stmmac_flush_tx_descriptors(priv, queue);
4527 stmmac_tx_timer_arm(priv, queue);
4528
4529 return NETDEV_TX_OK;
4530
4531 dma_map_err:
4532 dev_err(priv->device, "Tx dma map failed\n");
4533 dev_kfree_skb(skb);
4534 priv->xstats.tx_dropped++;
4535 return NETDEV_TX_OK;
4536 }
4537
4538 /**
4539 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4540 * @skb: socket buffer to check
4541 *
4542 * Check if a packet has an ethertype that will trigger the IP header checks
4543 * and IP/TCP checksum engine of the stmmac core.
4544 *
4545 * Return: true if the ethertype can trigger the checksum engine, false
4546 * otherwise
4547 */
4548 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4549 {
4550 int depth = 0;
4551 __be16 proto;
4552
4553 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4554 &depth);
4555
4556 return (depth <= ETH_HLEN) &&
4557 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4558 }
4559
4560 /**
4561 * stmmac_xmit - Tx entry point of the driver
4562 * @skb : the socket buffer
4563 * @dev : device pointer
4564 * Description : this is the tx entry point of the driver.
4565 * It programs the chain or the ring and supports oversized frames
4566 * and SG feature.
4567 */
4568 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4569 {
4570 unsigned int first_entry, tx_packets, enh_desc;
4571 struct stmmac_priv *priv = netdev_priv(dev);
4572 unsigned int nopaged_len = skb_headlen(skb);
4573 int i, csum_insertion = 0, is_jumbo = 0;
4574 u32 queue = skb_get_queue_mapping(skb);
4575 int nfrags = skb_shinfo(skb)->nr_frags;
4576 int gso = skb_shinfo(skb)->gso_type;
4577 struct stmmac_txq_stats *txq_stats;
4578 struct dma_edesc *tbs_desc = NULL;
4579 struct dma_desc *desc, *first;
4580 struct stmmac_tx_queue *tx_q;
4581 bool has_vlan, set_ic;
4582 int entry, first_tx;
4583 dma_addr_t des;
4584
4585 tx_q = &priv->dma_conf.tx_queue[queue];
4586 txq_stats = &priv->xstats.txq_stats[queue];
4587 first_tx = tx_q->cur_tx;
4588
4589 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4590 stmmac_stop_sw_lpi(priv);
4591
4592 /* Manage oversized TCP frames for GMAC4 device */
4593 if (skb_is_gso(skb) && priv->tso) {
4594 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4595 return stmmac_tso_xmit(skb, dev);
4596 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4597 return stmmac_tso_xmit(skb, dev);
4598 }
4599
4600 if (priv->est && priv->est->enable &&
4601 priv->est->max_sdu[queue] &&
4602 skb->len > priv->est->max_sdu[queue]) {
4603 priv->xstats.max_sdu_txq_drop[queue]++;
4604 goto max_sdu_err;
4605 }
4606
4607 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4608 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4609 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4610 queue));
4611 /* This is a hard error, log it. */
4612 netdev_err(priv->dev,
4613 "%s: Tx Ring full when queue awake\n",
4614 __func__);
4615 }
4616 return NETDEV_TX_BUSY;
4617 }
4618
4619 /* Check if VLAN can be inserted by HW */
4620 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4621
4622 entry = tx_q->cur_tx;
4623 first_entry = entry;
4624 WARN_ON(tx_q->tx_skbuff[first_entry]);
4625
4626 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4627 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4628 * queues. In that case, checksum offloading for those queues that don't
4629 * support tx coe needs to fall back to software checksum calculation.
4630 *
4631 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4632 * also have to be checksummed in software.
4633 */
4634 if (csum_insertion &&
4635 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4636 !stmmac_has_ip_ethertype(skb))) {
4637 if (unlikely(skb_checksum_help(skb)))
4638 goto dma_map_err;
4639 csum_insertion = !csum_insertion;
4640 }
4641
4642 if (likely(priv->extend_desc))
4643 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4644 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4645 desc = &tx_q->dma_entx[entry].basic;
4646 else
4647 desc = tx_q->dma_tx + entry;
4648
4649 first = desc;
4650
4651 if (has_vlan)
4652 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4653
4654 enh_desc = priv->plat->enh_desc;
4655 /* To program the descriptors according to the size of the frame */
4656 if (enh_desc)
4657 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4658
4659 if (unlikely(is_jumbo)) {
4660 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4661 if (unlikely(entry < 0) && (entry != -EINVAL))
4662 goto dma_map_err;
4663 }
4664
4665 for (i = 0; i < nfrags; i++) {
4666 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4667 int len = skb_frag_size(frag);
4668 bool last_segment = (i == (nfrags - 1));
4669
4670 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4671 WARN_ON(tx_q->tx_skbuff[entry]);
4672
4673 if (likely(priv->extend_desc))
4674 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4675 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4676 desc = &tx_q->dma_entx[entry].basic;
4677 else
4678 desc = tx_q->dma_tx + entry;
4679
4680 des = skb_frag_dma_map(priv->device, frag, 0, len,
4681 DMA_TO_DEVICE);
4682 if (dma_mapping_error(priv->device, des))
4683 goto dma_map_err; /* should reuse desc w/o issues */
4684
4685 tx_q->tx_skbuff_dma[entry].buf = des;
4686
4687 stmmac_set_desc_addr(priv, desc, des);
4688
4689 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4690 tx_q->tx_skbuff_dma[entry].len = len;
4691 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4692 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4693
4694 /* Prepare the descriptor and set the own bit too */
4695 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4696 priv->mode, 1, last_segment, skb->len);
4697 }
4698
4699 /* Only the last descriptor gets to point to the skb. */
4700 tx_q->tx_skbuff[entry] = skb;
4701 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4702
4703 /* According to the coalesce parameter the IC bit for the latest
4704 * segment is reset and the timer re-started to clean the tx status.
4705 * This approach takes care of the fragments: desc is the first
4706 * element in case of no SG.
4707 */
4708 tx_packets = (entry + 1) - first_tx;
4709 tx_q->tx_count_frames += tx_packets;
4710
4711 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4712 set_ic = true;
4713 else if (!priv->tx_coal_frames[queue])
4714 set_ic = false;
4715 else if (tx_packets > priv->tx_coal_frames[queue])
4716 set_ic = true;
4717 else if ((tx_q->tx_count_frames %
4718 priv->tx_coal_frames[queue]) < tx_packets)
4719 set_ic = true;
4720 else
4721 set_ic = false;
4722
4723 if (set_ic) {
4724 if (likely(priv->extend_desc))
4725 desc = &tx_q->dma_etx[entry].basic;
4726 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4727 desc = &tx_q->dma_entx[entry].basic;
4728 else
4729 desc = &tx_q->dma_tx[entry];
4730
4731 tx_q->tx_count_frames = 0;
4732 stmmac_set_tx_ic(priv, desc);
4733 }
4734
4735 /* We've used all descriptors we need for this skb, however,
4736 * advance cur_tx so that it references a fresh descriptor.
4737 * ndo_start_xmit will fill this descriptor the next time it's
4738 * called and stmmac_tx_clean may clean up to this descriptor.
4739 */
4740 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4741 tx_q->cur_tx = entry;
4742
4743 if (netif_msg_pktdata(priv)) {
4744 netdev_dbg(priv->dev,
4745 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4746 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4747 entry, first, nfrags);
4748
4749 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4750 print_pkt(skb->data, skb->len);
4751 }
4752
4753 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4754 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4755 __func__);
4756 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4757 }
4758
4759 u64_stats_update_begin(&txq_stats->q_syncp);
4760 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4761 if (set_ic)
4762 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4763 u64_stats_update_end(&txq_stats->q_syncp);
4764
4765 if (priv->sarc_type)
4766 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4767
4768 skb_tx_timestamp(skb);
4769
4770 /* Ready to fill the first descriptor and set the OWN bit w/o any
4771 * problems because all the descriptors are actually ready to be
4772 * passed to the DMA engine.
4773 */
4774 if (likely(!is_jumbo)) {
4775 bool last_segment = (nfrags == 0);
4776
4777 des = dma_map_single(priv->device, skb->data,
4778 nopaged_len, DMA_TO_DEVICE);
4779 if (dma_mapping_error(priv->device, des))
4780 goto dma_map_err;
4781
4782 tx_q->tx_skbuff_dma[first_entry].buf = des;
4783 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4784 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4785
4786 stmmac_set_desc_addr(priv, first, des);
4787
4788 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4789 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4790
4791 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4792 priv->hwts_tx_en)) {
4793 /* declare that device is doing timestamping */
4794 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4795 stmmac_enable_tx_timestamp(priv, first);
4796 }
4797
4798 /* Prepare the first descriptor setting the OWN bit too */
4799 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4800 csum_insertion, priv->mode, 0, last_segment,
4801 skb->len);
4802 }
4803
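	/* For TBS-enabled queues, program the launch time from skb->tstamp
	 * into the enhanced descriptor.
	 */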
4804 if (tx_q->tbs & STMMAC_TBS_EN) {
4805 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4806
4807 tbs_desc = &tx_q->dma_entx[first_entry];
4808 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4809 }
4810
4811 stmmac_set_tx_owner(priv, first);
4812
4813 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4814
4815 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4816
4817 stmmac_flush_tx_descriptors(priv, queue);
4818 stmmac_tx_timer_arm(priv, queue);
4819
4820 return NETDEV_TX_OK;
4821
4822 dma_map_err:
4823 netdev_err(priv->dev, "Tx DMA map failed\n");
4824 max_sdu_err:
4825 dev_kfree_skb(skb);
4826 priv->xstats.tx_dropped++;
4827 return NETDEV_TX_OK;
4828 }
4829
4830 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4831 {
4832 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4833 __be16 vlan_proto = veth->h_vlan_proto;
4834 u16 vlanid;
4835
4836 if ((vlan_proto == htons(ETH_P_8021Q) &&
4837 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4838 (vlan_proto == htons(ETH_P_8021AD) &&
4839 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4840 /* pop the vlan tag */
4841 vlanid = ntohs(veth->h_vlan_TCI);
4842 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4843 skb_pull(skb, VLAN_HLEN);
4844 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4845 }
4846 }
4847
4848 /**
4849 * stmmac_rx_refill - refill the RX queue with fresh buffers
4850 * @priv: driver private structure
4851 * @queue: RX queue index
4852 * Description: reallocate RX buffers from the page pool for the
4853 * descriptors that have been consumed by the reception process.
4854 */
4855 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4856 {
4857 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4858 int dirty = stmmac_rx_dirty(priv, queue);
4859 unsigned int entry = rx_q->dirty_rx;
4860 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4861
4862 if (priv->dma_cap.host_dma_width <= 32)
4863 gfp |= GFP_DMA32;
4864
4865 while (dirty-- > 0) {
4866 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4867 struct dma_desc *p;
4868 bool use_rx_wd;
4869
4870 if (priv->extend_desc)
4871 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4872 else
4873 p = rx_q->dma_rx + entry;
4874
4875 if (!buf->page) {
4876 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4877 if (!buf->page)
4878 break;
4879 }
4880
4881 if (priv->sph && !buf->sec_page) {
4882 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4883 if (!buf->sec_page)
4884 break;
4885
4886 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4887 }
4888
4889 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4890
4891 stmmac_set_desc_addr(priv, p, buf->addr);
4892 if (priv->sph)
4893 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4894 else
4895 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4896 stmmac_refill_desc3(priv, rx_q, p);
4897
4898 rx_q->rx_count_frames++;
4899 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4900 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4901 rx_q->rx_count_frames = 0;
4902
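		/* When RIWT is in use, avoid raising an interrupt for every
		 * refilled descriptor and let the RX watchdog/coalescing decide.
		 */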
4903 use_rx_wd = !priv->rx_coal_frames[queue];
4904 use_rx_wd |= rx_q->rx_count_frames > 0;
4905 if (!priv->use_riwt)
4906 use_rx_wd = false;
4907
4908 dma_wmb();
4909 stmmac_set_rx_owner(priv, p, use_rx_wd);
4910
4911 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4912 }
4913 rx_q->dirty_rx = entry;
4914 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4915 (rx_q->dirty_rx * sizeof(struct dma_desc));
4916 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4917 }
4918
4919 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4920 struct dma_desc *p,
4921 int status, unsigned int len)
4922 {
4923 unsigned int plen = 0, hlen = 0;
4924 int coe = priv->hw->rx_csum;
4925
4926 /* Not first descriptor, buffer is always zero */
4927 if (priv->sph && len)
4928 return 0;
4929
4930 /* First descriptor, get split header length */
4931 stmmac_get_rx_header_len(priv, p, &hlen);
4932 if (priv->sph && hlen) {
4933 priv->xstats.rx_split_hdr_pkt_n++;
4934 return hlen;
4935 }
4936
4937 /* First descriptor, not last descriptor and not split header */
4938 if (status & rx_not_ls)
4939 return priv->dma_conf.dma_buf_sz;
4940
4941 plen = stmmac_get_rx_frame_len(priv, p, coe);
4942
4943 /* First descriptor and last descriptor and not split header */
4944 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4945 }
4946
4947 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4948 struct dma_desc *p,
4949 int status, unsigned int len)
4950 {
4951 int coe = priv->hw->rx_csum;
4952 unsigned int plen = 0;
4953
4954 /* Not split header, buffer is not available */
4955 if (!priv->sph)
4956 return 0;
4957
4958 /* Not last descriptor */
4959 if (status & rx_not_ls)
4960 return priv->dma_conf.dma_buf_sz;
4961
4962 plen = stmmac_get_rx_frame_len(priv, p, coe);
4963
4964 /* Last descriptor */
4965 return plen - len;
4966 }
4967
4968 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4969 struct xdp_frame *xdpf, bool dma_map)
4970 {
4971 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4972 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4973 unsigned int entry = tx_q->cur_tx;
4974 struct dma_desc *tx_desc;
4975 dma_addr_t dma_addr;
4976 bool set_ic;
4977
4978 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4979 return STMMAC_XDP_CONSUMED;
4980
4981 if (priv->est && priv->est->enable &&
4982 priv->est->max_sdu[queue] &&
4983 xdpf->len > priv->est->max_sdu[queue]) {
4984 priv->xstats.max_sdu_txq_drop[queue]++;
4985 return STMMAC_XDP_CONSUMED;
4986 }
4987
4988 if (likely(priv->extend_desc))
4989 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4990 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4991 tx_desc = &tx_q->dma_entx[entry].basic;
4992 else
4993 tx_desc = tx_q->dma_tx + entry;
4994
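	/* Frames from ndo_xdp_xmit (dma_map == true) own their memory and must be
	 * DMA-mapped here; XDP_TX frames already live in the RX page pool, so a
	 * DMA sync is sufficient.
	 */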
4995 if (dma_map) {
4996 dma_addr = dma_map_single(priv->device, xdpf->data,
4997 xdpf->len, DMA_TO_DEVICE);
4998 if (dma_mapping_error(priv->device, dma_addr))
4999 return STMMAC_XDP_CONSUMED;
5000
5001 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
5002 } else {
5003 struct page *page = virt_to_page(xdpf->data);
5004
5005 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
5006 xdpf->headroom;
5007 dma_sync_single_for_device(priv->device, dma_addr,
5008 xdpf->len, DMA_BIDIRECTIONAL);
5009
5010 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
5011 }
5012
5013 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
5014 tx_q->tx_skbuff_dma[entry].map_as_page = false;
5015 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
5016 tx_q->tx_skbuff_dma[entry].last_segment = true;
5017 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
5018
5019 tx_q->xdpf[entry] = xdpf;
5020
5021 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
5022
5023 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
5024 true, priv->mode, true, true,
5025 xdpf->len);
5026
5027 tx_q->tx_count_frames++;
5028
5029 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5030 set_ic = true;
5031 else
5032 set_ic = false;
5033
5034 if (set_ic) {
5035 tx_q->tx_count_frames = 0;
5036 stmmac_set_tx_ic(priv, tx_desc);
5037 u64_stats_update_begin(&txq_stats->q_syncp);
5038 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
5039 u64_stats_update_end(&txq_stats->q_syncp);
5040 }
5041
5042 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5043
5044 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5045 tx_q->cur_tx = entry;
5046
5047 return STMMAC_XDP_TX;
5048 }
5049
5050 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5051 int cpu)
5052 {
5053 int index = cpu;
5054
5055 if (unlikely(index < 0))
5056 index = 0;
5057
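	/* Fold the CPU id into the range of available TX queues (equivalent to
	 * index % tx_queues_to_use).
	 */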
5058 while (index >= priv->plat->tx_queues_to_use)
5059 index -= priv->plat->tx_queues_to_use;
5060
5061 return index;
5062 }
5063
5064 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5065 struct xdp_buff *xdp)
5066 {
5067 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5068 int cpu = smp_processor_id();
5069 struct netdev_queue *nq;
5070 int queue;
5071 int res;
5072
5073 if (unlikely(!xdpf))
5074 return STMMAC_XDP_CONSUMED;
5075
5076 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5077 nq = netdev_get_tx_queue(priv->dev, queue);
5078
5079 __netif_tx_lock(nq, cpu);
5080 /* Avoids TX time-out as we are sharing with slow path */
5081 txq_trans_cond_update(nq);
5082
5083 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5084 if (res == STMMAC_XDP_TX)
5085 stmmac_flush_tx_descriptors(priv, queue);
5086
5087 __netif_tx_unlock(nq);
5088
5089 return res;
5090 }
5091
5092 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5093 struct bpf_prog *prog,
5094 struct xdp_buff *xdp)
5095 {
5096 u32 act;
5097 int res;
5098
5099 act = bpf_prog_run_xdp(prog, xdp);
5100 switch (act) {
5101 case XDP_PASS:
5102 res = STMMAC_XDP_PASS;
5103 break;
5104 case XDP_TX:
5105 res = stmmac_xdp_xmit_back(priv, xdp);
5106 break;
5107 case XDP_REDIRECT:
5108 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5109 res = STMMAC_XDP_CONSUMED;
5110 else
5111 res = STMMAC_XDP_REDIRECT;
5112 break;
5113 default:
5114 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5115 fallthrough;
5116 case XDP_ABORTED:
5117 trace_xdp_exception(priv->dev, prog, act);
5118 fallthrough;
5119 case XDP_DROP:
5120 res = STMMAC_XDP_CONSUMED;
5121 break;
5122 }
5123
5124 return res;
5125 }
5126
5127 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5128 struct xdp_buff *xdp)
5129 {
5130 struct bpf_prog *prog;
5131 int res;
5132
5133 prog = READ_ONCE(priv->xdp_prog);
5134 if (!prog) {
5135 res = STMMAC_XDP_PASS;
5136 goto out;
5137 }
5138
5139 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5140 out:
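	/* Encode the XDP verdict as an ERR_PTR so the caller can tell it apart
	 * from a real skb pointer.
	 */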
5141 return ERR_PTR(-res);
5142 }
5143
5144 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5145 int xdp_status)
5146 {
5147 int cpu = smp_processor_id();
5148 int queue;
5149
5150 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5151
5152 if (xdp_status & STMMAC_XDP_TX)
5153 stmmac_tx_timer_arm(priv, queue);
5154
5155 if (xdp_status & STMMAC_XDP_REDIRECT)
5156 xdp_do_flush();
5157 }
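/* Illustrative example, not part of the driver: xdp_status accumulates the
 * verdicts seen during one NAPI poll so that the TX timer and the redirect
 * flush run at most once per poll rather than once per frame:
 *
 *	int xdp_status = 0;
 *	xdp_status |= STMMAC_XDP_TX;		// at least one frame was XDP_TX'd
 *	xdp_status |= STMMAC_XDP_REDIRECT;	// at least one frame was redirected
 *	stmmac_finalize_xdp_rx(priv, xdp_status);
 */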
5158
5159 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5160 struct xdp_buff *xdp)
5161 {
5162 unsigned int metasize = xdp->data - xdp->data_meta;
5163 unsigned int datasize = xdp->data_end - xdp->data;
5164 struct sk_buff *skb;
5165
5166 skb = napi_alloc_skb(&ch->rxtx_napi,
5167 xdp->data_end - xdp->data_hard_start);
5168 if (unlikely(!skb))
5169 return NULL;
5170
5171 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5172 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5173 if (metasize)
5174 skb_metadata_set(skb, metasize);
5175
5176 return skb;
5177 }
5178
5179 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5180 struct dma_desc *p, struct dma_desc *np,
5181 struct xdp_buff *xdp)
5182 {
5183 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5184 struct stmmac_channel *ch = &priv->channel[queue];
5185 unsigned int len = xdp->data_end - xdp->data;
5186 enum pkt_hash_types hash_type;
5187 int coe = priv->hw->rx_csum;
5188 struct sk_buff *skb;
5189 u32 hash;
5190
5191 skb = stmmac_construct_skb_zc(ch, xdp);
5192 if (!skb) {
5193 priv->xstats.rx_dropped++;
5194 return;
5195 }
5196
5197 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5198 if (priv->hw->hw_vlan_en)
5199 /* MAC level stripping. */
5200 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5201 else
5202 /* Driver level stripping. */
5203 stmmac_rx_vlan(priv->dev, skb);
5204 skb->protocol = eth_type_trans(skb, priv->dev);
5205
5206 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5207 skb_checksum_none_assert(skb);
5208 else
5209 skb->ip_summed = CHECKSUM_UNNECESSARY;
5210
5211 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5212 skb_set_hash(skb, hash, hash_type);
5213
5214 skb_record_rx_queue(skb, queue);
5215 napi_gro_receive(&ch->rxtx_napi, skb);
5216
5217 u64_stats_update_begin(&rxq_stats->napi_syncp);
5218 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5219 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5220 u64_stats_update_end(&rxq_stats->napi_syncp);
5221 }
5222
5223 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5224 {
5225 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5226 unsigned int entry = rx_q->dirty_rx;
5227 struct dma_desc *rx_desc = NULL;
5228 bool ret = true;
5229
5230 budget = min(budget, stmmac_rx_dirty(priv, queue));
5231
5232 while (budget-- > 0 && entry != rx_q->cur_rx) {
5233 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5234 dma_addr_t dma_addr;
5235 bool use_rx_wd;
5236
5237 if (!buf->xdp) {
5238 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5239 if (!buf->xdp) {
5240 ret = false;
5241 break;
5242 }
5243 }
5244
5245 if (priv->extend_desc)
5246 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5247 else
5248 rx_desc = rx_q->dma_rx + entry;
5249
5250 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5251 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5252 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5253 stmmac_refill_desc3(priv, rx_q, rx_desc);
5254
5255 rx_q->rx_count_frames++;
5256 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5257 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5258 rx_q->rx_count_frames = 0;
5259
5260 use_rx_wd = !priv->rx_coal_frames[queue];
5261 use_rx_wd |= rx_q->rx_count_frames > 0;
5262 if (!priv->use_riwt)
5263 use_rx_wd = false;
5264
5265 dma_wmb();
5266 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5267
5268 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5269 }
5270
5271 if (rx_desc) {
5272 rx_q->dirty_rx = entry;
5273 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5274 (rx_q->dirty_rx * sizeof(struct dma_desc));
5275 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5276 }
5277
5278 return ret;
5279 }
5280
5281 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5282 {
5283 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5284 * to represent incoming packet, whereas cb field in the same structure
5285 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5286 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5287 */
5288 return (struct stmmac_xdp_buff *)xdp;
5289 }
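/* Rough layout sketch of what the cast above relies on (illustrative only;
 * the real definitions live in stmmac.h and the XSK core headers):
 *
 *	struct xdp_buff_xsk {
 *		struct xdp_buff xdp;	// the incoming packet
 *		u8 cb[...];		// scratch area for driver data
 *		...
 *	};
 *
 *	struct stmmac_xdp_buff {
 *		struct xdp_buff xdp;	// overlays xdp_buff_xsk::xdp
 *		...			// priv/desc/ndesc overlay cb[]
 *	};
 *
 * Both start with a struct xdp_buff and the driver-private part must fit in
 * the cb[] scratch area, which is why the plain pointer cast is enough.
 */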
5290
5291 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5292 {
5293 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5294 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5295 unsigned int count = 0, error = 0, len = 0;
5296 int dirty = stmmac_rx_dirty(priv, queue);
5297 unsigned int next_entry = rx_q->cur_rx;
5298 u32 rx_errors = 0, rx_dropped = 0;
5299 unsigned int desc_size;
5300 struct bpf_prog *prog;
5301 bool failure = false;
5302 int xdp_status = 0;
5303 int status = 0;
5304
5305 if (netif_msg_rx_status(priv)) {
5306 void *rx_head;
5307
5308 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5309 if (priv->extend_desc) {
5310 rx_head = (void *)rx_q->dma_erx;
5311 desc_size = sizeof(struct dma_extended_desc);
5312 } else {
5313 rx_head = (void *)rx_q->dma_rx;
5314 desc_size = sizeof(struct dma_desc);
5315 }
5316
5317 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5318 rx_q->dma_rx_phy, desc_size);
5319 }
5320 while (count < limit) {
5321 struct stmmac_rx_buffer *buf;
5322 struct stmmac_xdp_buff *ctx;
5323 unsigned int buf1_len = 0;
5324 struct dma_desc *np, *p;
5325 int entry;
5326 int res;
5327
5328 if (!count && rx_q->state_saved) {
5329 error = rx_q->state.error;
5330 len = rx_q->state.len;
5331 } else {
5332 rx_q->state_saved = false;
5333 error = 0;
5334 len = 0;
5335 }
5336
5337 if (count >= limit)
5338 break;
5339
5340 read_again:
5341 buf1_len = 0;
5342 entry = next_entry;
5343 buf = &rx_q->buf_pool[entry];
5344
5345 if (dirty >= STMMAC_RX_FILL_BATCH) {
5346 failure = failure ||
5347 !stmmac_rx_refill_zc(priv, queue, dirty);
5348 dirty = 0;
5349 }
5350
5351 if (priv->extend_desc)
5352 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5353 else
5354 p = rx_q->dma_rx + entry;
5355
5356 /* read the status of the incoming frame */
5357 status = stmmac_rx_status(priv, &priv->xstats, p);
5358 /* check if managed by the DMA otherwise go ahead */
5359 if (unlikely(status & dma_own))
5360 break;
5361
5362 /* Prefetch the next RX descriptor */
5363 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5364 priv->dma_conf.dma_rx_size);
5365 next_entry = rx_q->cur_rx;
5366
5367 if (priv->extend_desc)
5368 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5369 else
5370 np = rx_q->dma_rx + next_entry;
5371
5372 prefetch(np);
5373
5374 		/* Ensure a valid XSK buffer before proceeding */
5375 if (!buf->xdp)
5376 break;
5377
5378 if (priv->extend_desc)
5379 stmmac_rx_extended_status(priv, &priv->xstats,
5380 rx_q->dma_erx + entry);
5381 if (unlikely(status == discard_frame)) {
5382 xsk_buff_free(buf->xdp);
5383 buf->xdp = NULL;
5384 dirty++;
5385 error = 1;
5386 if (!priv->hwts_rx_en)
5387 rx_errors++;
5388 }
5389
5390 if (unlikely(error && (status & rx_not_ls)))
5391 goto read_again;
5392 if (unlikely(error)) {
5393 count++;
5394 continue;
5395 }
5396
5397 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5398 if (likely(status & rx_not_ls)) {
5399 xsk_buff_free(buf->xdp);
5400 buf->xdp = NULL;
5401 dirty++;
5402 count++;
5403 goto read_again;
5404 }
5405
5406 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5407 ctx->priv = priv;
5408 ctx->desc = p;
5409 ctx->ndesc = np;
5410
5411 		/* XDP ZC frames only support primary buffers for now */
5412 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5413 len += buf1_len;
5414
5415 /* ACS is disabled; strip manually. */
5416 if (likely(!(status & rx_not_ls))) {
5417 buf1_len -= ETH_FCS_LEN;
5418 len -= ETH_FCS_LEN;
5419 }
5420
5421 		/* RX buffer is good and fits into an XSK pool buffer */
5422 buf->xdp->data_end = buf->xdp->data + buf1_len;
5423 xsk_buff_dma_sync_for_cpu(buf->xdp);
5424
5425 prog = READ_ONCE(priv->xdp_prog);
5426 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5427
5428 switch (res) {
5429 case STMMAC_XDP_PASS:
5430 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5431 xsk_buff_free(buf->xdp);
5432 break;
5433 case STMMAC_XDP_CONSUMED:
5434 xsk_buff_free(buf->xdp);
5435 rx_dropped++;
5436 break;
5437 case STMMAC_XDP_TX:
5438 case STMMAC_XDP_REDIRECT:
5439 xdp_status |= res;
5440 break;
5441 }
5442
5443 buf->xdp = NULL;
5444 dirty++;
5445 count++;
5446 }
5447
5448 if (status & rx_not_ls) {
5449 rx_q->state_saved = true;
5450 rx_q->state.error = error;
5451 rx_q->state.len = len;
5452 }
5453
5454 stmmac_finalize_xdp_rx(priv, xdp_status);
5455
5456 u64_stats_update_begin(&rxq_stats->napi_syncp);
5457 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5458 u64_stats_update_end(&rxq_stats->napi_syncp);
5459
5460 priv->xstats.rx_dropped += rx_dropped;
5461 priv->xstats.rx_errors += rx_errors;
5462
5463 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5464 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5465 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5466 else
5467 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5468
5469 return (int)count;
5470 }
5471
5472 return failure ? limit : (int)count;
5473 }
5474
5475 /**
5476 * stmmac_rx - manage the receive process
5477 * @priv: driver private structure
5478  * @limit: napi budget
5479 * @queue: RX queue index.
5480  * Description : this is the function called by the napi poll method.
5481 * It gets all the frames inside the ring.
5482 */
5483 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5484 {
5485 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5486 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5487 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5488 struct stmmac_channel *ch = &priv->channel[queue];
5489 unsigned int count = 0, error = 0, len = 0;
5490 int status = 0, coe = priv->hw->rx_csum;
5491 unsigned int next_entry = rx_q->cur_rx;
5492 enum dma_data_direction dma_dir;
5493 unsigned int desc_size;
5494 struct sk_buff *skb = NULL;
5495 struct stmmac_xdp_buff ctx;
5496 int xdp_status = 0;
5497 int bufsz;
5498
5499 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5500 bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5501 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5502
5503 if (netif_msg_rx_status(priv)) {
5504 void *rx_head;
5505
5506 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5507 if (priv->extend_desc) {
5508 rx_head = (void *)rx_q->dma_erx;
5509 desc_size = sizeof(struct dma_extended_desc);
5510 } else {
5511 rx_head = (void *)rx_q->dma_rx;
5512 desc_size = sizeof(struct dma_desc);
5513 }
5514
5515 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5516 rx_q->dma_rx_phy, desc_size);
5517 }
5518 while (count < limit) {
5519 unsigned int buf1_len = 0, buf2_len = 0;
5520 enum pkt_hash_types hash_type;
5521 struct stmmac_rx_buffer *buf;
5522 struct dma_desc *np, *p;
5523 int entry;
5524 u32 hash;
5525
5526 if (!count && rx_q->state_saved) {
5527 skb = rx_q->state.skb;
5528 error = rx_q->state.error;
5529 len = rx_q->state.len;
5530 } else {
5531 rx_q->state_saved = false;
5532 skb = NULL;
5533 error = 0;
5534 len = 0;
5535 }
5536
5537 read_again:
5538 if (count >= limit)
5539 break;
5540
5541 buf1_len = 0;
5542 buf2_len = 0;
5543 entry = next_entry;
5544 buf = &rx_q->buf_pool[entry];
5545
5546 if (priv->extend_desc)
5547 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5548 else
5549 p = rx_q->dma_rx + entry;
5550
5551 /* read the status of the incoming frame */
5552 status = stmmac_rx_status(priv, &priv->xstats, p);
5553 /* check if managed by the DMA otherwise go ahead */
5554 if (unlikely(status & dma_own))
5555 break;
5556
5557 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5558 priv->dma_conf.dma_rx_size);
5559 next_entry = rx_q->cur_rx;
5560
5561 if (priv->extend_desc)
5562 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5563 else
5564 np = rx_q->dma_rx + next_entry;
5565
5566 prefetch(np);
5567
5568 if (priv->extend_desc)
5569 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5570 if (unlikely(status == discard_frame)) {
5571 page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5572 buf->page = NULL;
5573 error = 1;
5574 if (!priv->hwts_rx_en)
5575 rx_errors++;
5576 }
5577
5578 if (unlikely(error && (status & rx_not_ls)))
5579 goto read_again;
5580 if (unlikely(error)) {
5581 dev_kfree_skb(skb);
5582 skb = NULL;
5583 count++;
5584 continue;
5585 }
5586
5587 /* Buffer is good. Go on. */
5588
5589 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5590 len += buf1_len;
5591 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5592 len += buf2_len;
5593
5594 /* ACS is disabled; strip manually. */
5595 if (likely(!(status & rx_not_ls))) {
5596 if (buf2_len) {
5597 buf2_len -= ETH_FCS_LEN;
5598 len -= ETH_FCS_LEN;
5599 } else if (buf1_len) {
5600 buf1_len -= ETH_FCS_LEN;
5601 len -= ETH_FCS_LEN;
5602 }
5603 }
5604
5605 if (!skb) {
5606 unsigned int pre_len, sync_len;
5607
5608 dma_sync_single_for_cpu(priv->device, buf->addr,
5609 buf1_len, dma_dir);
5610 net_prefetch(page_address(buf->page) +
5611 buf->page_offset);
5612
5613 xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5614 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5615 buf->page_offset, buf1_len, true);
5616
5617 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5618 buf->page_offset;
5619
5620 ctx.priv = priv;
5621 ctx.desc = p;
5622 ctx.ndesc = np;
5623
5624 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5625 			/* Due to xdp_adjust_tail: the DMA sync for_device must
5626 			 * cover the max length the CPU touched
5627 */
5628 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5629 buf->page_offset;
5630 sync_len = max(sync_len, pre_len);
5631
5632 			/* For verdicts other than XDP_PASS */
5633 if (IS_ERR(skb)) {
5634 unsigned int xdp_res = -PTR_ERR(skb);
5635
5636 if (xdp_res & STMMAC_XDP_CONSUMED) {
5637 page_pool_put_page(rx_q->page_pool,
5638 virt_to_head_page(ctx.xdp.data),
5639 sync_len, true);
5640 buf->page = NULL;
5641 rx_dropped++;
5642
5643 				/* Clear skb, as it was set to carry the
5644 				 * verdict status by the XDP program.
5645 */
5646 skb = NULL;
5647
5648 if (unlikely((status & rx_not_ls)))
5649 goto read_again;
5650
5651 count++;
5652 continue;
5653 } else if (xdp_res & (STMMAC_XDP_TX |
5654 STMMAC_XDP_REDIRECT)) {
5655 xdp_status |= xdp_res;
5656 buf->page = NULL;
5657 skb = NULL;
5658 count++;
5659 continue;
5660 }
5661 }
5662 }
5663
5664 if (!skb) {
5665 unsigned int head_pad_len;
5666
5667 /* XDP program may expand or reduce tail */
5668 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5669
5670 skb = napi_build_skb(page_address(buf->page),
5671 rx_q->napi_skb_frag_size);
5672 if (!skb) {
5673 page_pool_recycle_direct(rx_q->page_pool,
5674 buf->page);
5675 rx_dropped++;
5676 count++;
5677 goto drain_data;
5678 }
5679
5680 /* XDP program may adjust header */
5681 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5682 skb_reserve(skb, head_pad_len);
5683 skb_put(skb, buf1_len);
5684 skb_mark_for_recycle(skb);
5685 buf->page = NULL;
5686 } else if (buf1_len) {
5687 dma_sync_single_for_cpu(priv->device, buf->addr,
5688 buf1_len, dma_dir);
5689 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5690 buf->page, buf->page_offset, buf1_len,
5691 priv->dma_conf.dma_buf_sz);
5692 buf->page = NULL;
5693 }
5694
5695 if (buf2_len) {
5696 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5697 buf2_len, dma_dir);
5698 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5699 buf->sec_page, 0, buf2_len,
5700 priv->dma_conf.dma_buf_sz);
5701 buf->sec_page = NULL;
5702 }
5703
5704 drain_data:
5705 if (likely(status & rx_not_ls))
5706 goto read_again;
5707 if (!skb)
5708 continue;
5709
5710 /* Got entire packet into SKB. Finish it. */
5711
5712 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5713
5714 if (priv->hw->hw_vlan_en)
5715 /* MAC level stripping. */
5716 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5717 else
5718 /* Driver level stripping. */
5719 stmmac_rx_vlan(priv->dev, skb);
5720
5721 skb->protocol = eth_type_trans(skb, priv->dev);
5722
5723 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5724 skb_checksum_none_assert(skb);
5725 else
5726 skb->ip_summed = CHECKSUM_UNNECESSARY;
5727
5728 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5729 skb_set_hash(skb, hash, hash_type);
5730
5731 skb_record_rx_queue(skb, queue);
5732 napi_gro_receive(&ch->rx_napi, skb);
5733 skb = NULL;
5734
5735 rx_packets++;
5736 rx_bytes += len;
5737 count++;
5738 }
5739
5740 if (status & rx_not_ls || skb) {
5741 rx_q->state_saved = true;
5742 rx_q->state.skb = skb;
5743 rx_q->state.error = error;
5744 rx_q->state.len = len;
5745 }
5746
5747 stmmac_finalize_xdp_rx(priv, xdp_status);
5748
5749 stmmac_rx_refill(priv, queue);
5750
5751 u64_stats_update_begin(&rxq_stats->napi_syncp);
5752 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5753 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5754 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5755 u64_stats_update_end(&rxq_stats->napi_syncp);
5756
5757 priv->xstats.rx_dropped += rx_dropped;
5758 priv->xstats.rx_errors += rx_errors;
5759
5760 return count;
5761 }
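/* Illustrative note, not part of the driver: when the budget runs out in the
 * middle of a multi-descriptor frame (rx_not_ls still set), the partially
 * built skb and counters are parked in rx_q->state and resumed on the next
 * NAPI poll, roughly:
 *
 *	// poll N:   status & rx_not_ls    -> state_saved = true, state.skb = skb
 *	// poll N+1: !count && state_saved -> skb = rx_q->state.skb, continue
 */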
5762
5763 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5764 {
5765 struct stmmac_channel *ch =
5766 container_of(napi, struct stmmac_channel, rx_napi);
5767 struct stmmac_priv *priv = ch->priv_data;
5768 struct stmmac_rxq_stats *rxq_stats;
5769 u32 chan = ch->index;
5770 int work_done;
5771
5772 rxq_stats = &priv->xstats.rxq_stats[chan];
5773 u64_stats_update_begin(&rxq_stats->napi_syncp);
5774 u64_stats_inc(&rxq_stats->napi.poll);
5775 u64_stats_update_end(&rxq_stats->napi_syncp);
5776
5777 work_done = stmmac_rx(priv, budget, chan);
5778 if (work_done < budget && napi_complete_done(napi, work_done)) {
5779 unsigned long flags;
5780
5781 spin_lock_irqsave(&ch->lock, flags);
5782 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5783 spin_unlock_irqrestore(&ch->lock, flags);
5784 }
5785
5786 return work_done;
5787 }
5788
5789 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5790 {
5791 struct stmmac_channel *ch =
5792 container_of(napi, struct stmmac_channel, tx_napi);
5793 struct stmmac_priv *priv = ch->priv_data;
5794 struct stmmac_txq_stats *txq_stats;
5795 bool pending_packets = false;
5796 u32 chan = ch->index;
5797 int work_done;
5798
5799 txq_stats = &priv->xstats.txq_stats[chan];
5800 u64_stats_update_begin(&txq_stats->napi_syncp);
5801 u64_stats_inc(&txq_stats->napi.poll);
5802 u64_stats_update_end(&txq_stats->napi_syncp);
5803
5804 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5805 work_done = min(work_done, budget);
5806
5807 if (work_done < budget && napi_complete_done(napi, work_done)) {
5808 unsigned long flags;
5809
5810 spin_lock_irqsave(&ch->lock, flags);
5811 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5812 spin_unlock_irqrestore(&ch->lock, flags);
5813 }
5814
5815 	/* TX still has packets to handle, check if we need to arm tx timer */
5816 if (pending_packets)
5817 stmmac_tx_timer_arm(priv, chan);
5818
5819 return work_done;
5820 }
5821
5822 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5823 {
5824 struct stmmac_channel *ch =
5825 container_of(napi, struct stmmac_channel, rxtx_napi);
5826 struct stmmac_priv *priv = ch->priv_data;
5827 bool tx_pending_packets = false;
5828 int rx_done, tx_done, rxtx_done;
5829 struct stmmac_rxq_stats *rxq_stats;
5830 struct stmmac_txq_stats *txq_stats;
5831 u32 chan = ch->index;
5832
5833 rxq_stats = &priv->xstats.rxq_stats[chan];
5834 u64_stats_update_begin(&rxq_stats->napi_syncp);
5835 u64_stats_inc(&rxq_stats->napi.poll);
5836 u64_stats_update_end(&rxq_stats->napi_syncp);
5837
5838 txq_stats = &priv->xstats.txq_stats[chan];
5839 u64_stats_update_begin(&txq_stats->napi_syncp);
5840 u64_stats_inc(&txq_stats->napi.poll);
5841 u64_stats_update_end(&txq_stats->napi_syncp);
5842
5843 tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5844 tx_done = min(tx_done, budget);
5845
5846 rx_done = stmmac_rx_zc(priv, budget, chan);
5847
5848 rxtx_done = max(tx_done, rx_done);
5849
5850 /* If either TX or RX work is not complete, return budget
5851 	 * and keep polling
5852 */
5853 if (rxtx_done >= budget)
5854 return budget;
5855
5856 /* all work done, exit the polling mode */
5857 if (napi_complete_done(napi, rxtx_done)) {
5858 unsigned long flags;
5859
5860 spin_lock_irqsave(&ch->lock, flags);
5861 		/* Both RX and TX work are complete,
5862 * so enable both RX & TX IRQs.
5863 */
5864 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5865 spin_unlock_irqrestore(&ch->lock, flags);
5866 }
5867
5868 	/* TX still has packets to handle, check if we need to arm tx timer */
5869 if (tx_pending_packets)
5870 stmmac_tx_timer_arm(priv, chan);
5871
5872 return min(rxtx_done, budget - 1);
5873 }
5874
5875 /**
5876 * stmmac_tx_timeout
5877 * @dev : Pointer to net device structure
5878 * @txqueue: the index of the hanging transmit queue
5879 * Description: this function is called when a packet transmission fails to
5880 * complete within a reasonable time. The driver will mark the error in the
5881 * netdev structure and arrange for the device to be reset to a sane state
5882 * in order to transmit a new packet.
5883 */
5884 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5885 {
5886 struct stmmac_priv *priv = netdev_priv(dev);
5887
5888 stmmac_global_err(priv);
5889 }
5890
5891 /**
5892 * stmmac_set_rx_mode - entry point for multicast addressing
5893 * @dev : pointer to the device structure
5894 * Description:
5895 * This function is a driver entry point which gets called by the kernel
5896 * whenever multicast addresses must be enabled/disabled.
5897 * Return value:
5898 * void.
5899 *
5900 * FIXME: This may need RXC to be running, but it may be called with BH
5901 * disabled, which means we can't call phylink_rx_clk_stop*().
5902 */
5903 static void stmmac_set_rx_mode(struct net_device *dev)
5904 {
5905 struct stmmac_priv *priv = netdev_priv(dev);
5906
5907 stmmac_set_filter(priv, priv->hw, dev);
5908 }
5909
5910 /**
5911 * stmmac_change_mtu - entry point to change MTU size for the device.
5912 * @dev : device pointer.
5913 * @new_mtu : the new MTU size for the device.
5914  * Description: the Maximum Transmission Unit (MTU) is used by the network layer
5915 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5916 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5917 * Return value:
5918 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5919 * file on failure.
5920 */
5921 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5922 {
5923 struct stmmac_priv *priv = netdev_priv(dev);
5924 int txfifosz = priv->plat->tx_fifo_size;
5925 struct stmmac_dma_conf *dma_conf;
5926 const int mtu = new_mtu;
5927 int ret;
5928
5929 if (txfifosz == 0)
5930 txfifosz = priv->dma_cap.tx_fifo_size;
5931
5932 txfifosz /= priv->plat->tx_queues_to_use;
5933
5934 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5935 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5936 return -EINVAL;
5937 }
5938
5939 new_mtu = STMMAC_ALIGN(new_mtu);
5940
5941 /* If condition true, FIFO is too small or MTU too large */
5942 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5943 return -EINVAL;
5944
5945 if (netif_running(dev)) {
5946 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5947 /* Try to allocate the new DMA conf with the new mtu */
5948 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5949 if (IS_ERR(dma_conf)) {
5950 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5951 mtu);
5952 return PTR_ERR(dma_conf);
5953 }
5954
5955 stmmac_release(dev);
5956
5957 ret = __stmmac_open(dev, dma_conf);
5958 if (ret) {
5959 free_dma_desc_resources(priv, dma_conf);
5960 kfree(dma_conf);
5961 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5962 return ret;
5963 }
5964
5965 kfree(dma_conf);
5966
5967 stmmac_set_rx_mode(dev);
5968 }
5969
5970 WRITE_ONCE(dev->mtu, mtu);
5971 netdev_update_features(dev);
5972
5973 return 0;
5974 }
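/* Worked example of the alignment above (illustrative only, assuming
 * SMP_CACHE_BYTES == 64): a requested MTU of 1500 is rounded up for the
 * FIFO check while the netdev keeps the value the user asked for:
 *
 *	new_mtu = STMMAC_ALIGN(1500);	// ALIGN(ALIGN(1500, 64), 16) == 1536
 *	WRITE_ONCE(dev->mtu, 1500);	// dev->mtu keeps the original request
 */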
5975
5976 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5977 netdev_features_t features)
5978 {
5979 struct stmmac_priv *priv = netdev_priv(dev);
5980
5981 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5982 features &= ~NETIF_F_RXCSUM;
5983
5984 if (!priv->plat->tx_coe)
5985 features &= ~NETIF_F_CSUM_MASK;
5986
5987 /* Some GMAC devices have a bugged Jumbo frame support that
5988 * needs to have the Tx COE disabled for oversized frames
5989 * (due to limited buffer sizes). In this case we disable
5990 * the TX csum insertion in the TDES and not use SF.
5991 */
5992 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5993 features &= ~NETIF_F_CSUM_MASK;
5994
5995 /* Disable tso if asked by ethtool */
5996 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5997 if (features & NETIF_F_TSO)
5998 priv->tso = true;
5999 else
6000 priv->tso = false;
6001 }
6002
6003 return features;
6004 }
6005
6006 static int stmmac_set_features(struct net_device *netdev,
6007 netdev_features_t features)
6008 {
6009 struct stmmac_priv *priv = netdev_priv(netdev);
6010
6011 	/* Keep the COE Type in case checksum offload is supported */
6012 if (features & NETIF_F_RXCSUM)
6013 priv->hw->rx_csum = priv->plat->rx_coe;
6014 else
6015 priv->hw->rx_csum = 0;
6016 /* No check needed because rx_coe has been set before and it will be
6017 * fixed in case of issue.
6018 */
6019 stmmac_rx_ipc(priv, priv->hw);
6020
6021 if (priv->sph_cap) {
6022 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6023 u32 chan;
6024
6025 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
6026 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6027 }
6028
6029 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6030 priv->hw->hw_vlan_en = true;
6031 else
6032 priv->hw->hw_vlan_en = false;
6033
6034 phylink_rx_clk_stop_block(priv->phylink);
6035 stmmac_set_hw_vlan_mode(priv, priv->hw);
6036 phylink_rx_clk_stop_unblock(priv->phylink);
6037
6038 return 0;
6039 }
6040
6041 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6042 {
6043 u32 rx_cnt = priv->plat->rx_queues_to_use;
6044 u32 tx_cnt = priv->plat->tx_queues_to_use;
6045 u32 queues_count;
6046 u32 queue;
6047 bool xmac;
6048
6049 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6050 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6051
6052 if (priv->irq_wake)
6053 pm_wakeup_event(priv->device, 0);
6054
6055 if (priv->dma_cap.estsel)
6056 stmmac_est_irq_status(priv, priv, priv->dev,
6057 &priv->xstats, tx_cnt);
6058
6059 if (stmmac_fpe_supported(priv))
6060 stmmac_fpe_irq_status(priv);
6061
6062 	/* To handle the GMAC's own interrupts */
6063 if ((priv->plat->has_gmac) || xmac) {
6064 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6065
6066 if (unlikely(status)) {
6067 /* For LPI we need to save the tx status */
6068 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6069 priv->tx_path_in_lpi_mode = true;
6070 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6071 priv->tx_path_in_lpi_mode = false;
6072 }
6073
6074 for (queue = 0; queue < queues_count; queue++)
6075 stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6076
6077 /* PCS link status */
6078 if (priv->hw->pcs &&
6079 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6080 if (priv->xstats.pcs_link)
6081 netif_carrier_on(priv->dev);
6082 else
6083 netif_carrier_off(priv->dev);
6084 }
6085
6086 stmmac_timestamp_interrupt(priv, priv);
6087 }
6088 }
6089
6090 /**
6091 * stmmac_interrupt - main ISR
6092 * @irq: interrupt number.
6093 * @dev_id: to pass the net device pointer.
6094 * Description: this is the main driver interrupt service routine.
6095 * It can call:
6096 * o DMA service routine (to manage incoming frame reception and transmission
6097 * status)
6098 * o Core interrupts to manage: remote wake-up, management counter, LPI
6099 * interrupts.
6100 */
6101 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6102 {
6103 struct net_device *dev = (struct net_device *)dev_id;
6104 struct stmmac_priv *priv = netdev_priv(dev);
6105
6106 /* Check if adapter is up */
6107 if (test_bit(STMMAC_DOWN, &priv->state))
6108 return IRQ_HANDLED;
6109
6110 /* Check ASP error if it isn't delivered via an individual IRQ */
6111 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6112 return IRQ_HANDLED;
6113
6114 /* To handle Common interrupts */
6115 stmmac_common_interrupt(priv);
6116
6117 /* To handle DMA interrupts */
6118 stmmac_dma_interrupt(priv);
6119
6120 return IRQ_HANDLED;
6121 }
6122
6123 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6124 {
6125 struct net_device *dev = (struct net_device *)dev_id;
6126 struct stmmac_priv *priv = netdev_priv(dev);
6127
6128 /* Check if adapter is up */
6129 if (test_bit(STMMAC_DOWN, &priv->state))
6130 return IRQ_HANDLED;
6131
6132 /* To handle Common interrupts */
6133 stmmac_common_interrupt(priv);
6134
6135 return IRQ_HANDLED;
6136 }
6137
6138 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6139 {
6140 struct net_device *dev = (struct net_device *)dev_id;
6141 struct stmmac_priv *priv = netdev_priv(dev);
6142
6143 /* Check if adapter is up */
6144 if (test_bit(STMMAC_DOWN, &priv->state))
6145 return IRQ_HANDLED;
6146
6147 /* Check if a fatal error happened */
6148 stmmac_safety_feat_interrupt(priv);
6149
6150 return IRQ_HANDLED;
6151 }
6152
6153 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6154 {
6155 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6156 struct stmmac_dma_conf *dma_conf;
6157 int chan = tx_q->queue_index;
6158 struct stmmac_priv *priv;
6159 int status;
6160
6161 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6162 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6163
6164 /* Check if adapter is up */
6165 if (test_bit(STMMAC_DOWN, &priv->state))
6166 return IRQ_HANDLED;
6167
6168 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6169
6170 if (unlikely(status & tx_hard_error_bump_tc)) {
6171 /* Try to bump up the dma threshold on this failure */
6172 stmmac_bump_dma_threshold(priv, chan);
6173 } else if (unlikely(status == tx_hard_error)) {
6174 stmmac_tx_err(priv, chan);
6175 }
6176
6177 return IRQ_HANDLED;
6178 }
6179
6180 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6181 {
6182 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6183 struct stmmac_dma_conf *dma_conf;
6184 int chan = rx_q->queue_index;
6185 struct stmmac_priv *priv;
6186
6187 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6188 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6189
6190 /* Check if adapter is up */
6191 if (test_bit(STMMAC_DOWN, &priv->state))
6192 return IRQ_HANDLED;
6193
6194 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6195
6196 return IRQ_HANDLED;
6197 }
6198
6199 /**
6200 * stmmac_ioctl - Entry point for the Ioctl
6201 * @dev: Device pointer.
6202  * @rq: An IOCTL specific structure that can contain a pointer to
6203 * a proprietary structure used to pass information to the driver.
6204 * @cmd: IOCTL command
6205 * Description:
6206 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6207 */
6208 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6209 {
6210 	struct stmmac_priv *priv = netdev_priv(dev);
6211 int ret = -EOPNOTSUPP;
6212
6213 if (!netif_running(dev))
6214 return -EINVAL;
6215
6216 switch (cmd) {
6217 case SIOCGMIIPHY:
6218 case SIOCGMIIREG:
6219 case SIOCSMIIREG:
6220 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6221 break;
6222 case SIOCSHWTSTAMP:
6223 ret = stmmac_hwtstamp_set(dev, rq);
6224 break;
6225 case SIOCGHWTSTAMP:
6226 ret = stmmac_hwtstamp_get(dev, rq);
6227 break;
6228 default:
6229 break;
6230 }
6231
6232 return ret;
6233 }
6234
6235 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6236 void *cb_priv)
6237 {
6238 struct stmmac_priv *priv = cb_priv;
6239 int ret = -EOPNOTSUPP;
6240
6241 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6242 return ret;
6243
6244 __stmmac_disable_all_queues(priv);
6245
6246 switch (type) {
6247 case TC_SETUP_CLSU32:
6248 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6249 break;
6250 case TC_SETUP_CLSFLOWER:
6251 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6252 break;
6253 default:
6254 break;
6255 }
6256
6257 stmmac_enable_all_queues(priv);
6258 return ret;
6259 }
6260
6261 static LIST_HEAD(stmmac_block_cb_list);
6262
6263 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6264 void *type_data)
6265 {
6266 struct stmmac_priv *priv = netdev_priv(ndev);
6267
6268 switch (type) {
6269 case TC_QUERY_CAPS:
6270 return stmmac_tc_query_caps(priv, priv, type_data);
6271 case TC_SETUP_QDISC_MQPRIO:
6272 return stmmac_tc_setup_mqprio(priv, priv, type_data);
6273 case TC_SETUP_BLOCK:
6274 return flow_block_cb_setup_simple(type_data,
6275 &stmmac_block_cb_list,
6276 stmmac_setup_tc_block_cb,
6277 priv, priv, true);
6278 case TC_SETUP_QDISC_CBS:
6279 return stmmac_tc_setup_cbs(priv, priv, type_data);
6280 case TC_SETUP_QDISC_TAPRIO:
6281 return stmmac_tc_setup_taprio(priv, priv, type_data);
6282 case TC_SETUP_QDISC_ETF:
6283 return stmmac_tc_setup_etf(priv, priv, type_data);
6284 default:
6285 return -EOPNOTSUPP;
6286 }
6287 }
6288
6289 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6290 struct net_device *sb_dev)
6291 {
6292 int gso = skb_shinfo(skb)->gso_type;
6293
6294 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6295 /*
6296 * There is no way to determine the number of TSO/USO
6297 		 * capable Queues. Let's always use Queue 0
6298 * because if TSO/USO is supported then at least this
6299 * one will be capable.
6300 */
6301 return 0;
6302 }
6303
6304 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6305 }
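/* Illustrative example, not part of the driver: non-GSO traffic uses the
 * stack's default pick wrapped to the number of real TX queues, e.g. if
 * netdev_pick_tx() returns 6 and real_num_tx_queues == 4:
 *
 *	queue = 6 % 4;	// -> the skb goes out on TX queue 2
 */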
6306
6307 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6308 {
6309 struct stmmac_priv *priv = netdev_priv(ndev);
6310 int ret = 0;
6311
6312 ret = pm_runtime_resume_and_get(priv->device);
6313 if (ret < 0)
6314 return ret;
6315
6316 ret = eth_mac_addr(ndev, addr);
6317 if (ret)
6318 goto set_mac_error;
6319
6320 phylink_rx_clk_stop_block(priv->phylink);
6321 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6322 phylink_rx_clk_stop_unblock(priv->phylink);
6323
6324 set_mac_error:
6325 pm_runtime_put(priv->device);
6326
6327 return ret;
6328 }
6329
6330 #ifdef CONFIG_DEBUG_FS
6331 static struct dentry *stmmac_fs_dir;
6332
6333 static void sysfs_display_ring(void *head, int size, int extend_desc,
6334 struct seq_file *seq, dma_addr_t dma_phy_addr)
6335 {
6336 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6337 struct dma_desc *p = (struct dma_desc *)head;
6338 unsigned int desc_size;
6339 dma_addr_t dma_addr;
6340 int i;
6341
6342 desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6343 for (i = 0; i < size; i++) {
6344 dma_addr = dma_phy_addr + i * desc_size;
6345 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6346 i, &dma_addr,
6347 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6348 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6349 if (extend_desc)
6350 p = &(++ep)->basic;
6351 else
6352 p++;
6353 }
6354 }
6355
6356 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6357 {
6358 struct net_device *dev = seq->private;
6359 struct stmmac_priv *priv = netdev_priv(dev);
6360 u32 rx_count = priv->plat->rx_queues_to_use;
6361 u32 tx_count = priv->plat->tx_queues_to_use;
6362 u32 queue;
6363
6364 if ((dev->flags & IFF_UP) == 0)
6365 return 0;
6366
6367 for (queue = 0; queue < rx_count; queue++) {
6368 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6369
6370 seq_printf(seq, "RX Queue %d:\n", queue);
6371
6372 if (priv->extend_desc) {
6373 seq_printf(seq, "Extended descriptor ring:\n");
6374 sysfs_display_ring((void *)rx_q->dma_erx,
6375 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6376 } else {
6377 seq_printf(seq, "Descriptor ring:\n");
6378 sysfs_display_ring((void *)rx_q->dma_rx,
6379 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6380 }
6381 }
6382
6383 for (queue = 0; queue < tx_count; queue++) {
6384 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6385
6386 seq_printf(seq, "TX Queue %d:\n", queue);
6387
6388 if (priv->extend_desc) {
6389 seq_printf(seq, "Extended descriptor ring:\n");
6390 sysfs_display_ring((void *)tx_q->dma_etx,
6391 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6392 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6393 seq_printf(seq, "Descriptor ring:\n");
6394 sysfs_display_ring((void *)tx_q->dma_tx,
6395 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6396 }
6397 }
6398
6399 return 0;
6400 }
6401 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6402
6403 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6404 {
6405 static const char * const dwxgmac_timestamp_source[] = {
6406 "None",
6407 "Internal",
6408 "External",
6409 "Both",
6410 };
6411 static const char * const dwxgmac_safety_feature_desc[] = {
6412 "No",
6413 "All Safety Features with ECC and Parity",
6414 "All Safety Features without ECC or Parity",
6415 "All Safety Features with Parity Only",
6416 "ECC Only",
6417 "UNDEFINED",
6418 "UNDEFINED",
6419 "UNDEFINED",
6420 };
6421 struct net_device *dev = seq->private;
6422 struct stmmac_priv *priv = netdev_priv(dev);
6423
6424 if (!priv->hw_cap_support) {
6425 seq_printf(seq, "DMA HW features not supported\n");
6426 return 0;
6427 }
6428
6429 seq_printf(seq, "==============================\n");
6430 seq_printf(seq, "\tDMA HW features\n");
6431 seq_printf(seq, "==============================\n");
6432
6433 seq_printf(seq, "\t10/100 Mbps: %s\n",
6434 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6435 seq_printf(seq, "\t1000 Mbps: %s\n",
6436 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6437 seq_printf(seq, "\tHalf duplex: %s\n",
6438 (priv->dma_cap.half_duplex) ? "Y" : "N");
6439 if (priv->plat->has_xgmac) {
6440 seq_printf(seq,
6441 "\tNumber of Additional MAC address registers: %d\n",
6442 priv->dma_cap.multi_addr);
6443 } else {
6444 seq_printf(seq, "\tHash Filter: %s\n",
6445 (priv->dma_cap.hash_filter) ? "Y" : "N");
6446 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6447 (priv->dma_cap.multi_addr) ? "Y" : "N");
6448 }
6449 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6450 (priv->dma_cap.pcs) ? "Y" : "N");
6451 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6452 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6453 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6454 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6455 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6456 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6457 seq_printf(seq, "\tRMON module: %s\n",
6458 (priv->dma_cap.rmon) ? "Y" : "N");
6459 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6460 (priv->dma_cap.time_stamp) ? "Y" : "N");
6461 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6462 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6463 if (priv->plat->has_xgmac)
6464 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6465 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6466 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6467 (priv->dma_cap.eee) ? "Y" : "N");
6468 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6469 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6470 (priv->dma_cap.tx_coe) ? "Y" : "N");
6471 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6472 priv->plat->has_xgmac) {
6473 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6474 (priv->dma_cap.rx_coe) ? "Y" : "N");
6475 } else {
6476 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6477 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6478 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6479 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6480 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6481 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6482 }
6483 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6484 priv->dma_cap.number_rx_channel);
6485 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6486 priv->dma_cap.number_tx_channel);
6487 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6488 priv->dma_cap.number_rx_queues);
6489 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6490 priv->dma_cap.number_tx_queues);
6491 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6492 (priv->dma_cap.enh_desc) ? "Y" : "N");
6493 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6494 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6495 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6496 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6497 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6498 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6499 priv->dma_cap.pps_out_num);
6500 seq_printf(seq, "\tSafety Features: %s\n",
6501 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6502 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6503 priv->dma_cap.frpsel ? "Y" : "N");
6504 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6505 priv->dma_cap.host_dma_width);
6506 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6507 priv->dma_cap.rssen ? "Y" : "N");
6508 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6509 priv->dma_cap.vlhash ? "Y" : "N");
6510 seq_printf(seq, "\tSplit Header: %s\n",
6511 priv->dma_cap.sphen ? "Y" : "N");
6512 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6513 priv->dma_cap.vlins ? "Y" : "N");
6514 seq_printf(seq, "\tDouble VLAN: %s\n",
6515 priv->dma_cap.dvlan ? "Y" : "N");
6516 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6517 priv->dma_cap.l3l4fnum);
6518 seq_printf(seq, "\tARP Offloading: %s\n",
6519 priv->dma_cap.arpoffsel ? "Y" : "N");
6520 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6521 priv->dma_cap.estsel ? "Y" : "N");
6522 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6523 priv->dma_cap.fpesel ? "Y" : "N");
6524 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6525 priv->dma_cap.tbssel ? "Y" : "N");
6526 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6527 priv->dma_cap.tbs_ch_num);
6528 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6529 priv->dma_cap.sgfsel ? "Y" : "N");
6530 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6531 BIT(priv->dma_cap.ttsfd) >> 1);
6532 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6533 priv->dma_cap.numtc);
6534 seq_printf(seq, "\tDCB Feature: %s\n",
6535 priv->dma_cap.dcben ? "Y" : "N");
6536 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6537 priv->dma_cap.advthword ? "Y" : "N");
6538 seq_printf(seq, "\tPTP Offload: %s\n",
6539 priv->dma_cap.ptoen ? "Y" : "N");
6540 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6541 priv->dma_cap.osten ? "Y" : "N");
6542 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6543 priv->dma_cap.pfcen ? "Y" : "N");
6544 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6545 BIT(priv->dma_cap.frpes) << 6);
6546 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6547 BIT(priv->dma_cap.frpbs) << 6);
6548 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6549 priv->dma_cap.frppipe_num);
6550 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6551 priv->dma_cap.nrvf_num ?
6552 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6553 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6554 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6555 seq_printf(seq, "\tDepth of GCL: %lu\n",
6556 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6557 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6558 priv->dma_cap.cbtisel ? "Y" : "N");
6559 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6560 priv->dma_cap.aux_snapshot_n);
6561 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6562 priv->dma_cap.pou_ost_en ? "Y" : "N");
6563 seq_printf(seq, "\tEnhanced DMA: %s\n",
6564 priv->dma_cap.edma ? "Y" : "N");
6565 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6566 priv->dma_cap.ediffc ? "Y" : "N");
6567 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6568 priv->dma_cap.vxn ? "Y" : "N");
6569 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6570 priv->dma_cap.dbgmem ? "Y" : "N");
6571 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6572 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6573 return 0;
6574 }
6575 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6576
6577 /* Use network device events to rename debugfs file entries.
6578 */
6579 static int stmmac_device_event(struct notifier_block *unused,
6580 unsigned long event, void *ptr)
6581 {
6582 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6583 struct stmmac_priv *priv = netdev_priv(dev);
6584
6585 if (dev->netdev_ops != &stmmac_netdev_ops)
6586 goto done;
6587
6588 switch (event) {
6589 case NETDEV_CHANGENAME:
6590 debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6591 break;
6592 }
6593 done:
6594 return NOTIFY_DONE;
6595 }
6596
6597 static struct notifier_block stmmac_notifier = {
6598 .notifier_call = stmmac_device_event,
6599 };
6600
6601 static void stmmac_init_fs(struct net_device *dev)
6602 {
6603 struct stmmac_priv *priv = netdev_priv(dev);
6604
6605 rtnl_lock();
6606
6607 /* Create per netdev entries */
6608 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6609
6610 /* Entry to report DMA RX/TX rings */
6611 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6612 &stmmac_rings_status_fops);
6613
6614 /* Entry to report the DMA HW features */
6615 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6616 &stmmac_dma_cap_fops);
6617
6618 rtnl_unlock();
6619 }
6620
6621 static void stmmac_exit_fs(struct net_device *dev)
6622 {
6623 struct stmmac_priv *priv = netdev_priv(dev);
6624
6625 debugfs_remove_recursive(priv->dbgfs_dir);
6626 }
6627 #endif /* CONFIG_DEBUG_FS */
6628
6629 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6630 {
6631 unsigned char *data = (unsigned char *)&vid_le;
6632 unsigned char data_byte = 0;
6633 u32 crc = ~0x0;
6634 u32 temp = 0;
6635 int i, bits;
6636
6637 bits = get_bitmask_order(VLAN_VID_MASK);
6638 for (i = 0; i < bits; i++) {
6639 if ((i % 8) == 0)
6640 data_byte = data[i / 8];
6641
6642 temp = ((crc & 1) ^ data_byte) & 1;
6643 crc >>= 1;
6644 data_byte >>= 1;
6645
6646 if (temp)
6647 crc ^= 0xedb88320;
6648 }
6649
6650 return crc;
6651 }
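/* Usage sketch, not part of the driver: stmmac_vlan_update() below folds
 * this CRC into a 16-bucket hash by bit-reversing it and keeping the top
 * four bits, roughly:
 *
 *	__le16 vid_le = cpu_to_le16(vid);
 *	u32 bucket = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;	// 0..15
 *	hash |= (1 << bucket);
 */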
6652
6653 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6654 {
6655 u32 crc, hash = 0;
6656 u16 pmatch = 0;
6657 int count = 0;
6658 u16 vid = 0;
6659
6660 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6661 __le16 vid_le = cpu_to_le16(vid);
6662 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6663 hash |= (1 << crc);
6664 count++;
6665 }
6666
6667 if (!priv->dma_cap.vlhash) {
6668 if (count > 2) /* VID = 0 always passes filter */
6669 return -EOPNOTSUPP;
6670
6671 pmatch = vid;
6672 hash = 0;
6673 }
6674
6675 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6676 }
6677
6678 /* FIXME: This may need RXC to be running, but it may be called with BH
6679 * disabled, which means we can't call phylink_rx_clk_stop*().
6680 */
6681 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6682 {
6683 struct stmmac_priv *priv = netdev_priv(ndev);
6684 bool is_double = false;
6685 int ret;
6686
6687 ret = pm_runtime_resume_and_get(priv->device);
6688 if (ret < 0)
6689 return ret;
6690
6691 if (be16_to_cpu(proto) == ETH_P_8021AD)
6692 is_double = true;
6693
6694 set_bit(vid, priv->active_vlans);
6695 ret = stmmac_vlan_update(priv, is_double);
6696 if (ret) {
6697 clear_bit(vid, priv->active_vlans);
6698 goto err_pm_put;
6699 }
6700
6701 if (priv->hw->num_vlan) {
6702 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6703 if (ret)
6704 goto err_pm_put;
6705 }
6706 err_pm_put:
6707 pm_runtime_put(priv->device);
6708
6709 return ret;
6710 }
6711
6712 /* FIXME: This may need RXC to be running, but it may be called with BH
6713 * disabled, which means we can't call phylink_rx_clk_stop*().
6714 */
6715 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6716 {
6717 struct stmmac_priv *priv = netdev_priv(ndev);
6718 bool is_double = false;
6719 int ret;
6720
6721 ret = pm_runtime_resume_and_get(priv->device);
6722 if (ret < 0)
6723 return ret;
6724
6725 if (be16_to_cpu(proto) == ETH_P_8021AD)
6726 is_double = true;
6727
6728 clear_bit(vid, priv->active_vlans);
6729
6730 if (priv->hw->num_vlan) {
6731 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6732 if (ret)
6733 goto del_vlan_error;
6734 }
6735
6736 ret = stmmac_vlan_update(priv, is_double);
6737
6738 del_vlan_error:
6739 pm_runtime_put(priv->device);
6740
6741 return ret;
6742 }
6743
6744 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6745 {
6746 struct stmmac_priv *priv = netdev_priv(dev);
6747
6748 switch (bpf->command) {
6749 case XDP_SETUP_PROG:
6750 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6751 case XDP_SETUP_XSK_POOL:
6752 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6753 bpf->xsk.queue_id);
6754 default:
6755 return -EOPNOTSUPP;
6756 }
6757 }
6758
6759 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6760 struct xdp_frame **frames, u32 flags)
6761 {
6762 struct stmmac_priv *priv = netdev_priv(dev);
6763 int cpu = smp_processor_id();
6764 struct netdev_queue *nq;
6765 int i, nxmit = 0;
6766 int queue;
6767
6768 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6769 return -ENETDOWN;
6770
6771 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6772 return -EINVAL;
6773
6774 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6775 nq = netdev_get_tx_queue(priv->dev, queue);
6776
6777 __netif_tx_lock(nq, cpu);
6778 /* Avoids TX time-out as we are sharing with slow path */
6779 txq_trans_cond_update(nq);
6780
6781 for (i = 0; i < num_frames; i++) {
6782 int res;
6783
6784 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6785 if (res == STMMAC_XDP_CONSUMED)
6786 break;
6787
6788 nxmit++;
6789 }
6790
6791 if (flags & XDP_XMIT_FLUSH) {
6792 stmmac_flush_tx_descriptors(priv, queue);
6793 stmmac_tx_timer_arm(priv, queue);
6794 }
6795
6796 __netif_tx_unlock(nq);
6797
6798 return nxmit;
6799 }
6800
6801 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6802 {
6803 struct stmmac_channel *ch = &priv->channel[queue];
6804 unsigned long flags;
6805
6806 spin_lock_irqsave(&ch->lock, flags);
6807 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6808 spin_unlock_irqrestore(&ch->lock, flags);
6809
6810 stmmac_stop_rx_dma(priv, queue);
6811 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6812 }
6813
6814 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6815 {
6816 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6817 struct stmmac_channel *ch = &priv->channel[queue];
6818 unsigned long flags;
6819 u32 buf_size;
6820 int ret;
6821
6822 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6823 if (ret) {
6824 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6825 return;
6826 }
6827
6828 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6829 if (ret) {
6830 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6831 netdev_err(priv->dev, "Failed to init RX desc.\n");
6832 return;
6833 }
6834
6835 stmmac_reset_rx_queue(priv, queue);
6836 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6837
6838 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6839 rx_q->dma_rx_phy, rx_q->queue_index);
6840
6841 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6842 sizeof(struct dma_desc));
6843 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6844 rx_q->rx_tail_addr, rx_q->queue_index);
6845
6846 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6847 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6848 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6849 buf_size,
6850 rx_q->queue_index);
6851 } else {
6852 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6853 priv->dma_conf.dma_buf_sz,
6854 rx_q->queue_index);
6855 }
6856
6857 stmmac_start_rx_dma(priv, queue);
6858
6859 spin_lock_irqsave(&ch->lock, flags);
6860 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6861 spin_unlock_irqrestore(&ch->lock, flags);
6862 }
6863
6864 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6865 {
6866 struct stmmac_channel *ch = &priv->channel[queue];
6867 unsigned long flags;
6868
6869 spin_lock_irqsave(&ch->lock, flags);
6870 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6871 spin_unlock_irqrestore(&ch->lock, flags);
6872
6873 stmmac_stop_tx_dma(priv, queue);
6874 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6875 }
6876
6877 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6878 {
6879 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6880 struct stmmac_channel *ch = &priv->channel[queue];
6881 unsigned long flags;
6882 int ret;
6883
6884 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6885 if (ret) {
6886 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6887 return;
6888 }
6889
6890 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6891 if (ret) {
6892 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6893 netdev_err(priv->dev, "Failed to init TX desc.\n");
6894 return;
6895 }
6896
6897 stmmac_reset_tx_queue(priv, queue);
6898 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6899
6900 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6901 tx_q->dma_tx_phy, tx_q->queue_index);
6902
6903 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6904 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6905
6906 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6907 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6908 tx_q->tx_tail_addr, tx_q->queue_index);
6909
6910 stmmac_start_tx_dma(priv, queue);
6911
6912 spin_lock_irqsave(&ch->lock, flags);
6913 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6914 spin_unlock_irqrestore(&ch->lock, flags);
6915 }
6916
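/**
 * stmmac_xdp_release - tear down the datapath for an XDP reconfiguration
 * @dev: network device pointer
 * Description: stops the TX queues and NAPI instances, cancels the TX
 * coalescing timers, frees the IRQ lines, stops all DMA channels, releases
 * the descriptor resources and disables the MAC, leaving the carrier off.
 */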
6917 void stmmac_xdp_release(struct net_device *dev)
6918 {
6919 struct stmmac_priv *priv = netdev_priv(dev);
6920 u32 chan;
6921
6922 /* Ensure tx function is not running */
6923 netif_tx_disable(dev);
6924
6925 /* Disable NAPI process */
6926 stmmac_disable_all_queues(priv);
6927
6928 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6929 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6930
6931 /* Free the IRQ lines */
6932 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6933
6934 /* Stop TX/RX DMA channels */
6935 stmmac_stop_all_dma(priv);
6936
6937 /* Release and free the Rx/Tx resources */
6938 free_dma_desc_resources(priv, &priv->dma_conf);
6939
6940 /* Disable the MAC Rx/Tx */
6941 stmmac_mac_set(priv, priv->ioaddr, false);
6942
6943 /* set trans_start so we don't get spurious
6944 * watchdogs during reset
6945 */
6946 netif_trans_update(dev);
6947 netif_carrier_off(dev);
6948 }
6949
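/**
 * stmmac_xdp_open - bring the datapath back up after an XDP reconfiguration
 * @dev: network device pointer
 * Description: re-allocates and initialises the descriptor rings, programs
 * the DMA CSR, RX and TX channels (using the XSK frame size and split header
 * settings where applicable), enables the MAC, starts DMA, requests the IRQ
 * lines and re-enables NAPI and the TX queues.
 * Return: 0 on success, otherwise a negative errno.
 */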
6950 int stmmac_xdp_open(struct net_device *dev)
6951 {
6952 struct stmmac_priv *priv = netdev_priv(dev);
6953 u32 rx_cnt = priv->plat->rx_queues_to_use;
6954 u32 tx_cnt = priv->plat->tx_queues_to_use;
6955 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6956 struct stmmac_rx_queue *rx_q;
6957 struct stmmac_tx_queue *tx_q;
6958 u32 buf_size;
6959 bool sph_en;
6960 u32 chan;
6961 int ret;
6962
6963 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6964 if (ret < 0) {
6965 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6966 __func__);
6967 goto dma_desc_error;
6968 }
6969
6970 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6971 if (ret < 0) {
6972 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6973 __func__);
6974 goto init_error;
6975 }
6976
6977 stmmac_reset_queues_param(priv);
6978
6979 /* DMA CSR Channel configuration */
6980 for (chan = 0; chan < dma_csr_ch; chan++) {
6981 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6982 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6983 }
6984
6985 /* Adjust Split header */
6986 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6987
6988 /* DMA RX Channel Configuration */
6989 for (chan = 0; chan < rx_cnt; chan++) {
6990 rx_q = &priv->dma_conf.rx_queue[chan];
6991
6992 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6993 rx_q->dma_rx_phy, chan);
6994
6995 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6996 (rx_q->buf_alloc_num *
6997 sizeof(struct dma_desc));
6998 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6999 rx_q->rx_tail_addr, chan);
7000
7001 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
7002 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
7003 stmmac_set_dma_bfsize(priv, priv->ioaddr,
7004 buf_size,
7005 rx_q->queue_index);
7006 } else {
7007 stmmac_set_dma_bfsize(priv, priv->ioaddr,
7008 priv->dma_conf.dma_buf_sz,
7009 rx_q->queue_index);
7010 }
7011
7012 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
7013 }
7014
7015 /* DMA TX Channel Configuration */
7016 for (chan = 0; chan < tx_cnt; chan++) {
7017 tx_q = &priv->dma_conf.tx_queue[chan];
7018
7019 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7020 tx_q->dma_tx_phy, chan);
7021
7022 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7023 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7024 tx_q->tx_tail_addr, chan);
7025
7026 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7027 }
7028
7029 /* Enable the MAC Rx/Tx */
7030 stmmac_mac_set(priv, priv->ioaddr, true);
7031
7032 /* Start Rx & Tx DMA Channels */
7033 stmmac_start_all_dma(priv);
7034
7035 ret = stmmac_request_irq(dev);
7036 if (ret)
7037 goto irq_error;
7038
7039 /* Enable NAPI process */
7040 stmmac_enable_all_queues(priv);
7041 netif_carrier_on(dev);
7042 netif_tx_start_all_queues(dev);
7043 stmmac_enable_all_dma_irq(priv);
7044
7045 return 0;
7046
7047 irq_error:
7048 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7049 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7050
7051 stmmac_hw_teardown(dev);
7052 init_error:
7053 free_dma_desc_resources(priv, &priv->dma_conf);
7054 dma_desc_error:
7055 return ret;
7056 }
7057
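/**
 * stmmac_xsk_wakeup - ndo_xsk_wakeup callback for AF_XDP zero-copy
 * @dev: network device pointer
 * @queue: queue index to wake
 * @flags: wakeup flags (unused here)
 * Description: validates that the interface is up, XDP is enabled and the
 * queue has an XSK pool attached, then schedules the channel rxtx NAPI if it
 * is not already running, since there is no per-channel SW interrupt to kick.
 * Return: 0 on success, otherwise a negative errno.
 */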
7058 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7059 {
7060 struct stmmac_priv *priv = netdev_priv(dev);
7061 struct stmmac_rx_queue *rx_q;
7062 struct stmmac_tx_queue *tx_q;
7063 struct stmmac_channel *ch;
7064
7065 if (test_bit(STMMAC_DOWN, &priv->state) ||
7066 !netif_carrier_ok(priv->dev))
7067 return -ENETDOWN;
7068
7069 if (!stmmac_xdp_is_enabled(priv))
7070 return -EINVAL;
7071
7072 if (queue >= priv->plat->rx_queues_to_use ||
7073 queue >= priv->plat->tx_queues_to_use)
7074 return -EINVAL;
7075
7076 rx_q = &priv->dma_conf.rx_queue[queue];
7077 tx_q = &priv->dma_conf.tx_queue[queue];
7078 ch = &priv->channel[queue];
7079
7080 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7081 return -EINVAL;
7082
7083 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7084 /* EQoS does not have per-DMA channel SW interrupt,
7085 * so we schedule RX NAPI straight away.
7086 */
7087 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7088 __napi_schedule(&ch->rxtx_napi);
7089 }
7090
7091 return 0;
7092 }
7093
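/* ndo_get_stats64 callback: aggregate the per-queue TX/RX packet and byte
 * counters under their u64_stats retry loops and copy the error counters
 * accumulated in priv->xstats.
 */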
7094 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7095 {
7096 struct stmmac_priv *priv = netdev_priv(dev);
7097 u32 tx_cnt = priv->plat->tx_queues_to_use;
7098 u32 rx_cnt = priv->plat->rx_queues_to_use;
7099 unsigned int start;
7100 int q;
7101
7102 for (q = 0; q < tx_cnt; q++) {
7103 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7104 u64 tx_packets;
7105 u64 tx_bytes;
7106
7107 do {
7108 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7109 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
7110 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7111 do {
7112 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7113 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7114 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7115
7116 stats->tx_packets += tx_packets;
7117 stats->tx_bytes += tx_bytes;
7118 }
7119
7120 for (q = 0; q < rx_cnt; q++) {
7121 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7122 u64 rx_packets;
7123 u64 rx_bytes;
7124
7125 do {
7126 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7127 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7128 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
7129 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7130
7131 stats->rx_packets += rx_packets;
7132 stats->rx_bytes += rx_bytes;
7133 }
7134
7135 stats->rx_dropped = priv->xstats.rx_dropped;
7136 stats->rx_errors = priv->xstats.rx_errors;
7137 stats->tx_dropped = priv->xstats.tx_dropped;
7138 stats->tx_errors = priv->xstats.tx_errors;
7139 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7140 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7141 stats->rx_length_errors = priv->xstats.rx_length;
7142 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7143 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7144 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7145 }
7146
7147 static const struct net_device_ops stmmac_netdev_ops = {
7148 .ndo_open = stmmac_open,
7149 .ndo_start_xmit = stmmac_xmit,
7150 .ndo_stop = stmmac_release,
7151 .ndo_change_mtu = stmmac_change_mtu,
7152 .ndo_fix_features = stmmac_fix_features,
7153 .ndo_set_features = stmmac_set_features,
7154 .ndo_set_rx_mode = stmmac_set_rx_mode,
7155 .ndo_tx_timeout = stmmac_tx_timeout,
7156 .ndo_eth_ioctl = stmmac_ioctl,
7157 .ndo_get_stats64 = stmmac_get_stats64,
7158 .ndo_setup_tc = stmmac_setup_tc,
7159 .ndo_select_queue = stmmac_select_queue,
7160 .ndo_set_mac_address = stmmac_set_mac_address,
7161 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7162 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7163 .ndo_bpf = stmmac_bpf,
7164 .ndo_xdp_xmit = stmmac_xdp_xmit,
7165 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7166 };
7167
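/* If a reset has been requested and the interface is not already down,
 * recover the adapter by closing and re-opening it under the rtnl lock.
 */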
7168 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7169 {
7170 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7171 return;
7172 if (test_bit(STMMAC_DOWN, &priv->state))
7173 return;
7174
7175 netdev_err(priv->dev, "Reset adapter.\n");
7176
7177 rtnl_lock();
7178 netif_trans_update(priv->dev);
7179 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7180 usleep_range(1000, 2000);
7181
7182 set_bit(STMMAC_DOWN, &priv->state);
7183 dev_close(priv->dev);
7184 dev_open(priv->dev, NULL);
7185 clear_bit(STMMAC_DOWN, &priv->state);
7186 clear_bit(STMMAC_RESETING, &priv->state);
7187 rtnl_unlock();
7188 }
7189
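/* Deferred work used for housekeeping that cannot run in interrupt context;
 * currently it only handles the reset subtask.
 */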
7190 static void stmmac_service_task(struct work_struct *work)
7191 {
7192 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7193 service_task);
7194
7195 stmmac_reset_subtask(priv);
7196 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7197 }
7198
7199 /**
7200 * stmmac_hw_init - Init the MAC device
7201 * @priv: driver private structure
7202 * Description: this function is to configure the MAC device according to
7203 * some platform parameters or the HW capability register. It prepares the
7204 * driver to use either ring or chain modes and to set up either enhanced or
7205 * normal descriptors.
7206 */
7207 static int stmmac_hw_init(struct stmmac_priv *priv)
7208 {
7209 int ret;
7210
7211 /* dwmac-sun8i only works in chain mode */
7212 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7213 chain_mode = 1;
7214 priv->chain_mode = chain_mode;
7215
7216 /* Initialize HW Interface */
7217 ret = stmmac_hwif_init(priv);
7218 if (ret)
7219 return ret;
7220
7221 /* Get the HW capability (new GMAC newer than 3.50a) */
7222 priv->hw_cap_support = stmmac_get_hw_features(priv);
7223 if (priv->hw_cap_support) {
7224 dev_info(priv->device, "DMA HW capability register supported\n");
7225
7226 /* We can override some gmac/dma configuration fields (e.g.
7227 * enh_desc and tx_coe that are passed through the
7228 * platform) with the values from the HW capability
7229 * register (if supported).
7230 */
7231 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7232 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7233 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7234 priv->hw->pmt = priv->plat->pmt;
7235 if (priv->dma_cap.hash_tb_sz) {
7236 priv->hw->multicast_filter_bins =
7237 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7238 priv->hw->mcast_bits_log2 =
7239 ilog2(priv->hw->multicast_filter_bins);
7240 }
7241
7242 /* TXCOE doesn't work in thresh DMA mode */
7243 if (priv->plat->force_thresh_dma_mode)
7244 priv->plat->tx_coe = 0;
7245 else
7246 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7247
7248 /* In case of GMAC4 rx_coe is from HW cap register. */
7249 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7250
7251 if (priv->dma_cap.rx_coe_type2)
7252 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7253 else if (priv->dma_cap.rx_coe_type1)
7254 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7255
7256 } else {
7257 dev_info(priv->device, "No HW DMA feature register supported\n");
7258 }
7259
7260 if (priv->plat->rx_coe) {
7261 priv->hw->rx_csum = priv->plat->rx_coe;
7262 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7263 if (priv->synopsys_id < DWMAC_CORE_4_00)
7264 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7265 }
7266 if (priv->plat->tx_coe)
7267 dev_info(priv->device, "TX Checksum insertion supported\n");
7268
7269 if (priv->plat->pmt) {
7270 dev_info(priv->device, "Wake-Up On Lan supported\n");
7271 device_set_wakeup_capable(priv->device, 1);
7272 }
7273
7274 if (priv->dma_cap.tsoen)
7275 dev_info(priv->device, "TSO supported\n");
7276
7277 if (priv->dma_cap.number_rx_queues &&
7278 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7279 dev_warn(priv->device,
7280 "Number of Rx queues (%u) exceeds dma capability\n",
7281 priv->plat->rx_queues_to_use);
7282 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7283 }
7284 if (priv->dma_cap.number_tx_queues &&
7285 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7286 dev_warn(priv->device,
7287 "Number of Tx queues (%u) exceeds dma capability\n",
7288 priv->plat->tx_queues_to_use);
7289 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7290 }
7291
7292 if (priv->dma_cap.rx_fifo_size &&
7293 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7294 dev_warn(priv->device,
7295 "Rx FIFO size (%u) exceeds dma capability\n",
7296 priv->plat->rx_fifo_size);
7297 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7298 }
7299 if (priv->dma_cap.tx_fifo_size &&
7300 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7301 dev_warn(priv->device,
7302 "Tx FIFO size (%u) exceeds dma capability\n",
7303 priv->plat->tx_fifo_size);
7304 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7305 }
7306
7307 priv->hw->vlan_fail_q_en =
7308 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7309 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7310
7311 /* Run HW quirks, if any */
7312 if (priv->hwif_quirks) {
7313 ret = priv->hwif_quirks(priv);
7314 if (ret)
7315 return ret;
7316 }
7317
7318 /* Rx Watchdog is available in cores newer than 3.40.
7319 * In some cases, for example on buggy HW, this feature
7320 * has to be disabled; this can be done by passing the
7321 * riwt_off field from the platform.
7322 */
7323 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7324 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7325 priv->use_riwt = 1;
7326 dev_info(priv->device,
7327 "Enable RX Mitigation via HW Watchdog Timer\n");
7328 }
7329
7330 return 0;
7331 }
7332
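/* Register the RX, TX and combined RX/TX NAPI instances for every channel,
 * according to the number of RX/TX queues in use.
 */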
7333 static void stmmac_napi_add(struct net_device *dev)
7334 {
7335 struct stmmac_priv *priv = netdev_priv(dev);
7336 u32 queue, maxq;
7337
7338 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7339
7340 for (queue = 0; queue < maxq; queue++) {
7341 struct stmmac_channel *ch = &priv->channel[queue];
7342
7343 ch->priv_data = priv;
7344 ch->index = queue;
7345 spin_lock_init(&ch->lock);
7346
7347 if (queue < priv->plat->rx_queues_to_use) {
7348 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7349 }
7350 if (queue < priv->plat->tx_queues_to_use) {
7351 netif_napi_add_tx(dev, &ch->tx_napi,
7352 stmmac_napi_poll_tx);
7353 }
7354 if (queue < priv->plat->rx_queues_to_use &&
7355 queue < priv->plat->tx_queues_to_use) {
7356 netif_napi_add(dev, &ch->rxtx_napi,
7357 stmmac_napi_poll_rxtx);
7358 }
7359 }
7360 }
7361
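/* Unregister the NAPI instances added by stmmac_napi_add(). */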
7362 static void stmmac_napi_del(struct net_device *dev)
7363 {
7364 struct stmmac_priv *priv = netdev_priv(dev);
7365 u32 queue, maxq;
7366
7367 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7368
7369 for (queue = 0; queue < maxq; queue++) {
7370 struct stmmac_channel *ch = &priv->channel[queue];
7371
7372 if (queue < priv->plat->rx_queues_to_use)
7373 netif_napi_del(&ch->rx_napi);
7374 if (queue < priv->plat->tx_queues_to_use)
7375 netif_napi_del(&ch->tx_napi);
7376 if (queue < priv->plat->rx_queues_to_use &&
7377 queue < priv->plat->tx_queues_to_use) {
7378 netif_napi_del(&ch->rxtx_napi);
7379 }
7380 }
7381 }
7382
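/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: network device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: stops the interface if it is running, re-registers the NAPI
 * contexts for the new queue counts, refreshes the default RSS table unless
 * it was user-configured, and re-opens the interface.
 * Return: 0 on success, otherwise a negative errno from stmmac_open().
 */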
7383 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7384 {
7385 struct stmmac_priv *priv = netdev_priv(dev);
7386 int ret = 0, i;
7387
7388 if (netif_running(dev))
7389 stmmac_release(dev);
7390
7391 stmmac_napi_del(dev);
7392
7393 priv->plat->rx_queues_to_use = rx_cnt;
7394 priv->plat->tx_queues_to_use = tx_cnt;
7395 if (!netif_is_rxfh_configured(dev))
7396 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7397 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7398 rx_cnt);
7399
7400 stmmac_napi_add(dev);
7401
7402 if (netif_running(dev))
7403 ret = stmmac_open(dev);
7404
7405 return ret;
7406 }
7407
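/**
 * stmmac_reinit_ringparam - change the DMA descriptor ring sizes
 * @dev: network device pointer
 * @rx_size: new RX ring size
 * @tx_size: new TX ring size
 * Description: stops the interface if it is running, updates the ring sizes
 * and re-opens the interface so that the rings are re-allocated.
 * Return: 0 on success, otherwise a negative errno from stmmac_open().
 */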
7408 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7409 {
7410 struct stmmac_priv *priv = netdev_priv(dev);
7411 int ret = 0;
7412
7413 if (netif_running(dev))
7414 stmmac_release(dev);
7415
7416 priv->dma_conf.dma_rx_size = rx_size;
7417 priv->dma_conf.dma_tx_size = tx_size;
7418
7419 if (netif_running(dev))
7420 ret = stmmac_open(dev);
7421
7422 return ret;
7423 }
7424
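/* XDP metadata op: report the RX hardware timestamp of a frame. On GMAC4 and
 * XGMAC the timestamp lives in the context (next) descriptor; the CDC error
 * adjustment is subtracted before returning the value.
 */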
7425 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7426 {
7427 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7428 struct dma_desc *desc_contains_ts = ctx->desc;
7429 struct stmmac_priv *priv = ctx->priv;
7430 struct dma_desc *ndesc = ctx->ndesc;
7431 struct dma_desc *desc = ctx->desc;
7432 u64 ns = 0;
7433
7434 if (!priv->hwts_rx_en)
7435 return -ENODATA;
7436
7437 /* For GMAC4, the valid timestamp is from CTX next desc. */
7438 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7439 desc_contains_ts = ndesc;
7440
7441 /* Check if timestamp is available */
7442 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7443 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7444 ns -= priv->plat->cdc_error_adj;
7445 *timestamp = ns_to_ktime(ns);
7446 return 0;
7447 }
7448
7449 return -ENODATA;
7450 }
7451
7452 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7453 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7454 };
7455
7456 /**
7457 * stmmac_dvr_probe
7458 * @device: device pointer
7459 * @plat_dat: platform data pointer
7460 * @res: stmmac resource pointer
7461 * Description: this is the main probe function used to
7462 * call alloc_etherdev and allocate the private structure.
7463 * Return:
7464 * 0 on success, otherwise a negative errno.
7465 */
7466 int stmmac_dvr_probe(struct device *device,
7467 struct plat_stmmacenet_data *plat_dat,
7468 struct stmmac_resources *res)
7469 {
7470 struct net_device *ndev = NULL;
7471 struct stmmac_priv *priv;
7472 u32 rxq;
7473 int i, ret = 0;
7474
7475 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7476 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7477 if (!ndev)
7478 return -ENOMEM;
7479
7480 SET_NETDEV_DEV(ndev, device);
7481
7482 priv = netdev_priv(ndev);
7483 priv->device = device;
7484 priv->dev = ndev;
7485
7486 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7487 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7488 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7489 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7490 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7491 }
7492
7493 priv->xstats.pcpu_stats =
7494 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7495 if (!priv->xstats.pcpu_stats)
7496 return -ENOMEM;
7497
7498 stmmac_set_ethtool_ops(ndev);
7499 priv->pause_time = pause;
7500 priv->plat = plat_dat;
7501 priv->ioaddr = res->addr;
7502 priv->dev->base_addr = (unsigned long)res->addr;
7503 priv->plat->dma_cfg->multi_msi_en =
7504 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7505
7506 priv->dev->irq = res->irq;
7507 priv->wol_irq = res->wol_irq;
7508 priv->lpi_irq = res->lpi_irq;
7509 priv->sfty_irq = res->sfty_irq;
7510 priv->sfty_ce_irq = res->sfty_ce_irq;
7511 priv->sfty_ue_irq = res->sfty_ue_irq;
7512 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7513 priv->rx_irq[i] = res->rx_irq[i];
7514 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7515 priv->tx_irq[i] = res->tx_irq[i];
7516
7517 if (!is_zero_ether_addr(res->mac))
7518 eth_hw_addr_set(priv->dev, res->mac);
7519
7520 dev_set_drvdata(device, priv->dev);
7521
7522 /* Verify driver arguments */
7523 stmmac_verify_args();
7524
7525 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7526 if (!priv->af_xdp_zc_qps)
7527 return -ENOMEM;
7528
7529 /* Allocate workqueue */
7530 priv->wq = create_singlethread_workqueue("stmmac_wq");
7531 if (!priv->wq) {
7532 dev_err(priv->device, "failed to create workqueue\n");
7533 ret = -ENOMEM;
7534 goto error_wq_init;
7535 }
7536
7537 INIT_WORK(&priv->service_task, stmmac_service_task);
7538
7539 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7540
7541 /* Override with kernel parameters if supplied XXX CRS XXX
7542 * this needs to have multiple instances
7543 */
7544 if ((phyaddr >= 0) && (phyaddr <= 31))
7545 priv->plat->phy_addr = phyaddr;
7546
7547 if (priv->plat->stmmac_rst) {
7548 ret = reset_control_assert(priv->plat->stmmac_rst);
7549 reset_control_deassert(priv->plat->stmmac_rst);
7550 /* Some reset controllers have only a reset callback instead of
7551 * an assert + deassert callback pair.
7552 */
7553 if (ret == -ENOTSUPP)
7554 reset_control_reset(priv->plat->stmmac_rst);
7555 }
7556
7557 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7558 if (ret == -ENOTSUPP)
7559 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7560 ERR_PTR(ret));
7561
7562 /* Wait a bit for the reset to take effect */
7563 udelay(10);
7564
7565 /* Init MAC and get the capabilities */
7566 ret = stmmac_hw_init(priv);
7567 if (ret)
7568 goto error_hw_init;
7569
7570 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7571 */
7572 if (priv->synopsys_id < DWMAC_CORE_5_20)
7573 priv->plat->dma_cfg->dche = false;
7574
7575 stmmac_check_ether_addr(priv);
7576
7577 ndev->netdev_ops = &stmmac_netdev_ops;
7578
7579 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7580 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7581
7582 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7583 NETIF_F_RXCSUM;
7584 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7585 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7586
7587 ret = stmmac_tc_init(priv, priv);
7588 if (!ret) {
7589 ndev->hw_features |= NETIF_F_HW_TC;
7590 }
7591
7592 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7593 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7594 if (priv->plat->has_gmac4)
7595 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7596 priv->tso = true;
7597 dev_info(priv->device, "TSO feature enabled\n");
7598 }
7599
7600 if (priv->dma_cap.sphen &&
7601 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7602 ndev->hw_features |= NETIF_F_GRO;
7603 priv->sph_cap = true;
7604 priv->sph = priv->sph_cap;
7605 dev_info(priv->device, "SPH feature enabled\n");
7606 }
7607
7608 /* Ideally our host DMA address width is the same as for the
7609 * device. However, it may differ and then we have to use our
7610 * host DMA width for allocation and the device DMA width for
7611 * register handling.
7612 */
7613 if (priv->plat->host_dma_width)
7614 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7615 else
7616 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7617
7618 if (priv->dma_cap.host_dma_width) {
7619 ret = dma_set_mask_and_coherent(device,
7620 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7621 if (!ret) {
7622 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7623 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7624
7625 /*
7626 * If more than 32 bits can be addressed, make sure to
7627 * enable enhanced addressing mode.
7628 */
7629 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7630 priv->plat->dma_cfg->eame = true;
7631 } else {
7632 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7633 if (ret) {
7634 dev_err(priv->device, "Failed to set DMA Mask\n");
7635 goto error_hw_init;
7636 }
7637
7638 priv->dma_cap.host_dma_width = 32;
7639 }
7640 }
7641
7642 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7643 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7644 #ifdef STMMAC_VLAN_TAG_USED
7645 /* Both mac100 and gmac support receive VLAN tag detection */
7646 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7647 if (priv->plat->has_gmac4) {
7648 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7649 priv->hw->hw_vlan_en = true;
7650 }
7651 if (priv->dma_cap.vlhash) {
7652 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7653 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7654 }
7655 if (priv->dma_cap.vlins) {
7656 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7657 if (priv->dma_cap.dvlan)
7658 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7659 }
7660 #endif
7661 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7662
7663 priv->xstats.threshold = tc;
7664
7665 /* Initialize RSS */
7666 rxq = priv->plat->rx_queues_to_use;
7667 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7668 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7669 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7670
7671 if (priv->dma_cap.rssen && priv->plat->rss_en)
7672 ndev->features |= NETIF_F_RXHASH;
7673
7674 ndev->vlan_features |= ndev->features;
7675
7676 /* MTU range: 46 - hw-specific max */
7677 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7678 if (priv->plat->has_xgmac)
7679 ndev->max_mtu = XGMAC_JUMBO_LEN;
7680 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7681 ndev->max_mtu = JUMBO_LEN;
7682 else
7683 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7684 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7685 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7686 */
7687 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7688 (priv->plat->maxmtu >= ndev->min_mtu))
7689 ndev->max_mtu = priv->plat->maxmtu;
7690 else if (priv->plat->maxmtu < ndev->min_mtu)
7691 dev_warn(priv->device,
7692 "%s: warning: maxmtu having invalid value (%d)\n",
7693 __func__, priv->plat->maxmtu);
7694
7695 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7696
7697 /* Setup channels NAPI */
7698 stmmac_napi_add(ndev);
7699
7700 mutex_init(&priv->lock);
7701
7702 stmmac_fpe_init(priv);
7703
7704 /* If a specific clk_csr value is passed from the platform
7705 * this means that the CSR Clock Range selection cannot be
7706 * changed at run-time and it is fixed. Otherwise, the driver will try to
7707 * set the MDC clock dynamically according to the actual CSR
7708 * clock input.
7709 */
7710 if (priv->plat->clk_csr >= 0)
7711 priv->clk_csr = priv->plat->clk_csr;
7712 else
7713 stmmac_clk_csr_set(priv);
7714
7715 stmmac_check_pcs_mode(priv);
7716
7717 pm_runtime_get_noresume(device);
7718 pm_runtime_set_active(device);
7719 if (!pm_runtime_enabled(device))
7720 pm_runtime_enable(device);
7721
7722 ret = stmmac_mdio_register(ndev);
7723 if (ret < 0) {
7724 dev_err_probe(priv->device, ret,
7725 "MDIO bus (id: %d) registration failed\n",
7726 priv->plat->bus_id);
7727 goto error_mdio_register;
7728 }
7729
7730 if (priv->plat->speed_mode_2500)
7731 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7732
7733 ret = stmmac_pcs_setup(ndev);
7734 if (ret)
7735 goto error_pcs_setup;
7736
7737 ret = stmmac_phy_setup(priv);
7738 if (ret) {
7739 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7740 goto error_phy_setup;
7741 }
7742
7743 ret = register_netdev(ndev);
7744 if (ret) {
7745 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7746 __func__, ret);
7747 goto error_netdev_register;
7748 }
7749
7750 #ifdef CONFIG_DEBUG_FS
7751 stmmac_init_fs(ndev);
7752 #endif
7753
7754 if (priv->plat->dump_debug_regs)
7755 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7756
7757 /* Let pm_runtime_put() disable the clocks.
7758 * If CONFIG_PM is not enabled, the clocks will stay powered.
7759 */
7760 pm_runtime_put(device);
7761
7762 return ret;
7763
7764 error_netdev_register:
7765 phylink_destroy(priv->phylink);
7766 error_phy_setup:
7767 stmmac_pcs_clean(ndev);
7768 error_pcs_setup:
7769 stmmac_mdio_unregister(ndev);
7770 error_mdio_register:
7771 stmmac_napi_del(ndev);
7772 error_hw_init:
7773 destroy_workqueue(priv->wq);
7774 error_wq_init:
7775 bitmap_free(priv->af_xdp_zc_qps);
7776
7777 return ret;
7778 }
7779 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7780
7781 /**
7782 * stmmac_dvr_remove
7783 * @dev: device pointer
7784 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7785 * changes the link status, and releases the DMA descriptor rings.
7786 */
7787 void stmmac_dvr_remove(struct device *dev)
7788 {
7789 struct net_device *ndev = dev_get_drvdata(dev);
7790 struct stmmac_priv *priv = netdev_priv(ndev);
7791
7792 netdev_info(priv->dev, "%s: removing driver\n", __func__);
7793
7794 pm_runtime_get_sync(dev);
7795
7796 unregister_netdev(ndev);
7797
7798 #ifdef CONFIG_DEBUG_FS
7799 stmmac_exit_fs(ndev);
7800 #endif
7801 phylink_destroy(priv->phylink);
7802 if (priv->plat->stmmac_rst)
7803 reset_control_assert(priv->plat->stmmac_rst);
7804 reset_control_assert(priv->plat->stmmac_ahb_rst);
7805
7806 stmmac_pcs_clean(ndev);
7807 stmmac_mdio_unregister(ndev);
7808
7809 destroy_workqueue(priv->wq);
7810 mutex_destroy(&priv->lock);
7811 bitmap_free(priv->af_xdp_zc_qps);
7812
7813 pm_runtime_disable(dev);
7814 pm_runtime_put_noidle(dev);
7815 }
7816 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7817
7818 /**
7819 * stmmac_suspend - suspend callback
7820 * @dev: device pointer
7821 * Description: this is the function to suspend the device and it is called
7822 * by the platform driver to stop the network queue, release the resources,
7823 * program the PMT register (for WoL), clean and release driver resources.
7824 */
7825 int stmmac_suspend(struct device *dev)
7826 {
7827 struct net_device *ndev = dev_get_drvdata(dev);
7828 struct stmmac_priv *priv = netdev_priv(ndev);
7829 u32 chan;
7830
7831 if (!ndev || !netif_running(ndev))
7832 return 0;
7833
7834 mutex_lock(&priv->lock);
7835
7836 netif_device_detach(ndev);
7837
7838 stmmac_disable_all_queues(priv);
7839
7840 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7841 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7842
7843 if (priv->eee_sw_timer_en) {
7844 priv->tx_path_in_lpi_mode = false;
7845 del_timer_sync(&priv->eee_ctrl_timer);
7846 }
7847
7848 /* Stop TX/RX DMA */
7849 stmmac_stop_all_dma(priv);
7850
7851 if (priv->plat->serdes_powerdown)
7852 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7853
7854 /* Enable Power down mode by programming the PMT regs */
7855 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7856 stmmac_pmt(priv, priv->hw, priv->wolopts);
7857 priv->irq_wake = 1;
7858 } else {
7859 stmmac_mac_set(priv, priv->ioaddr, false);
7860 pinctrl_pm_select_sleep_state(priv->device);
7861 }
7862
7863 mutex_unlock(&priv->lock);
7864
7865 rtnl_lock();
7866 if (device_may_wakeup(priv->device) && !priv->plat->pmt)
7867 phylink_speed_down(priv->phylink, false);
7868
7869 phylink_suspend(priv->phylink,
7870 device_may_wakeup(priv->device) && priv->plat->pmt);
7871 rtnl_unlock();
7872
7873 if (stmmac_fpe_supported(priv))
7874 timer_shutdown_sync(&priv->fpe_cfg.verify_timer);
7875
7876 return 0;
7877 }
7878 EXPORT_SYMBOL_GPL(stmmac_suspend);
7879
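/* Reset the software ring indices of an RX queue. */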
7880 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7881 {
7882 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7883
7884 rx_q->cur_rx = 0;
7885 rx_q->dirty_rx = 0;
7886 }
7887
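/* Reset the software ring indices, the cached MSS value and the BQL state
 * of a TX queue.
 */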
7888 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7889 {
7890 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7891
7892 tx_q->cur_tx = 0;
7893 tx_q->dirty_tx = 0;
7894 tx_q->mss = 0;
7895
7896 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7897 }
7898
7899 /**
7900 * stmmac_reset_queues_param - reset queue parameters
7901 * @priv: device pointer
7902 */
7903 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7904 {
7905 u32 rx_cnt = priv->plat->rx_queues_to_use;
7906 u32 tx_cnt = priv->plat->tx_queues_to_use;
7907 u32 queue;
7908
7909 for (queue = 0; queue < rx_cnt; queue++)
7910 stmmac_reset_rx_queue(priv, queue);
7911
7912 for (queue = 0; queue < tx_cnt; queue++)
7913 stmmac_reset_tx_queue(priv, queue);
7914 }
7915
7916 /**
7917 * stmmac_resume - resume callback
7918 * @dev: device pointer
7919 * Description: when resuming, this function is invoked to set up the DMA and CORE
7920 * in a usable state.
7921 */
7922 int stmmac_resume(struct device *dev)
7923 {
7924 struct net_device *ndev = dev_get_drvdata(dev);
7925 struct stmmac_priv *priv = netdev_priv(ndev);
7926 int ret;
7927
7928 if (!netif_running(ndev))
7929 return 0;
7930
7931 /* The Power Down bit in the PM register is cleared
7932 * automatically as soon as a magic packet or a Wake-up frame
7933 * is received. Anyway, it's better to manually clear
7934 * this bit because it can generate problems while resuming
7935 * from other devices (e.g. a serial console).
7936 */
7937 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7938 mutex_lock(&priv->lock);
7939 stmmac_pmt(priv, priv->hw, 0);
7940 mutex_unlock(&priv->lock);
7941 priv->irq_wake = 0;
7942 } else {
7943 pinctrl_pm_select_default_state(priv->device);
7944 /* reset the phy so that it's ready */
7945 if (priv->mii)
7946 stmmac_mdio_reset(priv->mii);
7947 }
7948
7949 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7950 priv->plat->serdes_powerup) {
7951 ret = priv->plat->serdes_powerup(ndev,
7952 priv->plat->bsp_priv);
7953
7954 if (ret < 0)
7955 return ret;
7956 }
7957
7958 rtnl_lock();
7959
7960 /* Prepare the PHY to resume, ensuring that its clocks, which are
7961 * necessary for the MAC DMA reset to complete, are running
7962 */
7963 phylink_prepare_resume(priv->phylink);
7964
7965 mutex_lock(&priv->lock);
7966
7967 stmmac_reset_queues_param(priv);
7968
7969 stmmac_free_tx_skbufs(priv);
7970 stmmac_clear_descriptors(priv, &priv->dma_conf);
7971
7972 stmmac_hw_setup(ndev, false);
7973 stmmac_init_coalesce(priv);
7974 phylink_rx_clk_stop_block(priv->phylink);
7975 stmmac_set_rx_mode(ndev);
7976
7977 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7978 phylink_rx_clk_stop_unblock(priv->phylink);
7979
7980 stmmac_enable_all_queues(priv);
7981 stmmac_enable_all_dma_irq(priv);
7982
7983 mutex_unlock(&priv->lock);
7984
7985 /* phylink_resume() must be called after the hardware has been
7986 * initialised because it may bring the link up immediately in a
7987 * workqueue thread, which will race with initialisation.
7988 */
7989 phylink_resume(priv->phylink);
7990 if (device_may_wakeup(priv->device) && !priv->plat->pmt)
7991 phylink_speed_up(priv->phylink);
7992
7993 rtnl_unlock();
7994
7995 netif_device_attach(ndev);
7996
7997 return 0;
7998 }
7999 EXPORT_SYMBOL_GPL(stmmac_resume);
8000
8001 #ifndef MODULE
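/* Parse the "stmmaceth=" boot argument: a comma-separated list of key:value
 * pairs, e.g. stmmaceth=debug:16,phyaddr:1 (example values). Only available
 * when the driver is built-in.
 */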
8002 static int __init stmmac_cmdline_opt(char *str)
8003 {
8004 char *opt;
8005
8006 if (!str || !*str)
8007 return 1;
8008 while ((opt = strsep(&str, ",")) != NULL) {
8009 if (!strncmp(opt, "debug:", 6)) {
8010 if (kstrtoint(opt + 6, 0, &debug))
8011 goto err;
8012 } else if (!strncmp(opt, "phyaddr:", 8)) {
8013 if (kstrtoint(opt + 8, 0, &phyaddr))
8014 goto err;
8015 } else if (!strncmp(opt, "tc:", 3)) {
8016 if (kstrtoint(opt + 3, 0, &tc))
8017 goto err;
8018 } else if (!strncmp(opt, "watchdog:", 9)) {
8019 if (kstrtoint(opt + 9, 0, &watchdog))
8020 goto err;
8021 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
8022 if (kstrtoint(opt + 10, 0, &flow_ctrl))
8023 goto err;
8024 } else if (!strncmp(opt, "pause:", 6)) {
8025 if (kstrtoint(opt + 6, 0, &pause))
8026 goto err;
8027 } else if (!strncmp(opt, "eee_timer:", 10)) {
8028 if (kstrtoint(opt + 10, 0, &eee_timer))
8029 goto err;
8030 } else if (!strncmp(opt, "chain_mode:", 11)) {
8031 if (kstrtoint(opt + 11, 0, &chain_mode))
8032 goto err;
8033 }
8034 }
8035 return 1;
8036
8037 err:
8038 pr_err("%s: ERROR broken module parameter conversion", __func__);
8039 return 1;
8040 }
8041
8042 __setup("stmmaceth=", stmmac_cmdline_opt);
8043 #endif /* MODULE */
8044
8045 static int __init stmmac_init(void)
8046 {
8047 #ifdef CONFIG_DEBUG_FS
8048 /* Create debugfs main directory if it doesn't exist yet */
8049 if (!stmmac_fs_dir)
8050 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8051 register_netdevice_notifier(&stmmac_notifier);
8052 #endif
8053
8054 return 0;
8055 }
8056
8057 static void __exit stmmac_exit(void)
8058 {
8059 #ifdef CONFIG_DEBUG_FS
8060 unregister_netdevice_notifier(&stmmac_notifier);
8061 debugfs_remove_recursive(stmmac_fs_dir);
8062 #endif
8063 }
8064
8065 module_init(stmmac_init)
8066 module_exit(stmmac_exit)
8067
8068 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8069 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8070 MODULE_LICENSE("GPL");
8071