1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac_fpe.h"
47 #include "stmmac.h"
48 #include "stmmac_xdp.h"
49 #include <linux/reset.h>
50 #include <linux/of_mdio.h>
51 #include "dwmac1000.h"
52 #include "dwxgmac2.h"
53 #include "hwif.h"
54
55 /* As long as the interface is active, we keep the timestamping counter enabled
56 * with fine resolution and binary rollover. This avoids non-monotonic behavior
57 * (clock jumps) when changing timestamping settings at runtime.
58 */
59 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
60 PTP_TCR_TSCTRLSSR)
61
62 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
63 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
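/* Worked example (illustrative): with SMP_CACHE_BYTES == 64, STMMAC_ALIGN(1500)
 * first rounds up to 1536 for the cache line, and the outer ALIGN(..., 16)
 * leaves it at 1536; the extra 16-byte alignment only matters on builds where
 * the cache line is smaller than 16 bytes.
 */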
64
65 /* Module parameters */
66 #define TX_TIMEO 5000
67 static int watchdog = TX_TIMEO;
68 module_param(watchdog, int, 0644);
69 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
70
71 static int debug = -1;
72 module_param(debug, int, 0644);
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74
75 static int phyaddr = -1;
76 module_param(phyaddr, int, 0444);
77 MODULE_PARM_DESC(phyaddr, "Physical device address");
78
79 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
85
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
90
91 static int flow_ctrl = 0xdead;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 /* This is unused */
105 #define DEFAULT_BUFSIZE 1536
106 static int buf_sz = DEFAULT_BUFSIZE;
107 module_param(buf_sz, int, 0644);
108 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
109
110 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
111 NETIF_MSG_LINK | NETIF_MSG_IFUP |
112 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
113
114 #define STMMAC_DEFAULT_LPI_TIMER 1000
115 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
116 module_param(eee_timer, uint, 0644);
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
118 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
119
120 /* By default the driver uses the ring mode to manage tx and rx descriptors,
121 * but allows the user to force use of the chain mode instead
122 */
123 static unsigned int chain_mode;
124 module_param(chain_mode, int, 0444);
125 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
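/* The knobs above can be set at module load time, e.g. (assuming the core
 * module is built as "stmmac"):
 *
 *	modprobe stmmac chain_mode=1 eee_timer=2000
 */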
126
127 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
128 /* For MSI interrupts handling */
129 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
130 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
132 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
133 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
134 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
136 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
138 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
139 u32 rxmode, u32 chan);
140
141 #ifdef CONFIG_DEBUG_FS
142 static const struct net_device_ops stmmac_netdev_ops;
143 static void stmmac_init_fs(struct net_device *dev);
144 static void stmmac_exit_fs(struct net_device *dev);
145 #endif
146
147 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
148
149 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
150 {
151 int ret = 0;
152
153 if (enabled) {
154 ret = clk_prepare_enable(priv->plat->stmmac_clk);
155 if (ret)
156 return ret;
157 ret = clk_prepare_enable(priv->plat->pclk);
158 if (ret) {
159 clk_disable_unprepare(priv->plat->stmmac_clk);
160 return ret;
161 }
162 if (priv->plat->clks_config) {
163 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
164 if (ret) {
165 clk_disable_unprepare(priv->plat->stmmac_clk);
166 clk_disable_unprepare(priv->plat->pclk);
167 return ret;
168 }
169 }
170 } else {
171 clk_disable_unprepare(priv->plat->stmmac_clk);
172 clk_disable_unprepare(priv->plat->pclk);
173 if (priv->plat->clks_config)
174 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
175 }
176
177 return ret;
178 }
179 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
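/* Illustrative sketch only: platform/PM code typically brackets a low-power
 * transition with this helper. The surrounding suspend/resume handlers are
 * not shown and are assumptions.
 *
 *	ret = stmmac_bus_clks_config(priv, false);	// gate bus clocks on suspend
 *	...
 *	ret = stmmac_bus_clks_config(priv, true);	// ungate them again on resume
 */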
180
181 /**
182 * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
183 * @bsp_priv: BSP private data structure (unused)
184 * @clk_tx_i: the transmit clock
185 * @interface: the selected interface mode
186 * @speed: the speed that the MAC will be operating at
187 *
188 * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
189 * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
190 * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
191 * the plat_data->set_clk_tx_rate method directly, call it via their own
192 * implementation, or implement their own method should they have more
193 * complex requirements. It is intended to only be used in this method.
194 *
195 * plat_data->clk_tx_i must be filled in.
196 */
197 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
198 phy_interface_t interface, int speed)
199 {
200 long rate = rgmii_clock(speed);
201
202 /* Silently ignore unsupported speeds as rgmii_clock() only
203 * supports 10, 100 and 1000Mbps. We do not want to spit
204 * errors for 2500 and higher speeds here.
205 */
206 if (rate < 0)
207 return 0;
208
209 return clk_set_rate(clk_tx_i, rate);
210 }
211 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
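/* Minimal sketch of how a platform glue driver might hook this helper up in
 * its probe path. The clock name "tx" and the surrounding probe function are
 * assumptions; only plat_dat->clk_tx_i, plat_dat->set_clk_tx_rate and
 * stmmac_set_clk_tx_rate() come from the stmmac code itself.
 *
 *	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
 *	if (IS_ERR(plat_dat->clk_tx_i))
 *		return PTR_ERR(plat_dat->clk_tx_i);
 *
 *	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
 */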
212
213 /**
214 * stmmac_verify_args - verify the driver parameters.
215 * Description: it checks the driver parameters and sets a default in case
216 * of errors.
217 */
218 static void stmmac_verify_args(void)
219 {
220 if (unlikely(watchdog < 0))
221 watchdog = TX_TIMEO;
222 if (unlikely((pause < 0) || (pause > 0xffff)))
223 pause = PAUSE_TIME;
224
225 if (flow_ctrl != 0xdead)
226 pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
227 }
228
229 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
230 {
231 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
232 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
233 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
234 u32 queue;
235
236 for (queue = 0; queue < maxq; queue++) {
237 struct stmmac_channel *ch = &priv->channel[queue];
238
239 if (stmmac_xdp_is_enabled(priv) &&
240 test_bit(queue, priv->af_xdp_zc_qps)) {
241 napi_disable(&ch->rxtx_napi);
242 continue;
243 }
244
245 if (queue < rx_queues_cnt)
246 napi_disable(&ch->rx_napi);
247 if (queue < tx_queues_cnt)
248 napi_disable(&ch->tx_napi);
249 }
250 }
251
252 /**
253 * stmmac_disable_all_queues - Disable all queues
254 * @priv: driver private structure
255 */
256 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
257 {
258 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
259 struct stmmac_rx_queue *rx_q;
260 u32 queue;
261
262 /* synchronize_rcu() needed for pending XDP buffers to drain */
263 for (queue = 0; queue < rx_queues_cnt; queue++) {
264 rx_q = &priv->dma_conf.rx_queue[queue];
265 if (rx_q->xsk_pool) {
266 synchronize_rcu();
267 break;
268 }
269 }
270
271 __stmmac_disable_all_queues(priv);
272 }
273
274 /**
275 * stmmac_enable_all_queues - Enable all queues
276 * @priv: driver private structure
277 */
278 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
279 {
280 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
281 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
282 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
283 u32 queue;
284
285 for (queue = 0; queue < maxq; queue++) {
286 struct stmmac_channel *ch = &priv->channel[queue];
287
288 if (stmmac_xdp_is_enabled(priv) &&
289 test_bit(queue, priv->af_xdp_zc_qps)) {
290 napi_enable(&ch->rxtx_napi);
291 continue;
292 }
293
294 if (queue < rx_queues_cnt)
295 napi_enable(&ch->rx_napi);
296 if (queue < tx_queues_cnt)
297 napi_enable(&ch->tx_napi);
298 }
299 }
300
301 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
302 {
303 if (!test_bit(STMMAC_DOWN, &priv->state) &&
304 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
305 queue_work(priv->wq, &priv->service_task);
306 }
307
308 static void stmmac_global_err(struct stmmac_priv *priv)
309 {
310 netif_carrier_off(priv->dev);
311 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
312 stmmac_service_event_schedule(priv);
313 }
314
315 /**
316 * stmmac_clk_csr_set - dynamically set the MDC clock
317 * @priv: driver private structure
318 * Description: this is to dynamically set the MDC clock according to the csr
319 * clock input.
320 * Note:
321 * If a specific clk_csr value is passed from the platform,
322 * the CSR Clock Range selection cannot be changed at run-time
323 * and it is fixed (as reported in the driver documentation).
324 * Otherwise the driver will try to set the MDC clock
325 * dynamically according to the actual clock input.
326 */
327 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
328 {
329 unsigned long clk_rate;
330
331 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
332
333 /* The platform-provided default clk_csr is assumed valid
334 * for all cases except the ones handled below.
335 * For values higher than the IEEE 802.3 specified frequency
336 * we cannot estimate the proper divider, as the frequency of
337 * clk_csr_i is not known, so the default divider is left
338 * unchanged.
339 */
340 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
341 if (clk_rate < CSR_F_35M)
342 priv->clk_csr = STMMAC_CSR_20_35M;
343 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
344 priv->clk_csr = STMMAC_CSR_35_60M;
345 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
346 priv->clk_csr = STMMAC_CSR_60_100M;
347 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
348 priv->clk_csr = STMMAC_CSR_100_150M;
349 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
350 priv->clk_csr = STMMAC_CSR_150_250M;
351 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
352 priv->clk_csr = STMMAC_CSR_250_300M;
353 else if ((clk_rate >= CSR_F_300M) && (clk_rate < CSR_F_500M))
354 priv->clk_csr = STMMAC_CSR_300_500M;
355 else if ((clk_rate >= CSR_F_500M) && (clk_rate < CSR_F_800M))
356 priv->clk_csr = STMMAC_CSR_500_800M;
357 }
358
359 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
360 if (clk_rate > 160000000)
361 priv->clk_csr = 0x03;
362 else if (clk_rate > 80000000)
363 priv->clk_csr = 0x02;
364 else if (clk_rate > 40000000)
365 priv->clk_csr = 0x01;
366 else
367 priv->clk_csr = 0;
368 }
369
370 if (priv->plat->has_xgmac) {
371 if (clk_rate > 400000000)
372 priv->clk_csr = 0x5;
373 else if (clk_rate > 350000000)
374 priv->clk_csr = 0x4;
375 else if (clk_rate > 300000000)
376 priv->clk_csr = 0x3;
377 else if (clk_rate > 250000000)
378 priv->clk_csr = 0x2;
379 else if (clk_rate > 150000000)
380 priv->clk_csr = 0x1;
381 else
382 priv->clk_csr = 0x0;
383 }
384 }
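/* Worked example (illustrative): with a 75 MHz CSR clock and no fixed clk_csr
 * value from the platform, the 60-100 MHz range above is selected, so the MDC
 * clock divider becomes STMMAC_CSR_60_100M (assuming CSR_F_60M/CSR_F_100M are
 * the 60 MHz and 100 MHz thresholds).
 */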
385
386 static void print_pkt(unsigned char *buf, int len)
387 {
388 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
389 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
390 }
391
392 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
393 {
394 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
395 u32 avail;
396
397 if (tx_q->dirty_tx > tx_q->cur_tx)
398 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
399 else
400 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
401
402 return avail;
403 }
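/* Worked example (illustrative): with dma_tx_size == 512, cur_tx == 510 and
 * dirty_tx == 5, dirty_tx <= cur_tx, so the second branch gives
 * avail = 512 - 510 + 5 - 1 = 6 free descriptors; the "- 1" keeps one slot
 * unused so a full ring can be told apart from an empty one.
 */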
404
405 /**
406 * stmmac_rx_dirty - Get the number of dirty RX descriptors
407 * @priv: driver private structure
408 * @queue: RX queue index
409 */
410 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
411 {
412 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
413 u32 dirty;
414
415 if (rx_q->dirty_rx <= rx_q->cur_rx)
416 dirty = rx_q->cur_rx - rx_q->dirty_rx;
417 else
418 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
419
420 return dirty;
421 }
422
423 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
424 {
425 u32 tx_cnt = priv->plat->tx_queues_to_use;
426 u32 queue;
427
428 /* check if all TX queues have the work finished */
429 for (queue = 0; queue < tx_cnt; queue++) {
430 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
431
432 if (tx_q->dirty_tx != tx_q->cur_tx)
433 return true; /* still unfinished work */
434 }
435
436 return false;
437 }
438
439 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
440 {
441 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
442 }
443
444 /**
445 * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
446 * @priv: driver private structure
447 * Description: this function checks whether the TX path is idle and, if so,
448 * enters LPI mode when EEE is enabled.
449 */
450 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
451 {
452 if (stmmac_eee_tx_busy(priv)) {
453 stmmac_restart_sw_lpi_timer(priv);
454 return;
455 }
456
457 /* Check and enter in LPI mode */
458 if (!priv->tx_path_in_lpi_mode)
459 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
460 priv->tx_lpi_clk_stop, 0);
461 }
462
463 /**
464 * stmmac_stop_sw_lpi - stop transmitting LPI
465 * @priv: driver private structure
466 * Description: When using software-controlled LPI, stop transmitting LPI state.
467 */
468 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
469 {
470 timer_delete_sync(&priv->eee_ctrl_timer);
471 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
472 priv->tx_path_in_lpi_mode = false;
473 }
474
475 /**
476 * stmmac_eee_ctrl_timer - EEE TX SW timer.
477 * @t: timer_list struct containing private info
478 * Description:
479 * if there is no data transfer and if we are not in LPI state,
480 * then the MAC transmitter can be moved to the LPI state.
481 */
482 static void stmmac_eee_ctrl_timer(struct timer_list *t)
483 {
484 struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
485
486 stmmac_try_to_start_sw_lpi(priv);
487 }
488
489 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
490 * @priv: driver private structure
491 * @p : descriptor pointer
492 * @skb : the socket buffer
493 * Description :
494 * This function reads the timestamp from the descriptor, performs some
495 * sanity checks and passes it to the stack.
496 */
497 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
498 struct dma_desc *p, struct sk_buff *skb)
499 {
500 struct skb_shared_hwtstamps shhwtstamp;
501 bool found = false;
502 u64 ns = 0;
503
504 if (!priv->hwts_tx_en)
505 return;
506
507 /* exit if skb doesn't support hw tstamp */
508 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
509 return;
510
511 /* check tx tstamp status */
512 if (stmmac_get_tx_timestamp_status(priv, p)) {
513 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
514 found = true;
515 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
516 found = true;
517 }
518
519 if (found) {
520 ns -= priv->plat->cdc_error_adj;
521
522 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
523 shhwtstamp.hwtstamp = ns_to_ktime(ns);
524
525 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
526 /* pass tstamp to stack */
527 skb_tstamp_tx(skb, &shhwtstamp);
528 }
529 }
530
531 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
532 * @priv: driver private structure
533 * @p : descriptor pointer
534 * @np : next descriptor pointer
535 * @skb : the socket buffer
536 * Description :
537 * This function reads the received packet's timestamp from the descriptor
538 * and passes it to the stack. It also performs some sanity checks.
539 */
540 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
541 struct dma_desc *np, struct sk_buff *skb)
542 {
543 struct skb_shared_hwtstamps *shhwtstamp = NULL;
544 struct dma_desc *desc = p;
545 u64 ns = 0;
546
547 if (!priv->hwts_rx_en)
548 return;
549 /* For GMAC4, the valid timestamp is from CTX next desc. */
550 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
551 desc = np;
552
553 /* Check if timestamp is available */
554 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
555 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
556
557 ns -= priv->plat->cdc_error_adj;
558
559 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
560 shhwtstamp = skb_hwtstamps(skb);
561 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 shhwtstamp->hwtstamp = ns_to_ktime(ns);
563 } else {
564 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
565 }
566 }
567
568 /**
569 * stmmac_hwtstamp_set - control hardware timestamping.
570 * @dev: device pointer.
571 * @config: the timestamping configuration.
572 * @extack: netlink extended ack structure for error reporting.
573 * Description:
574 * This function configures the MAC to enable/disable timestamping of both
575 * outgoing (TX) and incoming (RX) packets based on user input.
576 * Return Value:
577 * 0 on success and an appropriate -ve integer on failure.
578 */
579 static int stmmac_hwtstamp_set(struct net_device *dev,
580 struct kernel_hwtstamp_config *config,
581 struct netlink_ext_ack *extack)
582 {
583 struct stmmac_priv *priv = netdev_priv(dev);
584 u32 ptp_v2 = 0;
585 u32 tstamp_all = 0;
586 u32 ptp_over_ipv4_udp = 0;
587 u32 ptp_over_ipv6_udp = 0;
588 u32 ptp_over_ethernet = 0;
589 u32 snap_type_sel = 0;
590 u32 ts_master_en = 0;
591 u32 ts_event_en = 0;
592
593 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
594 NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
595 priv->hwts_tx_en = 0;
596 priv->hwts_rx_en = 0;
597
598 return -EOPNOTSUPP;
599 }
600
601 if (!netif_running(dev)) {
602 NL_SET_ERR_MSG_MOD(extack,
603 "Cannot change timestamping configuration while down");
604 return -ENODEV;
605 }
606
607 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
608 __func__, config->flags, config->tx_type, config->rx_filter);
609
610 if (config->tx_type != HWTSTAMP_TX_OFF &&
611 config->tx_type != HWTSTAMP_TX_ON)
612 return -ERANGE;
613
614 if (priv->adv_ts) {
615 switch (config->rx_filter) {
616 case HWTSTAMP_FILTER_NONE:
617 /* time stamp no incoming packet at all */
618 config->rx_filter = HWTSTAMP_FILTER_NONE;
619 break;
620
621 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
622 /* PTP v1, UDP, any kind of event packet */
623 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
624 /* 'xmac' hardware can support Sync, Pdelay_Req and
625 * Pdelay_resp by setting bit14 and bits17/16 to 01.
626 * This leaves Delay_Req timestamps out.
627 * Enable all events *and* general purpose message
628 * timestamping
629 */
630 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
631 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
632 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
633 break;
634
635 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
636 /* PTP v1, UDP, Sync packet */
637 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
638 /* take time stamp for SYNC messages only */
639 ts_event_en = PTP_TCR_TSEVNTENA;
640
641 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
642 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
643 break;
644
645 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
646 /* PTP v1, UDP, Delay_req packet */
647 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
648 /* take time stamp for Delay_Req messages only */
649 ts_master_en = PTP_TCR_TSMSTRENA;
650 ts_event_en = PTP_TCR_TSEVNTENA;
651
652 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
653 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
654 break;
655
656 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
657 /* PTP v2, UDP, any kind of event packet */
658 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
659 ptp_v2 = PTP_TCR_TSVER2ENA;
660 /* take time stamp for all event messages */
661 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
662
663 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
664 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
665 break;
666
667 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
668 /* PTP v2, UDP, Sync packet */
669 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
670 ptp_v2 = PTP_TCR_TSVER2ENA;
671 /* take time stamp for SYNC messages only */
672 ts_event_en = PTP_TCR_TSEVNTENA;
673
674 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
675 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
676 break;
677
678 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
679 /* PTP v2, UDP, Delay_req packet */
680 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
681 ptp_v2 = PTP_TCR_TSVER2ENA;
682 /* take time stamp for Delay_Req messages only */
683 ts_master_en = PTP_TCR_TSMSTRENA;
684 ts_event_en = PTP_TCR_TSEVNTENA;
685
686 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
687 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
688 break;
689
690 case HWTSTAMP_FILTER_PTP_V2_EVENT:
691 /* PTP v2/802.AS1 any layer, any kind of event packet */
692 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
693 ptp_v2 = PTP_TCR_TSVER2ENA;
694 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
695 if (priv->synopsys_id < DWMAC_CORE_4_10)
696 ts_event_en = PTP_TCR_TSEVNTENA;
697 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
698 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
699 ptp_over_ethernet = PTP_TCR_TSIPENA;
700 break;
701
702 case HWTSTAMP_FILTER_PTP_V2_SYNC:
703 /* PTP v2/802.AS1, any layer, Sync packet */
704 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
705 ptp_v2 = PTP_TCR_TSVER2ENA;
706 /* take time stamp for SYNC messages only */
707 ts_event_en = PTP_TCR_TSEVNTENA;
708
709 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
710 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
711 ptp_over_ethernet = PTP_TCR_TSIPENA;
712 break;
713
714 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
715 /* PTP v2/802.AS1, any layer, Delay_req packet */
716 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
717 ptp_v2 = PTP_TCR_TSVER2ENA;
718 /* take time stamp for Delay_Req messages only */
719 ts_master_en = PTP_TCR_TSMSTRENA;
720 ts_event_en = PTP_TCR_TSEVNTENA;
721
722 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 ptp_over_ethernet = PTP_TCR_TSIPENA;
725 break;
726
727 case HWTSTAMP_FILTER_NTP_ALL:
728 case HWTSTAMP_FILTER_ALL:
729 /* time stamp any incoming packet */
730 config->rx_filter = HWTSTAMP_FILTER_ALL;
731 tstamp_all = PTP_TCR_TSENALL;
732 break;
733
734 default:
735 return -ERANGE;
736 }
737 } else {
738 switch (config->rx_filter) {
739 case HWTSTAMP_FILTER_NONE:
740 config->rx_filter = HWTSTAMP_FILTER_NONE;
741 break;
742 default:
743 /* PTP v1, UDP, any kind of event packet */
744 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
745 break;
746 }
747 }
748 priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
749 priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
750
751 priv->systime_flags = STMMAC_HWTS_ACTIVE;
752
753 if (priv->hwts_tx_en || priv->hwts_rx_en) {
754 priv->systime_flags |= tstamp_all | ptp_v2 |
755 ptp_over_ethernet | ptp_over_ipv6_udp |
756 ptp_over_ipv4_udp | ts_event_en |
757 ts_master_en | snap_type_sel;
758 }
759
760 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
761
762 priv->tstamp_config = *config;
763
764 return 0;
765 }
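/* Illustrative userspace sketch (not part of the driver) showing how the
 * configuration above is typically requested through the standard
 * SIOCSHWTSTAMP ioctl; the interface name and socket setup are assumptions.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */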
766
767 /**
768 * stmmac_hwtstamp_get - read hardware timestamping.
769 * @dev: device pointer.
770 * @config: the timestamping configuration.
771 * Description:
772 * This function obtains the current hardware timestamping settings
773 * as requested.
774 */
775 static int stmmac_hwtstamp_get(struct net_device *dev,
776 struct kernel_hwtstamp_config *config)
777 {
778 struct stmmac_priv *priv = netdev_priv(dev);
779
780 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
781 return -EOPNOTSUPP;
782
783 *config = priv->tstamp_config;
784
785 return 0;
786 }
787
788 /**
789 * stmmac_init_tstamp_counter - init hardware timestamping counter
790 * @priv: driver private structure
791 * @systime_flags: timestamping flags
792 * Description:
793 * Initialize hardware counter for packet timestamping.
794 * This is valid as long as the interface is open and not suspended.
795 * It will be rerun after resuming from suspend, in which case the
796 * timestamping flags updated by stmmac_hwtstamp_set() also need to be restored.
797 */
798 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
799 {
800 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
801 struct timespec64 now;
802 u32 sec_inc = 0;
803 u64 temp = 0;
804
805 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
806 return -EOPNOTSUPP;
807
808 if (!priv->plat->clk_ptp_rate) {
809 netdev_err(priv->dev, "Invalid PTP clock rate");
810 return -EINVAL;
811 }
812
813 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
814 priv->systime_flags = systime_flags;
815
816 /* program Sub Second Increment reg */
817 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
818 priv->plat->clk_ptp_rate,
819 xmac, &sec_inc);
820 temp = div_u64(1000000000ULL, sec_inc);
821
822 /* Store sub second increment for later use */
823 priv->sub_second_inc = sec_inc;
824
825 /* calculate the default addend value:
826 * formula is :
827 * addend = (2^32)/freq_div_ratio;
828 * where, freq_div_ratio = clk_ptp_rate/(1e9ns/sec_inc)
829 */
830 temp = (u64)(temp << 32);
831 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
832 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
833
834 /* initialize system time */
835 ktime_get_real_ts64(&now);
836
837 /* lower 32 bits of tv_sec are safe until y2106 */
838 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
839
840 return 0;
841 }
842 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
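/* Worked example (illustrative, assuming the core programs the sub-second
 * increment to 2e9 / clk_ptp_rate ns in fine-update mode): with
 * clk_ptp_rate = 50 MHz, sec_inc = 40 ns, so temp = 1e9 / 40 = 25000000 and
 * default_addend = (25000000 << 32) / 50000000 = 2^31 = 0x80000000, i.e. the
 * addend accumulator overflows every second PTP clock cycle.
 */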
843
844 /**
845 * stmmac_init_ptp - init PTP
846 * @priv: driver private structure
847 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
848 * This is done by looking at the HW cap. register.
849 * This function also registers the ptp driver.
850 */
851 static int stmmac_init_ptp(struct stmmac_priv *priv)
852 {
853 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
854 int ret;
855
856 if (priv->plat->ptp_clk_freq_config)
857 priv->plat->ptp_clk_freq_config(priv);
858
859 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
860 if (ret)
861 return ret;
862
863 priv->adv_ts = 0;
864 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
865 if (xmac && priv->dma_cap.atime_stamp)
866 priv->adv_ts = 1;
867 /* Dwmac 3.x core with extend_desc can support adv_ts */
868 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
869 priv->adv_ts = 1;
870
871 if (priv->dma_cap.time_stamp)
872 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
873
874 if (priv->adv_ts)
875 netdev_info(priv->dev,
876 "IEEE 1588-2008 Advanced Timestamp supported\n");
877
878 priv->hwts_tx_en = 0;
879 priv->hwts_rx_en = 0;
880
881 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
882 stmmac_hwtstamp_correct_latency(priv, priv);
883
884 return 0;
885 }
886
887 static void stmmac_release_ptp(struct stmmac_priv *priv)
888 {
889 clk_disable_unprepare(priv->plat->clk_ptp_ref);
890 stmmac_ptp_unregister(priv);
891 }
892
893 /**
894 * stmmac_mac_flow_ctrl - Configure flow control in all queues
895 * @priv: driver private structure
896 * @duplex: duplex passed to the next function
897 * @flow_ctrl: desired flow control modes
898 * Description: It is used for configuring the flow control in all queues
899 */
900 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
901 unsigned int flow_ctrl)
902 {
903 u32 tx_cnt = priv->plat->tx_queues_to_use;
904
905 stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
906 tx_cnt);
907 }
908
909 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
910 phy_interface_t interface)
911 {
912 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
913
914 /* Refresh the MAC-specific capabilities */
915 stmmac_mac_update_caps(priv);
916
917 config->mac_capabilities = priv->hw->link.caps;
918
919 if (priv->plat->max_speed)
920 phylink_limit_mac_speed(config, priv->plat->max_speed);
921
922 return config->mac_capabilities;
923 }
924
925 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
926 phy_interface_t interface)
927 {
928 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
929 struct phylink_pcs *pcs;
930
931 if (priv->plat->select_pcs) {
932 pcs = priv->plat->select_pcs(priv, interface);
933 if (!IS_ERR(pcs))
934 return pcs;
935 }
936
937 return NULL;
938 }
939
940 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
941 const struct phylink_link_state *state)
942 {
943 /* Nothing to do, xpcs_config() handles everything */
944 }
945
946 static void stmmac_mac_link_down(struct phylink_config *config,
947 unsigned int mode, phy_interface_t interface)
948 {
949 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
950
951 stmmac_mac_set(priv, priv->ioaddr, false);
952 if (priv->dma_cap.eee)
953 stmmac_set_eee_pls(priv, priv->hw, false);
954
955 if (stmmac_fpe_supported(priv))
956 ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
957 }
958
959 static void stmmac_mac_link_up(struct phylink_config *config,
960 struct phy_device *phy,
961 unsigned int mode, phy_interface_t interface,
962 int speed, int duplex,
963 bool tx_pause, bool rx_pause)
964 {
965 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
966 unsigned int flow_ctrl;
967 u32 old_ctrl, ctrl;
968 int ret;
969
970 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
971 priv->plat->serdes_powerup)
972 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
973
974 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
975 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
976
977 if (interface == PHY_INTERFACE_MODE_USXGMII) {
978 switch (speed) {
979 case SPEED_10000:
980 ctrl |= priv->hw->link.xgmii.speed10000;
981 break;
982 case SPEED_5000:
983 ctrl |= priv->hw->link.xgmii.speed5000;
984 break;
985 case SPEED_2500:
986 ctrl |= priv->hw->link.xgmii.speed2500;
987 break;
988 default:
989 return;
990 }
991 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
992 switch (speed) {
993 case SPEED_100000:
994 ctrl |= priv->hw->link.xlgmii.speed100000;
995 break;
996 case SPEED_50000:
997 ctrl |= priv->hw->link.xlgmii.speed50000;
998 break;
999 case SPEED_40000:
1000 ctrl |= priv->hw->link.xlgmii.speed40000;
1001 break;
1002 case SPEED_25000:
1003 ctrl |= priv->hw->link.xlgmii.speed25000;
1004 break;
1005 case SPEED_10000:
1006 ctrl |= priv->hw->link.xgmii.speed10000;
1007 break;
1008 case SPEED_2500:
1009 ctrl |= priv->hw->link.speed2500;
1010 break;
1011 case SPEED_1000:
1012 ctrl |= priv->hw->link.speed1000;
1013 break;
1014 default:
1015 return;
1016 }
1017 } else {
1018 switch (speed) {
1019 case SPEED_2500:
1020 ctrl |= priv->hw->link.speed2500;
1021 break;
1022 case SPEED_1000:
1023 ctrl |= priv->hw->link.speed1000;
1024 break;
1025 case SPEED_100:
1026 ctrl |= priv->hw->link.speed100;
1027 break;
1028 case SPEED_10:
1029 ctrl |= priv->hw->link.speed10;
1030 break;
1031 default:
1032 return;
1033 }
1034 }
1035
1036 if (priv->plat->fix_mac_speed)
1037 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1038
1039 if (!duplex)
1040 ctrl &= ~priv->hw->link.duplex;
1041 else
1042 ctrl |= priv->hw->link.duplex;
1043
1044 /* Flow Control operation */
1045 if (rx_pause && tx_pause)
1046 flow_ctrl = FLOW_AUTO;
1047 else if (rx_pause && !tx_pause)
1048 flow_ctrl = FLOW_RX;
1049 else if (!rx_pause && tx_pause)
1050 flow_ctrl = FLOW_TX;
1051 else
1052 flow_ctrl = FLOW_OFF;
1053
1054 stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
1055
1056 if (ctrl != old_ctrl)
1057 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1058
1059 if (priv->plat->set_clk_tx_rate) {
1060 ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
1061 priv->plat->clk_tx_i,
1062 interface, speed);
1063 if (ret < 0)
1064 netdev_err(priv->dev,
1065 "failed to configure transmit clock for %dMbps: %pe\n",
1066 speed, ERR_PTR(ret));
1067 }
1068
1069 stmmac_mac_set(priv, priv->ioaddr, true);
1070 if (priv->dma_cap.eee)
1071 stmmac_set_eee_pls(priv, priv->hw, true);
1072
1073 if (stmmac_fpe_supported(priv))
1074 ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
1075
1076 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1077 stmmac_hwtstamp_correct_latency(priv, priv);
1078 }
1079
1080 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
1081 {
1082 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1083
1084 priv->eee_active = false;
1085
1086 mutex_lock(&priv->lock);
1087
1088 priv->eee_enabled = false;
1089
1090 netdev_dbg(priv->dev, "disable EEE\n");
1091 priv->eee_sw_timer_en = false;
1092 timer_delete_sync(&priv->eee_ctrl_timer);
1093 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1094 priv->tx_path_in_lpi_mode = false;
1095
1096 stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1097 mutex_unlock(&priv->lock);
1098 }
1099
1100 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1101 bool tx_clk_stop)
1102 {
1103 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1104 int ret;
1105
1106 priv->tx_lpi_timer = timer;
1107 priv->eee_active = true;
1108
1109 mutex_lock(&priv->lock);
1110
1111 priv->eee_enabled = true;
1112
1113 /* Update the transmit clock stop according to PHY capability if
1114 * the platform allows
1115 */
1116 if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1117 priv->tx_lpi_clk_stop = tx_clk_stop;
1118
1119 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1120 STMMAC_DEFAULT_TWT_LS);
1121
1122 /* Try to configure the hardware timer. */
1123 ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1124 priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1125
1126 if (ret) {
1127 /* Hardware timer mode not supported, or value out of range.
1128 * Fall back to using software LPI mode
1129 */
1130 priv->eee_sw_timer_en = true;
1131 stmmac_restart_sw_lpi_timer(priv);
1132 }
1133
1134 mutex_unlock(&priv->lock);
1135 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1136
1137 return 0;
1138 }
1139
1140 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1141 phy_interface_t interface)
1142 {
1143 struct net_device *ndev = to_net_dev(config->dev);
1144 struct stmmac_priv *priv = netdev_priv(ndev);
1145
1146 if (priv->plat->mac_finish)
1147 priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1148
1149 return 0;
1150 }
1151
1152 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1153 .mac_get_caps = stmmac_mac_get_caps,
1154 .mac_select_pcs = stmmac_mac_select_pcs,
1155 .mac_config = stmmac_mac_config,
1156 .mac_link_down = stmmac_mac_link_down,
1157 .mac_link_up = stmmac_mac_link_up,
1158 .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1159 .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1160 .mac_finish = stmmac_mac_finish,
1161 };
1162
1163 /**
1164 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1165 * @priv: driver private structure
1166 * Description: this is to verify if the HW supports the Physical Coding
1167 * Sublayer (PCS) interface that can be used when the MAC is
1168 * configured for the TBI, RTBI, or SGMII PHY interface.
1169 */
1170 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1171 {
1172 int interface = priv->plat->mac_interface;
1173
1174 if (priv->dma_cap.pcs) {
1175 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1176 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1177 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1178 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1179 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1180 priv->hw->pcs = STMMAC_PCS_RGMII;
1181 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1182 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1183 priv->hw->pcs = STMMAC_PCS_SGMII;
1184 }
1185 }
1186 }
1187
1188 /**
1189 * stmmac_init_phy - PHY initialization
1190 * @dev: net device structure
1191 * Description: it initializes the driver's PHY state, and attaches the PHY
1192 * to the mac driver.
1193 * Return value:
1194 * 0 on success
1195 */
1196 static int stmmac_init_phy(struct net_device *dev)
1197 {
1198 struct stmmac_priv *priv = netdev_priv(dev);
1199 struct fwnode_handle *phy_fwnode;
1200 struct fwnode_handle *fwnode;
1201 int ret;
1202
1203 if (!phylink_expects_phy(priv->phylink))
1204 return 0;
1205
1206 fwnode = priv->plat->port_node;
1207 if (!fwnode)
1208 fwnode = dev_fwnode(priv->device);
1209
1210 if (fwnode)
1211 phy_fwnode = fwnode_get_phy_node(fwnode);
1212 else
1213 phy_fwnode = NULL;
1214
1215 /* Some DT bindings do not set up the PHY handle. Let's try to
1216 * manually parse it
1217 */
1218 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1219 int addr = priv->plat->phy_addr;
1220 struct phy_device *phydev;
1221
1222 if (addr < 0) {
1223 netdev_err(priv->dev, "no phy found\n");
1224 return -ENODEV;
1225 }
1226
1227 phydev = mdiobus_get_phy(priv->mii, addr);
1228 if (!phydev) {
1229 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1230 return -ENODEV;
1231 }
1232
1233 ret = phylink_connect_phy(priv->phylink, phydev);
1234 } else {
1235 fwnode_handle_put(phy_fwnode);
1236 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1237 }
1238
1239 if (ret == 0) {
1240 struct ethtool_keee eee;
1241
1242 /* Configure phylib's copy of the LPI timer. Normally,
1243 * phylink_config.lpi_timer_default would do this, but there is
1244 * a chance that userspace could change the eee_timer setting
1245 * via sysfs before the first open. Thus, preserve existing
1246 * behaviour.
1247 */
1248 if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1249 eee.tx_lpi_timer = priv->tx_lpi_timer;
1250 phylink_ethtool_set_eee(priv->phylink, &eee);
1251 }
1252 }
1253
1254 if (!priv->plat->pmt) {
1255 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1256
1257 phylink_ethtool_get_wol(priv->phylink, &wol);
1258 device_set_wakeup_capable(priv->device, !!wol.supported);
1259 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1260 }
1261
1262 return ret;
1263 }
1264
1265 static int stmmac_phy_setup(struct stmmac_priv *priv)
1266 {
1267 struct stmmac_mdio_bus_data *mdio_bus_data;
1268 struct phylink_config *config;
1269 struct fwnode_handle *fwnode;
1270 struct phylink_pcs *pcs;
1271 struct phylink *phylink;
1272
1273 config = &priv->phylink_config;
1274
1275 config->dev = &priv->dev->dev;
1276 config->type = PHYLINK_NETDEV;
1277 config->mac_managed_pm = true;
1278
1279 /* Stmmac always requires an RX clock for hardware initialization */
1280 config->mac_requires_rxc = true;
1281
1282 if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1283 config->eee_rx_clk_stop_enable = true;
1284
1285 /* Set the default transmit clock stop bit based on the platform glue */
1286 priv->tx_lpi_clk_stop = priv->plat->flags &
1287 STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1288
1289 mdio_bus_data = priv->plat->mdio_bus_data;
1290 if (mdio_bus_data)
1291 config->default_an_inband = mdio_bus_data->default_an_inband;
1292
1293 /* Get the PHY interface modes (at the PHY end of the link) that
1294 * are supported by the platform.
1295 */
1296 if (priv->plat->get_interfaces)
1297 priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1298 config->supported_interfaces);
1299
1300 /* Set the platform/firmware specified interface mode if the
1301 * supported interfaces have not already been provided using
1302 * phy_interface as a last resort.
1303 */
1304 if (phy_interface_empty(config->supported_interfaces))
1305 __set_bit(priv->plat->phy_interface,
1306 config->supported_interfaces);
1307
1308 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1309 if (priv->hw->xpcs)
1310 pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1311 else
1312 pcs = priv->hw->phylink_pcs;
1313
1314 if (pcs)
1315 phy_interface_or(config->supported_interfaces,
1316 config->supported_interfaces,
1317 pcs->supported_interfaces);
1318
1319 if (priv->dma_cap.eee) {
1320 /* Assume all supported interfaces also support LPI */
1321 memcpy(config->lpi_interfaces, config->supported_interfaces,
1322 sizeof(config->lpi_interfaces));
1323
1324 /* All full duplex speeds above 100Mbps are supported */
1325 config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
1326 config->lpi_timer_default = eee_timer * 1000;
1327 config->eee_enabled_default = true;
1328 }
1329
1330 fwnode = priv->plat->port_node;
1331 if (!fwnode)
1332 fwnode = dev_fwnode(priv->device);
1333
1334 phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1335 &stmmac_phylink_mac_ops);
1336 if (IS_ERR(phylink))
1337 return PTR_ERR(phylink);
1338
1339 priv->phylink = phylink;
1340 return 0;
1341 }
1342
1343 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1344 struct stmmac_dma_conf *dma_conf)
1345 {
1346 u32 rx_cnt = priv->plat->rx_queues_to_use;
1347 unsigned int desc_size;
1348 void *head_rx;
1349 u32 queue;
1350
1351 /* Display RX rings */
1352 for (queue = 0; queue < rx_cnt; queue++) {
1353 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1354
1355 pr_info("\tRX Queue %u rings\n", queue);
1356
1357 if (priv->extend_desc) {
1358 head_rx = (void *)rx_q->dma_erx;
1359 desc_size = sizeof(struct dma_extended_desc);
1360 } else {
1361 head_rx = (void *)rx_q->dma_rx;
1362 desc_size = sizeof(struct dma_desc);
1363 }
1364
1365 /* Display RX ring */
1366 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1367 rx_q->dma_rx_phy, desc_size);
1368 }
1369 }
1370
1371 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1372 struct stmmac_dma_conf *dma_conf)
1373 {
1374 u32 tx_cnt = priv->plat->tx_queues_to_use;
1375 unsigned int desc_size;
1376 void *head_tx;
1377 u32 queue;
1378
1379 /* Display TX rings */
1380 for (queue = 0; queue < tx_cnt; queue++) {
1381 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1382
1383 pr_info("\tTX Queue %d rings\n", queue);
1384
1385 if (priv->extend_desc) {
1386 head_tx = (void *)tx_q->dma_etx;
1387 desc_size = sizeof(struct dma_extended_desc);
1388 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1389 head_tx = (void *)tx_q->dma_entx;
1390 desc_size = sizeof(struct dma_edesc);
1391 } else {
1392 head_tx = (void *)tx_q->dma_tx;
1393 desc_size = sizeof(struct dma_desc);
1394 }
1395
1396 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1397 tx_q->dma_tx_phy, desc_size);
1398 }
1399 }
1400
1401 static void stmmac_display_rings(struct stmmac_priv *priv,
1402 struct stmmac_dma_conf *dma_conf)
1403 {
1404 /* Display RX ring */
1405 stmmac_display_rx_rings(priv, dma_conf);
1406
1407 /* Display TX ring */
1408 stmmac_display_tx_rings(priv, dma_conf);
1409 }
1410
1411 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1412 {
1413 if (stmmac_xdp_is_enabled(priv))
1414 return XDP_PACKET_HEADROOM;
1415
1416 return NET_SKB_PAD;
1417 }
1418
1419 static int stmmac_set_bfsize(int mtu, int bufsize)
1420 {
1421 int ret = bufsize;
1422
1423 if (mtu >= BUF_SIZE_8KiB)
1424 ret = BUF_SIZE_16KiB;
1425 else if (mtu >= BUF_SIZE_4KiB)
1426 ret = BUF_SIZE_8KiB;
1427 else if (mtu >= BUF_SIZE_2KiB)
1428 ret = BUF_SIZE_4KiB;
1429 else if (mtu > DEFAULT_BUFSIZE)
1430 ret = BUF_SIZE_2KiB;
1431 else
1432 ret = DEFAULT_BUFSIZE;
1433
1434 return ret;
1435 }
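/* Worked example (illustrative): an MTU of 3000 bytes is below BUF_SIZE_4KiB
 * but at least BUF_SIZE_2KiB, so the cascade above returns BUF_SIZE_4KiB,
 * i.e. the smallest buffer size that still fits the frame.
 */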
1436
1437 /**
1438 * stmmac_clear_rx_descriptors - clear RX descriptors
1439 * @priv: driver private structure
1440 * @dma_conf: structure to take the dma data
1441 * @queue: RX queue index
1442 * Description: this function is called to clear the RX descriptors,
1443 * whether basic or extended descriptors are used.
1444 */
1445 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1446 struct stmmac_dma_conf *dma_conf,
1447 u32 queue)
1448 {
1449 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1450 int i;
1451
1452 /* Clear the RX descriptors */
1453 for (i = 0; i < dma_conf->dma_rx_size; i++)
1454 if (priv->extend_desc)
1455 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1456 priv->use_riwt, priv->mode,
1457 (i == dma_conf->dma_rx_size - 1),
1458 dma_conf->dma_buf_sz);
1459 else
1460 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1461 priv->use_riwt, priv->mode,
1462 (i == dma_conf->dma_rx_size - 1),
1463 dma_conf->dma_buf_sz);
1464 }
1465
1466 /**
1467 * stmmac_clear_tx_descriptors - clear tx descriptors
1468 * @priv: driver private structure
1469 * @dma_conf: structure to take the dma data
1470 * @queue: TX queue index.
1471 * Description: this function is called to clear the TX descriptors,
1472 * whether basic or extended descriptors are used.
1473 */
1474 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1475 struct stmmac_dma_conf *dma_conf,
1476 u32 queue)
1477 {
1478 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1479 int i;
1480
1481 /* Clear the TX descriptors */
1482 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1483 int last = (i == (dma_conf->dma_tx_size - 1));
1484 struct dma_desc *p;
1485
1486 if (priv->extend_desc)
1487 p = &tx_q->dma_etx[i].basic;
1488 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1489 p = &tx_q->dma_entx[i].basic;
1490 else
1491 p = &tx_q->dma_tx[i];
1492
1493 stmmac_init_tx_desc(priv, p, priv->mode, last);
1494 }
1495 }
1496
1497 /**
1498 * stmmac_clear_descriptors - clear descriptors
1499 * @priv: driver private structure
1500 * @dma_conf: structure to take the dma data
1501 * Description: this function is called to clear the TX and RX descriptors,
1502 * whether basic or extended descriptors are used.
1503 */
1504 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1505 struct stmmac_dma_conf *dma_conf)
1506 {
1507 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1508 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1509 u32 queue;
1510
1511 /* Clear the RX descriptors */
1512 for (queue = 0; queue < rx_queue_cnt; queue++)
1513 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1514
1515 /* Clear the TX descriptors */
1516 for (queue = 0; queue < tx_queue_cnt; queue++)
1517 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1518 }
1519
1520 /**
1521 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1522 * @priv: driver private structure
1523 * @dma_conf: structure to take the dma data
1524 * @p: descriptor pointer
1525 * @i: descriptor index
1526 * @flags: gfp flag
1527 * @queue: RX queue index
1528 * Description: this function is called to allocate a receive buffer, perform
1529 * the DMA mapping and init the descriptor.
1530 */
1531 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1532 struct stmmac_dma_conf *dma_conf,
1533 struct dma_desc *p,
1534 int i, gfp_t flags, u32 queue)
1535 {
1536 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1537 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1538 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1539
1540 if (priv->dma_cap.host_dma_width <= 32)
1541 gfp |= GFP_DMA32;
1542
1543 if (!buf->page) {
1544 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1545 if (!buf->page)
1546 return -ENOMEM;
1547 buf->page_offset = stmmac_rx_offset(priv);
1548 }
1549
1550 if (priv->sph && !buf->sec_page) {
1551 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1552 if (!buf->sec_page)
1553 return -ENOMEM;
1554
1555 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1556 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1557 } else {
1558 buf->sec_page = NULL;
1559 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1560 }
1561
1562 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1563
1564 stmmac_set_desc_addr(priv, p, buf->addr);
1565 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1566 stmmac_init_desc3(priv, p);
1567
1568 return 0;
1569 }
1570
1571 /**
1572 * stmmac_free_rx_buffer - free RX dma buffers
1573 * @priv: private structure
1574 * @rx_q: RX queue
1575 * @i: buffer index.
1576 */
1577 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1578 struct stmmac_rx_queue *rx_q,
1579 int i)
1580 {
1581 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1582
1583 if (buf->page)
1584 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1585 buf->page = NULL;
1586
1587 if (buf->sec_page)
1588 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1589 buf->sec_page = NULL;
1590 }
1591
1592 /**
1593 * stmmac_free_tx_buffer - free TX dma buffers
1594 * @priv: private structure
1595 * @dma_conf: structure to take the dma data
1596 * @queue: TX queue index
1597 * @i: buffer index.
1598 */
1599 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1600 struct stmmac_dma_conf *dma_conf,
1601 u32 queue, int i)
1602 {
1603 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1604
1605 if (tx_q->tx_skbuff_dma[i].buf &&
1606 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1607 if (tx_q->tx_skbuff_dma[i].map_as_page)
1608 dma_unmap_page(priv->device,
1609 tx_q->tx_skbuff_dma[i].buf,
1610 tx_q->tx_skbuff_dma[i].len,
1611 DMA_TO_DEVICE);
1612 else
1613 dma_unmap_single(priv->device,
1614 tx_q->tx_skbuff_dma[i].buf,
1615 tx_q->tx_skbuff_dma[i].len,
1616 DMA_TO_DEVICE);
1617 }
1618
1619 if (tx_q->xdpf[i] &&
1620 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1621 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1622 xdp_return_frame(tx_q->xdpf[i]);
1623 tx_q->xdpf[i] = NULL;
1624 }
1625
1626 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1627 tx_q->xsk_frames_done++;
1628
1629 if (tx_q->tx_skbuff[i] &&
1630 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1631 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1632 tx_q->tx_skbuff[i] = NULL;
1633 }
1634
1635 tx_q->tx_skbuff_dma[i].buf = 0;
1636 tx_q->tx_skbuff_dma[i].map_as_page = false;
1637 }
1638
1639 /**
1640 * dma_free_rx_skbufs - free RX dma buffers
1641 * @priv: private structure
1642 * @dma_conf: structure to take the dma data
1643 * @queue: RX queue index
1644 */
1645 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1646 struct stmmac_dma_conf *dma_conf,
1647 u32 queue)
1648 {
1649 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1650 int i;
1651
1652 for (i = 0; i < dma_conf->dma_rx_size; i++)
1653 stmmac_free_rx_buffer(priv, rx_q, i);
1654 }
1655
1656 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1657 struct stmmac_dma_conf *dma_conf,
1658 u32 queue, gfp_t flags)
1659 {
1660 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1661 int i;
1662
1663 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1664 struct dma_desc *p;
1665 int ret;
1666
1667 if (priv->extend_desc)
1668 p = &((rx_q->dma_erx + i)->basic);
1669 else
1670 p = rx_q->dma_rx + i;
1671
1672 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1673 queue);
1674 if (ret)
1675 return ret;
1676
1677 rx_q->buf_alloc_num++;
1678 }
1679
1680 return 0;
1681 }
1682
1683 /**
1684 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1685 * @priv: private structure
1686 * @dma_conf: structure to take the dma data
1687 * @queue: RX queue index
1688 */
1689 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1690 struct stmmac_dma_conf *dma_conf,
1691 u32 queue)
1692 {
1693 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1694 int i;
1695
1696 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1697 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1698
1699 if (!buf->xdp)
1700 continue;
1701
1702 xsk_buff_free(buf->xdp);
1703 buf->xdp = NULL;
1704 }
1705 }
1706
1707 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1708 struct stmmac_dma_conf *dma_conf,
1709 u32 queue)
1710 {
1711 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1712 int i;
1713
1714 /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1715 * in struct xdp_buff_xsk to stash driver-specific information. Use
1716 * this macro to catch any size violation at build time.
1717 */
1718 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1719
1720 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1721 struct stmmac_rx_buffer *buf;
1722 dma_addr_t dma_addr;
1723 struct dma_desc *p;
1724
1725 if (priv->extend_desc)
1726 p = (struct dma_desc *)(rx_q->dma_erx + i);
1727 else
1728 p = rx_q->dma_rx + i;
1729
1730 buf = &rx_q->buf_pool[i];
1731
1732 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1733 if (!buf->xdp)
1734 return -ENOMEM;
1735
1736 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1737 stmmac_set_desc_addr(priv, p, dma_addr);
1738 rx_q->buf_alloc_num++;
1739 }
1740
1741 return 0;
1742 }
1743
1744 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1745 {
1746 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1747 return NULL;
1748
1749 return xsk_get_pool_from_qid(priv->dev, queue);
1750 }
1751
1752 /**
1753 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1754 * @priv: driver private structure
1755 * @dma_conf: structure to take the dma data
1756 * @queue: RX queue index
1757 * @flags: gfp flag.
1758 * Description: this function initializes the DMA RX descriptors
1759 * and allocates the socket buffers. It supports the chained and ring
1760 * modes.
1761 */
1762 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1763 struct stmmac_dma_conf *dma_conf,
1764 u32 queue, gfp_t flags)
1765 {
1766 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1767 int ret;
1768
1769 netif_dbg(priv, probe, priv->dev,
1770 "(%s) dma_rx_phy=0x%08x\n", __func__,
1771 (u32)rx_q->dma_rx_phy);
1772
1773 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1774
1775 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1776
1777 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1778
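/* Register the memory model backing this queue's xdp_rxq: the XSK buffer
 * pool when AF_XDP zero-copy is active for this queue, the page_pool
 * otherwise.
 */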
1779 if (rx_q->xsk_pool) {
1780 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1781 MEM_TYPE_XSK_BUFF_POOL,
1782 NULL));
1783 netdev_info(priv->dev,
1784 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1785 rx_q->queue_index);
1786 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1787 } else {
1788 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1789 MEM_TYPE_PAGE_POOL,
1790 rx_q->page_pool));
1791 netdev_info(priv->dev,
1792 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1793 rx_q->queue_index);
1794 }
1795
1796 if (rx_q->xsk_pool) {
1797 /* RX XDP ZC buffer pool may not be populated, e.g.
1798 * xdpsock TX-only.
1799 */
1800 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1801 } else {
1802 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1803 if (ret < 0)
1804 return -ENOMEM;
1805 }
1806
1807 /* Setup the chained descriptor addresses */
1808 if (priv->mode == STMMAC_CHAIN_MODE) {
1809 if (priv->extend_desc)
1810 stmmac_mode_init(priv, rx_q->dma_erx,
1811 rx_q->dma_rx_phy,
1812 dma_conf->dma_rx_size, 1);
1813 else
1814 stmmac_mode_init(priv, rx_q->dma_rx,
1815 rx_q->dma_rx_phy,
1816 dma_conf->dma_rx_size, 0);
1817 }
1818
1819 return 0;
1820 }
1821
1822 static int init_dma_rx_desc_rings(struct net_device *dev,
1823 struct stmmac_dma_conf *dma_conf,
1824 gfp_t flags)
1825 {
1826 struct stmmac_priv *priv = netdev_priv(dev);
1827 u32 rx_count = priv->plat->rx_queues_to_use;
1828 int queue;
1829 int ret;
1830
1831 /* RX INITIALIZATION */
1832 netif_dbg(priv, probe, priv->dev,
1833 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1834
1835 for (queue = 0; queue < rx_count; queue++) {
1836 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1837 if (ret)
1838 goto err_init_rx_buffers;
1839 }
1840
1841 return 0;
1842
1843 err_init_rx_buffers:
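/* Unwind: release the buffers of the queue that failed and of every
 * queue initialised before it.
 */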
1844 while (queue >= 0) {
1845 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1846
1847 if (rx_q->xsk_pool)
1848 dma_free_rx_xskbufs(priv, dma_conf, queue);
1849 else
1850 dma_free_rx_skbufs(priv, dma_conf, queue);
1851
1852 rx_q->buf_alloc_num = 0;
1853 rx_q->xsk_pool = NULL;
1854
1855 queue--;
1856 }
1857
1858 return ret;
1859 }
1860
1861 /**
1862 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1863 * @priv: driver private structure
1864 * @dma_conf: structure to take the dma data
1865 * @queue: TX queue index
1866 * Description: this function initializes the DMA TX descriptors
1867 * and the TX buffer bookkeeping. It supports the chained and ring
1868 * modes.
1869 */
1870 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1871 struct stmmac_dma_conf *dma_conf,
1872 u32 queue)
1873 {
1874 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1875 int i;
1876
1877 netif_dbg(priv, probe, priv->dev,
1878 "(%s) dma_tx_phy=0x%08x\n", __func__,
1879 (u32)tx_q->dma_tx_phy);
1880
1881 /* Setup the chained descriptor addresses */
1882 if (priv->mode == STMMAC_CHAIN_MODE) {
1883 if (priv->extend_desc)
1884 stmmac_mode_init(priv, tx_q->dma_etx,
1885 tx_q->dma_tx_phy,
1886 dma_conf->dma_tx_size, 1);
1887 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1888 stmmac_mode_init(priv, tx_q->dma_tx,
1889 tx_q->dma_tx_phy,
1890 dma_conf->dma_tx_size, 0);
1891 }
1892
1893 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1894
1895 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1896 struct dma_desc *p;
1897
1898 if (priv->extend_desc)
1899 p = &((tx_q->dma_etx + i)->basic);
1900 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1901 p = &((tx_q->dma_entx + i)->basic);
1902 else
1903 p = tx_q->dma_tx + i;
1904
1905 stmmac_clear_desc(priv, p);
1906
1907 tx_q->tx_skbuff_dma[i].buf = 0;
1908 tx_q->tx_skbuff_dma[i].map_as_page = false;
1909 tx_q->tx_skbuff_dma[i].len = 0;
1910 tx_q->tx_skbuff_dma[i].last_segment = false;
1911 tx_q->tx_skbuff[i] = NULL;
1912 }
1913
1914 return 0;
1915 }
1916
1917 static int init_dma_tx_desc_rings(struct net_device *dev,
1918 struct stmmac_dma_conf *dma_conf)
1919 {
1920 struct stmmac_priv *priv = netdev_priv(dev);
1921 u32 tx_queue_cnt;
1922 u32 queue;
1923
1924 tx_queue_cnt = priv->plat->tx_queues_to_use;
1925
1926 for (queue = 0; queue < tx_queue_cnt; queue++)
1927 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1928
1929 return 0;
1930 }
1931
1932 /**
1933 * init_dma_desc_rings - init the RX/TX descriptor rings
1934 * @dev: net device structure
1935 * @dma_conf: structure to take the dma data
1936 * @flags: gfp flag.
1937 * Description: this function initializes the DMA RX/TX descriptors
1938 * and allocates the socket buffers. It supports the chained and ring
1939 * modes.
1940 */
1941 static int init_dma_desc_rings(struct net_device *dev,
1942 struct stmmac_dma_conf *dma_conf,
1943 gfp_t flags)
1944 {
1945 struct stmmac_priv *priv = netdev_priv(dev);
1946 int ret;
1947
1948 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1949 if (ret)
1950 return ret;
1951
1952 ret = init_dma_tx_desc_rings(dev, dma_conf);
1953
1954 stmmac_clear_descriptors(priv, dma_conf);
1955
1956 if (netif_msg_hw(priv))
1957 stmmac_display_rings(priv, dma_conf);
1958
1959 return ret;
1960 }
1961
1962 /**
1963 * dma_free_tx_skbufs - free TX dma buffers
1964 * @priv: private structure
1965 * @dma_conf: structure to take the dma data
1966 * @queue: TX queue index
1967 */
1968 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1969 struct stmmac_dma_conf *dma_conf,
1970 u32 queue)
1971 {
1972 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1973 int i;
1974
1975 tx_q->xsk_frames_done = 0;
1976
1977 for (i = 0; i < dma_conf->dma_tx_size; i++)
1978 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1979
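/* Report any XSK frames completed above back to the pool before
 * detaching it from this queue.
 */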
1980 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1981 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1982 tx_q->xsk_frames_done = 0;
1983 tx_q->xsk_pool = NULL;
1984 }
1985 }
1986
1987 /**
1988 * stmmac_free_tx_skbufs - free TX skb buffers
1989 * @priv: private structure
1990 */
1991 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1992 {
1993 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1994 u32 queue;
1995
1996 for (queue = 0; queue < tx_queue_cnt; queue++)
1997 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1998 }
1999
2000 /**
2001 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
2002 * @priv: private structure
2003 * @dma_conf: structure to take the dma data
2004 * @queue: RX queue index
2005 */
2006 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
2007 struct stmmac_dma_conf *dma_conf,
2008 u32 queue)
2009 {
2010 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2011
2012 /* Release the DMA RX socket buffers */
2013 if (rx_q->xsk_pool)
2014 dma_free_rx_xskbufs(priv, dma_conf, queue);
2015 else
2016 dma_free_rx_skbufs(priv, dma_conf, queue);
2017
2018 rx_q->buf_alloc_num = 0;
2019 rx_q->xsk_pool = NULL;
2020
2021 /* Free DMA regions of consistent memory previously allocated */
2022 if (!priv->extend_desc)
2023 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2024 sizeof(struct dma_desc),
2025 rx_q->dma_rx, rx_q->dma_rx_phy);
2026 else
2027 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2028 sizeof(struct dma_extended_desc),
2029 rx_q->dma_erx, rx_q->dma_rx_phy);
2030
2031 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
2032 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
2033
2034 kfree(rx_q->buf_pool);
2035 if (rx_q->page_pool)
2036 page_pool_destroy(rx_q->page_pool);
2037 }
2038
2039 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2040 struct stmmac_dma_conf *dma_conf)
2041 {
2042 u32 rx_count = priv->plat->rx_queues_to_use;
2043 u32 queue;
2044
2045 /* Free RX queue resources */
2046 for (queue = 0; queue < rx_count; queue++)
2047 __free_dma_rx_desc_resources(priv, dma_conf, queue);
2048 }
2049
2050 /**
2051 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2052 * @priv: private structure
2053 * @dma_conf: structure to take the dma data
2054 * @queue: TX queue index
2055 */
2056 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2057 struct stmmac_dma_conf *dma_conf,
2058 u32 queue)
2059 {
2060 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2061 size_t size;
2062 void *addr;
2063
2064 /* Release the DMA TX socket buffers */
2065 dma_free_tx_skbufs(priv, dma_conf, queue);
2066
2067 if (priv->extend_desc) {
2068 size = sizeof(struct dma_extended_desc);
2069 addr = tx_q->dma_etx;
2070 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2071 size = sizeof(struct dma_edesc);
2072 addr = tx_q->dma_entx;
2073 } else {
2074 size = sizeof(struct dma_desc);
2075 addr = tx_q->dma_tx;
2076 }
2077
2078 size *= dma_conf->dma_tx_size;
2079
2080 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2081
2082 kfree(tx_q->tx_skbuff_dma);
2083 kfree(tx_q->tx_skbuff);
2084 }
2085
2086 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2087 struct stmmac_dma_conf *dma_conf)
2088 {
2089 u32 tx_count = priv->plat->tx_queues_to_use;
2090 u32 queue;
2091
2092 /* Free TX queue resources */
2093 for (queue = 0; queue < tx_count; queue++)
2094 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2095 }
2096
2097 /**
2098 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2099 * @priv: private structure
2100 * @dma_conf: structure to take the dma data
2101 * @queue: RX queue index
2102 * Description: according to which descriptor can be used (extended or basic)
2103 * this function allocates the RX resources for the given queue: the
2104 * descriptor ring, the buffer bookkeeping array and the page_pool that
2105 * backs the RX buffers, allowing a zero-copy mechanism.
2106 */
2107 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2108 struct stmmac_dma_conf *dma_conf,
2109 u32 queue)
2110 {
2111 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2112 struct stmmac_channel *ch = &priv->channel[queue];
2113 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2114 struct page_pool_params pp_params = { 0 };
2115 unsigned int dma_buf_sz_pad, num_pages;
2116 unsigned int napi_id;
2117 int ret;
2118
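/* Size each page_pool buffer so that the RX headroom, the DMA buffer
 * itself and the trailing skb_shared_info all fit, rounded up to a
 * whole number of pages.
 */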
2119 dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2120 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2121 num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2122
2123 rx_q->queue_index = queue;
2124 rx_q->priv_data = priv;
2125 rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2126
2127 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2128 pp_params.pool_size = dma_conf->dma_rx_size;
2129 pp_params.order = order_base_2(num_pages);
2130 pp_params.nid = dev_to_node(priv->device);
2131 pp_params.dev = priv->device;
2132 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2133 pp_params.offset = stmmac_rx_offset(priv);
2134 pp_params.max_len = dma_conf->dma_buf_sz;
2135
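/* With Split Header (SPH) enabled, buffers are filled from offset 0 and
 * the reclaimed headroom is made available to the DMA as well, since the
 * headers are written to the secondary (sec_page) buffer instead.
 */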
2136 if (priv->sph) {
2137 pp_params.offset = 0;
2138 pp_params.max_len += stmmac_rx_offset(priv);
2139 }
2140
2141 rx_q->page_pool = page_pool_create(&pp_params);
2142 if (IS_ERR(rx_q->page_pool)) {
2143 ret = PTR_ERR(rx_q->page_pool);
2144 rx_q->page_pool = NULL;
2145 return ret;
2146 }
2147
2148 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2149 sizeof(*rx_q->buf_pool),
2150 GFP_KERNEL);
2151 if (!rx_q->buf_pool)
2152 return -ENOMEM;
2153
2154 if (priv->extend_desc) {
2155 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2156 dma_conf->dma_rx_size *
2157 sizeof(struct dma_extended_desc),
2158 &rx_q->dma_rx_phy,
2159 GFP_KERNEL);
2160 if (!rx_q->dma_erx)
2161 return -ENOMEM;
2162
2163 } else {
2164 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2165 dma_conf->dma_rx_size *
2166 sizeof(struct dma_desc),
2167 &rx_q->dma_rx_phy,
2168 GFP_KERNEL);
2169 if (!rx_q->dma_rx)
2170 return -ENOMEM;
2171 }
2172
2173 if (stmmac_xdp_is_enabled(priv) &&
2174 test_bit(queue, priv->af_xdp_zc_qps))
2175 napi_id = ch->rxtx_napi.napi_id;
2176 else
2177 napi_id = ch->rx_napi.napi_id;
2178
2179 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2180 rx_q->queue_index,
2181 napi_id);
2182 if (ret) {
2183 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2184 return -EINVAL;
2185 }
2186
2187 return 0;
2188 }
2189
2190 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2191 struct stmmac_dma_conf *dma_conf)
2192 {
2193 u32 rx_count = priv->plat->rx_queues_to_use;
2194 u32 queue;
2195 int ret;
2196
2197 /* RX queues buffers and DMA */
2198 for (queue = 0; queue < rx_count; queue++) {
2199 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2200 if (ret)
2201 goto err_dma;
2202 }
2203
2204 return 0;
2205
2206 err_dma:
2207 free_dma_rx_desc_resources(priv, dma_conf);
2208
2209 return ret;
2210 }
2211
2212 /**
2213 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2214 * @priv: private structure
2215 * @dma_conf: structure to take the dma data
2216 * @queue: TX queue index
2217 * Description: according to which descriptor can be used (extended,
2218 * enhanced for TBS, or basic) this function allocates the TX resources
2219 * for the given queue: the descriptor ring and the per-descriptor
2220 * bookkeeping arrays (tx_skbuff and tx_skbuff_dma).
2221 */
2222 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2223 struct stmmac_dma_conf *dma_conf,
2224 u32 queue)
2225 {
2226 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2227 size_t size;
2228 void *addr;
2229
2230 tx_q->queue_index = queue;
2231 tx_q->priv_data = priv;
2232
2233 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2234 sizeof(*tx_q->tx_skbuff_dma),
2235 GFP_KERNEL);
2236 if (!tx_q->tx_skbuff_dma)
2237 return -ENOMEM;
2238
2239 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2240 sizeof(struct sk_buff *),
2241 GFP_KERNEL);
2242 if (!tx_q->tx_skbuff)
2243 return -ENOMEM;
2244
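/* Pick the descriptor layout used by this queue: extended, enhanced
 * (TBS capable) or basic.
 */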
2245 if (priv->extend_desc)
2246 size = sizeof(struct dma_extended_desc);
2247 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2248 size = sizeof(struct dma_edesc);
2249 else
2250 size = sizeof(struct dma_desc);
2251
2252 size *= dma_conf->dma_tx_size;
2253
2254 addr = dma_alloc_coherent(priv->device, size,
2255 &tx_q->dma_tx_phy, GFP_KERNEL);
2256 if (!addr)
2257 return -ENOMEM;
2258
2259 if (priv->extend_desc)
2260 tx_q->dma_etx = addr;
2261 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2262 tx_q->dma_entx = addr;
2263 else
2264 tx_q->dma_tx = addr;
2265
2266 return 0;
2267 }
2268
2269 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2270 struct stmmac_dma_conf *dma_conf)
2271 {
2272 u32 tx_count = priv->plat->tx_queues_to_use;
2273 u32 queue;
2274 int ret;
2275
2276 /* TX queues buffers and DMA */
2277 for (queue = 0; queue < tx_count; queue++) {
2278 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2279 if (ret)
2280 goto err_dma;
2281 }
2282
2283 return 0;
2284
2285 err_dma:
2286 free_dma_tx_desc_resources(priv, dma_conf);
2287 return ret;
2288 }
2289
2290 /**
2291 * alloc_dma_desc_resources - alloc TX/RX resources.
2292 * @priv: private structure
2293 * @dma_conf: structure to take the dma data
2294 * Description: according to which descriptor can be used (extended or basic)
2295 * this function allocates the resources for the TX and RX paths. In case of
2296 * reception, for example, it pre-allocates the RX socket buffers in order to
2297 * allow a zero-copy mechanism.
2298 */
2299 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2300 struct stmmac_dma_conf *dma_conf)
2301 {
2302 /* RX Allocation */
2303 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2304
2305 if (ret)
2306 return ret;
2307
2308 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2309
2310 return ret;
2311 }
2312
2313 /**
2314 * free_dma_desc_resources - free dma desc resources
2315 * @priv: private structure
2316 * @dma_conf: structure to take the dma data
2317 */
2318 static void free_dma_desc_resources(struct stmmac_priv *priv,
2319 struct stmmac_dma_conf *dma_conf)
2320 {
2321 /* Release the DMA TX socket buffers */
2322 free_dma_tx_desc_resources(priv, dma_conf);
2323
2324 /* Release the DMA RX socket buffers later
2325 * to ensure all pending XDP_TX buffers are returned.
2326 */
2327 free_dma_rx_desc_resources(priv, dma_conf);
2328 }
2329
2330 /**
2331 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2332 * @priv: driver private structure
2333 * Description: It is used for enabling the rx queues in the MAC
2334 */
2335 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2336 {
2337 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2338 int queue;
2339 u8 mode;
2340
2341 for (queue = 0; queue < rx_queues_count; queue++) {
2342 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2343 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2344 }
2345 }
2346
2347 /**
2348 * stmmac_start_rx_dma - start RX DMA channel
2349 * @priv: driver private structure
2350 * @chan: RX channel index
2351 * Description:
2352 * This starts an RX DMA channel
2353 */
2354 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2355 {
2356 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2357 stmmac_start_rx(priv, priv->ioaddr, chan);
2358 }
2359
2360 /**
2361 * stmmac_start_tx_dma - start TX DMA channel
2362 * @priv: driver private structure
2363 * @chan: TX channel index
2364 * Description:
2365 * This starts a TX DMA channel
2366 */
2367 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2368 {
2369 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2370 stmmac_start_tx(priv, priv->ioaddr, chan);
2371 }
2372
2373 /**
2374 * stmmac_stop_rx_dma - stop RX DMA channel
2375 * @priv: driver private structure
2376 * @chan: RX channel index
2377 * Description:
2378 * This stops an RX DMA channel
2379 */
2380 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2381 {
2382 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2383 stmmac_stop_rx(priv, priv->ioaddr, chan);
2384 }
2385
2386 /**
2387 * stmmac_stop_tx_dma - stop TX DMA channel
2388 * @priv: driver private structure
2389 * @chan: TX channel index
2390 * Description:
2391 * This stops a TX DMA channel
2392 */
2393 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2394 {
2395 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2396 stmmac_stop_tx(priv, priv->ioaddr, chan);
2397 }
2398
2399 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2400 {
2401 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2402 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2403 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2404 u32 chan;
2405
2406 for (chan = 0; chan < dma_csr_ch; chan++) {
2407 struct stmmac_channel *ch = &priv->channel[chan];
2408 unsigned long flags;
2409
2410 spin_lock_irqsave(&ch->lock, flags);
2411 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2412 spin_unlock_irqrestore(&ch->lock, flags);
2413 }
2414 }
2415
2416 /**
2417 * stmmac_start_all_dma - start all RX and TX DMA channels
2418 * @priv: driver private structure
2419 * Description:
2420 * This starts all the RX and TX DMA channels
2421 */
2422 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2423 {
2424 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2425 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2426 u32 chan = 0;
2427
2428 for (chan = 0; chan < rx_channels_count; chan++)
2429 stmmac_start_rx_dma(priv, chan);
2430
2431 for (chan = 0; chan < tx_channels_count; chan++)
2432 stmmac_start_tx_dma(priv, chan);
2433 }
2434
2435 /**
2436 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2437 * @priv: driver private structure
2438 * Description:
2439 * This stops the RX and TX DMA channels
2440 */
2441 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2442 {
2443 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2444 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2445 u32 chan = 0;
2446
2447 for (chan = 0; chan < rx_channels_count; chan++)
2448 stmmac_stop_rx_dma(priv, chan);
2449
2450 for (chan = 0; chan < tx_channels_count; chan++)
2451 stmmac_stop_tx_dma(priv, chan);
2452 }
2453
2454 /**
2455 * stmmac_dma_operation_mode - HW DMA operation mode
2456 * @priv: driver private structure
2457 * Description: it is used for configuring the DMA operation mode register in
2458 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2459 */
2460 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2461 {
2462 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2463 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2464 int rxfifosz = priv->plat->rx_fifo_size;
2465 int txfifosz = priv->plat->tx_fifo_size;
2466 u32 txmode = 0;
2467 u32 rxmode = 0;
2468 u32 chan = 0;
2469 u8 qmode = 0;
2470
2471 if (rxfifosz == 0)
2472 rxfifosz = priv->dma_cap.rx_fifo_size;
2473 if (txfifosz == 0)
2474 txfifosz = priv->dma_cap.tx_fifo_size;
2475
2476 /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2477 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2478 rxfifosz /= rx_channels_count;
2479 txfifosz /= tx_channels_count;
2480 }
2481
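/* Select the TX/RX operation modes: forced threshold mode, full
 * Store-and-Forward, or the default of threshold TX with SF RX.
 */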
2482 if (priv->plat->force_thresh_dma_mode) {
2483 txmode = tc;
2484 rxmode = tc;
2485 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2486 /*
2487 * In case of GMAC, Store-and-Forward mode can be enabled
2488 * to perform the TX checksum offload (COE) in HW. This requires that:
2489 * 1) TX COE is actually supported, and
2490 * 2) there is no buggy Jumbo frame support that requires the
2491 * checksum not to be inserted in the TDES.
2492 */
2493 txmode = SF_DMA_MODE;
2494 rxmode = SF_DMA_MODE;
2495 priv->xstats.threshold = SF_DMA_MODE;
2496 } else {
2497 txmode = tc;
2498 rxmode = SF_DMA_MODE;
2499 }
2500
2501 /* configure all channels */
2502 for (chan = 0; chan < rx_channels_count; chan++) {
2503 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2504 u32 buf_size;
2505
2506 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2507
2508 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2509 rxfifosz, qmode);
2510
2511 if (rx_q->xsk_pool) {
2512 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2513 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2514 buf_size,
2515 chan);
2516 } else {
2517 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2518 priv->dma_conf.dma_buf_sz,
2519 chan);
2520 }
2521 }
2522
2523 for (chan = 0; chan < tx_channels_count; chan++) {
2524 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2525
2526 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2527 txfifosz, qmode);
2528 }
2529 }
2530
2531 static void stmmac_xsk_request_timestamp(void *_priv)
2532 {
2533 struct stmmac_metadata_request *meta_req = _priv;
2534
2535 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2536 *meta_req->set_ic = true;
2537 }
2538
2539 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2540 {
2541 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2542 struct stmmac_priv *priv = tx_compl->priv;
2543 struct dma_desc *desc = tx_compl->desc;
2544 bool found = false;
2545 u64 ns = 0;
2546
2547 if (!priv->hwts_tx_en)
2548 return 0;
2549
2550 /* check tx tstamp status */
2551 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2552 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2553 found = true;
2554 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2555 found = true;
2556 }
2557
2558 if (found) {
2559 ns -= priv->plat->cdc_error_adj;
2560 return ns_to_ktime(ns);
2561 }
2562
2563 return 0;
2564 }
2565
2566 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2567 {
2568 struct timespec64 ts = ns_to_timespec64(launch_time);
2569 struct stmmac_metadata_request *meta_req = _priv;
2570
2571 if (meta_req->tbs & STMMAC_TBS_EN)
2572 stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2573 ts.tv_nsec);
2574 }
2575
2576 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2577 .tmo_request_timestamp = stmmac_xsk_request_timestamp,
2578 .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
2579 .tmo_request_launch_time = stmmac_xsk_request_launch_time,
2580 };
2581
2582 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2583 {
2584 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2585 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2586 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2587 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2588 unsigned int entry = tx_q->cur_tx;
2589 struct dma_desc *tx_desc = NULL;
2590 struct xdp_desc xdp_desc;
2591 bool work_done = true;
2592 u32 tx_set_ic_bit = 0;
2593
2594 /* Avoids TX time-out as we are sharing with slow path */
2595 txq_trans_cond_update(nq);
2596
2597 budget = min(budget, stmmac_tx_avail(priv, queue));
2598
2599 while (budget-- > 0) {
2600 struct stmmac_metadata_request meta_req;
2601 struct xsk_tx_metadata *meta = NULL;
2602 dma_addr_t dma_addr;
2603 bool set_ic;
2604
2605 /* We share the ring with the slow path, so stop XSK TX descriptor
2606 * submission when the available TX ring space drops below the threshold.
2607 */
2608 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2609 !netif_carrier_ok(priv->dev)) {
2610 work_done = false;
2611 break;
2612 }
2613
2614 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2615 break;
2616
2617 if (priv->est && priv->est->enable &&
2618 priv->est->max_sdu[queue] &&
2619 xdp_desc.len > priv->est->max_sdu[queue]) {
2620 priv->xstats.max_sdu_txq_drop[queue]++;
2621 continue;
2622 }
2623
2624 if (likely(priv->extend_desc))
2625 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2626 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2627 tx_desc = &tx_q->dma_entx[entry].basic;
2628 else
2629 tx_desc = tx_q->dma_tx + entry;
2630
2631 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2632 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2633 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2634
2635 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2636
2637 /* To return an XDP buffer to the XSK pool, we simply call
2638 * xsk_tx_completed(), so we don't need to fill up
2639 * 'buf' and 'xdpf'.
2640 */
2641 tx_q->tx_skbuff_dma[entry].buf = 0;
2642 tx_q->xdpf[entry] = NULL;
2643
2644 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2645 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2646 tx_q->tx_skbuff_dma[entry].last_segment = true;
2647 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2648
2649 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2650
2651 tx_q->tx_count_frames++;
2652
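/* Set the Interrupt-on-Completion bit only every tx_coal_frames
 * descriptors to limit TX completion interrupts.
 */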
2653 if (!priv->tx_coal_frames[queue])
2654 set_ic = false;
2655 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2656 set_ic = true;
2657 else
2658 set_ic = false;
2659
2660 meta_req.priv = priv;
2661 meta_req.tx_desc = tx_desc;
2662 meta_req.set_ic = &set_ic;
2663 meta_req.tbs = tx_q->tbs;
2664 meta_req.edesc = &tx_q->dma_entx[entry];
2665 xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2666 &meta_req);
2667 if (set_ic) {
2668 tx_q->tx_count_frames = 0;
2669 stmmac_set_tx_ic(priv, tx_desc);
2670 tx_set_ic_bit++;
2671 }
2672
2673 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2674 true, priv->mode, true, true,
2675 xdp_desc.len);
2676
2677 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2678
2679 xsk_tx_metadata_to_compl(meta,
2680 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2681
2682 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2683 entry = tx_q->cur_tx;
2684 }
2685 u64_stats_update_begin(&txq_stats->napi_syncp);
2686 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2687 u64_stats_update_end(&txq_stats->napi_syncp);
2688
2689 if (tx_desc) {
2690 stmmac_flush_tx_descriptors(priv, queue);
2691 xsk_tx_release(pool);
2692 }
2693
2694 /* Return true only if both of the following conditions are met:
2695 * a) TX Budget is still available
2696 * b) work_done = true when XSK TX desc peek is empty (no more
2697 * pending XSK TX for transmission)
2698 */
2699 return !!budget && work_done;
2700 }
2701
2702 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2703 {
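/* On threshold-related TX errors, grow the DMA threshold in steps of
 * 64 (up to 256) unless Store-and-Forward mode is already in use.
 */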
2704 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2705 tc += 64;
2706
2707 if (priv->plat->force_thresh_dma_mode)
2708 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2709 else
2710 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2711 chan);
2712
2713 priv->xstats.threshold = tc;
2714 }
2715 }
2716
2717 /**
2718 * stmmac_tx_clean - to manage the transmission completion
2719 * @priv: driver private structure
2720 * @budget: napi budget limiting this functions packet handling
2721 * @queue: TX queue index
2722 * @pending_packets: signal to arm the TX coal timer
2723 * Description: it reclaims the transmit resources after transmission completes.
2724 * If some packets still need to be handled due to TX coalescing, set
2725 * pending_packets to true to make NAPI arm the TX coal timer.
2726 */
2727 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2728 bool *pending_packets)
2729 {
2730 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2731 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2732 unsigned int bytes_compl = 0, pkts_compl = 0;
2733 unsigned int entry, xmits = 0, count = 0;
2734 u32 tx_packets = 0, tx_errors = 0;
2735
2736 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2737
2738 tx_q->xsk_frames_done = 0;
2739
2740 entry = tx_q->dirty_tx;
2741
2742 /* Try to clean all completed TX frames in one shot */
2743 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2744 struct xdp_frame *xdpf;
2745 struct sk_buff *skb;
2746 struct dma_desc *p;
2747 int status;
2748
2749 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2750 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2751 xdpf = tx_q->xdpf[entry];
2752 skb = NULL;
2753 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2754 xdpf = NULL;
2755 skb = tx_q->tx_skbuff[entry];
2756 } else {
2757 xdpf = NULL;
2758 skb = NULL;
2759 }
2760
2761 if (priv->extend_desc)
2762 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2763 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2764 p = &tx_q->dma_entx[entry].basic;
2765 else
2766 p = tx_q->dma_tx + entry;
2767
2768 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2769 /* Check if the descriptor is owned by the DMA */
2770 if (unlikely(status & tx_dma_own))
2771 break;
2772
2773 count++;
2774
2775 /* Make sure descriptor fields are read after reading
2776 * the own bit.
2777 */
2778 dma_rmb();
2779
2780 /* Just consider the last segment and ...*/
2781 if (likely(!(status & tx_not_ls))) {
2782 /* ... verify the status error condition */
2783 if (unlikely(status & tx_err)) {
2784 tx_errors++;
2785 if (unlikely(status & tx_err_bump_tc))
2786 stmmac_bump_dma_threshold(priv, queue);
2787 } else {
2788 tx_packets++;
2789 }
2790 if (skb) {
2791 stmmac_get_tx_hwtstamp(priv, p, skb);
2792 } else if (tx_q->xsk_pool &&
2793 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2794 struct stmmac_xsk_tx_complete tx_compl = {
2795 .priv = priv,
2796 .desc = p,
2797 };
2798
2799 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2800 &stmmac_xsk_tx_metadata_ops,
2801 &tx_compl);
2802 }
2803 }
2804
2805 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2806 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2807 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2808 dma_unmap_page(priv->device,
2809 tx_q->tx_skbuff_dma[entry].buf,
2810 tx_q->tx_skbuff_dma[entry].len,
2811 DMA_TO_DEVICE);
2812 else
2813 dma_unmap_single(priv->device,
2814 tx_q->tx_skbuff_dma[entry].buf,
2815 tx_q->tx_skbuff_dma[entry].len,
2816 DMA_TO_DEVICE);
2817 tx_q->tx_skbuff_dma[entry].buf = 0;
2818 tx_q->tx_skbuff_dma[entry].len = 0;
2819 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2820 }
2821
2822 stmmac_clean_desc3(priv, tx_q, p);
2823
2824 tx_q->tx_skbuff_dma[entry].last_segment = false;
2825 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2826
2827 if (xdpf &&
2828 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2829 xdp_return_frame_rx_napi(xdpf);
2830 tx_q->xdpf[entry] = NULL;
2831 }
2832
2833 if (xdpf &&
2834 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2835 xdp_return_frame(xdpf);
2836 tx_q->xdpf[entry] = NULL;
2837 }
2838
2839 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2840 tx_q->xsk_frames_done++;
2841
2842 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2843 if (likely(skb)) {
2844 pkts_compl++;
2845 bytes_compl += skb->len;
2846 dev_consume_skb_any(skb);
2847 tx_q->tx_skbuff[entry] = NULL;
2848 }
2849 }
2850
2851 stmmac_release_tx_desc(priv, p, priv->mode);
2852
2853 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2854 }
2855 tx_q->dirty_tx = entry;
2856
2857 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2858 pkts_compl, bytes_compl);
2859
2860 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2861 queue))) &&
2862 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2863
2864 netif_dbg(priv, tx_done, priv->dev,
2865 "%s: restart transmit\n", __func__);
2866 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2867 }
2868
2869 if (tx_q->xsk_pool) {
2870 bool work_done;
2871
2872 if (tx_q->xsk_frames_done)
2873 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2874
2875 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2876 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2877
2878 /* For XSK TX, we try to send as many as possible.
2879 * If XSK work done (XSK TX desc empty and budget still
2880 * available), return "budget - 1" to reenable TX IRQ.
2881 * Else, return "budget" to make NAPI continue polling.
2882 */
2883 work_done = stmmac_xdp_xmit_zc(priv, queue,
2884 STMMAC_XSK_TX_BUDGET_MAX);
2885 if (work_done)
2886 xmits = budget - 1;
2887 else
2888 xmits = budget;
2889 }
2890
2891 if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2892 stmmac_restart_sw_lpi_timer(priv);
2893
2894 /* We still have pending packets, let's call for a new scheduling */
2895 if (tx_q->dirty_tx != tx_q->cur_tx)
2896 *pending_packets = true;
2897
2898 u64_stats_update_begin(&txq_stats->napi_syncp);
2899 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2900 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2901 u64_stats_inc(&txq_stats->napi.tx_clean);
2902 u64_stats_update_end(&txq_stats->napi_syncp);
2903
2904 priv->xstats.tx_errors += tx_errors;
2905
2906 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2907
2908 /* Combine decisions from TX clean and XSK TX */
2909 return max(count, xmits);
2910 }
2911
2912 /**
2913 * stmmac_tx_err - to manage the tx error
2914 * @priv: driver private structure
2915 * @chan: channel index
2916 * Description: it cleans the descriptors and restarts the transmission
2917 * in case of transmission errors.
2918 */
2919 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2920 {
2921 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2922
2923 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2924
2925 stmmac_stop_tx_dma(priv, chan);
2926 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2927 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2928 stmmac_reset_tx_queue(priv, chan);
2929 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2930 tx_q->dma_tx_phy, chan);
2931 stmmac_start_tx_dma(priv, chan);
2932
2933 priv->xstats.tx_errors++;
2934 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2935 }
2936
2937 /**
2938 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2939 * @priv: driver private structure
2940 * @txmode: TX operating mode
2941 * @rxmode: RX operating mode
2942 * @chan: channel index
2943 * Description: it is used for configuring the DMA operation mode at
2944 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2945 * mode.
2946 */
2947 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2948 u32 rxmode, u32 chan)
2949 {
2950 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2951 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2952 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2953 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2954 int rxfifosz = priv->plat->rx_fifo_size;
2955 int txfifosz = priv->plat->tx_fifo_size;
2956
2957 if (rxfifosz == 0)
2958 rxfifosz = priv->dma_cap.rx_fifo_size;
2959 if (txfifosz == 0)
2960 txfifosz = priv->dma_cap.tx_fifo_size;
2961
2962 /* Adjust for real per queue fifo size */
2963 rxfifosz /= rx_channels_count;
2964 txfifosz /= tx_channels_count;
2965
2966 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2967 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2968 }
2969
2970 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2971 {
2972 int ret;
2973
2974 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2975 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2976 if (ret && (ret != -EINVAL)) {
2977 stmmac_global_err(priv);
2978 return true;
2979 }
2980
2981 return false;
2982 }
2983
2984 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2985 {
2986 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2987 &priv->xstats, chan, dir);
2988 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2989 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2990 struct stmmac_channel *ch = &priv->channel[chan];
2991 struct napi_struct *rx_napi;
2992 struct napi_struct *tx_napi;
2993 unsigned long flags;
2994
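/* When an XSK pool is attached, RX and TX are serviced by the combined
 * rxtx NAPI; otherwise use the dedicated per-direction NAPIs.
 */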
2995 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2996 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2997
2998 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2999 if (napi_schedule_prep(rx_napi)) {
3000 spin_lock_irqsave(&ch->lock, flags);
3001 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
3002 spin_unlock_irqrestore(&ch->lock, flags);
3003 __napi_schedule(rx_napi);
3004 }
3005 }
3006
3007 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
3008 if (napi_schedule_prep(tx_napi)) {
3009 spin_lock_irqsave(&ch->lock, flags);
3010 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
3011 spin_unlock_irqrestore(&ch->lock, flags);
3012 __napi_schedule(tx_napi);
3013 }
3014 }
3015
3016 return status;
3017 }
3018
3019 /**
3020 * stmmac_dma_interrupt - DMA ISR
3021 * @priv: driver private structure
3022 * Description: this is the DMA ISR. It is called by the main ISR.
3023 * It calls the dwmac dma routine and schedules the poll method in case
3024 * some work can be done.
3025 */
3026 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3027 {
3028 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3029 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3030 u32 channels_to_check = tx_channel_count > rx_channel_count ?
3031 tx_channel_count : rx_channel_count;
3032 u32 chan;
3033 int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3034
3035 /* Make sure we never check beyond our status buffer. */
3036 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3037 channels_to_check = ARRAY_SIZE(status);
3038
3039 for (chan = 0; chan < channels_to_check; chan++)
3040 status[chan] = stmmac_napi_check(priv, chan,
3041 DMA_DIR_RXTX);
3042
3043 for (chan = 0; chan < tx_channel_count; chan++) {
3044 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3045 /* Try to bump up the dma threshold on this failure */
3046 stmmac_bump_dma_threshold(priv, chan);
3047 } else if (unlikely(status[chan] == tx_hard_error)) {
3048 stmmac_tx_err(priv, chan);
3049 }
3050 }
3051 }
3052
3053 /**
3054 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3055 * @priv: driver private structure
3056 * Description: this masks the MMC irq, since the counters are managed in SW.
3057 */
3058 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3059 {
3060 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3061 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3062
3063 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3064
3065 if (priv->dma_cap.rmon) {
3066 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3067 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3068 } else
3069 netdev_info(priv->dev, "No MAC Management Counters available\n");
3070 }
3071
3072 /**
3073 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3074 * @priv: driver private structure
3075 * Description:
3076 * new GMAC chip generations have a new register to indicate the
3077 * presence of the optional feature/functions.
3078 * This can also be used to override the value passed through the
3079 * platform, which is necessary for old MAC10/100 and GMAC chips.
3080 */
3081 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3082 {
3083 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3084 }
3085
3086 /**
3087 * stmmac_check_ether_addr - check if the MAC addr is valid
3088 * @priv: driver private structure
3089 * Description:
3090 * it verifies that the MAC address is valid; in case it is not, it
3091 * generates a random MAC address
3092 */
3093 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3094 {
3095 u8 addr[ETH_ALEN];
3096
3097 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3098 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3099 if (is_valid_ether_addr(addr))
3100 eth_hw_addr_set(priv->dev, addr);
3101 else
3102 eth_hw_addr_random(priv->dev);
3103 dev_info(priv->device, "device MAC address %pM\n",
3104 priv->dev->dev_addr);
3105 }
3106 }
3107
3108 /**
3109 * stmmac_init_dma_engine - DMA init.
3110 * @priv: driver private structure
3111 * Description:
3112 * It inits the DMA invoking the specific MAC/GMAC callback.
3113 * Some DMA parameters can be passed from the platform;
3114 * if they are not passed, a default is kept for the MAC or GMAC.
3115 */
3116 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3117 {
3118 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3119 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3120 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3121 struct stmmac_rx_queue *rx_q;
3122 struct stmmac_tx_queue *tx_q;
3123 u32 chan = 0;
3124 int ret = 0;
3125
3126 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3127 netdev_err(priv->dev, "Invalid DMA configuration\n");
3128 return -EINVAL;
3129 }
3130
3131 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3132 priv->plat->dma_cfg->atds = 1;
3133
3134 ret = stmmac_reset(priv, priv->ioaddr);
3135 if (ret) {
3136 netdev_err(priv->dev, "Failed to reset the dma\n");
3137 return ret;
3138 }
3139
3140 /* DMA Configuration */
3141 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3142
3143 if (priv->plat->axi)
3144 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3145
3146 /* DMA CSR Channel configuration */
3147 for (chan = 0; chan < dma_csr_ch; chan++) {
3148 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3149 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3150 }
3151
3152 /* DMA RX Channel Configuration */
3153 for (chan = 0; chan < rx_channels_count; chan++) {
3154 rx_q = &priv->dma_conf.rx_queue[chan];
3155
3156 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3157 rx_q->dma_rx_phy, chan);
3158
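/* Place the RX tail pointer just past the descriptors that were
 * actually filled with a buffer.
 */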
3159 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3160 (rx_q->buf_alloc_num *
3161 sizeof(struct dma_desc));
3162 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3163 rx_q->rx_tail_addr, chan);
3164 }
3165
3166 /* DMA TX Channel Configuration */
3167 for (chan = 0; chan < tx_channels_count; chan++) {
3168 tx_q = &priv->dma_conf.tx_queue[chan];
3169
3170 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3171 tx_q->dma_tx_phy, chan);
3172
3173 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3174 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3175 tx_q->tx_tail_addr, chan);
3176 }
3177
3178 return ret;
3179 }
3180
3181 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3182 {
3183 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3184 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3185 struct stmmac_channel *ch;
3186 struct napi_struct *napi;
3187
3188 if (!tx_coal_timer)
3189 return;
3190
3191 ch = &priv->channel[tx_q->queue_index];
3192 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3193
3194 /* Arm the timer only if napi is not already scheduled.
3195 * If napi is scheduled, try to cancel any pending timer; it will be
3196 * armed again in the next scheduled napi.
3197 */
3198 if (unlikely(!napi_is_scheduled(napi)))
3199 hrtimer_start(&tx_q->txtimer,
3200 STMMAC_COAL_TIMER(tx_coal_timer),
3201 HRTIMER_MODE_REL);
3202 else
3203 hrtimer_try_to_cancel(&tx_q->txtimer);
3204 }
3205
3206 /**
3207 * stmmac_tx_timer - mitigation sw timer for tx.
3208 * @t: data pointer
3209 * Description:
3210 * This is the timer handler to directly invoke the stmmac_tx_clean.
3211 */
3212 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3213 {
3214 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3215 struct stmmac_priv *priv = tx_q->priv_data;
3216 struct stmmac_channel *ch;
3217 struct napi_struct *napi;
3218
3219 ch = &priv->channel[tx_q->queue_index];
3220 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3221
3222 if (likely(napi_schedule_prep(napi))) {
3223 unsigned long flags;
3224
3225 spin_lock_irqsave(&ch->lock, flags);
3226 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3227 spin_unlock_irqrestore(&ch->lock, flags);
3228 __napi_schedule(napi);
3229 }
3230
3231 return HRTIMER_NORESTART;
3232 }
3233
3234 /**
3235 * stmmac_init_coalesce - init mitigation options.
3236 * @priv: driver private structure
3237 * Description:
3238 * This inits the coalesce parameters: i.e. timer rate,
3239 * timer handler and default threshold used for enabling the
3240 * interrupt on completion bit.
3241 */
3242 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3243 {
3244 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3245 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3246 u32 chan;
3247
3248 for (chan = 0; chan < tx_channel_count; chan++) {
3249 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3250
3251 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3252 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3253
3254 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3255 }
3256
3257 for (chan = 0; chan < rx_channel_count; chan++)
3258 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3259 }
3260
3261 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3262 {
3263 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3264 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3265 u32 chan;
3266
3267 /* set TX ring length */
3268 for (chan = 0; chan < tx_channels_count; chan++)
3269 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3270 (priv->dma_conf.dma_tx_size - 1), chan);
3271
3272 /* set RX ring length */
3273 for (chan = 0; chan < rx_channels_count; chan++)
3274 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3275 (priv->dma_conf.dma_rx_size - 1), chan);
3276 }
3277
3278 /**
3279 * stmmac_set_tx_queue_weight - Set TX queue weight
3280 * @priv: driver private structure
3281 * Description: It is used for setting TX queues weight
3282 */
3283 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3284 {
3285 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3286 u32 weight;
3287 u32 queue;
3288
3289 for (queue = 0; queue < tx_queues_count; queue++) {
3290 weight = priv->plat->tx_queues_cfg[queue].weight;
3291 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3292 }
3293 }
3294
3295 /**
3296 * stmmac_configure_cbs - Configure CBS in TX queue
3297 * @priv: driver private structure
3298 * Description: It is used for configuring CBS in AVB TX queues
3299 */
3300 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3301 {
3302 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3303 u32 mode_to_use;
3304 u32 queue;
3305
3306 /* queue 0 is reserved for legacy traffic */
3307 for (queue = 1; queue < tx_queues_count; queue++) {
3308 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3309 if (mode_to_use == MTL_QUEUE_DCB)
3310 continue;
3311
3312 stmmac_config_cbs(priv, priv->hw,
3313 priv->plat->tx_queues_cfg[queue].send_slope,
3314 priv->plat->tx_queues_cfg[queue].idle_slope,
3315 priv->plat->tx_queues_cfg[queue].high_credit,
3316 priv->plat->tx_queues_cfg[queue].low_credit,
3317 queue);
3318 }
3319 }
3320
3321 /**
3322 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3323 * @priv: driver private structure
3324 * Description: It is used for mapping RX queues to RX dma channels
3325 */
3326 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3327 {
3328 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3329 u32 queue;
3330 u32 chan;
3331
3332 for (queue = 0; queue < rx_queues_count; queue++) {
3333 chan = priv->plat->rx_queues_cfg[queue].chan;
3334 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3335 }
3336 }
3337
3338 /**
3339 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3340 * @priv: driver private structure
3341 * Description: It is used for configuring the RX Queue Priority
3342 */
3343 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3344 {
3345 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3346 u32 queue;
3347 u32 prio;
3348
3349 for (queue = 0; queue < rx_queues_count; queue++) {
3350 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3351 continue;
3352
3353 prio = priv->plat->rx_queues_cfg[queue].prio;
3354 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3355 }
3356 }
3357
3358 /**
3359 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3360 * @priv: driver private structure
3361 * Description: It is used for configuring the TX Queue Priority
3362 */
3363 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3364 {
3365 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3366 u32 queue;
3367 u32 prio;
3368
3369 for (queue = 0; queue < tx_queues_count; queue++) {
3370 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3371 continue;
3372
3373 prio = priv->plat->tx_queues_cfg[queue].prio;
3374 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3375 }
3376 }
3377
3378 /**
3379 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3380 * @priv: driver private structure
3381 * Description: It is used for configuring the RX queue routing
3382 */
3383 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3384 {
3385 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3386 u32 queue;
3387 u8 packet;
3388
3389 for (queue = 0; queue < rx_queues_count; queue++) {
3390 /* no specific packet type routing specified for the queue */
3391 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3392 continue;
3393
3394 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3395 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3396 }
3397 }
3398
3399 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3400 {
3401 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3402 priv->rss.enable = false;
3403 return;
3404 }
3405
3406 if (priv->dev->features & NETIF_F_RXHASH)
3407 priv->rss.enable = true;
3408 else
3409 priv->rss.enable = false;
3410
3411 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3412 priv->plat->rx_queues_to_use);
3413 }
3414
3415 /**
3416 * stmmac_mtl_configuration - Configure MTL
3417 * @priv: driver private structure
3418 * Description: It is used for configuring the MTL
3419 */
3420 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3421 {
3422 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3423 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3424
3425 if (tx_queues_count > 1)
3426 stmmac_set_tx_queue_weight(priv);
3427
3428 /* Configure MTL RX algorithms */
3429 if (rx_queues_count > 1)
3430 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3431 priv->plat->rx_sched_algorithm);
3432
3433 /* Configure MTL TX algorithms */
3434 if (tx_queues_count > 1)
3435 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3436 priv->plat->tx_sched_algorithm);
3437
3438 /* Configure CBS in AVB TX queues */
3439 if (tx_queues_count > 1)
3440 stmmac_configure_cbs(priv);
3441
3442 /* Map RX MTL to DMA channels */
3443 stmmac_rx_queue_dma_chan_map(priv);
3444
3445 /* Enable MAC RX Queues */
3446 stmmac_mac_enable_rx_queues(priv);
3447
3448 /* Set RX priorities */
3449 if (rx_queues_count > 1)
3450 stmmac_mac_config_rx_queues_prio(priv);
3451
3452 /* Set TX priorities */
3453 if (tx_queues_count > 1)
3454 stmmac_mac_config_tx_queues_prio(priv);
3455
3456 /* Set RX routing */
3457 if (rx_queues_count > 1)
3458 stmmac_mac_config_rx_queues_routing(priv);
3459
3460 /* Receive Side Scaling */
3461 if (rx_queues_count > 1)
3462 stmmac_mac_config_rss(priv);
3463 }
3464
3465 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3466 {
3467 if (priv->dma_cap.asp) {
3468 netdev_info(priv->dev, "Enabling Safety Features\n");
3469 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3470 priv->plat->safety_feat_cfg);
3471 } else {
3472 netdev_info(priv->dev, "No Safety Features support found\n");
3473 }
3474 }
3475
3476 /**
3477 * stmmac_hw_setup - setup mac in a usable state.
3478 * @dev : pointer to the device structure.
3479 * @ptp_register: register PTP if set
3480 * Description:
3481 * this is the main function to setup the HW in a usable state: the
3482 * dma engine is reset, the core registers are configured (e.g. AXI,
3483 * Checksum features, timers). The DMA is ready to start receiving and
3484 * transmitting.
3485 * Return value:
3486 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3487 * file on failure.
3488 */
3489 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3490 {
3491 struct stmmac_priv *priv = netdev_priv(dev);
3492 u32 rx_cnt = priv->plat->rx_queues_to_use;
3493 u32 tx_cnt = priv->plat->tx_queues_to_use;
3494 bool sph_en;
3495 u32 chan;
3496 int ret;
3497
3498 /* Make sure RX clock is enabled */
3499 if (priv->hw->phylink_pcs)
3500 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3501
3502 /* Note that clk_rx_i must be running for reset to complete. This
3503 * clock may also be required when setting the MAC address.
3504 *
3505 * Block the receive clock stop for LPI mode at the PHY in case
3506 * the link is established with EEE mode active.
3507 */
3508 phylink_rx_clk_stop_block(priv->phylink);
3509
3510 /* DMA initialization and SW reset */
3511 ret = stmmac_init_dma_engine(priv);
3512 if (ret < 0) {
3513 phylink_rx_clk_stop_unblock(priv->phylink);
3514 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3515 __func__);
3516 return ret;
3517 }
3518
3519 /* Copy the MAC addr into the HW */
3520 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3521 phylink_rx_clk_stop_unblock(priv->phylink);
3522
3523 /* PS and related bits will be programmed according to the speed */
3524 if (priv->hw->pcs) {
3525 int speed = priv->plat->mac_port_sel_speed;
3526
3527 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3528 (speed == SPEED_1000)) {
3529 priv->hw->ps = speed;
3530 } else {
3531 dev_warn(priv->device, "invalid port speed\n");
3532 priv->hw->ps = 0;
3533 }
3534 }
3535
3536 /* Initialize the MAC Core */
3537 stmmac_core_init(priv, priv->hw, dev);
3538
3539 /* Initialize MTL */
3540 stmmac_mtl_configuration(priv);
3541
3542 /* Initialize Safety Features */
3543 stmmac_safety_feat_configuration(priv);
3544
3545 ret = stmmac_rx_ipc(priv, priv->hw);
3546 if (!ret) {
3547 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3548 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3549 priv->hw->rx_csum = 0;
3550 }
3551
3552 /* Enable the MAC Rx/Tx */
3553 stmmac_mac_set(priv, priv->ioaddr, true);
3554
3555 /* Set the HW DMA mode and the COE */
3556 stmmac_dma_operation_mode(priv);
3557
3558 stmmac_mmc_setup(priv);
3559
3560 if (ptp_register) {
3561 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3562 if (ret < 0)
3563 netdev_warn(priv->dev,
3564 "failed to enable PTP reference clock: %pe\n",
3565 ERR_PTR(ret));
3566 }
3567
3568 ret = stmmac_init_ptp(priv);
3569 if (ret == -EOPNOTSUPP)
3570 netdev_info(priv->dev, "PTP not supported by HW\n");
3571 else if (ret)
3572 netdev_warn(priv->dev, "PTP init failed\n");
3573 else if (ptp_register)
3574 stmmac_ptp_register(priv);
3575
3576 if (priv->use_riwt) {
3577 u32 queue;
3578
3579 for (queue = 0; queue < rx_cnt; queue++) {
3580 if (!priv->rx_riwt[queue])
3581 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3582
3583 stmmac_rx_watchdog(priv, priv->ioaddr,
3584 priv->rx_riwt[queue], queue);
3585 }
3586 }
3587
3588 if (priv->hw->pcs)
3589 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3590
3591 /* set TX and RX rings length */
3592 stmmac_set_rings_length(priv);
3593
3594 /* Enable TSO */
3595 if (priv->tso) {
3596 for (chan = 0; chan < tx_cnt; chan++) {
3597 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3598
3599 /* TSO and TBS cannot co-exist */
3600 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3601 continue;
3602
3603 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3604 }
3605 }
3606
3607 /* Enable Split Header */
3608 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3609 for (chan = 0; chan < rx_cnt; chan++)
3610 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3611
3612
3613 /* VLAN Tag Insertion */
3614 if (priv->dma_cap.vlins)
3615 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3616
3617 /* TBS */
3618 for (chan = 0; chan < tx_cnt; chan++) {
3619 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3620 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3621
3622 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3623 }
3624
3625 /* Configure real RX and TX queues */
3626 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3627 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3628
3629 /* Start the ball rolling... */
3630 stmmac_start_all_dma(priv);
3631
3632 phylink_rx_clk_stop_block(priv->phylink);
3633 stmmac_set_hw_vlan_mode(priv, priv->hw);
3634 phylink_rx_clk_stop_unblock(priv->phylink);
3635
3636 return 0;
3637 }
3638
3639 static void stmmac_hw_teardown(struct net_device *dev)
3640 {
3641 struct stmmac_priv *priv = netdev_priv(dev);
3642
3643 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3644 }
3645
3646 static void stmmac_free_irq(struct net_device *dev,
3647 enum request_irq_err irq_err, int irq_idx)
3648 {
3649 struct stmmac_priv *priv = netdev_priv(dev);
3650 int j;
3651
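/* Teardown mirrors the request path in reverse: the switch below deliberately
 * falls through, so entering at the stage where IRQ requesting failed frees
 * every IRQ that was already requested, while REQ_IRQ_ERR_ALL frees the
 * complete set.
 */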
3652 switch (irq_err) {
3653 case REQ_IRQ_ERR_ALL:
3654 irq_idx = priv->plat->tx_queues_to_use;
3655 fallthrough;
3656 case REQ_IRQ_ERR_TX:
3657 for (j = irq_idx - 1; j >= 0; j--) {
3658 if (priv->tx_irq[j] > 0) {
3659 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3660 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3661 }
3662 }
3663 irq_idx = priv->plat->rx_queues_to_use;
3664 fallthrough;
3665 case REQ_IRQ_ERR_RX:
3666 for (j = irq_idx - 1; j >= 0; j--) {
3667 if (priv->rx_irq[j] > 0) {
3668 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3669 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3670 }
3671 }
3672
3673 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3674 free_irq(priv->sfty_ue_irq, dev);
3675 fallthrough;
3676 case REQ_IRQ_ERR_SFTY_UE:
3677 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3678 free_irq(priv->sfty_ce_irq, dev);
3679 fallthrough;
3680 case REQ_IRQ_ERR_SFTY_CE:
3681 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3682 free_irq(priv->lpi_irq, dev);
3683 fallthrough;
3684 case REQ_IRQ_ERR_LPI:
3685 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3686 free_irq(priv->wol_irq, dev);
3687 fallthrough;
3688 case REQ_IRQ_ERR_SFTY:
3689 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3690 free_irq(priv->sfty_irq, dev);
3691 fallthrough;
3692 case REQ_IRQ_ERR_WOL:
3693 free_irq(dev->irq, dev);
3694 fallthrough;
3695 case REQ_IRQ_ERR_MAC:
3696 case REQ_IRQ_ERR_NO:
3697 /* If the MAC IRQ request failed, there are no more IRQs to free */
3698 break;
3699 }
3700 }
3701
3702 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3703 {
3704 struct stmmac_priv *priv = netdev_priv(dev);
3705 enum request_irq_err irq_err;
3706 int irq_idx = 0;
3707 char *int_name;
3708 int ret;
3709 int i;
3710
3711 /* For common interrupt */
3712 int_name = priv->int_name_mac;
3713 sprintf(int_name, "%s:%s", dev->name, "mac");
3714 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3715 0, int_name, dev);
3716 if (unlikely(ret < 0)) {
3717 netdev_err(priv->dev,
3718 "%s: alloc mac MSI %d (error: %d)\n",
3719 __func__, dev->irq, ret);
3720 irq_err = REQ_IRQ_ERR_MAC;
3721 goto irq_error;
3722 }
3723
3724 /* Request the Wake IRQ in case another line
3725 * is used for WoL
3726 */
3727 priv->wol_irq_disabled = true;
3728 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3729 int_name = priv->int_name_wol;
3730 sprintf(int_name, "%s:%s", dev->name, "wol");
3731 ret = request_irq(priv->wol_irq,
3732 stmmac_mac_interrupt,
3733 0, int_name, dev);
3734 if (unlikely(ret < 0)) {
3735 netdev_err(priv->dev,
3736 "%s: alloc wol MSI %d (error: %d)\n",
3737 __func__, priv->wol_irq, ret);
3738 irq_err = REQ_IRQ_ERR_WOL;
3739 goto irq_error;
3740 }
3741 }
3742
3743 /* Request the LPI IRQ in case another line
3744 * is used for LPI
3745 */
3746 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3747 int_name = priv->int_name_lpi;
3748 sprintf(int_name, "%s:%s", dev->name, "lpi");
3749 ret = request_irq(priv->lpi_irq,
3750 stmmac_mac_interrupt,
3751 0, int_name, dev);
3752 if (unlikely(ret < 0)) {
3753 netdev_err(priv->dev,
3754 "%s: alloc lpi MSI %d (error: %d)\n",
3755 __func__, priv->lpi_irq, ret);
3756 irq_err = REQ_IRQ_ERR_LPI;
3757 goto irq_error;
3758 }
3759 }
3760
3761 /* Request the common Safety Feature Correctable/Uncorrectable
3762 * Error line in case another line is used
3763 */
3764 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3765 int_name = priv->int_name_sfty;
3766 sprintf(int_name, "%s:%s", dev->name, "safety");
3767 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3768 0, int_name, dev);
3769 if (unlikely(ret < 0)) {
3770 netdev_err(priv->dev,
3771 "%s: alloc sfty MSI %d (error: %d)\n",
3772 __func__, priv->sfty_irq, ret);
3773 irq_err = REQ_IRQ_ERR_SFTY;
3774 goto irq_error;
3775 }
3776 }
3777
3778 /* Request the Safety Feature Correctable Error line in case
3779 * another line is used
3780 */
3781 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3782 int_name = priv->int_name_sfty_ce;
3783 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3784 ret = request_irq(priv->sfty_ce_irq,
3785 stmmac_safety_interrupt,
3786 0, int_name, dev);
3787 if (unlikely(ret < 0)) {
3788 netdev_err(priv->dev,
3789 "%s: alloc sfty ce MSI %d (error: %d)\n",
3790 __func__, priv->sfty_ce_irq, ret);
3791 irq_err = REQ_IRQ_ERR_SFTY_CE;
3792 goto irq_error;
3793 }
3794 }
3795
3796 /* Request the Safety Feature Uncorrectable Error line in case
3797 * another line is used
3798 */
3799 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3800 int_name = priv->int_name_sfty_ue;
3801 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3802 ret = request_irq(priv->sfty_ue_irq,
3803 stmmac_safety_interrupt,
3804 0, int_name, dev);
3805 if (unlikely(ret < 0)) {
3806 netdev_err(priv->dev,
3807 "%s: alloc sfty ue MSI %d (error: %d)\n",
3808 __func__, priv->sfty_ue_irq, ret);
3809 irq_err = REQ_IRQ_ERR_SFTY_UE;
3810 goto irq_error;
3811 }
3812 }
3813
3814 /* Request Rx MSI irq */
3815 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3816 if (i >= MTL_MAX_RX_QUEUES)
3817 break;
3818 if (priv->rx_irq[i] == 0)
3819 continue;
3820
3821 int_name = priv->int_name_rx_irq[i];
3822 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3823 ret = request_irq(priv->rx_irq[i],
3824 stmmac_msi_intr_rx,
3825 0, int_name, &priv->dma_conf.rx_queue[i]);
3826 if (unlikely(ret < 0)) {
3827 netdev_err(priv->dev,
3828 "%s: alloc rx-%d MSI %d (error: %d)\n",
3829 __func__, i, priv->rx_irq[i], ret);
3830 irq_err = REQ_IRQ_ERR_RX;
3831 irq_idx = i;
3832 goto irq_error;
3833 }
3834 irq_set_affinity_hint(priv->rx_irq[i],
3835 cpumask_of(i % num_online_cpus()));
3836 }
3837
3838 /* Request Tx MSI irq */
3839 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3840 if (i >= MTL_MAX_TX_QUEUES)
3841 break;
3842 if (priv->tx_irq[i] == 0)
3843 continue;
3844
3845 int_name = priv->int_name_tx_irq[i];
3846 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3847 ret = request_irq(priv->tx_irq[i],
3848 stmmac_msi_intr_tx,
3849 0, int_name, &priv->dma_conf.tx_queue[i]);
3850 if (unlikely(ret < 0)) {
3851 netdev_err(priv->dev,
3852 "%s: alloc tx-%d MSI %d (error: %d)\n",
3853 __func__, i, priv->tx_irq[i], ret);
3854 irq_err = REQ_IRQ_ERR_TX;
3855 irq_idx = i;
3856 goto irq_error;
3857 }
3858 irq_set_affinity_hint(priv->tx_irq[i],
3859 cpumask_of(i % num_online_cpus()));
3860 }
3861
3862 return 0;
3863
3864 irq_error:
3865 stmmac_free_irq(dev, irq_err, irq_idx);
3866 return ret;
3867 }
3868
3869 static int stmmac_request_irq_single(struct net_device *dev)
3870 {
3871 struct stmmac_priv *priv = netdev_priv(dev);
3872 enum request_irq_err irq_err;
3873 int ret;
3874
3875 ret = request_irq(dev->irq, stmmac_interrupt,
3876 IRQF_SHARED, dev->name, dev);
3877 if (unlikely(ret < 0)) {
3878 netdev_err(priv->dev,
3879 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3880 __func__, dev->irq, ret);
3881 irq_err = REQ_IRQ_ERR_MAC;
3882 goto irq_error;
3883 }
3884
3885 /* Request the Wake IRQ in case another line
3886 * is used for WoL
3887 */
3888 priv->wol_irq_disabled = true;
3889 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3890 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3891 IRQF_SHARED, dev->name, dev);
3892 if (unlikely(ret < 0)) {
3893 netdev_err(priv->dev,
3894 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3895 __func__, priv->wol_irq, ret);
3896 irq_err = REQ_IRQ_ERR_WOL;
3897 goto irq_error;
3898 }
3899 }
3900
3901 /* Request the IRQ lines */
3902 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3903 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3904 IRQF_SHARED, dev->name, dev);
3905 if (unlikely(ret < 0)) {
3906 netdev_err(priv->dev,
3907 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3908 __func__, priv->lpi_irq, ret);
3909 irq_err = REQ_IRQ_ERR_LPI;
3910 goto irq_error;
3911 }
3912 }
3913
3914 /* Request the common Safety Feature Correctable/Uncorrectable
3915 * Error line in case another line is used
3916 */
3917 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3918 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3919 IRQF_SHARED, dev->name, dev);
3920 if (unlikely(ret < 0)) {
3921 netdev_err(priv->dev,
3922 "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3923 __func__, priv->sfty_irq, ret);
3924 irq_err = REQ_IRQ_ERR_SFTY;
3925 goto irq_error;
3926 }
3927 }
3928
3929 return 0;
3930
3931 irq_error:
3932 stmmac_free_irq(dev, irq_err, 0);
3933 return ret;
3934 }
3935
3936 static int stmmac_request_irq(struct net_device *dev)
3937 {
3938 struct stmmac_priv *priv = netdev_priv(dev);
3939 int ret;
3940
3941 /* Request the IRQ lines */
3942 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3943 ret = stmmac_request_irq_multi_msi(dev);
3944 else
3945 ret = stmmac_request_irq_single(dev);
3946
3947 return ret;
3948 }
3949
3950 /**
3951 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3952 * @priv: driver private structure
3953 * @mtu: MTU to setup the dma queue and buf with
3954 * Description: Allocate and generate a dma_conf based on the provided MTU.
3955 * Allocate the Tx/Rx DMA queue and init them.
3956 * Return value:
3957 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3958 */
3959 static struct stmmac_dma_conf *
3960 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3961 {
3962 struct stmmac_dma_conf *dma_conf;
3963 int chan, bfsize, ret;
3964
3965 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3966 if (!dma_conf) {
3967 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3968 __func__);
3969 return ERR_PTR(-ENOMEM);
3970 }
3971
3972 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3973 if (bfsize < 0)
3974 bfsize = 0;
3975
3976 if (bfsize < BUF_SIZE_16KiB)
3977 bfsize = stmmac_set_bfsize(mtu, 0);
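/* Note on the buffer size selection above: stmmac_set_16kib_bfsize() only
 * yields a 16KiB buffer on descriptor layouts that support it (and may not be
 * implemented at all, in which case bfsize stays 0), so the usual case is a
 * buffer size derived from the MTU by stmmac_set_bfsize().
 */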
3978
3979 dma_conf->dma_buf_sz = bfsize;
3980 /* Choose the tx/rx ring sizes from the ones already defined in the
3981 * priv struct, if any.
3982 */
3983 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3984 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3985
3986 if (!dma_conf->dma_tx_size)
3987 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3988 if (!dma_conf->dma_rx_size)
3989 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3990
3991 /* Earlier check for TBS */
3992 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3993 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3994 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3995
3996 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3997 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3998 }
3999
4000 ret = alloc_dma_desc_resources(priv, dma_conf);
4001 if (ret < 0) {
4002 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
4003 __func__);
4004 goto alloc_error;
4005 }
4006
4007 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
4008 if (ret < 0) {
4009 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4010 __func__);
4011 goto init_error;
4012 }
4013
4014 return dma_conf;
4015
4016 init_error:
4017 free_dma_desc_resources(priv, dma_conf);
4018 alloc_error:
4019 kfree(dma_conf);
4020 return ERR_PTR(ret);
4021 }
4022
4023 /**
4024 * __stmmac_open - open entry point of the driver
4025 * @dev : pointer to the device structure.
4026 * @dma_conf : structure to take the dma data
4027 * Description:
4028 * This function is the open entry point of the driver.
4029 * Return value:
4030 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4031 * file on failure.
4032 */
4033 static int __stmmac_open(struct net_device *dev,
4034 struct stmmac_dma_conf *dma_conf)
4035 {
4036 struct stmmac_priv *priv = netdev_priv(dev);
4037 int mode = priv->plat->phy_interface;
4038 u32 chan;
4039 int ret;
4040
4041 /* Initialise the tx lpi timer, converting from msec to usec */
4042 if (!priv->tx_lpi_timer)
4043 priv->tx_lpi_timer = eee_timer * 1000;
4044
4045 ret = pm_runtime_resume_and_get(priv->device);
4046 if (ret < 0)
4047 return ret;
4048
4049 if ((!priv->hw->xpcs ||
4050 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
4051 ret = stmmac_init_phy(dev);
4052 if (ret) {
4053 netdev_err(priv->dev,
4054 "%s: Cannot attach to PHY (error: %d)\n",
4055 __func__, ret);
4056 goto init_phy_error;
4057 }
4058 }
4059
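/* Carry the per-queue TBS enable state (set e.g. via the tc-etf offload) over
 * from the current dma_conf before it is overwritten by the newly allocated
 * one below.
 */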
4060 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
4061 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
4062 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
4063 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
4064
4065 stmmac_reset_queues_param(priv);
4066
4067 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
4068 priv->plat->serdes_powerup) {
4069 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
4070 if (ret < 0) {
4071 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
4072 __func__);
4073 goto init_error;
4074 }
4075 }
4076
4077 ret = stmmac_hw_setup(dev, true);
4078 if (ret < 0) {
4079 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
4080 goto init_error;
4081 }
4082
4083 stmmac_init_coalesce(priv);
4084
4085 phylink_start(priv->phylink);
4086 /* We may have called phylink_speed_down before */
4087 phylink_speed_up(priv->phylink);
4088
4089 ret = stmmac_request_irq(dev);
4090 if (ret)
4091 goto irq_error;
4092
4093 stmmac_enable_all_queues(priv);
4094 netif_tx_start_all_queues(priv->dev);
4095 stmmac_enable_all_dma_irq(priv);
4096
4097 return 0;
4098
4099 irq_error:
4100 phylink_stop(priv->phylink);
4101
4102 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4103 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4104
4105 stmmac_hw_teardown(dev);
4106 init_error:
4107 phylink_disconnect_phy(priv->phylink);
4108 init_phy_error:
4109 pm_runtime_put(priv->device);
4110 return ret;
4111 }
4112
4113 static int stmmac_open(struct net_device *dev)
4114 {
4115 struct stmmac_priv *priv = netdev_priv(dev);
4116 struct stmmac_dma_conf *dma_conf;
4117 int ret;
4118
4119 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4120 if (IS_ERR(dma_conf))
4121 return PTR_ERR(dma_conf);
4122
4123 ret = __stmmac_open(dev, dma_conf);
4124 if (ret)
4125 free_dma_desc_resources(priv, dma_conf);
4126
4127 kfree(dma_conf);
4128 return ret;
4129 }
4130
4131 /**
4132 * stmmac_release - close entry point of the driver
4133 * @dev : device pointer.
4134 * Description:
4135 * This is the stop entry point of the driver.
4136 */
4137 static int stmmac_release(struct net_device *dev)
4138 {
4139 struct stmmac_priv *priv = netdev_priv(dev);
4140 u32 chan;
4141
4142 if (device_may_wakeup(priv->device))
4143 phylink_speed_down(priv->phylink, false);
4144 /* Stop and disconnect the PHY */
4145 phylink_stop(priv->phylink);
4146 phylink_disconnect_phy(priv->phylink);
4147
4148 stmmac_disable_all_queues(priv);
4149
4150 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4151 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4152
4153 netif_tx_disable(dev);
4154
4155 /* Free the IRQ lines */
4156 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4157
4158 /* Stop TX/RX DMA and clear the descriptors */
4159 stmmac_stop_all_dma(priv);
4160
4161 /* Release and free the Rx/Tx resources */
4162 free_dma_desc_resources(priv, &priv->dma_conf);
4163
4164 /* Powerdown Serdes if there is */
4165 if (priv->plat->serdes_powerdown)
4166 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4167
4168 stmmac_release_ptp(priv);
4169
4170 if (stmmac_fpe_supported(priv))
4171 ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4172
4173 pm_runtime_put(priv->device);
4174
4175 return 0;
4176 }
4177
4178 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4179 struct stmmac_tx_queue *tx_q)
4180 {
4181 u16 tag = 0x0, inner_tag = 0x0;
4182 u32 inner_type = 0x0;
4183 struct dma_desc *p;
4184
4185 if (!priv->dma_cap.vlins)
4186 return false;
4187 if (!skb_vlan_tag_present(skb))
4188 return false;
4189 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4190 inner_tag = skb_vlan_tag_get(skb);
4191 inner_type = STMMAC_VLAN_INSERT;
4192 }
4193
4194 tag = skb_vlan_tag_get(skb);
4195
4196 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4197 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4198 else
4199 p = &tx_q->dma_tx[tx_q->cur_tx];
4200
4201 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4202 return false;
4203
4204 stmmac_set_tx_owner(priv, p);
4205 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4206 return true;
4207 }
4208
4209 /**
4210 * stmmac_tso_allocator - Allocate TSO TX descriptors for a buffer
4211 * @priv: driver private structure
4212 * @des: buffer start address
4213 * @total_len: total length to fill in descriptors
4214 * @last_segment: condition for the last descriptor
4215 * @queue: TX queue index
4216 * Description:
4217 * This function fills a descriptor and requests further descriptors
4218 * according to the buffer length to fill
4219 */
4220 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4221 int total_len, bool last_segment, u32 queue)
4222 {
4223 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4224 struct dma_desc *desc;
4225 u32 buff_size;
4226 int tmp_len;
4227
4228 tmp_len = total_len;
4229
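/* Split the payload into TSO_MAX_BUFF_SIZE (16383 byte) chunks, one
 * descriptor per chunk. For example, a ~40000 byte payload would use three
 * descriptors of 16383, 16383 and 7234 bytes.
 */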
4230 while (tmp_len > 0) {
4231 dma_addr_t curr_addr;
4232
4233 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4234 priv->dma_conf.dma_tx_size);
4235 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4236
4237 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4238 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4239 else
4240 desc = &tx_q->dma_tx[tx_q->cur_tx];
4241
4242 curr_addr = des + (total_len - tmp_len);
4243 stmmac_set_desc_addr(priv, desc, curr_addr);
4244 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4245 TSO_MAX_BUFF_SIZE : tmp_len;
4246
4247 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4248 0, 1,
4249 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4250 0, 0);
4251
4252 tmp_len -= TSO_MAX_BUFF_SIZE;
4253 }
4254 }
4255
4256 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4257 {
4258 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4259 int desc_size;
4260
4261 if (likely(priv->extend_desc))
4262 desc_size = sizeof(struct dma_extended_desc);
4263 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4264 desc_size = sizeof(struct dma_edesc);
4265 else
4266 desc_size = sizeof(struct dma_desc);
4267
4268 /* The own bit must be the last thing written when preparing the
4269 * descriptor, and a barrier is then needed to make sure that
4270 * everything is coherent before granting the DMA engine.
4271 */
4272 wmb();
4273
4274 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4275 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4276 }
4277
4278 /**
4279 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4280 * @skb : the socket buffer
4281 * @dev : device pointer
4282 * Description: this is the transmit function that is called on TSO frames
4283 * (support available on GMAC4 and newer chips).
4284 * The diagram below shows the ring programming in case of TSO frames:
4285 *
4286 * First Descriptor
4287 * --------
4288 * | DES0 |---> buffer1 = L2/L3/L4 header
4289 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4290 * | | width is 32-bit, but we never use it.
4291 * | | Also can be used as the most-significant 8-bits or 16-bits of
4292 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4293 * | | or 48-bit, and we always use it.
4294 * | DES2 |---> buffer1 len
4295 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4296 * --------
4297 * --------
4298 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4299 * | DES1 |---> same as the First Descriptor
4300 * | DES2 |---> buffer1 len
4301 * | DES3 |
4302 * --------
4303 * |
4304 * ...
4305 * |
4306 * --------
4307 * | DES0 |---> buffer1 = Split TCP Payload
4308 * | DES1 |---> same as the First Descriptor
4309 * | DES2 |---> buffer1 len
4310 * | DES3 |
4311 * --------
4312 *
4313 * The MSS is fixed while TSO is enabled, so the TDES3 ctx field does not need to be reprogrammed per frame.
4314 */
4315 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4316 {
4317 struct dma_desc *desc, *first, *mss_desc = NULL;
4318 struct stmmac_priv *priv = netdev_priv(dev);
4319 unsigned int first_entry, tx_packets;
4320 struct stmmac_txq_stats *txq_stats;
4321 struct stmmac_tx_queue *tx_q;
4322 u32 pay_len, mss, queue;
4323 int i, first_tx, nfrags;
4324 u8 proto_hdr_len, hdr;
4325 dma_addr_t des;
4326 bool set_ic;
4327
4328 /* Always insert VLAN tag to SKB payload for TSO frames.
4329 *
4330 * Never insert the VLAN tag by HW, since segments split by the
4331 * TSO engine would be un-tagged by mistake.
4332 */
4333 if (skb_vlan_tag_present(skb)) {
4334 skb = __vlan_hwaccel_push_inside(skb);
4335 if (unlikely(!skb)) {
4336 priv->xstats.tx_dropped++;
4337 return NETDEV_TX_OK;
4338 }
4339 }
4340
4341 nfrags = skb_shinfo(skb)->nr_frags;
4342 queue = skb_get_queue_mapping(skb);
4343
4344 tx_q = &priv->dma_conf.tx_queue[queue];
4345 txq_stats = &priv->xstats.txq_stats[queue];
4346 first_tx = tx_q->cur_tx;
4347
4348 /* Compute header lengths */
4349 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4350 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4351 hdr = sizeof(struct udphdr);
4352 } else {
4353 proto_hdr_len = skb_tcp_all_headers(skb);
4354 hdr = tcp_hdrlen(skb);
4355 }
4356
4357 /* Desc availability based on threshold should be enough safe */
4358 if (unlikely(stmmac_tx_avail(priv, queue) <
4359 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4360 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4361 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4362 queue));
4363 /* This is a hard error, log it. */
4364 netdev_err(priv->dev,
4365 "%s: Tx Ring full when queue awake\n",
4366 __func__);
4367 }
4368 return NETDEV_TX_BUSY;
4369 }
4370
4371 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4372
4373 mss = skb_shinfo(skb)->gso_size;
4374
4375 /* set new MSS value if needed */
4376 if (mss != tx_q->mss) {
4377 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4378 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4379 else
4380 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4381
4382 stmmac_set_mss(priv, mss_desc, mss);
4383 tx_q->mss = mss;
4384 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4385 priv->dma_conf.dma_tx_size);
4386 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4387 }
4388
4389 if (netif_msg_tx_queued(priv)) {
4390 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4391 __func__, hdr, proto_hdr_len, pay_len, mss);
4392 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4393 skb->data_len);
4394 }
4395
4396 first_entry = tx_q->cur_tx;
4397 WARN_ON(tx_q->tx_skbuff[first_entry]);
4398
4399 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4400 desc = &tx_q->dma_entx[first_entry].basic;
4401 else
4402 desc = &tx_q->dma_tx[first_entry];
4403 first = desc;
4404
4405 /* first descriptor: fill Headers on Buf1 */
4406 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4407 DMA_TO_DEVICE);
4408 if (dma_mapping_error(priv->device, des))
4409 goto dma_map_err;
4410
4411 stmmac_set_desc_addr(priv, first, des);
4412 stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4413 (nfrags == 0), queue);
4414
4415 /* In case two or more DMA transmit descriptors are allocated for this
4416 * non-paged SKB data, the DMA buffer address should be saved to
4417 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4418 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4419 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4420 * since the tail areas of the DMA buffer can be accessed by DMA engine
4421 * sooner or later.
4422 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4423 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4424 * this DMA buffer right after the DMA engine completely finishes the
4425 * full buffer transmission.
4426 */
4427 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4428 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4429 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4430 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4431
4432 /* Prepare fragments */
4433 for (i = 0; i < nfrags; i++) {
4434 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4435
4436 des = skb_frag_dma_map(priv->device, frag, 0,
4437 skb_frag_size(frag),
4438 DMA_TO_DEVICE);
4439 if (dma_mapping_error(priv->device, des))
4440 goto dma_map_err;
4441
4442 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4443 (i == nfrags - 1), queue);
4444
4445 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4446 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4447 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4448 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4449 }
4450
4451 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4452
4453 /* Only the last descriptor gets to point to the skb. */
4454 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4455 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4456
4457 /* Manage tx mitigation */
4458 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4459 tx_q->tx_count_frames += tx_packets;
4460
4461 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4462 set_ic = true;
4463 else if (!priv->tx_coal_frames[queue])
4464 set_ic = false;
4465 else if (tx_packets > priv->tx_coal_frames[queue])
4466 set_ic = true;
4467 else if ((tx_q->tx_count_frames %
4468 priv->tx_coal_frames[queue]) < tx_packets)
4469 set_ic = true;
4470 else
4471 set_ic = false;
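/* Summary of the coalescing decision above: always raise an interrupt when a
 * HW TX timestamp is requested; with tx_coal_frames set to 0 rely purely on
 * the TX coalescing timer; otherwise set the Interrupt-on-Completion bit
 * whenever the frame count crosses the tx_coal_frames threshold.
 */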
4472
4473 if (set_ic) {
4474 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4475 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4476 else
4477 desc = &tx_q->dma_tx[tx_q->cur_tx];
4478
4479 tx_q->tx_count_frames = 0;
4480 stmmac_set_tx_ic(priv, desc);
4481 }
4482
4483 /* We've used all descriptors we need for this skb, however,
4484 * advance cur_tx so that it references a fresh descriptor.
4485 * ndo_start_xmit will fill this descriptor the next time it's
4486 * called and stmmac_tx_clean may clean up to this descriptor.
4487 */
4488 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4489
4490 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4491 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4492 __func__);
4493 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4494 }
4495
4496 u64_stats_update_begin(&txq_stats->q_syncp);
4497 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4498 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4499 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4500 if (set_ic)
4501 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4502 u64_stats_update_end(&txq_stats->q_syncp);
4503
4504 if (priv->sarc_type)
4505 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4506
4507 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4508 priv->hwts_tx_en)) {
4509 /* declare that device is doing timestamping */
4510 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4511 stmmac_enable_tx_timestamp(priv, first);
4512 }
4513
4514 /* Complete the first descriptor before granting the DMA */
4515 stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4516 tx_q->tx_skbuff_dma[first_entry].last_segment,
4517 hdr / 4, (skb->len - proto_hdr_len));
4518
4519 /* If context desc is used to change MSS */
4520 if (mss_desc) {
4521 /* Make sure that first descriptor has been completely
4522 * written, including its own bit. This is because MSS is
4523 * actually before first descriptor, so we need to make
4524 * sure that MSS's own bit is the last thing written.
4525 */
4526 dma_wmb();
4527 stmmac_set_tx_owner(priv, mss_desc);
4528 }
4529
4530 if (netif_msg_pktdata(priv)) {
4531 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4532 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4533 tx_q->cur_tx, first, nfrags);
4534 pr_info(">>> frame to be transmitted: ");
4535 print_pkt(skb->data, skb_headlen(skb));
4536 }
4537
4538 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4539 skb_tx_timestamp(skb);
4540
4541 stmmac_flush_tx_descriptors(priv, queue);
4542 stmmac_tx_timer_arm(priv, queue);
4543
4544 return NETDEV_TX_OK;
4545
4546 dma_map_err:
4547 dev_err(priv->device, "Tx dma map failed\n");
4548 dev_kfree_skb(skb);
4549 priv->xstats.tx_dropped++;
4550 return NETDEV_TX_OK;
4551 }
4552
4553 /**
4554 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4555 * @skb: socket buffer to check
4556 *
4557 * Check if a packet has an ethertype that will trigger the IP header checks
4558 * and IP/TCP checksum engine of the stmmac core.
4559 *
4560 * Return: true if the ethertype can trigger the checksum engine, false
4561 * otherwise
4562 */
4563 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4564 {
4565 int depth = 0;
4566 __be16 proto;
4567
4568 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4569 &depth);
4570
4571 return (depth <= ETH_HLEN) &&
4572 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4573 }
4574
4575 /**
4576 * stmmac_xmit - Tx entry point of the driver
4577 * @skb : the socket buffer
4578 * @dev : device pointer
4579 * Description : this is the tx entry point of the driver.
4580 * It programs the chain or the ring and supports oversized frames
4581 * and SG feature.
4582 */
4583 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4584 {
4585 unsigned int first_entry, tx_packets, enh_desc;
4586 struct stmmac_priv *priv = netdev_priv(dev);
4587 unsigned int nopaged_len = skb_headlen(skb);
4588 int i, csum_insertion = 0, is_jumbo = 0;
4589 u32 queue = skb_get_queue_mapping(skb);
4590 int nfrags = skb_shinfo(skb)->nr_frags;
4591 int gso = skb_shinfo(skb)->gso_type;
4592 struct stmmac_txq_stats *txq_stats;
4593 struct dma_edesc *tbs_desc = NULL;
4594 struct dma_desc *desc, *first;
4595 struct stmmac_tx_queue *tx_q;
4596 bool has_vlan, set_ic;
4597 int entry, first_tx;
4598 dma_addr_t des;
4599
4600 tx_q = &priv->dma_conf.tx_queue[queue];
4601 txq_stats = &priv->xstats.txq_stats[queue];
4602 first_tx = tx_q->cur_tx;
4603
4604 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4605 stmmac_stop_sw_lpi(priv);
4606
4607 /* Manage oversized TCP frames for GMAC4 device */
4608 if (skb_is_gso(skb) && priv->tso) {
4609 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4610 return stmmac_tso_xmit(skb, dev);
4611 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4612 return stmmac_tso_xmit(skb, dev);
4613 }
4614
4615 if (priv->est && priv->est->enable &&
4616 priv->est->max_sdu[queue] &&
4617 skb->len > priv->est->max_sdu[queue]) {
4618 priv->xstats.max_sdu_txq_drop[queue]++;
4619 goto max_sdu_err;
4620 }
4621
4622 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4623 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4624 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4625 queue));
4626 /* This is a hard error, log it. */
4627 netdev_err(priv->dev,
4628 "%s: Tx Ring full when queue awake\n",
4629 __func__);
4630 }
4631 return NETDEV_TX_BUSY;
4632 }
4633
4634 /* Check if VLAN can be inserted by HW */
4635 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4636
4637 entry = tx_q->cur_tx;
4638 first_entry = entry;
4639 WARN_ON(tx_q->tx_skbuff[first_entry]);
4640
4641 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4642 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4643 * queues. In that case, checksum offloading for those queues that don't
4644 * support tx coe needs to fall back to software checksum calculation.
4645 *
4646 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4647 * also have to be checksummed in software.
4648 */
4649 if (csum_insertion &&
4650 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4651 !stmmac_has_ip_ethertype(skb))) {
4652 if (unlikely(skb_checksum_help(skb)))
4653 goto dma_map_err;
4654 csum_insertion = !csum_insertion;
4655 }
4656
4657 if (likely(priv->extend_desc))
4658 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4659 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4660 desc = &tx_q->dma_entx[entry].basic;
4661 else
4662 desc = tx_q->dma_tx + entry;
4663
4664 first = desc;
4665
4666 if (has_vlan)
4667 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4668
4669 enh_desc = priv->plat->enh_desc;
4670 /* To program the descriptors according to the size of the frame */
4671 if (enh_desc)
4672 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4673
4674 if (unlikely(is_jumbo)) {
4675 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4676 if (unlikely(entry < 0) && (entry != -EINVAL))
4677 goto dma_map_err;
4678 }
4679
4680 for (i = 0; i < nfrags; i++) {
4681 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4682 int len = skb_frag_size(frag);
4683 bool last_segment = (i == (nfrags - 1));
4684
4685 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4686 WARN_ON(tx_q->tx_skbuff[entry]);
4687
4688 if (likely(priv->extend_desc))
4689 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4690 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4691 desc = &tx_q->dma_entx[entry].basic;
4692 else
4693 desc = tx_q->dma_tx + entry;
4694
4695 des = skb_frag_dma_map(priv->device, frag, 0, len,
4696 DMA_TO_DEVICE);
4697 if (dma_mapping_error(priv->device, des))
4698 goto dma_map_err; /* should reuse desc w/o issues */
4699
4700 tx_q->tx_skbuff_dma[entry].buf = des;
4701
4702 stmmac_set_desc_addr(priv, desc, des);
4703
4704 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4705 tx_q->tx_skbuff_dma[entry].len = len;
4706 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4707 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4708
4709 /* Prepare the descriptor and set the own bit too */
4710 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4711 priv->mode, 1, last_segment, skb->len);
4712 }
4713
4714 /* Only the last descriptor gets to point to the skb. */
4715 tx_q->tx_skbuff[entry] = skb;
4716 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4717
4718 /* According to the coalesce parameter the IC bit for the latest
4719 * segment is reset and the timer re-started to clean the tx status.
4720 * This approach takes care about the fragments: desc is the first
4721 * element in case of no SG.
4722 */
4723 tx_packets = (entry + 1) - first_tx;
4724 tx_q->tx_count_frames += tx_packets;
4725
4726 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4727 set_ic = true;
4728 else if (!priv->tx_coal_frames[queue])
4729 set_ic = false;
4730 else if (tx_packets > priv->tx_coal_frames[queue])
4731 set_ic = true;
4732 else if ((tx_q->tx_count_frames %
4733 priv->tx_coal_frames[queue]) < tx_packets)
4734 set_ic = true;
4735 else
4736 set_ic = false;
4737
4738 if (set_ic) {
4739 if (likely(priv->extend_desc))
4740 desc = &tx_q->dma_etx[entry].basic;
4741 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4742 desc = &tx_q->dma_entx[entry].basic;
4743 else
4744 desc = &tx_q->dma_tx[entry];
4745
4746 tx_q->tx_count_frames = 0;
4747 stmmac_set_tx_ic(priv, desc);
4748 }
4749
4750 /* We've used all descriptors we need for this skb, however,
4751 * advance cur_tx so that it references a fresh descriptor.
4752 * ndo_start_xmit will fill this descriptor the next time it's
4753 * called and stmmac_tx_clean may clean up to this descriptor.
4754 */
4755 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4756 tx_q->cur_tx = entry;
4757
4758 if (netif_msg_pktdata(priv)) {
4759 netdev_dbg(priv->dev,
4760 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4761 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4762 entry, first, nfrags);
4763
4764 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4765 print_pkt(skb->data, skb->len);
4766 }
4767
4768 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4769 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4770 __func__);
4771 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4772 }
4773
4774 u64_stats_update_begin(&txq_stats->q_syncp);
4775 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4776 if (set_ic)
4777 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4778 u64_stats_update_end(&txq_stats->q_syncp);
4779
4780 if (priv->sarc_type)
4781 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4782
4783 /* Ready to fill the first descriptor and set the OWN bit w/o any
4784 * problems because all the descriptors are actually ready to be
4785 * passed to the DMA engine.
4786 */
4787 if (likely(!is_jumbo)) {
4788 bool last_segment = (nfrags == 0);
4789
4790 des = dma_map_single(priv->device, skb->data,
4791 nopaged_len, DMA_TO_DEVICE);
4792 if (dma_mapping_error(priv->device, des))
4793 goto dma_map_err;
4794
4795 tx_q->tx_skbuff_dma[first_entry].buf = des;
4796 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4797 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4798
4799 stmmac_set_desc_addr(priv, first, des);
4800
4801 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4802 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4803
4804 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4805 priv->hwts_tx_en)) {
4806 /* declare that device is doing timestamping */
4807 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4808 stmmac_enable_tx_timestamp(priv, first);
4809 }
4810
4811 /* Prepare the first descriptor setting the OWN bit too */
4812 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4813 csum_insertion, priv->mode, 0, last_segment,
4814 skb->len);
4815 }
4816
4817 if (tx_q->tbs & STMMAC_TBS_EN) {
4818 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4819
4820 tbs_desc = &tx_q->dma_entx[first_entry];
4821 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4822 }
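/* With TBS enabled (tc-etf offload), skb->tstamp carries the requested launch
 * time, which is programmed into the enhanced descriptor above.
 */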
4823
4824 stmmac_set_tx_owner(priv, first);
4825
4826 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4827
4828 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4829 skb_tx_timestamp(skb);
4830 stmmac_flush_tx_descriptors(priv, queue);
4831 stmmac_tx_timer_arm(priv, queue);
4832
4833 return NETDEV_TX_OK;
4834
4835 dma_map_err:
4836 netdev_err(priv->dev, "Tx DMA map failed\n");
4837 max_sdu_err:
4838 dev_kfree_skb(skb);
4839 priv->xstats.tx_dropped++;
4840 return NETDEV_TX_OK;
4841 }
4842
4843 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4844 {
4845 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4846 __be16 vlan_proto = veth->h_vlan_proto;
4847 u16 vlanid;
4848
4849 if ((vlan_proto == htons(ETH_P_8021Q) &&
4850 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4851 (vlan_proto == htons(ETH_P_8021AD) &&
4852 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4853 /* pop the vlan tag */
4854 vlanid = ntohs(veth->h_vlan_TCI);
4855 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4856 skb_pull(skb, VLAN_HLEN);
4857 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4858 }
4859 }
4860
4861 /**
4862 * stmmac_rx_refill - refill used skb preallocated buffers
4863 * @priv: driver private structure
4864 * @queue: RX queue index
4865 * Description : this is to refill the used RX buffers for the reception
4866 * process, which are taken from the page pool.
4867 */
4868 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4869 {
4870 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4871 int dirty = stmmac_rx_dirty(priv, queue);
4872 unsigned int entry = rx_q->dirty_rx;
4873 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4874
4875 if (priv->dma_cap.host_dma_width <= 32)
4876 gfp |= GFP_DMA32;
4877
4878 while (dirty-- > 0) {
4879 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4880 struct dma_desc *p;
4881 bool use_rx_wd;
4882
4883 if (priv->extend_desc)
4884 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4885 else
4886 p = rx_q->dma_rx + entry;
4887
4888 if (!buf->page) {
4889 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4890 if (!buf->page)
4891 break;
4892 }
4893
4894 if (priv->sph && !buf->sec_page) {
4895 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4896 if (!buf->sec_page)
4897 break;
4898
4899 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4900 }
4901
4902 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4903
4904 stmmac_set_desc_addr(priv, p, buf->addr);
4905 if (priv->sph)
4906 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4907 else
4908 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4909 stmmac_refill_desc3(priv, rx_q, p);
4910
4911 rx_q->rx_count_frames++;
4912 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4913 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4914 rx_q->rx_count_frames = 0;
4915
4916 use_rx_wd = !priv->rx_coal_frames[queue];
4917 use_rx_wd |= rx_q->rx_count_frames > 0;
4918 if (!priv->use_riwt)
4919 use_rx_wd = false;
4920
4921 dma_wmb();
4922 stmmac_set_rx_owner(priv, p, use_rx_wd);
4923
4924 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4925 }
4926 rx_q->dirty_rx = entry;
4927 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4928 (rx_q->dirty_rx * sizeof(struct dma_desc));
4929 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4930 }
4931
4932 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4933 struct dma_desc *p,
4934 int status, unsigned int len)
4935 {
4936 unsigned int plen = 0, hlen = 0;
4937 int coe = priv->hw->rx_csum;
4938
4939 /* Not first descriptor, buffer is always zero */
4940 if (priv->sph && len)
4941 return 0;
4942
4943 /* First descriptor, get split header length */
4944 stmmac_get_rx_header_len(priv, p, &hlen);
4945 if (priv->sph && hlen) {
4946 priv->xstats.rx_split_hdr_pkt_n++;
4947 return hlen;
4948 }
4949
4950 /* First descriptor, not last descriptor and not split header */
4951 if (status & rx_not_ls)
4952 return priv->dma_conf.dma_buf_sz;
4953
4954 plen = stmmac_get_rx_frame_len(priv, p, coe);
4955
4956 /* First descriptor and last descriptor and not split header */
4957 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4958 }
4959
4960 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4961 struct dma_desc *p,
4962 int status, unsigned int len)
4963 {
4964 int coe = priv->hw->rx_csum;
4965 unsigned int plen = 0;
4966
4967 /* Not split header, buffer is not available */
4968 if (!priv->sph)
4969 return 0;
4970
4971 /* Not last descriptor */
4972 if (status & rx_not_ls)
4973 return priv->dma_conf.dma_buf_sz;
4974
4975 plen = stmmac_get_rx_frame_len(priv, p, coe);
4976
4977 /* Last descriptor */
4978 return plen - len;
4979 }
4980
4981 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4982 struct xdp_frame *xdpf, bool dma_map)
4983 {
4984 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4985 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4986 unsigned int entry = tx_q->cur_tx;
4987 struct dma_desc *tx_desc;
4988 dma_addr_t dma_addr;
4989 bool set_ic;
4990
4991 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4992 return STMMAC_XDP_CONSUMED;
4993
4994 if (priv->est && priv->est->enable &&
4995 priv->est->max_sdu[queue] &&
4996 xdpf->len > priv->est->max_sdu[queue]) {
4997 priv->xstats.max_sdu_txq_drop[queue]++;
4998 return STMMAC_XDP_CONSUMED;
4999 }
5000
5001 if (likely(priv->extend_desc))
5002 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
5003 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
5004 tx_desc = &tx_q->dma_entx[entry].basic;
5005 else
5006 tx_desc = tx_q->dma_tx + entry;
5007
5008 if (dma_map) {
5009 dma_addr = dma_map_single(priv->device, xdpf->data,
5010 xdpf->len, DMA_TO_DEVICE);
5011 if (dma_mapping_error(priv->device, dma_addr))
5012 return STMMAC_XDP_CONSUMED;
5013
5014 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
5015 } else {
5016 struct page *page = virt_to_page(xdpf->data);
5017
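/* XDP_TX path: the frame stays in its page_pool page. The xdp_frame metadata
 * occupies the start of the headroom, so the payload begins
 * sizeof(*xdpf) + xdpf->headroom bytes into the buffer; only a DMA sync is
 * needed since the page is already mapped by the pool.
 */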
5018 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
5019 xdpf->headroom;
5020 dma_sync_single_for_device(priv->device, dma_addr,
5021 xdpf->len, DMA_BIDIRECTIONAL);
5022
5023 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
5024 }
5025
5026 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
5027 tx_q->tx_skbuff_dma[entry].map_as_page = false;
5028 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
5029 tx_q->tx_skbuff_dma[entry].last_segment = true;
5030 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
5031
5032 tx_q->xdpf[entry] = xdpf;
5033
5034 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
5035
5036 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
5037 true, priv->mode, true, true,
5038 xdpf->len);
5039
5040 tx_q->tx_count_frames++;
5041
5042 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
5043 set_ic = true;
5044 else
5045 set_ic = false;
5046
5047 if (set_ic) {
5048 tx_q->tx_count_frames = 0;
5049 stmmac_set_tx_ic(priv, tx_desc);
5050 u64_stats_update_begin(&txq_stats->q_syncp);
5051 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
5052 u64_stats_update_end(&txq_stats->q_syncp);
5053 }
5054
5055 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
5056
5057 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
5058 tx_q->cur_tx = entry;
5059
5060 return STMMAC_XDP_TX;
5061 }
5062
5063 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5064 int cpu)
5065 {
5066 int index = cpu;
5067
5068 if (unlikely(index < 0))
5069 index = 0;
5070
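/* Fold the CPU id onto the available TX queues; this is effectively
 * index % tx_queues_to_use, written as a loop.
 */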
5071 while (index >= priv->plat->tx_queues_to_use)
5072 index -= priv->plat->tx_queues_to_use;
5073
5074 return index;
5075 }
5076
5077 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
5078 struct xdp_buff *xdp)
5079 {
5080 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
5081 int cpu = smp_processor_id();
5082 struct netdev_queue *nq;
5083 int queue;
5084 int res;
5085
5086 if (unlikely(!xdpf))
5087 return STMMAC_XDP_CONSUMED;
5088
5089 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5090 nq = netdev_get_tx_queue(priv->dev, queue);
5091
5092 __netif_tx_lock(nq, cpu);
5093 /* Avoids TX time-out as we are sharing with slow path */
5094 txq_trans_cond_update(nq);
5095
5096 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5097 if (res == STMMAC_XDP_TX)
5098 stmmac_flush_tx_descriptors(priv, queue);
5099
5100 __netif_tx_unlock(nq);
5101
5102 return res;
5103 }
5104
5105 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5106 struct bpf_prog *prog,
5107 struct xdp_buff *xdp)
5108 {
5109 u32 act;
5110 int res;
5111
5112 act = bpf_prog_run_xdp(prog, xdp);
5113 switch (act) {
5114 case XDP_PASS:
5115 res = STMMAC_XDP_PASS;
5116 break;
5117 case XDP_TX:
5118 res = stmmac_xdp_xmit_back(priv, xdp);
5119 break;
5120 case XDP_REDIRECT:
5121 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5122 res = STMMAC_XDP_CONSUMED;
5123 else
5124 res = STMMAC_XDP_REDIRECT;
5125 break;
5126 default:
5127 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5128 fallthrough;
5129 case XDP_ABORTED:
5130 trace_xdp_exception(priv->dev, prog, act);
5131 fallthrough;
5132 case XDP_DROP:
5133 res = STMMAC_XDP_CONSUMED;
5134 break;
5135 }
5136
5137 return res;
5138 }
5139
5140 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5141 struct xdp_buff *xdp)
5142 {
5143 struct bpf_prog *prog;
5144 int res;
5145
5146 prog = READ_ONCE(priv->xdp_prog);
5147 if (!prog) {
5148 res = STMMAC_XDP_PASS;
5149 goto out;
5150 }
5151
5152 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5153 out:
5154 return ERR_PTR(-res);
5155 }
5156
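/* Finalize an RX NAPI cycle with XDP activity: arm the TX coalescing
 * timer if frames were queued via XDP_TX and flush any pending
 * xdp_do_redirect() work.
 */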
5157 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5158 int xdp_status)
5159 {
5160 int cpu = smp_processor_id();
5161 int queue;
5162
5163 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5164
5165 if (xdp_status & STMMAC_XDP_TX)
5166 stmmac_tx_timer_arm(priv, queue);
5167
5168 if (xdp_status & STMMAC_XDP_REDIRECT)
5169 xdp_do_flush();
5170 }
5171
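/* Copy the contents of a zero-copy XSK buffer (including XDP metadata)
 * into a freshly allocated skb so the XSK buffer can be returned to the
 * pool immediately.
 */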
5172 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5173 struct xdp_buff *xdp)
5174 {
5175 unsigned int metasize = xdp->data - xdp->data_meta;
5176 unsigned int datasize = xdp->data_end - xdp->data;
5177 struct sk_buff *skb;
5178
5179 skb = napi_alloc_skb(&ch->rxtx_napi,
5180 xdp->data_end - xdp->data_hard_start);
5181 if (unlikely(!skb))
5182 return NULL;
5183
5184 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5185 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5186 if (metasize)
5187 skb_metadata_set(skb, metasize);
5188
5189 return skb;
5190 }
5191
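/* Build an skb from an XSK buffer that got an XDP_PASS verdict and hand
 * it to the stack: timestamp, VLAN, checksum and RSS hash are propagated
 * the same way as in the regular stmmac_rx() path.
 */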
5192 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5193 struct dma_desc *p, struct dma_desc *np,
5194 struct xdp_buff *xdp)
5195 {
5196 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5197 struct stmmac_channel *ch = &priv->channel[queue];
5198 unsigned int len = xdp->data_end - xdp->data;
5199 enum pkt_hash_types hash_type;
5200 int coe = priv->hw->rx_csum;
5201 struct sk_buff *skb;
5202 u32 hash;
5203
5204 skb = stmmac_construct_skb_zc(ch, xdp);
5205 if (!skb) {
5206 priv->xstats.rx_dropped++;
5207 return;
5208 }
5209
5210 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5211 if (priv->hw->hw_vlan_en)
5212 /* MAC level stripping. */
5213 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5214 else
5215 /* Driver level stripping. */
5216 stmmac_rx_vlan(priv->dev, skb);
5217 skb->protocol = eth_type_trans(skb, priv->dev);
5218
5219 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5220 skb_checksum_none_assert(skb);
5221 else
5222 skb->ip_summed = CHECKSUM_UNNECESSARY;
5223
5224 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5225 skb_set_hash(skb, hash, hash_type);
5226
5227 skb_record_rx_queue(skb, queue);
5228 napi_gro_receive(&ch->rxtx_napi, skb);
5229
5230 u64_stats_update_begin(&rxq_stats->napi_syncp);
5231 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5232 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5233 u64_stats_update_end(&rxq_stats->napi_syncp);
5234 }
5235
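/* Refill the RX ring with buffers taken from the XSK pool. Returns false
 * if the pool ran out of buffers, so the caller can signal need_wakeup to
 * user space.
 */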
5236 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5237 {
5238 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5239 unsigned int entry = rx_q->dirty_rx;
5240 struct dma_desc *rx_desc = NULL;
5241 bool ret = true;
5242
5243 budget = min(budget, stmmac_rx_dirty(priv, queue));
5244
5245 while (budget-- > 0 && entry != rx_q->cur_rx) {
5246 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5247 dma_addr_t dma_addr;
5248 bool use_rx_wd;
5249
5250 if (!buf->xdp) {
5251 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5252 if (!buf->xdp) {
5253 ret = false;
5254 break;
5255 }
5256 }
5257
5258 if (priv->extend_desc)
5259 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5260 else
5261 rx_desc = rx_q->dma_rx + entry;
5262
5263 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5264 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5265 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5266 stmmac_refill_desc3(priv, rx_q, rx_desc);
5267
5268 rx_q->rx_count_frames++;
5269 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5270 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5271 rx_q->rx_count_frames = 0;
5272
5273 use_rx_wd = !priv->rx_coal_frames[queue];
5274 use_rx_wd |= rx_q->rx_count_frames > 0;
5275 if (!priv->use_riwt)
5276 use_rx_wd = false;
5277
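/* Make sure the descriptor fields written above are visible before
 * handing ownership back to the DMA engine.
 */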
5278 dma_wmb();
5279 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5280
5281 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5282 }
5283
5284 if (rx_desc) {
5285 rx_q->dirty_rx = entry;
5286 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5287 (rx_q->dirty_rx * sizeof(struct dma_desc));
5288 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5289 }
5290
5291 return ret;
5292 }
5293
5294 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5295 {
5296 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5297 * to represent incoming packet, whereas cb field in the same structure
5298 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5299 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5300 */
5301 return (struct stmmac_xdp_buff *)xdp;
5302 }
5303
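/**
 * stmmac_rx_zc - zero-copy (AF_XDP) receive path
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index
 * Description: like stmmac_rx() but frames land directly in XSK pool
 * buffers; the attached XDP program decides whether each frame is
 * passed, transmitted, redirected or dropped.
 */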
5304 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5305 {
5306 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5307 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5308 unsigned int count = 0, error = 0, len = 0;
5309 int dirty = stmmac_rx_dirty(priv, queue);
5310 unsigned int next_entry = rx_q->cur_rx;
5311 u32 rx_errors = 0, rx_dropped = 0;
5312 unsigned int desc_size;
5313 struct bpf_prog *prog;
5314 bool failure = false;
5315 int xdp_status = 0;
5316 int status = 0;
5317
5318 if (netif_msg_rx_status(priv)) {
5319 void *rx_head;
5320
5321 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5322 if (priv->extend_desc) {
5323 rx_head = (void *)rx_q->dma_erx;
5324 desc_size = sizeof(struct dma_extended_desc);
5325 } else {
5326 rx_head = (void *)rx_q->dma_rx;
5327 desc_size = sizeof(struct dma_desc);
5328 }
5329
5330 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5331 rx_q->dma_rx_phy, desc_size);
5332 }
5333 while (count < limit) {
5334 struct stmmac_rx_buffer *buf;
5335 struct stmmac_xdp_buff *ctx;
5336 unsigned int buf1_len = 0;
5337 struct dma_desc *np, *p;
5338 int entry;
5339 int res;
5340
5341 if (!count && rx_q->state_saved) {
5342 error = rx_q->state.error;
5343 len = rx_q->state.len;
5344 } else {
5345 rx_q->state_saved = false;
5346 error = 0;
5347 len = 0;
5348 }
5349
5350 if (count >= limit)
5351 break;
5352
5353 read_again:
5354 buf1_len = 0;
5355 entry = next_entry;
5356 buf = &rx_q->buf_pool[entry];
5357
5358 if (dirty >= STMMAC_RX_FILL_BATCH) {
5359 failure = failure ||
5360 !stmmac_rx_refill_zc(priv, queue, dirty);
5361 dirty = 0;
5362 }
5363
5364 if (priv->extend_desc)
5365 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5366 else
5367 p = rx_q->dma_rx + entry;
5368
5369 /* read the status of the incoming frame */
5370 status = stmmac_rx_status(priv, &priv->xstats, p);
5371 /* check if managed by the DMA otherwise go ahead */
5372 if (unlikely(status & dma_own))
5373 break;
5374
5375 /* Prefetch the next RX descriptor */
5376 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5377 priv->dma_conf.dma_rx_size);
5378 next_entry = rx_q->cur_rx;
5379
5380 if (priv->extend_desc)
5381 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5382 else
5383 np = rx_q->dma_rx + next_entry;
5384
5385 prefetch(np);
5386
5387 /* Ensure a valid XSK buffer before proceeding */
5388 if (!buf->xdp)
5389 break;
5390
5391 if (priv->extend_desc)
5392 stmmac_rx_extended_status(priv, &priv->xstats,
5393 rx_q->dma_erx + entry);
5394 if (unlikely(status == discard_frame)) {
5395 xsk_buff_free(buf->xdp);
5396 buf->xdp = NULL;
5397 dirty++;
5398 error = 1;
5399 if (!priv->hwts_rx_en)
5400 rx_errors++;
5401 }
5402
5403 if (unlikely(error && (status & rx_not_ls)))
5404 goto read_again;
5405 if (unlikely(error)) {
5406 count++;
5407 continue;
5408 }
5409
5410 /* XSK pool expects each RX frame to be 1:1 mapped to an XSK buffer */
5411 if (likely(status & rx_not_ls)) {
5412 xsk_buff_free(buf->xdp);
5413 buf->xdp = NULL;
5414 dirty++;
5415 count++;
5416 goto read_again;
5417 }
5418
5419 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5420 ctx->priv = priv;
5421 ctx->desc = p;
5422 ctx->ndesc = np;
5423
5424 /* XDP ZC frames only support primary buffers for now */
5425 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5426 len += buf1_len;
5427
5428 /* ACS is disabled; strip manually. */
5429 if (likely(!(status & rx_not_ls))) {
5430 buf1_len -= ETH_FCS_LEN;
5431 len -= ETH_FCS_LEN;
5432 }
5433
5434 /* RX buffer is good and fits into an XSK pool buffer */
5435 buf->xdp->data_end = buf->xdp->data + buf1_len;
5436 xsk_buff_dma_sync_for_cpu(buf->xdp);
5437
5438 prog = READ_ONCE(priv->xdp_prog);
5439 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5440
5441 switch (res) {
5442 case STMMAC_XDP_PASS:
5443 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5444 xsk_buff_free(buf->xdp);
5445 break;
5446 case STMMAC_XDP_CONSUMED:
5447 xsk_buff_free(buf->xdp);
5448 rx_dropped++;
5449 break;
5450 case STMMAC_XDP_TX:
5451 case STMMAC_XDP_REDIRECT:
5452 xdp_status |= res;
5453 break;
5454 }
5455
5456 buf->xdp = NULL;
5457 dirty++;
5458 count++;
5459 }
5460
5461 if (status & rx_not_ls) {
5462 rx_q->state_saved = true;
5463 rx_q->state.error = error;
5464 rx_q->state.len = len;
5465 }
5466
5467 stmmac_finalize_xdp_rx(priv, xdp_status);
5468
5469 u64_stats_update_begin(&rxq_stats->napi_syncp);
5470 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5471 u64_stats_update_end(&rxq_stats->napi_syncp);
5472
5473 priv->xstats.rx_dropped += rx_dropped;
5474 priv->xstats.rx_errors += rx_errors;
5475
5476 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5477 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5478 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5479 else
5480 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5481
5482 return (int)count;
5483 }
5484
5485 return failure ? limit : (int)count;
5486 }
5487
5488 /**
5489 * stmmac_rx - manage the receive process
5490 * @priv: driver private structure
5491 * @limit: napi budget
5492 * @queue: RX queue index.
5493 * Description: this is the function called by the napi poll method.
5494 * It gets all the frames inside the ring.
5495 */
5496 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5497 {
5498 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5499 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5500 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5501 struct stmmac_channel *ch = &priv->channel[queue];
5502 unsigned int count = 0, error = 0, len = 0;
5503 int status = 0, coe = priv->hw->rx_csum;
5504 unsigned int next_entry = rx_q->cur_rx;
5505 enum dma_data_direction dma_dir;
5506 unsigned int desc_size;
5507 struct sk_buff *skb = NULL;
5508 struct stmmac_xdp_buff ctx;
5509 int xdp_status = 0;
5510 int bufsz;
5511
5512 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5513 bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5514 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5515
5516 if (netif_msg_rx_status(priv)) {
5517 void *rx_head;
5518
5519 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5520 if (priv->extend_desc) {
5521 rx_head = (void *)rx_q->dma_erx;
5522 desc_size = sizeof(struct dma_extended_desc);
5523 } else {
5524 rx_head = (void *)rx_q->dma_rx;
5525 desc_size = sizeof(struct dma_desc);
5526 }
5527
5528 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5529 rx_q->dma_rx_phy, desc_size);
5530 }
5531 while (count < limit) {
5532 unsigned int buf1_len = 0, buf2_len = 0;
5533 enum pkt_hash_types hash_type;
5534 struct stmmac_rx_buffer *buf;
5535 struct dma_desc *np, *p;
5536 int entry;
5537 u32 hash;
5538
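/* Resume a frame that spanned the previous NAPI poll: restore the
 * partially built skb and the accumulated error/length state.
 */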
5539 if (!count && rx_q->state_saved) {
5540 skb = rx_q->state.skb;
5541 error = rx_q->state.error;
5542 len = rx_q->state.len;
5543 } else {
5544 rx_q->state_saved = false;
5545 skb = NULL;
5546 error = 0;
5547 len = 0;
5548 }
5549
5550 read_again:
5551 if (count >= limit)
5552 break;
5553
5554 buf1_len = 0;
5555 buf2_len = 0;
5556 entry = next_entry;
5557 buf = &rx_q->buf_pool[entry];
5558
5559 if (priv->extend_desc)
5560 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5561 else
5562 p = rx_q->dma_rx + entry;
5563
5564 /* read the status of the incoming frame */
5565 status = stmmac_rx_status(priv, &priv->xstats, p);
5566 /* check if managed by the DMA otherwise go ahead */
5567 if (unlikely(status & dma_own))
5568 break;
5569
5570 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5571 priv->dma_conf.dma_rx_size);
5572 next_entry = rx_q->cur_rx;
5573
5574 if (priv->extend_desc)
5575 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5576 else
5577 np = rx_q->dma_rx + next_entry;
5578
5579 prefetch(np);
5580
5581 if (priv->extend_desc)
5582 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5583 if (unlikely(status == discard_frame)) {
5584 page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5585 buf->page = NULL;
5586 error = 1;
5587 if (!priv->hwts_rx_en)
5588 rx_errors++;
5589 }
5590
5591 if (unlikely(error && (status & rx_not_ls)))
5592 goto read_again;
5593 if (unlikely(error)) {
5594 dev_kfree_skb(skb);
5595 skb = NULL;
5596 count++;
5597 continue;
5598 }
5599
5600 /* Buffer is good. Go on. */
5601
5602 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5603 len += buf1_len;
5604 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5605 len += buf2_len;
5606
5607 /* ACS is disabled; strip manually. */
5608 if (likely(!(status & rx_not_ls))) {
5609 if (buf2_len) {
5610 buf2_len -= ETH_FCS_LEN;
5611 len -= ETH_FCS_LEN;
5612 } else if (buf1_len) {
5613 buf1_len -= ETH_FCS_LEN;
5614 len -= ETH_FCS_LEN;
5615 }
5616 }
5617
5618 if (!skb) {
5619 unsigned int pre_len, sync_len;
5620
5621 dma_sync_single_for_cpu(priv->device, buf->addr,
5622 buf1_len, dma_dir);
5623 net_prefetch(page_address(buf->page) +
5624 buf->page_offset);
5625
5626 xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5627 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5628 buf->page_offset, buf1_len, true);
5629
5630 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5631 buf->page_offset;
5632
5633 ctx.priv = priv;
5634 ctx.desc = p;
5635 ctx.ndesc = np;
5636
5637 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5638 /* Due to xdp_adjust_tail: the DMA sync for_device must
5639 * cover the max length the CPU may have touched.
5640 */
5641 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5642 buf->page_offset;
5643 sync_len = max(sync_len, pre_len);
5644
5645 /* For non-XDP_PASS verdicts */
5646 if (IS_ERR(skb)) {
5647 unsigned int xdp_res = -PTR_ERR(skb);
5648
5649 if (xdp_res & STMMAC_XDP_CONSUMED) {
5650 page_pool_put_page(rx_q->page_pool,
5651 virt_to_head_page(ctx.xdp.data),
5652 sync_len, true);
5653 buf->page = NULL;
5654 rx_dropped++;
5655
5656 /* Clear skb as it was set as a
5657 * status code by the XDP program.
5658 */
5659 skb = NULL;
5660
5661 if (unlikely((status & rx_not_ls)))
5662 goto read_again;
5663
5664 count++;
5665 continue;
5666 } else if (xdp_res & (STMMAC_XDP_TX |
5667 STMMAC_XDP_REDIRECT)) {
5668 xdp_status |= xdp_res;
5669 buf->page = NULL;
5670 skb = NULL;
5671 count++;
5672 continue;
5673 }
5674 }
5675 }
5676
5677 if (!skb) {
5678 unsigned int head_pad_len;
5679
5680 /* XDP program may expand or reduce tail */
5681 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5682
5683 skb = napi_build_skb(page_address(buf->page),
5684 rx_q->napi_skb_frag_size);
5685 if (!skb) {
5686 page_pool_recycle_direct(rx_q->page_pool,
5687 buf->page);
5688 rx_dropped++;
5689 count++;
5690 goto drain_data;
5691 }
5692
5693 /* XDP program may adjust header */
5694 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5695 skb_reserve(skb, head_pad_len);
5696 skb_put(skb, buf1_len);
5697 skb_mark_for_recycle(skb);
5698 buf->page = NULL;
5699 } else if (buf1_len) {
5700 dma_sync_single_for_cpu(priv->device, buf->addr,
5701 buf1_len, dma_dir);
5702 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5703 buf->page, buf->page_offset, buf1_len,
5704 priv->dma_conf.dma_buf_sz);
5705 buf->page = NULL;
5706 }
5707
5708 if (buf2_len) {
5709 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5710 buf2_len, dma_dir);
5711 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5712 buf->sec_page, 0, buf2_len,
5713 priv->dma_conf.dma_buf_sz);
5714 buf->sec_page = NULL;
5715 }
5716
5717 drain_data:
5718 if (likely(status & rx_not_ls))
5719 goto read_again;
5720 if (!skb)
5721 continue;
5722
5723 /* Got entire packet into SKB. Finish it. */
5724
5725 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5726
5727 if (priv->hw->hw_vlan_en)
5728 /* MAC level stripping. */
5729 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5730 else
5731 /* Driver level stripping. */
5732 stmmac_rx_vlan(priv->dev, skb);
5733
5734 skb->protocol = eth_type_trans(skb, priv->dev);
5735
5736 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5737 skb_checksum_none_assert(skb);
5738 else
5739 skb->ip_summed = CHECKSUM_UNNECESSARY;
5740
5741 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5742 skb_set_hash(skb, hash, hash_type);
5743
5744 skb_record_rx_queue(skb, queue);
5745 napi_gro_receive(&ch->rx_napi, skb);
5746 skb = NULL;
5747
5748 rx_packets++;
5749 rx_bytes += len;
5750 count++;
5751 }
5752
5753 if (status & rx_not_ls || skb) {
5754 rx_q->state_saved = true;
5755 rx_q->state.skb = skb;
5756 rx_q->state.error = error;
5757 rx_q->state.len = len;
5758 }
5759
5760 stmmac_finalize_xdp_rx(priv, xdp_status);
5761
5762 stmmac_rx_refill(priv, queue);
5763
5764 u64_stats_update_begin(&rxq_stats->napi_syncp);
5765 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5766 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5767 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5768 u64_stats_update_end(&rxq_stats->napi_syncp);
5769
5770 priv->xstats.rx_dropped += rx_dropped;
5771 priv->xstats.rx_errors += rx_errors;
5772
5773 return count;
5774 }
5775
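/* RX NAPI poll handler: processes up to @budget frames and re-enables
 * the RX DMA interrupt for this channel once the ring is drained.
 */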
5776 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5777 {
5778 struct stmmac_channel *ch =
5779 container_of(napi, struct stmmac_channel, rx_napi);
5780 struct stmmac_priv *priv = ch->priv_data;
5781 struct stmmac_rxq_stats *rxq_stats;
5782 u32 chan = ch->index;
5783 int work_done;
5784
5785 rxq_stats = &priv->xstats.rxq_stats[chan];
5786 u64_stats_update_begin(&rxq_stats->napi_syncp);
5787 u64_stats_inc(&rxq_stats->napi.poll);
5788 u64_stats_update_end(&rxq_stats->napi_syncp);
5789
5790 work_done = stmmac_rx(priv, budget, chan);
5791 if (work_done < budget && napi_complete_done(napi, work_done)) {
5792 unsigned long flags;
5793
5794 spin_lock_irqsave(&ch->lock, flags);
5795 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5796 spin_unlock_irqrestore(&ch->lock, flags);
5797 }
5798
5799 return work_done;
5800 }
5801
5802 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5803 {
5804 struct stmmac_channel *ch =
5805 container_of(napi, struct stmmac_channel, tx_napi);
5806 struct stmmac_priv *priv = ch->priv_data;
5807 struct stmmac_txq_stats *txq_stats;
5808 bool pending_packets = false;
5809 u32 chan = ch->index;
5810 int work_done;
5811
5812 txq_stats = &priv->xstats.txq_stats[chan];
5813 u64_stats_update_begin(&txq_stats->napi_syncp);
5814 u64_stats_inc(&txq_stats->napi.poll);
5815 u64_stats_update_end(&txq_stats->napi_syncp);
5816
5817 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5818 work_done = min(work_done, budget);
5819
5820 if (work_done < budget && napi_complete_done(napi, work_done)) {
5821 unsigned long flags;
5822
5823 spin_lock_irqsave(&ch->lock, flags);
5824 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5825 spin_unlock_irqrestore(&ch->lock, flags);
5826 }
5827
5828 /* TX still has packets to handle; check if we need to arm the tx timer */
5829 if (pending_packets)
5830 stmmac_tx_timer_arm(priv, chan);
5831
5832 return work_done;
5833 }
5834
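/* Combined RX/TX NAPI poll handler used for channels running in XDP
 * zero-copy mode: TX completion and zero-copy RX are serviced from the
 * same context and both DMA interrupts are re-armed together.
 */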
5835 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5836 {
5837 struct stmmac_channel *ch =
5838 container_of(napi, struct stmmac_channel, rxtx_napi);
5839 struct stmmac_priv *priv = ch->priv_data;
5840 bool tx_pending_packets = false;
5841 int rx_done, tx_done, rxtx_done;
5842 struct stmmac_rxq_stats *rxq_stats;
5843 struct stmmac_txq_stats *txq_stats;
5844 u32 chan = ch->index;
5845
5846 rxq_stats = &priv->xstats.rxq_stats[chan];
5847 u64_stats_update_begin(&rxq_stats->napi_syncp);
5848 u64_stats_inc(&rxq_stats->napi.poll);
5849 u64_stats_update_end(&rxq_stats->napi_syncp);
5850
5851 txq_stats = &priv->xstats.txq_stats[chan];
5852 u64_stats_update_begin(&txq_stats->napi_syncp);
5853 u64_stats_inc(&txq_stats->napi.poll);
5854 u64_stats_update_end(&txq_stats->napi_syncp);
5855
5856 tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5857 tx_done = min(tx_done, budget);
5858
5859 rx_done = stmmac_rx_zc(priv, budget, chan);
5860
5861 rxtx_done = max(tx_done, rx_done);
5862
5863 /* If either TX or RX work is not complete, return the budget
5864 * and keep polling
5865 */
5866 if (rxtx_done >= budget)
5867 return budget;
5868
5869 /* all work done, exit the polling mode */
5870 if (napi_complete_done(napi, rxtx_done)) {
5871 unsigned long flags;
5872
5873 spin_lock_irqsave(&ch->lock, flags);
5874 /* Both RX and TX work are complete,
5875 * so enable both RX & TX IRQs.
5876 */
5877 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5878 spin_unlock_irqrestore(&ch->lock, flags);
5879 }
5880
5881 /* TX still has packets to handle; check if we need to arm the tx timer */
5882 if (tx_pending_packets)
5883 stmmac_tx_timer_arm(priv, chan);
5884
5885 return min(rxtx_done, budget - 1);
5886 }
5887
5888 /**
5889 * stmmac_tx_timeout
5890 * @dev : Pointer to net device structure
5891 * @txqueue: the index of the hanging transmit queue
5892 * Description: this function is called when a packet transmission fails to
5893 * complete within a reasonable time. The driver will mark the error in the
5894 * netdev structure and arrange for the device to be reset to a sane state
5895 * in order to transmit a new packet.
5896 */
5897 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5898 {
5899 struct stmmac_priv *priv = netdev_priv(dev);
5900
5901 stmmac_global_err(priv);
5902 }
5903
5904 /**
5905 * stmmac_set_rx_mode - entry point for multicast addressing
5906 * @dev : pointer to the device structure
5907 * Description:
5908 * This function is a driver entry point which gets called by the kernel
5909 * whenever multicast addresses must be enabled/disabled.
5910 * Return value:
5911 * void.
5912 *
5913 * FIXME: This may need RXC to be running, but it may be called with BH
5914 * disabled, which means we can't call phylink_rx_clk_stop*().
5915 */
5916 static void stmmac_set_rx_mode(struct net_device *dev)
5917 {
5918 struct stmmac_priv *priv = netdev_priv(dev);
5919
5920 stmmac_set_filter(priv, priv->hw, dev);
5921 }
5922
5923 /**
5924 * stmmac_change_mtu - entry point to change MTU size for the device.
5925 * @dev : device pointer.
5926 * @new_mtu : the new MTU size for the device.
5927 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5928 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5929 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5930 * Return value:
5931 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5932 * file on failure.
5933 */
5934 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5935 {
5936 struct stmmac_priv *priv = netdev_priv(dev);
5937 int txfifosz = priv->plat->tx_fifo_size;
5938 struct stmmac_dma_conf *dma_conf;
5939 const int mtu = new_mtu;
5940 int ret;
5941
5942 if (txfifosz == 0)
5943 txfifosz = priv->dma_cap.tx_fifo_size;
5944
5945 txfifosz /= priv->plat->tx_queues_to_use;
5946
5947 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5948 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5949 return -EINVAL;
5950 }
5951
5952 new_mtu = STMMAC_ALIGN(new_mtu);
5953
5954 /* If condition true, FIFO is too small or MTU too large */
5955 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5956 return -EINVAL;
5957
5958 if (netif_running(dev)) {
5959 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5960 /* Try to allocate the new DMA conf with the new mtu */
5961 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5962 if (IS_ERR(dma_conf)) {
5963 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5964 mtu);
5965 return PTR_ERR(dma_conf);
5966 }
5967
5968 stmmac_release(dev);
5969
5970 ret = __stmmac_open(dev, dma_conf);
5971 if (ret) {
5972 free_dma_desc_resources(priv, dma_conf);
5973 kfree(dma_conf);
5974 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5975 return ret;
5976 }
5977
5978 kfree(dma_conf);
5979
5980 stmmac_set_rx_mode(dev);
5981 }
5982
5983 WRITE_ONCE(dev->mtu, mtu);
5984 netdev_update_features(dev);
5985
5986 return 0;
5987 }
5988
5989 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5990 netdev_features_t features)
5991 {
5992 struct stmmac_priv *priv = netdev_priv(dev);
5993
5994 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5995 features &= ~NETIF_F_RXCSUM;
5996
5997 if (!priv->plat->tx_coe)
5998 features &= ~NETIF_F_CSUM_MASK;
5999
6000 /* Some GMAC devices have buggy Jumbo frame support that
6001 * needs to have the Tx COE disabled for oversized frames
6002 * (due to limited buffer sizes). In this case we disable
6003 * the TX csum insertion in the TDES and do not use SF.
6004 */
6005 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
6006 features &= ~NETIF_F_CSUM_MASK;
6007
6008 /* Disable tso if asked by ethtool */
6009 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
6010 if (features & NETIF_F_TSO)
6011 priv->tso = true;
6012 else
6013 priv->tso = false;
6014 }
6015
6016 return features;
6017 }
6018
6019 static int stmmac_set_features(struct net_device *netdev,
6020 netdev_features_t features)
6021 {
6022 struct stmmac_priv *priv = netdev_priv(netdev);
6023
6024 /* Keep the COE Type if RX checksum offload is supported */
6025 if (features & NETIF_F_RXCSUM)
6026 priv->hw->rx_csum = priv->plat->rx_coe;
6027 else
6028 priv->hw->rx_csum = 0;
6029 /* No check needed because rx_coe has already been set and will be
6030 * fixed up in case of issue.
6031 */
6032 stmmac_rx_ipc(priv, priv->hw);
6033
6034 if (priv->sph_cap) {
6035 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6036 u32 chan;
6037
6038 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
6039 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6040 }
6041
6042 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6043 priv->hw->hw_vlan_en = true;
6044 else
6045 priv->hw->hw_vlan_en = false;
6046
6047 phylink_rx_clk_stop_block(priv->phylink);
6048 stmmac_set_hw_vlan_mode(priv, priv->hw);
6049 phylink_rx_clk_stop_unblock(priv->phylink);
6050
6051 return 0;
6052 }
6053
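/* Handle interrupt sources shared by all interrupt entry points:
 * wake-up, EST, FPE, LPI, per-queue MTL events, PCS link state and
 * timestamp events.
 */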
6054 static void stmmac_common_interrupt(struct stmmac_priv *priv)
6055 {
6056 u32 rx_cnt = priv->plat->rx_queues_to_use;
6057 u32 tx_cnt = priv->plat->tx_queues_to_use;
6058 u32 queues_count;
6059 u32 queue;
6060 bool xmac;
6061
6062 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
6063 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
6064
6065 if (priv->irq_wake)
6066 pm_wakeup_event(priv->device, 0);
6067
6068 if (priv->dma_cap.estsel)
6069 stmmac_est_irq_status(priv, priv, priv->dev,
6070 &priv->xstats, tx_cnt);
6071
6072 if (stmmac_fpe_supported(priv))
6073 stmmac_fpe_irq_status(priv);
6074
6075 /* To handle GMAC own interrupts */
6076 if ((priv->plat->has_gmac) || xmac) {
6077 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6078
6079 if (unlikely(status)) {
6080 /* For LPI we need to save the tx status */
6081 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6082 priv->tx_path_in_lpi_mode = true;
6083 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6084 priv->tx_path_in_lpi_mode = false;
6085 }
6086
6087 for (queue = 0; queue < queues_count; queue++)
6088 stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6089
6090 /* PCS link status */
6091 if (priv->hw->pcs &&
6092 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6093 if (priv->xstats.pcs_link)
6094 netif_carrier_on(priv->dev);
6095 else
6096 netif_carrier_off(priv->dev);
6097 }
6098
6099 stmmac_timestamp_interrupt(priv, priv);
6100 }
6101 }
6102
6103 /**
6104 * stmmac_interrupt - main ISR
6105 * @irq: interrupt number.
6106 * @dev_id: to pass the net device pointer.
6107 * Description: this is the main driver interrupt service routine.
6108 * It can call:
6109 * o DMA service routine (to manage incoming frame reception and transmission
6110 * status)
6111 * o Core interrupts to manage: remote wake-up, management counter, LPI
6112 * interrupts.
6113 */
6114 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6115 {
6116 struct net_device *dev = (struct net_device *)dev_id;
6117 struct stmmac_priv *priv = netdev_priv(dev);
6118
6119 /* Check if adapter is up */
6120 if (test_bit(STMMAC_DOWN, &priv->state))
6121 return IRQ_HANDLED;
6122
6123 /* Check ASP error if it isn't delivered via an individual IRQ */
6124 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6125 return IRQ_HANDLED;
6126
6127 /* To handle Common interrupts */
6128 stmmac_common_interrupt(priv);
6129
6130 /* To handle DMA interrupts */
6131 stmmac_dma_interrupt(priv);
6132
6133 return IRQ_HANDLED;
6134 }
6135
6136 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6137 {
6138 struct net_device *dev = (struct net_device *)dev_id;
6139 struct stmmac_priv *priv = netdev_priv(dev);
6140
6141 /* Check if adapter is up */
6142 if (test_bit(STMMAC_DOWN, &priv->state))
6143 return IRQ_HANDLED;
6144
6145 /* To handle Common interrupts */
6146 stmmac_common_interrupt(priv);
6147
6148 return IRQ_HANDLED;
6149 }
6150
6151 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6152 {
6153 struct net_device *dev = (struct net_device *)dev_id;
6154 struct stmmac_priv *priv = netdev_priv(dev);
6155
6156 /* Check if adapter is up */
6157 if (test_bit(STMMAC_DOWN, &priv->state))
6158 return IRQ_HANDLED;
6159
6160 /* Check if a fatal error happened */
6161 stmmac_safety_feat_interrupt(priv);
6162
6163 return IRQ_HANDLED;
6164 }
6165
6166 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6167 {
6168 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6169 struct stmmac_dma_conf *dma_conf;
6170 int chan = tx_q->queue_index;
6171 struct stmmac_priv *priv;
6172 int status;
6173
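/* The IRQ cookie is the per-queue structure: walk back up through the
 * embedded dma_conf to recover the driver private data.
 */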
6174 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6175 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6176
6177 /* Check if adapter is up */
6178 if (test_bit(STMMAC_DOWN, &priv->state))
6179 return IRQ_HANDLED;
6180
6181 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6182
6183 if (unlikely(status & tx_hard_error_bump_tc)) {
6184 /* Try to bump up the dma threshold on this failure */
6185 stmmac_bump_dma_threshold(priv, chan);
6186 } else if (unlikely(status == tx_hard_error)) {
6187 stmmac_tx_err(priv, chan);
6188 }
6189
6190 return IRQ_HANDLED;
6191 }
6192
6193 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6194 {
6195 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6196 struct stmmac_dma_conf *dma_conf;
6197 int chan = rx_q->queue_index;
6198 struct stmmac_priv *priv;
6199
6200 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6201 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6202
6203 /* Check if adapter is up */
6204 if (test_bit(STMMAC_DOWN, &priv->state))
6205 return IRQ_HANDLED;
6206
6207 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6208
6209 return IRQ_HANDLED;
6210 }
6211
6212 /**
6213 * stmmac_ioctl - Entry point for the Ioctl
6214 * @dev: Device pointer.
6215 * @rq: An IOCTL-specific structure that can contain a pointer to
6216 * a proprietary structure used to pass information to the driver.
6217 * @cmd: IOCTL command
6218 * Description:
6219 * Currently it only supports the phy_mii_ioctl(...) MII ioctls.
6220 */
6221 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6222 {
6223 struct stmmac_priv *priv = netdev_priv (dev);
6224 int ret = -EOPNOTSUPP;
6225
6226 if (!netif_running(dev))
6227 return -EINVAL;
6228
6229 switch (cmd) {
6230 case SIOCGMIIPHY:
6231 case SIOCGMIIREG:
6232 case SIOCSMIIREG:
6233 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6234 break;
6235 default:
6236 break;
6237 }
6238
6239 return ret;
6240 }
6241
6242 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6243 void *cb_priv)
6244 {
6245 struct stmmac_priv *priv = cb_priv;
6246 int ret = -EOPNOTSUPP;
6247
6248 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6249 return ret;
6250
6251 __stmmac_disable_all_queues(priv);
6252
6253 switch (type) {
6254 case TC_SETUP_CLSU32:
6255 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6256 break;
6257 case TC_SETUP_CLSFLOWER:
6258 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6259 break;
6260 default:
6261 break;
6262 }
6263
6264 stmmac_enable_all_queues(priv);
6265 return ret;
6266 }
6267
6268 static LIST_HEAD(stmmac_block_cb_list);
6269
6270 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6271 void *type_data)
6272 {
6273 struct stmmac_priv *priv = netdev_priv(ndev);
6274
6275 switch (type) {
6276 case TC_QUERY_CAPS:
6277 return stmmac_tc_query_caps(priv, priv, type_data);
6278 case TC_SETUP_QDISC_MQPRIO:
6279 return stmmac_tc_setup_mqprio(priv, priv, type_data);
6280 case TC_SETUP_BLOCK:
6281 return flow_block_cb_setup_simple(type_data,
6282 &stmmac_block_cb_list,
6283 stmmac_setup_tc_block_cb,
6284 priv, priv, true);
6285 case TC_SETUP_QDISC_CBS:
6286 return stmmac_tc_setup_cbs(priv, priv, type_data);
6287 case TC_SETUP_QDISC_TAPRIO:
6288 return stmmac_tc_setup_taprio(priv, priv, type_data);
6289 case TC_SETUP_QDISC_ETF:
6290 return stmmac_tc_setup_etf(priv, priv, type_data);
6291 default:
6292 return -EOPNOTSUPP;
6293 }
6294 }
6295
6296 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6297 struct net_device *sb_dev)
6298 {
6299 int gso = skb_shinfo(skb)->gso_type;
6300
6301 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6302 /*
6303 * There is no way to determine the number of TSO/USO
6304 * capable Queues. Let's always use Queue 0
6305 * because if TSO/USO is supported then at least this
6306 * one will be capable.
6307 */
6308 return 0;
6309 }
6310
6311 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6312 }
6313
6314 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6315 {
6316 struct stmmac_priv *priv = netdev_priv(ndev);
6317 int ret = 0;
6318
6319 ret = pm_runtime_resume_and_get(priv->device);
6320 if (ret < 0)
6321 return ret;
6322
6323 ret = eth_mac_addr(ndev, addr);
6324 if (ret)
6325 goto set_mac_error;
6326
6327 phylink_rx_clk_stop_block(priv->phylink);
6328 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6329 phylink_rx_clk_stop_unblock(priv->phylink);
6330
6331 set_mac_error:
6332 pm_runtime_put(priv->device);
6333
6334 return ret;
6335 }
6336
6337 #ifdef CONFIG_DEBUG_FS
6338 static struct dentry *stmmac_fs_dir;
6339
6340 static void sysfs_display_ring(void *head, int size, int extend_desc,
6341 struct seq_file *seq, dma_addr_t dma_phy_addr)
6342 {
6343 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6344 struct dma_desc *p = (struct dma_desc *)head;
6345 unsigned int desc_size;
6346 dma_addr_t dma_addr;
6347 int i;
6348
6349 desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6350 for (i = 0; i < size; i++) {
6351 dma_addr = dma_phy_addr + i * desc_size;
6352 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6353 i, &dma_addr,
6354 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6355 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6356 if (extend_desc)
6357 p = &(++ep)->basic;
6358 else
6359 p++;
6360 }
6361 }
6362
6363 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6364 {
6365 struct net_device *dev = seq->private;
6366 struct stmmac_priv *priv = netdev_priv(dev);
6367 u32 rx_count = priv->plat->rx_queues_to_use;
6368 u32 tx_count = priv->plat->tx_queues_to_use;
6369 u32 queue;
6370
6371 if ((dev->flags & IFF_UP) == 0)
6372 return 0;
6373
6374 for (queue = 0; queue < rx_count; queue++) {
6375 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6376
6377 seq_printf(seq, "RX Queue %d:\n", queue);
6378
6379 if (priv->extend_desc) {
6380 seq_printf(seq, "Extended descriptor ring:\n");
6381 sysfs_display_ring((void *)rx_q->dma_erx,
6382 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6383 } else {
6384 seq_printf(seq, "Descriptor ring:\n");
6385 sysfs_display_ring((void *)rx_q->dma_rx,
6386 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6387 }
6388 }
6389
6390 for (queue = 0; queue < tx_count; queue++) {
6391 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6392
6393 seq_printf(seq, "TX Queue %d:\n", queue);
6394
6395 if (priv->extend_desc) {
6396 seq_printf(seq, "Extended descriptor ring:\n");
6397 sysfs_display_ring((void *)tx_q->dma_etx,
6398 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6399 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6400 seq_printf(seq, "Descriptor ring:\n");
6401 sysfs_display_ring((void *)tx_q->dma_tx,
6402 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6403 }
6404 }
6405
6406 return 0;
6407 }
6408 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6409
6410 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6411 {
6412 static const char * const dwxgmac_timestamp_source[] = {
6413 "None",
6414 "Internal",
6415 "External",
6416 "Both",
6417 };
6418 static const char * const dwxgmac_safety_feature_desc[] = {
6419 "No",
6420 "All Safety Features with ECC and Parity",
6421 "All Safety Features without ECC or Parity",
6422 "All Safety Features with Parity Only",
6423 "ECC Only",
6424 "UNDEFINED",
6425 "UNDEFINED",
6426 "UNDEFINED",
6427 };
6428 struct net_device *dev = seq->private;
6429 struct stmmac_priv *priv = netdev_priv(dev);
6430
6431 if (!priv->hw_cap_support) {
6432 seq_printf(seq, "DMA HW features not supported\n");
6433 return 0;
6434 }
6435
6436 seq_printf(seq, "==============================\n");
6437 seq_printf(seq, "\tDMA HW features\n");
6438 seq_printf(seq, "==============================\n");
6439
6440 seq_printf(seq, "\t10/100 Mbps: %s\n",
6441 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6442 seq_printf(seq, "\t1000 Mbps: %s\n",
6443 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6444 seq_printf(seq, "\tHalf duplex: %s\n",
6445 (priv->dma_cap.half_duplex) ? "Y" : "N");
6446 if (priv->plat->has_xgmac) {
6447 seq_printf(seq,
6448 "\tNumber of Additional MAC address registers: %d\n",
6449 priv->dma_cap.multi_addr);
6450 } else {
6451 seq_printf(seq, "\tHash Filter: %s\n",
6452 (priv->dma_cap.hash_filter) ? "Y" : "N");
6453 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6454 (priv->dma_cap.multi_addr) ? "Y" : "N");
6455 }
6456 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6457 (priv->dma_cap.pcs) ? "Y" : "N");
6458 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6459 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6460 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6461 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6462 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6463 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6464 seq_printf(seq, "\tRMON module: %s\n",
6465 (priv->dma_cap.rmon) ? "Y" : "N");
6466 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6467 (priv->dma_cap.time_stamp) ? "Y" : "N");
6468 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6469 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6470 if (priv->plat->has_xgmac)
6471 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6472 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6473 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6474 (priv->dma_cap.eee) ? "Y" : "N");
6475 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6476 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6477 (priv->dma_cap.tx_coe) ? "Y" : "N");
6478 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6479 priv->plat->has_xgmac) {
6480 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6481 (priv->dma_cap.rx_coe) ? "Y" : "N");
6482 } else {
6483 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6484 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6485 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6486 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6487 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6488 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6489 }
6490 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6491 priv->dma_cap.number_rx_channel);
6492 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6493 priv->dma_cap.number_tx_channel);
6494 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6495 priv->dma_cap.number_rx_queues);
6496 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6497 priv->dma_cap.number_tx_queues);
6498 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6499 (priv->dma_cap.enh_desc) ? "Y" : "N");
6500 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6501 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6502 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6503 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6504 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6505 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6506 priv->dma_cap.pps_out_num);
6507 seq_printf(seq, "\tSafety Features: %s\n",
6508 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6509 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6510 priv->dma_cap.frpsel ? "Y" : "N");
6511 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6512 priv->dma_cap.host_dma_width);
6513 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6514 priv->dma_cap.rssen ? "Y" : "N");
6515 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6516 priv->dma_cap.vlhash ? "Y" : "N");
6517 seq_printf(seq, "\tSplit Header: %s\n",
6518 priv->dma_cap.sphen ? "Y" : "N");
6519 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6520 priv->dma_cap.vlins ? "Y" : "N");
6521 seq_printf(seq, "\tDouble VLAN: %s\n",
6522 priv->dma_cap.dvlan ? "Y" : "N");
6523 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6524 priv->dma_cap.l3l4fnum);
6525 seq_printf(seq, "\tARP Offloading: %s\n",
6526 priv->dma_cap.arpoffsel ? "Y" : "N");
6527 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6528 priv->dma_cap.estsel ? "Y" : "N");
6529 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6530 priv->dma_cap.fpesel ? "Y" : "N");
6531 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6532 priv->dma_cap.tbssel ? "Y" : "N");
6533 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6534 priv->dma_cap.tbs_ch_num);
6535 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6536 priv->dma_cap.sgfsel ? "Y" : "N");
6537 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6538 BIT(priv->dma_cap.ttsfd) >> 1);
6539 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6540 priv->dma_cap.numtc);
6541 seq_printf(seq, "\tDCB Feature: %s\n",
6542 priv->dma_cap.dcben ? "Y" : "N");
6543 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6544 priv->dma_cap.advthword ? "Y" : "N");
6545 seq_printf(seq, "\tPTP Offload: %s\n",
6546 priv->dma_cap.ptoen ? "Y" : "N");
6547 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6548 priv->dma_cap.osten ? "Y" : "N");
6549 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6550 priv->dma_cap.pfcen ? "Y" : "N");
6551 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6552 BIT(priv->dma_cap.frpes) << 6);
6553 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6554 BIT(priv->dma_cap.frpbs) << 6);
6555 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6556 priv->dma_cap.frppipe_num);
6557 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6558 priv->dma_cap.nrvf_num ?
6559 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6560 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6561 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6562 seq_printf(seq, "\tDepth of GCL: %lu\n",
6563 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6564 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6565 priv->dma_cap.cbtisel ? "Y" : "N");
6566 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6567 priv->dma_cap.aux_snapshot_n);
6568 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6569 priv->dma_cap.pou_ost_en ? "Y" : "N");
6570 seq_printf(seq, "\tEnhanced DMA: %s\n",
6571 priv->dma_cap.edma ? "Y" : "N");
6572 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6573 priv->dma_cap.ediffc ? "Y" : "N");
6574 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6575 priv->dma_cap.vxn ? "Y" : "N");
6576 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6577 priv->dma_cap.dbgmem ? "Y" : "N");
6578 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6579 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6580 return 0;
6581 }
6582 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6583
6584 /* Use network device events to rename debugfs file entries.
6585 */
6586 static int stmmac_device_event(struct notifier_block *unused,
6587 unsigned long event, void *ptr)
6588 {
6589 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6590 struct stmmac_priv *priv = netdev_priv(dev);
6591
6592 if (dev->netdev_ops != &stmmac_netdev_ops)
6593 goto done;
6594
6595 switch (event) {
6596 case NETDEV_CHANGENAME:
6597 debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6598 break;
6599 }
6600 done:
6601 return NOTIFY_DONE;
6602 }
6603
6604 static struct notifier_block stmmac_notifier = {
6605 .notifier_call = stmmac_device_event,
6606 };
6607
6608 static void stmmac_init_fs(struct net_device *dev)
6609 {
6610 struct stmmac_priv *priv = netdev_priv(dev);
6611
6612 rtnl_lock();
6613
6614 /* Create per netdev entries */
6615 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6616
6617 /* Entry to report DMA RX/TX rings */
6618 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6619 &stmmac_rings_status_fops);
6620
6621 /* Entry to report the DMA HW features */
6622 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6623 &stmmac_dma_cap_fops);
6624
6625 rtnl_unlock();
6626 }
6627
6628 static void stmmac_exit_fs(struct net_device *dev)
6629 {
6630 struct stmmac_priv *priv = netdev_priv(dev);
6631
6632 debugfs_remove_recursive(priv->dbgfs_dir);
6633 }
6634 #endif /* CONFIG_DEBUG_FS */
6635
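/* Bit-serial CRC-32 (reflected polynomial 0xedb88320) over the 12 VID
 * bits of a little-endian VLAN tag; the caller derives the VLAN hash
 * filter index from the reversed CRC.
 */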
6636 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6637 {
6638 unsigned char *data = (unsigned char *)&vid_le;
6639 unsigned char data_byte = 0;
6640 u32 crc = ~0x0;
6641 u32 temp = 0;
6642 int i, bits;
6643
6644 bits = get_bitmask_order(VLAN_VID_MASK);
6645 for (i = 0; i < bits; i++) {
6646 if ((i % 8) == 0)
6647 data_byte = data[i / 8];
6648
6649 temp = ((crc & 1) ^ data_byte) & 1;
6650 crc >>= 1;
6651 data_byte >>= 1;
6652
6653 if (temp)
6654 crc ^= 0xedb88320;
6655 }
6656
6657 return crc;
6658 }
6659
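/* Rebuild the VLAN filter from the active_vlans bitmap: a 16-bin hash is
 * programmed when the hardware supports VLAN hash filtering, otherwise
 * fall back to a single perfect-match VID (VID 0 always passes).
 */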
6660 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6661 {
6662 u32 crc, hash = 0;
6663 u16 pmatch = 0;
6664 int count = 0;
6665 u16 vid = 0;
6666
6667 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6668 __le16 vid_le = cpu_to_le16(vid);
6669 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6670 hash |= (1 << crc);
6671 count++;
6672 }
6673
6674 if (!priv->dma_cap.vlhash) {
6675 if (count > 2) /* VID = 0 always passes filter */
6676 return -EOPNOTSUPP;
6677
6678 pmatch = vid;
6679 hash = 0;
6680 }
6681
6682 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6683 }
6684
6685 /* FIXME: This may need RXC to be running, but it may be called with BH
6686 * disabled, which means we can't call phylink_rx_clk_stop*().
6687 */
6688 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6689 {
6690 struct stmmac_priv *priv = netdev_priv(ndev);
6691 bool is_double = false;
6692 int ret;
6693
6694 ret = pm_runtime_resume_and_get(priv->device);
6695 if (ret < 0)
6696 return ret;
6697
6698 if (be16_to_cpu(proto) == ETH_P_8021AD)
6699 is_double = true;
6700
6701 set_bit(vid, priv->active_vlans);
6702 ret = stmmac_vlan_update(priv, is_double);
6703 if (ret) {
6704 clear_bit(vid, priv->active_vlans);
6705 goto err_pm_put;
6706 }
6707
6708 if (priv->hw->num_vlan) {
6709 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6710 if (ret)
6711 goto err_pm_put;
6712 }
6713 err_pm_put:
6714 pm_runtime_put(priv->device);
6715
6716 return ret;
6717 }
6718
6719 /* FIXME: This may need RXC to be running, but it may be called with BH
6720 * disabled, which means we can't call phylink_rx_clk_stop*().
6721 */
6722 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6723 {
6724 struct stmmac_priv *priv = netdev_priv(ndev);
6725 bool is_double = false;
6726 int ret;
6727
6728 ret = pm_runtime_resume_and_get(priv->device);
6729 if (ret < 0)
6730 return ret;
6731
6732 if (be16_to_cpu(proto) == ETH_P_8021AD)
6733 is_double = true;
6734
6735 clear_bit(vid, priv->active_vlans);
6736
6737 if (priv->hw->num_vlan) {
6738 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6739 if (ret)
6740 goto del_vlan_error;
6741 }
6742
6743 ret = stmmac_vlan_update(priv, is_double);
6744
6745 del_vlan_error:
6746 pm_runtime_put(priv->device);
6747
6748 return ret;
6749 }
6750
6751 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6752 {
6753 struct stmmac_priv *priv = netdev_priv(dev);
6754
6755 switch (bpf->command) {
6756 case XDP_SETUP_PROG:
6757 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6758 case XDP_SETUP_XSK_POOL:
6759 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6760 bpf->xsk.queue_id);
6761 default:
6762 return -EOPNOTSUPP;
6763 }
6764 }
6765
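/* .ndo_xdp_xmit handler: transmit frames redirected from other devices
 * via XDP_REDIRECT. Returns the number of frames accepted; the caller
 * frees the rest.
 */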
6766 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6767 struct xdp_frame **frames, u32 flags)
6768 {
6769 struct stmmac_priv *priv = netdev_priv(dev);
6770 int cpu = smp_processor_id();
6771 struct netdev_queue *nq;
6772 int i, nxmit = 0;
6773 int queue;
6774
6775 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6776 return -ENETDOWN;
6777
6778 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6779 return -EINVAL;
6780
6781 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6782 nq = netdev_get_tx_queue(priv->dev, queue);
6783
6784 __netif_tx_lock(nq, cpu);
6785 /* Avoids TX time-out as we are sharing with slow path */
6786 txq_trans_cond_update(nq);
6787
6788 for (i = 0; i < num_frames; i++) {
6789 int res;
6790
6791 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6792 if (res == STMMAC_XDP_CONSUMED)
6793 break;
6794
6795 nxmit++;
6796 }
6797
6798 if (flags & XDP_XMIT_FLUSH) {
6799 stmmac_flush_tx_descriptors(priv, queue);
6800 stmmac_tx_timer_arm(priv, queue);
6801 }
6802
6803 __netif_tx_unlock(nq);
6804
6805 return nxmit;
6806 }
6807
6808 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6809 {
6810 struct stmmac_channel *ch = &priv->channel[queue];
6811 unsigned long flags;
6812
6813 spin_lock_irqsave(&ch->lock, flags);
6814 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6815 spin_unlock_irqrestore(&ch->lock, flags);
6816
6817 stmmac_stop_rx_dma(priv, queue);
6818 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6819 }
6820
6821 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6822 {
6823 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6824 struct stmmac_channel *ch = &priv->channel[queue];
6825 unsigned long flags;
6826 u32 buf_size;
6827 int ret;
6828
6829 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6830 if (ret) {
6831 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6832 return;
6833 }
6834
6835 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6836 if (ret) {
6837 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6838 netdev_err(priv->dev, "Failed to init RX desc.\n");
6839 return;
6840 }
6841
6842 stmmac_reset_rx_queue(priv, queue);
6843 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6844
6845 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6846 rx_q->dma_rx_phy, rx_q->queue_index);
6847
6848 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6849 sizeof(struct dma_desc));
6850 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6851 rx_q->rx_tail_addr, rx_q->queue_index);
6852
6853 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6854 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6855 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6856 buf_size,
6857 rx_q->queue_index);
6858 } else {
6859 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6860 priv->dma_conf.dma_buf_sz,
6861 rx_q->queue_index);
6862 }
6863
6864 stmmac_start_rx_dma(priv, queue);
6865
6866 spin_lock_irqsave(&ch->lock, flags);
6867 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6868 spin_unlock_irqrestore(&ch->lock, flags);
6869 }
6870
6871 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6872 {
6873 struct stmmac_channel *ch = &priv->channel[queue];
6874 unsigned long flags;
6875
6876 spin_lock_irqsave(&ch->lock, flags);
6877 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6878 spin_unlock_irqrestore(&ch->lock, flags);
6879
6880 stmmac_stop_tx_dma(priv, queue);
6881 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6882 }
6883
6884 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6885 {
6886 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6887 struct stmmac_channel *ch = &priv->channel[queue];
6888 unsigned long flags;
6889 int ret;
6890
6891 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6892 if (ret) {
6893 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6894 return;
6895 }
6896
6897 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6898 if (ret) {
6899 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6900 netdev_err(priv->dev, "Failed to init TX desc.\n");
6901 return;
6902 }
6903
6904 stmmac_reset_tx_queue(priv, queue);
6905 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6906
6907 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6908 tx_q->dma_tx_phy, tx_q->queue_index);
6909
6910 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6911 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6912
6913 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6914 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6915 tx_q->tx_tail_addr, tx_q->queue_index);
6916
6917 stmmac_start_tx_dma(priv, queue);
6918
6919 spin_lock_irqsave(&ch->lock, flags);
6920 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6921 spin_unlock_irqrestore(&ch->lock, flags);
6922 }
6923
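/**
 * stmmac_xdp_release - release the datapath for an XDP reconfiguration
 * @dev: network device pointer
 * Description: stops the TX queues and NAPI, cancels the TX coalescing
 * timers, frees the IRQ lines, stops all DMA channels, releases the
 * descriptor resources and disables the MAC, leaving the interface ready
 * to be set up again by stmmac_xdp_open().
 */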
6924 void stmmac_xdp_release(struct net_device *dev)
6925 {
6926 struct stmmac_priv *priv = netdev_priv(dev);
6927 u32 chan;
6928
6929 /* Ensure tx function is not running */
6930 netif_tx_disable(dev);
6931
6932 /* Disable NAPI process */
6933 stmmac_disable_all_queues(priv);
6934
6935 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6936 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6937
6938 /* Free the IRQ lines */
6939 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6940
6941 /* Stop TX/RX DMA channels */
6942 stmmac_stop_all_dma(priv);
6943
6944 /* Release and free the Rx/Tx resources */
6945 free_dma_desc_resources(priv, &priv->dma_conf);
6946
6947 /* Disable the MAC Rx/Tx */
6948 stmmac_mac_set(priv, priv->ioaddr, false);
6949
6950 /* set trans_start so we don't get spurious
6951 * watchdogs during reset
6952 */
6953 netif_trans_update(dev);
6954 netif_carrier_off(dev);
6955 }
6956
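/**
 * stmmac_xdp_open - (re)open the datapath after an XDP reconfiguration
 * @dev: network device pointer
 * Description: counterpart of stmmac_xdp_release(); allocates and
 * initializes the descriptor rings, reprograms the RX/TX DMA channels
 * (using the XSK pool frame size where a pool is attached), enables the
 * MAC and the DMA, requests the IRQ lines and restarts NAPI and the TX
 * queues.
 * Return: 0 on success, a negative errno otherwise.
 */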
6957 int stmmac_xdp_open(struct net_device *dev)
6958 {
6959 struct stmmac_priv *priv = netdev_priv(dev);
6960 u32 rx_cnt = priv->plat->rx_queues_to_use;
6961 u32 tx_cnt = priv->plat->tx_queues_to_use;
6962 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6963 struct stmmac_rx_queue *rx_q;
6964 struct stmmac_tx_queue *tx_q;
6965 u32 buf_size;
6966 bool sph_en;
6967 u32 chan;
6968 int ret;
6969
6970 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6971 if (ret < 0) {
6972 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6973 __func__);
6974 goto dma_desc_error;
6975 }
6976
6977 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6978 if (ret < 0) {
6979 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6980 __func__);
6981 goto init_error;
6982 }
6983
6984 stmmac_reset_queues_param(priv);
6985
6986 /* DMA CSR Channel configuration */
6987 for (chan = 0; chan < dma_csr_ch; chan++) {
6988 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6989 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6990 }
6991
6992 /* Adjust Split header */
6993 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6994
6995 /* DMA RX Channel Configuration */
6996 for (chan = 0; chan < rx_cnt; chan++) {
6997 rx_q = &priv->dma_conf.rx_queue[chan];
6998
6999 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7000 rx_q->dma_rx_phy, chan);
7001
7002 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
7003 (rx_q->buf_alloc_num *
7004 sizeof(struct dma_desc));
7005 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
7006 rx_q->rx_tail_addr, chan);
7007
7008 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
7009 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
7010 stmmac_set_dma_bfsize(priv, priv->ioaddr,
7011 buf_size,
7012 rx_q->queue_index);
7013 } else {
7014 stmmac_set_dma_bfsize(priv, priv->ioaddr,
7015 priv->dma_conf.dma_buf_sz,
7016 rx_q->queue_index);
7017 }
7018
7019 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
7020 }
7021
7022 /* DMA TX Channel Configuration */
7023 for (chan = 0; chan < tx_cnt; chan++) {
7024 tx_q = &priv->dma_conf.tx_queue[chan];
7025
7026 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
7027 tx_q->dma_tx_phy, chan);
7028
7029 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
7030 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
7031 tx_q->tx_tail_addr, chan);
7032
7033 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7034 }
7035
7036 /* Enable the MAC Rx/Tx */
7037 stmmac_mac_set(priv, priv->ioaddr, true);
7038
7039 /* Start Rx & Tx DMA Channels */
7040 stmmac_start_all_dma(priv);
7041
7042 ret = stmmac_request_irq(dev);
7043 if (ret)
7044 goto irq_error;
7045
7046 /* Enable NAPI process */
7047 stmmac_enable_all_queues(priv);
7048 netif_carrier_on(dev);
7049 netif_tx_start_all_queues(dev);
7050 stmmac_enable_all_dma_irq(priv);
7051
7052 return 0;
7053
7054 irq_error:
7055 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7056 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7057
7058 stmmac_hw_teardown(dev);
7059 init_error:
7060 free_dma_desc_resources(priv, &priv->dma_conf);
7061 dma_desc_error:
7062 return ret;
7063 }
7064
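/**
 * stmmac_xsk_wakeup - .ndo_xsk_wakeup callback for AF_XDP zero-copy
 * @dev: network device pointer
 * @queue: queue index to wake up
 * @flags: XDP_WAKEUP_* flags (not used here)
 * Description: checks that the interface is up, that XDP is enabled and
 * that an XSK pool is bound to @queue, then schedules the channel's
 * combined RX/TX NAPI unless it is already running.
 * Return: 0 on success, -ENETDOWN or -EINVAL otherwise.
 */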
7065 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
7066 {
7067 struct stmmac_priv *priv = netdev_priv(dev);
7068 struct stmmac_rx_queue *rx_q;
7069 struct stmmac_tx_queue *tx_q;
7070 struct stmmac_channel *ch;
7071
7072 if (test_bit(STMMAC_DOWN, &priv->state) ||
7073 !netif_carrier_ok(priv->dev))
7074 return -ENETDOWN;
7075
7076 if (!stmmac_xdp_is_enabled(priv))
7077 return -EINVAL;
7078
7079 if (queue >= priv->plat->rx_queues_to_use ||
7080 queue >= priv->plat->tx_queues_to_use)
7081 return -EINVAL;
7082
7083 rx_q = &priv->dma_conf.rx_queue[queue];
7084 tx_q = &priv->dma_conf.tx_queue[queue];
7085 ch = &priv->channel[queue];
7086
7087 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7088 return -EINVAL;
7089
7090 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7091 /* EQoS does not have a per-DMA channel SW interrupt,
7092 * so we schedule the RX/TX NAPI straight away.
7093 */
7094 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7095 __napi_schedule(&ch->rxtx_napi);
7096 }
7097
7098 return 0;
7099 }
7100
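/**
 * stmmac_get_stats64 - .ndo_get_stats64 callback
 * @dev: network device pointer
 * @stats: structure to be filled with the accumulated counters
 * Description: sums the per-queue packet and byte counters under their
 * u64_stats sync points and copies the error counters kept in
 * priv->xstats into @stats.
 */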
7101 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7102 {
7103 struct stmmac_priv *priv = netdev_priv(dev);
7104 u32 tx_cnt = priv->plat->tx_queues_to_use;
7105 u32 rx_cnt = priv->plat->rx_queues_to_use;
7106 unsigned int start;
7107 int q;
7108
7109 for (q = 0; q < tx_cnt; q++) {
7110 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7111 u64 tx_packets;
7112 u64 tx_bytes;
7113
7114 do {
7115 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7116 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
7117 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7118 do {
7119 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7120 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7121 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7122
7123 stats->tx_packets += tx_packets;
7124 stats->tx_bytes += tx_bytes;
7125 }
7126
7127 for (q = 0; q < rx_cnt; q++) {
7128 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7129 u64 rx_packets;
7130 u64 rx_bytes;
7131
7132 do {
7133 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7134 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7135 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
7136 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7137
7138 stats->rx_packets += rx_packets;
7139 stats->rx_bytes += rx_bytes;
7140 }
7141
7142 stats->rx_dropped = priv->xstats.rx_dropped;
7143 stats->rx_errors = priv->xstats.rx_errors;
7144 stats->tx_dropped = priv->xstats.tx_dropped;
7145 stats->tx_errors = priv->xstats.tx_errors;
7146 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7147 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7148 stats->rx_length_errors = priv->xstats.rx_length;
7149 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7150 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7151 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7152 }
7153
7154 static const struct net_device_ops stmmac_netdev_ops = {
7155 .ndo_open = stmmac_open,
7156 .ndo_start_xmit = stmmac_xmit,
7157 .ndo_stop = stmmac_release,
7158 .ndo_change_mtu = stmmac_change_mtu,
7159 .ndo_fix_features = stmmac_fix_features,
7160 .ndo_set_features = stmmac_set_features,
7161 .ndo_set_rx_mode = stmmac_set_rx_mode,
7162 .ndo_tx_timeout = stmmac_tx_timeout,
7163 .ndo_eth_ioctl = stmmac_ioctl,
7164 .ndo_get_stats64 = stmmac_get_stats64,
7165 .ndo_setup_tc = stmmac_setup_tc,
7166 .ndo_select_queue = stmmac_select_queue,
7167 .ndo_set_mac_address = stmmac_set_mac_address,
7168 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7169 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7170 .ndo_bpf = stmmac_bpf,
7171 .ndo_xdp_xmit = stmmac_xdp_xmit,
7172 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7173 .ndo_hwtstamp_get = stmmac_hwtstamp_get,
7174 .ndo_hwtstamp_set = stmmac_hwtstamp_set,
7175 };
7176
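/**
 * stmmac_reset_subtask - restart the interface after a requested reset
 * @priv: driver private structure
 * Description: if STMMAC_RESET_REQUESTED is set and the interface is not
 * going down, close and re-open the device under the RTNL lock to bring
 * the hardware back to a working state.
 */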
7177 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7178 {
7179 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7180 return;
7181 if (test_bit(STMMAC_DOWN, &priv->state))
7182 return;
7183
7184 netdev_err(priv->dev, "Reset adapter.\n");
7185
7186 rtnl_lock();
7187 netif_trans_update(priv->dev);
7188 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7189 usleep_range(1000, 2000);
7190
7191 set_bit(STMMAC_DOWN, &priv->state);
7192 dev_close(priv->dev);
7193 dev_open(priv->dev, NULL);
7194 clear_bit(STMMAC_DOWN, &priv->state);
7195 clear_bit(STMMAC_RESETING, &priv->state);
7196 rtnl_unlock();
7197 }
7198
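/* Service task run from the driver workqueue: run the reset subtask and
 * clear the STMMAC_SERVICE_SCHED flag.
 */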
7199 static void stmmac_service_task(struct work_struct *work)
7200 {
7201 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7202 service_task);
7203
7204 stmmac_reset_subtask(priv);
7205 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7206 }
7207
7208 /**
7209 * stmmac_hw_init - Init the MAC device
7210 * @priv: driver private structure
7211 * Description: this function configures the MAC device according to
7212 * the platform parameters or the HW capability register. It prepares the
7213 * driver to use either ring or chain mode and to set up either enhanced or
7214 * normal descriptors.
7215 */
7216 static int stmmac_hw_init(struct stmmac_priv *priv)
7217 {
7218 int ret;
7219
7220 /* dwmac-sun8i only works in chain mode */
7221 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7222 chain_mode = 1;
7223 priv->chain_mode = chain_mode;
7224
7225 /* Initialize HW Interface */
7226 ret = stmmac_hwif_init(priv);
7227 if (ret)
7228 return ret;
7229
7230 /* Get the HW capability (new GMAC newer than 3.50a) */
7231 priv->hw_cap_support = stmmac_get_hw_features(priv);
7232 if (priv->hw_cap_support) {
7233 dev_info(priv->device, "DMA HW capability register supported\n");
7234
7235 /* We can override some gmac/dma configuration fields
7236 * (e.g. enh_desc, tx_coe) that are passed through the
7237 * platform with the values from the HW capability
7238 * register (if supported).
7239 */
7240 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7241 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7242 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7243 priv->hw->pmt = priv->plat->pmt;
7244 if (priv->dma_cap.hash_tb_sz) {
7245 priv->hw->multicast_filter_bins =
7246 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7247 priv->hw->mcast_bits_log2 =
7248 ilog2(priv->hw->multicast_filter_bins);
7249 }
7250
7251 /* TXCOE doesn't work in thresh DMA mode */
7252 if (priv->plat->force_thresh_dma_mode)
7253 priv->plat->tx_coe = 0;
7254 else
7255 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7256
7257 /* In case of GMAC4 rx_coe is from HW cap register. */
7258 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7259
7260 if (priv->dma_cap.rx_coe_type2)
7261 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7262 else if (priv->dma_cap.rx_coe_type1)
7263 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7264
7265 } else {
7266 dev_info(priv->device, "No HW DMA feature register supported\n");
7267 }
7268
7269 if (priv->plat->rx_coe) {
7270 priv->hw->rx_csum = priv->plat->rx_coe;
7271 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7272 if (priv->synopsys_id < DWMAC_CORE_4_00)
7273 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7274 }
7275 if (priv->plat->tx_coe)
7276 dev_info(priv->device, "TX Checksum insertion supported\n");
7277
7278 if (priv->plat->pmt) {
7279 dev_info(priv->device, "Wake-Up On LAN supported\n");
7280 device_set_wakeup_capable(priv->device, 1);
7281 }
7282
7283 if (priv->dma_cap.tsoen)
7284 dev_info(priv->device, "TSO supported\n");
7285
7286 if (priv->dma_cap.number_rx_queues &&
7287 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7288 dev_warn(priv->device,
7289 "Number of Rx queues (%u) exceeds dma capability\n",
7290 priv->plat->rx_queues_to_use);
7291 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7292 }
7293 if (priv->dma_cap.number_tx_queues &&
7294 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7295 dev_warn(priv->device,
7296 "Number of Tx queues (%u) exceeds dma capability\n",
7297 priv->plat->tx_queues_to_use);
7298 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7299 }
7300
7301 if (priv->dma_cap.rx_fifo_size &&
7302 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7303 dev_warn(priv->device,
7304 "Rx FIFO size (%u) exceeds dma capability\n",
7305 priv->plat->rx_fifo_size);
7306 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7307 }
7308 if (priv->dma_cap.tx_fifo_size &&
7309 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7310 dev_warn(priv->device,
7311 "Tx FIFO size (%u) exceeds dma capability\n",
7312 priv->plat->tx_fifo_size);
7313 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7314 }
7315
7316 priv->hw->vlan_fail_q_en =
7317 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7318 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7319
7320 /* Run HW quirks, if any */
7321 if (priv->hwif_quirks) {
7322 ret = priv->hwif_quirks(priv);
7323 if (ret)
7324 return ret;
7325 }
7326
7327 /* Rx Watchdog is available in cores newer than 3.40.
7328 * In some cases, for example on buggy HW, this feature
7329 * has to be disabled; this can be done by passing the
7330 * riwt_off field from the platform.
7331 */
7332 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7333 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7334 priv->use_riwt = 1;
7335 dev_info(priv->device,
7336 "Enable RX Mitigation via HW Watchdog Timer\n");
7337 }
7338
7339 return 0;
7340 }
7341
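/**
 * stmmac_napi_add - register the NAPI contexts of every channel
 * @dev: network device pointer
 * Description: for each DMA channel, registers an RX NAPI, a TX NAPI
 * and, when the channel has both an RX and a TX queue, the combined
 * RX/TX NAPI used by the XSK wakeup path.
 */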
7342 static void stmmac_napi_add(struct net_device *dev)
7343 {
7344 struct stmmac_priv *priv = netdev_priv(dev);
7345 u32 queue, maxq;
7346
7347 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7348
7349 for (queue = 0; queue < maxq; queue++) {
7350 struct stmmac_channel *ch = &priv->channel[queue];
7351
7352 ch->priv_data = priv;
7353 ch->index = queue;
7354 spin_lock_init(&ch->lock);
7355
7356 if (queue < priv->plat->rx_queues_to_use) {
7357 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7358 }
7359 if (queue < priv->plat->tx_queues_to_use) {
7360 netif_napi_add_tx(dev, &ch->tx_napi,
7361 stmmac_napi_poll_tx);
7362 }
7363 if (queue < priv->plat->rx_queues_to_use &&
7364 queue < priv->plat->tx_queues_to_use) {
7365 netif_napi_add(dev, &ch->rxtx_napi,
7366 stmmac_napi_poll_rxtx);
7367 }
7368 }
7369 }
7370
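/**
 * stmmac_napi_del - unregister the NAPI contexts added by stmmac_napi_add()
 * @dev: network device pointer
 */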
7371 static void stmmac_napi_del(struct net_device *dev)
7372 {
7373 struct stmmac_priv *priv = netdev_priv(dev);
7374 u32 queue, maxq;
7375
7376 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7377
7378 for (queue = 0; queue < maxq; queue++) {
7379 struct stmmac_channel *ch = &priv->channel[queue];
7380
7381 if (queue < priv->plat->rx_queues_to_use)
7382 netif_napi_del(&ch->rx_napi);
7383 if (queue < priv->plat->tx_queues_to_use)
7384 netif_napi_del(&ch->tx_napi);
7385 if (queue < priv->plat->rx_queues_to_use &&
7386 queue < priv->plat->tx_queues_to_use) {
7387 netif_napi_del(&ch->rxtx_napi);
7388 }
7389 }
7390 }
7391
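/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: network device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: closes the interface if it is running, re-registers the
 * NAPI contexts for the new queue counts, refreshes the default RSS
 * indirection table (unless it was user-configured) and re-opens the
 * interface.
 * Return: 0 on success or the error returned by stmmac_open().
 */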
7392 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7393 {
7394 struct stmmac_priv *priv = netdev_priv(dev);
7395 int ret = 0, i;
7396
7397 if (netif_running(dev))
7398 stmmac_release(dev);
7399
7400 stmmac_napi_del(dev);
7401
7402 priv->plat->rx_queues_to_use = rx_cnt;
7403 priv->plat->tx_queues_to_use = tx_cnt;
7404 if (!netif_is_rxfh_configured(dev))
7405 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7406 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7407 rx_cnt);
7408
7409 stmmac_napi_add(dev);
7410
7411 if (netif_running(dev))
7412 ret = stmmac_open(dev);
7413
7414 return ret;
7415 }
7416
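/**
 * stmmac_reinit_ringparam - change the RX/TX descriptor ring sizes
 * @dev: network device pointer
 * @rx_size: new number of RX descriptors per queue
 * @tx_size: new number of TX descriptors per queue
 * Description: closes the interface if it is running, updates the ring
 * sizes in dma_conf and re-opens the interface so that the rings are
 * re-allocated with the new sizes.
 * Return: 0 on success or the error returned by stmmac_open().
 */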
7417 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7418 {
7419 struct stmmac_priv *priv = netdev_priv(dev);
7420 int ret = 0;
7421
7422 if (netif_running(dev))
7423 stmmac_release(dev);
7424
7425 priv->dma_conf.dma_rx_size = rx_size;
7426 priv->dma_conf.dma_tx_size = tx_size;
7427
7428 if (netif_running(dev))
7429 ret = stmmac_open(dev);
7430
7431 return ret;
7432 }
7433
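/* XDP metadata kfunc: report the RX hardware timestamp of an XDP frame.
 * On GMAC4/XGMAC the timestamp is carried by the context (next)
 * descriptor; the CDC error adjustment is subtracted before returning.
 */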
7434 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7435 {
7436 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7437 struct dma_desc *desc_contains_ts = ctx->desc;
7438 struct stmmac_priv *priv = ctx->priv;
7439 struct dma_desc *ndesc = ctx->ndesc;
7440 struct dma_desc *desc = ctx->desc;
7441 u64 ns = 0;
7442
7443 if (!priv->hwts_rx_en)
7444 return -ENODATA;
7445
7446 /* For GMAC4, the valid timestamp is held in the context (CTX) next descriptor. */
7447 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7448 desc_contains_ts = ndesc;
7449
7450 /* Check if timestamp is available */
7451 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7452 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7453 ns -= priv->plat->cdc_error_adj;
7454 *timestamp = ns_to_ktime(ns);
7455 return 0;
7456 }
7457
7458 return -ENODATA;
7459 }
7460
7461 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7462 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7463 };
7464
7465 /**
7466 * stmmac_dvr_probe
7467 * @device: device pointer
7468 * @plat_dat: platform data pointer
7469 * @res: stmmac resource pointer
7470 * Description: this is the main probe function, used to
7471 * allocate the net_device via alloc_etherdev and to set up the
7472 * private structure.
7473 * Return: 0 on success, otherwise a negative errno.
7474 */
7475 int stmmac_dvr_probe(struct device *device,
7476 struct plat_stmmacenet_data *plat_dat,
7477 struct stmmac_resources *res)
7478 {
7479 struct net_device *ndev = NULL;
7480 struct stmmac_priv *priv;
7481 u32 rxq;
7482 int i, ret = 0;
7483
7484 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7485 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7486 if (!ndev)
7487 return -ENOMEM;
7488
7489 SET_NETDEV_DEV(ndev, device);
7490
7491 priv = netdev_priv(ndev);
7492 priv->device = device;
7493 priv->dev = ndev;
7494
7495 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7496 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7497 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7498 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7499 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7500 }
7501
7502 priv->xstats.pcpu_stats =
7503 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7504 if (!priv->xstats.pcpu_stats)
7505 return -ENOMEM;
7506
7507 stmmac_set_ethtool_ops(ndev);
7508 priv->pause_time = pause;
7509 priv->plat = plat_dat;
7510 priv->ioaddr = res->addr;
7511 priv->dev->base_addr = (unsigned long)res->addr;
7512 priv->plat->dma_cfg->multi_msi_en =
7513 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7514
7515 priv->dev->irq = res->irq;
7516 priv->wol_irq = res->wol_irq;
7517 priv->lpi_irq = res->lpi_irq;
7518 priv->sfty_irq = res->sfty_irq;
7519 priv->sfty_ce_irq = res->sfty_ce_irq;
7520 priv->sfty_ue_irq = res->sfty_ue_irq;
7521 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7522 priv->rx_irq[i] = res->rx_irq[i];
7523 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7524 priv->tx_irq[i] = res->tx_irq[i];
7525
7526 if (!is_zero_ether_addr(res->mac))
7527 eth_hw_addr_set(priv->dev, res->mac);
7528
7529 dev_set_drvdata(device, priv->dev);
7530
7531 /* Verify driver arguments */
7532 stmmac_verify_args();
7533
7534 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7535 if (!priv->af_xdp_zc_qps)
7536 return -ENOMEM;
7537
7538 /* Allocate workqueue */
7539 priv->wq = create_singlethread_workqueue("stmmac_wq");
7540 if (!priv->wq) {
7541 dev_err(priv->device, "failed to create workqueue\n");
7542 ret = -ENOMEM;
7543 goto error_wq_init;
7544 }
7545
7546 INIT_WORK(&priv->service_task, stmmac_service_task);
7547
7548 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7549
7550 /* Override with kernel parameters if supplied XXX CRS XXX
7551 * this needs to have multiple instances
7552 */
7553 if ((phyaddr >= 0) && (phyaddr <= 31))
7554 priv->plat->phy_addr = phyaddr;
7555
7556 if (priv->plat->stmmac_rst) {
7557 ret = reset_control_assert(priv->plat->stmmac_rst);
7558 reset_control_deassert(priv->plat->stmmac_rst);
7559 /* Some reset controllers only have a reset callback instead of
7560 * an assert + deassert callback pair.
7561 */
7562 if (ret == -ENOTSUPP)
7563 reset_control_reset(priv->plat->stmmac_rst);
7564 }
7565
7566 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7567 if (ret == -ENOTSUPP)
7568 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7569 ERR_PTR(ret));
7570
7571 /* Wait a bit for the reset to take effect */
7572 udelay(10);
7573
7574 /* Init MAC and get the capabilities */
7575 ret = stmmac_hw_init(priv);
7576 if (ret)
7577 goto error_hw_init;
7578
7579 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7580 */
7581 if (priv->synopsys_id < DWMAC_CORE_5_20)
7582 priv->plat->dma_cfg->dche = false;
7583
7584 stmmac_check_ether_addr(priv);
7585
7586 ndev->netdev_ops = &stmmac_netdev_ops;
7587
7588 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7589 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7590
7591 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7592 NETIF_F_RXCSUM;
7593 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7594 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7595
7596 ret = stmmac_tc_init(priv, priv);
7597 if (!ret) {
7598 ndev->hw_features |= NETIF_F_HW_TC;
7599 }
7600
7601 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7602 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7603 if (priv->plat->has_gmac4)
7604 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7605 priv->tso = true;
7606 dev_info(priv->device, "TSO feature enabled\n");
7607 }
7608
7609 if (priv->dma_cap.sphen &&
7610 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7611 ndev->hw_features |= NETIF_F_GRO;
7612 priv->sph_cap = true;
7613 priv->sph = priv->sph_cap;
7614 dev_info(priv->device, "SPH feature enabled\n");
7615 }
7616
7617 /* Ideally our host DMA address width is the same as for the
7618 * device. However, it may differ and then we have to use our
7619 * host DMA width for allocation and the device DMA width for
7620 * register handling.
7621 */
7622 if (priv->plat->host_dma_width)
7623 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7624 else
7625 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7626
7627 if (priv->dma_cap.host_dma_width) {
7628 ret = dma_set_mask_and_coherent(device,
7629 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7630 if (!ret) {
7631 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7632 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7633
7634 /*
7635 * If more than 32 bits can be addressed, make sure to
7636 * enable enhanced addressing mode.
7637 */
7638 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7639 priv->plat->dma_cfg->eame = true;
7640 } else {
7641 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7642 if (ret) {
7643 dev_err(priv->device, "Failed to set DMA Mask\n");
7644 goto error_hw_init;
7645 }
7646
7647 priv->dma_cap.host_dma_width = 32;
7648 }
7649 }
7650
7651 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7652 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7653 #ifdef STMMAC_VLAN_TAG_USED
7654 /* Both mac100 and gmac support receive VLAN tag detection */
7655 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7656 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
7657 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7658 priv->hw->hw_vlan_en = true;
7659 }
7660 if (priv->dma_cap.vlhash) {
7661 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7662 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7663 }
7664 if (priv->dma_cap.vlins) {
7665 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7666 if (priv->dma_cap.dvlan)
7667 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7668 }
7669 #endif
7670 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7671
7672 priv->xstats.threshold = tc;
7673
7674 /* Initialize RSS */
7675 rxq = priv->plat->rx_queues_to_use;
7676 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7677 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7678 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7679
7680 if (priv->dma_cap.rssen && priv->plat->rss_en)
7681 ndev->features |= NETIF_F_RXHASH;
7682
7683 ndev->vlan_features |= ndev->features;
7684
7685 /* MTU range: 46 - hw-specific max */
7686 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7687 if (priv->plat->has_xgmac)
7688 ndev->max_mtu = XGMAC_JUMBO_LEN;
7689 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7690 ndev->max_mtu = JUMBO_LEN;
7691 else
7692 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7693 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7694 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7695 */
7696 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7697 (priv->plat->maxmtu >= ndev->min_mtu))
7698 ndev->max_mtu = priv->plat->maxmtu;
7699 else if (priv->plat->maxmtu < ndev->min_mtu)
7700 dev_warn(priv->device,
7701 "%s: warning: maxmtu having invalid value (%d)\n",
7702 __func__, priv->plat->maxmtu);
7703
7704 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7705
7706 /* Setup channels NAPI */
7707 stmmac_napi_add(ndev);
7708
7709 mutex_init(&priv->lock);
7710
7711 stmmac_fpe_init(priv);
7712
7713 /* If a specific clk_csr value is passed from the platform,
7714 * this means that the CSR Clock Range selection cannot be
7715 * changed at run-time and is fixed. Otherwise, the driver will
7716 * try to set the MDC clock dynamically according to the actual
7717 * CSR clock input.
7718 */
7719 if (priv->plat->clk_csr >= 0)
7720 priv->clk_csr = priv->plat->clk_csr;
7721 else
7722 stmmac_clk_csr_set(priv);
7723
7724 stmmac_check_pcs_mode(priv);
7725
7726 pm_runtime_get_noresume(device);
7727 pm_runtime_set_active(device);
7728 if (!pm_runtime_enabled(device))
7729 pm_runtime_enable(device);
7730
7731 ret = stmmac_mdio_register(ndev);
7732 if (ret < 0) {
7733 dev_err_probe(priv->device, ret,
7734 "MDIO bus (id: %d) registration failed\n",
7735 priv->plat->bus_id);
7736 goto error_mdio_register;
7737 }
7738
7739 ret = stmmac_pcs_setup(ndev);
7740 if (ret)
7741 goto error_pcs_setup;
7742
7743 ret = stmmac_phy_setup(priv);
7744 if (ret) {
7745 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7746 goto error_phy_setup;
7747 }
7748
7749 ret = register_netdev(ndev);
7750 if (ret) {
7751 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7752 __func__, ret);
7753 goto error_netdev_register;
7754 }
7755
7756 #ifdef CONFIG_DEBUG_FS
7757 stmmac_init_fs(ndev);
7758 #endif
7759
7760 if (priv->plat->dump_debug_regs)
7761 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7762
7763 /* Let pm_runtime_put() disable the clocks.
7764 * If CONFIG_PM is not enabled, the clocks will stay powered.
7765 */
7766 pm_runtime_put(device);
7767
7768 return ret;
7769
7770 error_netdev_register:
7771 phylink_destroy(priv->phylink);
7772 error_phy_setup:
7773 stmmac_pcs_clean(ndev);
7774 error_pcs_setup:
7775 stmmac_mdio_unregister(ndev);
7776 error_mdio_register:
7777 stmmac_napi_del(ndev);
7778 error_hw_init:
7779 destroy_workqueue(priv->wq);
7780 error_wq_init:
7781 bitmap_free(priv->af_xdp_zc_qps);
7782
7783 return ret;
7784 }
7785 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7786
7787 /**
7788 * stmmac_dvr_remove
7789 * @dev: device pointer
7790 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7791 * changes the link status and releases the DMA descriptor rings.
7792 */
7793 void stmmac_dvr_remove(struct device *dev)
7794 {
7795 struct net_device *ndev = dev_get_drvdata(dev);
7796 struct stmmac_priv *priv = netdev_priv(ndev);
7797
7798 netdev_info(priv->dev, "%s: removing driver", __func__);
7799
7800 pm_runtime_get_sync(dev);
7801
7802 unregister_netdev(ndev);
7803
7804 #ifdef CONFIG_DEBUG_FS
7805 stmmac_exit_fs(ndev);
7806 #endif
7807 phylink_destroy(priv->phylink);
7808 if (priv->plat->stmmac_rst)
7809 reset_control_assert(priv->plat->stmmac_rst);
7810 reset_control_assert(priv->plat->stmmac_ahb_rst);
7811
7812 stmmac_pcs_clean(ndev);
7813 stmmac_mdio_unregister(ndev);
7814
7815 destroy_workqueue(priv->wq);
7816 mutex_destroy(&priv->lock);
7817 bitmap_free(priv->af_xdp_zc_qps);
7818
7819 pm_runtime_disable(dev);
7820 pm_runtime_put_noidle(dev);
7821 }
7822 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7823
7824 /**
7825 * stmmac_suspend - suspend callback
7826 * @dev: device pointer
7827 * Description: this function suspends the device; it is called by the
7828 * platform driver to stop the network queues, program the PMT register
7829 * (for WoL) and clean up and release the driver resources.
7830 */
7831 int stmmac_suspend(struct device *dev)
7832 {
7833 struct net_device *ndev = dev_get_drvdata(dev);
7834 struct stmmac_priv *priv = netdev_priv(ndev);
7835 u32 chan;
7836
7837 if (!ndev || !netif_running(ndev))
7838 return 0;
7839
7840 mutex_lock(&priv->lock);
7841
7842 netif_device_detach(ndev);
7843
7844 stmmac_disable_all_queues(priv);
7845
7846 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7847 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7848
7849 if (priv->eee_sw_timer_en) {
7850 priv->tx_path_in_lpi_mode = false;
7851 timer_delete_sync(&priv->eee_ctrl_timer);
7852 }
7853
7854 /* Stop TX/RX DMA */
7855 stmmac_stop_all_dma(priv);
7856
7857 if (priv->plat->serdes_powerdown)
7858 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7859
7860 /* Enable Power down mode by programming the PMT regs */
7861 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7862 stmmac_pmt(priv, priv->hw, priv->wolopts);
7863 priv->irq_wake = 1;
7864 } else {
7865 stmmac_mac_set(priv, priv->ioaddr, false);
7866 pinctrl_pm_select_sleep_state(priv->device);
7867 }
7868
7869 mutex_unlock(&priv->lock);
7870
7871 rtnl_lock();
7872 if (device_may_wakeup(priv->device) && !priv->plat->pmt)
7873 phylink_speed_down(priv->phylink, false);
7874
7875 phylink_suspend(priv->phylink,
7876 device_may_wakeup(priv->device) && priv->plat->pmt);
7877 rtnl_unlock();
7878
7879 if (stmmac_fpe_supported(priv))
7880 ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
7881
7882 return 0;
7883 }
7884 EXPORT_SYMBOL_GPL(stmmac_suspend);
7885
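/* Reset the software state (ring indices) of an RX queue */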
7886 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7887 {
7888 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7889
7890 rx_q->cur_rx = 0;
7891 rx_q->dirty_rx = 0;
7892 }
7893
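/* Reset the software state of a TX queue, including its BQL counters */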
7894 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7895 {
7896 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7897
7898 tx_q->cur_tx = 0;
7899 tx_q->dirty_tx = 0;
7900 tx_q->mss = 0;
7901
7902 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7903 }
7904
7905 /**
7906 * stmmac_reset_queues_param - reset queue parameters
7907 * @priv: device pointer
7908 */
7909 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7910 {
7911 u32 rx_cnt = priv->plat->rx_queues_to_use;
7912 u32 tx_cnt = priv->plat->tx_queues_to_use;
7913 u32 queue;
7914
7915 for (queue = 0; queue < rx_cnt; queue++)
7916 stmmac_reset_rx_queue(priv, queue);
7917
7918 for (queue = 0; queue < tx_cnt; queue++)
7919 stmmac_reset_tx_queue(priv, queue);
7920 }
7921
7922 /**
7923 * stmmac_resume - resume callback
7924 * @dev: device pointer
7925 * Description: on resume, this function is invoked to set up the DMA and the
7926 * core in a usable state.
7927 */
7928 int stmmac_resume(struct device *dev)
7929 {
7930 struct net_device *ndev = dev_get_drvdata(dev);
7931 struct stmmac_priv *priv = netdev_priv(ndev);
7932 int ret;
7933
7934 if (!netif_running(ndev))
7935 return 0;
7936
7937 /* The Power Down bit in the PM register is cleared
7938 * automatically as soon as a magic packet or a Wake-up frame
7939 * is received. Anyway, it's better to manually clear
7940 * this bit because it can generate problems while resuming
7941 * from other devices (e.g. serial console).
7942 */
7943 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7944 mutex_lock(&priv->lock);
7945 stmmac_pmt(priv, priv->hw, 0);
7946 mutex_unlock(&priv->lock);
7947 priv->irq_wake = 0;
7948 } else {
7949 pinctrl_pm_select_default_state(priv->device);
7950 /* reset the phy so that it's ready */
7951 if (priv->mii)
7952 stmmac_mdio_reset(priv->mii);
7953 }
7954
7955 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7956 priv->plat->serdes_powerup) {
7957 ret = priv->plat->serdes_powerup(ndev,
7958 priv->plat->bsp_priv);
7959
7960 if (ret < 0)
7961 return ret;
7962 }
7963
7964 rtnl_lock();
7965
7966 /* Prepare the PHY to resume, ensuring that its clocks which are
7967 * necessary for the MAC DMA reset to complete are running
7968 */
7969 phylink_prepare_resume(priv->phylink);
7970
7971 mutex_lock(&priv->lock);
7972
7973 stmmac_reset_queues_param(priv);
7974
7975 stmmac_free_tx_skbufs(priv);
7976 stmmac_clear_descriptors(priv, &priv->dma_conf);
7977
7978 stmmac_hw_setup(ndev, false);
7979 stmmac_init_coalesce(priv);
7980 phylink_rx_clk_stop_block(priv->phylink);
7981 stmmac_set_rx_mode(ndev);
7982
7983 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7984 phylink_rx_clk_stop_unblock(priv->phylink);
7985
7986 stmmac_enable_all_queues(priv);
7987 stmmac_enable_all_dma_irq(priv);
7988
7989 mutex_unlock(&priv->lock);
7990
7991 /* phylink_resume() must be called after the hardware has been
7992 * initialised because it may bring the link up immediately in a
7993 * workqueue thread, which will race with initialisation.
7994 */
7995 phylink_resume(priv->phylink);
7996 if (device_may_wakeup(priv->device) && !priv->plat->pmt)
7997 phylink_speed_up(priv->phylink);
7998
7999 rtnl_unlock();
8000
8001 netif_device_attach(ndev);
8002
8003 return 0;
8004 }
8005 EXPORT_SYMBOL_GPL(stmmac_resume);
8006
8007 #ifndef MODULE
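/* Parse the "stmmaceth=" kernel command line, a comma-separated list of
 * "option:value" pairs, e.g.:
 *   stmmaceth=debug:16,phyaddr:1,watchdog:5000
 * Each recognized option updates the corresponding driver parameter.
 */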
8008 static int __init stmmac_cmdline_opt(char *str)
8009 {
8010 char *opt;
8011
8012 if (!str || !*str)
8013 return 1;
8014 while ((opt = strsep(&str, ",")) != NULL) {
8015 if (!strncmp(opt, "debug:", 6)) {
8016 if (kstrtoint(opt + 6, 0, &debug))
8017 goto err;
8018 } else if (!strncmp(opt, "phyaddr:", 8)) {
8019 if (kstrtoint(opt + 8, 0, &phyaddr))
8020 goto err;
8021 } else if (!strncmp(opt, "tc:", 3)) {
8022 if (kstrtoint(opt + 3, 0, &tc))
8023 goto err;
8024 } else if (!strncmp(opt, "watchdog:", 9)) {
8025 if (kstrtoint(opt + 9, 0, &watchdog))
8026 goto err;
8027 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
8028 if (kstrtoint(opt + 10, 0, &flow_ctrl))
8029 goto err;
8030 } else if (!strncmp(opt, "pause:", 6)) {
8031 if (kstrtoint(opt + 6, 0, &pause))
8032 goto err;
8033 } else if (!strncmp(opt, "eee_timer:", 10)) {
8034 if (kstrtoint(opt + 10, 0, &eee_timer))
8035 goto err;
8036 } else if (!strncmp(opt, "chain_mode:", 11)) {
8037 if (kstrtoint(opt + 11, 0, &chain_mode))
8038 goto err;
8039 }
8040 }
8041 return 1;
8042
8043 err:
8044 pr_err("%s: ERROR broken module parameter conversion", __func__);
8045 return 1;
8046 }
8047
8048 __setup("stmmaceth=", stmmac_cmdline_opt);
8049 #endif /* MODULE */
8050
8051 static int __init stmmac_init(void)
8052 {
8053 #ifdef CONFIG_DEBUG_FS
8054 /* Create debugfs main directory if it doesn't exist yet */
8055 if (!stmmac_fs_dir)
8056 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8057 register_netdevice_notifier(&stmmac_notifier);
8058 #endif
8059
8060 return 0;
8061 }
8062
8063 static void __exit stmmac_exit(void)
8064 {
8065 #ifdef CONFIG_DEBUG_FS
8066 unregister_netdevice_notifier(&stmmac_notifier);
8067 debugfs_remove_recursive(stmmac_fs_dir);
8068 #endif
8069 }
8070
8071 module_init(stmmac_init)
8072 module_exit(stmmac_exit)
8073
8074 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8075 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8076 MODULE_LICENSE("GPL");
8077