1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/pm_wakeirq.h>
33 #include <linux/prefetch.h>
34 #include <linux/pinctrl/consumer.h>
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38 #endif /* CONFIG_DEBUG_FS */
39 #include <linux/net_tstamp.h>
40 #include <linux/phylink.h>
41 #include <linux/udp.h>
42 #include <linux/bpf_trace.h>
43 #include <net/page_pool/helpers.h>
44 #include <net/pkt_cls.h>
45 #include <net/xdp_sock_drv.h>
46 #include "stmmac_ptp.h"
47 #include "stmmac_fpe.h"
48 #include "stmmac.h"
49 #include "stmmac_xdp.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53 #include "dwxgmac2.h"
54 #include "hwif.h"
55
56 /* As long as the interface is active, we keep the timestamping counter enabled
57 * with fine resolution and binary rollover. This avoids non-monotonic behavior
58 * (clock jumps) when changing timestamping settings at runtime.
59 */
60 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
61 PTP_TCR_TSCTRLSSR)
62
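/* Round DMA buffer sizes up to the cache line size and then to a 16-byte boundary */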
63 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
64 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
65
66 /* Module parameters */
67 #define TX_TIMEO 5000
68 static int watchdog = TX_TIMEO;
69 module_param(watchdog, int, 0644);
70 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
71
72 static int debug = -1;
73 module_param(debug, int, 0644);
74 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
75
76 static int phyaddr = -1;
77 module_param(phyaddr, int, 0444);
78 MODULE_PARM_DESC(phyaddr, "Physical device address");
79
80 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
81
82 /* Limit to make sure XDP TX and slow path can coexist */
83 #define STMMAC_XSK_TX_BUDGET_MAX 256
84 #define STMMAC_TX_XSK_AVAIL 16
85 #define STMMAC_RX_FILL_BATCH 16
86
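/* Per-frame XDP verdict flags used by the RX path: the frame is either
 * passed to the network stack, consumed (dropped), transmitted on the
 * XDP TX queue, or redirected.
 */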
87 #define STMMAC_XDP_PASS 0
88 #define STMMAC_XDP_CONSUMED BIT(0)
89 #define STMMAC_XDP_TX BIT(1)
90 #define STMMAC_XDP_REDIRECT BIT(2)
91
92 static int flow_ctrl = 0xdead;
93 module_param(flow_ctrl, int, 0644);
94 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
95
96 static int pause = PAUSE_TIME;
97 module_param(pause, int, 0644);
98 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
99
100 #define TC_DEFAULT 64
101 static int tc = TC_DEFAULT;
102 module_param(tc, int, 0644);
103 MODULE_PARM_DESC(tc, "DMA threshold control value");
104
105 /* This is unused */
106 #define DEFAULT_BUFSIZE 1536
107 static int buf_sz = DEFAULT_BUFSIZE;
108 module_param(buf_sz, int, 0644);
109 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
110
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114
115 #define STMMAC_DEFAULT_LPI_TIMER 1000
116 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, uint, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122 * but allow the user to force the use of chain mode instead.
123 */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
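/* Note (illustrative usage, not from the original sources): these module
 * parameters can be set at load time, e.g.
 *
 *	modprobe stmmac chain_mode=1 eee_timer=2000
 *
 * or with "stmmac.eee_timer=2000" on the kernel command line when the
 * driver is built in.
 */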
127
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 u32 rxmode, u32 chan);
141
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147
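/* Convert a TX coalescing timer value in microseconds to a ktime_t */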
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149
150 /**
151 * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
152 * @bsp_priv: BSP private data structure (unused)
153 * @clk_tx_i: the transmit clock
154 * @interface: the selected interface mode
155 * @speed: the speed that the MAC will be operating at
156 *
157 * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
158 * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
159 * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
160 * the plat_data->set_clk_tx_rate method directly, call it via their own
161 * implementation, or implement their own method should they have more
162 * complex requirements. It is intended to only be used in this method.
163 *
164 * plat_data->clk_tx_i must be filled in.
165 */
166 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
167 phy_interface_t interface, int speed)
168 {
169 long rate = rgmii_clock(speed);
170
171 /* Silently ignore unsupported speeds as rgmii_clock() only
172 * supports 10, 100 and 1000Mbps. We do not want to spit
173 * errors for 2500 and higher speeds here.
174 */
175 if (rate < 0)
176 return 0;
177
178 return clk_set_rate(clk_tx_i, rate);
179 }
180 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
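/* Illustrative sketch (not from any particular platform): a dwmac glue
 * driver would typically hook this helper up from its probe function, e.g.
 *
 *	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
 *	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;
 *
 * where the "tx" clock name is hypothetical and platform specific.
 */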
181
182 /**
183 * stmmac_verify_args - verify the driver parameters.
184 * Description: it checks the driver parameters and sets a default in case of
185 * errors.
186 */
187 static void stmmac_verify_args(void)
188 {
189 if (unlikely(watchdog < 0))
190 watchdog = TX_TIMEO;
191 if (unlikely((pause < 0) || (pause > 0xffff)))
192 pause = PAUSE_TIME;
193
194 if (flow_ctrl != 0xdead)
195 pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
196 }
197
198 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
199 {
200 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
201 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
202 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
203 u32 queue;
204
205 for (queue = 0; queue < maxq; queue++) {
206 struct stmmac_channel *ch = &priv->channel[queue];
207
208 if (stmmac_xdp_is_enabled(priv) &&
209 test_bit(queue, priv->af_xdp_zc_qps)) {
210 napi_disable(&ch->rxtx_napi);
211 continue;
212 }
213
214 if (queue < rx_queues_cnt)
215 napi_disable(&ch->rx_napi);
216 if (queue < tx_queues_cnt)
217 napi_disable(&ch->tx_napi);
218 }
219 }
220
221 /**
222 * stmmac_disable_all_queues - Disable all queues
223 * @priv: driver private structure
224 */
225 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
226 {
227 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
228 struct stmmac_rx_queue *rx_q;
229 u32 queue;
230
231 /* synchronize_rcu() needed for pending XDP buffers to drain */
232 for (queue = 0; queue < rx_queues_cnt; queue++) {
233 rx_q = &priv->dma_conf.rx_queue[queue];
234 if (rx_q->xsk_pool) {
235 synchronize_rcu();
236 break;
237 }
238 }
239
240 __stmmac_disable_all_queues(priv);
241 }
242
243 /**
244 * stmmac_enable_all_queues - Enable all queues
245 * @priv: driver private structure
246 */
247 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
248 {
249 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
250 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
251 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
252 u32 queue;
253
254 for (queue = 0; queue < maxq; queue++) {
255 struct stmmac_channel *ch = &priv->channel[queue];
256
257 if (stmmac_xdp_is_enabled(priv) &&
258 test_bit(queue, priv->af_xdp_zc_qps)) {
259 napi_enable(&ch->rxtx_napi);
260 continue;
261 }
262
263 if (queue < rx_queues_cnt)
264 napi_enable(&ch->rx_napi);
265 if (queue < tx_queues_cnt)
266 napi_enable(&ch->tx_napi);
267 }
268 }
269
270 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
271 {
272 if (!test_bit(STMMAC_DOWN, &priv->state) &&
273 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
274 queue_work(priv->wq, &priv->service_task);
275 }
276
277 static void stmmac_global_err(struct stmmac_priv *priv)
278 {
279 netif_carrier_off(priv->dev);
280 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
281 stmmac_service_event_schedule(priv);
282 }
283
284 static void print_pkt(unsigned char *buf, int len)
285 {
286 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
287 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
288 }
289
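/**
 * stmmac_tx_avail - number of free TX descriptors in a queue
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: return how many TX descriptors can still be used, keeping
 * one slot unused so that a full ring can be distinguished from an empty one.
 */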
290 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
291 {
292 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
293 u32 avail;
294
295 if (tx_q->dirty_tx > tx_q->cur_tx)
296 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
297 else
298 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
299
300 return avail;
301 }
302
303 /**
304 * stmmac_rx_dirty - Get RX queue dirty
305 * @priv: driver private structure
306 * @queue: RX queue index
307 */
308 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
309 {
310 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
311 u32 dirty;
312
313 if (rx_q->dirty_rx <= rx_q->cur_rx)
314 dirty = rx_q->cur_rx - rx_q->dirty_rx;
315 else
316 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
317
318 return dirty;
319 }
320
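/* Return true if any TX queue still has descriptors pending completion,
 * i.e. the transmit path is not yet idle enough to enter LPI.
 */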
321 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
322 {
323 u32 tx_cnt = priv->plat->tx_queues_to_use;
324 u32 queue;
325
326 /* check if all TX queues have the work finished */
327 for (queue = 0; queue < tx_cnt; queue++) {
328 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
329
330 if (tx_q->dirty_tx != tx_q->cur_tx)
331 return true; /* still unfinished work */
332 }
333
334 return false;
335 }
336
337 static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
338 {
339 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
340 }
341
342 /**
343 * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
344 * @priv: driver private structure
345 * Description: this function checks whether the TX path is idle and, if
346 * so, enters LPI mode for EEE.
347 */
348 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
349 {
350 if (stmmac_eee_tx_busy(priv)) {
351 stmmac_restart_sw_lpi_timer(priv);
352 return;
353 }
354
355 /* Check and enter in LPI mode */
356 if (!priv->tx_path_in_lpi_mode)
357 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
358 priv->tx_lpi_clk_stop, 0);
359 }
360
361 /**
362 * stmmac_stop_sw_lpi - stop transmitting LPI
363 * @priv: driver private structure
364 * Description: When using software-controlled LPI, stop transmitting LPI state.
365 */
366 static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
367 {
368 timer_delete_sync(&priv->eee_ctrl_timer);
369 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
370 priv->tx_path_in_lpi_mode = false;
371 }
372
373 /**
374 * stmmac_eee_ctrl_timer - EEE TX SW timer.
375 * @t: timer_list struct containing private info
376 * Description:
377 * if there is no data transfer and if we are not in LPI state,
378 * then the MAC transmitter can be moved to the LPI state.
379 */
380 static void stmmac_eee_ctrl_timer(struct timer_list *t)
381 {
382 struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);
383
384 stmmac_try_to_start_sw_lpi(priv);
385 }
386
387 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
388 * @priv: driver private structure
389 * @p : descriptor pointer
390 * @skb : the socket buffer
391 * Description :
392 * This function reads the timestamp from the descriptor, performs some
393 * sanity checks and passes it to the stack.
394 */
395 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
396 struct dma_desc *p, struct sk_buff *skb)
397 {
398 struct skb_shared_hwtstamps shhwtstamp;
399 bool found = false;
400 u64 ns = 0;
401
402 if (!priv->hwts_tx_en)
403 return;
404
405 /* exit if skb doesn't support hw tstamp */
406 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
407 return;
408
409 /* check tx tstamp status */
410 if (stmmac_get_tx_timestamp_status(priv, p)) {
411 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
412 found = true;
413 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
414 found = true;
415 }
416
417 if (found) {
418 ns -= priv->plat->cdc_error_adj;
419
420 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
421 shhwtstamp.hwtstamp = ns_to_ktime(ns);
422
423 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
424 /* pass tstamp to stack */
425 skb_tstamp_tx(skb, &shhwtstamp);
426 }
427 }
428
429 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
430 * @priv: driver private structure
431 * @p : descriptor pointer
432 * @np : next descriptor pointer
433 * @skb : the socket buffer
434 * Description :
435 * This function reads the received packet's timestamp from the descriptor
436 * and passes it to the stack. It also performs some sanity checks.
437 */
438 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
439 struct dma_desc *np, struct sk_buff *skb)
440 {
441 struct skb_shared_hwtstamps *shhwtstamp = NULL;
442 struct dma_desc *desc = p;
443 u64 ns = 0;
444
445 if (!priv->hwts_rx_en)
446 return;
447 /* For GMAC4, the valid timestamp is from CTX next desc. */
448 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
449 desc = np;
450
451 /* Check if timestamp is available */
452 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
453 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
454
455 ns -= priv->plat->cdc_error_adj;
456
457 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
458 shhwtstamp = skb_hwtstamps(skb);
459 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
460 shhwtstamp->hwtstamp = ns_to_ktime(ns);
461 } else {
462 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
463 }
464 }
465
466 /**
467 * stmmac_hwtstamp_set - control hardware timestamping.
468 * @dev: device pointer.
469 * @config: the timestamping configuration.
470 * @extack: netlink extended ack structure for error reporting.
471 * Description:
472 * This function configures the MAC to enable/disable both outgoing (TX)
473 * and incoming (RX) packet timestamping based on user input.
474 * Return Value:
475 * 0 on success and an appropriate negative errno on failure.
476 */
477 static int stmmac_hwtstamp_set(struct net_device *dev,
478 struct kernel_hwtstamp_config *config,
479 struct netlink_ext_ack *extack)
480 {
481 struct stmmac_priv *priv = netdev_priv(dev);
482 u32 ptp_v2 = 0;
483 u32 tstamp_all = 0;
484 u32 ptp_over_ipv4_udp = 0;
485 u32 ptp_over_ipv6_udp = 0;
486 u32 ptp_over_ethernet = 0;
487 u32 snap_type_sel = 0;
488 u32 ts_master_en = 0;
489 u32 ts_event_en = 0;
490
491 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
492 NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
493 priv->hwts_tx_en = 0;
494 priv->hwts_rx_en = 0;
495
496 return -EOPNOTSUPP;
497 }
498
499 if (!netif_running(dev)) {
500 NL_SET_ERR_MSG_MOD(extack,
501 "Cannot change timestamping configuration while down");
502 return -ENODEV;
503 }
504
505 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
506 __func__, config->flags, config->tx_type, config->rx_filter);
507
508 if (config->tx_type != HWTSTAMP_TX_OFF &&
509 config->tx_type != HWTSTAMP_TX_ON)
510 return -ERANGE;
511
512 if (priv->adv_ts) {
513 switch (config->rx_filter) {
514 case HWTSTAMP_FILTER_NONE:
515 /* time stamp no incoming packet at all */
516 config->rx_filter = HWTSTAMP_FILTER_NONE;
517 break;
518
519 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
520 /* PTP v1, UDP, any kind of event packet */
521 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
522 /* 'xmac' hardware can support Sync, Pdelay_Req and
523 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
524 * This leaves Delay_Req timestamps out.
525 * Enable all events *and* general purpose message
526 * timestamping
527 */
528 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
529 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
530 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
531 break;
532
533 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
534 /* PTP v1, UDP, Sync packet */
535 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
536 /* take time stamp for SYNC messages only */
537 ts_event_en = PTP_TCR_TSEVNTENA;
538
539 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
540 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
541 break;
542
543 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
544 /* PTP v1, UDP, Delay_req packet */
545 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
546 /* take time stamp for Delay_Req messages only */
547 ts_master_en = PTP_TCR_TSMSTRENA;
548 ts_event_en = PTP_TCR_TSEVNTENA;
549
550 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
551 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
552 break;
553
554 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
555 /* PTP v2, UDP, any kind of event packet */
556 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
557 ptp_v2 = PTP_TCR_TSVER2ENA;
558 /* take time stamp for all event messages */
559 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
560
561 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
562 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
563 break;
564
565 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
566 /* PTP v2, UDP, Sync packet */
567 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
568 ptp_v2 = PTP_TCR_TSVER2ENA;
569 /* take time stamp for SYNC messages only */
570 ts_event_en = PTP_TCR_TSEVNTENA;
571
572 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
573 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
574 break;
575
576 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
577 /* PTP v2, UDP, Delay_req packet */
578 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
579 ptp_v2 = PTP_TCR_TSVER2ENA;
580 /* take time stamp for Delay_Req messages only */
581 ts_master_en = PTP_TCR_TSMSTRENA;
582 ts_event_en = PTP_TCR_TSEVNTENA;
583
584 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
585 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
586 break;
587
588 case HWTSTAMP_FILTER_PTP_V2_EVENT:
589 /* PTP v2/802.1AS, any layer, any kind of event packet */
590 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
591 ptp_v2 = PTP_TCR_TSVER2ENA;
592 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
593 if (priv->synopsys_id < DWMAC_CORE_4_10)
594 ts_event_en = PTP_TCR_TSEVNTENA;
595 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
596 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
597 ptp_over_ethernet = PTP_TCR_TSIPENA;
598 break;
599
600 case HWTSTAMP_FILTER_PTP_V2_SYNC:
601 /* PTP v2/802.1AS, any layer, Sync packet */
602 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
603 ptp_v2 = PTP_TCR_TSVER2ENA;
604 /* take time stamp for SYNC messages only */
605 ts_event_en = PTP_TCR_TSEVNTENA;
606
607 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
608 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
609 ptp_over_ethernet = PTP_TCR_TSIPENA;
610 break;
611
612 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
613 /* PTP v2/802.1AS, any layer, Delay_req packet */
614 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
615 ptp_v2 = PTP_TCR_TSVER2ENA;
616 /* take time stamp for Delay_Req messages only */
617 ts_master_en = PTP_TCR_TSMSTRENA;
618 ts_event_en = PTP_TCR_TSEVNTENA;
619
620 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
621 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
622 ptp_over_ethernet = PTP_TCR_TSIPENA;
623 break;
624
625 case HWTSTAMP_FILTER_NTP_ALL:
626 case HWTSTAMP_FILTER_ALL:
627 /* time stamp any incoming packet */
628 config->rx_filter = HWTSTAMP_FILTER_ALL;
629 tstamp_all = PTP_TCR_TSENALL;
630 break;
631
632 default:
633 return -ERANGE;
634 }
635 } else {
636 switch (config->rx_filter) {
637 case HWTSTAMP_FILTER_NONE:
638 config->rx_filter = HWTSTAMP_FILTER_NONE;
639 break;
640 default:
641 /* PTP v1, UDP, any kind of event packet */
642 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
643 break;
644 }
645 }
646 priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
647 priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
648
649 priv->systime_flags = STMMAC_HWTS_ACTIVE;
650
651 if (priv->hwts_tx_en || priv->hwts_rx_en) {
652 priv->systime_flags |= tstamp_all | ptp_v2 |
653 ptp_over_ethernet | ptp_over_ipv6_udp |
654 ptp_over_ipv4_udp | ts_event_en |
655 ts_master_en | snap_type_sel;
656 }
657
658 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
659
660 priv->tstamp_config = *config;
661
662 return 0;
663 }
664
665 /**
666 * stmmac_hwtstamp_get - read hardware timestamping.
667 * @dev: device pointer.
668 * @config: the timestamping configuration.
669 * Description:
670 * This function obtains the current hardware timestamping settings
671 * as requested.
672 */
673 static int stmmac_hwtstamp_get(struct net_device *dev,
674 struct kernel_hwtstamp_config *config)
675 {
676 struct stmmac_priv *priv = netdev_priv(dev);
677
678 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
679 return -EOPNOTSUPP;
680
681 *config = priv->tstamp_config;
682
683 return 0;
684 }
685
686 /**
687 * stmmac_init_tstamp_counter - init hardware timestamping counter
688 * @priv: driver private structure
689 * @systime_flags: timestamping flags
690 * Description:
691 * Initialize hardware counter for packet timestamping.
692 * This is valid as long as the interface is open and not suspended.
693 * It will be rerun after resuming from suspend, in which case the timestamping
694 * flags updated by stmmac_hwtstamp_set() also need to be restored.
695 */
696 static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
697 u32 systime_flags)
698 {
699 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
700 struct timespec64 now;
701 u32 sec_inc = 0;
702 u64 temp = 0;
703
704 if (!priv->plat->clk_ptp_rate) {
705 netdev_err(priv->dev, "Invalid PTP clock rate\n");
706 return -EINVAL;
707 }
708
709 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
710 priv->systime_flags = systime_flags;
711
712 /* program Sub Second Increment reg */
713 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
714 priv->plat->clk_ptp_rate,
715 xmac, &sec_inc);
716 temp = div_u64(1000000000ULL, sec_inc);
717
718 /* Store sub second increment for later use */
719 priv->sub_second_inc = sec_inc;
720
721 /* calculate the default addend value:
722 * addend = (2^32) / freq_div_ratio
723 * where freq_div_ratio = clk_ptp_rate / (1e9 / sec_inc), i.e. the ratio
724 * between the actual PTP clock rate and the nominal rate implied by sec_inc
725 */
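/* Worked example with hypothetical numbers: for clk_ptp_rate = 100MHz and
 * sec_inc = 20ns, freq_div_ratio = 1e8 / (1e9 / 20) = 2, so the default
 * addend is 2^32 / 2 = 2^31: the 32-bit accumulator wraps every second PTP
 * clock cycle and each wrap advances the sub-second register by sec_inc.
 */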
726 temp = (u64)(temp << 32);
727 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
728 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
729
730 /* initialize system time */
731 ktime_get_real_ts64(&now);
732
733 /* lower 32 bits of tv_sec are safe until y2106 */
734 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
735
736 return 0;
737 }
738
739 /**
740 * stmmac_init_timestamping - initialise timestamping
741 * @priv: driver private structure
742 * Description: this is to verify whether the HW supports PTPv1 or PTPv2
743 * by looking at the HW capability register, and to initialise the
744 * timestamping counter (the PTP clock itself is registered by the caller).
745 */
746 static int stmmac_init_timestamping(struct stmmac_priv *priv)
747 {
748 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
749 int ret;
750
751 if (priv->plat->ptp_clk_freq_config)
752 priv->plat->ptp_clk_freq_config(priv);
753
754 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
755 netdev_info(priv->dev, "PTP not supported by HW\n");
756 return -EOPNOTSUPP;
757 }
758
759 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
760 if (ret) {
761 netdev_warn(priv->dev, "PTP init failed\n");
762 return ret;
763 }
764
765 priv->adv_ts = 0;
766 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
767 if (xmac && priv->dma_cap.atime_stamp)
768 priv->adv_ts = 1;
769 /* Dwmac 3.x core with extend_desc can support adv_ts */
770 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
771 priv->adv_ts = 1;
772
773 if (priv->dma_cap.time_stamp)
774 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
775
776 if (priv->adv_ts)
777 netdev_info(priv->dev,
778 "IEEE 1588-2008 Advanced Timestamp supported\n");
779
780 priv->hwts_tx_en = 0;
781 priv->hwts_rx_en = 0;
782
783 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
784 stmmac_hwtstamp_correct_latency(priv, priv);
785
786 return 0;
787 }
788
789 static void stmmac_setup_ptp(struct stmmac_priv *priv)
790 {
791 int ret;
792
793 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
794 if (ret < 0)
795 netdev_warn(priv->dev,
796 "failed to enable PTP reference clock: %pe\n",
797 ERR_PTR(ret));
798
799 if (stmmac_init_timestamping(priv) == 0)
800 stmmac_ptp_register(priv);
801 }
802
803 static void stmmac_release_ptp(struct stmmac_priv *priv)
804 {
805 stmmac_ptp_unregister(priv);
806 clk_disable_unprepare(priv->plat->clk_ptp_ref);
807 }
808
809 /**
810 * stmmac_mac_flow_ctrl - Configure flow control in all queues
811 * @priv: driver private structure
812 * @duplex: duplex passed to the next function
813 * @flow_ctrl: desired flow control modes
814 * Description: It is used for configuring the flow control in all queues
815 */
816 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
817 unsigned int flow_ctrl)
818 {
819 u32 tx_cnt = priv->plat->tx_queues_to_use;
820
821 stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
822 tx_cnt);
823 }
824
825 static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
826 phy_interface_t interface)
827 {
828 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
829
830 /* Refresh the MAC-specific capabilities */
831 stmmac_mac_update_caps(priv);
832
833 config->mac_capabilities = priv->hw->link.caps;
834
835 if (priv->plat->max_speed)
836 phylink_limit_mac_speed(config, priv->plat->max_speed);
837
838 return config->mac_capabilities;
839 }
840
841 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
842 phy_interface_t interface)
843 {
844 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
845 struct phylink_pcs *pcs;
846
847 if (priv->plat->select_pcs) {
848 pcs = priv->plat->select_pcs(priv, interface);
849 if (!IS_ERR(pcs))
850 return pcs;
851 }
852
853 return NULL;
854 }
855
856 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
857 const struct phylink_link_state *state)
858 {
859 /* Nothing to do, xpcs_config() handles everything */
860 }
861
862 static void stmmac_mac_link_down(struct phylink_config *config,
863 unsigned int mode, phy_interface_t interface)
864 {
865 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
866
867 stmmac_mac_set(priv, priv->ioaddr, false);
868 if (priv->dma_cap.eee)
869 stmmac_set_eee_pls(priv, priv->hw, false);
870
871 if (stmmac_fpe_supported(priv))
872 ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
873 }
874
875 static void stmmac_mac_link_up(struct phylink_config *config,
876 struct phy_device *phy,
877 unsigned int mode, phy_interface_t interface,
878 int speed, int duplex,
879 bool tx_pause, bool rx_pause)
880 {
881 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
882 unsigned int flow_ctrl;
883 u32 old_ctrl, ctrl;
884 int ret;
885
886 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
887 priv->plat->serdes_powerup)
888 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
889
890 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
891 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
892
893 if (interface == PHY_INTERFACE_MODE_USXGMII) {
894 switch (speed) {
895 case SPEED_10000:
896 ctrl |= priv->hw->link.xgmii.speed10000;
897 break;
898 case SPEED_5000:
899 ctrl |= priv->hw->link.xgmii.speed5000;
900 break;
901 case SPEED_2500:
902 ctrl |= priv->hw->link.xgmii.speed2500;
903 break;
904 default:
905 return;
906 }
907 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
908 switch (speed) {
909 case SPEED_100000:
910 ctrl |= priv->hw->link.xlgmii.speed100000;
911 break;
912 case SPEED_50000:
913 ctrl |= priv->hw->link.xlgmii.speed50000;
914 break;
915 case SPEED_40000:
916 ctrl |= priv->hw->link.xlgmii.speed40000;
917 break;
918 case SPEED_25000:
919 ctrl |= priv->hw->link.xlgmii.speed25000;
920 break;
921 case SPEED_10000:
922 ctrl |= priv->hw->link.xgmii.speed10000;
923 break;
924 case SPEED_2500:
925 ctrl |= priv->hw->link.speed2500;
926 break;
927 case SPEED_1000:
928 ctrl |= priv->hw->link.speed1000;
929 break;
930 default:
931 return;
932 }
933 } else {
934 switch (speed) {
935 case SPEED_2500:
936 ctrl |= priv->hw->link.speed2500;
937 break;
938 case SPEED_1000:
939 ctrl |= priv->hw->link.speed1000;
940 break;
941 case SPEED_100:
942 ctrl |= priv->hw->link.speed100;
943 break;
944 case SPEED_10:
945 ctrl |= priv->hw->link.speed10;
946 break;
947 default:
948 return;
949 }
950 }
951
952 if (priv->plat->fix_mac_speed)
953 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
954
955 if (!duplex)
956 ctrl &= ~priv->hw->link.duplex;
957 else
958 ctrl |= priv->hw->link.duplex;
959
960 /* Flow Control operation */
961 if (rx_pause && tx_pause)
962 flow_ctrl = FLOW_AUTO;
963 else if (rx_pause && !tx_pause)
964 flow_ctrl = FLOW_RX;
965 else if (!rx_pause && tx_pause)
966 flow_ctrl = FLOW_TX;
967 else
968 flow_ctrl = FLOW_OFF;
969
970 stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);
971
972 if (ctrl != old_ctrl)
973 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
974
975 if (priv->plat->set_clk_tx_rate) {
976 ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
977 priv->plat->clk_tx_i,
978 interface, speed);
979 if (ret < 0)
980 netdev_err(priv->dev,
981 "failed to configure %s transmit clock for %dMbps: %pe\n",
982 phy_modes(interface), speed, ERR_PTR(ret));
983 }
984
985 stmmac_mac_set(priv, priv->ioaddr, true);
986 if (priv->dma_cap.eee)
987 stmmac_set_eee_pls(priv, priv->hw, true);
988
989 if (stmmac_fpe_supported(priv))
990 ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);
991
992 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
993 stmmac_hwtstamp_correct_latency(priv, priv);
994 }
995
996 static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
997 {
998 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
999
1000 priv->eee_active = false;
1001
1002 mutex_lock(&priv->lock);
1003
1004 priv->eee_enabled = false;
1005
1006 netdev_dbg(priv->dev, "disable EEE\n");
1007 priv->eee_sw_timer_en = false;
1008 timer_delete_sync(&priv->eee_ctrl_timer);
1009 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
1010 priv->tx_path_in_lpi_mode = false;
1011
1012 stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
1013 mutex_unlock(&priv->lock);
1014 }
1015
1016 static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
1017 bool tx_clk_stop)
1018 {
1019 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
1020 int ret;
1021
1022 priv->tx_lpi_timer = timer;
1023 priv->eee_active = true;
1024
1025 mutex_lock(&priv->lock);
1026
1027 priv->eee_enabled = true;
1028
1029 /* Update the transmit clock stop according to PHY capability if
1030 * the platform allows
1031 */
1032 if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
1033 priv->tx_lpi_clk_stop = tx_clk_stop;
1034
1035 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
1036 STMMAC_DEFAULT_TWT_LS);
1037
1038 /* Try to configure the hardware timer. */
1039 ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
1040 priv->tx_lpi_clk_stop, priv->tx_lpi_timer);
1041
1042 if (ret) {
1043 /* Hardware timer mode not supported, or value out of range.
1044 * Fall back to using software LPI mode
1045 */
1046 priv->eee_sw_timer_en = true;
1047 stmmac_restart_sw_lpi_timer(priv);
1048 }
1049
1050 mutex_unlock(&priv->lock);
1051 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
1052
1053 return 0;
1054 }
1055
1056 static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
1057 phy_interface_t interface)
1058 {
1059 struct net_device *ndev = to_net_dev(config->dev);
1060 struct stmmac_priv *priv = netdev_priv(ndev);
1061
1062 if (priv->plat->mac_finish)
1063 priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);
1064
1065 return 0;
1066 }
1067
1068 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1069 .mac_get_caps = stmmac_mac_get_caps,
1070 .mac_select_pcs = stmmac_mac_select_pcs,
1071 .mac_config = stmmac_mac_config,
1072 .mac_link_down = stmmac_mac_link_down,
1073 .mac_link_up = stmmac_mac_link_up,
1074 .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
1075 .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
1076 .mac_finish = stmmac_mac_finish,
1077 };
1078
1079 /**
1080 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1081 * @priv: driver private structure
1082 * Description: this is to verify if the HW supports the Physical Coding
1083 * Sublayer (PCS), which can be used when the MAC is configured for an
1084 * RGMII or SGMII PHY interface.
1085 */
1086 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1087 {
1088 int interface = priv->plat->phy_interface;
1089
1090 if (priv->dma_cap.pcs) {
1091 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1092 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1093 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1094 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1095 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1096 priv->hw->pcs = STMMAC_PCS_RGMII;
1097 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1098 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1099 priv->hw->pcs = STMMAC_PCS_SGMII;
1100 }
1101 }
1102 }
1103
1104 /**
1105 * stmmac_init_phy - PHY initialization
1106 * @dev: net device structure
1107 * Description: it initializes the driver's PHY state, and attaches the PHY
1108 * to the MAC driver.
1109 * Return value:
1110 * 0 on success
1111 */
1112 static int stmmac_init_phy(struct net_device *dev)
1113 {
1114 struct stmmac_priv *priv = netdev_priv(dev);
1115 int mode = priv->plat->phy_interface;
1116 struct fwnode_handle *phy_fwnode;
1117 struct fwnode_handle *fwnode;
1118 struct ethtool_keee eee;
1119 int ret;
1120
1121 if (!phylink_expects_phy(priv->phylink))
1122 return 0;
1123
1124 if (priv->hw->xpcs &&
1125 xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
1126 return 0;
1127
1128 fwnode = priv->plat->port_node;
1129 if (!fwnode)
1130 fwnode = dev_fwnode(priv->device);
1131
1132 if (fwnode)
1133 phy_fwnode = fwnode_get_phy_node(fwnode);
1134 else
1135 phy_fwnode = NULL;
1136
1137 /* Some DT bindings do not set up the PHY handle. Let's try to
1138 * manually parse it
1139 */
1140 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1141 int addr = priv->plat->phy_addr;
1142 struct phy_device *phydev;
1143
1144 if (addr < 0) {
1145 netdev_err(priv->dev, "no phy found\n");
1146 return -ENODEV;
1147 }
1148
1149 phydev = mdiobus_get_phy(priv->mii, addr);
1150 if (!phydev) {
1151 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1152 return -ENODEV;
1153 }
1154
1155 ret = phylink_connect_phy(priv->phylink, phydev);
1156 } else {
1157 fwnode_handle_put(phy_fwnode);
1158 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1159 }
1160
1161 if (ret) {
1162 netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
1163 ERR_PTR(ret));
1164 return ret;
1165 }
1166
1167 /* Configure phylib's copy of the LPI timer. Normally,
1168 * phylink_config.lpi_timer_default would do this, but there is a
1169 * chance that userspace could change the eee_timer setting via sysfs
1170 * before the first open. Thus, preserve existing behaviour.
1171 */
1172 if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
1173 eee.tx_lpi_timer = priv->tx_lpi_timer;
1174 phylink_ethtool_set_eee(priv->phylink, &eee);
1175 }
1176
1177 if (!priv->plat->pmt) {
1178 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1179
1180 phylink_ethtool_get_wol(priv->phylink, &wol);
1181 device_set_wakeup_capable(priv->device, !!wol.supported);
1182 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1183 }
1184
1185 return 0;
1186 }
1187
1188 static int stmmac_phy_setup(struct stmmac_priv *priv)
1189 {
1190 struct stmmac_mdio_bus_data *mdio_bus_data;
1191 struct phylink_config *config;
1192 struct fwnode_handle *fwnode;
1193 struct phylink_pcs *pcs;
1194 struct phylink *phylink;
1195
1196 config = &priv->phylink_config;
1197
1198 config->dev = &priv->dev->dev;
1199 config->type = PHYLINK_NETDEV;
1200 config->mac_managed_pm = true;
1201
1202 /* Stmmac always requires an RX clock for hardware initialization */
1203 config->mac_requires_rxc = true;
1204
1205 if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI))
1206 config->eee_rx_clk_stop_enable = true;
1207
1208 /* Set the default transmit clock stop bit based on the platform glue */
1209 priv->tx_lpi_clk_stop = priv->plat->flags &
1210 STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;
1211
1212 mdio_bus_data = priv->plat->mdio_bus_data;
1213 if (mdio_bus_data)
1214 config->default_an_inband = mdio_bus_data->default_an_inband;
1215
1216 /* Get the PHY interface modes (at the PHY end of the link) that
1217 * are supported by the platform.
1218 */
1219 if (priv->plat->get_interfaces)
1220 priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
1221 config->supported_interfaces);
1222
1223 /* Set the platform/firmware specified interface mode if the
1224 * supported interfaces have not already been provided using
1225 * phy_interface as a last resort.
1226 */
1227 if (phy_interface_empty(config->supported_interfaces))
1228 __set_bit(priv->plat->phy_interface,
1229 config->supported_interfaces);
1230
1231 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1232 if (priv->hw->xpcs)
1233 pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
1234 else
1235 pcs = priv->hw->phylink_pcs;
1236
1237 if (pcs)
1238 phy_interface_or(config->supported_interfaces,
1239 config->supported_interfaces,
1240 pcs->supported_interfaces);
1241
1242 if (priv->dma_cap.eee) {
1243 /* Assume all supported interfaces also support LPI */
1244 memcpy(config->lpi_interfaces, config->supported_interfaces,
1245 sizeof(config->lpi_interfaces));
1246
1247 /* All full duplex speeds of 100Mbps and above are supported */
1248 config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
1249 config->lpi_timer_default = eee_timer * 1000;
1250 config->eee_enabled_default = true;
1251 }
1252
1253 fwnode = priv->plat->port_node;
1254 if (!fwnode)
1255 fwnode = dev_fwnode(priv->device);
1256
1257 phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
1258 &stmmac_phylink_mac_ops);
1259 if (IS_ERR(phylink))
1260 return PTR_ERR(phylink);
1261
1262 priv->phylink = phylink;
1263 return 0;
1264 }
1265
1266 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1267 struct stmmac_dma_conf *dma_conf)
1268 {
1269 u32 rx_cnt = priv->plat->rx_queues_to_use;
1270 unsigned int desc_size;
1271 void *head_rx;
1272 u32 queue;
1273
1274 /* Display RX rings */
1275 for (queue = 0; queue < rx_cnt; queue++) {
1276 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1277
1278 pr_info("\tRX Queue %u rings\n", queue);
1279
1280 if (priv->extend_desc) {
1281 head_rx = (void *)rx_q->dma_erx;
1282 desc_size = sizeof(struct dma_extended_desc);
1283 } else {
1284 head_rx = (void *)rx_q->dma_rx;
1285 desc_size = sizeof(struct dma_desc);
1286 }
1287
1288 /* Display RX ring */
1289 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1290 rx_q->dma_rx_phy, desc_size);
1291 }
1292 }
1293
1294 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1295 struct stmmac_dma_conf *dma_conf)
1296 {
1297 u32 tx_cnt = priv->plat->tx_queues_to_use;
1298 unsigned int desc_size;
1299 void *head_tx;
1300 u32 queue;
1301
1302 /* Display TX rings */
1303 for (queue = 0; queue < tx_cnt; queue++) {
1304 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1305
1306 pr_info("\tTX Queue %u rings\n", queue);
1307
1308 if (priv->extend_desc) {
1309 head_tx = (void *)tx_q->dma_etx;
1310 desc_size = sizeof(struct dma_extended_desc);
1311 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1312 head_tx = (void *)tx_q->dma_entx;
1313 desc_size = sizeof(struct dma_edesc);
1314 } else {
1315 head_tx = (void *)tx_q->dma_tx;
1316 desc_size = sizeof(struct dma_desc);
1317 }
1318
1319 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1320 tx_q->dma_tx_phy, desc_size);
1321 }
1322 }
1323
1324 static void stmmac_display_rings(struct stmmac_priv *priv,
1325 struct stmmac_dma_conf *dma_conf)
1326 {
1327 /* Display RX ring */
1328 stmmac_display_rx_rings(priv, dma_conf);
1329
1330 /* Display TX ring */
1331 stmmac_display_tx_rings(priv, dma_conf);
1332 }
1333
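/* Headroom reserved at the start of each RX buffer: XDP_PACKET_HEADROOM
 * when an XDP program is attached, NET_SKB_PAD otherwise.
 */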
1334 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1335 {
1336 if (stmmac_xdp_is_enabled(priv))
1337 return XDP_PACKET_HEADROOM;
1338
1339 return NET_SKB_PAD;
1340 }
1341
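/**
 * stmmac_set_bfsize - select the DMA buffer size for a given MTU
 * @mtu: the MTU configured on the interface
 * @bufsize: current DMA buffer size
 * Description: round @mtu up to the next supported DMA buffer size
 * (2, 4, 8 or 16 KiB), falling back to DEFAULT_BUFSIZE for standard MTUs.
 */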
1342 static int stmmac_set_bfsize(int mtu, int bufsize)
1343 {
1344 int ret = bufsize;
1345
1346 if (mtu >= BUF_SIZE_8KiB)
1347 ret = BUF_SIZE_16KiB;
1348 else if (mtu >= BUF_SIZE_4KiB)
1349 ret = BUF_SIZE_8KiB;
1350 else if (mtu >= BUF_SIZE_2KiB)
1351 ret = BUF_SIZE_4KiB;
1352 else if (mtu > DEFAULT_BUFSIZE)
1353 ret = BUF_SIZE_2KiB;
1354 else
1355 ret = DEFAULT_BUFSIZE;
1356
1357 return ret;
1358 }
1359
1360 /**
1361 * stmmac_clear_rx_descriptors - clear RX descriptors
1362 * @priv: driver private structure
1363 * @dma_conf: structure to take the dma data
1364 * @queue: RX queue index
1365 * Description: this function is called to clear the RX descriptors
1366 * whether basic or extended descriptors are in use.
1367 */
1368 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1369 struct stmmac_dma_conf *dma_conf,
1370 u32 queue)
1371 {
1372 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1373 int i;
1374
1375 /* Clear the RX descriptors */
1376 for (i = 0; i < dma_conf->dma_rx_size; i++)
1377 if (priv->extend_desc)
1378 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1379 priv->use_riwt, priv->mode,
1380 (i == dma_conf->dma_rx_size - 1),
1381 dma_conf->dma_buf_sz);
1382 else
1383 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1384 priv->use_riwt, priv->mode,
1385 (i == dma_conf->dma_rx_size - 1),
1386 dma_conf->dma_buf_sz);
1387 }
1388
1389 /**
1390 * stmmac_clear_tx_descriptors - clear tx descriptors
1391 * @priv: driver private structure
1392 * @dma_conf: structure to take the dma data
1393 * @queue: TX queue index.
1394 * Description: this function is called to clear the TX descriptors
1395 * whether basic or extended descriptors are in use.
1396 */
1397 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1398 struct stmmac_dma_conf *dma_conf,
1399 u32 queue)
1400 {
1401 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1402 int i;
1403
1404 /* Clear the TX descriptors */
1405 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1406 int last = (i == (dma_conf->dma_tx_size - 1));
1407 struct dma_desc *p;
1408
1409 if (priv->extend_desc)
1410 p = &tx_q->dma_etx[i].basic;
1411 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1412 p = &tx_q->dma_entx[i].basic;
1413 else
1414 p = &tx_q->dma_tx[i];
1415
1416 stmmac_init_tx_desc(priv, p, priv->mode, last);
1417 }
1418 }
1419
1420 /**
1421 * stmmac_clear_descriptors - clear descriptors
1422 * @priv: driver private structure
1423 * @dma_conf: structure to take the dma data
1424 * Description: this function is called to clear the TX and RX descriptors
1425 * whether basic or extended descriptors are in use.
1426 */
1427 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1428 struct stmmac_dma_conf *dma_conf)
1429 {
1430 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1431 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1432 u32 queue;
1433
1434 /* Clear the RX descriptors */
1435 for (queue = 0; queue < rx_queue_cnt; queue++)
1436 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1437
1438 /* Clear the TX descriptors */
1439 for (queue = 0; queue < tx_queue_cnt; queue++)
1440 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1441 }
1442
1443 /**
1444 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1445 * @priv: driver private structure
1446 * @dma_conf: structure to take the dma data
1447 * @p: descriptor pointer
1448 * @i: descriptor index
1449 * @flags: gfp flag
1450 * @queue: RX queue index
1451 * Description: this function is called to allocate a receive buffer, perform
1452 * the DMA mapping and init the descriptor.
1453 */
1454 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1455 struct stmmac_dma_conf *dma_conf,
1456 struct dma_desc *p,
1457 int i, gfp_t flags, u32 queue)
1458 {
1459 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1460 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1461 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1462
1463 if (priv->dma_cap.host_dma_width <= 32)
1464 gfp |= GFP_DMA32;
1465
1466 if (!buf->page) {
1467 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1468 if (!buf->page)
1469 return -ENOMEM;
1470 buf->page_offset = stmmac_rx_offset(priv);
1471 }
1472
1473 if (priv->sph && !buf->sec_page) {
1474 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1475 if (!buf->sec_page)
1476 return -ENOMEM;
1477
1478 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1479 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1480 } else {
1481 buf->sec_page = NULL;
1482 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1483 }
1484
1485 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1486
1487 stmmac_set_desc_addr(priv, p, buf->addr);
1488 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1489 stmmac_init_desc3(priv, p);
1490
1491 return 0;
1492 }
1493
1494 /**
1495 * stmmac_free_rx_buffer - free RX dma buffers
1496 * @priv: private structure
1497 * @rx_q: RX queue
1498 * @i: buffer index.
1499 */
1500 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1501 struct stmmac_rx_queue *rx_q,
1502 int i)
1503 {
1504 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1505
1506 if (buf->page)
1507 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1508 buf->page = NULL;
1509
1510 if (buf->sec_page)
1511 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1512 buf->sec_page = NULL;
1513 }
1514
1515 /**
1516 * stmmac_free_tx_buffer - free TX dma buffers
1517 * @priv: private structure
1518 * @dma_conf: structure to take the dma data
1519 * @queue: TX queue index
1520 * @i: buffer index.
1521 */
1522 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1523 struct stmmac_dma_conf *dma_conf,
1524 u32 queue, int i)
1525 {
1526 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1527
1528 if (tx_q->tx_skbuff_dma[i].buf &&
1529 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1530 if (tx_q->tx_skbuff_dma[i].map_as_page)
1531 dma_unmap_page(priv->device,
1532 tx_q->tx_skbuff_dma[i].buf,
1533 tx_q->tx_skbuff_dma[i].len,
1534 DMA_TO_DEVICE);
1535 else
1536 dma_unmap_single(priv->device,
1537 tx_q->tx_skbuff_dma[i].buf,
1538 tx_q->tx_skbuff_dma[i].len,
1539 DMA_TO_DEVICE);
1540 }
1541
1542 if (tx_q->xdpf[i] &&
1543 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1544 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1545 xdp_return_frame(tx_q->xdpf[i]);
1546 tx_q->xdpf[i] = NULL;
1547 }
1548
1549 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1550 tx_q->xsk_frames_done++;
1551
1552 if (tx_q->tx_skbuff[i] &&
1553 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1554 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1555 tx_q->tx_skbuff[i] = NULL;
1556 }
1557
1558 tx_q->tx_skbuff_dma[i].buf = 0;
1559 tx_q->tx_skbuff_dma[i].map_as_page = false;
1560 }
1561
1562 /**
1563 * dma_free_rx_skbufs - free RX dma buffers
1564 * @priv: private structure
1565 * @dma_conf: structure to take the dma data
1566 * @queue: RX queue index
1567 */
1568 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1569 struct stmmac_dma_conf *dma_conf,
1570 u32 queue)
1571 {
1572 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1573 int i;
1574
1575 for (i = 0; i < dma_conf->dma_rx_size; i++)
1576 stmmac_free_rx_buffer(priv, rx_q, i);
1577 }
1578
1579 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1580 struct stmmac_dma_conf *dma_conf,
1581 u32 queue, gfp_t flags)
1582 {
1583 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1584 int i;
1585
1586 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1587 struct dma_desc *p;
1588 int ret;
1589
1590 if (priv->extend_desc)
1591 p = &((rx_q->dma_erx + i)->basic);
1592 else
1593 p = rx_q->dma_rx + i;
1594
1595 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1596 queue);
1597 if (ret)
1598 return ret;
1599
1600 rx_q->buf_alloc_num++;
1601 }
1602
1603 return 0;
1604 }
1605
1606 /**
1607 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1608 * @priv: private structure
1609 * @dma_conf: structure to take the dma data
1610 * @queue: RX queue index
1611 */
1612 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1613 struct stmmac_dma_conf *dma_conf,
1614 u32 queue)
1615 {
1616 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1617 int i;
1618
1619 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1620 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1621
1622 if (!buf->xdp)
1623 continue;
1624
1625 xsk_buff_free(buf->xdp);
1626 buf->xdp = NULL;
1627 }
1628 }
1629
1630 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1631 struct stmmac_dma_conf *dma_conf,
1632 u32 queue)
1633 {
1634 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1635 int i;
1636
1637 /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1638 * in struct xdp_buff_xsk to stash driver specific information. Use
1639 * this macro to make sure there are no size violations.
1640 */
1641 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1642
1643 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1644 struct stmmac_rx_buffer *buf;
1645 dma_addr_t dma_addr;
1646 struct dma_desc *p;
1647
1648 if (priv->extend_desc)
1649 p = (struct dma_desc *)(rx_q->dma_erx + i);
1650 else
1651 p = rx_q->dma_rx + i;
1652
1653 buf = &rx_q->buf_pool[i];
1654
1655 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1656 if (!buf->xdp)
1657 return -ENOMEM;
1658
1659 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1660 stmmac_set_desc_addr(priv, p, dma_addr);
1661 rx_q->buf_alloc_num++;
1662 }
1663
1664 return 0;
1665 }
1666
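/* Return the XSK buffer pool bound to @queue, or NULL when XDP zero-copy
 * is not enabled for that queue.
 */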
1667 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1668 {
1669 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1670 return NULL;
1671
1672 return xsk_get_pool_from_qid(priv->dev, queue);
1673 }
1674
1675 /**
1676 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1677 * @priv: driver private structure
1678 * @dma_conf: structure to take the dma data
1679 * @queue: RX queue index
1680 * @flags: gfp flag.
1681 * Description: this function initializes the DMA RX descriptors
1682 * and allocates the socket buffers. It supports the chained and ring
1683 * modes.
1684 */
1685 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1686 struct stmmac_dma_conf *dma_conf,
1687 u32 queue, gfp_t flags)
1688 {
1689 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1690 int ret;
1691
1692 netif_dbg(priv, probe, priv->dev,
1693 "(%s) dma_rx_phy=0x%08x\n", __func__,
1694 (u32)rx_q->dma_rx_phy);
1695
1696 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1697
1698 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1699
1700 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1701
1702 if (rx_q->xsk_pool) {
1703 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1704 MEM_TYPE_XSK_BUFF_POOL,
1705 NULL));
1706 netdev_info(priv->dev,
1707 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1708 rx_q->queue_index);
1709 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1710 } else {
1711 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1712 MEM_TYPE_PAGE_POOL,
1713 rx_q->page_pool));
1714 netdev_info(priv->dev,
1715 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1716 rx_q->queue_index);
1717 }
1718
1719 if (rx_q->xsk_pool) {
1720 /* RX XDP ZC buffer pool may not be populated, e.g.
1721 * xdpsock TX-only.
1722 */
1723 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1724 } else {
1725 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1726 if (ret < 0)
1727 return -ENOMEM;
1728 }
1729
1730 /* Setup the chained descriptor addresses */
1731 if (priv->mode == STMMAC_CHAIN_MODE) {
1732 if (priv->extend_desc)
1733 stmmac_mode_init(priv, rx_q->dma_erx,
1734 rx_q->dma_rx_phy,
1735 dma_conf->dma_rx_size, 1);
1736 else
1737 stmmac_mode_init(priv, rx_q->dma_rx,
1738 rx_q->dma_rx_phy,
1739 dma_conf->dma_rx_size, 0);
1740 }
1741
1742 return 0;
1743 }
1744
1745 static int init_dma_rx_desc_rings(struct net_device *dev,
1746 struct stmmac_dma_conf *dma_conf,
1747 gfp_t flags)
1748 {
1749 struct stmmac_priv *priv = netdev_priv(dev);
1750 u32 rx_count = priv->plat->rx_queues_to_use;
1751 int queue;
1752 int ret;
1753
1754 /* RX INITIALIZATION */
1755 netif_dbg(priv, probe, priv->dev,
1756 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1757
1758 for (queue = 0; queue < rx_count; queue++) {
1759 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1760 if (ret)
1761 goto err_init_rx_buffers;
1762 }
1763
1764 return 0;
1765
1766 err_init_rx_buffers:
1767 while (queue >= 0) {
1768 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1769
1770 if (rx_q->xsk_pool)
1771 dma_free_rx_xskbufs(priv, dma_conf, queue);
1772 else
1773 dma_free_rx_skbufs(priv, dma_conf, queue);
1774
1775 rx_q->buf_alloc_num = 0;
1776 rx_q->xsk_pool = NULL;
1777
1778 queue--;
1779 }
1780
1781 return ret;
1782 }
1783
1784 /**
1785 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1786 * @priv: driver private structure
1787 * @dma_conf: structure to take the dma data
1788 * @queue: TX queue index
1789 * Description: this function initializes the DMA TX descriptors
1790 * and allocates the socket buffers. It supports the chained and ring
1791 * modes.
1792 */
1793 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1794 struct stmmac_dma_conf *dma_conf,
1795 u32 queue)
1796 {
1797 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1798 int i;
1799
1800 netif_dbg(priv, probe, priv->dev,
1801 "(%s) dma_tx_phy=0x%08x\n", __func__,
1802 (u32)tx_q->dma_tx_phy);
1803
1804 /* Setup the chained descriptor addresses */
1805 if (priv->mode == STMMAC_CHAIN_MODE) {
1806 if (priv->extend_desc)
1807 stmmac_mode_init(priv, tx_q->dma_etx,
1808 tx_q->dma_tx_phy,
1809 dma_conf->dma_tx_size, 1);
1810 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1811 stmmac_mode_init(priv, tx_q->dma_tx,
1812 tx_q->dma_tx_phy,
1813 dma_conf->dma_tx_size, 0);
1814 }
1815
1816 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1817
1818 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1819 struct dma_desc *p;
1820
1821 if (priv->extend_desc)
1822 p = &((tx_q->dma_etx + i)->basic);
1823 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1824 p = &((tx_q->dma_entx + i)->basic);
1825 else
1826 p = tx_q->dma_tx + i;
1827
1828 stmmac_clear_desc(priv, p);
1829
1830 tx_q->tx_skbuff_dma[i].buf = 0;
1831 tx_q->tx_skbuff_dma[i].map_as_page = false;
1832 tx_q->tx_skbuff_dma[i].len = 0;
1833 tx_q->tx_skbuff_dma[i].last_segment = false;
1834 tx_q->tx_skbuff[i] = NULL;
1835 }
1836
1837 return 0;
1838 }
1839
1840 static int init_dma_tx_desc_rings(struct net_device *dev,
1841 struct stmmac_dma_conf *dma_conf)
1842 {
1843 struct stmmac_priv *priv = netdev_priv(dev);
1844 u32 tx_queue_cnt;
1845 u32 queue;
1846
1847 tx_queue_cnt = priv->plat->tx_queues_to_use;
1848
1849 for (queue = 0; queue < tx_queue_cnt; queue++)
1850 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1851
1852 return 0;
1853 }
1854
1855 /**
1856 * init_dma_desc_rings - init the RX/TX descriptor rings
1857 * @dev: net device structure
1858 * @dma_conf: structure to take the dma data
1859 * @flags: gfp flag.
1860 * Description: this function initializes the DMA RX/TX descriptors
1861 * and allocates the socket buffers. It supports the chained and ring
1862 * modes.
1863 */
1864 static int init_dma_desc_rings(struct net_device *dev,
1865 struct stmmac_dma_conf *dma_conf,
1866 gfp_t flags)
1867 {
1868 struct stmmac_priv *priv = netdev_priv(dev);
1869 int ret;
1870
1871 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1872 if (ret)
1873 return ret;
1874
1875 ret = init_dma_tx_desc_rings(dev, dma_conf);
1876
1877 stmmac_clear_descriptors(priv, dma_conf);
1878
1879 if (netif_msg_hw(priv))
1880 stmmac_display_rings(priv, dma_conf);
1881
1882 return ret;
1883 }
1884
1885 /**
1886 * dma_free_tx_skbufs - free TX dma buffers
1887 * @priv: private structure
1888 * @dma_conf: structure to take the dma data
1889 * @queue: TX queue index
1890 */
1891 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1892 struct stmmac_dma_conf *dma_conf,
1893 u32 queue)
1894 {
1895 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1896 int i;
1897
1898 tx_q->xsk_frames_done = 0;
1899
1900 for (i = 0; i < dma_conf->dma_tx_size; i++)
1901 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1902
1903 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1904 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1905 tx_q->xsk_frames_done = 0;
1906 tx_q->xsk_pool = NULL;
1907 }
1908 }
1909
1910 /**
1911 * stmmac_free_tx_skbufs - free TX skb buffers
1912 * @priv: private structure
1913 */
1914 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1915 {
1916 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1917 u32 queue;
1918
1919 for (queue = 0; queue < tx_queue_cnt; queue++)
1920 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1921 }
1922
1923 /**
1924 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1925 * @priv: private structure
1926 * @dma_conf: structure to take the dma data
1927 * @queue: RX queue index
1928 */
1929 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1930 struct stmmac_dma_conf *dma_conf,
1931 u32 queue)
1932 {
1933 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1934
1935 /* Release the DMA RX socket buffers */
1936 if (rx_q->xsk_pool)
1937 dma_free_rx_xskbufs(priv, dma_conf, queue);
1938 else
1939 dma_free_rx_skbufs(priv, dma_conf, queue);
1940
1941 rx_q->buf_alloc_num = 0;
1942 rx_q->xsk_pool = NULL;
1943
1944 /* Free DMA regions of consistent memory previously allocated */
1945 if (!priv->extend_desc)
1946 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1947 sizeof(struct dma_desc),
1948 rx_q->dma_rx, rx_q->dma_rx_phy);
1949 else
1950 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1951 sizeof(struct dma_extended_desc),
1952 rx_q->dma_erx, rx_q->dma_rx_phy);
1953
1954 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1955 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1956
1957 kfree(rx_q->buf_pool);
1958 if (rx_q->page_pool)
1959 page_pool_destroy(rx_q->page_pool);
1960 }
1961
1962 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1963 struct stmmac_dma_conf *dma_conf)
1964 {
1965 u32 rx_count = priv->plat->rx_queues_to_use;
1966 u32 queue;
1967
1968 /* Free RX queue resources */
1969 for (queue = 0; queue < rx_count; queue++)
1970 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1971 }
1972
1973 /**
1974 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1975 * @priv: private structure
1976 * @dma_conf: structure to take the dma data
1977 * @queue: TX queue index
1978 */
1979 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1980 struct stmmac_dma_conf *dma_conf,
1981 u32 queue)
1982 {
1983 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1984 size_t size;
1985 void *addr;
1986
1987 /* Release the DMA TX socket buffers */
1988 dma_free_tx_skbufs(priv, dma_conf, queue);
1989
1990 if (priv->extend_desc) {
1991 size = sizeof(struct dma_extended_desc);
1992 addr = tx_q->dma_etx;
1993 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1994 size = sizeof(struct dma_edesc);
1995 addr = tx_q->dma_entx;
1996 } else {
1997 size = sizeof(struct dma_desc);
1998 addr = tx_q->dma_tx;
1999 }
2000
2001 size *= dma_conf->dma_tx_size;
2002
2003 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2004
2005 kfree(tx_q->tx_skbuff_dma);
2006 kfree(tx_q->tx_skbuff);
2007 }
2008
2009 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2010 struct stmmac_dma_conf *dma_conf)
2011 {
2012 u32 tx_count = priv->plat->tx_queues_to_use;
2013 u32 queue;
2014
2015 /* Free TX queue resources */
2016 for (queue = 0; queue < tx_count; queue++)
2017 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2018 }
2019
2020 /**
2021 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2022 * @priv: private structure
2023 * @dma_conf: structure to take the dma data
2024 * @queue: RX queue index
2025 * Description: according to which descriptor can be used (extended or basic)
2026 * this function allocates the resources for TX and RX paths. In case of
2027 * reception, for example, it pre-allocates the RX socket buffer in order to
2028 * allow the zero-copy mechanism.
2029 */
2030 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2031 struct stmmac_dma_conf *dma_conf,
2032 u32 queue)
2033 {
2034 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2035 struct stmmac_channel *ch = &priv->channel[queue];
2036 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2037 struct page_pool_params pp_params = { 0 };
2038 unsigned int dma_buf_sz_pad, num_pages;
2039 unsigned int napi_id;
2040 int ret;
2041
2042 dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2043 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2044 num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2045
2046 rx_q->queue_index = queue;
2047 rx_q->priv_data = priv;
2048 rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2049
2050 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2051 pp_params.pool_size = dma_conf->dma_rx_size;
2052 pp_params.order = order_base_2(num_pages);
2053 pp_params.nid = dev_to_node(priv->device);
2054 pp_params.dev = priv->device;
2055 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2056 pp_params.offset = stmmac_rx_offset(priv);
2057 pp_params.max_len = dma_conf->dma_buf_sz;
2058
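/* With Split Header enabled, no headroom offset is reserved and the
 * maximum usable length is extended by the unused offset instead.
 */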
2059 if (priv->sph) {
2060 pp_params.offset = 0;
2061 pp_params.max_len += stmmac_rx_offset(priv);
2062 }
2063
2064 rx_q->page_pool = page_pool_create(&pp_params);
2065 if (IS_ERR(rx_q->page_pool)) {
2066 ret = PTR_ERR(rx_q->page_pool);
2067 rx_q->page_pool = NULL;
2068 return ret;
2069 }
2070
2071 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2072 sizeof(*rx_q->buf_pool),
2073 GFP_KERNEL);
2074 if (!rx_q->buf_pool)
2075 return -ENOMEM;
2076
2077 if (priv->extend_desc) {
2078 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2079 dma_conf->dma_rx_size *
2080 sizeof(struct dma_extended_desc),
2081 &rx_q->dma_rx_phy,
2082 GFP_KERNEL);
2083 if (!rx_q->dma_erx)
2084 return -ENOMEM;
2085
2086 } else {
2087 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2088 dma_conf->dma_rx_size *
2089 sizeof(struct dma_desc),
2090 &rx_q->dma_rx_phy,
2091 GFP_KERNEL);
2092 if (!rx_q->dma_rx)
2093 return -ENOMEM;
2094 }
2095
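/* AF_XDP zero-copy queues are serviced by the combined rx/tx NAPI
 * context, all other queues by the plain RX NAPI.
 */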
2096 if (stmmac_xdp_is_enabled(priv) &&
2097 test_bit(queue, priv->af_xdp_zc_qps))
2098 napi_id = ch->rxtx_napi.napi_id;
2099 else
2100 napi_id = ch->rx_napi.napi_id;
2101
2102 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2103 rx_q->queue_index,
2104 napi_id);
2105 if (ret) {
2106 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2107 return -EINVAL;
2108 }
2109
2110 return 0;
2111 }
2112
2113 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2114 struct stmmac_dma_conf *dma_conf)
2115 {
2116 u32 rx_count = priv->plat->rx_queues_to_use;
2117 u32 queue;
2118 int ret;
2119
2120 /* RX queues buffers and DMA */
2121 for (queue = 0; queue < rx_count; queue++) {
2122 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2123 if (ret)
2124 goto err_dma;
2125 }
2126
2127 return 0;
2128
2129 err_dma:
2130 free_dma_rx_desc_resources(priv, dma_conf);
2131
2132 return ret;
2133 }
2134
2135 /**
2136 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2137 * @priv: private structure
2138 * @dma_conf: structure to take the dma data
2139 * @queue: TX queue index
2140 * Description: according to which descriptor can be used (extended or basic)
2141 * this function allocates the resources for TX and RX paths. In case of
2142 * reception, for example, it pre-allocates the RX socket buffer in order to
2143 * allow the zero-copy mechanism.
2144 */
2145 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2146 struct stmmac_dma_conf *dma_conf,
2147 u32 queue)
2148 {
2149 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2150 size_t size;
2151 void *addr;
2152
2153 tx_q->queue_index = queue;
2154 tx_q->priv_data = priv;
2155
2156 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2157 sizeof(*tx_q->tx_skbuff_dma),
2158 GFP_KERNEL);
2159 if (!tx_q->tx_skbuff_dma)
2160 return -ENOMEM;
2161
2162 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2163 sizeof(struct sk_buff *),
2164 GFP_KERNEL);
2165 if (!tx_q->tx_skbuff)
2166 return -ENOMEM;
2167
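/* Pick the descriptor size matching the ring flavour: extended,
 * enhanced (TBS) or basic.
 */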
2168 if (priv->extend_desc)
2169 size = sizeof(struct dma_extended_desc);
2170 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2171 size = sizeof(struct dma_edesc);
2172 else
2173 size = sizeof(struct dma_desc);
2174
2175 size *= dma_conf->dma_tx_size;
2176
2177 addr = dma_alloc_coherent(priv->device, size,
2178 &tx_q->dma_tx_phy, GFP_KERNEL);
2179 if (!addr)
2180 return -ENOMEM;
2181
2182 if (priv->extend_desc)
2183 tx_q->dma_etx = addr;
2184 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2185 tx_q->dma_entx = addr;
2186 else
2187 tx_q->dma_tx = addr;
2188
2189 return 0;
2190 }
2191
2192 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2193 struct stmmac_dma_conf *dma_conf)
2194 {
2195 u32 tx_count = priv->plat->tx_queues_to_use;
2196 u32 queue;
2197 int ret;
2198
2199 /* TX queues buffers and DMA */
2200 for (queue = 0; queue < tx_count; queue++) {
2201 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2202 if (ret)
2203 goto err_dma;
2204 }
2205
2206 return 0;
2207
2208 err_dma:
2209 free_dma_tx_desc_resources(priv, dma_conf);
2210 return ret;
2211 }
2212
2213 /**
2214 * alloc_dma_desc_resources - alloc TX/RX resources.
2215 * @priv: private structure
2216 * @dma_conf: structure to take the dma data
2217 * Description: according to which descriptor can be used (extended or basic)
2218 * this function allocates the resources for TX and RX paths. In case of
2219 * reception, for example, it pre-allocates the RX socket buffer in order to
2220 * allow the zero-copy mechanism.
2221 */
2222 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2223 struct stmmac_dma_conf *dma_conf)
2224 {
2225 /* RX Allocation */
2226 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2227
2228 if (ret)
2229 return ret;
2230
2231 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2232
2233 return ret;
2234 }
2235
2236 /**
2237 * free_dma_desc_resources - free dma desc resources
2238 * @priv: private structure
2239 * @dma_conf: structure to take the dma data
2240 */
2241 static void free_dma_desc_resources(struct stmmac_priv *priv,
2242 struct stmmac_dma_conf *dma_conf)
2243 {
2244 /* Release the DMA TX socket buffers */
2245 free_dma_tx_desc_resources(priv, dma_conf);
2246
2247 /* Release the DMA RX socket buffers later
2248 * to ensure all pending XDP_TX buffers are returned.
2249 */
2250 free_dma_rx_desc_resources(priv, dma_conf);
2251 }
2252
2253 /**
2254 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2255 * @priv: driver private structure
2256 * Description: It is used for enabling the rx queues in the MAC
2257 */
2258 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2259 {
2260 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2261 int queue;
2262 u8 mode;
2263
2264 for (queue = 0; queue < rx_queues_count; queue++) {
2265 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2266 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2267 }
2268 }
2269
2270 /**
2271 * stmmac_start_rx_dma - start RX DMA channel
2272 * @priv: driver private structure
2273 * @chan: RX channel index
2274 * Description:
2275 * This starts an RX DMA channel
2276 */
2277 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2278 {
2279 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2280 stmmac_start_rx(priv, priv->ioaddr, chan);
2281 }
2282
2283 /**
2284 * stmmac_start_tx_dma - start TX DMA channel
2285 * @priv: driver private structure
2286 * @chan: TX channel index
2287 * Description:
2288 * This starts a TX DMA channel
2289 */
2290 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2291 {
2292 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2293 stmmac_start_tx(priv, priv->ioaddr, chan);
2294 }
2295
2296 /**
2297 * stmmac_stop_rx_dma - stop RX DMA channel
2298 * @priv: driver private structure
2299 * @chan: RX channel index
2300 * Description:
2301 * This stops an RX DMA channel
2302 */
2303 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2304 {
2305 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2306 stmmac_stop_rx(priv, priv->ioaddr, chan);
2307 }
2308
2309 /**
2310 * stmmac_stop_tx_dma - stop TX DMA channel
2311 * @priv: driver private structure
2312 * @chan: TX channel index
2313 * Description:
2314 * This stops a TX DMA channel
2315 */
2316 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2317 {
2318 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2319 stmmac_stop_tx(priv, priv->ioaddr, chan);
2320 }
2321
2322 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2323 {
2324 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2325 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2326 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2327 u32 chan;
2328
2329 for (chan = 0; chan < dma_csr_ch; chan++) {
2330 struct stmmac_channel *ch = &priv->channel[chan];
2331 unsigned long flags;
2332
2333 spin_lock_irqsave(&ch->lock, flags);
2334 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2335 spin_unlock_irqrestore(&ch->lock, flags);
2336 }
2337 }
2338
2339 /**
2340 * stmmac_start_all_dma - start all RX and TX DMA channels
2341 * @priv: driver private structure
2342 * Description:
2343 * This starts all the RX and TX DMA channels
2344 */
2345 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2346 {
2347 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2348 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2349 u32 chan = 0;
2350
2351 for (chan = 0; chan < rx_channels_count; chan++)
2352 stmmac_start_rx_dma(priv, chan);
2353
2354 for (chan = 0; chan < tx_channels_count; chan++)
2355 stmmac_start_tx_dma(priv, chan);
2356 }
2357
2358 /**
2359 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2360 * @priv: driver private structure
2361 * Description:
2362 * This stops the RX and TX DMA channels
2363 */
2364 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2365 {
2366 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2367 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2368 u32 chan = 0;
2369
2370 for (chan = 0; chan < rx_channels_count; chan++)
2371 stmmac_stop_rx_dma(priv, chan);
2372
2373 for (chan = 0; chan < tx_channels_count; chan++)
2374 stmmac_stop_tx_dma(priv, chan);
2375 }
2376
2377 /**
2378 * stmmac_dma_operation_mode - HW DMA operation mode
2379 * @priv: driver private structure
2380 * Description: it is used for configuring the DMA operation mode register in
2381 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2382 */
2383 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2384 {
2385 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2386 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2387 int rxfifosz = priv->plat->rx_fifo_size;
2388 int txfifosz = priv->plat->tx_fifo_size;
2389 u32 txmode = 0;
2390 u32 rxmode = 0;
2391 u32 chan = 0;
2392 u8 qmode = 0;
2393
2394 if (rxfifosz == 0)
2395 rxfifosz = priv->dma_cap.rx_fifo_size;
2396 if (txfifosz == 0)
2397 txfifosz = priv->dma_cap.tx_fifo_size;
2398
2399 /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2400 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
2401 rxfifosz /= rx_channels_count;
2402 txfifosz /= tx_channels_count;
2403 }
2404
2405 if (priv->plat->force_thresh_dma_mode) {
2406 txmode = tc;
2407 rxmode = tc;
2408 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2409 /*
2410 * In case of GMAC, SF mode can be enabled
2411 * to perform the TX COE in HW. This depends on:
2412 * 1) TX COE being actually supported
2413 * 2) there being no buggy Jumbo frame support
2414 * that requires not inserting the csum in the TDES.
2415 */
2416 txmode = SF_DMA_MODE;
2417 rxmode = SF_DMA_MODE;
2418 priv->xstats.threshold = SF_DMA_MODE;
2419 } else {
2420 txmode = tc;
2421 rxmode = SF_DMA_MODE;
2422 }
2423
2424 /* configure all channels */
2425 for (chan = 0; chan < rx_channels_count; chan++) {
2426 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2427 u32 buf_size;
2428
2429 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2430
2431 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2432 rxfifosz, qmode);
2433
2434 if (rx_q->xsk_pool) {
2435 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2436 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2437 buf_size,
2438 chan);
2439 } else {
2440 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2441 priv->dma_conf.dma_buf_sz,
2442 chan);
2443 }
2444 }
2445
2446 for (chan = 0; chan < tx_channels_count; chan++) {
2447 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2448
2449 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2450 txfifosz, qmode);
2451 }
2452 }
2453
2454 static void stmmac_xsk_request_timestamp(void *_priv)
2455 {
2456 struct stmmac_metadata_request *meta_req = _priv;
2457
2458 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2459 *meta_req->set_ic = true;
2460 }
2461
2462 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2463 {
2464 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2465 struct stmmac_priv *priv = tx_compl->priv;
2466 struct dma_desc *desc = tx_compl->desc;
2467 bool found = false;
2468 u64 ns = 0;
2469
2470 if (!priv->hwts_tx_en)
2471 return 0;
2472
2473 /* check tx tstamp status */
2474 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2475 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2476 found = true;
2477 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2478 found = true;
2479 }
2480
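/* Compensate for the clock domain crossing error before reporting the
 * timestamp.
 */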
2481 if (found) {
2482 ns -= priv->plat->cdc_error_adj;
2483 return ns_to_ktime(ns);
2484 }
2485
2486 return 0;
2487 }
2488
2489 static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
2490 {
2491 struct timespec64 ts = ns_to_timespec64(launch_time);
2492 struct stmmac_metadata_request *meta_req = _priv;
2493
2494 if (meta_req->tbs & STMMAC_TBS_EN)
2495 stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
2496 ts.tv_nsec);
2497 }
2498
2499 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2500 .tmo_request_timestamp = stmmac_xsk_request_timestamp,
2501 .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
2502 .tmo_request_launch_time = stmmac_xsk_request_launch_time,
2503 };
2504
2505 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2506 {
2507 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2508 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2509 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2510 bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
2511 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2512 unsigned int entry = tx_q->cur_tx;
2513 struct dma_desc *tx_desc = NULL;
2514 struct xdp_desc xdp_desc;
2515 bool work_done = true;
2516 u32 tx_set_ic_bit = 0;
2517
2518 /* Avoids TX time-out as we are sharing with slow path */
2519 txq_trans_cond_update(nq);
2520
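/* Never submit more frames than there are free descriptors in the TX ring. */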
2521 budget = min(budget, stmmac_tx_avail(priv, queue));
2522
2523 for (; budget > 0; budget--) {
2524 struct stmmac_metadata_request meta_req;
2525 struct xsk_tx_metadata *meta = NULL;
2526 dma_addr_t dma_addr;
2527 bool set_ic;
2528
2529 /* We are sharing with the slow path and stop XSK TX desc submission when
2530 * the available TX ring space is less than the threshold.
2531 */
2532 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2533 !netif_carrier_ok(priv->dev)) {
2534 work_done = false;
2535 break;
2536 }
2537
2538 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2539 break;
2540
2541 if (priv->est && priv->est->enable &&
2542 priv->est->max_sdu[queue] &&
2543 xdp_desc.len > priv->est->max_sdu[queue]) {
2544 priv->xstats.max_sdu_txq_drop[queue]++;
2545 continue;
2546 }
2547
2548 if (likely(priv->extend_desc))
2549 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2550 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2551 tx_desc = &tx_q->dma_entx[entry].basic;
2552 else
2553 tx_desc = tx_q->dma_tx + entry;
2554
2555 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2556 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2557 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2558
2559 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2560
2561 /* To return the XDP buffer to the XSK pool, we simply call
2562 * xsk_tx_completed(), so we don't need to fill up
2563 * 'buf' and 'xdpf'.
2564 */
2565 tx_q->tx_skbuff_dma[entry].buf = 0;
2566 tx_q->xdpf[entry] = NULL;
2567
2568 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2569 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2570 tx_q->tx_skbuff_dma[entry].last_segment = true;
2571 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2572
2573 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2574
2575 tx_q->tx_count_frames++;
2576
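/* Raise the Interrupt-on-Completion bit only once every tx_coal_frames
 * descriptors to limit TX completion interrupts.
 */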
2577 if (!priv->tx_coal_frames[queue])
2578 set_ic = false;
2579 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2580 set_ic = true;
2581 else
2582 set_ic = false;
2583
2584 meta_req.priv = priv;
2585 meta_req.tx_desc = tx_desc;
2586 meta_req.set_ic = &set_ic;
2587 meta_req.tbs = tx_q->tbs;
2588 meta_req.edesc = &tx_q->dma_entx[entry];
2589 xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2590 &meta_req);
2591 if (set_ic) {
2592 tx_q->tx_count_frames = 0;
2593 stmmac_set_tx_ic(priv, tx_desc);
2594 tx_set_ic_bit++;
2595 }
2596
2597 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2598 csum, priv->mode, true, true,
2599 xdp_desc.len);
2600
2601 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
2602
2603 xsk_tx_metadata_to_compl(meta,
2604 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2605
2606 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2607 entry = tx_q->cur_tx;
2608 }
2609 u64_stats_update_begin(&txq_stats->napi_syncp);
2610 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2611 u64_stats_update_end(&txq_stats->napi_syncp);
2612
2613 if (tx_desc) {
2614 stmmac_flush_tx_descriptors(priv, queue);
2615 xsk_tx_release(pool);
2616 }
2617
2618 /* Return true if both of the following conditions are met:
2619 * a) TX Budget is still available
2620 * b) work_done = true when XSK TX desc peek is empty (no more
2621 * pending XSK TX for transmission)
2622 */
2623 return !!budget && work_done;
2624 }
2625
2626 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2627 {
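/* Bump the TX threshold in steps of 64 (while tc <= 256) unless the
 * channel already runs in Store-and-Forward mode.
 */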
2628 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2629 tc += 64;
2630
2631 if (priv->plat->force_thresh_dma_mode)
2632 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2633 else
2634 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2635 chan);
2636
2637 priv->xstats.threshold = tc;
2638 }
2639 }
2640
2641 /**
2642 * stmmac_tx_clean - to manage the transmission completion
2643 * @priv: driver private structure
2644 * @budget: napi budget limiting this function's packet handling
2645 * @queue: TX queue index
2646 * @pending_packets: signal to arm the TX coal timer
2647 * Description: it reclaims the transmit resources after transmission completes.
2648 * If some packets still need to be handled due to TX coalescing, set
2649 * pending_packets to true to make NAPI arm the TX coal timer.
2650 */
2651 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2652 bool *pending_packets)
2653 {
2654 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2655 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2656 unsigned int bytes_compl = 0, pkts_compl = 0;
2657 unsigned int entry, xmits = 0, count = 0;
2658 u32 tx_packets = 0, tx_errors = 0;
2659
2660 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2661
2662 tx_q->xsk_frames_done = 0;
2663
2664 entry = tx_q->dirty_tx;
2665
2666 /* Try to clean all TX complete frames in one shot */
2667 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2668 struct xdp_frame *xdpf;
2669 struct sk_buff *skb;
2670 struct dma_desc *p;
2671 int status;
2672
2673 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2674 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2675 xdpf = tx_q->xdpf[entry];
2676 skb = NULL;
2677 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2678 xdpf = NULL;
2679 skb = tx_q->tx_skbuff[entry];
2680 } else {
2681 xdpf = NULL;
2682 skb = NULL;
2683 }
2684
2685 if (priv->extend_desc)
2686 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2687 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2688 p = &tx_q->dma_entx[entry].basic;
2689 else
2690 p = tx_q->dma_tx + entry;
2691
2692 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2693 /* Check if the descriptor is owned by the DMA */
2694 if (unlikely(status & tx_dma_own))
2695 break;
2696
2697 count++;
2698
2699 /* Make sure descriptor fields are read after reading
2700 * the own bit.
2701 */
2702 dma_rmb();
2703
2704 /* Just consider the last segment and ...*/
2705 if (likely(!(status & tx_not_ls))) {
2706 /* ... verify the status error condition */
2707 if (unlikely(status & tx_err)) {
2708 tx_errors++;
2709 if (unlikely(status & tx_err_bump_tc))
2710 stmmac_bump_dma_threshold(priv, queue);
2711 } else {
2712 tx_packets++;
2713 }
2714 if (skb) {
2715 stmmac_get_tx_hwtstamp(priv, p, skb);
2716 } else if (tx_q->xsk_pool &&
2717 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2718 struct stmmac_xsk_tx_complete tx_compl = {
2719 .priv = priv,
2720 .desc = p,
2721 };
2722
2723 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2724 &stmmac_xsk_tx_metadata_ops,
2725 &tx_compl);
2726 }
2727 }
2728
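/* XDP_TX buffers are not unmapped here; their pages come from the RX
 * page pool, which owns the DMA mapping.
 */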
2729 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2730 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2731 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2732 dma_unmap_page(priv->device,
2733 tx_q->tx_skbuff_dma[entry].buf,
2734 tx_q->tx_skbuff_dma[entry].len,
2735 DMA_TO_DEVICE);
2736 else
2737 dma_unmap_single(priv->device,
2738 tx_q->tx_skbuff_dma[entry].buf,
2739 tx_q->tx_skbuff_dma[entry].len,
2740 DMA_TO_DEVICE);
2741 tx_q->tx_skbuff_dma[entry].buf = 0;
2742 tx_q->tx_skbuff_dma[entry].len = 0;
2743 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2744 }
2745
2746 stmmac_clean_desc3(priv, tx_q, p);
2747
2748 tx_q->tx_skbuff_dma[entry].last_segment = false;
2749 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2750
2751 if (xdpf &&
2752 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2753 xdp_return_frame_rx_napi(xdpf);
2754 tx_q->xdpf[entry] = NULL;
2755 }
2756
2757 if (xdpf &&
2758 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2759 xdp_return_frame(xdpf);
2760 tx_q->xdpf[entry] = NULL;
2761 }
2762
2763 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2764 tx_q->xsk_frames_done++;
2765
2766 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2767 if (likely(skb)) {
2768 pkts_compl++;
2769 bytes_compl += skb->len;
2770 dev_consume_skb_any(skb);
2771 tx_q->tx_skbuff[entry] = NULL;
2772 }
2773 }
2774
2775 stmmac_release_tx_desc(priv, p, priv->mode);
2776
2777 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2778 }
2779 tx_q->dirty_tx = entry;
2780
2781 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2782 pkts_compl, bytes_compl);
2783
2784 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2785 queue))) &&
2786 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2787
2788 netif_dbg(priv, tx_done, priv->dev,
2789 "%s: restart transmit\n", __func__);
2790 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2791 }
2792
2793 if (tx_q->xsk_pool) {
2794 bool work_done;
2795
2796 if (tx_q->xsk_frames_done)
2797 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2798
2799 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2800 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2801
2802 /* For XSK TX, we try to send as many as possible.
2803 * If XSK work done (XSK TX desc empty and budget still
2804 * available), return "budget - 1" to reenable TX IRQ.
2805 * Else, return "budget" to make NAPI continue polling.
2806 */
2807 work_done = stmmac_xdp_xmit_zc(priv, queue,
2808 STMMAC_XSK_TX_BUDGET_MAX);
2809 if (work_done)
2810 xmits = budget - 1;
2811 else
2812 xmits = budget;
2813 }
2814
2815 if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2816 stmmac_restart_sw_lpi_timer(priv);
2817
2818 /* We still have pending packets, let's call for a new scheduling */
2819 if (tx_q->dirty_tx != tx_q->cur_tx)
2820 *pending_packets = true;
2821
2822 u64_stats_update_begin(&txq_stats->napi_syncp);
2823 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2824 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2825 u64_stats_inc(&txq_stats->napi.tx_clean);
2826 u64_stats_update_end(&txq_stats->napi_syncp);
2827
2828 priv->xstats.tx_errors += tx_errors;
2829
2830 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2831
2832 /* Combine decisions from TX clean and XSK TX */
2833 return max(count, xmits);
2834 }
2835
2836 /**
2837 * stmmac_tx_err - to manage the tx error
2838 * @priv: driver private structure
2839 * @chan: channel index
2840 * Description: it cleans the descriptors and restarts the transmission
2841 * in case of transmission errors.
2842 */
2843 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2844 {
2845 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2846
2847 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2848
2849 stmmac_stop_tx_dma(priv, chan);
2850 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2851 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2852 stmmac_reset_tx_queue(priv, chan);
2853 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2854 tx_q->dma_tx_phy, chan);
2855 stmmac_start_tx_dma(priv, chan);
2856
2857 priv->xstats.tx_errors++;
2858 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2859 }
2860
2861 /**
2862 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2863 * @priv: driver private structure
2864 * @txmode: TX operating mode
2865 * @rxmode: RX operating mode
2866 * @chan: channel index
2867 * Description: it is used for configuring the DMA operation mode at
2868 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2869 * mode.
2870 */
2871 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2872 u32 rxmode, u32 chan)
2873 {
2874 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2875 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2876 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2877 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2878 int rxfifosz = priv->plat->rx_fifo_size;
2879 int txfifosz = priv->plat->tx_fifo_size;
2880
2881 if (rxfifosz == 0)
2882 rxfifosz = priv->dma_cap.rx_fifo_size;
2883 if (txfifosz == 0)
2884 txfifosz = priv->dma_cap.tx_fifo_size;
2885
2886 /* Adjust for real per queue fifo size */
2887 rxfifosz /= rx_channels_count;
2888 txfifosz /= tx_channels_count;
2889
2890 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2891 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2892 }
2893
2894 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2895 {
2896 int ret;
2897
2898 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2899 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2900 if (ret && (ret != -EINVAL)) {
2901 stmmac_global_err(priv);
2902 return true;
2903 }
2904
2905 return false;
2906 }
2907
2908 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2909 {
2910 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2911 &priv->xstats, chan, dir);
2912 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2913 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2914 struct stmmac_channel *ch = &priv->channel[chan];
2915 struct napi_struct *rx_napi;
2916 struct napi_struct *tx_napi;
2917 unsigned long flags;
2918
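/* Queues bound to an XSK pool are serviced by the combined rx/tx NAPI. */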
2919 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2920 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2921
2922 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2923 if (napi_schedule_prep(rx_napi)) {
2924 spin_lock_irqsave(&ch->lock, flags);
2925 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2926 spin_unlock_irqrestore(&ch->lock, flags);
2927 __napi_schedule(rx_napi);
2928 }
2929 }
2930
2931 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2932 if (napi_schedule_prep(tx_napi)) {
2933 spin_lock_irqsave(&ch->lock, flags);
2934 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2935 spin_unlock_irqrestore(&ch->lock, flags);
2936 __napi_schedule(tx_napi);
2937 }
2938 }
2939
2940 return status;
2941 }
2942
2943 /**
2944 * stmmac_dma_interrupt - DMA ISR
2945 * @priv: driver private structure
2946 * Description: this is the DMA ISR. It is called by the main ISR.
2947 * It calls the dwmac DMA routine and schedules the poll method in case
2948 * some work can be done.
2949 */
2950 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2951 {
2952 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2953 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2954 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2955 tx_channel_count : rx_channel_count;
2956 u32 chan;
2957 int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2958
2959 /* Make sure we never check beyond our status buffer. */
2960 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2961 channels_to_check = ARRAY_SIZE(status);
2962
2963 for (chan = 0; chan < channels_to_check; chan++)
2964 status[chan] = stmmac_napi_check(priv, chan,
2965 DMA_DIR_RXTX);
2966
2967 for (chan = 0; chan < tx_channel_count; chan++) {
2968 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2969 /* Try to bump up the dma threshold on this failure */
2970 stmmac_bump_dma_threshold(priv, chan);
2971 } else if (unlikely(status[chan] == tx_hard_error)) {
2972 stmmac_tx_err(priv, chan);
2973 }
2974 }
2975 }
2976
2977 /**
2978 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2979 * @priv: driver private structure
2980 * Description: this masks the MMC irq since the counters are managed in SW.
2981 */
2982 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2983 {
2984 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2985 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2986
2987 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2988
2989 if (priv->dma_cap.rmon) {
2990 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2991 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2992 } else
2993 netdev_info(priv->dev, "No MAC Management Counters available\n");
2994 }
2995
2996 /**
2997 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2998 * @priv: driver private structure
2999 * Description:
3000 * new GMAC chip generations have a new register to indicate the
3001 * presence of the optional feature/functions.
3002 * This can also be used to override the value passed through the
3003 * platform, which is necessary for old MAC10/100 and GMAC chips.
3004 */
3005 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3006 {
3007 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3008 }
3009
3010 /**
3011 * stmmac_check_ether_addr - check if the MAC addr is valid
3012 * @priv: driver private structure
3013 * Description:
3014 * it verifies that the MAC address is valid; in case of failure it
3015 * generates a random MAC address
3016 */
3017 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3018 {
3019 u8 addr[ETH_ALEN];
3020
3021 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3022 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3023 if (is_valid_ether_addr(addr))
3024 eth_hw_addr_set(priv->dev, addr);
3025 else
3026 eth_hw_addr_random(priv->dev);
3027 dev_info(priv->device, "device MAC address %pM\n",
3028 priv->dev->dev_addr);
3029 }
3030 }
3031
3032 /**
3033 * stmmac_init_dma_engine - DMA init.
3034 * @priv: driver private structure
3035 * Description:
3036 * It inits the DMA invoking the specific MAC/GMAC callback.
3037 * Some DMA parameters can be passed from the platform;
3038 * in case these are not passed, a default is kept for the MAC or GMAC.
3039 */
3040 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3041 {
3042 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3043 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3044 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3045 struct stmmac_rx_queue *rx_q;
3046 struct stmmac_tx_queue *tx_q;
3047 u32 chan = 0;
3048 int ret = 0;
3049
3050 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3051 netdev_err(priv->dev, "Invalid DMA configuration\n");
3052 return -EINVAL;
3053 }
3054
3055 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3056 priv->plat->dma_cfg->atds = 1;
3057
3058 ret = stmmac_reset(priv, priv->ioaddr);
3059 if (ret) {
3060 netdev_err(priv->dev, "Failed to reset the dma\n");
3061 return ret;
3062 }
3063
3064 /* DMA Configuration */
3065 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);
3066
3067 if (priv->plat->axi)
3068 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3069
3070 /* DMA CSR Channel configuration */
3071 for (chan = 0; chan < dma_csr_ch; chan++) {
3072 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3073 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3074 }
3075
3076 /* DMA RX Channel Configuration */
3077 for (chan = 0; chan < rx_channels_count; chan++) {
3078 rx_q = &priv->dma_conf.rx_queue[chan];
3079
3080 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3081 rx_q->dma_rx_phy, chan);
3082
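/* Advance the RX tail pointer past the descriptors that already have
 * buffers attached.
 */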
3083 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3084 (rx_q->buf_alloc_num *
3085 sizeof(struct dma_desc));
3086 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3087 rx_q->rx_tail_addr, chan);
3088 }
3089
3090 /* DMA TX Channel Configuration */
3091 for (chan = 0; chan < tx_channels_count; chan++) {
3092 tx_q = &priv->dma_conf.tx_queue[chan];
3093
3094 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3095 tx_q->dma_tx_phy, chan);
3096
3097 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3098 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3099 tx_q->tx_tail_addr, chan);
3100 }
3101
3102 return ret;
3103 }
3104
3105 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3106 {
3107 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3108 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3109 struct stmmac_channel *ch;
3110 struct napi_struct *napi;
3111
3112 if (!tx_coal_timer)
3113 return;
3114
3115 ch = &priv->channel[tx_q->queue_index];
3116 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3117
3118 /* Arm timer only if napi is not already scheduled.
3119 * Try to cancel any timer if napi is scheduled; the timer will be armed
3120 * again on the next scheduled napi.
3121 */
3122 if (unlikely(!napi_is_scheduled(napi)))
3123 hrtimer_start(&tx_q->txtimer,
3124 STMMAC_COAL_TIMER(tx_coal_timer),
3125 HRTIMER_MODE_REL);
3126 else
3127 hrtimer_try_to_cancel(&tx_q->txtimer);
3128 }
3129
3130 /**
3131 * stmmac_tx_timer - mitigation sw timer for tx.
3132 * @t: data pointer
3133 * Description:
3134 * This is the timer handler to directly invoke the stmmac_tx_clean.
3135 */
3136 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3137 {
3138 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3139 struct stmmac_priv *priv = tx_q->priv_data;
3140 struct stmmac_channel *ch;
3141 struct napi_struct *napi;
3142
3143 ch = &priv->channel[tx_q->queue_index];
3144 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3145
3146 if (likely(napi_schedule_prep(napi))) {
3147 unsigned long flags;
3148
3149 spin_lock_irqsave(&ch->lock, flags);
3150 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3151 spin_unlock_irqrestore(&ch->lock, flags);
3152 __napi_schedule(napi);
3153 }
3154
3155 return HRTIMER_NORESTART;
3156 }
3157
3158 /**
3159 * stmmac_init_coalesce - init mitigation options.
3160 * @priv: driver private structure
3161 * Description:
3162 * This inits the coalesce parameters: i.e. timer rate,
3163 * timer handler and default threshold used for enabling the
3164 * interrupt on completion bit.
3165 */
3166 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3167 {
3168 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3169 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3170 u32 chan;
3171
3172 for (chan = 0; chan < tx_channel_count; chan++) {
3173 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3174
3175 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3176 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3177
3178 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3179 }
3180
3181 for (chan = 0; chan < rx_channel_count; chan++)
3182 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3183 }
3184
3185 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3186 {
3187 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3188 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3189 u32 chan;
3190
3191 /* set TX ring length */
3192 for (chan = 0; chan < tx_channels_count; chan++)
3193 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3194 (priv->dma_conf.dma_tx_size - 1), chan);
3195
3196 /* set RX ring length */
3197 for (chan = 0; chan < rx_channels_count; chan++)
3198 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3199 (priv->dma_conf.dma_rx_size - 1), chan);
3200 }
3201
3202 /**
3203 * stmmac_set_tx_queue_weight - Set TX queue weight
3204 * @priv: driver private structure
3205 * Description: It is used for setting the TX queue weights
3206 */
3207 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3208 {
3209 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3210 u32 weight;
3211 u32 queue;
3212
3213 for (queue = 0; queue < tx_queues_count; queue++) {
3214 weight = priv->plat->tx_queues_cfg[queue].weight;
3215 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3216 }
3217 }
3218
3219 /**
3220 * stmmac_configure_cbs - Configure CBS in TX queue
3221 * @priv: driver private structure
3222 * Description: It is used for configuring CBS in AVB TX queues
3223 */
3224 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3225 {
3226 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3227 u32 mode_to_use;
3228 u32 queue;
3229
3230 /* queue 0 is reserved for legacy traffic */
3231 for (queue = 1; queue < tx_queues_count; queue++) {
3232 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3233 if (mode_to_use == MTL_QUEUE_DCB)
3234 continue;
3235
3236 stmmac_config_cbs(priv, priv->hw,
3237 priv->plat->tx_queues_cfg[queue].send_slope,
3238 priv->plat->tx_queues_cfg[queue].idle_slope,
3239 priv->plat->tx_queues_cfg[queue].high_credit,
3240 priv->plat->tx_queues_cfg[queue].low_credit,
3241 queue);
3242 }
3243 }
3244
3245 /**
3246 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3247 * @priv: driver private structure
3248 * Description: It is used for mapping RX queues to RX dma channels
3249 */
3250 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3251 {
3252 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3253 u32 queue;
3254 u32 chan;
3255
3256 for (queue = 0; queue < rx_queues_count; queue++) {
3257 chan = priv->plat->rx_queues_cfg[queue].chan;
3258 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3259 }
3260 }
3261
3262 /**
3263 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3264 * @priv: driver private structure
3265 * Description: It is used for configuring the RX Queue Priority
3266 */
3267 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3268 {
3269 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3270 u32 queue;
3271 u32 prio;
3272
3273 for (queue = 0; queue < rx_queues_count; queue++) {
3274 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3275 continue;
3276
3277 prio = priv->plat->rx_queues_cfg[queue].prio;
3278 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3279 }
3280 }
3281
3282 /**
3283 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3284 * @priv: driver private structure
3285 * Description: It is used for configuring the TX Queue Priority
3286 */
3287 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3288 {
3289 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3290 u32 queue;
3291 u32 prio;
3292
3293 for (queue = 0; queue < tx_queues_count; queue++) {
3294 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3295 continue;
3296
3297 prio = priv->plat->tx_queues_cfg[queue].prio;
3298 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3299 }
3300 }
3301
3302 /**
3303 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3304 * @priv: driver private structure
3305 * Description: It is used for configuring the RX queue routing
3306 */
3307 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3308 {
3309 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3310 u32 queue;
3311 u8 packet;
3312
3313 for (queue = 0; queue < rx_queues_count; queue++) {
3314 /* no specific packet type routing specified for the queue */
3315 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3316 continue;
3317
3318 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3319 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3320 }
3321 }
3322
3323 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3324 {
3325 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3326 priv->rss.enable = false;
3327 return;
3328 }
3329
3330 if (priv->dev->features & NETIF_F_RXHASH)
3331 priv->rss.enable = true;
3332 else
3333 priv->rss.enable = false;
3334
3335 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3336 priv->plat->rx_queues_to_use);
3337 }
3338
3339 /**
3340 * stmmac_mtl_configuration - Configure MTL
3341 * @priv: driver private structure
3342 * Description: It is used for configuring MTL
3343 */
3344 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3345 {
3346 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3347 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3348
3349 if (tx_queues_count > 1)
3350 stmmac_set_tx_queue_weight(priv);
3351
3352 /* Configure MTL RX algorithms */
3353 if (rx_queues_count > 1)
3354 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3355 priv->plat->rx_sched_algorithm);
3356
3357 /* Configure MTL TX algorithms */
3358 if (tx_queues_count > 1)
3359 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3360 priv->plat->tx_sched_algorithm);
3361
3362 /* Configure CBS in AVB TX queues */
3363 if (tx_queues_count > 1)
3364 stmmac_configure_cbs(priv);
3365
3366 /* Map RX MTL to DMA channels */
3367 stmmac_rx_queue_dma_chan_map(priv);
3368
3369 /* Enable MAC RX Queues */
3370 stmmac_mac_enable_rx_queues(priv);
3371
3372 /* Set RX priorities */
3373 if (rx_queues_count > 1)
3374 stmmac_mac_config_rx_queues_prio(priv);
3375
3376 /* Set TX priorities */
3377 if (tx_queues_count > 1)
3378 stmmac_mac_config_tx_queues_prio(priv);
3379
3380 /* Set RX routing */
3381 if (rx_queues_count > 1)
3382 stmmac_mac_config_rx_queues_routing(priv);
3383
3384 /* Receive Side Scaling */
3385 if (rx_queues_count > 1)
3386 stmmac_mac_config_rss(priv);
3387 }
3388
3389 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3390 {
3391 if (priv->dma_cap.asp) {
3392 netdev_info(priv->dev, "Enabling Safety Features\n");
3393 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3394 priv->plat->safety_feat_cfg);
3395 } else {
3396 netdev_info(priv->dev, "No Safety Features support found\n");
3397 }
3398 }
3399
3400 /**
3401 * stmmac_hw_setup - setup mac in a usable state.
3402 * @dev : pointer to the device structure.
3403 * Description:
3404 * this is the main function to set up the HW in a usable state: the
3405 * dma engine is reset, the core registers are configured (e.g. AXI,
3406 * Checksum features, timers) and the DMA is ready to start receiving and
3407 * transmitting.
3408 * Return value:
3409 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3410 * file on failure.
3411 */
3412 static int stmmac_hw_setup(struct net_device *dev)
3413 {
3414 struct stmmac_priv *priv = netdev_priv(dev);
3415 u32 rx_cnt = priv->plat->rx_queues_to_use;
3416 u32 tx_cnt = priv->plat->tx_queues_to_use;
3417 bool sph_en;
3418 u32 chan;
3419 int ret;
3420
3421 /* Make sure RX clock is enabled */
3422 if (priv->hw->phylink_pcs)
3423 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
3424
3425 /* Note that clk_rx_i must be running for reset to complete. This
3426 * clock may also be required when setting the MAC address.
3427 *
3428 * Block the receive clock stop for LPI mode at the PHY in case
3429 * the link is established with EEE mode active.
3430 */
3431 phylink_rx_clk_stop_block(priv->phylink);
3432
3433 /* DMA initialization and SW reset */
3434 ret = stmmac_init_dma_engine(priv);
3435 if (ret < 0) {
3436 phylink_rx_clk_stop_unblock(priv->phylink);
3437 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3438 __func__);
3439 return ret;
3440 }
3441
3442 /* Copy the MAC addr into the HW */
3443 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3444 phylink_rx_clk_stop_unblock(priv->phylink);
3445
3446 /* PS and related bits will be programmed according to the speed */
3447 if (priv->hw->pcs) {
3448 int speed = priv->plat->mac_port_sel_speed;
3449
3450 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3451 (speed == SPEED_1000)) {
3452 priv->hw->ps = speed;
3453 } else {
3454 dev_warn(priv->device, "invalid port speed\n");
3455 priv->hw->ps = 0;
3456 }
3457 }
3458
3459 /* Initialize the MAC Core */
3460 stmmac_core_init(priv, priv->hw, dev);
3461
3462 /* Initialize MTL */
3463 stmmac_mtl_configuration(priv);
3464
3465 /* Initialize Safety Features */
3466 stmmac_safety_feat_configuration(priv);
3467
3468 ret = stmmac_rx_ipc(priv, priv->hw);
3469 if (!ret) {
3470 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3471 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3472 priv->hw->rx_csum = 0;
3473 }
3474
3475 /* Enable the MAC Rx/Tx */
3476 stmmac_mac_set(priv, priv->ioaddr, true);
3477
3478 /* Set the HW DMA mode and the COE */
3479 stmmac_dma_operation_mode(priv);
3480
3481 stmmac_mmc_setup(priv);
3482
3483 if (priv->use_riwt) {
3484 u32 queue;
3485
3486 for (queue = 0; queue < rx_cnt; queue++) {
3487 if (!priv->rx_riwt[queue])
3488 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3489
3490 stmmac_rx_watchdog(priv, priv->ioaddr,
3491 priv->rx_riwt[queue], queue);
3492 }
3493 }
3494
3495 if (priv->hw->pcs)
3496 stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
3497
3498 /* set TX and RX rings length */
3499 stmmac_set_rings_length(priv);
3500
3501 /* Enable TSO */
3502 if (priv->tso) {
3503 for (chan = 0; chan < tx_cnt; chan++) {
3504 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3505
3506 /* TSO and TBS cannot co-exist */
3507 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3508 continue;
3509
3510 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3511 }
3512 }
3513
3514 /* Enable Split Header */
3515 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3516 for (chan = 0; chan < rx_cnt; chan++)
3517 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3518
3519
3520 /* VLAN Tag Insertion */
3521 if (priv->dma_cap.vlins)
3522 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3523
3524 /* TBS */
3525 for (chan = 0; chan < tx_cnt; chan++) {
3526 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3527 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3528
3529 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3530 }
3531
3532 /* Configure real RX and TX queues */
3533 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3534 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3535
3536 /* Start the ball rolling... */
3537 stmmac_start_all_dma(priv);
3538
3539 phylink_rx_clk_stop_block(priv->phylink);
3540 stmmac_set_hw_vlan_mode(priv, priv->hw);
3541 phylink_rx_clk_stop_unblock(priv->phylink);
3542
3543 return 0;
3544 }
3545
3546 static void stmmac_free_irq(struct net_device *dev,
3547 enum request_irq_err irq_err, int irq_idx)
3548 {
3549 struct stmmac_priv *priv = netdev_priv(dev);
3550 int j;
3551
3552 switch (irq_err) {
3553 case REQ_IRQ_ERR_ALL:
3554 irq_idx = priv->plat->tx_queues_to_use;
3555 fallthrough;
3556 case REQ_IRQ_ERR_TX:
3557 for (j = irq_idx - 1; j >= 0; j--) {
3558 if (priv->tx_irq[j] > 0) {
3559 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3560 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3561 }
3562 }
3563 irq_idx = priv->plat->rx_queues_to_use;
3564 fallthrough;
3565 case REQ_IRQ_ERR_RX:
3566 for (j = irq_idx - 1; j >= 0; j--) {
3567 if (priv->rx_irq[j] > 0) {
3568 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3569 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3570 }
3571 }
3572
3573 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3574 free_irq(priv->sfty_ue_irq, dev);
3575 fallthrough;
3576 case REQ_IRQ_ERR_SFTY_UE:
3577 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3578 free_irq(priv->sfty_ce_irq, dev);
3579 fallthrough;
3580 case REQ_IRQ_ERR_SFTY_CE:
3581 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3582 free_irq(priv->lpi_irq, dev);
3583 fallthrough;
3584 case REQ_IRQ_ERR_LPI:
3585 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3586 free_irq(priv->wol_irq, dev);
3587 fallthrough;
3588 case REQ_IRQ_ERR_SFTY:
3589 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
3590 free_irq(priv->sfty_irq, dev);
3591 fallthrough;
3592 case REQ_IRQ_ERR_WOL:
3593 free_irq(dev->irq, dev);
3594 fallthrough;
3595 case REQ_IRQ_ERR_MAC:
3596 case REQ_IRQ_ERR_NO:
3597 /* If MAC IRQ request error, no more IRQ to free */
3598 break;
3599 }
3600 }
3601
3602 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3603 {
3604 struct stmmac_priv *priv = netdev_priv(dev);
3605 enum request_irq_err irq_err;
3606 int irq_idx = 0;
3607 char *int_name;
3608 int ret;
3609 int i;
3610
3611 /* For common interrupt */
3612 int_name = priv->int_name_mac;
3613 sprintf(int_name, "%s:%s", dev->name, "mac");
3614 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3615 0, int_name, dev);
3616 if (unlikely(ret < 0)) {
3617 netdev_err(priv->dev,
3618 "%s: alloc mac MSI %d (error: %d)\n",
3619 __func__, dev->irq, ret);
3620 irq_err = REQ_IRQ_ERR_MAC;
3621 goto irq_error;
3622 }
3623
3624 /* Request the Wake IRQ in case another line
3625 * is used for WoL
3626 */
3627 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3628 int_name = priv->int_name_wol;
3629 sprintf(int_name, "%s:%s", dev->name, "wol");
3630 ret = request_irq(priv->wol_irq,
3631 stmmac_mac_interrupt,
3632 0, int_name, dev);
3633 if (unlikely(ret < 0)) {
3634 netdev_err(priv->dev,
3635 "%s: alloc wol MSI %d (error: %d)\n",
3636 __func__, priv->wol_irq, ret);
3637 irq_err = REQ_IRQ_ERR_WOL;
3638 goto irq_error;
3639 }
3640 }
3641
3642 /* Request the LPI IRQ in case another line
3643 * is used for LPI
3644 */
3645 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3646 int_name = priv->int_name_lpi;
3647 sprintf(int_name, "%s:%s", dev->name, "lpi");
3648 ret = request_irq(priv->lpi_irq,
3649 stmmac_mac_interrupt,
3650 0, int_name, dev);
3651 if (unlikely(ret < 0)) {
3652 netdev_err(priv->dev,
3653 "%s: alloc lpi MSI %d (error: %d)\n",
3654 __func__, priv->lpi_irq, ret);
3655 irq_err = REQ_IRQ_ERR_LPI;
3656 goto irq_error;
3657 }
3658 }
3659
3660 /* Request the common Safety Feature Correctable/Uncorrectable
3661 * Error line in case another line is used
3662 */
3663 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3664 int_name = priv->int_name_sfty;
3665 sprintf(int_name, "%s:%s", dev->name, "safety");
3666 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3667 0, int_name, dev);
3668 if (unlikely(ret < 0)) {
3669 netdev_err(priv->dev,
3670 "%s: alloc sfty MSI %d (error: %d)\n",
3671 __func__, priv->sfty_irq, ret);
3672 irq_err = REQ_IRQ_ERR_SFTY;
3673 goto irq_error;
3674 }
3675 }
3676
3677 /* Request the Safety Feature Correctable Error line in
3678 * case another line is used
3679 */
3680 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3681 int_name = priv->int_name_sfty_ce;
3682 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3683 ret = request_irq(priv->sfty_ce_irq,
3684 stmmac_safety_interrupt,
3685 0, int_name, dev);
3686 if (unlikely(ret < 0)) {
3687 netdev_err(priv->dev,
3688 "%s: alloc sfty ce MSI %d (error: %d)\n",
3689 __func__, priv->sfty_ce_irq, ret);
3690 irq_err = REQ_IRQ_ERR_SFTY_CE;
3691 goto irq_error;
3692 }
3693 }
3694
3695 /* Request the Safety Feature Uncorrectable Error line in
3696 * case another line is used
3697 */
3698 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3699 int_name = priv->int_name_sfty_ue;
3700 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3701 ret = request_irq(priv->sfty_ue_irq,
3702 stmmac_safety_interrupt,
3703 0, int_name, dev);
3704 if (unlikely(ret < 0)) {
3705 netdev_err(priv->dev,
3706 "%s: alloc sfty ue MSI %d (error: %d)\n",
3707 __func__, priv->sfty_ue_irq, ret);
3708 irq_err = REQ_IRQ_ERR_SFTY_UE;
3709 goto irq_error;
3710 }
3711 }
3712
3713 /* Request Rx MSI irq */
3714 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3715 if (i >= MTL_MAX_RX_QUEUES)
3716 break;
3717 if (priv->rx_irq[i] == 0)
3718 continue;
3719
3720 int_name = priv->int_name_rx_irq[i];
3721 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3722 ret = request_irq(priv->rx_irq[i],
3723 stmmac_msi_intr_rx,
3724 0, int_name, &priv->dma_conf.rx_queue[i]);
3725 if (unlikely(ret < 0)) {
3726 netdev_err(priv->dev,
3727 "%s: alloc rx-%d MSI %d (error: %d)\n",
3728 __func__, i, priv->rx_irq[i], ret);
3729 irq_err = REQ_IRQ_ERR_RX;
3730 irq_idx = i;
3731 goto irq_error;
3732 }
3733 irq_set_affinity_hint(priv->rx_irq[i],
3734 cpumask_of(i % num_online_cpus()));
3735 }
3736
3737 /* Request Tx MSI irq */
3738 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3739 if (i >= MTL_MAX_TX_QUEUES)
3740 break;
3741 if (priv->tx_irq[i] == 0)
3742 continue;
3743
3744 int_name = priv->int_name_tx_irq[i];
3745 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3746 ret = request_irq(priv->tx_irq[i],
3747 stmmac_msi_intr_tx,
3748 0, int_name, &priv->dma_conf.tx_queue[i]);
3749 if (unlikely(ret < 0)) {
3750 netdev_err(priv->dev,
3751 "%s: alloc tx-%d MSI %d (error: %d)\n",
3752 __func__, i, priv->tx_irq[i], ret);
3753 irq_err = REQ_IRQ_ERR_TX;
3754 irq_idx = i;
3755 goto irq_error;
3756 }
3757 irq_set_affinity_hint(priv->tx_irq[i],
3758 cpumask_of(i % num_online_cpus()));
3759 }
3760
3761 return 0;
3762
3763 irq_error:
3764 stmmac_free_irq(dev, irq_err, irq_idx);
3765 return ret;
3766 }
3767
3768 static int stmmac_request_irq_single(struct net_device *dev)
3769 {
3770 struct stmmac_priv *priv = netdev_priv(dev);
3771 enum request_irq_err irq_err;
3772 int ret;
3773
3774 ret = request_irq(dev->irq, stmmac_interrupt,
3775 IRQF_SHARED, dev->name, dev);
3776 if (unlikely(ret < 0)) {
3777 netdev_err(priv->dev,
3778 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3779 __func__, dev->irq, ret);
3780 irq_err = REQ_IRQ_ERR_MAC;
3781 goto irq_error;
3782 }
3783
3784 /* Request the Wake IRQ in case another line
3785 * is used for WoL
3786 */
3787 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3788 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3789 IRQF_SHARED, dev->name, dev);
3790 if (unlikely(ret < 0)) {
3791 netdev_err(priv->dev,
3792 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3793 __func__, priv->wol_irq, ret);
3794 irq_err = REQ_IRQ_ERR_WOL;
3795 goto irq_error;
3796 }
3797 }
3798
3799 /* Request the IRQ lines */
3800 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3801 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3802 IRQF_SHARED, dev->name, dev);
3803 if (unlikely(ret < 0)) {
3804 netdev_err(priv->dev,
3805 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3806 __func__, priv->lpi_irq, ret);
3807 irq_err = REQ_IRQ_ERR_LPI;
3808 goto irq_error;
3809 }
3810 }
3811
3812 /* Request the common Safety Feature Correctable/Uncorrectable
3813 * Error line in case another line is used
3814 */
3815 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3816 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3817 IRQF_SHARED, dev->name, dev);
3818 if (unlikely(ret < 0)) {
3819 netdev_err(priv->dev,
3820 "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3821 __func__, priv->sfty_irq, ret);
3822 irq_err = REQ_IRQ_ERR_SFTY;
3823 goto irq_error;
3824 }
3825 }
3826
3827 return 0;
3828
3829 irq_error:
3830 stmmac_free_irq(dev, irq_err, 0);
3831 return ret;
3832 }
3833
3834 static int stmmac_request_irq(struct net_device *dev)
3835 {
3836 struct stmmac_priv *priv = netdev_priv(dev);
3837 int ret;
3838
3839 /* Request the IRQ lines */
3840 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3841 ret = stmmac_request_irq_multi_msi(dev);
3842 else
3843 ret = stmmac_request_irq_single(dev);
3844
3845 return ret;
3846 }
3847
3848 /**
3849 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3850 * @priv: driver private structure
3851 * @mtu: MTU to setup the dma queue and buf with
3852 * Description: Allocate and generate a dma_conf based on the provided MTU.
3853 * Allocate the Tx/Rx DMA queue and init them.
3854 * Return value:
3855 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3856 */
3857 static struct stmmac_dma_conf *
3858 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3859 {
3860 struct stmmac_dma_conf *dma_conf;
3861 int chan, bfsize, ret;
3862
3863 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3864 if (!dma_conf) {
3865 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3866 __func__);
3867 return ERR_PTR(-ENOMEM);
3868 }
3869
3870 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3871 if (bfsize < 0)
3872 bfsize = 0;
3873
3874 if (bfsize < BUF_SIZE_16KiB)
3875 bfsize = stmmac_set_bfsize(mtu, 0);
3876
3877 dma_conf->dma_buf_sz = bfsize;
3878 /* Choose the tx/rx size from the one already defined in the
3879 * priv struct, if defined.
3880 */
3881 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3882 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3883
3884 if (!dma_conf->dma_tx_size)
3885 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3886 if (!dma_conf->dma_rx_size)
3887 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3888
3889 /* Earlier check for TBS */
3890 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3891 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3892 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3893
3894 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3895 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3896 }
3897
3898 ret = alloc_dma_desc_resources(priv, dma_conf);
3899 if (ret < 0) {
3900 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3901 __func__);
3902 goto alloc_error;
3903 }
3904
3905 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3906 if (ret < 0) {
3907 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3908 __func__);
3909 goto init_error;
3910 }
3911
3912 return dma_conf;
3913
3914 init_error:
3915 free_dma_desc_resources(priv, dma_conf);
3916 alloc_error:
3917 kfree(dma_conf);
3918 return ERR_PTR(ret);
3919 }
3920
3921 /**
3922 * __stmmac_open - open entry point of the driver
3923 * @dev : pointer to the device structure.
3924 * @dma_conf : structure to take the dma data
3925 * Description:
3926 * This function is the open entry point of the driver.
3927 * Return value:
3928 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3929 * file on failure.
3930 */
3931 static int __stmmac_open(struct net_device *dev,
3932 struct stmmac_dma_conf *dma_conf)
3933 {
3934 struct stmmac_priv *priv = netdev_priv(dev);
3935 u32 chan;
3936 int ret;
3937
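	/* Carry the per-queue TBS state from the live configuration into the
	 * freshly allocated dma_conf before it replaces priv->dma_conf below,
	 * so a reopen does not lose queues that were already switched to TBS.
	 */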
3938 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3939 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3940 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3941 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3942
3943 stmmac_reset_queues_param(priv);
3944
3945 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3946 priv->plat->serdes_powerup) {
3947 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3948 if (ret < 0) {
3949 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3950 __func__);
3951 goto init_error;
3952 }
3953 }
3954
3955 ret = stmmac_hw_setup(dev);
3956 if (ret < 0) {
3957 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3958 goto init_error;
3959 }
3960
3961 stmmac_setup_ptp(priv);
3962
3963 stmmac_init_coalesce(priv);
3964
3965 phylink_start(priv->phylink);
3966 /* We may have called phylink_speed_down before */
3967 phylink_speed_up(priv->phylink);
3968
3969 ret = stmmac_request_irq(dev);
3970 if (ret)
3971 goto irq_error;
3972
3973 stmmac_enable_all_queues(priv);
3974 netif_tx_start_all_queues(priv->dev);
3975 stmmac_enable_all_dma_irq(priv);
3976
3977 return 0;
3978
3979 irq_error:
3980 phylink_stop(priv->phylink);
3981
3982 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3983 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3984
3985 stmmac_release_ptp(priv);
3986 init_error:
3987 return ret;
3988 }
3989
3990 static int stmmac_open(struct net_device *dev)
3991 {
3992 struct stmmac_priv *priv = netdev_priv(dev);
3993 struct stmmac_dma_conf *dma_conf;
3994 int ret;
3995
3996 /* Initialise the tx lpi timer, converting from msec to usec */
3997 if (!priv->tx_lpi_timer)
3998 priv->tx_lpi_timer = eee_timer * 1000;
3999
4000 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
4001 if (IS_ERR(dma_conf))
4002 return PTR_ERR(dma_conf);
4003
4004 ret = pm_runtime_resume_and_get(priv->device);
4005 if (ret < 0)
4006 goto err_dma_resources;
4007
4008 ret = stmmac_init_phy(dev);
4009 if (ret)
4010 goto err_runtime_pm;
4011
4012 ret = __stmmac_open(dev, dma_conf);
4013 if (ret)
4014 goto err_disconnect_phy;
4015
4016 kfree(dma_conf);
4017
4018 return ret;
4019
4020 err_disconnect_phy:
4021 phylink_disconnect_phy(priv->phylink);
4022 err_runtime_pm:
4023 pm_runtime_put(priv->device);
4024 err_dma_resources:
4025 free_dma_desc_resources(priv, dma_conf);
4026 kfree(dma_conf);
4027 return ret;
4028 }
4029
4030 static void __stmmac_release(struct net_device *dev)
4031 {
4032 struct stmmac_priv *priv = netdev_priv(dev);
4033 u32 chan;
4034
4035 /* If the PHY or MAC has WoL enabled, then the PHY will not be
4036 * suspended when phylink_stop() is called below. Set the PHY
4037 * to its slowest speed to save power.
4038 */
4039 if (device_may_wakeup(priv->device))
4040 phylink_speed_down(priv->phylink, false);
4041
4042 /* Stop and disconnect the PHY */
4043 phylink_stop(priv->phylink);
4044
4045 stmmac_disable_all_queues(priv);
4046
4047 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4048 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4049
4050 netif_tx_disable(dev);
4051
4052 /* Free the IRQ lines */
4053 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4054
4055 /* Stop TX/RX DMA and clear the descriptors */
4056 stmmac_stop_all_dma(priv);
4057
4058 /* Release and free the Rx/Tx resources */
4059 free_dma_desc_resources(priv, &priv->dma_conf);
4060
4061 /* Power down the SerDes if present */
4062 if (priv->plat->serdes_powerdown)
4063 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4064
4065 stmmac_release_ptp(priv);
4066
4067 if (stmmac_fpe_supported(priv))
4068 ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
4069 }
4070
4071 /**
4072 * stmmac_release - close entry point of the driver
4073 * @dev : device pointer.
4074 * Description:
4075 * This is the stop entry point of the driver.
4076 */
4077 static int stmmac_release(struct net_device *dev)
4078 {
4079 struct stmmac_priv *priv = netdev_priv(dev);
4080
4081 __stmmac_release(dev);
4082
4083 phylink_disconnect_phy(priv->phylink);
4084 pm_runtime_put(priv->device);
4085
4086 return 0;
4087 }
4088
4089 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4090 struct stmmac_tx_queue *tx_q)
4091 {
4092 struct dma_desc *p;
4093 u16 tag = 0x0;
4094
4095 if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
4096 return false;
4097
4098 tag = skb_vlan_tag_get(skb);
4099
4100 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4101 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4102 else
4103 p = &tx_q->dma_tx[tx_q->cur_tx];
4104
4105 if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
4106 return false;
4107
4108 stmmac_set_tx_owner(priv, p);
4109 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4110 return true;
4111 }
4112
4113 /**
4114 * stmmac_tso_allocator - Allocate descriptors for the TSO payload
4115 * @priv: driver private structure
4116 * @des: buffer start address
4117 * @total_len: total length to fill in descriptors
4118 * @last_segment: condition for the last descriptor
4119 * @queue: TX queue index
4120 * Description:
4121 * This function fills descriptors and requests new descriptors according to
4122 * the buffer length to fill
4123 */
4124 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4125 int total_len, bool last_segment, u32 queue)
4126 {
4127 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4128 struct dma_desc *desc;
4129 u32 buff_size;
4130 int tmp_len;
4131
4132 tmp_len = total_len;
4133
4134 while (tmp_len > 0) {
4135 dma_addr_t curr_addr;
4136
4137 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4138 priv->dma_conf.dma_tx_size);
4139 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4140
4141 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4142 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4143 else
4144 desc = &tx_q->dma_tx[tx_q->cur_tx];
4145
4146 curr_addr = des + (total_len - tmp_len);
4147 stmmac_set_desc_addr(priv, desc, curr_addr);
4148 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4149 TSO_MAX_BUFF_SIZE : tmp_len;
4150
4151 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4152 0, 1,
4153 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4154 0, 0);
4155
4156 tmp_len -= TSO_MAX_BUFF_SIZE;
4157 }
4158 }
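
/* A minimal illustrative sketch (the helper name is hypothetical and not part
 * of the driver): since stmmac_tso_allocator() above consumes the payload in
 * TSO_MAX_BUFF_SIZE chunks, a payload of @len bytes needs roughly the number
 * of data descriptors computed below, which the availability check in
 * stmmac_tso_xmit() accounts for conservatively.
 */
static inline unsigned int stmmac_example_tso_desc_count(unsigned int len)
{
	return DIV_ROUND_UP(len, TSO_MAX_BUFF_SIZE);
}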
4159
4160 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4161 {
4162 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4163 int desc_size;
4164
4165 if (likely(priv->extend_desc))
4166 desc_size = sizeof(struct dma_extended_desc);
4167 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4168 desc_size = sizeof(struct dma_edesc);
4169 else
4170 desc_size = sizeof(struct dma_desc);
4171
4172 /* The OWN bit must be the last thing written when preparing the
4173 * descriptor, and a barrier is then needed to make sure that
4174 * all is coherent before granting control to the DMA engine.
4175 */
4176 wmb();
4177
4178 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4179 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4180 }
4181
4182 /**
4183 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4184 * @skb : the socket buffer
4185 * @dev : device pointer
4186 * Description: this is the transmit function that is called on TSO frames
4187 * (support available on GMAC4 and newer chips).
4188 * Diagram below show the ring programming in case of TSO frames:
4189 *
4190 * First Descriptor
4191 * --------
4192 * | DES0 |---> buffer1 = L2/L3/L4 header
4193 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4194 * | | width is 32-bit, but we never use it.
4195 * | | Also can be used as the most-significant 8-bits or 16-bits of
4196 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4197 * | | or 48-bit, and we always use it.
4198 * | DES2 |---> buffer1 len
4199 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4200 * --------
4201 * --------
4202 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4203 * | DES1 |---> same as the First Descriptor
4204 * | DES2 |---> buffer1 len
4205 * | DES3 |
4206 * --------
4207 * |
4208 * ...
4209 * |
4210 * --------
4211 * | DES0 |---> buffer1 = Split TCP Payload
4212 * | DES1 |---> same as the First Descriptor
4213 * | DES2 |---> buffer1 len
4214 * | DES3 |
4215 * --------
4216 *
4217 * MSS is fixed while TSO is enabled, so the TDES3 context field does not need to be programmed for every frame.
4218 */
4219 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4220 {
4221 struct dma_desc *desc, *first, *mss_desc = NULL;
4222 struct stmmac_priv *priv = netdev_priv(dev);
4223 unsigned int first_entry, tx_packets;
4224 struct stmmac_txq_stats *txq_stats;
4225 struct stmmac_tx_queue *tx_q;
4226 u32 pay_len, mss, queue;
4227 int i, first_tx, nfrags;
4228 u8 proto_hdr_len, hdr;
4229 dma_addr_t des;
4230 bool set_ic;
4231
4232 /* Always insert VLAN tag to SKB payload for TSO frames.
4233 *
4234 * Never insert the VLAN tag by HW, since segments split by the
4235 * TSO engine would be un-tagged by mistake.
4236 */
4237 if (skb_vlan_tag_present(skb)) {
4238 skb = __vlan_hwaccel_push_inside(skb);
4239 if (unlikely(!skb)) {
4240 priv->xstats.tx_dropped++;
4241 return NETDEV_TX_OK;
4242 }
4243 }
4244
4245 nfrags = skb_shinfo(skb)->nr_frags;
4246 queue = skb_get_queue_mapping(skb);
4247
4248 tx_q = &priv->dma_conf.tx_queue[queue];
4249 txq_stats = &priv->xstats.txq_stats[queue];
4250 first_tx = tx_q->cur_tx;
4251
4252 /* Compute header lengths */
4253 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4254 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4255 hdr = sizeof(struct udphdr);
4256 } else {
4257 proto_hdr_len = skb_tcp_all_headers(skb);
4258 hdr = tcp_hdrlen(skb);
4259 }
4260
4261 /* Desc availability based on threshold should be safe enough */
4262 if (unlikely(stmmac_tx_avail(priv, queue) <
4263 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4264 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4265 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4266 queue));
4267 /* This is a hard error, log it. */
4268 netdev_err(priv->dev,
4269 "%s: Tx Ring full when queue awake\n",
4270 __func__);
4271 }
4272 return NETDEV_TX_BUSY;
4273 }
4274
4275 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4276
4277 mss = skb_shinfo(skb)->gso_size;
4278
4279 /* set new MSS value if needed */
4280 if (mss != tx_q->mss) {
4281 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4282 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4283 else
4284 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4285
4286 stmmac_set_mss(priv, mss_desc, mss);
4287 tx_q->mss = mss;
4288 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4289 priv->dma_conf.dma_tx_size);
4290 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4291 }
4292
4293 if (netif_msg_tx_queued(priv)) {
4294 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4295 __func__, hdr, proto_hdr_len, pay_len, mss);
4296 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4297 skb->data_len);
4298 }
4299
4300 first_entry = tx_q->cur_tx;
4301 WARN_ON(tx_q->tx_skbuff[first_entry]);
4302
4303 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4304 desc = &tx_q->dma_entx[first_entry].basic;
4305 else
4306 desc = &tx_q->dma_tx[first_entry];
4307 first = desc;
4308
4309 /* first descriptor: fill Headers on Buf1 */
4310 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4311 DMA_TO_DEVICE);
4312 if (dma_mapping_error(priv->device, des))
4313 goto dma_map_err;
4314
4315 stmmac_set_desc_addr(priv, first, des);
4316 stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
4317 (nfrags == 0), queue);
4318
4319 /* In case two or more DMA transmit descriptors are allocated for this
4320 * non-paged SKB data, the DMA buffer address should be saved to
4321 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
4322 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
4323 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
4324 * since the tail areas of the DMA buffer can be accessed by DMA engine
4325 * sooner or later.
4326 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
4327 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
4328 * this DMA buffer right after the DMA engine completely finishes the
4329 * full buffer transmission.
4330 */
4331 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4332 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
4333 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
4334 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4335
4336 /* Prepare fragments */
4337 for (i = 0; i < nfrags; i++) {
4338 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4339
4340 des = skb_frag_dma_map(priv->device, frag, 0,
4341 skb_frag_size(frag),
4342 DMA_TO_DEVICE);
4343 if (dma_mapping_error(priv->device, des))
4344 goto dma_map_err;
4345
4346 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4347 (i == nfrags - 1), queue);
4348
4349 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4350 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4351 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4352 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4353 }
4354
4355 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4356
4357 /* Only the last descriptor gets to point to the skb. */
4358 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4359 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4360
4361 /* Manage tx mitigation */
4362 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4363 tx_q->tx_count_frames += tx_packets;
4364
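	/* Raise the IC (interrupt on completion) bit when this frame is being
	 * hardware timestamped or when the per-queue frame coalescing
	 * threshold has been crossed; otherwise rely on the Tx timer to
	 * clean the ring later.
	 */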
4365 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4366 set_ic = true;
4367 else if (!priv->tx_coal_frames[queue])
4368 set_ic = false;
4369 else if (tx_packets > priv->tx_coal_frames[queue])
4370 set_ic = true;
4371 else if ((tx_q->tx_count_frames %
4372 priv->tx_coal_frames[queue]) < tx_packets)
4373 set_ic = true;
4374 else
4375 set_ic = false;
4376
4377 if (set_ic) {
4378 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4379 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4380 else
4381 desc = &tx_q->dma_tx[tx_q->cur_tx];
4382
4383 tx_q->tx_count_frames = 0;
4384 stmmac_set_tx_ic(priv, desc);
4385 }
4386
4387 /* We've used all descriptors we need for this skb, however,
4388 * advance cur_tx so that it references a fresh descriptor.
4389 * ndo_start_xmit will fill this descriptor the next time it's
4390 * called and stmmac_tx_clean may clean up to this descriptor.
4391 */
4392 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4393
4394 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4395 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4396 __func__);
4397 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4398 }
4399
4400 u64_stats_update_begin(&txq_stats->q_syncp);
4401 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4402 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4403 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4404 if (set_ic)
4405 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4406 u64_stats_update_end(&txq_stats->q_syncp);
4407
4408 if (priv->sarc_type)
4409 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4410
4411 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4412 priv->hwts_tx_en)) {
4413 /* declare that device is doing timestamping */
4414 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4415 stmmac_enable_tx_timestamp(priv, first);
4416 }
4417
4418 /* Complete the first descriptor before granting the DMA */
4419 stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
4420 tx_q->tx_skbuff_dma[first_entry].last_segment,
4421 hdr / 4, (skb->len - proto_hdr_len));
4422
4423 /* If context desc is used to change MSS */
4424 if (mss_desc) {
4425 /* Make sure that the first descriptor has been completely
4426 * written, including its OWN bit. This is because the MSS
4427 * descriptor actually precedes the first descriptor, so its
4428 * OWN bit must be the last thing written.
4429 */
4430 dma_wmb();
4431 stmmac_set_tx_owner(priv, mss_desc);
4432 }
4433
4434 if (netif_msg_pktdata(priv)) {
4435 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4436 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4437 tx_q->cur_tx, first, nfrags);
4438 pr_info(">>> frame to be transmitted: ");
4439 print_pkt(skb->data, skb_headlen(skb));
4440 }
4441
4442 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4443 skb_tx_timestamp(skb);
4444
4445 stmmac_flush_tx_descriptors(priv, queue);
4446 stmmac_tx_timer_arm(priv, queue);
4447
4448 return NETDEV_TX_OK;
4449
4450 dma_map_err:
4451 dev_err(priv->device, "Tx dma map failed\n");
4452 dev_kfree_skb(skb);
4453 priv->xstats.tx_dropped++;
4454 return NETDEV_TX_OK;
4455 }
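
/* A minimal illustrative sketch of the DES0/DES1 usage described in the
 * descriptor diagram above (the helper name is hypothetical and not used by
 * the driver): with a DMA AXI address width wider than 32 bits, the low
 * 32 bits of the buffer1 address go into DES0 and the remaining
 * most-significant bits into DES1.
 */
static inline void stmmac_example_split_buf1_addr(dma_addr_t addr,
						  u32 *des0, u32 *des1)
{
	*des0 = lower_32_bits(addr);	/* buffer1 address, low 32 bits */
	*des1 = upper_32_bits(addr);	/* upper 8/16 bits for 40/48-bit AXI */
}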
4456
4457 /**
4458 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4459 * @skb: socket buffer to check
4460 *
4461 * Check if a packet has an ethertype that will trigger the IP header checks
4462 * and IP/TCP checksum engine of the stmmac core.
4463 *
4464 * Return: true if the ethertype can trigger the checksum engine, false
4465 * otherwise
4466 */
4467 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4468 {
4469 int depth = 0;
4470 __be16 proto;
4471
4472 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4473 &depth);
4474
4475 return (depth <= ETH_HLEN) &&
4476 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4477 }
4478
4479 /**
4480 * stmmac_xmit - Tx entry point of the driver
4481 * @skb : the socket buffer
4482 * @dev : device pointer
4483 * Description : this is the tx entry point of the driver.
4484 * It programs the chain or the ring and supports oversized frames
4485 * and SG feature.
4486 */
4487 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4488 {
4489 unsigned int first_entry, tx_packets, enh_desc;
4490 struct stmmac_priv *priv = netdev_priv(dev);
4491 unsigned int nopaged_len = skb_headlen(skb);
4492 int i, csum_insertion = 0, is_jumbo = 0;
4493 u32 queue = skb_get_queue_mapping(skb);
4494 int nfrags = skb_shinfo(skb)->nr_frags;
4495 int gso = skb_shinfo(skb)->gso_type;
4496 struct stmmac_txq_stats *txq_stats;
4497 struct dma_edesc *tbs_desc = NULL;
4498 struct dma_desc *desc, *first;
4499 struct stmmac_tx_queue *tx_q;
4500 bool has_vlan, set_ic;
4501 int entry, first_tx;
4502 dma_addr_t des;
4503 u32 sdu_len;
4504
4505 tx_q = &priv->dma_conf.tx_queue[queue];
4506 txq_stats = &priv->xstats.txq_stats[queue];
4507 first_tx = tx_q->cur_tx;
4508
4509 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4510 stmmac_stop_sw_lpi(priv);
4511
4512 /* Manage oversized TCP frames for GMAC4 device */
4513 if (skb_is_gso(skb) && priv->tso) {
4514 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4515 return stmmac_tso_xmit(skb, dev);
4516 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4517 return stmmac_tso_xmit(skb, dev);
4518 }
4519
4520 if (priv->est && priv->est->enable &&
4521 priv->est->max_sdu[queue]) {
4522 sdu_len = skb->len;
4523 /* Add VLAN tag length if VLAN tag insertion offload is requested */
4524 if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
4525 sdu_len += VLAN_HLEN;
4526 if (sdu_len > priv->est->max_sdu[queue]) {
4527 priv->xstats.max_sdu_txq_drop[queue]++;
4528 goto max_sdu_err;
4529 }
4530 }
4531
4532 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4533 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4534 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4535 queue));
4536 /* This is a hard error, log it. */
4537 netdev_err(priv->dev,
4538 "%s: Tx Ring full when queue awake\n",
4539 __func__);
4540 }
4541 return NETDEV_TX_BUSY;
4542 }
4543
4544 /* Check if VLAN can be inserted by HW */
4545 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4546
4547 entry = tx_q->cur_tx;
4548 first_entry = entry;
4549 WARN_ON(tx_q->tx_skbuff[first_entry]);
4550
4551 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4552 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4553 * queues. In that case, checksum offloading for those queues that don't
4554 * support tx coe needs to fall back to software checksum calculation.
4555 *
4556 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4557 * also have to be checksummed in software.
4558 */
4559 if (csum_insertion &&
4560 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4561 !stmmac_has_ip_ethertype(skb))) {
4562 if (unlikely(skb_checksum_help(skb)))
4563 goto dma_map_err;
4564 csum_insertion = !csum_insertion;
4565 }
4566
4567 if (likely(priv->extend_desc))
4568 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4569 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4570 desc = &tx_q->dma_entx[entry].basic;
4571 else
4572 desc = tx_q->dma_tx + entry;
4573
4574 first = desc;
4575
4576 if (has_vlan)
4577 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4578
4579 enh_desc = priv->plat->enh_desc;
4580 /* To program the descriptors according to the size of the frame */
4581 if (enh_desc)
4582 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4583
4584 if (unlikely(is_jumbo)) {
4585 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4586 if (unlikely(entry < 0) && (entry != -EINVAL))
4587 goto dma_map_err;
4588 }
4589
4590 for (i = 0; i < nfrags; i++) {
4591 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4592 int len = skb_frag_size(frag);
4593 bool last_segment = (i == (nfrags - 1));
4594
4595 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4596 WARN_ON(tx_q->tx_skbuff[entry]);
4597
4598 if (likely(priv->extend_desc))
4599 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4600 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4601 desc = &tx_q->dma_entx[entry].basic;
4602 else
4603 desc = tx_q->dma_tx + entry;
4604
4605 des = skb_frag_dma_map(priv->device, frag, 0, len,
4606 DMA_TO_DEVICE);
4607 if (dma_mapping_error(priv->device, des))
4608 goto dma_map_err; /* should reuse desc w/o issues */
4609
4610 tx_q->tx_skbuff_dma[entry].buf = des;
4611
4612 stmmac_set_desc_addr(priv, desc, des);
4613
4614 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4615 tx_q->tx_skbuff_dma[entry].len = len;
4616 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4617 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4618
4619 /* Prepare the descriptor and set the own bit too */
4620 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4621 priv->mode, 1, last_segment, skb->len);
4622 }
4623
4624 /* Only the last descriptor gets to point to the skb. */
4625 tx_q->tx_skbuff[entry] = skb;
4626 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4627
4628 /* According to the coalesce parameter the IC bit for the latest
4629 * segment is reset and the timer re-started to clean the tx status.
4630 * This approach takes care of the fragments: desc is the first
4631 * element in case of no SG.
4632 */
4633 tx_packets = (entry + 1) - first_tx;
4634 tx_q->tx_count_frames += tx_packets;
4635
4636 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4637 set_ic = true;
4638 else if (!priv->tx_coal_frames[queue])
4639 set_ic = false;
4640 else if (tx_packets > priv->tx_coal_frames[queue])
4641 set_ic = true;
4642 else if ((tx_q->tx_count_frames %
4643 priv->tx_coal_frames[queue]) < tx_packets)
4644 set_ic = true;
4645 else
4646 set_ic = false;
4647
4648 if (set_ic) {
4649 if (likely(priv->extend_desc))
4650 desc = &tx_q->dma_etx[entry].basic;
4651 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4652 desc = &tx_q->dma_entx[entry].basic;
4653 else
4654 desc = &tx_q->dma_tx[entry];
4655
4656 tx_q->tx_count_frames = 0;
4657 stmmac_set_tx_ic(priv, desc);
4658 }
4659
4660 /* We've used all descriptors we need for this skb, however,
4661 * advance cur_tx so that it references a fresh descriptor.
4662 * ndo_start_xmit will fill this descriptor the next time it's
4663 * called and stmmac_tx_clean may clean up to this descriptor.
4664 */
4665 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4666 tx_q->cur_tx = entry;
4667
4668 if (netif_msg_pktdata(priv)) {
4669 netdev_dbg(priv->dev,
4670 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4671 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4672 entry, first, nfrags);
4673
4674 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4675 print_pkt(skb->data, skb->len);
4676 }
4677
4678 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4679 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4680 __func__);
4681 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4682 }
4683
4684 u64_stats_update_begin(&txq_stats->q_syncp);
4685 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4686 if (set_ic)
4687 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4688 u64_stats_update_end(&txq_stats->q_syncp);
4689
4690 if (priv->sarc_type)
4691 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4692
4693 /* Ready to fill the first descriptor and set the OWN bit w/o any
4694 * problems because all the descriptors are actually ready to be
4695 * passed to the DMA engine.
4696 */
4697 if (likely(!is_jumbo)) {
4698 bool last_segment = (nfrags == 0);
4699
4700 des = dma_map_single(priv->device, skb->data,
4701 nopaged_len, DMA_TO_DEVICE);
4702 if (dma_mapping_error(priv->device, des))
4703 goto dma_map_err;
4704
4705 tx_q->tx_skbuff_dma[first_entry].buf = des;
4706 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4707 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4708
4709 stmmac_set_desc_addr(priv, first, des);
4710
4711 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4712 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4713
4714 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4715 priv->hwts_tx_en)) {
4716 /* declare that device is doing timestamping */
4717 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4718 stmmac_enable_tx_timestamp(priv, first);
4719 }
4720
4721 /* Prepare the first descriptor setting the OWN bit too */
4722 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4723 csum_insertion, priv->mode, 0, last_segment,
4724 skb->len);
4725 }
4726
4727 if (tx_q->tbs & STMMAC_TBS_EN) {
4728 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4729
4730 tbs_desc = &tx_q->dma_entx[first_entry];
4731 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4732 }
4733
4734 stmmac_set_tx_owner(priv, first);
4735
4736 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4737
4738 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4739 skb_tx_timestamp(skb);
4740 stmmac_flush_tx_descriptors(priv, queue);
4741 stmmac_tx_timer_arm(priv, queue);
4742
4743 return NETDEV_TX_OK;
4744
4745 dma_map_err:
4746 netdev_err(priv->dev, "Tx DMA map failed\n");
4747 max_sdu_err:
4748 dev_kfree_skb(skb);
4749 priv->xstats.tx_dropped++;
4750 return NETDEV_TX_OK;
4751 }
4752
4753 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4754 {
4755 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4756 __be16 vlan_proto = veth->h_vlan_proto;
4757 u16 vlanid;
4758
4759 if ((vlan_proto == htons(ETH_P_8021Q) &&
4760 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4761 (vlan_proto == htons(ETH_P_8021AD) &&
4762 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4763 /* pop the vlan tag */
4764 vlanid = ntohs(veth->h_vlan_TCI);
4765 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4766 skb_pull(skb, VLAN_HLEN);
4767 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4768 }
4769 }
4770
4771 /**
4772 * stmmac_rx_refill - refill used skb preallocated buffers
4773 * @priv: driver private structure
4774 * @queue: RX queue index
4775 * Description : this is to reallocate the skb for the reception process
4776 * that is based on zero-copy.
4777 */
4778 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4779 {
4780 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4781 int dirty = stmmac_rx_dirty(priv, queue);
4782 unsigned int entry = rx_q->dirty_rx;
4783 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4784
4785 if (priv->dma_cap.host_dma_width <= 32)
4786 gfp |= GFP_DMA32;
4787
4788 while (dirty-- > 0) {
4789 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4790 struct dma_desc *p;
4791 bool use_rx_wd;
4792
4793 if (priv->extend_desc)
4794 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4795 else
4796 p = rx_q->dma_rx + entry;
4797
4798 if (!buf->page) {
4799 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4800 if (!buf->page)
4801 break;
4802 }
4803
4804 if (priv->sph && !buf->sec_page) {
4805 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4806 if (!buf->sec_page)
4807 break;
4808
4809 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4810 }
4811
4812 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4813
4814 stmmac_set_desc_addr(priv, p, buf->addr);
4815 if (priv->sph)
4816 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4817 else
4818 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4819 stmmac_refill_desc3(priv, rx_q, p);
4820
4821 rx_q->rx_count_frames++;
4822 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4823 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4824 rx_q->rx_count_frames = 0;
4825
4826 use_rx_wd = !priv->rx_coal_frames[queue];
4827 use_rx_wd |= rx_q->rx_count_frames > 0;
4828 if (!priv->use_riwt)
4829 use_rx_wd = false;
4830
4831 dma_wmb();
4832 stmmac_set_rx_owner(priv, p, use_rx_wd);
4833
4834 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4835 }
4836 rx_q->dirty_rx = entry;
4837 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4838 (rx_q->dirty_rx * sizeof(struct dma_desc));
4839 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4840 }
4841
4842 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4843 struct dma_desc *p,
4844 int status, unsigned int len)
4845 {
4846 unsigned int plen = 0, hlen = 0;
4847 int coe = priv->hw->rx_csum;
4848
4849 /* Not first descriptor, buffer is always zero */
4850 if (priv->sph && len)
4851 return 0;
4852
4853 /* First descriptor, get split header length */
4854 stmmac_get_rx_header_len(priv, p, &hlen);
4855 if (priv->sph && hlen) {
4856 priv->xstats.rx_split_hdr_pkt_n++;
4857 return hlen;
4858 }
4859
4860 /* First descriptor, not last descriptor and not split header */
4861 if (status & rx_not_ls)
4862 return priv->dma_conf.dma_buf_sz;
4863
4864 plen = stmmac_get_rx_frame_len(priv, p, coe);
4865
4866 /* First descriptor and last descriptor and not split header */
4867 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4868 }
4869
4870 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4871 struct dma_desc *p,
4872 int status, unsigned int len)
4873 {
4874 int coe = priv->hw->rx_csum;
4875 unsigned int plen = 0;
4876
4877 /* Not split header, buffer is not available */
4878 if (!priv->sph)
4879 return 0;
4880
4881 /* Not last descriptor */
4882 if (status & rx_not_ls)
4883 return priv->dma_conf.dma_buf_sz;
4884
4885 plen = stmmac_get_rx_frame_len(priv, p, coe);
4886
4887 /* Last descriptor */
4888 return plen - len;
4889 }
4890
4891 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4892 struct xdp_frame *xdpf, bool dma_map)
4893 {
4894 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4895 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4896 bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
4897 unsigned int entry = tx_q->cur_tx;
4898 struct dma_desc *tx_desc;
4899 dma_addr_t dma_addr;
4900 bool set_ic;
4901
4902 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4903 return STMMAC_XDP_CONSUMED;
4904
4905 if (priv->est && priv->est->enable &&
4906 priv->est->max_sdu[queue] &&
4907 xdpf->len > priv->est->max_sdu[queue]) {
4908 priv->xstats.max_sdu_txq_drop[queue]++;
4909 return STMMAC_XDP_CONSUMED;
4910 }
4911
4912 if (likely(priv->extend_desc))
4913 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4914 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4915 tx_desc = &tx_q->dma_entx[entry].basic;
4916 else
4917 tx_desc = tx_q->dma_tx + entry;
4918
4919 if (dma_map) {
4920 dma_addr = dma_map_single(priv->device, xdpf->data,
4921 xdpf->len, DMA_TO_DEVICE);
4922 if (dma_mapping_error(priv->device, dma_addr))
4923 return STMMAC_XDP_CONSUMED;
4924
4925 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4926 } else {
4927 struct page *page = virt_to_page(xdpf->data);
4928
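		/* For XDP_TX, the struct xdp_frame sits at the start of the
		 * page_pool buffer, so the payload DMA address is the page's
		 * DMA address plus the frame struct and the reserved headroom.
		 */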
4929 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4930 xdpf->headroom;
4931 dma_sync_single_for_device(priv->device, dma_addr,
4932 xdpf->len, DMA_BIDIRECTIONAL);
4933
4934 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4935 }
4936
4937 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4938 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4939 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4940 tx_q->tx_skbuff_dma[entry].last_segment = true;
4941 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4942
4943 tx_q->xdpf[entry] = xdpf;
4944
4945 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4946
4947 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4948 csum, priv->mode, true, true,
4949 xdpf->len);
4950
4951 tx_q->tx_count_frames++;
4952
4953 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4954 set_ic = true;
4955 else
4956 set_ic = false;
4957
4958 if (set_ic) {
4959 tx_q->tx_count_frames = 0;
4960 stmmac_set_tx_ic(priv, tx_desc);
4961 u64_stats_update_begin(&txq_stats->q_syncp);
4962 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4963 u64_stats_update_end(&txq_stats->q_syncp);
4964 }
4965
4966 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4967
4968 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4969 tx_q->cur_tx = entry;
4970
4971 return STMMAC_XDP_TX;
4972 }
4973
4974 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4975 int cpu)
4976 {
4977 int index = cpu;
4978
4979 if (unlikely(index < 0))
4980 index = 0;
4981
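	/* Fold the CPU number onto the available TX queues, i.e. compute
	 * index % tx_queues_to_use without a division.
	 */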
4982 while (index >= priv->plat->tx_queues_to_use)
4983 index -= priv->plat->tx_queues_to_use;
4984
4985 return index;
4986 }
4987
4988 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4989 struct xdp_buff *xdp)
4990 {
4991 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4992 int cpu = smp_processor_id();
4993 struct netdev_queue *nq;
4994 int queue;
4995 int res;
4996
4997 if (unlikely(!xdpf))
4998 return STMMAC_XDP_CONSUMED;
4999
5000 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5001 nq = netdev_get_tx_queue(priv->dev, queue);
5002
5003 __netif_tx_lock(nq, cpu);
5004 /* Avoids TX time-out as we are sharing with slow path */
5005 txq_trans_cond_update(nq);
5006
5007 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
5008 if (res == STMMAC_XDP_TX)
5009 stmmac_flush_tx_descriptors(priv, queue);
5010
5011 __netif_tx_unlock(nq);
5012
5013 return res;
5014 }
5015
5016 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5017 struct bpf_prog *prog,
5018 struct xdp_buff *xdp)
5019 {
5020 u32 act;
5021 int res;
5022
5023 act = bpf_prog_run_xdp(prog, xdp);
5024 switch (act) {
5025 case XDP_PASS:
5026 res = STMMAC_XDP_PASS;
5027 break;
5028 case XDP_TX:
5029 res = stmmac_xdp_xmit_back(priv, xdp);
5030 break;
5031 case XDP_REDIRECT:
5032 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5033 res = STMMAC_XDP_CONSUMED;
5034 else
5035 res = STMMAC_XDP_REDIRECT;
5036 break;
5037 default:
5038 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5039 fallthrough;
5040 case XDP_ABORTED:
5041 trace_xdp_exception(priv->dev, prog, act);
5042 fallthrough;
5043 case XDP_DROP:
5044 res = STMMAC_XDP_CONSUMED;
5045 break;
5046 }
5047
5048 return res;
5049 }
5050
5051 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5052 struct xdp_buff *xdp)
5053 {
5054 struct bpf_prog *prog;
5055 int res;
5056
5057 prog = READ_ONCE(priv->xdp_prog);
5058 if (!prog) {
5059 res = STMMAC_XDP_PASS;
5060 goto out;
5061 }
5062
5063 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5064 out:
5065 return ERR_PTR(-res);
5066 }
5067
5068 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5069 int xdp_status)
5070 {
5071 int cpu = smp_processor_id();
5072 int queue;
5073
5074 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5075
5076 if (xdp_status & STMMAC_XDP_TX)
5077 stmmac_tx_timer_arm(priv, queue);
5078
5079 if (xdp_status & STMMAC_XDP_REDIRECT)
5080 xdp_do_flush();
5081 }
5082
5083 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5084 struct xdp_buff *xdp)
5085 {
5086 unsigned int metasize = xdp->data - xdp->data_meta;
5087 unsigned int datasize = xdp->data_end - xdp->data;
5088 struct sk_buff *skb;
5089
5090 skb = napi_alloc_skb(&ch->rxtx_napi,
5091 xdp->data_end - xdp->data_hard_start);
5092 if (unlikely(!skb))
5093 return NULL;
5094
5095 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5096 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5097 if (metasize)
5098 skb_metadata_set(skb, metasize);
5099
5100 return skb;
5101 }
5102
5103 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5104 struct dma_desc *p, struct dma_desc *np,
5105 struct xdp_buff *xdp)
5106 {
5107 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5108 struct stmmac_channel *ch = &priv->channel[queue];
5109 unsigned int len = xdp->data_end - xdp->data;
5110 enum pkt_hash_types hash_type;
5111 int coe = priv->hw->rx_csum;
5112 struct sk_buff *skb;
5113 u32 hash;
5114
5115 skb = stmmac_construct_skb_zc(ch, xdp);
5116 if (!skb) {
5117 priv->xstats.rx_dropped++;
5118 return;
5119 }
5120
5121 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5122 if (priv->hw->hw_vlan_en)
5123 /* MAC level stripping. */
5124 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5125 else
5126 /* Driver level stripping. */
5127 stmmac_rx_vlan(priv->dev, skb);
5128 skb->protocol = eth_type_trans(skb, priv->dev);
5129
5130 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5131 skb_checksum_none_assert(skb);
5132 else
5133 skb->ip_summed = CHECKSUM_UNNECESSARY;
5134
5135 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5136 skb_set_hash(skb, hash, hash_type);
5137
5138 skb_record_rx_queue(skb, queue);
5139 napi_gro_receive(&ch->rxtx_napi, skb);
5140
5141 u64_stats_update_begin(&rxq_stats->napi_syncp);
5142 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5143 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5144 u64_stats_update_end(&rxq_stats->napi_syncp);
5145 }
5146
5147 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5148 {
5149 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5150 unsigned int entry = rx_q->dirty_rx;
5151 struct dma_desc *rx_desc = NULL;
5152 bool ret = true;
5153
5154 budget = min(budget, stmmac_rx_dirty(priv, queue));
5155
5156 while (budget-- > 0 && entry != rx_q->cur_rx) {
5157 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5158 dma_addr_t dma_addr;
5159 bool use_rx_wd;
5160
5161 if (!buf->xdp) {
5162 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5163 if (!buf->xdp) {
5164 ret = false;
5165 break;
5166 }
5167 }
5168
5169 if (priv->extend_desc)
5170 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5171 else
5172 rx_desc = rx_q->dma_rx + entry;
5173
5174 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5175 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5176 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5177 stmmac_refill_desc3(priv, rx_q, rx_desc);
5178
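/* Decide whether this descriptor relies on the RX interrupt watchdog
 * (RIWT) rather than an immediate completion interrupt, based on the
 * RX frame-coalescing counters.
 */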
5179 rx_q->rx_count_frames++;
5180 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5181 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5182 rx_q->rx_count_frames = 0;
5183
5184 use_rx_wd = !priv->rx_coal_frames[queue];
5185 use_rx_wd |= rx_q->rx_count_frames > 0;
5186 if (!priv->use_riwt)
5187 use_rx_wd = false;
5188
5189 dma_wmb();
5190 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5191
5192 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5193 }
5194
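/* If any descriptor was refilled, publish the new tail pointer so the
 * DMA engine can use the replenished buffers.
 */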
5195 if (rx_desc) {
5196 rx_q->dirty_rx = entry;
5197 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5198 (rx_q->dirty_rx * sizeof(struct dma_desc));
5199 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5200 }
5201
5202 return ret;
5203 }
5204
5205 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5206 {
5207 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5208 * to represent incoming packet, whereas cb field in the same structure
5209 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5210 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5211 */
5212 return (struct stmmac_xdp_buff *)xdp;
5213 }
5214
5215 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5216 {
5217 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5218 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5219 unsigned int count = 0, error = 0, len = 0;
5220 int dirty = stmmac_rx_dirty(priv, queue);
5221 unsigned int next_entry = rx_q->cur_rx;
5222 u32 rx_errors = 0, rx_dropped = 0;
5223 unsigned int desc_size;
5224 struct bpf_prog *prog;
5225 bool failure = false;
5226 int xdp_status = 0;
5227 int status = 0;
5228
5229 if (netif_msg_rx_status(priv)) {
5230 void *rx_head;
5231
5232 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5233 if (priv->extend_desc) {
5234 rx_head = (void *)rx_q->dma_erx;
5235 desc_size = sizeof(struct dma_extended_desc);
5236 } else {
5237 rx_head = (void *)rx_q->dma_rx;
5238 desc_size = sizeof(struct dma_desc);
5239 }
5240
5241 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5242 rx_q->dma_rx_phy, desc_size);
5243 }
5244 while (count < limit) {
5245 struct stmmac_rx_buffer *buf;
5246 struct stmmac_xdp_buff *ctx;
5247 unsigned int buf1_len = 0;
5248 struct dma_desc *np, *p;
5249 int entry;
5250 int res;
5251
5252 if (!count && rx_q->state_saved) {
5253 error = rx_q->state.error;
5254 len = rx_q->state.len;
5255 } else {
5256 rx_q->state_saved = false;
5257 error = 0;
5258 len = 0;
5259 }
5260
5261 if (count >= limit)
5262 break;
5263
5264 read_again:
5265 buf1_len = 0;
5266 entry = next_entry;
5267 buf = &rx_q->buf_pool[entry];
5268
5269 if (dirty >= STMMAC_RX_FILL_BATCH) {
5270 failure = failure ||
5271 !stmmac_rx_refill_zc(priv, queue, dirty);
5272 dirty = 0;
5273 }
5274
5275 if (priv->extend_desc)
5276 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5277 else
5278 p = rx_q->dma_rx + entry;
5279
5280 /* read the status of the incoming frame */
5281 status = stmmac_rx_status(priv, &priv->xstats, p);
5282 /* check if managed by the DMA otherwise go ahead */
5283 if (unlikely(status & dma_own))
5284 break;
5285
5286 /* Prefetch the next RX descriptor */
5287 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5288 priv->dma_conf.dma_rx_size);
5289 next_entry = rx_q->cur_rx;
5290
5291 if (priv->extend_desc)
5292 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5293 else
5294 np = rx_q->dma_rx + next_entry;
5295
5296 prefetch(np);
5297
5298 /* Ensure a valid XSK buffer before proceeding */
5299 if (!buf->xdp)
5300 break;
5301
5302 if (priv->extend_desc)
5303 stmmac_rx_extended_status(priv, &priv->xstats,
5304 rx_q->dma_erx + entry);
5305 if (unlikely(status == discard_frame)) {
5306 xsk_buff_free(buf->xdp);
5307 buf->xdp = NULL;
5308 dirty++;
5309 error = 1;
5310 if (!priv->hwts_rx_en)
5311 rx_errors++;
5312 }
5313
5314 if (unlikely(error && (status & rx_not_ls)))
5315 goto read_again;
5316 if (unlikely(error)) {
5317 count++;
5318 continue;
5319 }
5320
5321 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5322 if (likely(status & rx_not_ls)) {
5323 xsk_buff_free(buf->xdp);
5324 buf->xdp = NULL;
5325 dirty++;
5326 count++;
5327 goto read_again;
5328 }
5329
5330 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5331 ctx->priv = priv;
5332 ctx->desc = p;
5333 ctx->ndesc = np;
5334
5335 /* XDP ZC frames only support primary buffers for now */
5336 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5337 len += buf1_len;
5338
5339 /* ACS is disabled; strip manually. */
5340 if (likely(!(status & rx_not_ls))) {
5341 buf1_len -= ETH_FCS_LEN;
5342 len -= ETH_FCS_LEN;
5343 }
5344
5345 /* RX buffer is good and fits into an XSK pool buffer */
5346 buf->xdp->data_end = buf->xdp->data + buf1_len;
5347 xsk_buff_dma_sync_for_cpu(buf->xdp);
5348
5349 prog = READ_ONCE(priv->xdp_prog);
5350 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5351
5352 switch (res) {
5353 case STMMAC_XDP_PASS:
5354 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5355 xsk_buff_free(buf->xdp);
5356 break;
5357 case STMMAC_XDP_CONSUMED:
5358 xsk_buff_free(buf->xdp);
5359 rx_dropped++;
5360 break;
5361 case STMMAC_XDP_TX:
5362 case STMMAC_XDP_REDIRECT:
5363 xdp_status |= res;
5364 break;
5365 }
5366
5367 buf->xdp = NULL;
5368 dirty++;
5369 count++;
5370 }
5371
5372 if (status & rx_not_ls) {
5373 rx_q->state_saved = true;
5374 rx_q->state.error = error;
5375 rx_q->state.len = len;
5376 }
5377
5378 stmmac_finalize_xdp_rx(priv, xdp_status);
5379
5380 u64_stats_update_begin(&rxq_stats->napi_syncp);
5381 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5382 u64_stats_update_end(&rxq_stats->napi_syncp);
5383
5384 priv->xstats.rx_dropped += rx_dropped;
5385 priv->xstats.rx_errors += rx_errors;
5386
5387 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5388 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5389 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5390 else
5391 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5392
5393 return (int)count;
5394 }
5395
5396 return failure ? limit : (int)count;
5397 }
5398
5399 /**
5400 * stmmac_rx - manage the receive process
5401 * @priv: driver private structure
5402 * @limit: NAPI budget
5403 * @queue: RX queue index.
5404 * Description: this is the function called by the NAPI poll method.
5405 * It gets all the frames inside the ring.
5406 */
5407 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5408 {
5409 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5410 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5411 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5412 struct stmmac_channel *ch = &priv->channel[queue];
5413 unsigned int count = 0, error = 0, len = 0;
5414 int status = 0, coe = priv->hw->rx_csum;
5415 unsigned int next_entry = rx_q->cur_rx;
5416 enum dma_data_direction dma_dir;
5417 unsigned int desc_size;
5418 struct sk_buff *skb = NULL;
5419 struct stmmac_xdp_buff ctx;
5420 int xdp_status = 0;
5421 int bufsz;
5422
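/* Round the XDP buffer size up to whole pages and cap the per-poll limit
 * so that a single pass cannot wrap the RX descriptor ring.
 */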
5423 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5424 bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5425 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5426
5427 if (netif_msg_rx_status(priv)) {
5428 void *rx_head;
5429
5430 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5431 if (priv->extend_desc) {
5432 rx_head = (void *)rx_q->dma_erx;
5433 desc_size = sizeof(struct dma_extended_desc);
5434 } else {
5435 rx_head = (void *)rx_q->dma_rx;
5436 desc_size = sizeof(struct dma_desc);
5437 }
5438
5439 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5440 rx_q->dma_rx_phy, desc_size);
5441 }
5442 while (count < limit) {
5443 unsigned int buf1_len = 0, buf2_len = 0;
5444 enum pkt_hash_types hash_type;
5445 struct stmmac_rx_buffer *buf;
5446 struct dma_desc *np, *p;
5447 int entry;
5448 u32 hash;
5449
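/* Resume a frame whose descriptors spilled over from the previous NAPI poll. */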
5450 if (!count && rx_q->state_saved) {
5451 skb = rx_q->state.skb;
5452 error = rx_q->state.error;
5453 len = rx_q->state.len;
5454 } else {
5455 rx_q->state_saved = false;
5456 skb = NULL;
5457 error = 0;
5458 len = 0;
5459 }
5460
5461 read_again:
5462 if (count >= limit)
5463 break;
5464
5465 buf1_len = 0;
5466 buf2_len = 0;
5467 entry = next_entry;
5468 buf = &rx_q->buf_pool[entry];
5469
5470 if (priv->extend_desc)
5471 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5472 else
5473 p = rx_q->dma_rx + entry;
5474
5475 /* read the status of the incoming frame */
5476 status = stmmac_rx_status(priv, &priv->xstats, p);
5477 /* check if managed by the DMA otherwise go ahead */
5478 if (unlikely(status & dma_own))
5479 break;
5480
5481 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5482 priv->dma_conf.dma_rx_size);
5483 next_entry = rx_q->cur_rx;
5484
5485 if (priv->extend_desc)
5486 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5487 else
5488 np = rx_q->dma_rx + next_entry;
5489
5490 prefetch(np);
5491
5492 if (priv->extend_desc)
5493 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5494 if (unlikely(status == discard_frame)) {
5495 page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5496 buf->page = NULL;
5497 error = 1;
5498 if (!priv->hwts_rx_en)
5499 rx_errors++;
5500 }
5501
5502 if (unlikely(error && (status & rx_not_ls)))
5503 goto read_again;
5504 if (unlikely(error)) {
5505 dev_kfree_skb(skb);
5506 skb = NULL;
5507 count++;
5508 continue;
5509 }
5510
5511 /* Buffer is good. Go on. */
5512
5513 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5514 len += buf1_len;
5515 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5516 len += buf2_len;
5517
5518 /* ACS is disabled; strip manually. */
5519 if (likely(!(status & rx_not_ls))) {
5520 if (buf2_len) {
5521 buf2_len -= ETH_FCS_LEN;
5522 len -= ETH_FCS_LEN;
5523 } else if (buf1_len) {
5524 buf1_len -= ETH_FCS_LEN;
5525 len -= ETH_FCS_LEN;
5526 }
5527 }
5528
5529 if (!skb) {
5530 unsigned int pre_len, sync_len;
5531
5532 dma_sync_single_for_cpu(priv->device, buf->addr,
5533 buf1_len, dma_dir);
5534 net_prefetch(page_address(buf->page) +
5535 buf->page_offset);
5536
5537 xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5538 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5539 buf->page_offset, buf1_len, true);
5540
5541 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5542 buf->page_offset;
5543
5544 ctx.priv = priv;
5545 ctx.desc = p;
5546 ctx.ndesc = np;
5547
5548 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5549 /* Due to xdp_adjust_tail: the DMA sync for_device must
5550 * cover the max length the CPU touched.
5551 */
5552 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5553 buf->page_offset;
5554 sync_len = max(sync_len, pre_len);
5555
5556 /* For non-XDP_PASS verdicts */
5557 if (IS_ERR(skb)) {
5558 unsigned int xdp_res = -PTR_ERR(skb);
5559
5560 if (xdp_res & STMMAC_XDP_CONSUMED) {
5561 page_pool_put_page(rx_q->page_pool,
5562 virt_to_head_page(ctx.xdp.data),
5563 sync_len, true);
5564 buf->page = NULL;
5565 rx_dropped++;
5566
5567 /* Clear skb, as it was used to carry
5568 * the XDP program's verdict.
5569 */
5570 skb = NULL;
5571
5572 if (unlikely((status & rx_not_ls)))
5573 goto read_again;
5574
5575 count++;
5576 continue;
5577 } else if (xdp_res & (STMMAC_XDP_TX |
5578 STMMAC_XDP_REDIRECT)) {
5579 xdp_status |= xdp_res;
5580 buf->page = NULL;
5581 skb = NULL;
5582 count++;
5583 continue;
5584 }
5585 }
5586 }
5587
5588 if (!skb) {
5589 unsigned int head_pad_len;
5590
5591 /* XDP program may expand or reduce tail */
5592 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5593
5594 skb = napi_build_skb(page_address(buf->page),
5595 rx_q->napi_skb_frag_size);
5596 if (!skb) {
5597 page_pool_recycle_direct(rx_q->page_pool,
5598 buf->page);
5599 rx_dropped++;
5600 count++;
5601 goto drain_data;
5602 }
5603
5604 /* XDP program may adjust header */
5605 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5606 skb_reserve(skb, head_pad_len);
5607 skb_put(skb, buf1_len);
5608 skb_mark_for_recycle(skb);
5609 buf->page = NULL;
5610 } else if (buf1_len) {
5611 dma_sync_single_for_cpu(priv->device, buf->addr,
5612 buf1_len, dma_dir);
5613 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5614 buf->page, buf->page_offset, buf1_len,
5615 priv->dma_conf.dma_buf_sz);
5616 buf->page = NULL;
5617 }
5618
5619 if (buf2_len) {
5620 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5621 buf2_len, dma_dir);
5622 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5623 buf->sec_page, 0, buf2_len,
5624 priv->dma_conf.dma_buf_sz);
5625 buf->sec_page = NULL;
5626 }
5627
5628 drain_data:
5629 if (likely(status & rx_not_ls))
5630 goto read_again;
5631 if (!skb)
5632 continue;
5633
5634 /* Got entire packet into SKB. Finish it. */
5635
5636 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5637
5638 if (priv->hw->hw_vlan_en)
5639 /* MAC level stripping. */
5640 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5641 else
5642 /* Driver level stripping. */
5643 stmmac_rx_vlan(priv->dev, skb);
5644
5645 skb->protocol = eth_type_trans(skb, priv->dev);
5646
5647 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5648 (status & csum_none))
5649 skb_checksum_none_assert(skb);
5650 else
5651 skb->ip_summed = CHECKSUM_UNNECESSARY;
5652
5653 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5654 skb_set_hash(skb, hash, hash_type);
5655
5656 skb_record_rx_queue(skb, queue);
5657 napi_gro_receive(&ch->rx_napi, skb);
5658 skb = NULL;
5659
5660 rx_packets++;
5661 rx_bytes += len;
5662 count++;
5663 }
5664
5665 if (status & rx_not_ls || skb) {
5666 rx_q->state_saved = true;
5667 rx_q->state.skb = skb;
5668 rx_q->state.error = error;
5669 rx_q->state.len = len;
5670 }
5671
5672 stmmac_finalize_xdp_rx(priv, xdp_status);
5673
5674 stmmac_rx_refill(priv, queue);
5675
5676 u64_stats_update_begin(&rxq_stats->napi_syncp);
5677 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5678 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5679 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5680 u64_stats_update_end(&rxq_stats->napi_syncp);
5681
5682 priv->xstats.rx_dropped += rx_dropped;
5683 priv->xstats.rx_errors += rx_errors;
5684
5685 return count;
5686 }
5687
5688 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5689 {
5690 struct stmmac_channel *ch =
5691 container_of(napi, struct stmmac_channel, rx_napi);
5692 struct stmmac_priv *priv = ch->priv_data;
5693 struct stmmac_rxq_stats *rxq_stats;
5694 u32 chan = ch->index;
5695 int work_done;
5696
5697 rxq_stats = &priv->xstats.rxq_stats[chan];
5698 u64_stats_update_begin(&rxq_stats->napi_syncp);
5699 u64_stats_inc(&rxq_stats->napi.poll);
5700 u64_stats_update_end(&rxq_stats->napi_syncp);
5701
5702 work_done = stmmac_rx(priv, budget, chan);
5703 if (work_done < budget && napi_complete_done(napi, work_done)) {
5704 unsigned long flags;
5705
5706 spin_lock_irqsave(&ch->lock, flags);
5707 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5708 spin_unlock_irqrestore(&ch->lock, flags);
5709 }
5710
5711 return work_done;
5712 }
5713
5714 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5715 {
5716 struct stmmac_channel *ch =
5717 container_of(napi, struct stmmac_channel, tx_napi);
5718 struct stmmac_priv *priv = ch->priv_data;
5719 struct stmmac_txq_stats *txq_stats;
5720 bool pending_packets = false;
5721 u32 chan = ch->index;
5722 int work_done;
5723
5724 txq_stats = &priv->xstats.txq_stats[chan];
5725 u64_stats_update_begin(&txq_stats->napi_syncp);
5726 u64_stats_inc(&txq_stats->napi.poll);
5727 u64_stats_update_end(&txq_stats->napi_syncp);
5728
5729 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5730 work_done = min(work_done, budget);
5731
5732 if (work_done < budget && napi_complete_done(napi, work_done)) {
5733 unsigned long flags;
5734
5735 spin_lock_irqsave(&ch->lock, flags);
5736 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5737 spin_unlock_irqrestore(&ch->lock, flags);
5738 }
5739
5740 /* TX still has packets to handle, check if we need to arm the TX timer */
5741 if (pending_packets)
5742 stmmac_tx_timer_arm(priv, chan);
5743
5744 return work_done;
5745 }
5746
5747 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5748 {
5749 struct stmmac_channel *ch =
5750 container_of(napi, struct stmmac_channel, rxtx_napi);
5751 struct stmmac_priv *priv = ch->priv_data;
5752 bool tx_pending_packets = false;
5753 int rx_done, tx_done, rxtx_done;
5754 struct stmmac_rxq_stats *rxq_stats;
5755 struct stmmac_txq_stats *txq_stats;
5756 u32 chan = ch->index;
5757
5758 rxq_stats = &priv->xstats.rxq_stats[chan];
5759 u64_stats_update_begin(&rxq_stats->napi_syncp);
5760 u64_stats_inc(&rxq_stats->napi.poll);
5761 u64_stats_update_end(&rxq_stats->napi_syncp);
5762
5763 txq_stats = &priv->xstats.txq_stats[chan];
5764 u64_stats_update_begin(&txq_stats->napi_syncp);
5765 u64_stats_inc(&txq_stats->napi.poll);
5766 u64_stats_update_end(&txq_stats->napi_syncp);
5767
5768 tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5769 tx_done = min(tx_done, budget);
5770
5771 rx_done = stmmac_rx_zc(priv, budget, chan);
5772
5773 rxtx_done = max(tx_done, rx_done);
5774
5775 /* If either TX or RX work is not complete, return budget
5776 * and keep polling
5777 */
5778 if (rxtx_done >= budget)
5779 return budget;
5780
5781 /* all work done, exit the polling mode */
5782 if (napi_complete_done(napi, rxtx_done)) {
5783 unsigned long flags;
5784
5785 spin_lock_irqsave(&ch->lock, flags);
5786 /* Both RX and TX work are complete,
5787 * so enable both RX & TX IRQs.
5788 */
5789 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5790 spin_unlock_irqrestore(&ch->lock, flags);
5791 }
5792
5793 /* TX still has packets to handle, check if we need to arm the TX timer */
5794 if (tx_pending_packets)
5795 stmmac_tx_timer_arm(priv, chan);
5796
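/* napi_complete_done() may have been called above, so report strictly less
 * than the full budget back to the NAPI core.
 */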
5797 return min(rxtx_done, budget - 1);
5798 }
5799
5800 /**
5801 * stmmac_tx_timeout
5802 * @dev : Pointer to net device structure
5803 * @txqueue: the index of the hanging transmit queue
5804 * Description: this function is called when a packet transmission fails to
5805 * complete within a reasonable time. The driver will mark the error in the
5806 * netdev structure and arrange for the device to be reset to a sane state
5807 * in order to transmit a new packet.
5808 */
5809 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5810 {
5811 struct stmmac_priv *priv = netdev_priv(dev);
5812
5813 stmmac_global_err(priv);
5814 }
5815
5816 /**
5817 * stmmac_set_rx_mode - entry point for multicast addressing
5818 * @dev : pointer to the device structure
5819 * Description:
5820 * This function is a driver entry point which gets called by the kernel
5821 * whenever multicast addresses must be enabled/disabled.
5822 * Return value:
5823 * void.
5824 *
5825 * FIXME: This may need RXC to be running, but it may be called with BH
5826 * disabled, which means we can't call phylink_rx_clk_stop*().
5827 */
5828 static void stmmac_set_rx_mode(struct net_device *dev)
5829 {
5830 struct stmmac_priv *priv = netdev_priv(dev);
5831
5832 stmmac_set_filter(priv, priv->hw, dev);
5833 }
5834
5835 /**
5836 * stmmac_change_mtu - entry point to change MTU size for the device.
5837 * @dev : device pointer.
5838 * @new_mtu : the new MTU size for the device.
5839 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5840 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5841 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5842 * Return value:
5843 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5844 * file on failure.
5845 */
5846 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5847 {
5848 struct stmmac_priv *priv = netdev_priv(dev);
5849 int txfifosz = priv->plat->tx_fifo_size;
5850 struct stmmac_dma_conf *dma_conf;
5851 const int mtu = new_mtu;
5852 int ret;
5853
5854 if (txfifosz == 0)
5855 txfifosz = priv->dma_cap.tx_fifo_size;
5856
5857 txfifosz /= priv->plat->tx_queues_to_use;
5858
5859 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5860 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5861 return -EINVAL;
5862 }
5863
5864 new_mtu = STMMAC_ALIGN(new_mtu);
5865
5866 /* If condition true, FIFO is too small or MTU too large */
5867 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5868 return -EINVAL;
5869
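/* Changing the MTU on a running interface requires new DMA buffers:
 * allocate a fresh DMA configuration first, then restart the interface
 * with it.
 */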
5870 if (netif_running(dev)) {
5871 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5872 /* Try to allocate the new DMA conf with the new mtu */
5873 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5874 if (IS_ERR(dma_conf)) {
5875 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5876 mtu);
5877 return PTR_ERR(dma_conf);
5878 }
5879
5880 __stmmac_release(dev);
5881
5882 ret = __stmmac_open(dev, dma_conf);
5883 if (ret) {
5884 free_dma_desc_resources(priv, dma_conf);
5885 kfree(dma_conf);
5886 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5887 return ret;
5888 }
5889
5890 kfree(dma_conf);
5891
5892 stmmac_set_rx_mode(dev);
5893 }
5894
5895 WRITE_ONCE(dev->mtu, mtu);
5896 netdev_update_features(dev);
5897
5898 return 0;
5899 }
5900
5901 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5902 netdev_features_t features)
5903 {
5904 struct stmmac_priv *priv = netdev_priv(dev);
5905
5906 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5907 features &= ~NETIF_F_RXCSUM;
5908
5909 if (!priv->plat->tx_coe)
5910 features &= ~NETIF_F_CSUM_MASK;
5911
5912 /* Some GMAC devices have a bugged Jumbo frame support that
5913 * needs to have the Tx COE disabled for oversized frames
5914 * (due to limited buffer sizes). In this case we disable
5915 * the TX csum insertion in the TDES and not use SF.
5916 */
5917 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5918 features &= ~NETIF_F_CSUM_MASK;
5919
5920 /* Disable tso if asked by ethtool */
5921 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5922 if (features & NETIF_F_TSO)
5923 priv->tso = true;
5924 else
5925 priv->tso = false;
5926 }
5927
5928 return features;
5929 }
5930
5931 static int stmmac_set_features(struct net_device *netdev,
5932 netdev_features_t features)
5933 {
5934 struct stmmac_priv *priv = netdev_priv(netdev);
5935
5936 /* Keep the COE type when RX checksum offload is enabled */
5937 if (features & NETIF_F_RXCSUM)
5938 priv->hw->rx_csum = priv->plat->rx_coe;
5939 else
5940 priv->hw->rx_csum = 0;
5941 /* No check needed because rx_coe has been set before and it will be
5942 * fixed in case of issue.
5943 */
5944 stmmac_rx_ipc(priv, priv->hw);
5945
5946 if (priv->sph_cap) {
5947 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5948 u32 chan;
5949
5950 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5951 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5952 }
5953
5954 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5955 priv->hw->hw_vlan_en = true;
5956 else
5957 priv->hw->hw_vlan_en = false;
5958
5959 phylink_rx_clk_stop_block(priv->phylink);
5960 stmmac_set_hw_vlan_mode(priv, priv->hw);
5961 phylink_rx_clk_stop_unblock(priv->phylink);
5962
5963 return 0;
5964 }
5965
5966 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5967 {
5968 u32 rx_cnt = priv->plat->rx_queues_to_use;
5969 u32 tx_cnt = priv->plat->tx_queues_to_use;
5970 u32 queues_count;
5971 u32 queue;
5972 bool xmac;
5973
5974 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5975 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5976
5977 if (priv->irq_wake)
5978 pm_wakeup_event(priv->device, 0);
5979
5980 if (priv->dma_cap.estsel)
5981 stmmac_est_irq_status(priv, priv, priv->dev,
5982 &priv->xstats, tx_cnt);
5983
5984 if (stmmac_fpe_supported(priv))
5985 stmmac_fpe_irq_status(priv);
5986
5987 /* To handle GMAC own interrupts */
5988 if ((priv->plat->has_gmac) || xmac) {
5989 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
5990
5991 if (unlikely(status)) {
5992 /* For LPI we need to save the tx status */
5993 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5994 priv->tx_path_in_lpi_mode = true;
5995 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5996 priv->tx_path_in_lpi_mode = false;
5997 }
5998
5999 for (queue = 0; queue < queues_count; queue++)
6000 stmmac_host_mtl_irq_status(priv, priv->hw, queue);
6001
6002 /* PCS link status */
6003 if (priv->hw->pcs &&
6004 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6005 if (priv->xstats.pcs_link)
6006 netif_carrier_on(priv->dev);
6007 else
6008 netif_carrier_off(priv->dev);
6009 }
6010
6011 stmmac_timestamp_interrupt(priv, priv);
6012 }
6013 }
6014
6015 /**
6016 * stmmac_interrupt - main ISR
6017 * @irq: interrupt number.
6018 * @dev_id: to pass the net device pointer.
6019 * Description: this is the main driver interrupt service routine.
6020 * It can call:
6021 * o DMA service routine (to manage incoming frame reception and transmission
6022 * status)
6023 * o Core interrupts to manage: remote wake-up, management counter, LPI
6024 * interrupts.
6025 */
6026 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6027 {
6028 struct net_device *dev = (struct net_device *)dev_id;
6029 struct stmmac_priv *priv = netdev_priv(dev);
6030
6031 /* Check if adapter is up */
6032 if (test_bit(STMMAC_DOWN, &priv->state))
6033 return IRQ_HANDLED;
6034
6035 /* Check ASP error if it isn't delivered via an individual IRQ */
6036 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6037 return IRQ_HANDLED;
6038
6039 /* To handle Common interrupts */
6040 stmmac_common_interrupt(priv);
6041
6042 /* To handle DMA interrupts */
6043 stmmac_dma_interrupt(priv);
6044
6045 return IRQ_HANDLED;
6046 }
6047
6048 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6049 {
6050 struct net_device *dev = (struct net_device *)dev_id;
6051 struct stmmac_priv *priv = netdev_priv(dev);
6052
6053 /* Check if adapter is up */
6054 if (test_bit(STMMAC_DOWN, &priv->state))
6055 return IRQ_HANDLED;
6056
6057 /* To handle Common interrupts */
6058 stmmac_common_interrupt(priv);
6059
6060 return IRQ_HANDLED;
6061 }
6062
6063 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6064 {
6065 struct net_device *dev = (struct net_device *)dev_id;
6066 struct stmmac_priv *priv = netdev_priv(dev);
6067
6068 /* Check if adapter is up */
6069 if (test_bit(STMMAC_DOWN, &priv->state))
6070 return IRQ_HANDLED;
6071
6072 /* Check if a fatal error happened */
6073 stmmac_safety_feat_interrupt(priv);
6074
6075 return IRQ_HANDLED;
6076 }
6077
6078 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6079 {
6080 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6081 struct stmmac_dma_conf *dma_conf;
6082 int chan = tx_q->queue_index;
6083 struct stmmac_priv *priv;
6084 int status;
6085
6086 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6087 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6088
6089 /* Check if adapter is up */
6090 if (test_bit(STMMAC_DOWN, &priv->state))
6091 return IRQ_HANDLED;
6092
6093 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6094
6095 if (unlikely(status & tx_hard_error_bump_tc)) {
6096 /* Try to bump up the dma threshold on this failure */
6097 stmmac_bump_dma_threshold(priv, chan);
6098 } else if (unlikely(status == tx_hard_error)) {
6099 stmmac_tx_err(priv, chan);
6100 }
6101
6102 return IRQ_HANDLED;
6103 }
6104
6105 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6106 {
6107 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6108 struct stmmac_dma_conf *dma_conf;
6109 int chan = rx_q->queue_index;
6110 struct stmmac_priv *priv;
6111
6112 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6113 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6114
6115 /* Check if adapter is up */
6116 if (test_bit(STMMAC_DOWN, &priv->state))
6117 return IRQ_HANDLED;
6118
6119 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6120
6121 return IRQ_HANDLED;
6122 }
6123
6124 /**
6125 * stmmac_ioctl - Entry point for the Ioctl
6126 * @dev: Device pointer.
6127 * @rq: An IOCTL specific structure that can contain a pointer to
6128 * a proprietary structure used to pass information to the driver.
6129 * @cmd: IOCTL command
6130 * Description:
6131 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6132 */
6133 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6134 {
6135 struct stmmac_priv *priv = netdev_priv(dev);
6136 int ret = -EOPNOTSUPP;
6137
6138 if (!netif_running(dev))
6139 return -EINVAL;
6140
6141 switch (cmd) {
6142 case SIOCGMIIPHY:
6143 case SIOCGMIIREG:
6144 case SIOCSMIIREG:
6145 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6146 break;
6147 default:
6148 break;
6149 }
6150
6151 return ret;
6152 }
6153
6154 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6155 void *cb_priv)
6156 {
6157 struct stmmac_priv *priv = cb_priv;
6158 int ret = -EOPNOTSUPP;
6159
6160 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6161 return ret;
6162
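/* Quiesce NAPI processing while the classifier is installed or removed in
 * hardware; the queues are re-enabled once the callback completes.
 */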
6163 __stmmac_disable_all_queues(priv);
6164
6165 switch (type) {
6166 case TC_SETUP_CLSU32:
6167 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6168 break;
6169 case TC_SETUP_CLSFLOWER:
6170 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6171 break;
6172 default:
6173 break;
6174 }
6175
6176 stmmac_enable_all_queues(priv);
6177 return ret;
6178 }
6179
6180 static LIST_HEAD(stmmac_block_cb_list);
6181
6182 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6183 void *type_data)
6184 {
6185 struct stmmac_priv *priv = netdev_priv(ndev);
6186
6187 switch (type) {
6188 case TC_QUERY_CAPS:
6189 return stmmac_tc_query_caps(priv, priv, type_data);
6190 case TC_SETUP_QDISC_MQPRIO:
6191 return stmmac_tc_setup_mqprio(priv, priv, type_data);
6192 case TC_SETUP_BLOCK:
6193 return flow_block_cb_setup_simple(type_data,
6194 &stmmac_block_cb_list,
6195 stmmac_setup_tc_block_cb,
6196 priv, priv, true);
6197 case TC_SETUP_QDISC_CBS:
6198 return stmmac_tc_setup_cbs(priv, priv, type_data);
6199 case TC_SETUP_QDISC_TAPRIO:
6200 return stmmac_tc_setup_taprio(priv, priv, type_data);
6201 case TC_SETUP_QDISC_ETF:
6202 return stmmac_tc_setup_etf(priv, priv, type_data);
6203 default:
6204 return -EOPNOTSUPP;
6205 }
6206 }
6207
6208 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6209 struct net_device *sb_dev)
6210 {
6211 int gso = skb_shinfo(skb)->gso_type;
6212
6213 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6214 /*
6215 * There is no way to determine the number of TSO/USO
6216 * capable Queues. Let's always use Queue 0
6217 * because if TSO/USO is supported then at least this
6218 * one will be capable.
6219 */
6220 return 0;
6221 }
6222
6223 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6224 }
6225
6226 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6227 {
6228 struct stmmac_priv *priv = netdev_priv(ndev);
6229 int ret = 0;
6230
6231 ret = pm_runtime_resume_and_get(priv->device);
6232 if (ret < 0)
6233 return ret;
6234
6235 ret = eth_mac_addr(ndev, addr);
6236 if (ret)
6237 goto set_mac_error;
6238
6239 phylink_rx_clk_stop_block(priv->phylink);
6240 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6241 phylink_rx_clk_stop_unblock(priv->phylink);
6242
6243 set_mac_error:
6244 pm_runtime_put(priv->device);
6245
6246 return ret;
6247 }
6248
6249 #ifdef CONFIG_DEBUG_FS
6250 static struct dentry *stmmac_fs_dir;
6251
6252 static void sysfs_display_ring(void *head, int size, int extend_desc,
6253 struct seq_file *seq, dma_addr_t dma_phy_addr)
6254 {
6255 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6256 struct dma_desc *p = (struct dma_desc *)head;
6257 unsigned int desc_size;
6258 dma_addr_t dma_addr;
6259 int i;
6260
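/* Dump the four basic descriptor words for each ring entry; for extended
 * descriptors only the embedded basic descriptor is shown.
 */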
6261 desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6262 for (i = 0; i < size; i++) {
6263 dma_addr = dma_phy_addr + i * desc_size;
6264 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6265 i, &dma_addr,
6266 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6267 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6268 if (extend_desc)
6269 p = &(++ep)->basic;
6270 else
6271 p++;
6272 }
6273 }
6274
6275 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6276 {
6277 struct net_device *dev = seq->private;
6278 struct stmmac_priv *priv = netdev_priv(dev);
6279 u32 rx_count = priv->plat->rx_queues_to_use;
6280 u32 tx_count = priv->plat->tx_queues_to_use;
6281 u32 queue;
6282
6283 if ((dev->flags & IFF_UP) == 0)
6284 return 0;
6285
6286 for (queue = 0; queue < rx_count; queue++) {
6287 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6288
6289 seq_printf(seq, "RX Queue %d:\n", queue);
6290
6291 if (priv->extend_desc) {
6292 seq_printf(seq, "Extended descriptor ring:\n");
6293 sysfs_display_ring((void *)rx_q->dma_erx,
6294 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6295 } else {
6296 seq_printf(seq, "Descriptor ring:\n");
6297 sysfs_display_ring((void *)rx_q->dma_rx,
6298 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6299 }
6300 }
6301
6302 for (queue = 0; queue < tx_count; queue++) {
6303 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6304
6305 seq_printf(seq, "TX Queue %d:\n", queue);
6306
6307 if (priv->extend_desc) {
6308 seq_printf(seq, "Extended descriptor ring:\n");
6309 sysfs_display_ring((void *)tx_q->dma_etx,
6310 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6311 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6312 seq_printf(seq, "Descriptor ring:\n");
6313 sysfs_display_ring((void *)tx_q->dma_tx,
6314 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6315 }
6316 }
6317
6318 return 0;
6319 }
6320 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6321
6322 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6323 {
6324 static const char * const dwxgmac_timestamp_source[] = {
6325 "None",
6326 "Internal",
6327 "External",
6328 "Both",
6329 };
6330 static const char * const dwxgmac_safety_feature_desc[] = {
6331 "No",
6332 "All Safety Features with ECC and Parity",
6333 "All Safety Features without ECC or Parity",
6334 "All Safety Features with Parity Only",
6335 "ECC Only",
6336 "UNDEFINED",
6337 "UNDEFINED",
6338 "UNDEFINED",
6339 };
6340 struct net_device *dev = seq->private;
6341 struct stmmac_priv *priv = netdev_priv(dev);
6342
6343 if (!priv->hw_cap_support) {
6344 seq_printf(seq, "DMA HW features not supported\n");
6345 return 0;
6346 }
6347
6348 seq_printf(seq, "==============================\n");
6349 seq_printf(seq, "\tDMA HW features\n");
6350 seq_printf(seq, "==============================\n");
6351
6352 seq_printf(seq, "\t10/100 Mbps: %s\n",
6353 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6354 seq_printf(seq, "\t1000 Mbps: %s\n",
6355 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6356 seq_printf(seq, "\tHalf duplex: %s\n",
6357 (priv->dma_cap.half_duplex) ? "Y" : "N");
6358 if (priv->plat->has_xgmac) {
6359 seq_printf(seq,
6360 "\tNumber of Additional MAC address registers: %d\n",
6361 priv->dma_cap.multi_addr);
6362 } else {
6363 seq_printf(seq, "\tHash Filter: %s\n",
6364 (priv->dma_cap.hash_filter) ? "Y" : "N");
6365 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6366 (priv->dma_cap.multi_addr) ? "Y" : "N");
6367 }
6368 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6369 (priv->dma_cap.pcs) ? "Y" : "N");
6370 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6371 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6372 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6373 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6374 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6375 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6376 seq_printf(seq, "\tRMON module: %s\n",
6377 (priv->dma_cap.rmon) ? "Y" : "N");
6378 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6379 (priv->dma_cap.time_stamp) ? "Y" : "N");
6380 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6381 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6382 if (priv->plat->has_xgmac)
6383 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6384 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6385 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6386 (priv->dma_cap.eee) ? "Y" : "N");
6387 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6388 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6389 (priv->dma_cap.tx_coe) ? "Y" : "N");
6390 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6391 priv->plat->has_xgmac) {
6392 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6393 (priv->dma_cap.rx_coe) ? "Y" : "N");
6394 } else {
6395 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6396 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6397 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6398 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6399 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6400 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6401 }
6402 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6403 priv->dma_cap.number_rx_channel);
6404 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6405 priv->dma_cap.number_tx_channel);
6406 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6407 priv->dma_cap.number_rx_queues);
6408 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6409 priv->dma_cap.number_tx_queues);
6410 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6411 (priv->dma_cap.enh_desc) ? "Y" : "N");
6412 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6413 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6414 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6415 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6416 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6417 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6418 priv->dma_cap.pps_out_num);
6419 seq_printf(seq, "\tSafety Features: %s\n",
6420 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6421 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6422 priv->dma_cap.frpsel ? "Y" : "N");
6423 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6424 priv->dma_cap.host_dma_width);
6425 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6426 priv->dma_cap.rssen ? "Y" : "N");
6427 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6428 priv->dma_cap.vlhash ? "Y" : "N");
6429 seq_printf(seq, "\tSplit Header: %s\n",
6430 priv->dma_cap.sphen ? "Y" : "N");
6431 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6432 priv->dma_cap.vlins ? "Y" : "N");
6433 seq_printf(seq, "\tDouble VLAN: %s\n",
6434 priv->dma_cap.dvlan ? "Y" : "N");
6435 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6436 priv->dma_cap.l3l4fnum);
6437 seq_printf(seq, "\tARP Offloading: %s\n",
6438 priv->dma_cap.arpoffsel ? "Y" : "N");
6439 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6440 priv->dma_cap.estsel ? "Y" : "N");
6441 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6442 priv->dma_cap.fpesel ? "Y" : "N");
6443 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6444 priv->dma_cap.tbssel ? "Y" : "N");
6445 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6446 priv->dma_cap.tbs_ch_num);
6447 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6448 priv->dma_cap.sgfsel ? "Y" : "N");
6449 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6450 BIT(priv->dma_cap.ttsfd) >> 1);
6451 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6452 priv->dma_cap.numtc);
6453 seq_printf(seq, "\tDCB Feature: %s\n",
6454 priv->dma_cap.dcben ? "Y" : "N");
6455 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6456 priv->dma_cap.advthword ? "Y" : "N");
6457 seq_printf(seq, "\tPTP Offload: %s\n",
6458 priv->dma_cap.ptoen ? "Y" : "N");
6459 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6460 priv->dma_cap.osten ? "Y" : "N");
6461 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6462 priv->dma_cap.pfcen ? "Y" : "N");
6463 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6464 BIT(priv->dma_cap.frpes) << 6);
6465 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6466 BIT(priv->dma_cap.frpbs) << 6);
6467 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6468 priv->dma_cap.frppipe_num);
6469 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6470 priv->dma_cap.nrvf_num ?
6471 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6472 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6473 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6474 seq_printf(seq, "\tDepth of GCL: %lu\n",
6475 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6476 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6477 priv->dma_cap.cbtisel ? "Y" : "N");
6478 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6479 priv->dma_cap.aux_snapshot_n);
6480 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6481 priv->dma_cap.pou_ost_en ? "Y" : "N");
6482 seq_printf(seq, "\tEnhanced DMA: %s\n",
6483 priv->dma_cap.edma ? "Y" : "N");
6484 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6485 priv->dma_cap.ediffc ? "Y" : "N");
6486 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6487 priv->dma_cap.vxn ? "Y" : "N");
6488 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6489 priv->dma_cap.dbgmem ? "Y" : "N");
6490 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6491 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6492 return 0;
6493 }
6494 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6495
6496 /* Use network device events to rename debugfs file entries.
6497 */
6498 static int stmmac_device_event(struct notifier_block *unused,
6499 unsigned long event, void *ptr)
6500 {
6501 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6502 struct stmmac_priv *priv = netdev_priv(dev);
6503
6504 if (dev->netdev_ops != &stmmac_netdev_ops)
6505 goto done;
6506
6507 switch (event) {
6508 case NETDEV_CHANGENAME:
6509 debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6510 break;
6511 }
6512 done:
6513 return NOTIFY_DONE;
6514 }
6515
6516 static struct notifier_block stmmac_notifier = {
6517 .notifier_call = stmmac_device_event,
6518 };
6519
6520 static void stmmac_init_fs(struct net_device *dev)
6521 {
6522 struct stmmac_priv *priv = netdev_priv(dev);
6523
6524 rtnl_lock();
6525
6526 /* Create per netdev entries */
6527 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6528
6529 /* Entry to report DMA RX/TX rings */
6530 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6531 &stmmac_rings_status_fops);
6532
6533 /* Entry to report the DMA HW features */
6534 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6535 &stmmac_dma_cap_fops);
6536
6537 rtnl_unlock();
6538 }
6539
6540 static void stmmac_exit_fs(struct net_device *dev)
6541 {
6542 struct stmmac_priv *priv = netdev_priv(dev);
6543
6544 debugfs_remove_recursive(priv->dbgfs_dir);
6545 }
6546 #endif /* CONFIG_DEBUG_FS */
6547
6548 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6549 {
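/* Bitwise little-endian CRC-32 (polynomial 0xedb88320) over the 12 VID
 * bits, as consumed by the VLAN hash filter.
 */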
6550 unsigned char *data = (unsigned char *)&vid_le;
6551 unsigned char data_byte = 0;
6552 u32 crc = ~0x0;
6553 u32 temp = 0;
6554 int i, bits;
6555
6556 bits = get_bitmask_order(VLAN_VID_MASK);
6557 for (i = 0; i < bits; i++) {
6558 if ((i % 8) == 0)
6559 data_byte = data[i / 8];
6560
6561 temp = ((crc & 1) ^ data_byte) & 1;
6562 crc >>= 1;
6563 data_byte >>= 1;
6564
6565 if (temp)
6566 crc ^= 0xedb88320;
6567 }
6568
6569 return crc;
6570 }
6571
6572 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6573 {
6574 u32 crc, hash = 0;
6575 u16 pmatch = 0;
6576 int count = 0;
6577 u16 vid = 0;
6578
6579 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6580 __le16 vid_le = cpu_to_le16(vid);
6581 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6582 hash |= (1 << crc);
6583 count++;
6584 }
6585
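/* Without VLAN hash filtering, fall back to a single perfect-match entry;
 * beyond VID 0 only one VLAN ID can be filtered.
 */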
6586 if (!priv->dma_cap.vlhash) {
6587 if (count > 2) /* VID = 0 always passes filter */
6588 return -EOPNOTSUPP;
6589
6590 pmatch = vid;
6591 hash = 0;
6592 }
6593
6594 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6595 }
6596
6597 /* FIXME: This may need RXC to be running, but it may be called with BH
6598 * disabled, which means we can't call phylink_rx_clk_stop*().
6599 */
6600 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6601 {
6602 struct stmmac_priv *priv = netdev_priv(ndev);
6603 bool is_double = false;
6604 int ret;
6605
6606 ret = pm_runtime_resume_and_get(priv->device);
6607 if (ret < 0)
6608 return ret;
6609
6610 if (be16_to_cpu(proto) == ETH_P_8021AD)
6611 is_double = true;
6612
6613 set_bit(vid, priv->active_vlans);
6614 ret = stmmac_vlan_update(priv, is_double);
6615 if (ret) {
6616 clear_bit(vid, priv->active_vlans);
6617 goto err_pm_put;
6618 }
6619
6620 if (priv->hw->num_vlan) {
6621 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6622 if (ret)
6623 goto err_pm_put;
6624 }
6625 err_pm_put:
6626 pm_runtime_put(priv->device);
6627
6628 return ret;
6629 }
6630
6631 /* FIXME: This may need RXC to be running, but it may be called with BH
6632 * disabled, which means we can't call phylink_rx_clk_stop*().
6633 */
6634 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6635 {
6636 struct stmmac_priv *priv = netdev_priv(ndev);
6637 bool is_double = false;
6638 int ret;
6639
6640 ret = pm_runtime_resume_and_get(priv->device);
6641 if (ret < 0)
6642 return ret;
6643
6644 if (be16_to_cpu(proto) == ETH_P_8021AD)
6645 is_double = true;
6646
6647 clear_bit(vid, priv->active_vlans);
6648
6649 if (priv->hw->num_vlan) {
6650 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6651 if (ret)
6652 goto del_vlan_error;
6653 }
6654
6655 ret = stmmac_vlan_update(priv, is_double);
6656
6657 del_vlan_error:
6658 pm_runtime_put(priv->device);
6659
6660 return ret;
6661 }
6662
6663 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6664 {
6665 struct stmmac_priv *priv = netdev_priv(dev);
6666
6667 switch (bpf->command) {
6668 case XDP_SETUP_PROG:
6669 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6670 case XDP_SETUP_XSK_POOL:
6671 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6672 bpf->xsk.queue_id);
6673 default:
6674 return -EOPNOTSUPP;
6675 }
6676 }
6677
6678 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6679 struct xdp_frame **frames, u32 flags)
6680 {
6681 struct stmmac_priv *priv = netdev_priv(dev);
6682 int cpu = smp_processor_id();
6683 struct netdev_queue *nq;
6684 int i, nxmit = 0;
6685 int queue;
6686
6687 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6688 return -ENETDOWN;
6689
6690 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6691 return -EINVAL;
6692
6693 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6694 nq = netdev_get_tx_queue(priv->dev, queue);
6695
6696 __netif_tx_lock(nq, cpu);
6697 /* Avoids TX time-out as we are sharing with slow path */
6698 txq_trans_cond_update(nq);
6699
6700 for (i = 0; i < num_frames; i++) {
6701 int res;
6702
6703 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6704 if (res == STMMAC_XDP_CONSUMED)
6705 break;
6706
6707 nxmit++;
6708 }
6709
6710 if (flags & XDP_XMIT_FLUSH) {
6711 stmmac_flush_tx_descriptors(priv, queue);
6712 stmmac_tx_timer_arm(priv, queue);
6713 }
6714
6715 __netif_tx_unlock(nq);
6716
6717 return nxmit;
6718 }
6719
6720 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6721 {
6722 struct stmmac_channel *ch = &priv->channel[queue];
6723 unsigned long flags;
6724
6725 spin_lock_irqsave(&ch->lock, flags);
6726 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6727 spin_unlock_irqrestore(&ch->lock, flags);
6728
6729 stmmac_stop_rx_dma(priv, queue);
6730 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6731 }
6732
6733 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6734 {
6735 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6736 struct stmmac_channel *ch = &priv->channel[queue];
6737 unsigned long flags;
6738 u32 buf_size;
6739 int ret;
6740
6741 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6742 if (ret) {
6743 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6744 return;
6745 }
6746
6747 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6748 if (ret) {
6749 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6750 netdev_err(priv->dev, "Failed to init RX desc.\n");
6751 return;
6752 }
6753
6754 stmmac_reset_rx_queue(priv, queue);
6755 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6756
6757 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6758 rx_q->dma_rx_phy, rx_q->queue_index);
6759
6760 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6761 sizeof(struct dma_desc));
6762 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6763 rx_q->rx_tail_addr, rx_q->queue_index);
6764
6765 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6766 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6767 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6768 buf_size,
6769 rx_q->queue_index);
6770 } else {
6771 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6772 priv->dma_conf.dma_buf_sz,
6773 rx_q->queue_index);
6774 }
6775
6776 stmmac_start_rx_dma(priv, queue);
6777
6778 spin_lock_irqsave(&ch->lock, flags);
6779 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6780 spin_unlock_irqrestore(&ch->lock, flags);
6781 }
6782
6783 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6784 {
6785 struct stmmac_channel *ch = &priv->channel[queue];
6786 unsigned long flags;
6787
6788 spin_lock_irqsave(&ch->lock, flags);
6789 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6790 spin_unlock_irqrestore(&ch->lock, flags);
6791
6792 stmmac_stop_tx_dma(priv, queue);
6793 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6794 }
6795
6796 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6797 {
6798 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6799 struct stmmac_channel *ch = &priv->channel[queue];
6800 unsigned long flags;
6801 int ret;
6802
6803 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6804 if (ret) {
6805 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6806 return;
6807 }
6808
6809 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6810 if (ret) {
6811 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6812 netdev_err(priv->dev, "Failed to init TX desc.\n");
6813 return;
6814 }
6815
6816 stmmac_reset_tx_queue(priv, queue);
6817 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6818
6819 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6820 tx_q->dma_tx_phy, tx_q->queue_index);
6821
6822 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6823 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6824
6825 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6826 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6827 tx_q->tx_tail_addr, tx_q->queue_index);
6828
6829 stmmac_start_tx_dma(priv, queue);
6830
6831 spin_lock_irqsave(&ch->lock, flags);
6832 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6833 spin_unlock_irqrestore(&ch->lock, flags);
6834 }
6835
6836 void stmmac_xdp_release(struct net_device *dev)
6837 {
6838 struct stmmac_priv *priv = netdev_priv(dev);
6839 u32 chan;
6840
6841 /* Ensure tx function is not running */
6842 netif_tx_disable(dev);
6843
6844 /* Disable NAPI process */
6845 stmmac_disable_all_queues(priv);
6846
6847 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6848 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6849
6850 /* Free the IRQ lines */
6851 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6852
6853 /* Stop TX/RX DMA channels */
6854 stmmac_stop_all_dma(priv);
6855
6856 /* Release and free the Rx/Tx resources */
6857 free_dma_desc_resources(priv, &priv->dma_conf);
6858
6859 /* Disable the MAC Rx/Tx */
6860 stmmac_mac_set(priv, priv->ioaddr, false);
6861
6862 /* set trans_start so we don't get spurious
6863 * watchdogs during reset
6864 */
6865 netif_trans_update(dev);
6866 netif_carrier_off(dev);
6867 }
6868
6869 int stmmac_xdp_open(struct net_device *dev)
6870 {
6871 struct stmmac_priv *priv = netdev_priv(dev);
6872 u32 rx_cnt = priv->plat->rx_queues_to_use;
6873 u32 tx_cnt = priv->plat->tx_queues_to_use;
6874 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6875 struct stmmac_rx_queue *rx_q;
6876 struct stmmac_tx_queue *tx_q;
6877 u32 buf_size;
6878 bool sph_en;
6879 u32 chan;
6880 int ret;
6881
6882 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6883 if (ret < 0) {
6884 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6885 __func__);
6886 goto dma_desc_error;
6887 }
6888
6889 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6890 if (ret < 0) {
6891 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6892 __func__);
6893 goto init_error;
6894 }
6895
6896 stmmac_reset_queues_param(priv);
6897
6898 /* DMA CSR Channel configuration */
6899 for (chan = 0; chan < dma_csr_ch; chan++) {
6900 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6901 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6902 }
6903
6904 /* Adjust Split header */
6905 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6906
6907 /* DMA RX Channel Configuration */
6908 for (chan = 0; chan < rx_cnt; chan++) {
6909 rx_q = &priv->dma_conf.rx_queue[chan];
6910
6911 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6912 rx_q->dma_rx_phy, chan);
6913
6914 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6915 (rx_q->buf_alloc_num *
6916 sizeof(struct dma_desc));
6917 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6918 rx_q->rx_tail_addr, chan);
6919
6920 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6921 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6922 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6923 buf_size,
6924 rx_q->queue_index);
6925 } else {
6926 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6927 priv->dma_conf.dma_buf_sz,
6928 rx_q->queue_index);
6929 }
6930
6931 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6932 }
6933
6934 /* DMA TX Channel Configuration */
6935 for (chan = 0; chan < tx_cnt; chan++) {
6936 tx_q = &priv->dma_conf.tx_queue[chan];
6937
6938 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6939 tx_q->dma_tx_phy, chan);
6940
6941 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6942 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6943 tx_q->tx_tail_addr, chan);
6944
6945 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6946 }
6947
6948 /* Enable the MAC Rx/Tx */
6949 stmmac_mac_set(priv, priv->ioaddr, true);
6950
6951 /* Start Rx & Tx DMA Channels */
6952 stmmac_start_all_dma(priv);
6953
6954 ret = stmmac_request_irq(dev);
6955 if (ret)
6956 goto irq_error;
6957
6958 /* Enable NAPI process */
6959 stmmac_enable_all_queues(priv);
6960 netif_carrier_on(dev);
6961 netif_tx_start_all_queues(dev);
6962 stmmac_enable_all_dma_irq(priv);
6963
6964 return 0;
6965
6966 irq_error:
6967 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6968 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6969
6970 init_error:
6971 free_dma_desc_resources(priv, &priv->dma_conf);
6972 dma_desc_error:
6973 return ret;
6974 }
6975
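/**
 * stmmac_xsk_wakeup - ndo_xsk_wakeup callback for AF_XDP
 * @dev: device pointer
 * @queue: queue index to kick
 * @flags: wakeup flags (unused)
 * Description: schedule the RX/TX NAPI of the channel bound to an XSK pool.
 */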
6976 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6977 {
6978 struct stmmac_priv *priv = netdev_priv(dev);
6979 struct stmmac_rx_queue *rx_q;
6980 struct stmmac_tx_queue *tx_q;
6981 struct stmmac_channel *ch;
6982
6983 if (test_bit(STMMAC_DOWN, &priv->state) ||
6984 !netif_carrier_ok(priv->dev))
6985 return -ENETDOWN;
6986
6987 if (!stmmac_xdp_is_enabled(priv))
6988 return -EINVAL;
6989
6990 if (queue >= priv->plat->rx_queues_to_use ||
6991 queue >= priv->plat->tx_queues_to_use)
6992 return -EINVAL;
6993
6994 rx_q = &priv->dma_conf.rx_queue[queue];
6995 tx_q = &priv->dma_conf.tx_queue[queue];
6996 ch = &priv->channel[queue];
6997
6998 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
6999 return -EINVAL;
7000
7001 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7002 /* EQoS does not have per-DMA channel SW interrupt,
7003 * so we schedule the RX/TX NAPI straight away.
7004 */
7005 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7006 __napi_schedule(&ch->rxtx_napi);
7007 }
7008
7009 return 0;
7010 }
7011
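/**
 * stmmac_get_stats64 - fill the rtnl_link_stats64 counters
 * @dev: device pointer
 * @stats: structure to fill
 * Description: aggregate the per-queue u64_stats counters and copy the
 * error counters kept in priv->xstats.
 */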
7012 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7013 {
7014 struct stmmac_priv *priv = netdev_priv(dev);
7015 u32 tx_cnt = priv->plat->tx_queues_to_use;
7016 u32 rx_cnt = priv->plat->rx_queues_to_use;
7017 unsigned int start;
7018 int q;
7019
7020 for (q = 0; q < tx_cnt; q++) {
7021 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7022 u64 tx_packets;
7023 u64 tx_bytes;
7024
7025 do {
7026 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7027 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
7028 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7029 do {
7030 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7031 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7032 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7033
7034 stats->tx_packets += tx_packets;
7035 stats->tx_bytes += tx_bytes;
7036 }
7037
7038 for (q = 0; q < rx_cnt; q++) {
7039 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7040 u64 rx_packets;
7041 u64 rx_bytes;
7042
7043 do {
7044 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7045 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7046 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
7047 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7048
7049 stats->rx_packets += rx_packets;
7050 stats->rx_bytes += rx_bytes;
7051 }
7052
7053 stats->rx_dropped = priv->xstats.rx_dropped;
7054 stats->rx_errors = priv->xstats.rx_errors;
7055 stats->tx_dropped = priv->xstats.tx_dropped;
7056 stats->tx_errors = priv->xstats.tx_errors;
7057 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7058 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7059 stats->rx_length_errors = priv->xstats.rx_length;
7060 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7061 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7062 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7063 }
7064
7065 static const struct net_device_ops stmmac_netdev_ops = {
7066 .ndo_open = stmmac_open,
7067 .ndo_start_xmit = stmmac_xmit,
7068 .ndo_stop = stmmac_release,
7069 .ndo_change_mtu = stmmac_change_mtu,
7070 .ndo_fix_features = stmmac_fix_features,
7071 .ndo_set_features = stmmac_set_features,
7072 .ndo_set_rx_mode = stmmac_set_rx_mode,
7073 .ndo_tx_timeout = stmmac_tx_timeout,
7074 .ndo_eth_ioctl = stmmac_ioctl,
7075 .ndo_get_stats64 = stmmac_get_stats64,
7076 .ndo_setup_tc = stmmac_setup_tc,
7077 .ndo_select_queue = stmmac_select_queue,
7078 .ndo_set_mac_address = stmmac_set_mac_address,
7079 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7080 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7081 .ndo_bpf = stmmac_bpf,
7082 .ndo_xdp_xmit = stmmac_xdp_xmit,
7083 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7084 .ndo_hwtstamp_get = stmmac_hwtstamp_get,
7085 .ndo_hwtstamp_set = stmmac_hwtstamp_set,
7086 };
7087
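/**
 * stmmac_reset_subtask - reset the device when requested
 * @priv: driver private structure
 * Description: if STMMAC_RESET_REQUESTED is set and the interface is not
 * down, close and re-open the device under the rtnl lock.
 */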
7088 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7089 {
7090 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7091 return;
7092 if (test_bit(STMMAC_DOWN, &priv->state))
7093 return;
7094
7095 netdev_err(priv->dev, "Reset adapter.\n");
7096
7097 rtnl_lock();
7098 netif_trans_update(priv->dev);
7099 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7100 usleep_range(1000, 2000);
7101
7102 set_bit(STMMAC_DOWN, &priv->state);
7103 dev_close(priv->dev);
7104 dev_open(priv->dev, NULL);
7105 clear_bit(STMMAC_DOWN, &priv->state);
7106 clear_bit(STMMAC_RESETING, &priv->state);
7107 rtnl_unlock();
7108 }
7109
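/**
 * stmmac_service_task - deferred service work
 * @work: work_struct embedded in the driver private structure
 * Description: run the reset subtask and clear the service-scheduled flag.
 */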
7110 static void stmmac_service_task(struct work_struct *work)
7111 {
7112 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7113 service_task);
7114
7115 stmmac_reset_subtask(priv);
7116 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7117 }
7118
7119 /**
7120 * stmmac_hw_init - Init the MAC device
7121 * @priv: driver private structure
7122 * Description: this function is to configure the MAC device according to
7123 * some platform parameters or the HW capability register. It prepares the
7124 * driver to use either ring or chain modes and to setup either enhanced or
7125 * normal descriptors.
7126 */
7127 static int stmmac_hw_init(struct stmmac_priv *priv)
7128 {
7129 int ret;
7130
7131 /* dwmac-sun8i only works in chain mode */
7132 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7133 chain_mode = 1;
7134 priv->chain_mode = chain_mode;
7135
7136 /* Initialize HW Interface */
7137 ret = stmmac_hwif_init(priv);
7138 if (ret)
7139 return ret;
7140
7141 /* Get the HW capability (new GMAC newer than 3.50a) */
7142 priv->hw_cap_support = stmmac_get_hw_features(priv);
7143 if (priv->hw_cap_support) {
7144 dev_info(priv->device, "DMA HW capability register supported\n");
7145
7146 /* We can override some gmac/dma configuration fields
7147 * (e.g. enh_desc, tx_coe) that are passed through the
7148 * platform with the values from the HW capability
7149 * register (if supported).
7150 */
7151 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7152 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7153 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7154 if (priv->dma_cap.hash_tb_sz) {
7155 priv->hw->multicast_filter_bins =
7156 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7157 priv->hw->mcast_bits_log2 =
7158 ilog2(priv->hw->multicast_filter_bins);
7159 }
7160
7161 /* TXCOE doesn't work in thresh DMA mode */
7162 if (priv->plat->force_thresh_dma_mode)
7163 priv->plat->tx_coe = 0;
7164 else
7165 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7166
7167 /* In case of GMAC4 rx_coe is from HW cap register. */
7168 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7169
7170 if (priv->dma_cap.rx_coe_type2)
7171 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7172 else if (priv->dma_cap.rx_coe_type1)
7173 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7174
7175 } else {
7176 dev_info(priv->device, "No HW DMA feature register supported\n");
7177 }
7178
7179 if (priv->plat->rx_coe) {
7180 priv->hw->rx_csum = priv->plat->rx_coe;
7181 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7182 if (priv->synopsys_id < DWMAC_CORE_4_00)
7183 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7184 }
7185 if (priv->plat->tx_coe)
7186 dev_info(priv->device, "TX Checksum insertion supported\n");
7187
7188 if (priv->plat->pmt) {
7189 dev_info(priv->device, "Wake-Up On Lan supported\n");
7190 device_set_wakeup_capable(priv->device, 1);
7191 devm_pm_set_wake_irq(priv->device, priv->wol_irq);
7192 }
7193
7194 if (priv->dma_cap.tsoen)
7195 dev_info(priv->device, "TSO supported\n");
7196
7197 if (priv->dma_cap.number_rx_queues &&
7198 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
7199 dev_warn(priv->device,
7200 "Number of Rx queues (%u) exceeds dma capability\n",
7201 priv->plat->rx_queues_to_use);
7202 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
7203 }
7204 if (priv->dma_cap.number_tx_queues &&
7205 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
7206 dev_warn(priv->device,
7207 "Number of Tx queues (%u) exceeds dma capability\n",
7208 priv->plat->tx_queues_to_use);
7209 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
7210 }
7211
7212 if (priv->dma_cap.rx_fifo_size &&
7213 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
7214 dev_warn(priv->device,
7215 "Rx FIFO size (%u) exceeds dma capability\n",
7216 priv->plat->rx_fifo_size);
7217 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
7218 }
7219 if (priv->dma_cap.tx_fifo_size &&
7220 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
7221 dev_warn(priv->device,
7222 "Tx FIFO size (%u) exceeds dma capability\n",
7223 priv->plat->tx_fifo_size);
7224 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
7225 }
7226
7227 priv->hw->vlan_fail_q_en =
7228 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7229 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7230
7231 /* Run HW quirks, if any */
7232 if (priv->hwif_quirks) {
7233 ret = priv->hwif_quirks(priv);
7234 if (ret)
7235 return ret;
7236 }
7237
7238 /* Rx Watchdog is available in cores newer than 3.40.
7239 * In some cases, for example on buggy HW, this feature
7240 * has to be disabled and this can be done by passing the
7241 * riwt_off field from the platform.
7242 */
7243 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7244 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7245 priv->use_riwt = 1;
7246 dev_info(priv->device,
7247 "Enable RX Mitigation via HW Watchdog Timer\n");
7248 }
7249
7250 return 0;
7251 }
7252
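/**
 * stmmac_napi_add - register the per-channel NAPI instances
 * @dev: device pointer
 * Description: add the RX, TX and combined RX/TX NAPI contexts for every
 * channel, according to the number of RX/TX queues in use.
 */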
7253 static void stmmac_napi_add(struct net_device *dev)
7254 {
7255 struct stmmac_priv *priv = netdev_priv(dev);
7256 u32 queue, maxq;
7257
7258 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7259
7260 for (queue = 0; queue < maxq; queue++) {
7261 struct stmmac_channel *ch = &priv->channel[queue];
7262
7263 ch->priv_data = priv;
7264 ch->index = queue;
7265 spin_lock_init(&ch->lock);
7266
7267 if (queue < priv->plat->rx_queues_to_use) {
7268 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7269 }
7270 if (queue < priv->plat->tx_queues_to_use) {
7271 netif_napi_add_tx(dev, &ch->tx_napi,
7272 stmmac_napi_poll_tx);
7273 }
7274 if (queue < priv->plat->rx_queues_to_use &&
7275 queue < priv->plat->tx_queues_to_use) {
7276 netif_napi_add(dev, &ch->rxtx_napi,
7277 stmmac_napi_poll_rxtx);
7278 }
7279 }
7280 }
7281
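/**
 * stmmac_napi_del - unregister the per-channel NAPI instances
 * @dev: device pointer
 */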
7282 static void stmmac_napi_del(struct net_device *dev)
7283 {
7284 struct stmmac_priv *priv = netdev_priv(dev);
7285 u32 queue, maxq;
7286
7287 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7288
7289 for (queue = 0; queue < maxq; queue++) {
7290 struct stmmac_channel *ch = &priv->channel[queue];
7291
7292 if (queue < priv->plat->rx_queues_to_use)
7293 netif_napi_del(&ch->rx_napi);
7294 if (queue < priv->plat->tx_queues_to_use)
7295 netif_napi_del(&ch->tx_napi);
7296 if (queue < priv->plat->rx_queues_to_use &&
7297 queue < priv->plat->tx_queues_to_use) {
7298 netif_napi_del(&ch->rxtx_napi);
7299 }
7300 }
7301 }
7302
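/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: device pointer
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: close the interface if it is running, rebuild the NAPI
 * contexts and the default RSS table for the new counts, then re-open it.
 */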
7303 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7304 {
7305 struct stmmac_priv *priv = netdev_priv(dev);
7306 int ret = 0, i;
7307
7308 if (netif_running(dev))
7309 stmmac_release(dev);
7310
7311 stmmac_napi_del(dev);
7312
7313 priv->plat->rx_queues_to_use = rx_cnt;
7314 priv->plat->tx_queues_to_use = tx_cnt;
7315 if (!netif_is_rxfh_configured(dev))
7316 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7317 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7318 rx_cnt);
7319
7320 stmmac_napi_add(dev);
7321
7322 if (netif_running(dev))
7323 ret = stmmac_open(dev);
7324
7325 return ret;
7326 }
7327
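/**
 * stmmac_reinit_ringparam - change the RX/TX descriptor ring sizes
 * @dev: device pointer
 * @rx_size: new RX ring size
 * @tx_size: new TX ring size
 * Description: close the interface if it is running, update the ring sizes
 * and re-open it so that the new sizes take effect.
 */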
7328 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7329 {
7330 struct stmmac_priv *priv = netdev_priv(dev);
7331 int ret = 0;
7332
7333 if (netif_running(dev))
7334 stmmac_release(dev);
7335
7336 priv->dma_conf.dma_rx_size = rx_size;
7337 priv->dma_conf.dma_tx_size = tx_size;
7338
7339 if (netif_running(dev))
7340 ret = stmmac_open(dev);
7341
7342 return ret;
7343 }
7344
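/* XDP RX metadata hook: return the hardware RX timestamp of the current
 * frame (adjusted for the CDC sync error), or -ENODATA if timestamping is
 * disabled or no timestamp is available.
 */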
7345 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7346 {
7347 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7348 struct dma_desc *desc_contains_ts = ctx->desc;
7349 struct stmmac_priv *priv = ctx->priv;
7350 struct dma_desc *ndesc = ctx->ndesc;
7351 struct dma_desc *desc = ctx->desc;
7352 u64 ns = 0;
7353
7354 if (!priv->hwts_rx_en)
7355 return -ENODATA;
7356
7357 /* For GMAC4, the valid timestamp is from CTX next desc. */
7358 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7359 desc_contains_ts = ndesc;
7360
7361 /* Check if timestamp is available */
7362 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7363 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7364 ns -= priv->plat->cdc_error_adj;
7365 *timestamp = ns_to_ktime(ns);
7366 return 0;
7367 }
7368
7369 return -ENODATA;
7370 }
7371
7372 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7373 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7374 };
7375
7376 /**
7377 * stmmac_dvr_probe
7378 * @device: device pointer
7379 * @plat_dat: platform data pointer
7380 * @res: stmmac resource pointer
7381 * Description: this is the main probe function: it allocates the
7382 * net_device, sets up the private structure and registers the device.
7383 * Return:
7384 * returns 0 on success, otherwise errno.
7385 */
7386 int stmmac_dvr_probe(struct device *device,
7387 struct plat_stmmacenet_data *plat_dat,
7388 struct stmmac_resources *res)
7389 {
7390 struct net_device *ndev = NULL;
7391 struct stmmac_priv *priv;
7392 u32 rxq;
7393 int i, ret = 0;
7394
7395 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7396 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7397 if (!ndev)
7398 return -ENOMEM;
7399
7400 SET_NETDEV_DEV(ndev, device);
7401
7402 priv = netdev_priv(ndev);
7403 priv->device = device;
7404 priv->dev = ndev;
7405
7406 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7407 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7408 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7409 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7410 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7411 }
7412
7413 priv->xstats.pcpu_stats =
7414 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7415 if (!priv->xstats.pcpu_stats)
7416 return -ENOMEM;
7417
7418 stmmac_set_ethtool_ops(ndev);
7419 priv->pause_time = pause;
7420 priv->plat = plat_dat;
7421 priv->ioaddr = res->addr;
7422 priv->dev->base_addr = (unsigned long)res->addr;
7423 priv->plat->dma_cfg->multi_msi_en =
7424 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7425
7426 priv->dev->irq = res->irq;
7427 priv->wol_irq = res->wol_irq;
7428 priv->lpi_irq = res->lpi_irq;
7429 priv->sfty_irq = res->sfty_irq;
7430 priv->sfty_ce_irq = res->sfty_ce_irq;
7431 priv->sfty_ue_irq = res->sfty_ue_irq;
7432 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7433 priv->rx_irq[i] = res->rx_irq[i];
7434 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7435 priv->tx_irq[i] = res->tx_irq[i];
7436
7437 if (!is_zero_ether_addr(res->mac))
7438 eth_hw_addr_set(priv->dev, res->mac);
7439
7440 dev_set_drvdata(device, priv->dev);
7441
7442 /* Verify driver arguments */
7443 stmmac_verify_args();
7444
7445 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7446 if (!priv->af_xdp_zc_qps)
7447 return -ENOMEM;
7448
7449 /* Allocate workqueue */
7450 priv->wq = create_singlethread_workqueue("stmmac_wq");
7451 if (!priv->wq) {
7452 dev_err(priv->device, "failed to create workqueue\n");
7453 ret = -ENOMEM;
7454 goto error_wq_init;
7455 }
7456
7457 INIT_WORK(&priv->service_task, stmmac_service_task);
7458
7459 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7460
7461 /* Override with kernel parameters if supplied XXX CRS XXX
7462 * this needs to have multiple instances
7463 */
7464 if ((phyaddr >= 0) && (phyaddr <= 31))
7465 priv->plat->phy_addr = phyaddr;
7466
7467 if (priv->plat->stmmac_rst) {
7468 ret = reset_control_assert(priv->plat->stmmac_rst);
7469 reset_control_deassert(priv->plat->stmmac_rst);
7470 /* Some reset controllers have only reset callback instead of
7471 * assert + deassert callbacks pair.
7472 */
7473 if (ret == -ENOTSUPP)
7474 reset_control_reset(priv->plat->stmmac_rst);
7475 }
7476
7477 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7478 if (ret == -ENOTSUPP)
7479 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7480 ERR_PTR(ret));
7481
7482 /* Wait a bit for the reset to take effect */
7483 udelay(10);
7484
7485 /* Init MAC and get the capabilities */
7486 ret = stmmac_hw_init(priv);
7487 if (ret)
7488 goto error_hw_init;
7489
7490 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7491 */
7492 if (priv->synopsys_id < DWMAC_CORE_5_20)
7493 priv->plat->dma_cfg->dche = false;
7494
7495 stmmac_check_ether_addr(priv);
7496
7497 ndev->netdev_ops = &stmmac_netdev_ops;
7498
7499 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7500 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7501
7502 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7503 NETIF_F_RXCSUM;
7504 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7505 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7506
7507 ret = stmmac_tc_init(priv, priv);
7508 if (!ret) {
7509 ndev->hw_features |= NETIF_F_HW_TC;
7510 }
7511
7512 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7513 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7514 if (priv->plat->has_gmac4)
7515 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7516 priv->tso = true;
7517 dev_info(priv->device, "TSO feature enabled\n");
7518 }
7519
7520 if (priv->dma_cap.sphen &&
7521 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7522 ndev->hw_features |= NETIF_F_GRO;
7523 priv->sph_cap = true;
7524 priv->sph = priv->sph_cap;
7525 dev_info(priv->device, "SPH feature enabled\n");
7526 }
7527
7528 /* Ideally our host DMA address width is the same as for the
7529 * device. However, it may differ and then we have to use our
7530 * host DMA width for allocation and the device DMA width for
7531 * register handling.
7532 */
7533 if (priv->plat->host_dma_width)
7534 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7535 else
7536 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7537
7538 if (priv->dma_cap.host_dma_width) {
7539 ret = dma_set_mask_and_coherent(device,
7540 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7541 if (!ret) {
7542 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7543 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7544
7545 /*
7546 * If more than 32 bits can be addressed, make sure to
7547 * enable enhanced addressing mode.
7548 */
7549 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7550 priv->plat->dma_cfg->eame = true;
7551 } else {
7552 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7553 if (ret) {
7554 dev_err(priv->device, "Failed to set DMA Mask\n");
7555 goto error_hw_init;
7556 }
7557
7558 priv->dma_cap.host_dma_width = 32;
7559 }
7560 }
7561
7562 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7563 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7564 #ifdef STMMAC_VLAN_TAG_USED
7565 /* Both mac100 and gmac support receive VLAN tag detection */
7566 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7567 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
7568 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7569 priv->hw->hw_vlan_en = true;
7570 }
7571 if (priv->dma_cap.vlhash) {
7572 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7573 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7574 }
7575 if (priv->dma_cap.vlins)
7576 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7577 #endif
7578 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7579
7580 priv->xstats.threshold = tc;
7581
7582 /* Initialize RSS */
7583 rxq = priv->plat->rx_queues_to_use;
7584 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7585 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7586 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7587
7588 if (priv->dma_cap.rssen && priv->plat->rss_en)
7589 ndev->features |= NETIF_F_RXHASH;
7590
7591 ndev->vlan_features |= ndev->features;
7592
7593 /* MTU range: 46 - hw-specific max */
7594 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7595 if (priv->plat->has_xgmac)
7596 ndev->max_mtu = XGMAC_JUMBO_LEN;
7597 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7598 ndev->max_mtu = JUMBO_LEN;
7599 else
7600 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7601 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
7602 * as well as plat->maxmtu < ndev->min_mtu which is an invalid range.
7603 */
7604 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7605 (priv->plat->maxmtu >= ndev->min_mtu))
7606 ndev->max_mtu = priv->plat->maxmtu;
7607 else if (priv->plat->maxmtu < ndev->min_mtu)
7608 dev_warn(priv->device,
7609 "%s: warning: maxmtu having invalid value (%d)\n",
7610 __func__, priv->plat->maxmtu);
7611
7612 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7613
7614 /* Setup channels NAPI */
7615 stmmac_napi_add(ndev);
7616
7617 mutex_init(&priv->lock);
7618
7619 stmmac_fpe_init(priv);
7620
7621 stmmac_check_pcs_mode(priv);
7622
7623 pm_runtime_get_noresume(device);
7624 pm_runtime_set_active(device);
7625 if (!pm_runtime_enabled(device))
7626 pm_runtime_enable(device);
7627
7628 ret = stmmac_mdio_register(ndev);
7629 if (ret < 0) {
7630 dev_err_probe(priv->device, ret,
7631 "MDIO bus (id: %d) registration failed\n",
7632 priv->plat->bus_id);
7633 goto error_mdio_register;
7634 }
7635
7636 ret = stmmac_pcs_setup(ndev);
7637 if (ret)
7638 goto error_pcs_setup;
7639
7640 ret = stmmac_phy_setup(priv);
7641 if (ret) {
7642 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7643 goto error_phy_setup;
7644 }
7645
7646 ret = register_netdev(ndev);
7647 if (ret) {
7648 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7649 __func__, ret);
7650 goto error_netdev_register;
7651 }
7652
7653 #ifdef CONFIG_DEBUG_FS
7654 stmmac_init_fs(ndev);
7655 #endif
7656
7657 if (priv->plat->dump_debug_regs)
7658 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7659
7660 /* Let pm_runtime_put() disable the clocks.
7661 * If CONFIG_PM is not enabled, the clocks will stay powered.
7662 */
7663 pm_runtime_put(device);
7664
7665 return ret;
7666
7667 error_netdev_register:
7668 phylink_destroy(priv->phylink);
7669 error_phy_setup:
7670 stmmac_pcs_clean(ndev);
7671 error_pcs_setup:
7672 stmmac_mdio_unregister(ndev);
7673 error_mdio_register:
7674 stmmac_napi_del(ndev);
7675 error_hw_init:
7676 destroy_workqueue(priv->wq);
7677 error_wq_init:
7678 bitmap_free(priv->af_xdp_zc_qps);
7679
7680 return ret;
7681 }
7682 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7683
7684 /**
7685 * stmmac_dvr_remove
7686 * @dev: device pointer
7687 * Description: this function resets the TX/RX processes, disables the MAC
7688 * RX/TX, changes the link status and releases the DMA descriptor rings.
7689 */
7690 void stmmac_dvr_remove(struct device *dev)
7691 {
7692 struct net_device *ndev = dev_get_drvdata(dev);
7693 struct stmmac_priv *priv = netdev_priv(ndev);
7694
7695 netdev_info(priv->dev, "%s: removing driver", __func__);
7696
7697 pm_runtime_get_sync(dev);
7698
7699 unregister_netdev(ndev);
7700
7701 #ifdef CONFIG_DEBUG_FS
7702 stmmac_exit_fs(ndev);
7703 #endif
7704 phylink_destroy(priv->phylink);
7705 if (priv->plat->stmmac_rst)
7706 reset_control_assert(priv->plat->stmmac_rst);
7707 reset_control_assert(priv->plat->stmmac_ahb_rst);
7708
7709 stmmac_pcs_clean(ndev);
7710 stmmac_mdio_unregister(ndev);
7711
7712 destroy_workqueue(priv->wq);
7713 mutex_destroy(&priv->lock);
7714 bitmap_free(priv->af_xdp_zc_qps);
7715
7716 pm_runtime_disable(dev);
7717 pm_runtime_put_noidle(dev);
7718 }
7719 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7720
7721 /**
7722 * stmmac_suspend - suspend callback
7723 * @dev: device pointer
7724 * Description: this is the function to suspend the device and it is called
7725 * by the platform driver to stop the network queues, program the PMT
7726 * register (for WoL) and clean and release the driver resources.
7727 */
7728 int stmmac_suspend(struct device *dev)
7729 {
7730 struct net_device *ndev = dev_get_drvdata(dev);
7731 struct stmmac_priv *priv = netdev_priv(ndev);
7732 u32 chan;
7733
7734 if (!ndev || !netif_running(ndev))
7735 return 0;
7736
7737 mutex_lock(&priv->lock);
7738
7739 netif_device_detach(ndev);
7740
7741 stmmac_disable_all_queues(priv);
7742
7743 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7744 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7745
7746 if (priv->eee_sw_timer_en) {
7747 priv->tx_path_in_lpi_mode = false;
7748 timer_delete_sync(&priv->eee_ctrl_timer);
7749 }
7750
7751 /* Stop TX/RX DMA */
7752 stmmac_stop_all_dma(priv);
7753
7754 if (priv->plat->serdes_powerdown)
7755 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7756
7757 /* Enable Power down mode by programming the PMT regs */
7758 if (stmmac_wol_enabled_mac(priv)) {
7759 stmmac_pmt(priv, priv->hw, priv->wolopts);
7760 priv->irq_wake = 1;
7761 } else {
7762 stmmac_mac_set(priv, priv->ioaddr, false);
7763 pinctrl_pm_select_sleep_state(priv->device);
7764 }
7765
7766 mutex_unlock(&priv->lock);
7767
7768 rtnl_lock();
7769 if (stmmac_wol_enabled_phy(priv))
7770 phylink_speed_down(priv->phylink, false);
7771
7772 phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv));
7773 rtnl_unlock();
7774
7775 if (stmmac_fpe_supported(priv))
7776 ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
7777
7778 if (priv->plat->suspend)
7779 return priv->plat->suspend(dev, priv->plat->bsp_priv);
7780
7781 return 0;
7782 }
7783 EXPORT_SYMBOL_GPL(stmmac_suspend);
7784
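/* Reset the software state (ring indexes) of an RX queue */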
7785 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7786 {
7787 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7788
7789 rx_q->cur_rx = 0;
7790 rx_q->dirty_rx = 0;
7791 }
7792
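/* Reset the software state of a TX queue and its BQL counters */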
7793 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7794 {
7795 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7796
7797 tx_q->cur_tx = 0;
7798 tx_q->dirty_tx = 0;
7799 tx_q->mss = 0;
7800
7801 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7802 }
7803
7804 /**
7805 * stmmac_reset_queues_param - reset queue parameters
7806 * @priv: device pointer
7807 */
7808 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7809 {
7810 u32 rx_cnt = priv->plat->rx_queues_to_use;
7811 u32 tx_cnt = priv->plat->tx_queues_to_use;
7812 u32 queue;
7813
7814 for (queue = 0; queue < rx_cnt; queue++)
7815 stmmac_reset_rx_queue(priv, queue);
7816
7817 for (queue = 0; queue < tx_cnt; queue++)
7818 stmmac_reset_tx_queue(priv, queue);
7819 }
7820
7821 /**
7822 * stmmac_resume - resume callback
7823 * @dev: device pointer
7824 * Description: when resuming, this function is invoked to set up the DMA
7825 * and core in a usable state.
7826 */
7827 int stmmac_resume(struct device *dev)
7828 {
7829 struct net_device *ndev = dev_get_drvdata(dev);
7830 struct stmmac_priv *priv = netdev_priv(ndev);
7831 int ret;
7832
7833 if (priv->plat->resume) {
7834 ret = priv->plat->resume(dev, priv->plat->bsp_priv);
7835 if (ret)
7836 return ret;
7837 }
7838
7839 if (!netif_running(ndev))
7840 return 0;
7841
7842 /* The Power Down bit in the PM register is cleared
7843 * automatically as soon as a magic packet or a Wake-up frame
7844 * is received. Anyway, it's better to manually clear
7845 * this bit because it can generate problems while resuming
7846 * from other devices (e.g. serial console).
7847 */
7848 if (stmmac_wol_enabled_mac(priv)) {
7849 mutex_lock(&priv->lock);
7850 stmmac_pmt(priv, priv->hw, 0);
7851 mutex_unlock(&priv->lock);
7852 priv->irq_wake = 0;
7853 } else {
7854 pinctrl_pm_select_default_state(priv->device);
7855 /* reset the phy so that it's ready */
7856 if (priv->mii)
7857 stmmac_mdio_reset(priv->mii);
7858 }
7859
7860 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7861 priv->plat->serdes_powerup) {
7862 ret = priv->plat->serdes_powerup(ndev,
7863 priv->plat->bsp_priv);
7864
7865 if (ret < 0)
7866 return ret;
7867 }
7868
7869 rtnl_lock();
7870
7871 /* Prepare the PHY to resume, ensuring that its clocks which are
7872 * necessary for the MAC DMA reset to complete are running
7873 */
7874 phylink_prepare_resume(priv->phylink);
7875
7876 mutex_lock(&priv->lock);
7877
7878 stmmac_reset_queues_param(priv);
7879
7880 stmmac_free_tx_skbufs(priv);
7881 stmmac_clear_descriptors(priv, &priv->dma_conf);
7882
7883 ret = stmmac_hw_setup(ndev);
7884 if (ret < 0) {
7885 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
7886 mutex_unlock(&priv->lock);
7887 rtnl_unlock();
7888 return ret;
7889 }
7890
7891 stmmac_init_timestamping(priv);
7892
7893 stmmac_init_coalesce(priv);
7894 phylink_rx_clk_stop_block(priv->phylink);
7895 stmmac_set_rx_mode(ndev);
7896
7897 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7898 phylink_rx_clk_stop_unblock(priv->phylink);
7899
7900 stmmac_enable_all_queues(priv);
7901 stmmac_enable_all_dma_irq(priv);
7902
7903 mutex_unlock(&priv->lock);
7904
7905 /* phylink_resume() must be called after the hardware has been
7906 * initialised because it may bring the link up immediately in a
7907 * workqueue thread, which will race with initialisation.
7908 */
7909 phylink_resume(priv->phylink);
7910 if (stmmac_wol_enabled_phy(priv))
7911 phylink_speed_up(priv->phylink);
7912
7913 rtnl_unlock();
7914
7915 netif_device_attach(ndev);
7916
7917 return 0;
7918 }
7919 EXPORT_SYMBOL_GPL(stmmac_resume);
7920
7921 /* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
7922 DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
7923 EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
7924
7925 #ifndef MODULE
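/**
 * stmmac_cmdline_opt - parse the built-in "stmmaceth=" command line options
 * @str: comma separated list of opt:value pairs
 * Description: when the driver is built-in, debug, phyaddr, tc, watchdog,
 * flow_ctrl, pause, eee_timer and chain_mode can be set from the kernel
 * command line.
 */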
7926 static int __init stmmac_cmdline_opt(char *str)
7927 {
7928 char *opt;
7929
7930 if (!str || !*str)
7931 return 1;
7932 while ((opt = strsep(&str, ",")) != NULL) {
7933 if (!strncmp(opt, "debug:", 6)) {
7934 if (kstrtoint(opt + 6, 0, &debug))
7935 goto err;
7936 } else if (!strncmp(opt, "phyaddr:", 8)) {
7937 if (kstrtoint(opt + 8, 0, &phyaddr))
7938 goto err;
7939 } else if (!strncmp(opt, "tc:", 3)) {
7940 if (kstrtoint(opt + 3, 0, &tc))
7941 goto err;
7942 } else if (!strncmp(opt, "watchdog:", 9)) {
7943 if (kstrtoint(opt + 9, 0, &watchdog))
7944 goto err;
7945 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
7946 if (kstrtoint(opt + 10, 0, &flow_ctrl))
7947 goto err;
7948 } else if (!strncmp(opt, "pause:", 6)) {
7949 if (kstrtoint(opt + 6, 0, &pause))
7950 goto err;
7951 } else if (!strncmp(opt, "eee_timer:", 10)) {
7952 if (kstrtoint(opt + 10, 0, &eee_timer))
7953 goto err;
7954 } else if (!strncmp(opt, "chain_mode:", 11)) {
7955 if (kstrtoint(opt + 11, 0, &chain_mode))
7956 goto err;
7957 }
7958 }
7959 return 1;
7960
7961 err:
7962 pr_err("%s: ERROR broken module parameter conversion\n", __func__);
7963 return 1;
7964 }
7965
7966 __setup("stmmaceth=", stmmac_cmdline_opt);
7967 #endif /* MODULE */
7968
7969 static int __init stmmac_init(void)
7970 {
7971 #ifdef CONFIG_DEBUG_FS
7972 /* Create debugfs main directory if it doesn't exist yet */
7973 if (!stmmac_fs_dir)
7974 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
7975 register_netdevice_notifier(&stmmac_notifier);
7976 #endif
7977
7978 return 0;
7979 }
7980
7981 static void __exit stmmac_exit(void)
7982 {
7983 #ifdef CONFIG_DEBUG_FS
7984 unregister_netdevice_notifier(&stmmac_notifier);
7985 debugfs_remove_recursive(stmmac_fs_dir);
7986 #endif
7987 }
7988
7989 module_init(stmmac_init)
7990 module_exit(stmmac_exit)
7991
7992 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7993 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7994 MODULE_LICENSE("GPL");
7995