1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/circ_buf.h>
18 #include <linux/clk.h>
19 #include <linux/kernel.h>
20 #include <linux/interrupt.h>
21 #include <linux/ip.h>
22 #include <linux/tcp.h>
23 #include <linux/skbuff.h>
24 #include <linux/ethtool.h>
25 #include <linux/if_ether.h>
26 #include <linux/crc32.h>
27 #include <linux/mii.h>
28 #include <linux/if.h>
29 #include <linux/if_vlan.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/slab.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/pm_wakeirq.h>
34 #include <linux/prefetch.h>
35 #include <linux/pinctrl/consumer.h>
36 #ifdef CONFIG_DEBUG_FS
37 #include <linux/debugfs.h>
38 #include <linux/seq_file.h>
39 #endif /* CONFIG_DEBUG_FS */
40 #include <linux/net_tstamp.h>
41 #include <linux/phylink.h>
42 #include <linux/udp.h>
43 #include <linux/bpf_trace.h>
44 #include <net/devlink.h>
45 #include <net/page_pool/helpers.h>
46 #include <net/pkt_cls.h>
47 #include <net/xdp_sock_drv.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac_fpe.h"
50 #include "stmmac.h"
51 #include "stmmac_pcs.h"
52 #include "stmmac_xdp.h"
53 #include <linux/reset.h>
54 #include <linux/of_mdio.h>
55 #include "dwmac1000.h"
56 #include "dwxgmac2.h"
57 #include "hwif.h"
58
/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR)

#define STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

/* TX-ring occupancy threshold: a quarter of the configured ring size */
#define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

/* Per-packet XDP verdict bits OR-ed together by the RX path */
#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)
#define STMMAC_XSK_CONSUMED	BIT(3)

/* 0xdead is a sentinel: any other value means the (obsolete) parameter was
 * explicitly set by the user, which stmmac_verify_args() warns about.
 */
static int flow_ctrl = 0xdead;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

/* This is unused */
#define DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

/* NOTE(review): the parameter description says "msec" but STMMAC_LPI_T()
 * converts with usecs_to_jiffies() — confirm the intended unit.
 */
#define STMMAC_DEFAULT_LPI_TIMER	1000
static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, uint, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x)	(jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring.
 * NOTE(review): declared unsigned int but registered with the 'int' param
 * type — inconsistent, though harmless for 0/1 values.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

/* Human-readable names for the dwmac "active PHY interface" register field */
static const char *stmmac_dwmac_actphyif[8] = {
	[PHY_INTF_SEL_GMII_MII] = "GMII/MII",
	[PHY_INTF_SEL_RGMII] = "RGMII",
	[PHY_INTF_SEL_SGMII] = "SGMII",
	[PHY_INTF_SEL_TBI] = "TBI",
	[PHY_INTF_SEL_RMII] = "RMII",
	[PHY_INTF_SEL_RTBI] = "RTBI",
	[PHY_INTF_SEL_SMII] = "SMII",
	[PHY_INTF_SEL_REVMII] = "REVMII",
};

/* Human-readable names for the xgmac PHY interface field (sparse on purpose) */
static const char *stmmac_dwxgmac_phyif[4] = {
	[PHY_INTF_GMII] = "GMII",
	[PHY_INTF_RGMII] = "RGMII",
};
146
/* Forward declarations for handlers and helpers defined later in this file */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_queues_param(struct stmmac_priv *priv);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan);
static void stmmac_vlan_restore(struct stmmac_priv *priv);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

/* Convert a coalescing timeout expressed in usec into a ktime value */
#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

/* Devlink private data: a thin wrapper pointing back at the driver state */
struct stmmac_devlink_priv {
	struct stmmac_priv *stmmac_priv;
};

/* Driver-specific devlink parameter IDs, allocated above the generic range */
enum stmmac_dl_param_id {
	STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	STMMAC_DEVLINK_PARAM_ID_TS_COARSE,
};
178
179 /**
180 * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
181 * @bsp_priv: BSP private data structure (unused)
182 * @clk_tx_i: the transmit clock
183 * @interface: the selected interface mode
184 * @speed: the speed that the MAC will be operating at
185 *
186 * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
187 * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
188 * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
189 * the plat_data->set_clk_tx_rate method directly, call it via their own
190 * implementation, or implement their own method should they have more
191 * complex requirements. It is intended to only be used in this method.
192 *
193 * plat_data->clk_tx_i must be filled in.
194 */
int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
			   phy_interface_t interface, int speed)
{
	long rate;

	rate = rgmii_clock(speed);
	if (rate < 0) {
		/* rgmii_clock() only knows 10/100/1000Mbps; quietly accept
		 * other speeds (e.g. 2500 and above) instead of failing.
		 */
		return 0;
	}

	return clk_set_rate(clk_tx_i, rate);
}
EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
210
211 /**
212 * stmmac_axi_blen_to_mask() - convert a burst length array to reg value
213 * @regval: pointer to a u32 for the resulting register value
214 * @blen: pointer to an array of u32 containing the burst length values in bytes
215 * @len: the number of entries in the @blen array
216 */
void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
{
	u32 mask = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		u32 burst = blen[i];

		/* Zero entries are simply unused slots - skip quietly */
		if (!burst)
			continue;

		/* Valid bursts are powers of two in the range [4, 256] */
		if (!is_power_of_2(burst) || burst < 4 || burst > 256) {
			pr_err("stmmac: invalid burst length %u at index %zu\n",
			       burst, i);
			continue;
		}

		/* The register field's bit 0 corresponds to burst = 4, so a
		 * right shift by two maps each burst onto its field bit.
		 */
		mask |= burst >> 2;
	}

	*regval = FIELD_PREP(DMA_AXI_BLEN_MASK, mask);
}
EXPORT_SYMBOL_GPL(stmmac_axi_blen_to_mask);
248
249 /**
250 * stmmac_verify_args - verify the driver parameters.
251 * Description: it checks the driver parameters and set a default in case of
252 * errors.
253 */
stmmac_verify_args(void)254 static void stmmac_verify_args(void)
255 {
256 if (unlikely(watchdog < 0))
257 watchdog = TX_TIMEO;
258 if (unlikely((pause < 0) || (pause > 0xffff)))
259 pause = PAUSE_TIME;
260
261 if (flow_ctrl != 0xdead)
262 pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
263 }
264
__stmmac_disable_all_queues(struct stmmac_priv * priv)265 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
266 {
267 u8 rx_queues_cnt = priv->plat->rx_queues_to_use;
268 u8 tx_queues_cnt = priv->plat->tx_queues_to_use;
269 u8 maxq = max(rx_queues_cnt, tx_queues_cnt);
270 u8 queue;
271
272 for (queue = 0; queue < maxq; queue++) {
273 struct stmmac_channel *ch = &priv->channel[queue];
274
275 if (stmmac_xdp_is_enabled(priv) &&
276 test_bit(queue, priv->af_xdp_zc_qps)) {
277 napi_disable(&ch->rxtx_napi);
278 continue;
279 }
280
281 if (queue < rx_queues_cnt)
282 napi_disable(&ch->rx_napi);
283 if (queue < tx_queues_cnt)
284 napi_disable(&ch->tx_napi);
285 }
286 }
287
288 /**
289 * stmmac_disable_all_queues - Disable all queues
290 * @priv: driver private structure
291 */
stmmac_disable_all_queues(struct stmmac_priv * priv)292 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
293 {
294 u8 rx_queues_cnt = priv->plat->rx_queues_to_use;
295 struct stmmac_rx_queue *rx_q;
296 u8 queue;
297
298 /* synchronize_rcu() needed for pending XDP buffers to drain */
299 for (queue = 0; queue < rx_queues_cnt; queue++) {
300 rx_q = &priv->dma_conf.rx_queue[queue];
301 if (rx_q->xsk_pool) {
302 synchronize_rcu();
303 break;
304 }
305 }
306
307 __stmmac_disable_all_queues(priv);
308 }
309
310 /**
311 * stmmac_enable_all_queues - Enable all queues
312 * @priv: driver private structure
313 */
stmmac_enable_all_queues(struct stmmac_priv * priv)314 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
315 {
316 u8 rx_queues_cnt = priv->plat->rx_queues_to_use;
317 u8 tx_queues_cnt = priv->plat->tx_queues_to_use;
318 u8 maxq = max(rx_queues_cnt, tx_queues_cnt);
319 u8 queue;
320
321 for (queue = 0; queue < maxq; queue++) {
322 struct stmmac_channel *ch = &priv->channel[queue];
323
324 if (stmmac_xdp_is_enabled(priv) &&
325 test_bit(queue, priv->af_xdp_zc_qps)) {
326 napi_enable(&ch->rxtx_napi);
327 continue;
328 }
329
330 if (queue < rx_queues_cnt)
331 napi_enable(&ch->rx_napi);
332 if (queue < tx_queues_cnt)
333 napi_enable(&ch->tx_napi);
334 }
335 }
336
stmmac_service_event_schedule(struct stmmac_priv * priv)337 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
338 {
339 if (!test_bit(STMMAC_DOWN, &priv->state) &&
340 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
341 queue_work(priv->wq, &priv->service_task);
342 }
343
/* Handle a fatal error: drop the carrier so the stack stops transmitting,
 * then hand a full reset request to the service task.
 */
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}
350
/* Debug helper: log the buffer address/length and hex-dump @len bytes */
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
356
/* Return the number of free descriptors in the given TX ring */
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	const struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 ring_size = priv->dma_conf.dma_tx_size;

	return CIRC_SPACE(tx_q->cur_tx, tx_q->dirty_tx, ring_size);
}
364
stmmac_get_tx_desc_size(struct stmmac_priv * priv,struct stmmac_tx_queue * tx_q)365 static size_t stmmac_get_tx_desc_size(struct stmmac_priv *priv,
366 struct stmmac_tx_queue *tx_q)
367 {
368 if (priv->extend_desc)
369 return sizeof(struct dma_extended_desc);
370 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
371 return sizeof(struct dma_edesc);
372 else
373 return sizeof(struct dma_desc);
374 }
375
stmmac_get_tx_desc(struct stmmac_priv * priv,struct stmmac_tx_queue * tx_q,unsigned int index)376 static struct dma_desc *stmmac_get_tx_desc(struct stmmac_priv *priv,
377 struct stmmac_tx_queue *tx_q,
378 unsigned int index)
379 {
380 if (priv->extend_desc)
381 return &tx_q->dma_etx[index].basic;
382 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
383 return &tx_q->dma_entx[index].basic;
384 else
385 return &tx_q->dma_tx[index];
386 }
387
stmmac_set_queue_tx_tail_ptr(struct stmmac_priv * priv,struct stmmac_tx_queue * tx_q,unsigned int chan,unsigned int index)388 static void stmmac_set_queue_tx_tail_ptr(struct stmmac_priv *priv,
389 struct stmmac_tx_queue *tx_q,
390 unsigned int chan, unsigned int index)
391 {
392 size_t desc_size;
393 u32 tx_tail_addr;
394
395 desc_size = stmmac_get_tx_desc_size(priv, tx_q);
396
397 tx_tail_addr = tx_q->dma_tx_phy + index * desc_size;
398 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_tail_addr, chan);
399 }
400
stmmac_get_rx_desc_size(struct stmmac_priv * priv)401 static size_t stmmac_get_rx_desc_size(struct stmmac_priv *priv)
402 {
403 if (priv->extend_desc)
404 return sizeof(struct dma_extended_desc);
405 else
406 return sizeof(struct dma_desc);
407 }
408
stmmac_get_rx_desc(struct stmmac_priv * priv,struct stmmac_rx_queue * rx_q,unsigned int index)409 static struct dma_desc *stmmac_get_rx_desc(struct stmmac_priv *priv,
410 struct stmmac_rx_queue *rx_q,
411 unsigned int index)
412 {
413 if (priv->extend_desc)
414 return &rx_q->dma_erx[index].basic;
415 else
416 return &rx_q->dma_rx[index];
417 }
418
stmmac_set_queue_rx_tail_ptr(struct stmmac_priv * priv,struct stmmac_rx_queue * rx_q,unsigned int chan,unsigned int index)419 static void stmmac_set_queue_rx_tail_ptr(struct stmmac_priv *priv,
420 struct stmmac_rx_queue *rx_q,
421 unsigned int chan, unsigned int index)
422 {
423 /* This only needs to deal with normal descriptors as enhanced
424 * descriptiors are only supported with dwmac1000 (<v4.0) which
425 * does not implement .set_rx_tail_ptr
426 */
427 u32 rx_tail_addr = rx_q->dma_rx_phy + index * sizeof(struct dma_desc);
428
429 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_tail_addr, chan);
430 }
431
stmmac_set_queue_rx_buf_size(struct stmmac_priv * priv,struct stmmac_rx_queue * rx_q,unsigned int chan)432 static void stmmac_set_queue_rx_buf_size(struct stmmac_priv *priv,
433 struct stmmac_rx_queue *rx_q,
434 unsigned int chan)
435 {
436 u32 buf_size;
437
438 if (rx_q->xsk_pool && rx_q->buf_alloc_num)
439 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
440 else
441 buf_size = priv->dma_conf.dma_buf_sz;
442
443 stmmac_set_dma_bfsize(priv, priv->ioaddr, buf_size, chan);
444 }
445
446 /**
447 * stmmac_rx_dirty - Get RX queue dirty
448 * @priv: driver private structure
449 * @queue: RX queue index
450 */
/**
 * stmmac_rx_dirty - number of RX descriptors consumed but not yet refilled
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	const struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	u32 ring_size = priv->dma_conf.dma_rx_size;

	return CIRC_CNT(rx_q->cur_rx, rx_q->dirty_rx, ring_size);
}
458
stmmac_eee_tx_busy(struct stmmac_priv * priv)459 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
460 {
461 u8 tx_cnt = priv->plat->tx_queues_to_use;
462 u8 queue;
463
464 /* check if all TX queues have the work finished */
465 for (queue = 0; queue < tx_cnt; queue++) {
466 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
467
468 if (tx_q->dirty_tx != tx_q->cur_tx)
469 return true; /* still unfinished work */
470 }
471
472 return false;
473 }
474
/* Re-arm the software LPI entry timer for another tx_lpi_timer period */
static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
{
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
479
480 /**
481 * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
482 * @priv: driver private structure
483 * Description: this function is to verify and enter in LPI mode in case of
484 * EEE.
485 */
stmmac_try_to_start_sw_lpi(struct stmmac_priv * priv)486 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
487 {
488 if (stmmac_eee_tx_busy(priv)) {
489 stmmac_restart_sw_lpi_timer(priv);
490 return;
491 }
492
493 /* Check and enter in LPI mode */
494 if (!priv->tx_path_in_lpi_mode)
495 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
496 priv->tx_lpi_clk_stop, 0);
497 }
498
499 /**
500 * stmmac_stop_sw_lpi - stop transmitting LPI
501 * @priv: driver private structure
502 * Description: When using software-controlled LPI, stop transmitting LPI state.
503 */
static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
{
	/* Cancel any pending LPI-entry timer before forcing LPI off */
	timer_delete_sync(&priv->eee_ctrl_timer);
	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
	priv->tx_path_in_lpi_mode = false;
}
510
511 /**
512 * stmmac_eee_ctrl_timer - EEE TX SW timer.
513 * @t: timer_list struct containing private info
514 * Description:
515 * if there is no data transfer and if we are not in LPI state,
516 * then MAC Transmitter can be moved to LPI state.
517 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);

	/* Timer expired: attempt LPI entry (re-arms itself if TX is busy) */
	stmmac_try_to_start_sw_lpi(priv);
}
524
525 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
526 * @priv: driver private structure
527 * @p : descriptor pointer
528 * @skb : the socket buffer
529 * Description :
530 * This function will read timestamp from the descriptor & pass it to stack.
531 * and also perform some sanity checks.
532 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status; fall back to the MAC-level timestamp when
	 * the descriptor doesn't carry one
	 */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		/* Compensate for the platform's known CDC offset */
		ns -= priv->plat->cdc_error_adj;

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}
566
567 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
568 * @priv: driver private structure
569 * @p : descriptor pointer
570 * @np : next descriptor pointer
571 * @skb : the socket buffer
572 * Description :
573 * This function will read received packet's timestamp from the descriptor
574 * and pass it to stack. It also perform some sanity checks.
575 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (dwmac_is_xmac(priv->plat->core_type))
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		/* Compensate for the platform's known CDC offset */
		ns -= priv->plat->cdc_error_adj;

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}
603
/* Reprogram the sub-second increment and default addend registers from the
 * current PTP reference clock rate, caching both values for later use by the
 * PTP clock callbacks.
 */
static void stmmac_update_subsecond_increment(struct stmmac_priv *priv)
{
	bool xmac = dwmac_is_xmac(priv->plat->core_type);
	u32 sec_inc = 0;
	u64 temp = 0;

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * formula is :
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
}
630
631 /**
632 * stmmac_hwtstamp_set - control hardware timestamping.
633 * @dev: device pointer.
634 * @config: the timestamping configuration.
635 * @extack: netlink extended ack structure for error reporting.
636 * Description:
637 * This function configures the MAC to enable/disable both outgoing(TX)
638 * and incoming(RX) packets time stamping based on user input.
639 * Return Value:
640 * 0 on success and an appropriate -ve integer on failure.
641 */
static int stmmac_hwtstamp_set(struct net_device *dev,
			       struct kernel_hwtstamp_config *config,
			       struct netlink_ext_ack *extack)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (!netif_running(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot change timestamping configuration while down");
		return -ENODEV;
	}

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config->flags, config->tx_type, config->rx_filter);

	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* Advanced timestamping (PTPv2-capable cores): map each requested RX
	 * filter onto the PTP_TCR control bits accumulated in the locals
	 * above. Filters the HW cannot match exactly are widened and the
	 * actually-applied filter written back into config->rx_filter.
	 */
	if (priv->adv_ts) {
		switch (config->rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config->rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			/* Older cores additionally need the event enable bit */
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config->rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		/* Basic (PTPv1-only) timestamping: everything except NONE is
		 * widened to "all PTPv1 L4 events".
		 */
		switch (config->rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config->rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;

	/* Keep the counter running (see STMMAC_HWTS_ACTIVE); use fine
	 * correction unless coarse update mode was requested.
	 */
	priv->systime_flags = STMMAC_HWTS_ACTIVE;
	if (!priv->tsfupdt_coarse)
		priv->systime_flags |= PTP_TCR_TSCFUPDT;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	/* Remember the (possibly widened) config for stmmac_hwtstamp_get() */
	priv->tstamp_config = *config;

	return 0;
}
831
832 /**
833 * stmmac_hwtstamp_get - read hardware timestamping.
834 * @dev: device pointer.
835 * @config: the timestamping configuration.
836 * Description:
837 * This function obtain the current hardware timestamping settings
838 * as requested.
839 */
stmmac_hwtstamp_get(struct net_device * dev,struct kernel_hwtstamp_config * config)840 static int stmmac_hwtstamp_get(struct net_device *dev,
841 struct kernel_hwtstamp_config *config)
842 {
843 struct stmmac_priv *priv = netdev_priv(dev);
844
845 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
846 return -EOPNOTSUPP;
847
848 *config = priv->tstamp_config;
849
850 return 0;
851 }
852
853 /**
854 * stmmac_init_tstamp_counter - init hardware timestamping counter
855 * @priv: driver private structure
856 * @systime_flags: timestamping flags
857 * Description:
858 * Initialize hardware counter for packet timestamping.
859 * This is valid as long as the interface is open and not suspended.
860 * Will be rerun after resuming from suspend, case in which the timestamping
861 * flags updated by stmmac_hwtstamp_set() also need to be restored.
862 */
static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
				      u32 systime_flags)
{
	struct timespec64 now;

	if (!priv->plat->clk_ptp_rate) {
		/* Fix: terminate the log message with '\n' so it is not
		 * merged with a subsequent printk (kernel printk convention).
		 */
		netdev_err(priv->dev, "Invalid PTP clock rate\n");
		return -EINVAL;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	/* Cache the flags so a resume path can reapply the same config */
	priv->systime_flags = systime_flags;

	/* Program sub-second increment and addend from the PTP clock rate */
	stmmac_update_subsecond_increment(priv);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
886
887 /**
888 * stmmac_init_timestamping - initialise timestamping
889 * @priv: driver private structure
890 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
891 * This is done by looking at the HW cap. register.
892 * This function also registers the ptp driver.
893 */
static int stmmac_init_timestamping(struct stmmac_priv *priv)
{
	bool xmac = dwmac_is_xmac(priv->plat->core_type);
	int ret;

	/* Let the platform reconfigure the PTP clock frequency first */
	if (priv->plat->ptp_clk_freq_config)
		priv->plat->ptp_clk_freq_config(priv);

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
		netdev_info(priv->dev, "PTP not supported by HW\n");
		return -EOPNOTSUPP;
	}

	/* Start the counter with fine correction and binary rollover */
	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE |
					       PTP_TCR_TSCFUPDT);
	if (ret) {
		netdev_warn(priv->dev, "PTP init failed\n");
		return ret;
	}

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	/* Timestamping starts disabled until userspace requests it */
	memset(&priv->tstamp_config, 0, sizeof(priv->tstamp_config));
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);

	return 0;
}
938
stmmac_setup_ptp(struct stmmac_priv * priv)939 static void stmmac_setup_ptp(struct stmmac_priv *priv)
940 {
941 int ret;
942
943 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
944 if (ret < 0)
945 netdev_warn(priv->dev,
946 "failed to enable PTP reference clock: %pe\n",
947 ERR_PTR(ret));
948
949 if (stmmac_init_timestamping(priv) == 0)
950 stmmac_ptp_register(priv);
951 }
952
/* Tear down PTP: unregister the PTP clock driver and gate the PTP
 * reference clock that stmmac_setup_ptp() enabled.
 */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	stmmac_ptp_unregister(priv);
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
958
/* Power down the SerDes via the platform callback, but only when it was
 * previously powered up through stmmac_legacy_serdes_power_up().
 */
static void stmmac_legacy_serdes_power_down(struct stmmac_priv *priv)
{
	if (priv->plat->serdes_powerdown && priv->legacy_serdes_is_powered)
		priv->plat->serdes_powerdown(priv->dev, priv->plat->bsp_priv);

	/* Unconditionally record the powered-down state */
	priv->legacy_serdes_is_powered = false;
}
966
stmmac_legacy_serdes_power_up(struct stmmac_priv * priv)967 static int stmmac_legacy_serdes_power_up(struct stmmac_priv *priv)
968 {
969 int ret;
970
971 if (!priv->plat->serdes_powerup)
972 return 0;
973
974 ret = priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
975 if (ret < 0)
976 netdev_err(priv->dev, "SerDes powerup failed\n");
977 else
978 priv->legacy_serdes_is_powered = true;
979
980 return ret;
981 }
982
/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * @flow_ctrl: desired flow control modes
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
				 unsigned int flow_ctrl)
{
	u8 tx_cnt = priv->plat->tx_queues_to_use;

	/* Thin wrapper: push the requested mode and the configured pause
	 * time down to the core-specific flow control implementation.
	 */
	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
			 tx_cnt);
}
998
/* phylink .mac_get_caps: recompute and return the MAC capabilities for
 * @interface, masking out half duplex when the HW capability register
 * reports it unsupported and clamping to the platform's maximum speed.
 */
static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
					 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	/* Refresh the MAC-specific capabilities */
	stmmac_mac_update_caps(priv);

	/* Strip the half-duplex link modes when the DMA caps say so */
	if (priv->hw_cap_support && !priv->dma_cap.half_duplex)
		priv->hw->link.caps &= ~(MAC_1000HD | MAC_100HD | MAC_10HD);

	config->mac_capabilities = priv->hw->link.caps;

	if (priv->plat->max_speed)
		phylink_limit_mac_speed(config, priv->plat->max_speed);

	return config->mac_capabilities;
}
1017
/* phylink .mac_select_pcs: prefer a platform-provided PCS for
 * @interface, fall back to the integrated PCS when it supports the
 * interface, and return NULL when no PCS is required.
 */
static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
						 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	if (priv->plat->select_pcs) {
		struct phylink_pcs *plat_pcs;

		plat_pcs = priv->plat->select_pcs(priv, interface);
		if (!IS_ERR(plat_pcs))
			return plat_pcs;
	}

	if (priv->integrated_pcs &&
	    test_bit(interface, priv->integrated_pcs->pcs.supported_interfaces))
		return &priv->integrated_pcs->pcs;

	return NULL;
}
1036
/* phylink .mac_config: intentionally empty — the PCS layer performs the
 * required configuration for this MAC.
 */
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}
1042
/* phylink .mac_finish: give the optional platform glue a chance to
 * finalise the switch to the new (mode, interface) pair. Always
 * returns 0.
 */
static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
			     phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	if (priv->plat->mac_finish)
		priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode,
				       interface);

	return 0;
}
1055
/* phylink .mac_link_down: disable the MAC, drop the EEE PHY-link-status
 * indication and notify the frame-preemption state machine that the
 * link is gone.
 */
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	if (priv->dma_cap.eee)
		stmmac_set_eee_pls(priv, priv->hw, false);

	if (stmmac_fpe_supported(priv))
		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
}
1068
/* phylink .mac_link_up: program the MAC for the negotiated link
 * parameters (speed, duplex, pause), optionally power up the SerDes and
 * retune the transmit clock, then finally enable the MAC.
 */
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	unsigned int flow_ctrl;
	u32 old_ctrl, ctrl;
	int ret;

	/* Some platforms need the SerDes brought up only after the PHY
	 * has established link.
	 */
	if (priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)
		stmmac_legacy_serdes_power_up(priv);

	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl = old_ctrl & ~priv->hw->link.speed_mask;

	/* Translate the negotiated speed into the interface-specific
	 * speed bits; silently bail out on a speed the interface cannot
	 * carry.
	 */
	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	/* Let the platform glue apply any speed-dependent fixup */
	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, interface,
					  speed, mode);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (rx_pause && tx_pause)
		flow_ctrl = FLOW_AUTO;
	else if (rx_pause && !tx_pause)
		flow_ctrl = FLOW_RX;
	else if (!rx_pause && tx_pause)
		flow_ctrl = FLOW_TX;
	else
		flow_ctrl = FLOW_OFF;

	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);

	/* Only touch the control register when something changed */
	if (ctrl != old_ctrl)
		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	if (priv->plat->set_clk_tx_rate) {
		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
						  priv->plat->clk_tx_i,
						  interface, speed);
		if (ret < 0)
			netdev_err(priv->dev,
				   "failed to configure %s transmit clock for %dMbps: %pe\n",
				   phy_modes(interface), speed, ERR_PTR(ret));
	}

	/* Enable the MAC last, once everything else is configured */
	stmmac_mac_set(priv, priv->ioaddr, true);
	if (priv->dma_cap.eee)
		stmmac_set_eee_pls(priv, priv->hw, true);

	if (stmmac_fpe_supported(priv))
		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);
}
1189
/* phylink .mac_disable_tx_lpi: stop both the software LPI timer and the
 * hardware LPI engine and clear all EEE bookkeeping under priv->lock.
 */
static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	priv->eee_active = false;

	mutex_lock(&priv->lock);

	priv->eee_enabled = false;

	netdev_dbg(priv->dev, "disable EEE\n");
	priv->eee_sw_timer_en = false;
	/* Ensure no software LPI timer callback is still in flight */
	timer_delete_sync(&priv->eee_ctrl_timer);
	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
	priv->tx_path_in_lpi_mode = false;

	/* Reset the LS timer to 0 while keeping the default TW timer */
	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
	mutex_unlock(&priv->lock);
}
1209
/* phylink .mac_enable_tx_lpi: program the EEE timers and try to enter
 * hardware LPI timer mode; fall back to the software LPI timer when the
 * hardware rejects the requested timer value. Always returns 0.
 */
static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
				    bool tx_clk_stop)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	int ret;

	priv->tx_lpi_timer = timer;
	priv->eee_active = true;

	mutex_lock(&priv->lock);

	priv->eee_enabled = true;

	/* Update the transmit clock stop according to PHY capability if
	 * the platform allows
	 */
	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
		priv->tx_lpi_clk_stop = tx_clk_stop;

	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
			     STMMAC_DEFAULT_TWT_LS);

	/* Try to configure the hardware timer. */
	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);

	if (ret) {
		/* Hardware timer mode not supported, or value out of range.
		 * Fall back to using software LPI mode
		 */
		priv->eee_sw_timer_en = true;
		stmmac_restart_sw_lpi_timer(priv);
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");

	return 0;
}
1249
/* phylink .mac_wol_set: record the requested MAC-level Wake-on-LAN
 * options and propagate the wakeup-enable state to the device core.
 * @sopass is not used by this implementation. Always returns 0.
 */
static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts,
			      const u8 *sopass)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	device_set_wakeup_enable(priv->device, !!wolopts);

	/* priv->wolopts is shared state; update it under the lock */
	mutex_lock(&priv->lock);
	priv->wolopts = wolopts;
	mutex_unlock(&priv->lock);

	return 0;
}
1263
/* MAC-side callbacks handed to phylink_create(); these bridge phylink's
 * MAC operations onto the stmmac core.
 */
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.mac_get_caps = stmmac_mac_get_caps,
	.mac_select_pcs = stmmac_mac_select_pcs,
	.mac_config = stmmac_mac_config,
	.mac_finish = stmmac_mac_finish,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
	.mac_wol_set = stmmac_mac_wol_set,
};
1275
1276 /**
1277 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1278 * @priv: driver private structure
1279 * Description: this is to verify if the HW supports the PCS.
1280 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1281 * configured for the TBI, RTBI, or SGMII PHY interface.
1282 */
stmmac_check_pcs_mode(struct stmmac_priv * priv)1283 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1284 {
1285 int interface = priv->plat->phy_interface;
1286 int speed = priv->plat->mac_port_sel_speed;
1287
1288 if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) {
1289 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1290
1291 switch (speed) {
1292 case SPEED_10:
1293 case SPEED_100:
1294 case SPEED_1000:
1295 priv->hw->reverse_sgmii_enable = true;
1296 break;
1297
1298 default:
1299 dev_warn(priv->device, "invalid port speed\n");
1300 fallthrough;
1301 case 0:
1302 priv->hw->reverse_sgmii_enable = false;
1303 break;
1304 }
1305 }
1306 }
1307
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success (or when no PHY attach is required), -ENODEV when no PHY
 * can be located, or the error from the phylink connect call.
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int mode = priv->plat->phy_interface;
	struct fwnode_handle *phy_fwnode;
	struct fwnode_handle *fwnode;
	struct ethtool_keee eee;
	u32 dev_flags = 0;
	int ret;

	/* Nothing to do when phylink does not expect a PHY attachment */
	if (!phylink_expects_phy(priv->phylink))
		return 0;

	/* With C73 autoneg the xpcs manages the link without a PHY */
	if (priv->hw->xpcs &&
	    xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
		return 0;

	fwnode = dev_fwnode(priv->device);
	if (fwnode)
		phy_fwnode = fwnode_get_phy_node(fwnode);
	else
		phy_fwnode = NULL;

	if (priv->plat->flags & STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD)
		dev_flags |= PHY_F_KEEP_PREAMBLE_BEFORE_SFD;

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		if (addr < 0) {
			netdev_err(priv->dev, "no phy found\n");
			return -ENODEV;
		}

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		phydev->dev_flags |= dev_flags;

		ret = phylink_connect_phy(priv->phylink, phydev);
	} else {
		/* Drop the reference taken by fwnode_get_phy_node();
		 * phylink re-resolves the PHY from @fwnode itself.
		 */
		fwnode_handle_put(phy_fwnode);
		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, dev_flags);
	}

	if (ret) {
		netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
			   ERR_PTR(ret));
		return ret;
	}

	/* Configure phylib's copy of the LPI timer. Normally,
	 * phylink_config.lpi_timer_default would do this, but there is a
	 * chance that userspace could change the eee_timer setting via sysfs
	 * before the first open. Thus, preserve existing behaviour.
	 */
	if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
		eee.tx_lpi_timer = priv->tx_lpi_timer;
		phylink_ethtool_set_eee(priv->phylink, &eee);
	}

	return 0;
}
1386
/* Build the phylink configuration from the DMA capabilities and the
 * platform data, then create the phylink instance for this netdev.
 * Returns 0 on success or the error from phylink_create().
 */
static int stmmac_phylink_setup(struct stmmac_priv *priv)
{
	struct phylink_config *config;
	struct phylink_pcs *pcs;
	struct phylink *phylink;

	config = &priv->phylink_config;

	config->dev = &priv->dev->dev;
	config->type = PHYLINK_NETDEV;
	config->mac_managed_pm = true;

	/* Stmmac always requires an RX clock for hardware initialization */
	config->mac_requires_rxc = true;

	/* Disable EEE RX clock stop to ensure VLAN register access works
	 * correctly.
	 */
	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI) &&
	    !(priv->dev->features & NETIF_F_VLAN_FEATURES))
		config->eee_rx_clk_stop_enable = true;

	/* Set the default transmit clock stop bit based on the platform glue */
	priv->tx_lpi_clk_stop = priv->plat->flags &
				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;

	/* Get the PHY interface modes (at the PHY end of the link) that
	 * are supported by the platform.
	 */
	if (priv->plat->get_interfaces)
		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
					   config->supported_interfaces);

	config->default_an_inband = priv->plat->default_an_inband;

	/* Set the platform/firmware specified interface mode if the
	 * supported interfaces have not already been provided using
	 * phy_interface as a last resort.
	 */
	if (phy_interface_empty(config->supported_interfaces))
		__set_bit(priv->plat->phy_interface,
			  config->supported_interfaces);

	/* If we have an xpcs, it defines which PHY interfaces are supported. */
	if (priv->hw->xpcs)
		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
	else
		pcs = priv->hw->phylink_pcs;

	if (pcs)
		phy_interface_or(config->supported_interfaces,
				 config->supported_interfaces,
				 pcs->supported_interfaces);

	/* Some platforms, e.g. iMX8MP, wire lpi_intr_o to the same interrupt
	 * used for stmmac's main interrupts, which leads to interrupt storms.
	 * STMMAC_FLAG_EEE_DISABLE allows EEE to be disabled on such platforms.
	 */
	if (priv->dma_cap.eee &&
	    !(priv->plat->flags & STMMAC_FLAG_EEE_DISABLE)) {
		/* The GMAC 3.74a databook states that EEE is only supported
		 * in MII, GMII, and RGMII interfaces.
		 */
		__set_bit(PHY_INTERFACE_MODE_MII, config->lpi_interfaces);
		__set_bit(PHY_INTERFACE_MODE_GMII, config->lpi_interfaces);
		phy_interface_set_rgmii(config->lpi_interfaces);

		/* If we have a non-integrated PCS, assume that it is connected
		 * to the GMAC using GMII or another EEE compatible interface,
		 * and thus all PCS-supported interfaces support LPI.
		 */
		if (pcs)
			phy_interface_or(config->lpi_interfaces,
					 config->lpi_interfaces,
					 pcs->supported_interfaces);

		/* All full duplex speeds above 100Mbps are supported */
		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
		config->lpi_timer_default = eee_timer * 1000;
		config->eee_enabled_default = true;
	}

	/* Wake-on-LAN: either the legacy PHY path or the MAC PMT modes
	 * reported by the DMA capability register.
	 */
	config->wol_phy_speed_ctrl = true;
	if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) {
		config->wol_phy_legacy = true;
	} else {
		if (priv->dma_cap.pmt_remote_wake_up)
			config->wol_mac_support |= WAKE_UCAST;
		if (priv->dma_cap.pmt_magic_frame)
			config->wol_mac_support |= WAKE_MAGIC;
	}

	phylink = phylink_create(config, dev_fwnode(priv->device),
				 priv->plat->phy_interface,
				 &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}
1488
stmmac_display_rx_rings(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1489 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1490 struct stmmac_dma_conf *dma_conf)
1491 {
1492 u8 rx_cnt = priv->plat->rx_queues_to_use;
1493 unsigned int desc_size;
1494 void *head_rx;
1495 u8 queue;
1496
1497 /* Display RX rings */
1498 for (queue = 0; queue < rx_cnt; queue++) {
1499 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1500
1501 pr_info("\tRX Queue %u rings\n", queue);
1502
1503 head_rx = stmmac_get_rx_desc(priv, rx_q, 0);
1504 desc_size = stmmac_get_rx_desc_size(priv);
1505
1506 /* Display RX ring */
1507 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1508 rx_q->dma_rx_phy, desc_size);
1509 }
1510 }
1511
stmmac_display_tx_rings(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1512 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1513 struct stmmac_dma_conf *dma_conf)
1514 {
1515 u8 tx_cnt = priv->plat->tx_queues_to_use;
1516 unsigned int desc_size;
1517 void *head_tx;
1518 u8 queue;
1519
1520 /* Display TX rings */
1521 for (queue = 0; queue < tx_cnt; queue++) {
1522 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1523
1524 pr_info("\tTX Queue %d rings\n", queue);
1525
1526 head_tx = stmmac_get_tx_desc(priv, tx_q, 0);
1527 desc_size = stmmac_get_tx_desc_size(priv, tx_q);
1528
1529 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1530 tx_q->dma_tx_phy, desc_size);
1531 }
1532 }
1533
/* Dump all RX and TX descriptor rings for debugging. */
static void stmmac_display_rings(struct stmmac_priv *priv,
				 struct stmmac_dma_conf *dma_conf)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv, dma_conf);

	/* Display TX ring */
	stmmac_display_tx_rings(priv, dma_conf);
}
1543
stmmac_rx_offset(struct stmmac_priv * priv)1544 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1545 {
1546 if (stmmac_xdp_is_enabled(priv))
1547 return XDP_PACKET_HEADROOM;
1548
1549 return NET_SKB_PAD;
1550 }
1551
stmmac_set_bfsize(int mtu)1552 static int stmmac_set_bfsize(int mtu)
1553 {
1554 int ret;
1555
1556 if (mtu >= BUF_SIZE_8KiB)
1557 ret = BUF_SIZE_16KiB;
1558 else if (mtu >= BUF_SIZE_4KiB)
1559 ret = BUF_SIZE_8KiB;
1560 else if (mtu >= BUF_SIZE_2KiB)
1561 ret = BUF_SIZE_4KiB;
1562 else if (mtu > DEFAULT_BUFSIZE)
1563 ret = BUF_SIZE_2KiB;
1564 else
1565 ret = DEFAULT_BUFSIZE;
1566
1567 return ret;
1568 }
1569
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * in case of both basic and extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
					struct stmmac_dma_conf *dma_conf,
					u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct dma_desc *desc;
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		desc = stmmac_get_rx_desc(priv, rx_q, i);

		/* The last descriptor closes the ring */
		stmmac_init_rx_desc(priv, desc, priv->use_riwt,
				    priv->descriptor_mode,
				    (i == dma_conf->dma_rx_size - 1),
				    dma_conf->dma_buf_sz);
	}
}
1596
1597 /**
1598 * stmmac_clear_tx_descriptors - clear tx descriptors
1599 * @priv: driver private structure
1600 * @dma_conf: structure to take the dma data
1601 * @queue: TX queue index.
1602 * Description: this function is called to clear the TX descriptors
1603 * in case of both basic and extended descriptors are used.
1604 */
stmmac_clear_tx_descriptors(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1605 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1606 struct stmmac_dma_conf *dma_conf,
1607 u32 queue)
1608 {
1609 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1610 int i;
1611
1612 /* Clear the TX descriptors */
1613 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1614 int last = (i == (dma_conf->dma_tx_size - 1));
1615 struct dma_desc *p;
1616
1617 p = stmmac_get_tx_desc(priv, tx_q, i);
1618 stmmac_init_tx_desc(priv, p, priv->descriptor_mode, last);
1619 }
1620 }
1621
/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * Description: this function is called to clear the TX and RX descriptors
 * in case of both basic and extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv,
				     struct stmmac_dma_conf *dma_conf)
{
	u8 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u8 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u8 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
}
1644
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 * Return: 0 on success, -ENOMEM when a page cannot be allocated.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
	/* NOTE(review): @flags is currently unused; the allocation always
	 * uses GFP_ATOMIC | __GFP_NOWARN regardless of the caller's flags.
	 */
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	/* Restrict to the 32-bit DMA zone on narrow-bus hosts */
	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	/* Primary buffer page; an already-present page is reused */
	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	/* Secondary page, only needed in split-header mode.
	 * NOTE(review): with sph_active and a pre-existing sec_page, the
	 * else branch overwrites the pointer without releasing the page —
	 * presumably callers only reach here with sec_page == NULL; verify.
	 */
	if (priv->sph_active && !buf->sec_page) {
		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}
1695
/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @rx_q: RX queue
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
				  struct stmmac_rx_queue *rx_q,
				  int i)
{
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	/* Return both the primary and the split-header page (if any) to
	 * the page pool and clear the slot.
	 */
	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}
1716
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

	/* XDP_TX buffers come from the RX page pool and are not unmapped
	 * here; everything else gets its DMA mapping torn down.
	 */
	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	/* Release a pending XDP frame, if this slot holds one */
	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	/* XSK frames are completed in bulk via xsk_tx_completed() */
	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}
1763
/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv,
			       struct stmmac_dma_conf *dma_conf,
			       u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* Release every page-pool backed buffer of this queue */
	for (i = 0; i < dma_conf->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, rx_q, i);
}
1780
/* Allocate and map a buffer for every descriptor of RX @queue, counting
 * each successful allocation in buf_alloc_num.
 * Returns 0 on success or the negative errno from the buffer init.
 */
static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
				   struct stmmac_dma_conf *dma_conf,
				   u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int err, i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct dma_desc *desc = stmmac_get_rx_desc(priv, rx_q, i);

		err = stmmac_init_rx_buffers(priv, dma_conf, desc, i, flags,
					     queue);
		if (err)
			return err;

		rx_q->buf_alloc_num++;
	}

	return 0;
}
1804
1805 /**
1806 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1807 * @priv: private structure
1808 * @dma_conf: structure to take the dma data
1809 * @queue: RX queue index
1810 */
dma_free_rx_xskbufs(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1811 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1812 struct stmmac_dma_conf *dma_conf,
1813 u32 queue)
1814 {
1815 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1816 int i;
1817
1818 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1819 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1820
1821 if (!buf->xdp)
1822 continue;
1823
1824 xsk_buff_free(buf->xdp);
1825 buf->xdp = NULL;
1826 }
1827 }
1828
/* Fill RX @queue with buffers drawn from its XSK pool and program each
 * descriptor with the buffer's DMA address.
 * Returns 0 on success, -ENOMEM when the pool runs out of buffers.
 */
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
				      struct stmmac_dma_conf *dma_conf,
				      u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
	 * use this macro to make sure no size violations.
	 */
	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		p = stmmac_get_rx_desc(priv, rx_q, i);

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		/* Hand the buffer's DMA address to the descriptor */
		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);
		rx_q->buf_alloc_num++;
	}

	return 0;
}
1862
/* Return the XSK buffer pool bound to @queue, or NULL when XDP is
 * disabled or the queue is not operating in zero-copy mode.
 */
static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
	if (stmmac_xdp_is_enabled(priv) &&
	    test_bit(queue, priv->af_xdp_zc_qps))
		return xsk_get_pool_from_qid(priv->dev, queue);

	return NULL;
}
1870
/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 * Return: 0 on success, -ENOMEM when buffer allocation fails.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	void *des;
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	/* Drop any previously registered memory model before choosing
	 * between the XSK pool and the page pool below.
	 */
	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    queue);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    queue);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	/* Setup the chained descriptor addresses */
	if (priv->descriptor_mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			des = rx_q->dma_erx;
		else
			des = rx_q->dma_rx;

		stmmac_mode_init(priv, des, rx_q->dma_rx_phy,
				 dma_conf->dma_rx_size, priv->extend_desc);
	}

	return 0;
}
1940
/* init_dma_rx_desc_rings - initialize every RX descriptor ring.
 * On failure, unwind the buffers of all queues initialized so far,
 * including the partially initialized failing queue.
 */
static int init_dma_rx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf,
				  gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 rx_count = priv->plat->rx_queues_to_use;
	int queue;	/* int (not u8): the unwind loop below counts down to -1 */
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	/* Release buffers for queues [0..queue]; the failing queue may have
	 * been partially populated, so it is freed too.
	 */
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		/* XSK zero-copy buffers and page-pool buffers are returned
		 * through different helpers.
		 */
		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, dma_conf, queue);
		else
			dma_free_rx_skbufs(priv, dma_conf, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		queue--;
	}

	return ret;
}
1979
/* Record the DMA mapping for one TX ring slot and reset its per-frame
 * bookkeeping flags (last_segment / is_jumbo).
 */
static void stmmac_set_tx_dma_entry(struct stmmac_tx_queue *tx_q,
				    unsigned int entry,
				    enum stmmac_txbuf_type type,
				    dma_addr_t addr, size_t len,
				    bool map_as_page)
{
	typeof(tx_q->tx_skbuff_dma[0]) *dma = &tx_q->tx_skbuff_dma[entry];

	dma->buf_type = type;
	dma->buf = addr;
	dma->len = len;
	dma->map_as_page = map_as_page;
	dma->last_segment = false;
	dma->is_jumbo = false;
}
1993
/* Convenience wrapper: fill a TX ring slot for a regular SKB buffer
 * (buf_type fixed to STMMAC_TXBUF_T_SKB).
 */
static void stmmac_set_tx_skb_dma_entry(struct stmmac_tx_queue *tx_q,
					unsigned int entry, dma_addr_t addr,
					size_t len, bool map_as_page)
{
	stmmac_set_tx_dma_entry(tx_q, entry, STMMAC_TXBUF_T_SKB, addr, len,
				map_as_page);
}
2001
/* Mark the TX ring slot at @entry as the last segment of a frame. */
static void stmmac_set_tx_dma_last_segment(struct stmmac_tx_queue *tx_q,
					   unsigned int entry)
{
	tx_q->tx_skbuff_dma[entry].last_segment = true;
}
2007
/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses */
	if (priv->descriptor_mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 1);
		/* TBS (enhanced) descriptors are not chained here — only
		 * basic descriptors get chain-mode linkage.
		 */
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	/* Clear every descriptor and reset the software bookkeeping so the
	 * ring starts empty.
	 */
	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		struct dma_desc *p;

		p = stmmac_get_tx_desc(priv, tx_q, i);
		stmmac_clear_desc(priv, p);
		stmmac_set_tx_skb_dma_entry(tx_q, i, 0, 0, false);

		tx_q->tx_skbuff[i] = NULL;
	}

	return 0;
}
2054
init_dma_tx_desc_rings(struct net_device * dev,struct stmmac_dma_conf * dma_conf)2055 static int init_dma_tx_desc_rings(struct net_device *dev,
2056 struct stmmac_dma_conf *dma_conf)
2057 {
2058 struct stmmac_priv *priv = netdev_priv(dev);
2059 u8 tx_queue_cnt;
2060 u8 queue;
2061
2062 tx_queue_cnt = priv->plat->tx_queues_to_use;
2063
2064 for (queue = 0; queue < tx_queue_cnt; queue++)
2065 __init_dma_tx_desc_rings(priv, dma_conf, queue);
2066
2067 return 0;
2068 }
2069
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev,
			       struct stmmac_dma_conf *dma_conf,
			       gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
	if (ret)
		return ret;

	/* TX init currently always returns 0, so no unwind of the RX
	 * rings is performed here.
	 */
	ret = init_dma_tx_desc_rings(dev, dma_conf);

	/* Re-clear all descriptors after both rings are set up so the
	 * hardware sees a consistent initial state.
	 */
	stmmac_clear_descriptors(priv, dma_conf);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv, dma_conf);

	return ret;
}
2099
/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv,
			       struct stmmac_dma_conf *dma_conf,
			       u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	/* Reset before the loop: stmmac_free_tx_buffer() accumulates the
	 * number of completed XSK frames into xsk_frames_done.
	 */
	tx_q->xsk_frames_done = 0;

	for (i = 0; i < dma_conf->dma_tx_size; i++)
		stmmac_free_tx_buffer(priv, dma_conf, queue, i);

	/* Report completed XSK frames back to the pool, then detach it.
	 * Note the pool is only detached when frames were actually
	 * completed here.
	 */
	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
		tx_q->xsk_frames_done = 0;
		tx_q->xsk_pool = NULL;
	}
}
2124
2125 /**
2126 * stmmac_free_tx_skbufs - free TX skb buffers
2127 * @priv: private structure
2128 */
stmmac_free_tx_skbufs(struct stmmac_priv * priv)2129 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
2130 {
2131 u8 tx_queue_cnt = priv->plat->tx_queues_to_use;
2132 u8 queue;
2133
2134 for (queue = 0; queue < tx_queue_cnt; queue++)
2135 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
2136 }
2137
/**
 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	size_t size;
	void *addr;

	/* Release the DMA RX socket buffers */
	if (rx_q->xsk_pool)
		dma_free_rx_xskbufs(priv, dma_conf, queue);
	else
		dma_free_rx_skbufs(priv, dma_conf, queue);

	rx_q->buf_alloc_num = 0;
	rx_q->xsk_pool = NULL;

	/* Free DMA regions of consistent memory previously allocated */
	if (priv->extend_desc)
		addr = rx_q->dma_erx;
	else
		addr = rx_q->dma_rx;

	size = stmmac_get_rx_desc_size(priv) * dma_conf->dma_rx_size;

	dma_free_coherent(priv->device, size, addr, rx_q->dma_rx_phy);

	/* Unregister only if registration succeeded earlier; this also
	 * drops the memory model attached in __init_dma_rx_desc_rings().
	 */
	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
		xdp_rxq_info_unreg(&rx_q->xdp_rxq);

	/* Buffers must be returned (above) before the pool is destroyed */
	kfree(rx_q->buf_pool);
	if (rx_q->page_pool)
		page_pool_destroy(rx_q->page_pool);
}
2178
free_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2179 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2180 struct stmmac_dma_conf *dma_conf)
2181 {
2182 u8 rx_count = priv->plat->rx_queues_to_use;
2183 u8 queue;
2184
2185 /* Free RX queue resources */
2186 for (queue = 0; queue < rx_count; queue++)
2187 __free_dma_rx_desc_resources(priv, dma_conf, queue);
2188 }
2189
2190 /**
2191 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2192 * @priv: private structure
2193 * @dma_conf: structure to take the dma data
2194 * @queue: TX queue index
2195 */
__free_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)2196 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2197 struct stmmac_dma_conf *dma_conf,
2198 u32 queue)
2199 {
2200 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2201 size_t size;
2202 void *addr;
2203
2204 /* Release the DMA TX socket buffers */
2205 dma_free_tx_skbufs(priv, dma_conf, queue);
2206
2207 if (priv->extend_desc) {
2208 addr = tx_q->dma_etx;
2209 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2210 addr = tx_q->dma_entx;
2211 } else {
2212 addr = tx_q->dma_tx;
2213 }
2214
2215 size = stmmac_get_tx_desc_size(priv, tx_q) * dma_conf->dma_tx_size;
2216
2217 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2218
2219 kfree(tx_q->tx_skbuff_dma);
2220 kfree(tx_q->tx_skbuff);
2221 }
2222
free_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2223 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2224 struct stmmac_dma_conf *dma_conf)
2225 {
2226 u8 tx_count = priv->plat->tx_queues_to_use;
2227 u8 queue;
2228
2229 /* Free TX queue resources */
2230 for (queue = 0; queue < tx_count; queue++)
2231 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2232 }
2233
/**
 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 *
 * On error, earlier allocations of this queue are left in place; the
 * caller (alloc_dma_rx_desc_resources) releases them via
 * free_dma_rx_desc_resources().
 */
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	bool xdp_prog = stmmac_xdp_is_enabled(priv);
	struct page_pool_params pp_params = { 0 };
	unsigned int dma_buf_sz_pad, num_pages;
	unsigned int napi_id;
	size_t size;
	void *addr;
	int ret;

	/* Size one RX buffer: headroom + payload + shared-info tail, then
	 * round up to whole pages for the page-pool order below.
	 */
	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);

	rx_q->queue_index = queue;
	rx_q->priv_data = priv;
	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;

	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = dma_conf->dma_rx_size;
	pp_params.order = order_base_2(num_pages);
	pp_params.nid = dev_to_node(priv->device);
	pp_params.dev = priv->device;
	/* XDP may write into the buffer, so map bidirectionally then */
	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	pp_params.offset = stmmac_rx_offset(priv);
	pp_params.max_len = dma_conf->dma_buf_sz;

	/* With split-header active the HW writes from offset 0, so fold
	 * the headroom into the sync length instead.
	 */
	if (priv->sph_active) {
		pp_params.offset = 0;
		pp_params.max_len += stmmac_rx_offset(priv);
	}

	rx_q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx_q->page_pool)) {
		ret = PTR_ERR(rx_q->page_pool);
		/* NULL it so the caller's cleanup does not free ERR_PTR */
		rx_q->page_pool = NULL;
		return ret;
	}

	rx_q->buf_pool = kzalloc_objs(*rx_q->buf_pool, dma_conf->dma_rx_size);
	if (!rx_q->buf_pool)
		return -ENOMEM;

	size = stmmac_get_rx_desc_size(priv) * dma_conf->dma_rx_size;

	addr = dma_alloc_coherent(priv->device, size, &rx_q->dma_rx_phy,
				  GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (priv->extend_desc)
		rx_q->dma_erx = addr;
	else
		rx_q->dma_rx = addr;

	/* Zero-copy queues are serviced by the combined rxtx NAPI */
	if (stmmac_xdp_is_enabled(priv) &&
	    test_bit(queue, priv->af_xdp_zc_qps))
		napi_id = ch->rxtx_napi.napi_id;
	else
		napi_id = ch->rx_napi.napi_id;

	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, queue, napi_id);
	if (ret) {
		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
		return -EINVAL;
	}

	return 0;
}
2317
alloc_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2318 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2319 struct stmmac_dma_conf *dma_conf)
2320 {
2321 u8 rx_count = priv->plat->rx_queues_to_use;
2322 u8 queue;
2323 int ret;
2324
2325 /* RX queues buffers and DMA */
2326 for (queue = 0; queue < rx_count; queue++) {
2327 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2328 if (ret)
2329 goto err_dma;
2330 }
2331
2332 return 0;
2333
2334 err_dma:
2335 free_dma_rx_desc_resources(priv, dma_conf);
2336
2337 return ret;
2338 }
2339
/**
 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 *
 * On error, earlier allocations of this queue are left in place; the
 * caller releases them via free_dma_tx_desc_resources().
 */
static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	size_t size;
	void *addr;

	tx_q->queue_index = queue;
	tx_q->priv_data = priv;

	/* Per-slot DMA bookkeeping array (addresses, lengths, flags) */
	tx_q->tx_skbuff_dma = kzalloc_objs(*tx_q->tx_skbuff_dma,
					   dma_conf->dma_tx_size);
	if (!tx_q->tx_skbuff_dma)
		return -ENOMEM;

	/* Per-slot SKB pointers, reclaimed at TX-complete time */
	tx_q->tx_skbuff = kzalloc_objs(struct sk_buff *, dma_conf->dma_tx_size);
	if (!tx_q->tx_skbuff)
		return -ENOMEM;

	size = stmmac_get_tx_desc_size(priv, tx_q) * dma_conf->dma_tx_size;

	addr = dma_alloc_coherent(priv->device, size,
				  &tx_q->dma_tx_phy, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	/* Store the ring under the member matching the descriptor flavour;
	 * __free_dma_tx_desc_resources() picks the same member to free.
	 */
	if (priv->extend_desc)
		tx_q->dma_etx = addr;
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_q->dma_entx = addr;
	else
		tx_q->dma_tx = addr;

	return 0;
}
2386
alloc_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2387 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2388 struct stmmac_dma_conf *dma_conf)
2389 {
2390 u8 tx_count = priv->plat->tx_queues_to_use;
2391 u8 queue;
2392 int ret;
2393
2394 /* TX queues buffers and DMA */
2395 for (queue = 0; queue < tx_count; queue++) {
2396 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2397 if (ret)
2398 goto err_dma;
2399 }
2400
2401 return 0;
2402
2403 err_dma:
2404 free_dma_tx_desc_resources(priv, dma_conf);
2405 return ret;
2406 }
2407
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 *
 * Allocates the RX resources first, then the TX resources; either helper
 * cleans up after itself on failure.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	int ret;

	ret = alloc_dma_rx_desc_resources(priv, dma_conf);
	if (ret)
		return ret;

	return alloc_dma_tx_desc_resources(priv, dma_conf);
}
2430
/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 *
 * The TX side must be released first: the order below is load-bearing.
 */
static void free_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv, dma_conf);

	/* Release the DMA RX socket buffers later
	 * to ensure all pending XDP_TX buffers are returned.
	 */
	free_dma_rx_desc_resources(priv, dma_conf);
}
2447
2448 /**
2449 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2450 * @priv: driver private structure
2451 * Description: It is used for enabling the rx queues in the MAC
2452 */
stmmac_mac_enable_rx_queues(struct stmmac_priv * priv)2453 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2454 {
2455 u8 rx_queues_count = priv->plat->rx_queues_to_use;
2456 u8 queue;
2457 u8 mode;
2458
2459 for (queue = 0; queue < rx_queues_count; queue++) {
2460 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2461 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2462 }
2463 }
2464
/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}
2477
/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}
2490
/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}
2503
/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}
2516
/* Enable the DMA interrupts of every channel.
 * A channel carries a CSR/IRQ register set whether it is used for RX, TX
 * or both, hence the max() of the two counts.
 */
static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
{
	u8 rx_channels_count = priv->plat->rx_queues_to_use;
	u8 tx_channels_count = priv->plat->tx_queues_to_use;
	u8 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	u8 chan;

	for (chan = 0; chan < dma_csr_ch; chan++) {
		struct stmmac_channel *ch = &priv->channel[chan];
		unsigned long flags;

		/* The per-channel lock serializes IRQ enable/disable with
		 * the NAPI handlers that toggle the same bits.
		 */
		spin_lock_irqsave(&ch->lock, flags);
		/* 1, 1: enable both RX and TX interrupts — TODO confirm
		 * argument meaning against stmmac_enable_dma_irq()
		 */
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}
}
2533
2534 /**
2535 * stmmac_start_all_dma - start all RX and TX DMA channels
2536 * @priv: driver private structure
2537 * Description:
2538 * This starts all the RX and TX DMA channels
2539 */
stmmac_start_all_dma(struct stmmac_priv * priv)2540 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2541 {
2542 u8 rx_channels_count = priv->plat->rx_queues_to_use;
2543 u8 tx_channels_count = priv->plat->tx_queues_to_use;
2544 u8 chan;
2545
2546 for (chan = 0; chan < rx_channels_count; chan++)
2547 stmmac_start_rx_dma(priv, chan);
2548
2549 for (chan = 0; chan < tx_channels_count; chan++)
2550 stmmac_start_tx_dma(priv, chan);
2551 }
2552
2553 /**
2554 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2555 * @priv: driver private structure
2556 * Description:
2557 * This stops the RX and TX DMA channels
2558 */
stmmac_stop_all_dma(struct stmmac_priv * priv)2559 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2560 {
2561 u8 rx_channels_count = priv->plat->rx_queues_to_use;
2562 u8 tx_channels_count = priv->plat->tx_queues_to_use;
2563 u8 chan;
2564
2565 for (chan = 0; chan < rx_channels_count; chan++)
2566 stmmac_stop_rx_dma(priv, chan);
2567
2568 for (chan = 0; chan < tx_channels_count; chan++)
2569 stmmac_stop_tx_dma(priv, chan);
2570 }
2571
/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it is used for configuring the DMA operation mode register in
 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u8 rx_channels_count = priv->plat->rx_queues_to_use;
	u8 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u8 qmode = 0;
	u8 chan;

	/* Fall back to the sizes reported by the HW capability register
	 * when the platform does not provide them.
	 */
	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
	if (dwmac_is_xmac(priv->plat->core_type)) {
		rxfifosz /= rx_channels_count;
		txfifosz /= tx_channels_count;
	}

	/* 'tc' is the module-level threshold value shared with
	 * stmmac_bump_dma_threshold().
	 */
	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 * that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];

		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				   rxfifosz, qmode);

		stmmac_set_queue_rx_buf_size(priv, rx_q, chan);
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				   txfifosz, qmode);
	}
}
2638
/* AF_XDP TX metadata callback: request HW timestamping for the frame and
 * force an interrupt-on-completion so the timestamp can be harvested.
 */
static void stmmac_xsk_request_timestamp(void *_priv)
{
	struct stmmac_metadata_request *meta_req = _priv;

	stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
	*meta_req->set_ic = true;
}
2646
stmmac_xsk_fill_timestamp(void * _priv)2647 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2648 {
2649 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2650 struct stmmac_priv *priv = tx_compl->priv;
2651 struct dma_desc *desc = tx_compl->desc;
2652 bool found = false;
2653 u64 ns = 0;
2654
2655 if (!priv->hwts_tx_en)
2656 return 0;
2657
2658 /* check tx tstamp status */
2659 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2660 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2661 found = true;
2662 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2663 found = true;
2664 }
2665
2666 if (found) {
2667 ns -= priv->plat->cdc_error_adj;
2668 return ns_to_ktime(ns);
2669 }
2670
2671 return 0;
2672 }
2673
/* AF_XDP TX metadata callback: program the TBS (time-based scheduling)
 * launch time into the enhanced descriptor, if TBS is enabled for the
 * queue.
 */
static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
{
	struct timespec64 ts = ns_to_timespec64(launch_time);
	struct stmmac_metadata_request *meta_req = _priv;

	if (meta_req->tbs & STMMAC_TBS_EN)
		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
				    ts.tv_nsec);
}
2683
/* AF_XDP TX metadata offload callbacks (timestamping and launch time) */
static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
	.tmo_request_timestamp = stmmac_xsk_request_timestamp,
	.tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
	.tmo_request_launch_time = stmmac_xsk_request_launch_time,
};
2689
/* stmmac_xdp_xmit_zc - transmit frames from the AF_XDP zero-copy pool.
 * @priv: driver private structure
 * @queue: TX queue index
 * @budget: maximum number of descriptors to submit
 *
 * Returns true only when the budget was not exhausted AND the pool had no
 * more pending descriptors (i.e. all outstanding ZC TX work is done).
 */
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
	struct xsk_buff_pool *pool = tx_q->xsk_pool;
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc = NULL;
	struct xdp_desc xdp_desc;
	bool work_done = true;
	u32 tx_set_ic_bit = 0;

	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	budget = min(budget, stmmac_tx_avail(priv, queue));

	for (; budget > 0; budget--) {
		struct stmmac_metadata_request meta_req;
		struct xsk_tx_metadata *meta = NULL;
		dma_addr_t dma_addr;
		bool set_ic;

		/* We are sharing with slow path and stop XSK TX desc submission when
		 * available TX ring is less than threshold.
		 */
		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
		    !netif_carrier_ok(priv->dev)) {
			work_done = false;
			break;
		}

		if (!xsk_tx_peek_desc(pool, &xdp_desc))
			break;

		/* Drop frames exceeding the EST per-queue max SDU; the
		 * peeked descriptor is still consumed.
		 */
		if (priv->est && priv->est->enable &&
		    priv->est->max_sdu[queue] &&
		    xdp_desc.len > priv->est->max_sdu[queue]) {
			priv->xstats.max_sdu_txq_drop[queue]++;
			continue;
		}

		tx_desc = stmmac_get_tx_desc(priv, tx_q, entry);
		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);

		/* To return XDP buffer to XSK pool, we simple call
		 * xsk_tx_completed(), so we don't need to fill up
		 * 'buf' and 'xdpf'.
		 */
		stmmac_set_tx_dma_entry(tx_q, entry, STMMAC_TXBUF_T_XSK_TX,
					0, xdp_desc.len, false);
		stmmac_set_tx_dma_last_segment(tx_q, entry);

		tx_q->xdpf[entry] = NULL;

		stmmac_set_desc_addr(priv, tx_desc, dma_addr);

		tx_q->tx_count_frames++;

		/* Interrupt coalescing: only set IC every tx_coal_frames
		 * frames (the metadata callback may also force it).
		 */
		if (!priv->tx_coal_frames[queue])
			set_ic = false;
		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
			set_ic = true;
		else
			set_ic = false;

		meta_req.priv = priv;
		meta_req.tx_desc = tx_desc;
		meta_req.set_ic = &set_ic;
		meta_req.tbs = tx_q->tbs;
		meta_req.edesc = &tx_q->dma_entx[entry];
		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
					&meta_req);
		if (set_ic) {
			tx_q->tx_count_frames = 0;
			stmmac_set_tx_ic(priv, tx_desc);
			tx_set_ic_bit++;
		}

		/* Finalize the descriptor (sets OWN bit) after all other
		 * fields have been written.
		 */
		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
				       csum, priv->descriptor_mode, true, true,
				       xdp_desc.len);

		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);

		xsk_tx_metadata_to_compl(meta,
					 &tx_q->tx_skbuff_dma[entry].xsk_meta);

		tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
		entry = tx_q->cur_tx;
	}
	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
	u64_stats_update_end(&txq_stats->napi_syncp);

	/* Only flush/release when at least one descriptor was submitted */
	if (tx_desc) {
		stmmac_flush_tx_descriptors(priv, queue);
		xsk_tx_release(pool);
	}

	/* Return true if all of the 3 conditions are met
	 * a) TX Budget is still available
	 * b) work_done = true when XSK TX desc peek is empty (no more
	 * pending XSK TX for transmission)
	 */
	return !!budget && work_done;
}
2800
/* Raise the module-level DMA threshold 'tc' (by 64, up to 256) after a
 * TX underflow, and reprogram the channel's operation mode accordingly.
 * No-op once Store-And-Forward mode is active.
 */
static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
{
	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
		tc += 64;

		if (priv->plat->force_thresh_dma_mode)
			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
		else
			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
						      chan);

		priv->xstats.threshold = tc;
	}
}
2815
2816 /**
2817 * stmmac_tx_clean - to manage the transmission completion
2818 * @priv: driver private structure
2819 * @budget: napi budget limiting this functions packet handling
2820 * @queue: TX queue index
2821 * @pending_packets: signal to arm the TX coal timer
2822 * Description: it reclaims the transmit resources after transmission completes.
2823 * If some packets still needs to be handled, due to TX coalesce, set
2824 * pending_packets to true to make NAPI arm the TX coal timer.
2825 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
			   bool *pending_packets)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, xmits = 0, count = 0;
	u32 tx_packets = 0, tx_errors = 0;

	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	tx_q->xsk_frames_done = 0;

	entry = tx_q->dirty_tx;

	/* Try to clean all TX complete frame in 1 shot */
	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		struct dma_desc *p;
		int status;

		/* Resolve what kind of buffer sits in this slot: an XDP
		 * frame (XDP_TX or ndo_xdp_xmit), a regular skb, or
		 * neither (e.g. XSK zero-copy slots).
		 */
		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdpf = tx_q->xdpf[entry];
			skb = NULL;
		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			xdpf = NULL;
			skb = tx_q->tx_skbuff[entry];
		} else {
			xdpf = NULL;
			skb = NULL;
		}

		p = stmmac_get_tx_desc(priv, tx_q, entry);
		status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				tx_errors++;
				if (unlikely(status & tx_err_bump_tc))
					stmmac_bump_dma_threshold(priv, queue);
			} else {
				tx_packets++;
			}
			if (skb) {
				stmmac_get_tx_hwtstamp(priv, p, skb);
			} else if (tx_q->xsk_pool &&
				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
				struct stmmac_xsk_tx_complete tx_compl = {
					.priv = priv,
					.desc = p,
				};

				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
							 &stmmac_xsk_tx_metadata_ops,
							 &tx_compl);
			}
		}

		/* Unmap the DMA buffer unless this is an XDP_TX frame,
		 * whose page is recycled by the XDP return path below.
		 */
		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		/* This looks at tx_q->tx_skbuff_dma[tx_q->dirty_tx].is_jumbo
		 * and tx_q->tx_skbuff_dma[tx_q->dirty_tx].last_segment
		 */
		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		/* Return XDP frames to their origin: rx-napi recycling for
		 * XDP_TX, the generic return path for ndo_xdp_xmit frames.
		 */
		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdp_return_frame(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
			tx_q->xsk_frames_done++;

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			if (likely(skb)) {
				pkts_compl++;
				bytes_compl += skb->len;
				dev_consume_skb_any(skb);
				tx_q->tx_skbuff[entry] = NULL;
			}
		}

		stmmac_release_tx_desc(priv, p, priv->descriptor_mode);

		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
	}
	tx_q->dirty_tx = entry;

	/* Report completions for BQL accounting */
	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	/* Restart the queue if it was stopped and enough room was freed */
	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if (tx_q->xsk_pool) {
		bool work_done;

		if (tx_q->xsk_frames_done)
			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);

		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
			xsk_set_tx_need_wakeup(tx_q->xsk_pool);

		/* For XSK TX, we try to send as many as possible.
		 * If XSK work done (XSK TX desc empty and budget still
		 * available), return "budget - 1" to reenable TX IRQ.
		 * Else, return "budget" to make NAPI continue polling.
		 */
		work_done = stmmac_xdp_xmit_zc(priv, queue,
					       STMMAC_XSK_TX_BUDGET_MAX);
		if (work_done)
			xmits = budget - 1;
		else
			xmits = budget;
	}

	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
		stmmac_restart_sw_lpi_timer(priv);

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		*pending_packets = true;

	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
	u64_stats_inc(&txq_stats->napi.tx_clean);
	u64_stats_update_end(&txq_stats->napi_syncp);

	priv->xstats.tx_errors += tx_errors;

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	/* Combine decisions from TX clean and XSK TX */
	return max(count, xmits);
}
3007
3008 /**
3009 * stmmac_tx_err - to manage the tx error
3010 * @priv: driver private structure
3011 * @chan: channel index
3012 * Description: it cleans the descriptors and restarts the transmission
3013 * in case of transmission errors.
3014 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

	/* Keep the stack from queuing while the channel is torn down */
	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	/* Stop the DMA, drop all pending buffers, re-init the descriptor
	 * ring from a clean state and restart the channel.
	 */
	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
	stmmac_reset_tx_queue(priv, chan);
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->xstats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
3032
3033 /**
3034 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
3035 * @priv: driver private structure
3036 * @txmode: TX operating mode
3037 * @rxmode: RX operating mode
3038 * @chan: channel index
3039 * Description: it is used for configuring of the DMA operation mode in
3040 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
3041 * mode.
3042 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rx_ch_cnt = priv->plat->rx_queues_to_use;
	u8 tx_ch_cnt = priv->plat->tx_queues_to_use;
	int rx_fifo = priv->plat->rx_fifo_size;
	int tx_fifo = priv->plat->tx_fifo_size;
	u8 rxq_mode, txq_mode;

	rxq_mode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	txq_mode = priv->plat->tx_queues_cfg[chan].mode_to_use;

	/* Fall back to the HW capability values when the platform did not
	 * provide FIFO sizes.
	 */
	if (!rx_fifo)
		rx_fifo = priv->dma_cap.rx_fifo_size;
	if (!tx_fifo)
		tx_fifo = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rx_fifo /= rx_ch_cnt;
	tx_fifo /= tx_ch_cnt;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rx_fifo, rxq_mode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, tx_fifo, txq_mode);
}
3065
stmmac_safety_feat_interrupt(struct stmmac_priv * priv)3066 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
3067 {
3068 int ret;
3069
3070 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
3071 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
3072 if (ret && (ret != -EINVAL)) {
3073 stmmac_global_err(priv);
3074 return true;
3075 }
3076
3077 return false;
3078 }
3079
/* Read the per-channel DMA interrupt status and, while holding the channel
 * lock, mask the corresponding DMA IRQ before scheduling the matching napi
 * (the rxtx napi is used when an XSK pool is attached).
 */
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
	struct stmmac_channel *ch = &priv->channel[chan];
	struct napi_struct *rx_napi, *tx_napi;
	unsigned long flags;
	int status;

	status = stmmac_dma_interrupt_status(priv, priv->ioaddr, &priv->xstats,
					     chan, dir);

	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if ((status & handle_rx) && chan < priv->plat->rx_queues_to_use &&
	    napi_schedule_prep(rx_napi)) {
		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(rx_napi);
	}

	if ((status & handle_tx) && chan < priv->plat->tx_queues_to_use &&
	    napi_schedule_prep(tx_napi)) {
		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(tx_napi);
	}

	return status;
}
3114
3115 /**
3116 * stmmac_dma_interrupt - DMA ISR
3117 * @priv: driver private structure
3118 * Description: this is the DMA ISR. It is called by the main ISR.
3119 * It calls the dwmac dma routine and schedule poll method in case of some
3120 * work can be done.
3121 */
stmmac_dma_interrupt(struct stmmac_priv * priv)3122 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3123 {
3124 u8 tx_channel_count = priv->plat->tx_queues_to_use;
3125 u8 rx_channel_count = priv->plat->rx_queues_to_use;
3126 u8 channels_to_check = tx_channel_count > rx_channel_count ?
3127 tx_channel_count : rx_channel_count;
3128 int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3129 u8 chan;
3130
3131 /* Make sure we never check beyond our status buffer. */
3132 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3133 channels_to_check = ARRAY_SIZE(status);
3134
3135 for (chan = 0; chan < channels_to_check; chan++)
3136 status[chan] = stmmac_napi_check(priv, chan,
3137 DMA_DIR_RXTX);
3138
3139 for (chan = 0; chan < tx_channel_count; chan++) {
3140 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3141 /* Try to bump up the dma threshold on this failure */
3142 stmmac_bump_dma_threshold(priv, chan);
3143 } else if (unlikely(status[chan] == tx_hard_error)) {
3144 stmmac_tx_err(priv, chan);
3145 }
3146 }
3147 }
3148
3149 /**
3150 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3151 * @priv: driver private structure
3152 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
3153 */
stmmac_mmc_setup(struct stmmac_priv * priv)3154 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3155 {
3156 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3157 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3158
3159 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3160
3161 if (priv->dma_cap.rmon) {
3162 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3163 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3164 } else
3165 netdev_info(priv->dev, "No MAC Management Counters available\n");
3166 }
3167
3168 /**
3169 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3170 * @priv: driver private structure
3171 * Description:
3172 * new GMAC chip generations have a new register to indicate the
3173 * presence of the optional feature/functions.
3174 * This can be also used to override the value passed through the
3175 * platform and necessary for old MAC10/100 and GMAC chips.
3176 */
stmmac_get_hw_features(struct stmmac_priv * priv)3177 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3178 {
3179 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3180 }
3181
3182 /**
3183 * stmmac_check_ether_addr - check if the MAC addr is valid
3184 * @priv: driver private structure
3185 * Description:
3186 * it is to verify if the MAC address is valid, in case of failures it
3187 * generates a random MAC address
3188 */
stmmac_check_ether_addr(struct stmmac_priv * priv)3189 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3190 {
3191 u8 addr[ETH_ALEN];
3192
3193 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3194 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3195 if (is_valid_ether_addr(addr))
3196 eth_hw_addr_set(priv->dev, addr);
3197 else
3198 eth_hw_addr_random(priv->dev);
3199 dev_info(priv->device, "device MAC address %pM\n",
3200 priv->dev->dev_addr);
3201 }
3202 }
3203
/* Map a phylink interface mode to the core's PHY_INTF_SEL value;
 * returns -EINVAL for unsupported modes.
 */
int stmmac_get_phy_intf_sel(phy_interface_t interface)
{
	if (interface == PHY_INTERFACE_MODE_MII ||
	    interface == PHY_INTERFACE_MODE_GMII)
		return PHY_INTF_SEL_GMII_MII;

	if (phy_interface_mode_is_rgmii(interface))
		return PHY_INTF_SEL_RGMII;

	if (interface == PHY_INTERFACE_MODE_RMII)
		return PHY_INTF_SEL_RMII;

	if (interface == PHY_INTERFACE_MODE_REVMII)
		return PHY_INTF_SEL_REVMII;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(stmmac_get_phy_intf_sel);
3221
static int stmmac_prereset_configure(struct stmmac_priv *priv)
{
	struct plat_stmmacenet_data *plat_dat = priv->plat;
	phy_interface_t interface;
	struct phylink_pcs *pcs;
	int phy_intf_sel, ret;

	/* Nothing to do when the platform provides no phy_intf_sel hook */
	if (!plat_dat->set_phy_intf_sel)
		return 0;

	interface = plat_dat->phy_interface;

	/* Check whether this mode uses a PCS */
	pcs = stmmac_mac_select_pcs(&priv->phylink_config, interface);
	if (priv->integrated_pcs && pcs == &priv->integrated_pcs->pcs) {
		/* Request the phy_intf_sel from the integrated PCS */
		phy_intf_sel = stmmac_integrated_pcs_get_phy_intf_sel(pcs,
								      interface);
	} else {
		phy_intf_sel = stmmac_get_phy_intf_sel(interface);
	}

	if (phy_intf_sel < 0) {
		netdev_err(priv->dev,
			   "failed to get phy_intf_sel for %s: %pe\n",
			   phy_modes(interface), ERR_PTR(phy_intf_sel));
		return phy_intf_sel;
	}

	/* Let the platform program the interface selection. -EINVAL means
	 * the platform does not support this mode at all; any other
	 * negative value is a programming failure.
	 */
	ret = plat_dat->set_phy_intf_sel(plat_dat->bsp_priv, phy_intf_sel);
	if (ret == -EINVAL)
		netdev_err(priv->dev, "platform does not support %s\n",
			   phy_modes(interface));
	else if (ret < 0)
		netdev_err(priv->dev,
			   "platform failed to set interface %s: %pe\n",
			   phy_modes(interface), ERR_PTR(ret));

	return ret;
}
3262
3263 /**
3264 * stmmac_init_dma_engine - DMA init.
3265 * @priv: driver private structure
3266 * Description:
3267 * It inits the DMA invoking the specific MAC/GMAC callback.
3268 * Some DMA parameters can be passed from the platform;
3269 * in case of these are not passed a default is kept for the MAC or GMAC.
3270 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u8 rx_channels_count = priv->plat->rx_queues_to_use;
	u8 tx_channels_count = priv->plat->tx_queues_to_use;
	/* CSR registers are shared between RX and TX directions, so
	 * program the superset of the two channel counts.
	 */
	u8 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	int ret = 0;
	u8 chan;

	/* Select the PHY interface before issuing the SW reset */
	ret = stmmac_prereset_configure(priv);
	if (ret)
		return ret;

	ret = stmmac_reset(priv);
	if (ret) {
		netdev_err(priv->dev, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++) {
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
		/* IRQs stay masked until napi is ready to run */
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
	}

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->dma_conf.rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		stmmac_set_queue_rx_tail_ptr(priv, rx_q, chan,
					     rx_q->buf_alloc_num);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->dma_conf.tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		stmmac_set_queue_tx_tail_ptr(priv, tx_q, chan, 0);
	}

	return ret;
}
3326
/* Arm (or cancel) the TX coalesce timer for @queue. */
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 coal_timer = priv->tx_coal_timer[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	struct napi_struct *napi;

	/* A zero timer value means timer-based coalescing is disabled */
	if (!coal_timer)
		return;

	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	/* Arm timer only if napi is not already scheduled.
	 * Try to cancel any timer if napi is scheduled, timer will be armed
	 * again in the next scheduled napi.
	 */
	if (unlikely(napi_is_scheduled(napi)))
		hrtimer_try_to_cancel(&tx_q->txtimer);
	else
		hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(coal_timer),
			      HRTIMER_MODE_REL);
}
3351
3352 /**
3353 * stmmac_tx_timer - mitigation sw timer for tx.
3354 * @t: data pointer
3355 * Description:
3356 * This is the timer handler to directly invoke the stmmac_tx_clean.
3357 */
stmmac_tx_timer(struct hrtimer * t)3358 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3359 {
3360 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3361 struct stmmac_priv *priv = tx_q->priv_data;
3362 struct stmmac_channel *ch;
3363 struct napi_struct *napi;
3364
3365 ch = &priv->channel[tx_q->queue_index];
3366 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3367
3368 if (likely(napi_schedule_prep(napi))) {
3369 unsigned long flags;
3370
3371 spin_lock_irqsave(&ch->lock, flags);
3372 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3373 spin_unlock_irqrestore(&ch->lock, flags);
3374 __napi_schedule(napi);
3375 }
3376
3377 return HRTIMER_NORESTART;
3378 }
3379
3380 /**
3381 * stmmac_init_coalesce - init mitigation options.
3382 * @priv: driver private structure
3383 * Description:
3384 * This inits the coalesce parameters: i.e. timer rate,
3385 * timer handler and default threshold used for enabling the
3386 * interrupt on completion bit.
3387 */
stmmac_init_coalesce(struct stmmac_priv * priv)3388 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3389 {
3390 u8 tx_channel_count = priv->plat->tx_queues_to_use;
3391 u8 rx_channel_count = priv->plat->rx_queues_to_use;
3392 u8 chan;
3393
3394 for (chan = 0; chan < tx_channel_count; chan++) {
3395 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3396
3397 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3398 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3399
3400 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3401 }
3402
3403 for (chan = 0; chan < rx_channel_count; chan++)
3404 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3405 }
3406
stmmac_set_rings_length(struct stmmac_priv * priv)3407 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3408 {
3409 u8 rx_channels_count = priv->plat->rx_queues_to_use;
3410 u8 tx_channels_count = priv->plat->tx_queues_to_use;
3411 u8 chan;
3412
3413 /* set TX ring length */
3414 for (chan = 0; chan < tx_channels_count; chan++)
3415 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3416 (priv->dma_conf.dma_tx_size - 1), chan);
3417
3418 /* set RX ring length */
3419 for (chan = 0; chan < rx_channels_count; chan++)
3420 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3421 (priv->dma_conf.dma_rx_size - 1), chan);
3422 }
3423
3424 /**
3425 * stmmac_set_tx_queue_weight - Set TX queue weight
3426 * @priv: driver private structure
3427 * Description: It is used for setting TX queues weight
3428 */
stmmac_set_tx_queue_weight(struct stmmac_priv * priv)3429 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3430 {
3431 u8 tx_queues_count = priv->plat->tx_queues_to_use;
3432 u32 weight;
3433 u8 queue;
3434
3435 for (queue = 0; queue < tx_queues_count; queue++) {
3436 weight = priv->plat->tx_queues_cfg[queue].weight;
3437 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3438 }
3439 }
3440
3441 /**
3442 * stmmac_configure_cbs - Configure CBS in TX queue
3443 * @priv: driver private structure
3444 * Description: It is used for configuring CBS in AVB TX queues
3445 */
stmmac_configure_cbs(struct stmmac_priv * priv)3446 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3447 {
3448 u8 tx_queues_count = priv->plat->tx_queues_to_use;
3449 u32 mode_to_use;
3450 u8 queue;
3451
3452 /* queue 0 is reserved for legacy traffic */
3453 for (queue = 1; queue < tx_queues_count; queue++) {
3454 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3455 if (mode_to_use == MTL_QUEUE_DCB)
3456 continue;
3457
3458 stmmac_config_cbs(priv, priv->hw,
3459 priv->plat->tx_queues_cfg[queue].send_slope,
3460 priv->plat->tx_queues_cfg[queue].idle_slope,
3461 priv->plat->tx_queues_cfg[queue].high_credit,
3462 priv->plat->tx_queues_cfg[queue].low_credit,
3463 queue);
3464 }
3465 }
3466
3467 /**
3468 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3469 * @priv: driver private structure
3470 * Description: It is used for mapping RX queues to RX dma channels
3471 */
stmmac_rx_queue_dma_chan_map(struct stmmac_priv * priv)3472 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3473 {
3474 u8 rx_queues_count = priv->plat->rx_queues_to_use;
3475 u8 queue;
3476 u32 chan;
3477
3478 for (queue = 0; queue < rx_queues_count; queue++) {
3479 chan = priv->plat->rx_queues_cfg[queue].chan;
3480 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3481 }
3482 }
3483
3484 /**
3485 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3486 * @priv: driver private structure
3487 * Description: It is used for configuring the RX Queue Priority
3488 */
stmmac_mac_config_rx_queues_prio(struct stmmac_priv * priv)3489 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3490 {
3491 u8 rx_queues_count = priv->plat->rx_queues_to_use;
3492 u8 queue;
3493 u32 prio;
3494
3495 for (queue = 0; queue < rx_queues_count; queue++) {
3496 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3497 continue;
3498
3499 prio = priv->plat->rx_queues_cfg[queue].prio;
3500 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3501 }
3502 }
3503
3504 /**
3505 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3506 * @priv: driver private structure
3507 * Description: It is used for configuring the TX Queue Priority
3508 */
stmmac_mac_config_tx_queues_prio(struct stmmac_priv * priv)3509 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3510 {
3511 u8 tx_queues_count = priv->plat->tx_queues_to_use;
3512 u8 queue;
3513 u32 prio;
3514
3515 for (queue = 0; queue < tx_queues_count; queue++) {
3516 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3517 continue;
3518
3519 prio = priv->plat->tx_queues_cfg[queue].prio;
3520 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3521 }
3522 }
3523
3524 /**
3525 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3526 * @priv: driver private structure
3527 * Description: It is used for configuring the RX queue routing
3528 */
stmmac_mac_config_rx_queues_routing(struct stmmac_priv * priv)3529 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3530 {
3531 u8 rx_queues_count = priv->plat->rx_queues_to_use;
3532 u8 packet;
3533 u8 queue;
3534
3535 for (queue = 0; queue < rx_queues_count; queue++) {
3536 /* no specific packet type routing specified for the queue */
3537 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3538 continue;
3539
3540 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3541 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3542 }
3543 }
3544
stmmac_mac_config_rss(struct stmmac_priv * priv)3545 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3546 {
3547 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3548 priv->rss.enable = false;
3549 return;
3550 }
3551
3552 if (priv->dev->features & NETIF_F_RXHASH)
3553 priv->rss.enable = true;
3554 else
3555 priv->rss.enable = false;
3556
3557 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3558 priv->plat->rx_queues_to_use);
3559 }
3560
3561 /**
3562 * stmmac_mtl_configuration - Configure MTL
3563 * @priv: driver private structure
3564 * Description: It is used for configuring MTL
3565 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u8 rx_queues_count = priv->plat->rx_queues_to_use;
	u8 tx_queues_count = priv->plat->tx_queues_to_use;

	/* Most multi-queue features below are only programmed when more
	 * than one queue is in use; the queue/DMA mapping and RX queue
	 * enable are done unconditionally.
	 */
	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
					      priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
					      priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);

	/* Receive Side Scaling */
	if (rx_queues_count > 1)
		stmmac_mac_config_rss(priv);
}
3610
stmmac_safety_feat_configuration(struct stmmac_priv * priv)3611 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3612 {
3613 if (priv->dma_cap.asp) {
3614 netdev_info(priv->dev, "Enabling Safety Features\n");
3615 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3616 priv->plat->safety_feat_cfg);
3617 } else {
3618 netdev_info(priv->dev, "No Safety Features support found\n");
3619 }
3620 }
3621
3622 /* STM32MP25xx (dwmac v5.3) states "Do not enable time-based scheduling for
3623 * channels on which the TSO feature is enabled." If we have a skb for a
3624 * channel which has TBS enabled, fall back to software GSO.
3625 */
stmmac_tso_channel_permitted(struct stmmac_priv * priv,unsigned int chan)3626 static bool stmmac_tso_channel_permitted(struct stmmac_priv *priv,
3627 unsigned int chan)
3628 {
3629 /* TSO and TBS cannot co-exist */
3630 return !(priv->dma_conf.tx_queue[chan].tbs & STMMAC_TBS_AVAIL);
3631 }
3632
3633 /**
3634 * stmmac_hw_setup - setup mac in a usable state.
3635 * @dev : pointer to the device structure.
3636 * Description:
3637 * this is the main function to setup the HW in a usable state because the
3638 * dma engine is reset, the core registers are configured (e.g. AXI,
3639 * Checksum features, timers). The DMA is ready to start receiving and
3640 * transmitting.
3641 * Return value:
3642 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3643 * file on failure.
3644 */
static int stmmac_hw_setup(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 rx_cnt = priv->plat->rx_queues_to_use;
	u8 tx_cnt = priv->plat->tx_queues_to_use;
	bool sph_en;
	u8 chan;
	int ret;

	/* Make sure RX clock is enabled */
	if (priv->hw->phylink_pcs)
		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);

	/* Note that clk_rx_i must be running for reset to complete. This
	 * clock may also be required when setting the MAC address.
	 *
	 * Block the receive clock stop for LPI mode at the PHY in case
	 * the link is established with EEE mode active.
	 */
	phylink_rx_clk_stop_block(priv->phylink);

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		/* Undo the clock-stop block taken above before bailing out */
		phylink_rx_clk_stop_unblock(priv->phylink);
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
	phylink_rx_clk_stop_unblock(priv->phylink);

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	/* Fall back to no RX checksum offload when the HW engine cannot
	 * be enabled.
	 */
	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	/* Program the RX watchdog (IRQ mitigation) when timer-based
	 * coalescing is in use.
	 */
	if (priv->use_riwt) {
		u32 queue;

		for (queue = 0; queue < rx_cnt; queue++) {
			if (!priv->rx_riwt[queue])
				priv->rx_riwt[queue] = DEF_DMA_RIWT;

			stmmac_rx_watchdog(priv, priv->ioaddr,
					   priv->rx_riwt[queue], queue);
		}
	}

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->dma_cap.tsoen && priv->plat->flags & STMMAC_FLAG_TSO_EN) {
		for (chan = 0; chan < tx_cnt; chan++) {
			/* Skip channels with TBS: TSO and TBS cannot be
			 * enabled on the same channel.
			 */
			if (!stmmac_tso_channel_permitted(priv, chan))
				continue;

			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
		}
	}

	/* Enable Split Header */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
	for (chan = 0; chan < rx_cnt; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);


	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* TBS */
	for (chan = 0; chan < tx_cnt; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;

		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
	}

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	phylink_rx_clk_stop_block(priv->phylink);
	stmmac_set_hw_vlan_mode(priv, priv->hw);
	phylink_rx_clk_stop_unblock(priv->phylink);

	return 0;
}
3759
static void stmmac_free_irq(struct net_device *dev,
			    enum request_irq_err irq_err, int irq_idx)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_msi *msi = priv->msi;
	int j;

	/* The cases below intentionally fall through: @irq_err names the
	 * request that failed (or REQ_IRQ_ERR_ALL), and starting from that
	 * case every IRQ requested before it is freed, in reverse order of
	 * request. @irq_idx bounds the per-queue loop of the entry case.
	 */
	switch (irq_err) {
	case REQ_IRQ_ERR_ALL:
		irq_idx = priv->plat->tx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_TX:
		/* Free TX queue IRQs [0, irq_idx) in reverse order */
		for (j = irq_idx - 1; msi && j >= 0; j--) {
			if (msi->tx_irq[j] > 0) {
				irq_set_affinity_hint(msi->tx_irq[j], NULL);
				free_irq(msi->tx_irq[j],
					 &priv->dma_conf.tx_queue[j]);
			}
		}
		irq_idx = priv->plat->rx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_RX:
		/* Free RX queue IRQs [0, irq_idx) in reverse order */
		for (j = irq_idx - 1; msi && j >= 0; j--) {
			if (msi->rx_irq[j] > 0) {
				irq_set_affinity_hint(msi->rx_irq[j], NULL);
				free_irq(msi->rx_irq[j],
					 &priv->dma_conf.rx_queue[j]);
			}
		}

		/* Dedicated lines are only freed when they are not shared
		 * with the main MAC IRQ (dev->irq).
		 */
		if (msi && msi->sfty_ue_irq > 0 && msi->sfty_ue_irq != dev->irq)
			free_irq(msi->sfty_ue_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_UE:
		if (msi && msi->sfty_ce_irq > 0 && msi->sfty_ce_irq != dev->irq)
			free_irq(msi->sfty_ce_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_CE:
		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
			free_irq(priv->wol_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY:
		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
			free_irq(priv->sfty_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_WOL:
		free_irq(dev->irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_MAC:
	case REQ_IRQ_ERR_NO:
		/* If MAC IRQ request error, no more IRQ to free */
		break;
	}
}
3814
stmmac_msi_init(struct stmmac_priv * priv,struct stmmac_resources * res)3815 static int stmmac_msi_init(struct stmmac_priv *priv,
3816 struct stmmac_resources *res)
3817 {
3818 int i;
3819
3820 priv->msi = devm_kmalloc(priv->device, sizeof(*priv->msi), GFP_KERNEL);
3821 if (!priv->msi)
3822 return -ENOMEM;
3823
3824 priv->msi->sfty_ce_irq = res->sfty_ce_irq;
3825 priv->msi->sfty_ue_irq = res->sfty_ue_irq;
3826
3827 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
3828 priv->msi->rx_irq[i] = res->rx_irq[i];
3829 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
3830 priv->msi->tx_irq[i] = res->tx_irq[i];
3831
3832 return 0;
3833 }
3834
/**
 * stmmac_request_irq_multi_msi - request the per-vector MSI interrupt set
 * @dev: network device pointer
 *
 * Requests, in order: the common MAC line, the optional WoL line, the
 * optional common safety line, the optional safety CE/UE lines, then one
 * vector per RX queue and one per TX queue.  Optional lines are skipped
 * when unset (<= 0) or when they share the MAC line (== dev->irq).
 *
 * On failure, @irq_err records the stage that failed (and @irq_idx the
 * failing queue index for queue vectors) so that stmmac_free_irq() can
 * unwind exactly what was requested.  The ordering here must stay in sync
 * with the fallthrough chain in stmmac_free_irq().
 *
 * Return: 0 on success, a negative errno from request_irq() otherwise.
 */
static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_msi *msi = priv->msi;
	enum request_irq_err irq_err;
	int irq_idx = 0;
	char *int_name;
	int ret;
	int i;

	/* For common interrupt */
	int_name = msi->int_name_mac;
	sprintf(int_name, "%s:%s", dev->name, "mac");
	ret = request_irq(dev->irq, stmmac_mac_interrupt,
			  0, int_name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: alloc mac MSI %d (error: %d)\n",
			   __func__, dev->irq, ret);
		irq_err = REQ_IRQ_ERR_MAC;
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line
	 * is used for WoL
	 */
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		int_name = msi->int_name_wol;
		sprintf(int_name, "%s:%s", dev->name, "wol");
		ret = request_irq(priv->wol_irq,
				  stmmac_mac_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc wol MSI %d (error: %d)\n",
				   __func__, priv->wol_irq, ret);
			irq_err = REQ_IRQ_ERR_WOL;
			goto irq_error;
		}
	}

	/* Request the common Safety Feature Correctible/Uncorrectible
	 * Error line in case of another line is used
	 */
	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
		int_name = msi->int_name_sfty;
		sprintf(int_name, "%s:%s", dev->name, "safety");
		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty MSI %d (error: %d)\n",
				   __func__, priv->sfty_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY;
			goto irq_error;
		}
	}

	/* Request the Safety Feature Correctible Error line in
	 * case of another line is used
	 */
	if (msi->sfty_ce_irq > 0 && msi->sfty_ce_irq != dev->irq) {
		int_name = msi->int_name_sfty_ce;
		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
		ret = request_irq(msi->sfty_ce_irq,
				  stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty ce MSI %d (error: %d)\n",
				   __func__, msi->sfty_ce_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY_CE;
			goto irq_error;
		}
	}

	/* Request the Safety Feature Uncorrectible Error line in
	 * case of another line is used
	 */
	if (msi->sfty_ue_irq > 0 && msi->sfty_ue_irq != dev->irq) {
		int_name = msi->int_name_sfty_ue;
		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
		ret = request_irq(msi->sfty_ue_irq,
				  stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty ue MSI %d (error: %d)\n",
				   __func__, msi->sfty_ue_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY_UE;
			goto irq_error;
		}
	}

	/* Request Rx MSI irq; vectors set to 0 are simply not wired up */
	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
		if (i >= MTL_MAX_RX_QUEUES)
			break;
		if (msi->rx_irq[i] == 0)
			continue;

		int_name = msi->int_name_rx_irq[i];
		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
		ret = request_irq(msi->rx_irq[i],
				  stmmac_msi_intr_rx,
				  0, int_name, &priv->dma_conf.rx_queue[i]);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc rx-%d MSI %d (error: %d)\n",
				   __func__, i, msi->rx_irq[i], ret);
			irq_err = REQ_IRQ_ERR_RX;
			irq_idx = i;
			goto irq_error;
		}
		/* Spread queue vectors across the online CPUs */
		irq_set_affinity_hint(msi->rx_irq[i],
				      cpumask_of(i % num_online_cpus()));
	}

	/* Request Tx MSI irq */
	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
		if (i >= MTL_MAX_TX_QUEUES)
			break;
		if (msi->tx_irq[i] == 0)
			continue;

		int_name = msi->int_name_tx_irq[i];
		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
		ret = request_irq(msi->tx_irq[i],
				  stmmac_msi_intr_tx,
				  0, int_name, &priv->dma_conf.tx_queue[i]);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc tx-%d MSI %d (error: %d)\n",
				   __func__, i, msi->tx_irq[i], ret);
			irq_err = REQ_IRQ_ERR_TX;
			irq_idx = i;
			goto irq_error;
		}
		irq_set_affinity_hint(msi->tx_irq[i],
				      cpumask_of(i % num_online_cpus()));
	}

	return 0;

irq_error:
	stmmac_free_irq(dev, irq_err, irq_idx);
	return ret;
}
3983
/**
 * stmmac_request_irq_single - request the shared-line interrupt set
 * @dev: network device pointer
 *
 * Non-MSI configuration: the MAC line handles everything, with optional
 * dedicated WoL and safety lines requested only when they are valid (> 0)
 * and distinct from the MAC line.  All lines are IRQF_SHARED.
 *
 * On failure, @irq_err records the failed stage for stmmac_free_irq() to
 * unwind what was already requested.
 *
 * Return: 0 on success, a negative errno from request_irq() otherwise.
 */
static int stmmac_request_irq_single(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	enum request_irq_err irq_err;
	int ret;

	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		irq_err = REQ_IRQ_ERR_MAC;
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line
	 * is used for WoL
	 */
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			irq_err = REQ_IRQ_ERR_WOL;
			goto irq_error;
		}
	}

	/* Request the common Safety Feature Correctible/Uncorrectible
	 * Error line in case of another line is used
	 */
	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
				   __func__, priv->sfty_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY;
			goto irq_error;
		}
	}

	return 0;

irq_error:
	stmmac_free_irq(dev, irq_err, 0);
	return ret;
}
4036
stmmac_request_irq(struct net_device * dev)4037 static int stmmac_request_irq(struct net_device *dev)
4038 {
4039 struct stmmac_priv *priv = netdev_priv(dev);
4040 int ret;
4041
4042 /* Request the IRQ lines */
4043 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
4044 ret = stmmac_request_irq_multi_msi(dev);
4045 else
4046 ret = stmmac_request_irq_single(dev);
4047
4048 return ret;
4049 }
4050
4051 /**
4052 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
4053 * @priv: driver private structure
4054 * @mtu: MTU to setup the dma queue and buf with
4055 * Description: Allocate and generate a dma_conf based on the provided MTU.
4056 * Allocate the Tx/Rx DMA queue and init them.
4057 * Return value:
4058 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
4059 */
4060 static struct stmmac_dma_conf *
stmmac_setup_dma_desc(struct stmmac_priv * priv,unsigned int mtu)4061 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
4062 {
4063 struct stmmac_dma_conf *dma_conf;
4064 int bfsize, ret;
4065 u8 chan;
4066
4067 dma_conf = kzalloc_obj(*dma_conf);
4068 if (!dma_conf) {
4069 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
4070 __func__);
4071 return ERR_PTR(-ENOMEM);
4072 }
4073
4074 /* Returns 0 or BUF_SIZE_16KiB if mtu > 8KiB and dwmac4 or ring mode */
4075 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
4076 if (bfsize < 0)
4077 bfsize = 0;
4078
4079 if (bfsize < BUF_SIZE_16KiB)
4080 bfsize = stmmac_set_bfsize(mtu);
4081
4082 dma_conf->dma_buf_sz = bfsize;
4083 /* Chose the tx/rx size from the already defined one in the
4084 * priv struct. (if defined)
4085 */
4086 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
4087 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
4088
4089 if (!dma_conf->dma_tx_size)
4090 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
4091 if (!dma_conf->dma_rx_size)
4092 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
4093
4094 /* Earlier check for TBS */
4095 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
4096 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
4097 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
4098
4099 /* Setup per-TXQ tbs flag before TX descriptor alloc */
4100 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
4101 }
4102
4103 ret = alloc_dma_desc_resources(priv, dma_conf);
4104 if (ret < 0) {
4105 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
4106 __func__);
4107 goto alloc_error;
4108 }
4109
4110 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
4111 if (ret < 0) {
4112 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4113 __func__);
4114 goto init_error;
4115 }
4116
4117 return dma_conf;
4118
4119 init_error:
4120 free_dma_desc_resources(priv, dma_conf);
4121 alloc_error:
4122 kfree(dma_conf);
4123 return ERR_PTR(ret);
4124 }
4125
4126 /**
4127 * __stmmac_open - open entry point of the driver
4128 * @dev : pointer to the device structure.
4129 * @dma_conf : structure to take the dma data
4130 * Description:
4131 * This function is the open entry point of the driver.
4132 * Return value:
4133 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4134 * file on failure.
4135 */
static int __stmmac_open(struct net_device *dev,
			 struct stmmac_dma_conf *dma_conf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 chan;
	int ret;

	/* Carry the user-enabled TBS state (STMMAC_TBS_EN) from the live
	 * configuration into the new one before it is installed, since
	 * stmmac_setup_dma_desc() only sets the TBS_AVAIL capability bit.
	 */
	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
	/* Install the freshly-allocated DMA configuration; ownership of the
	 * rings transfers to priv->dma_conf (the caller frees only the
	 * container struct on success).
	 */
	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));

	stmmac_reset_queues_param(priv);

	ret = stmmac_hw_setup(dev);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_setup_ptp(priv);

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);

	stmmac_vlan_restore(priv);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(priv->dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

irq_error:
	/* Undo what succeeded above: stop phylink, cancel the per-queue TX
	 * coalescing timers armed by stmmac_init_coalesce(), release PTP.
	 */
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	stmmac_release_ptp(priv);
init_error:
	return ret;
}
4184
/**
 * stmmac_open - open entry point of the driver
 * @dev: device pointer
 *
 * Allocates the DMA configuration, resumes the device, connects the PHY,
 * optionally powers the SerDes, then brings the interface up through
 * __stmmac_open().  On success only the dma_conf container is freed (the
 * rings it described now live in priv->dma_conf).
 *
 * Return: 0 on success, a negative errno on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_dma_conf *dma_conf;
	int ret;

	/* Initialise the tx lpi timer, converting from msec to usec */
	if (!priv->tx_lpi_timer)
		priv->tx_lpi_timer = eee_timer * 1000;

	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
	if (IS_ERR(dma_conf))
		return PTR_ERR(dma_conf);

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		goto err_dma_resources;

	ret = stmmac_init_phy(dev);
	if (ret)
		goto err_runtime_pm;

	/* Power the SerDes now unless the platform defers it until after
	 * PHY link-up.
	 */
	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)) {
		ret = stmmac_legacy_serdes_power_up(priv);
		if (ret < 0)
			goto err_disconnect_phy;
	}

	ret = __stmmac_open(dev, dma_conf);
	if (ret)
		goto err_serdes;

	/* Rings were copied into priv->dma_conf; free only the container */
	kfree(dma_conf);

	/* We may have called phylink_speed_down before */
	phylink_speed_up(priv->phylink);

	return ret;

err_serdes:
	stmmac_legacy_serdes_power_down(priv);
err_disconnect_phy:
	phylink_disconnect_phy(priv->phylink);
err_runtime_pm:
	pm_runtime_put(priv->device);
err_dma_resources:
	/* Safe after __stmmac_open() failure too: the ring pointers in
	 * dma_conf are the same ones copied into priv->dma_conf.
	 */
	free_dma_desc_resources(priv, dma_conf);
	kfree(dma_conf);
	return ret;
}
4235
/* Common teardown for close and reconfiguration paths: stop the link and
 * NAPI, quiesce the TX path, release IRQs, halt DMA and free the ring
 * resources.  The PHY is only stopped here; disconnecting it is left to
 * stmmac_release().
 */
static void __stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 chan;

	/* Stop the PHY / link state machine */
	phylink_stop(priv->phylink);

	stmmac_disable_all_queues(priv);

	/* Cancel the per-queue TX coalescing timers */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	netif_tx_disable(dev);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	stmmac_release_ptp(priv);

	if (stmmac_fpe_supported(priv))
		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
}
4265
4266 /**
4267 * stmmac_release - close entry point of the driver
4268 * @dev : device pointer.
4269 * Description:
4270 * This is the stop entry point of the driver.
4271 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* If the PHY or MAC has WoL enabled, then the PHY will not be
	 * suspended when phylink_stop() is called below. Set the PHY
	 * to its slowest speed to save power.
	 */
	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);

	/* Common teardown: stops the link, IRQs, DMA and rings */
	__stmmac_release(dev);

	stmmac_legacy_serdes_power_down(priv);
	phylink_disconnect_phy(priv->phylink);
	/* Balances pm_runtime_resume_and_get() from stmmac_open() */
	pm_runtime_put(priv->device);

	return 0;
}
4291
stmmac_vlan_insert(struct stmmac_priv * priv,struct sk_buff * skb,struct stmmac_tx_queue * tx_q)4292 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4293 struct stmmac_tx_queue *tx_q)
4294 {
4295 struct dma_desc *p;
4296 u16 tag = 0x0;
4297
4298 if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
4299 return false;
4300
4301 tag = skb_vlan_tag_get(skb);
4302
4303 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4304 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4305 else
4306 p = &tx_q->dma_tx[tx_q->cur_tx];
4307
4308 if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
4309 return false;
4310
4311 stmmac_set_tx_owner(priv, p);
4312 tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4313 return true;
4314 }
4315
4316 /**
4317 * stmmac_tso_allocator - close entry point of the driver
4318 * @priv: driver private structure
4319 * @des: buffer start address
4320 * @total_len: total length to fill in descriptors
4321 * @last_segment: condition for the last descriptor
4322 * @queue: TX queue index
4323 * Description:
4324 * This function fills descriptor and request new descriptors according to
4325 * buffer length to fill
4326 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct dma_desc *desc;
	int offset = 0;

	/* Spread the buffer at @des over as many descriptors as needed,
	 * each carrying at most TSO_MAX_BUFF_SIZE bytes.
	 */
	while (offset < total_len) {
		int remain = total_len - offset;
		u32 buff_size = remain > TSO_MAX_BUFF_SIZE ?
				TSO_MAX_BUFF_SIZE : remain;

		tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
						 priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		stmmac_set_desc_addr(priv, desc, des + offset);

		/* LS is flagged on the final chunk of the last segment */
		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, 0, 1,
					   last_segment &&
					   remain <= TSO_MAX_BUFF_SIZE,
					   0, 0);

		offset += TSO_MAX_BUFF_SIZE;
	}
}
4362
/* Publish freshly-prepared TX descriptors to the DMA engine by advancing the
 * queue tail pointer, after a write barrier guarantees the descriptor
 * contents (including the OWN bits) are visible first.
 */
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	stmmac_set_queue_tx_tail_ptr(priv, tx_q, queue, tx_q->cur_tx);
}
4375
/* Record which GSO types the hardware will segment: none when @tso is
 * false, otherwise TCPv4/TCPv6, plus UDP L4 on GMAC4 cores.
 */
static void stmmac_set_gso_types(struct stmmac_priv *priv, bool tso)
{
	priv->gso_enabled_types = 0;

	if (!tso)
		return;

	priv->gso_enabled_types = SKB_GSO_TCPV4 | SKB_GSO_TCPV6;
	if (priv->plat->core_type == DWMAC_CORE_GMAC4)
		priv->gso_enabled_types |= SKB_GSO_UDP_L4;
}
4387
stmmac_set_gso_features(struct net_device * ndev)4388 static void stmmac_set_gso_features(struct net_device *ndev)
4389 {
4390 struct stmmac_priv *priv = netdev_priv(ndev);
4391 const struct stmmac_dma_cfg *dma_cfg;
4392 int txpbl;
4393
4394 if (priv->dma_cap.tsoen)
4395 dev_info(priv->device, "TSO supported\n");
4396
4397 if (!(priv->plat->flags & STMMAC_FLAG_TSO_EN))
4398 return;
4399
4400 if (!priv->dma_cap.tsoen) {
4401 dev_warn(priv->device, "platform requests unsupported TSO\n");
4402 return;
4403 }
4404
4405 /* FIXME:
4406 * STM32MP151 (v4.2 userver v4.0) states that TxPBL must be >= 4. It
4407 * is not clear whether PBLx8 (which multiplies the PBL value by 8)
4408 * influences this.
4409 */
4410 dma_cfg = priv->plat->dma_cfg;
4411 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
4412 if (txpbl < 4) {
4413 dev_warn(priv->device, "txpbl(%d) is too low for TSO\n", txpbl);
4414 return;
4415 }
4416
4417 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4418 if (priv->plat->core_type == DWMAC_CORE_GMAC4)
4419 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
4420
4421 stmmac_set_gso_types(priv, true);
4422
4423 dev_info(priv->device, "TSO feature enabled\n");
4424 }
4425
stmmac_tso_header_size(struct sk_buff * skb)4426 static size_t stmmac_tso_header_size(struct sk_buff *skb)
4427 {
4428 size_t size;
4429
4430 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
4431 size = skb_transport_offset(skb) + sizeof(struct udphdr);
4432 else
4433 size = skb_tcp_all_headers(skb);
4434
4435 return size;
4436 }
4437
4438 /* STM32MP151 (dwmac v4.2) and STM32MP25xx (dwmac v5.3) states for TDES2 normal
4439 * (read format) descriptor that the maximum header length supported for the
4440 * TSO feature is 1023 bytes.
4441 *
4442 * While IPv4 is limited to MAC+VLAN+IPv4+ext+TCP+ext = 138 bytes, the IPv6
4443 * extension headers aren't similarly limited.
4444 *
4445 * Fall back to software GSO for these skbs. Also check that the MSS is >=
4446 * the recommended 64 bytes (documented in ETH_DMACxCR register description),
4447 * and that a the header plus MSS is not larger than 16383 (documented in
4448 * "Building the Descriptor and the packet for the TSO feature").
4449 */
stmmac_tso_valid_packet(struct sk_buff * skb)4450 static bool stmmac_tso_valid_packet(struct sk_buff *skb)
4451 {
4452 size_t header_len = stmmac_tso_header_size(skb);
4453 unsigned int gso_size = skb_shinfo(skb)->gso_size;
4454
4455 return header_len <= 1023 && gso_size >= 64 &&
4456 header_len + gso_size < 16383;
4457 }
4458
4459 /**
4460 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4461 * @skb : the socket buffer
4462 * @dev : device pointer
4463 * Description: this is the transmit function that is called on TSO frames
4464 * (support available on GMAC4 and newer chips).
4465 * Diagram below show the ring programming in case of TSO frames:
4466 *
4467 * First Descriptor
4468 * --------
4469 * | DES0 |---> buffer1 = L2/L3/L4 header
4470 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4471 * | | width is 32-bit, but we never use it.
4472 * | | Also can be used as the most-significant 8-bits or 16-bits of
4473 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4474 * | | or 48-bit, and we always use it.
4475 * | DES2 |---> buffer1 len
4476 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4477 * --------
4478 * --------
4479 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4480 * | DES1 |---> same as the First Descriptor
4481 * | DES2 |---> buffer1 len
4482 * | DES3 |
4483 * --------
4484 * |
4485 * ...
4486 * |
4487 * --------
4488 * | DES0 |---> buffer1 = Split TCP Payload
4489 * | DES1 |---> same as the First Descriptor
4490 * | DES2 |---> buffer1 len
4491 * | DES3 |
4492 * --------
4493 *
4494 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4495 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int first_entry, tx_packets;
	struct stmmac_txq_stats *txq_stats;
	struct stmmac_tx_queue *tx_q;
	bool set_ic, is_last_segment;
	u32 pay_len, mss, queue;
	int i, first_tx, nfrags;
	u8 proto_hdr_len, hdr;
	dma_addr_t des;

	nfrags = skb_shinfo(skb)->nr_frags;
	queue = skb_get_queue_mapping(skb);

	tx_q = &priv->dma_conf.tx_queue[queue];
	txq_stats = &priv->xstats.txq_stats[queue];
	first_tx = tx_q->cur_tx;

	/* Compute header lengths: proto_hdr_len covers all headers through
	 * L4, hdr is the L4 header length alone (used to fill TDES3).
	 */
	proto_hdr_len = stmmac_tso_header_size(skb);
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		hdr = sizeof(struct udphdr);
	else
		hdr = tcp_hdrlen(skb);

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed: program a context descriptor ahead
	 * of the first data descriptor, but defer setting its OWN bit until
	 * the very end (see the mss_desc handling below).
	 */
	if (mss != tx_q->mss) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];

		stmmac_set_mss(priv, mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
						 priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, hdr, proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[first_entry].basic;
	else
		desc = &tx_q->dma_tx[first_entry];
	first = desc;

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	stmmac_set_desc_addr(priv, first, des);
	/* Remaining linear payload goes into follow-on descriptors */
	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
			     (nfrags == 0), queue);

	/* In case two or more DMA transmit descriptors are allocated for this
	 * non-paged SKB data, the DMA buffer address should be saved to
	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
	 * since the tail areas of the DMA buffer can be accessed by DMA engine
	 * sooner or later.
	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
	 * this DMA buffer right after the DMA engine completely finishes the
	 * full buffer transmission.
	 */
	stmmac_set_tx_skb_dma_entry(tx_q, tx_q->cur_tx, des, skb_headlen(skb),
				    false);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		stmmac_set_tx_skb_dma_entry(tx_q, tx_q->cur_tx, des,
					    skb_frag_size(frag), true);
	}

	stmmac_set_tx_dma_last_segment(tx_q, tx_q->cur_tx);

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;

	/* Manage tx mitigation: decide whether this packet's last descriptor
	 * should raise a completion interrupt, based on the coalescing
	 * frame budget for this queue.
	 */
	tx_packets = CIRC_CNT(tx_q->cur_tx + 1, first_tx,
			      priv->dma_conf.dma_tx_size);
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	u64_stats_update_begin(&txq_stats->q_syncp);
	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
	u64_stats_inc(&txq_stats->q.tx_tso_frames);
	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
	if (set_ic)
		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
	u64_stats_update_end(&txq_stats->q_syncp);

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* If we only have one entry used, then the first entry is the last
	 * segment.
	 */
	is_last_segment = CIRC_CNT(tx_q->cur_tx, first_entry,
				   priv->dma_conf.dma_tx_size) == 1;

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
				   is_last_segment, hdr / 4,
				   skb->len - proto_hdr_len);

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
	skb_tx_timestamp(skb);

	/* Barrier + tail-pointer write hands the descriptors to the DMA */
	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->xstats.tx_dropped++;
	return NETDEV_TX_OK;
}
4720
4721 /**
4722 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4723 * @skb: socket buffer to check
4724 *
4725 * Check if a packet has an ethertype that will trigger the IP header checks
4726 * and IP/TCP checksum engine of the stmmac core.
4727 *
4728 * Return: true if the ethertype can trigger the checksum engine, false
4729 * otherwise
4730 */
stmmac_has_ip_ethertype(struct sk_buff * skb)4731 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4732 {
4733 int depth = 0;
4734 __be16 proto;
4735
4736 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4737 &depth);
4738
4739 return (depth <= ETH_HLEN) &&
4740 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4741 }
4742
4743 /**
4744 * stmmac_xmit - Tx entry point of the driver
4745 * @skb : the socket buffer
4746 * @dev : device pointer
4747 * Description : this is the tx entry point of the driver.
4748 * It programs the chain or the ring and supports oversized frames
4749 * and SG feature.
4750 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	bool enh_desc, has_vlan, set_ic, is_jumbo = false;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	unsigned int first_entry, tx_packets;
	struct stmmac_txq_stats *txq_stats;
	struct dma_desc *desc, *first_desc;
	struct stmmac_tx_queue *tx_q;
	int i, csum_insertion = 0;
	int entry, first_tx;
	dma_addr_t dma_addr;
	u32 sdu_len;

	/* Leave software LPI before programming the ring so the TX path is
	 * out of low-power state for this transmission.
	 */
	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
		stmmac_stop_sw_lpi(priv);

	/* GSO frames of a hardware-supported type take the TSO xmit path */
	if (skb_is_gso(skb) &&
	    skb_shinfo(skb)->gso_type & priv->gso_enabled_types)
		return stmmac_tso_xmit(skb, dev);

	/* With EST enabled, drop frames exceeding the per-queue max SDU */
	if (priv->est && priv->est->enable &&
	    priv->est->max_sdu[queue]) {
		sdu_len = skb->len;
		/* Add VLAN tag length if VLAN tag insertion offload is requested */
		if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
			sdu_len += VLAN_HLEN;
		if (sdu_len > priv->est->max_sdu[queue]) {
			priv->xstats.max_sdu_txq_drop[queue]++;
			goto max_sdu_err;
		}
	}

	/* Need one descriptor for the head plus one per fragment */
	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	tx_q = &priv->dma_conf.tx_queue[queue];
	first_tx = tx_q->cur_tx;

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	desc = stmmac_get_tx_desc(priv, tx_q, entry);
	first_desc = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first_desc, STMMAC_VLAN_INSERT);

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	csum_insertion = skb->ip_summed == CHECKSUM_PARTIAL;

	if (unlikely(is_jumbo)) {
		/* Jumbo frames are chained across descriptors by the helper;
		 * -EINVAL means "not actually jumbo, fall through" elsewhere,
		 * any other negative value is a mapping failure.
		 */
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	} else {
		bool last_segment = (nfrags == 0);

		/* Map the linear part of the skb */
		dma_addr = dma_map_single(priv->device, skb->data,
					  nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			goto dma_map_err;

		stmmac_set_tx_skb_dma_entry(tx_q, first_entry, dma_addr,
					    nopaged_len, false);

		stmmac_set_desc_addr(priv, first_desc, dma_addr);

		if (last_segment)
			stmmac_set_tx_dma_last_segment(tx_q, first_entry);

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first_desc);
		}

		/* Prepare the first descriptor without setting the OWN bit;
		 * it is granted to the DMA only after all other descriptors
		 * of this skb have been fully written (see below).
		 */
		stmmac_prepare_tx_desc(priv, first_desc, 1, nopaged_len,
				       csum_insertion, priv->descriptor_mode,
				       0, last_segment, skb->len);
	}

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first_desc, priv->sarc_type);

	/* STMMAC_TBS_EN can only be set if STMMAC_TBS_AVAIL has already
	 * been set, which means the underlying type of the descriptors
	 * will be struct stmmac_edesc. Therefore, it is safe to convert
	 * the basic descriptor to the enhanced descriptor here.
	 */
	if (tx_q->tbs & STMMAC_TBS_EN) {
		struct timespec64 ts = ns_to_timespec64(skb->tstamp);

		stmmac_set_desc_tbs(priv, dma_desc_to_edesc(first_desc),
				    ts.tv_sec, ts.tv_nsec);
	}

	/* Map and program one descriptor per SG fragment; these DO get the
	 * OWN bit immediately (third arg of stmmac_prepare_tx_desc is 1).
	 */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int frag_size = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[entry]);

		desc = stmmac_get_tx_desc(priv, tx_q, entry);

		dma_addr = skb_frag_dma_map(priv->device, frag, 0, frag_size,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			goto dma_map_err; /* should reuse desc w/o issues */

		stmmac_set_tx_skb_dma_entry(tx_q, entry, dma_addr, frag_size,
					    true);
		stmmac_set_desc_addr(priv, desc, dma_addr);

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, frag_size, csum_insertion,
				       priv->descriptor_mode, 1, last_segment,
				       skb->len);
	}

	stmmac_set_tx_dma_last_segment(tx_q, entry);

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_packets = CIRC_CNT(entry + 1, first_tx, priv->dma_conf.dma_tx_size);
	tx_q->tx_count_frames += tx_packets;

	/* Request a TX-complete interrupt when timestamping, or per the
	 * frame coalescing configuration for this queue.
	 */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		desc = stmmac_get_tx_desc(priv, tx_q, entry);
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first_desc, nfrags);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	/* Stop the queue early if a maximally-fragmented skb no longer fits */
	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	txq_stats = &priv->xstats.txq_stats[queue];
	u64_stats_update_begin(&txq_stats->q_syncp);
	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
	if (set_ic)
		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
	u64_stats_update_end(&txq_stats->q_syncp);

	/* Set the OWN bit on the first descriptor now that all descriptors
	 * for this skb are populated.
	 */
	stmmac_set_tx_owner(priv, first_desc);

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
	skb_tx_timestamp(skb);
	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
max_sdu_err:
	dev_kfree_skb(skb);
	priv->xstats.tx_dropped++;
	return NETDEV_TX_OK;
}
4977
/* ndo_features_check: adjust the offload features usable for this skb.
 *
 * Checksum offload is withdrawn for queues synthesized without TX COE
 * support and for frames whose ethertype the COE would not handle (e.g.
 * most DSA-tagged packets); dropping the checksum bits also disables TSO
 * via harmonize_features() in net/core/dev.c. When hardware TSO stays
 * enabled, HW VLAN insertion is disabled because the TSO engine would
 * leave the split segments un-tagged.
 */
static netdev_features_t stmmac_features_check(struct sk_buff *skb,
					       struct net_device *dev,
					       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	bool can_csum;

	can_csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported &&
		   stmmac_has_ip_ethertype(skb);
	if (!can_csum)
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);

	if (skb_is_gso(skb)) {
		bool can_tso = stmmac_tso_channel_permitted(priv, queue) &&
			       stmmac_tso_valid_packet(skb);

		if (!can_tso)
			features &= ~NETIF_F_GSO_MASK;

		/* VLAN tag must live in the payload for TSO frames */
		if (features & NETIF_F_GSO_MASK)
			features &= ~(NETIF_F_HW_VLAN_STAG_TX |
				      NETIF_F_HW_VLAN_CTAG_TX);
	}

	return vlan_features_check(skb, features);
}
5017
stmmac_rx_vlan(struct net_device * dev,struct sk_buff * skb)5018 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
5019 {
5020 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
5021 __be16 vlan_proto = veth->h_vlan_proto;
5022 u16 vlanid;
5023
5024 if ((vlan_proto == htons(ETH_P_8021Q) &&
5025 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
5026 (vlan_proto == htons(ETH_P_8021AD) &&
5027 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
5028 /* pop the vlan tag */
5029 vlanid = ntohs(veth->h_vlan_TCI);
5030 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
5031 skb_pull(skb, VLAN_HLEN);
5032 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
5033 }
5034 }
5035
5036 /**
5037 * stmmac_rx_refill - refill used skb preallocated buffers
5038 * @priv: driver private structure
5039 * @queue: RX queue index
5040 * Description : this is to reallocate the skb for the reception process
5041 * that is based on zero-copy.
5042 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	/* Keep allocations DMA-addressable when the host DMA width is <= 32 */
	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		p = stmmac_get_rx_desc(priv, rx_q, entry);

		/* Replace the primary page if it was handed up the stack */
		if (!buf->page) {
			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->page)
				break;
		}

		/* Secondary page is only needed when Split Header is active */
		if (priv->sph_active && !buf->sec_page) {
			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph_active)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		/* Track frames since the last coalesced interrupt; the
		 * counter is bumped by 1 + rx_coal_frames and wraps to 0
		 * once it exceeds rx_coal_frames.
		 */
		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		/* Arm the RX watchdog only when interrupt-on-completion is
		 * being withheld, and only if the RIWT feature is in use.
		 */
		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* All descriptor fields must be written before ownership is
		 * transferred to the DMA engine.
		 */
		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	stmmac_set_queue_rx_tail_ptr(priv, rx_q, queue, rx_q->dirty_rx);
	/* Wake up Rx DMA from the suspend state if required */
	stmmac_enable_dma_reception(priv, priv->ioaddr, queue);
}
5103
stmmac_rx_buf1_len(struct stmmac_priv * priv,struct dma_desc * p,int status,unsigned int len)5104 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
5105 struct dma_desc *p,
5106 int status, unsigned int len)
5107 {
5108 unsigned int plen = 0, hlen = 0;
5109 int coe = priv->hw->rx_csum;
5110
5111 /* Not first descriptor, buffer is always zero */
5112 if (priv->sph_active && len)
5113 return 0;
5114
5115 /* First descriptor, get split header length */
5116 stmmac_get_rx_header_len(priv, p, &hlen);
5117 if (priv->sph_active && hlen) {
5118 priv->xstats.rx_split_hdr_pkt_n++;
5119 return hlen;
5120 }
5121
5122 /* First descriptor, not last descriptor and not split header */
5123 if (status & rx_not_ls)
5124 return priv->dma_conf.dma_buf_sz;
5125
5126 plen = stmmac_get_rx_frame_len(priv, p, coe);
5127
5128 /* First descriptor and last descriptor and not split header */
5129 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
5130 }
5131
stmmac_rx_buf2_len(struct stmmac_priv * priv,struct dma_desc * p,int status,unsigned int len)5132 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
5133 struct dma_desc *p,
5134 int status, unsigned int len)
5135 {
5136 int coe = priv->hw->rx_csum;
5137 unsigned int plen = 0;
5138
5139 /* Not split header, buffer is not available */
5140 if (!priv->sph_active)
5141 return 0;
5142
5143 /* For GMAC4, when split header is enabled, in some rare cases, the
5144 * hardware does not fill buf2 of the first descriptor with payload.
5145 * Thus we cannot assume buf2 is always fully filled if it is not
5146 * the last descriptor. Otherwise, the length of buf2 of the second
5147 * descriptor will be calculated wrong and cause an oops.
5148 *
5149 * If this is the last descriptor, 'plen' is the length of the
5150 * received packet that was transferred to system memory.
5151 * Otherwise, it is the accumulated number of bytes that have been
5152 * transferred for the current packet.
5153 *
5154 * Thus 'plen - len' always gives the correct length of buf2.
5155 */
5156
5157 /* Not GMAC4 and not last descriptor */
5158 if (priv->plat->core_type != DWMAC_CORE_GMAC4 && (status & rx_not_ls))
5159 return priv->dma_conf.dma_buf_sz;
5160
5161 /* GMAC4 or last descriptor */
5162 plen = stmmac_get_rx_frame_len(priv, p, coe);
5163
5164 return plen - len;
5165 }
5166
/* Queue one XDP frame on TX @queue.
 *
 * @dma_map: true for ndo_xdp_xmit (redirected frames that must be freshly
 * DMA-mapped), false for XDP_TX where the frame already lives in a
 * page-pool page and only a DMA sync is needed.
 *
 * Returns STMMAC_XDP_TX on success or STMMAC_XDP_CONSUMED when the ring
 * is too full, the frame exceeds the EST max SDU, or mapping fails.
 */
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
				struct xdp_frame *xdpf, bool dma_map)
{
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
	unsigned int entry = tx_q->cur_tx;
	enum stmmac_txbuf_type buf_type;
	struct dma_desc *tx_desc;
	dma_addr_t dma_addr;
	bool set_ic;

	/* Keep a reserve of free descriptors for the normal TX path */
	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
		return STMMAC_XDP_CONSUMED;

	/* Enforce the EST per-queue max SDU, as in stmmac_xmit() */
	if (priv->est && priv->est->enable &&
	    priv->est->max_sdu[queue] &&
	    xdpf->len > priv->est->max_sdu[queue]) {
		priv->xstats.max_sdu_txq_drop[queue]++;
		return STMMAC_XDP_CONSUMED;
	}

	tx_desc = stmmac_get_tx_desc(priv, tx_q, entry);
	if (dma_map) {
		/* ndo_xdp_xmit: map the frame data for the device */
		dma_addr = dma_map_single(priv->device, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			return STMMAC_XDP_CONSUMED;

		buf_type = STMMAC_TXBUF_T_XDP_NDO;
	} else {
		/* XDP_TX: reuse the RX page-pool page; its DMA address is
		 * the page's, offset past the xdp_frame struct + headroom.
		 */
		struct page *page = virt_to_page(xdpf->data);

		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		buf_type = STMMAC_TXBUF_T_XDP_TX;
	}

	stmmac_set_tx_dma_entry(tx_q, entry, buf_type, dma_addr, xdpf->len,
				false);
	stmmac_set_tx_dma_last_segment(tx_q, entry);

	/* Remember the frame so stmmac_tx_clean can release it */
	tx_q->xdpf[entry] = xdpf;

	stmmac_set_desc_addr(priv, tx_desc, dma_addr);

	/* Single descriptor: first and last segment, OWN bit set */
	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
			       csum, priv->descriptor_mode, true, true,
			       xdpf->len);

	tx_q->tx_count_frames++;

	/* Interrupt every tx_coal_frames packets to reap the ring */
	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, tx_desc);
		u64_stats_update_begin(&txq_stats->q_syncp);
		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
		u64_stats_update_end(&txq_stats->q_syncp);
	}

	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);

	entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
	tx_q->cur_tx = entry;

	return STMMAC_XDP_TX;
}
5242
stmmac_xdp_get_tx_queue(struct stmmac_priv * priv,int cpu)5243 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5244 int cpu)
5245 {
5246 int index = cpu;
5247
5248 if (unlikely(index < 0))
5249 index = 0;
5250
5251 while (index >= priv->plat->tx_queues_to_use)
5252 index -= priv->plat->tx_queues_to_use;
5253
5254 return index;
5255 }
5256
/* Handle an XDP_TX verdict: convert the buffer to an xdp_frame and queue
 * it on a TX queue chosen from the current CPU, taking the netdev TX lock
 * shared with the regular transmit path.
 *
 * Returns STMMAC_XDP_TX on success, STMMAC_XDP_CONSUMED when the frame
 * could not be converted or queued, or STMMAC_XSK_CONSUMED for a zero-copy
 * buffer already released here (so the caller must not free it again).
 */
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
				struct xdp_buff *xdp)
{
	bool zc = !!(xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL);
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int queue;
	int res;

	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	/* For zero copy XDP_TX action, dma_map is true */
	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, zc);
	if (res == STMMAC_XDP_TX) {
		stmmac_flush_tx_descriptors(priv, queue);
	} else if (res == STMMAC_XDP_CONSUMED && zc) {
		/* xdp has been freed by xdp_convert_buff_to_frame(),
		 * no need to call xsk_buff_free() again, so return
		 * STMMAC_XSK_CONSUMED.
		 */
		res = STMMAC_XSK_CONSUMED;
		xdp_return_frame(xdpf);
	}

	__netif_tx_unlock(nq);

	return res;
}
5294
__stmmac_xdp_run_prog(struct stmmac_priv * priv,struct bpf_prog * prog,struct xdp_buff * xdp)5295 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5296 struct bpf_prog *prog,
5297 struct xdp_buff *xdp)
5298 {
5299 u32 act;
5300 int res;
5301
5302 act = bpf_prog_run_xdp(prog, xdp);
5303 switch (act) {
5304 case XDP_PASS:
5305 res = STMMAC_XDP_PASS;
5306 break;
5307 case XDP_TX:
5308 res = stmmac_xdp_xmit_back(priv, xdp);
5309 break;
5310 case XDP_REDIRECT:
5311 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5312 res = STMMAC_XDP_CONSUMED;
5313 else
5314 res = STMMAC_XDP_REDIRECT;
5315 break;
5316 default:
5317 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5318 fallthrough;
5319 case XDP_ABORTED:
5320 trace_xdp_exception(priv->dev, prog, act);
5321 fallthrough;
5322 case XDP_DROP:
5323 res = STMMAC_XDP_CONSUMED;
5324 break;
5325 }
5326
5327 return res;
5328 }
5329
stmmac_xdp_run_prog(struct stmmac_priv * priv,struct xdp_buff * xdp)5330 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5331 struct xdp_buff *xdp)
5332 {
5333 struct bpf_prog *prog;
5334 int res;
5335
5336 prog = READ_ONCE(priv->xdp_prog);
5337 if (!prog) {
5338 res = STMMAC_XDP_PASS;
5339 goto out;
5340 }
5341
5342 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5343 out:
5344 return ERR_PTR(-res);
5345 }
5346
stmmac_finalize_xdp_rx(struct stmmac_priv * priv,int xdp_status)5347 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5348 int xdp_status)
5349 {
5350 int cpu = smp_processor_id();
5351 int queue;
5352
5353 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5354
5355 if (xdp_status & STMMAC_XDP_TX)
5356 stmmac_tx_timer_arm(priv, queue);
5357
5358 if (xdp_status & STMMAC_XDP_REDIRECT)
5359 xdp_do_flush();
5360 }
5361
stmmac_construct_skb_zc(struct stmmac_channel * ch,struct xdp_buff * xdp)5362 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5363 struct xdp_buff *xdp)
5364 {
5365 unsigned int metasize = xdp->data - xdp->data_meta;
5366 unsigned int datasize = xdp->data_end - xdp->data;
5367 struct sk_buff *skb;
5368
5369 skb = napi_alloc_skb(&ch->rxtx_napi,
5370 xdp->data_end - xdp->data_hard_start);
5371 if (unlikely(!skb))
5372 return NULL;
5373
5374 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5375 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5376 if (metasize)
5377 skb_metadata_set(skb, metasize);
5378
5379 return skb;
5380 }
5381
/* Build an skb from a zero-copy RX buffer and hand it to the stack:
 * attach RX timestamp, strip VLAN (MAC- or driver-level), set checksum
 * state and RSS hash, then deliver via GRO and update NAPI counters.
 */
static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
				   struct dma_desc *p, struct dma_desc *np,
				   struct xdp_buff *xdp)
{
	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int len = xdp->data_end - xdp->data;
	enum pkt_hash_types hash_type;
	int coe = priv->hw->rx_csum;
	struct sk_buff *skb;
	u32 hash;

	skb = stmmac_construct_skb_zc(ch, xdp);
	if (!skb) {
		priv->xstats.rx_dropped++;
		return;
	}

	stmmac_get_rx_hwtstamp(priv, p, np, skb);
	if (priv->hw->hw_vlan_en)
		/* MAC level stripping. */
		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
	else
		/* Driver level stripping. */
		stmmac_rx_vlan(priv->dev, skb);
	skb->protocol = eth_type_trans(skb, priv->dev);

	/* Trust the HW checksum only when RX COE is on and the ethertype
	 * is one the engine actually checksums.
	 */
	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
		skb_set_hash(skb, hash, hash_type);

	skb_record_rx_queue(skb, queue);
	napi_gro_receive(&ch->rxtx_napi, skb);

	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
	u64_stats_update_end(&rxq_stats->napi_syncp);
}
5425
/* Refill the zero-copy RX ring with buffers from the XSK pool, up to
 * @budget entries. Returns false if the pool ran out of buffers before
 * the budget was exhausted (caller may need to signal need_wakeup).
 */
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	unsigned int entry = rx_q->dirty_rx;
	struct dma_desc *rx_desc = NULL;
	bool ret = true;

	budget = min(budget, stmmac_rx_dirty(priv, queue));

	while (budget-- > 0 && entry != rx_q->cur_rx) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		dma_addr_t dma_addr;
		bool use_rx_wd;

		/* Grab a fresh XSK buffer if this slot was consumed */
		if (!buf->xdp) {
			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
			if (!buf->xdp) {
				ret = false;
				break;
			}
		}

		rx_desc = stmmac_get_rx_desc(priv, rx_q, entry);

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
		stmmac_refill_desc3(priv, rx_q, rx_desc);

		/* Same coalescing scheme as stmmac_rx_refill(): bump by
		 * 1 + rx_coal_frames, wrap past rx_coal_frames.
		 */
		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* Publish descriptor fields before handing over ownership */
		dma_wmb();
		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);

		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}

	/* Only advance the tail pointer if at least one slot was refilled */
	if (rx_desc) {
		rx_q->dirty_rx = entry;
		stmmac_set_queue_rx_tail_ptr(priv, rx_q, queue, rx_q->dirty_rx);
	}

	return ret;
}
5478
/* Reinterpret an XSK buffer as the driver's per-packet context.
 *
 * struct stmmac_xdp_buff is laid out over the xdp and cb fields of
 * struct xdp_buff_xsk: xdp describes the incoming packet while cb holds
 * driver-private data, so the cast below is valid for zero-copy buffers.
 */
static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
{
	return (struct stmmac_xdp_buff *)xdp;
}
5488
/* Zero-copy (AF_XDP) RX poll for @queue, processing at most @limit
 * frames. Walks the descriptor ring, runs the XDP program on each
 * complete frame and dispatches/recycles the XSK buffers accordingly.
 * Partially-received frame state is saved across calls in rx_q->state.
 */
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	unsigned int count = 0, error = 0, len = 0;
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int next_entry = rx_q->cur_rx;
	u32 rx_errors = 0, rx_dropped = 0;
	unsigned int desc_size;
	struct bpf_prog *prog;
	bool failure = false;
	int xdp_status = 0;
	int status = 0;

	if (netif_msg_rx_status(priv)) {
		void *rx_head = stmmac_get_rx_desc(priv, rx_q, 0);

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		desc_size = stmmac_get_rx_desc_size(priv);

		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		struct stmmac_rx_buffer *buf;
		struct stmmac_xdp_buff *ctx;
		unsigned int buf1_len = 0;
		struct dma_desc *np, *p;
		int entry;
		int res;

		/* Resume a frame that straddled the previous NAPI budget */
		if (!count && rx_q->state_saved) {
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			error = 0;
			len = 0;
		}

read_again:
		if (count >= limit)
			break;

		buf1_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		/* Batch the refill to amortize tail-pointer updates */
		if (dirty >= STMMAC_RX_FILL_BATCH) {
			failure = failure ||
				  !stmmac_rx_refill_zc(priv, queue, dirty);
			dirty = 0;
		}

		p = stmmac_get_rx_desc(priv, rx_q, entry);

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		/* Prefetch the next RX descriptor */
		next_entry = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
					       priv->dma_conf.dma_rx_size);
		if (unlikely(next_entry == rx_q->dirty_rx))
			break;

		rx_q->cur_rx = next_entry;

		np = stmmac_get_rx_desc(priv, rx_q, next_entry);

		prefetch(np);

		/* Ensure a valid XSK buffer before proceed */
		if (!buf->xdp)
			break;

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->xstats,
						  rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			error = 1;
			if (!priv->hwts_rx_en)
				rx_errors++;
		}

		/* Errored multi-descriptor frame: discard remaining parts */
		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			count++;
			continue;
		}

		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
		if (likely(status & rx_not_ls)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			count++;
			goto read_again;
		}

		/* Stash descriptor pointers in the per-buffer context so
		 * XDP hints/metadata kfuncs can reach them.
		 */
		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
		ctx->priv = priv;
		ctx->desc = p;
		ctx->ndesc = np;

		/* XDP ZC Frame only support primary buffers for now */
		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;

		/* ACS is disabled; strip manually. */
		if (likely(!(status & rx_not_ls))) {
			buf1_len -= ETH_FCS_LEN;
			len -= ETH_FCS_LEN;
		}

		/* RX buffer is good and fit into a XSK pool buffer */
		buf->xdp->data_end = buf->xdp->data + buf1_len;
		xsk_buff_dma_sync_for_cpu(buf->xdp);

		prog = READ_ONCE(priv->xdp_prog);
		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);

		switch (res) {
		case STMMAC_XDP_PASS:
			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
			xsk_buff_free(buf->xdp);
			break;
		case STMMAC_XDP_CONSUMED:
			xsk_buff_free(buf->xdp);
			fallthrough;
		case STMMAC_XSK_CONSUMED:
			/* Buffer already freed (here or in xmit_back) */
			rx_dropped++;
			break;
		case STMMAC_XDP_TX:
		case STMMAC_XDP_REDIRECT:
			xdp_status |= res;
			break;
		}

		buf->xdp = NULL;
		dirty++;
		count++;
	}

	/* Save partial-frame state for the next NAPI run */
	if (status & rx_not_ls) {
		rx_q->state_saved = true;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_finalize_xdp_rx(priv, xdp_status);

	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	priv->xstats.rx_dropped += rx_dropped;
	priv->xstats.rx_errors += rx_errors;

	/* Tell user space whether it needs to kick the RX fill path */
	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
		if (failure || stmmac_rx_dirty(priv, queue) > 0)
			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);

		return (int)count;
	}

	/* On refill failure, report the full budget to keep NAPI polling */
	return failure ? limit : (int)count;
}
5665
5666 /**
5667 * stmmac_rx - manage the receive process
5668 * @priv: driver private structure
5669 * @limit: napi bugget
5670 * @queue: RX queue index.
5671 * Description : this the function called by the napi poll method.
5672 * It gets all the frames inside the ring.
5673 */
stmmac_rx(struct stmmac_priv * priv,int limit,u32 queue)5674 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5675 {
5676 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5677 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5678 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5679 struct stmmac_channel *ch = &priv->channel[queue];
5680 unsigned int count = 0, error = 0, len = 0;
5681 int status = 0, coe = priv->hw->rx_csum;
5682 unsigned int next_entry = rx_q->cur_rx;
5683 enum dma_data_direction dma_dir;
5684 unsigned int desc_size;
5685 struct sk_buff *skb = NULL;
5686 struct stmmac_xdp_buff ctx;
5687 int xdp_status = 0;
5688 int bufsz;
5689
5690 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5691 bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5692
5693 if (netif_msg_rx_status(priv)) {
5694 void *rx_head = stmmac_get_rx_desc(priv, rx_q, 0);
5695
5696 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5697 desc_size = stmmac_get_rx_desc_size(priv);
5698
5699 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5700 rx_q->dma_rx_phy, desc_size);
5701 }
5702 while (count < limit) {
5703 unsigned int buf1_len = 0, buf2_len = 0;
5704 enum pkt_hash_types hash_type;
5705 struct stmmac_rx_buffer *buf;
5706 struct dma_desc *np, *p;
5707 int entry;
5708 u32 hash;
5709
5710 if (!count && rx_q->state_saved) {
5711 skb = rx_q->state.skb;
5712 error = rx_q->state.error;
5713 len = rx_q->state.len;
5714 } else {
5715 rx_q->state_saved = false;
5716 skb = NULL;
5717 error = 0;
5718 len = 0;
5719 }
5720
5721 read_again:
5722 if (count >= limit)
5723 break;
5724
5725 buf1_len = 0;
5726 buf2_len = 0;
5727 entry = next_entry;
5728 buf = &rx_q->buf_pool[entry];
5729
5730 p = stmmac_get_rx_desc(priv, rx_q, entry);
5731
5732 /* read the status of the incoming frame */
5733 status = stmmac_rx_status(priv, &priv->xstats, p);
5734 /* check if managed by the DMA otherwise go ahead */
5735 if (unlikely(status & dma_own))
5736 break;
5737
5738 next_entry = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
5739 priv->dma_conf.dma_rx_size);
5740 if (unlikely(next_entry == rx_q->dirty_rx))
5741 break;
5742
5743 rx_q->cur_rx = next_entry;
5744
5745 np = stmmac_get_rx_desc(priv, rx_q, next_entry);
5746
5747 prefetch(np);
5748
5749 if (priv->extend_desc)
5750 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5751 if (unlikely(status == discard_frame)) {
5752 page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5753 buf->page = NULL;
5754 error = 1;
5755 if (!priv->hwts_rx_en)
5756 rx_errors++;
5757 }
5758
5759 if (unlikely(error && (status & rx_not_ls)))
5760 goto read_again;
5761 if (unlikely(error)) {
5762 dev_kfree_skb(skb);
5763 skb = NULL;
5764 count++;
5765 continue;
5766 }
5767
5768 /* Buffer is good. Go on. */
5769
5770 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5771 len += buf1_len;
5772 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5773 len += buf2_len;
5774
5775 /* ACS is disabled; strip manually. */
5776 if (likely(!(status & rx_not_ls))) {
5777 if (buf2_len) {
5778 buf2_len -= ETH_FCS_LEN;
5779 len -= ETH_FCS_LEN;
5780 } else if (buf1_len) {
5781 buf1_len -= ETH_FCS_LEN;
5782 len -= ETH_FCS_LEN;
5783 }
5784 }
5785
5786 if (!skb) {
5787 unsigned int pre_len, sync_len;
5788
5789 dma_sync_single_for_cpu(priv->device, buf->addr,
5790 buf1_len, dma_dir);
5791 net_prefetch(page_address(buf->page) +
5792 buf->page_offset);
5793
5794 xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5795 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5796 buf->page_offset, buf1_len, true);
5797
5798 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5799 buf->page_offset;
5800
5801 ctx.priv = priv;
5802 ctx.desc = p;
5803 ctx.ndesc = np;
5804
5805 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5806 /* Due xdp_adjust_tail: DMA sync for_device
5807 * cover max len CPU touch
5808 */
5809 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5810 buf->page_offset;
5811 sync_len = max(sync_len, pre_len);
5812
5813 /* For Not XDP_PASS verdict */
5814 if (IS_ERR(skb)) {
5815 unsigned int xdp_res = -PTR_ERR(skb);
5816
5817 if (xdp_res & STMMAC_XDP_CONSUMED) {
5818 page_pool_put_page(rx_q->page_pool,
5819 virt_to_head_page(ctx.xdp.data),
5820 sync_len, true);
5821 buf->page = NULL;
5822 rx_dropped++;
5823
5824 /* Clear skb as it was set as
5825 * status by XDP program.
5826 */
5827 skb = NULL;
5828
5829 if (unlikely((status & rx_not_ls)))
5830 goto read_again;
5831
5832 count++;
5833 continue;
5834 } else if (xdp_res & (STMMAC_XDP_TX |
5835 STMMAC_XDP_REDIRECT)) {
5836 xdp_status |= xdp_res;
5837 buf->page = NULL;
5838 skb = NULL;
5839 count++;
5840 continue;
5841 }
5842 }
5843 }
5844
5845 if (!skb) {
5846 unsigned int head_pad_len;
5847
5848 /* XDP program may expand or reduce tail */
5849 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5850
5851 skb = napi_build_skb(page_address(buf->page),
5852 rx_q->napi_skb_frag_size);
5853 if (!skb) {
5854 page_pool_recycle_direct(rx_q->page_pool,
5855 buf->page);
5856 rx_dropped++;
5857 count++;
5858 goto drain_data;
5859 }
5860
5861 /* XDP program may adjust header */
5862 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5863 skb_reserve(skb, head_pad_len);
5864 skb_put(skb, buf1_len);
5865 skb_mark_for_recycle(skb);
5866 buf->page = NULL;
5867 } else if (buf1_len) {
5868 dma_sync_single_for_cpu(priv->device, buf->addr,
5869 buf1_len, dma_dir);
5870 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5871 buf->page, buf->page_offset, buf1_len,
5872 priv->dma_conf.dma_buf_sz);
5873 buf->page = NULL;
5874 }
5875
5876 if (buf2_len) {
5877 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5878 buf2_len, dma_dir);
5879 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5880 buf->sec_page, 0, buf2_len,
5881 priv->dma_conf.dma_buf_sz);
5882 buf->sec_page = NULL;
5883 }
5884
5885 drain_data:
5886 if (likely(status & rx_not_ls))
5887 goto read_again;
5888 if (!skb)
5889 continue;
5890
5891 /* Got entire packet into SKB. Finish it. */
5892
5893 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5894
5895 if (priv->hw->hw_vlan_en)
5896 /* MAC level stripping. */
5897 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5898 else
5899 /* Driver level stripping. */
5900 stmmac_rx_vlan(priv->dev, skb);
5901
5902 skb->protocol = eth_type_trans(skb, priv->dev);
5903
5904 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5905 (status & csum_none))
5906 skb_checksum_none_assert(skb);
5907 else
5908 skb->ip_summed = CHECKSUM_UNNECESSARY;
5909
5910 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5911 skb_set_hash(skb, hash, hash_type);
5912
5913 skb_record_rx_queue(skb, queue);
5914 napi_gro_receive(&ch->rx_napi, skb);
5915 skb = NULL;
5916
5917 rx_packets++;
5918 rx_bytes += len;
5919 count++;
5920 }
5921
5922 if (status & rx_not_ls || skb) {
5923 rx_q->state_saved = true;
5924 rx_q->state.skb = skb;
5925 rx_q->state.error = error;
5926 rx_q->state.len = len;
5927 }
5928
5929 stmmac_finalize_xdp_rx(priv, xdp_status);
5930
5931 stmmac_rx_refill(priv, queue);
5932
5933 u64_stats_update_begin(&rxq_stats->napi_syncp);
5934 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5935 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5936 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5937 u64_stats_update_end(&rxq_stats->napi_syncp);
5938
5939 priv->xstats.rx_dropped += rx_dropped;
5940 priv->xstats.rx_errors += rx_errors;
5941
5942 return count;
5943 }
5944
stmmac_napi_poll_rx(struct napi_struct * napi,int budget)5945 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5946 {
5947 struct stmmac_channel *ch =
5948 container_of(napi, struct stmmac_channel, rx_napi);
5949 struct stmmac_priv *priv = ch->priv_data;
5950 struct stmmac_rxq_stats *rxq_stats;
5951 u32 chan = ch->index;
5952 int work_done;
5953
5954 rxq_stats = &priv->xstats.rxq_stats[chan];
5955 u64_stats_update_begin(&rxq_stats->napi_syncp);
5956 u64_stats_inc(&rxq_stats->napi.poll);
5957 u64_stats_update_end(&rxq_stats->napi_syncp);
5958
5959 work_done = stmmac_rx(priv, budget, chan);
5960 if (work_done < budget && napi_complete_done(napi, work_done)) {
5961 unsigned long flags;
5962
5963 spin_lock_irqsave(&ch->lock, flags);
5964 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5965 spin_unlock_irqrestore(&ch->lock, flags);
5966 }
5967
5968 return work_done;
5969 }
5970
stmmac_napi_poll_tx(struct napi_struct * napi,int budget)5971 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5972 {
5973 struct stmmac_channel *ch =
5974 container_of(napi, struct stmmac_channel, tx_napi);
5975 struct stmmac_priv *priv = ch->priv_data;
5976 struct stmmac_txq_stats *txq_stats;
5977 bool pending_packets = false;
5978 u32 chan = ch->index;
5979 int work_done;
5980
5981 txq_stats = &priv->xstats.txq_stats[chan];
5982 u64_stats_update_begin(&txq_stats->napi_syncp);
5983 u64_stats_inc(&txq_stats->napi.poll);
5984 u64_stats_update_end(&txq_stats->napi_syncp);
5985
5986 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5987 work_done = min(work_done, budget);
5988
5989 if (work_done < budget && napi_complete_done(napi, work_done)) {
5990 unsigned long flags;
5991
5992 spin_lock_irqsave(&ch->lock, flags);
5993 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5994 spin_unlock_irqrestore(&ch->lock, flags);
5995 }
5996
5997 /* TX still have packet to handle, check if we need to arm tx timer */
5998 if (pending_packets)
5999 stmmac_tx_timer_arm(priv, chan);
6000
6001 return work_done;
6002 }
6003
/* NAPI poll handler for a combined RX/TX channel: cleans the TX ring,
 * services RX via stmmac_rx_zc(), and re-enables both DMA interrupts
 * only when both directions report less work than @budget.
 */
static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rxtx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	bool tx_pending_packets = false;
	int rx_done, tx_done, rxtx_done;
	struct stmmac_rxq_stats *rxq_stats;
	struct stmmac_txq_stats *txq_stats;
	u32 chan = ch->index;

	/* Account one poll invocation in both the RX and TX queue stats */
	rxq_stats = &priv->xstats.rxq_stats[chan];
	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_inc(&rxq_stats->napi.poll);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	txq_stats = &priv->xstats.txq_stats[chan];
	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_inc(&txq_stats->napi.poll);
	u64_stats_update_end(&txq_stats->napi_syncp);

	/* Clamp TX work so the value reported to NAPI stays <= budget */
	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
	tx_done = min(tx_done, budget);

	rx_done = stmmac_rx_zc(priv, budget, chan);

	rxtx_done = max(tx_done, rx_done);

	/* If either TX or RX work is not complete, return budget
	 * and keep polling
	 */
	if (rxtx_done >= budget)
		return budget;

	/* all work done, exit the polling mode */
	if (napi_complete_done(napi, rxtx_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		/* Both RX and TX work done are complete,
		 * so enable both RX & TX IRQs.
		 */
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* TX still have packet to handle, check if we need to arm tx timer */
	if (tx_pending_packets)
		stmmac_tx_timer_arm(priv, chan);

	/* NAPI contract: a completed poll must report strictly < budget */
	return min(rxtx_done, budget - 1);
}
6056
6057 /**
6058 * stmmac_tx_timeout
6059 * @dev : Pointer to net device structure
6060 * @txqueue: the index of the hanging transmit queue
6061 * Description: this function is called when a packet transmission fails to
6062 * complete within a reasonable time. The driver will mark the error in the
6063 * netdev structure and arrange for the device to be reset to a sane state
6064 * in order to transmit a new packet.
6065 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	/* Regardless of which queue hung, escalate to the driver's
	 * global error handler.
	 */
	stmmac_global_err(netdev_priv(dev));
}
6072
6073 /**
6074 * stmmac_set_rx_mode - entry point for multicast addressing
6075 * @dev : pointer to the device structure
6076 * Description:
6077 * This function is a driver entry point which gets called by the kernel
6078 * whenever multicast addresses must be enabled/disabled.
6079 * Return value:
6080 * void.
6081 *
6082 * FIXME: This may need RXC to be running, but it may be called with BH
6083 * disabled, which means we can't call phylink_rx_clk_stop*().
6084 */
stmmac_set_rx_mode(struct net_device * dev)6085 static void stmmac_set_rx_mode(struct net_device *dev)
6086 {
6087 struct stmmac_priv *priv = netdev_priv(dev);
6088
6089 stmmac_set_filter(priv, priv->hw, dev);
6090 }
6091
6092 /**
6093 * stmmac_change_mtu - entry point to change MTU size for the device.
6094 * @dev : device pointer.
6095 * @new_mtu : the new MTU size for the device.
6096 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
6097 * to drive packet transmission. Ethernet has an MTU of 1500 octets
6098 * (ETH_DATA_LEN). This value can be changed with ifconfig.
6099 * Return value:
6100 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6101 * file on failure.
6102 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int txfifosz = priv->plat->tx_fifo_size;
	struct stmmac_dma_conf *dma_conf;
	/* Keep the unrounded value: it is what gets stored in dev->mtu */
	const int mtu = new_mtu;
	int ret;

	/* Fall back to the FIFO size probed from the DMA capabilities when
	 * the platform did not provide one.
	 */
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* The TX FIFO is shared between all TX queues in use */
	txfifosz /= priv->plat->tx_queues_to_use;

	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
		return -EINVAL;
	}

	/* Round up for the buffer-size checks below */
	new_mtu = STMMAC_ALIGN(new_mtu);

	/* If condition true, FIFO is too small or MTU too large */
	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
		return -EINVAL;

	if (netif_running(dev)) {
		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
		/* Try to allocate the new DMA conf with the new mtu */
		dma_conf = stmmac_setup_dma_desc(priv, mtu);
		if (IS_ERR(dma_conf)) {
			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
				   mtu);
			return PTR_ERR(dma_conf);
		}

		/* Tear the interface down only after the new descriptor
		 * resources were allocated, so a failed allocation leaves
		 * the running setup untouched.
		 */
		__stmmac_release(dev);

		ret = __stmmac_open(dev, dma_conf);
		if (ret) {
			free_dma_desc_resources(priv, dma_conf);
			kfree(dma_conf);
			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
			return ret;
		}

		/* Only the container is freed here; __stmmac_open() took
		 * over the descriptor resources themselves.
		 */
		kfree(dma_conf);

		stmmac_set_rx_mode(dev);
	}

	WRITE_ONCE(dev->mtu, mtu);
	netdev_update_features(dev);

	return 0;
}
6157
/* ndo_fix_features: mask out offload features that the hardware or the
 * current configuration cannot actually provide.
 */
static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* No RX checksum engine -> no NETIF_F_RXCSUM */
	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	/* TX checksum insertion is dropped when the engine is absent, or
	 * on GMAC variants with a bugged Jumbo frame support that needs
	 * Tx COE disabled for oversized frames (due to limited buffer
	 * sizes); in that case the TDES csum insertion is not used.
	 */
	if (!priv->plat->tx_coe ||
	    (priv->plat->bugged_jumbo && dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	return features;
}
6179
/* ndo_set_features: propagate netdev feature changes into the MAC.
 * Updates RX checksum offload, split-header enable, GSO type
 * advertisement and MAC-level VLAN stripping.
 */
static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type in case of csum is supporting */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	if (priv->sph_capable) {
		/* Split header is only enabled together with RX checksum */
		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
		u8 chan;

		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	stmmac_set_gso_types(priv, features & NETIF_F_TSO);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		priv->hw->hw_vlan_en = true;
	else
		priv->hw->hw_vlan_en = false;

	/* Block phylink's RX clock-stop while reprogramming the MAC's
	 * VLAN mode register.
	 */
	phylink_rx_clk_stop_block(priv->phylink);
	stmmac_set_hw_vlan_mode(priv, priv->hw);
	phylink_rx_clk_stop_unblock(priv->phylink);

	return 0;
}
6216
/* Handle interrupt sources shared between the MAC core and the
 * management blocks: wake-up, EST, FPE, LPI state, per-queue MTL
 * events and the timestamp engine.
 */
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
	u8 rx_cnt = priv->plat->rx_queues_to_use;
	u8 tx_cnt = priv->plat->tx_queues_to_use;
	u8 queues_count;
	bool xmac;
	u8 queue;

	xmac = dwmac_is_xmac(priv->plat->core_type);
	/* MTL status must be polled for every queue in use, RX or TX */
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	/* Record the wakeup if this IRQ woke the system */
	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	if (priv->dma_cap.estsel)
		stmmac_est_irq_status(priv, priv, priv->dev,
				      &priv->xstats, tx_cnt);

	if (stmmac_fpe_supported(priv))
		stmmac_fpe_irq_status(priv);

	/* To handle GMAC own interrupts */
	if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
		int status = stmmac_host_irq_status(priv, &priv->xstats);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		for (queue = 0; queue < queues_count; queue++)
			stmmac_host_mtl_irq_status(priv, priv->hw, queue);

		stmmac_timestamp_interrupt(priv, priv);
	}
}
6256
6257 /**
6258 * stmmac_interrupt - main ISR
6259 * @irq: interrupt number.
6260 * @dev_id: to pass the net device pointer.
6261 * Description: this is the main driver interrupt service routine.
6262 * It can call:
6263 * o DMA service routine (to manage incoming frame reception and transmission
6264 * status)
6265 * o Core interrupts to manage: remote wake-up, management counter, LPI
6266 * interrupts.
6267 */
stmmac_interrupt(int irq,void * dev_id)6268 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6269 {
6270 struct net_device *dev = (struct net_device *)dev_id;
6271 struct stmmac_priv *priv = netdev_priv(dev);
6272
6273 /* Check if adapter is up */
6274 if (test_bit(STMMAC_DOWN, &priv->state))
6275 return IRQ_HANDLED;
6276
6277 /* Check ASP error if it isn't delivered via an individual IRQ */
6278 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6279 return IRQ_HANDLED;
6280
6281 /* To handle Common interrupts */
6282 stmmac_common_interrupt(priv);
6283
6284 /* To handle DMA interrupts */
6285 stmmac_dma_interrupt(priv);
6286
6287 return IRQ_HANDLED;
6288 }
6289
stmmac_mac_interrupt(int irq,void * dev_id)6290 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6291 {
6292 struct net_device *dev = (struct net_device *)dev_id;
6293 struct stmmac_priv *priv = netdev_priv(dev);
6294
6295 /* Check if adapter is up */
6296 if (test_bit(STMMAC_DOWN, &priv->state))
6297 return IRQ_HANDLED;
6298
6299 /* To handle Common interrupts */
6300 stmmac_common_interrupt(priv);
6301
6302 return IRQ_HANDLED;
6303 }
6304
stmmac_safety_interrupt(int irq,void * dev_id)6305 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6306 {
6307 struct net_device *dev = (struct net_device *)dev_id;
6308 struct stmmac_priv *priv = netdev_priv(dev);
6309
6310 /* Check if adapter is up */
6311 if (test_bit(STMMAC_DOWN, &priv->state))
6312 return IRQ_HANDLED;
6313
6314 /* Check if a fatal error happened */
6315 stmmac_safety_feat_interrupt(priv);
6316
6317 return IRQ_HANDLED;
6318 }
6319
stmmac_msi_intr_tx(int irq,void * data)6320 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6321 {
6322 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6323 struct stmmac_dma_conf *dma_conf;
6324 int chan = tx_q->queue_index;
6325 struct stmmac_priv *priv;
6326 int status;
6327
6328 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6329 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6330
6331 /* Check if adapter is up */
6332 if (test_bit(STMMAC_DOWN, &priv->state))
6333 return IRQ_HANDLED;
6334
6335 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6336
6337 if (unlikely(status & tx_hard_error_bump_tc)) {
6338 /* Try to bump up the dma threshold on this failure */
6339 stmmac_bump_dma_threshold(priv, chan);
6340 } else if (unlikely(status == tx_hard_error)) {
6341 stmmac_tx_err(priv, chan);
6342 }
6343
6344 return IRQ_HANDLED;
6345 }
6346
stmmac_msi_intr_rx(int irq,void * data)6347 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6348 {
6349 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6350 struct stmmac_dma_conf *dma_conf;
6351 int chan = rx_q->queue_index;
6352 struct stmmac_priv *priv;
6353
6354 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6355 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6356
6357 /* Check if adapter is up */
6358 if (test_bit(STMMAC_DOWN, &priv->state))
6359 return IRQ_HANDLED;
6360
6361 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6362
6363 return IRQ_HANDLED;
6364 }
6365
6366 /**
6367 * stmmac_ioctl - Entry point for the Ioctl
6368 * @dev: Device pointer.
6369 * @rq: An IOCTL specific structure, that can contain a pointer to
6370 * a proprietary structure used to pass information to the driver.
6371 * @cmd: IOCTL command
6372 * Description:
6373 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6374 */
stmmac_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)6375 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6376 {
6377 struct stmmac_priv *priv = netdev_priv (dev);
6378 int ret = -EOPNOTSUPP;
6379
6380 if (!netif_running(dev))
6381 return -EINVAL;
6382
6383 switch (cmd) {
6384 case SIOCGMIIPHY:
6385 case SIOCGMIIREG:
6386 case SIOCSMIIREG:
6387 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6388 break;
6389 default:
6390 break;
6391 }
6392
6393 return ret;
6394 }
6395
stmmac_setup_tc_block_cb(enum tc_setup_type type,void * type_data,void * cb_priv)6396 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6397 void *cb_priv)
6398 {
6399 struct stmmac_priv *priv = cb_priv;
6400 int ret = -EOPNOTSUPP;
6401
6402 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6403 return ret;
6404
6405 __stmmac_disable_all_queues(priv);
6406
6407 switch (type) {
6408 case TC_SETUP_CLSU32:
6409 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6410 break;
6411 case TC_SETUP_CLSFLOWER:
6412 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6413 break;
6414 default:
6415 break;
6416 }
6417
6418 stmmac_enable_all_queues(priv);
6419 return ret;
6420 }
6421
/* Driver-wide list of flow_block callbacks, shared with
 * flow_block_cb_setup_simple() below.
 */
static LIST_HEAD(stmmac_block_cb_list);
6423
stmmac_setup_tc(struct net_device * ndev,enum tc_setup_type type,void * type_data)6424 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6425 void *type_data)
6426 {
6427 struct stmmac_priv *priv = netdev_priv(ndev);
6428
6429 switch (type) {
6430 case TC_QUERY_CAPS:
6431 return stmmac_tc_query_caps(priv, priv, type_data);
6432 case TC_SETUP_QDISC_MQPRIO:
6433 return stmmac_tc_setup_mqprio(priv, priv, type_data);
6434 case TC_SETUP_BLOCK:
6435 return flow_block_cb_setup_simple(type_data,
6436 &stmmac_block_cb_list,
6437 stmmac_setup_tc_block_cb,
6438 priv, priv, true);
6439 case TC_SETUP_QDISC_CBS:
6440 return stmmac_tc_setup_cbs(priv, priv, type_data);
6441 case TC_SETUP_QDISC_TAPRIO:
6442 return stmmac_tc_setup_taprio(priv, priv, type_data);
6443 case TC_SETUP_QDISC_ETF:
6444 return stmmac_tc_setup_etf(priv, priv, type_data);
6445 default:
6446 return -EOPNOTSUPP;
6447 }
6448 }
6449
stmmac_select_queue(struct net_device * dev,struct sk_buff * skb,struct net_device * sb_dev)6450 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6451 struct net_device *sb_dev)
6452 {
6453 int gso = skb_shinfo(skb)->gso_type;
6454
6455 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6456 /*
6457 * There is no way to determine the number of TSO/USO
6458 * capable Queues. Let's use always the Queue 0
6459 * because if TSO/USO is supported then at least this
6460 * one will be capable.
6461 */
6462 return 0;
6463 }
6464
6465 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6466 }
6467
stmmac_set_mac_address(struct net_device * ndev,void * addr)6468 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6469 {
6470 struct stmmac_priv *priv = netdev_priv(ndev);
6471 int ret = 0;
6472
6473 ret = pm_runtime_resume_and_get(priv->device);
6474 if (ret < 0)
6475 return ret;
6476
6477 ret = eth_mac_addr(ndev, addr);
6478 if (ret)
6479 goto set_mac_error;
6480
6481 phylink_rx_clk_stop_block(priv->phylink);
6482 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6483 phylink_rx_clk_stop_unblock(priv->phylink);
6484
6485 set_mac_error:
6486 pm_runtime_put(priv->device);
6487
6488 return ret;
6489 }
6490
6491 #ifdef CONFIG_DEBUG_FS
6492 static struct dentry *stmmac_fs_dir;
6493
/* Dump @size descriptors starting at @head to @seq, one line each,
 * printing the descriptor's DMA address and its four words.
 * @extend_desc selects between the basic and extended ring layout.
 */
static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	unsigned int desc_size = extend_desc ? sizeof(struct dma_extended_desc)
					     : sizeof(struct dma_desc);
	int i;

	for (i = 0; i < size; i++) {
		dma_addr_t dma_addr = dma_phy_addr + i * desc_size;
		struct dma_desc *p;

		/* For extended rings, print the embedded basic descriptor */
		if (extend_desc)
			p = &(((struct dma_extended_desc *)head) + i)->basic;
		else
			p = ((struct dma_desc *)head) + i;

		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
			   i, &dma_addr,
			   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
			   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
	}
}
6516
/* debugfs "descriptors_status" show handler: dump the RX and TX DMA
 * descriptor rings of every queue in use.
 */
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 rx_count = priv->plat->rx_queues_to_use;
	u8 tx_count = priv->plat->tx_queues_to_use;
	u8 queue;

	/* Rings only exist while the interface is up */
	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			/* NOTE(review): TBS-capable rings are skipped here —
			 * presumably sysfs_display_ring() does not handle
			 * their layout; confirm before extending.
			 */
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6563
/* debugfs "dma_cap" show handler: print the hardware capabilities
 * parsed into priv->dma_cap in human-readable form.
 */
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	/* Indexed by priv->dma_cap.tssrc (XGMAC only) */
	static const char * const dwxgmac_timestamp_source[] = {
		"None",
		"Internal",
		"External",
		"Both",
	};
	/* Indexed by priv->dma_cap.asp */
	static const char * const dwxgmac_safety_feature_desc[] = {
		"No",
		"All Safety Features with ECC and Parity",
		"All Safety Features without ECC or Parity",
		"All Safety Features with Parity Only",
		"ECC Only",
		"UNDEFINED",
		"UNDEFINED",
		"UNDEFINED",
	};
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Nothing to report without the HW capability register */
	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
		seq_printf(seq,
			   "\tNumber of Additional MAC address registers: %d\n",
			   priv->dma_cap.multi_addr);
	} else {
		seq_printf(seq, "\tHash Filter: %s\n",
			   (priv->dma_cap.hash_filter) ? "Y" : "N");
		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
			   (priv->dma_cap.multi_addr) ? "Y" : "N");
	}
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	if (priv->plat->core_type == DWMAC_CORE_XGMAC)
		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
	    priv->plat->core_type == DWMAC_CORE_XGMAC) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	}
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %u\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %u\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.host_dma_width);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
		   priv->dma_cap.tbs_ch_num);
	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
		   priv->dma_cap.sgfsel ? "Y" : "N");
	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
		   BIT(priv->dma_cap.ttsfd) >> 1);
	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
		   priv->dma_cap.numtc);
	seq_printf(seq, "\tDCB Feature: %s\n",
		   priv->dma_cap.dcben ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
		   priv->dma_cap.advthword ? "Y" : "N");
	seq_printf(seq, "\tPTP Offload: %s\n",
		   priv->dma_cap.ptoen ? "Y" : "N");
	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
		   priv->dma_cap.osten ? "Y" : "N");
	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
		   priv->dma_cap.pfcen ? "Y" : "N");
	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
		   BIT(priv->dma_cap.frpes) << 6);
	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
		   BIT(priv->dma_cap.frpbs) << 6);
	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
		   priv->dma_cap.frppipe_num);
	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
		   priv->dma_cap.nrvf_num ?
		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
	seq_printf(seq, "\tDepth of GCL: %lu\n",
		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
		   priv->dma_cap.cbtisel ? "Y" : "N");
	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
		   priv->dma_cap.aux_snapshot_n);
	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
		   priv->dma_cap.pou_ost_en ? "Y" : "N");
	seq_printf(seq, "\tEnhanced DMA: %s\n",
		   priv->dma_cap.edma ? "Y" : "N");
	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
		   priv->dma_cap.ediffc ? "Y" : "N");
	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
		   priv->dma_cap.vxn ? "Y" : "N");
	seq_printf(seq, "\tDebug Memory Interface: %s\n",
		   priv->dma_cap.dbgmem ? "Y" : "N");
	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6737
6738 /* Use network device events to rename debugfs file entries.
6739 */
stmmac_device_event(struct notifier_block * unused,unsigned long event,void * ptr)6740 static int stmmac_device_event(struct notifier_block *unused,
6741 unsigned long event, void *ptr)
6742 {
6743 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6744 struct stmmac_priv *priv = netdev_priv(dev);
6745
6746 if (dev->netdev_ops != &stmmac_netdev_ops)
6747 goto done;
6748
6749 switch (event) {
6750 case NETDEV_CHANGENAME:
6751 debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6752 break;
6753 }
6754 done:
6755 return NOTIFY_DONE;
6756 }
6757
/* Netdev notifier used to track NETDEV_CHANGENAME for debugfs renaming */
static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};
6761
/* Create the per-netdev debugfs directory and its entries.
 * RTNL is held so a concurrent interface rename (handled by
 * stmmac_device_event()) cannot race with directory creation.
 */
static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}
6781
/* Remove the per-netdev debugfs directory together with all its entries */
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
6788 #endif /* CONFIG_DEBUG_FS */
6789
stmmac_vid_crc32_le(__le16 vid_le)6790 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6791 {
6792 unsigned char *data = (unsigned char *)&vid_le;
6793 unsigned char data_byte = 0;
6794 u32 crc = ~0x0;
6795 u32 temp = 0;
6796 int i, bits;
6797
6798 bits = get_bitmask_order(VLAN_VID_MASK);
6799 for (i = 0; i < bits; i++) {
6800 if ((i % 8) == 0)
6801 data_byte = data[i / 8];
6802
6803 temp = ((crc & 1) ^ data_byte) & 1;
6804 crc >>= 1;
6805 data_byte >>= 1;
6806
6807 if (temp)
6808 crc ^= 0xedb88320;
6809 }
6810
6811 return crc;
6812 }
6813
/* Recompute and program the RX VLAN filter from priv->active_vlans.
 * With HW VLAN hash support, build a 16-bin hash from the 4 MSBs of the
 * bit-reversed CRC of each VID.  Without it, fall back to a single
 * perfect-match entry, which only works for at most one non-zero VID.
 */
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	u16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		/* vid is left holding the last VID visited by the loop */
		pmatch = vid;
		hash = 0;
	}

	/* Defer programming to stmmac_vlan_restore() when the interface
	 * is down; the software state above is already updated.
	 */
	if (!netif_running(priv->dev))
		return 0;

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
6841
6842 /* FIXME: This may need RXC to be running, but it may be called with BH
6843 * disabled, which means we can't call phylink_rx_clk_stop*().
6844 */
/* .ndo_vlan_rx_add_vid: register @vid (C-VLAN or 802.1ad S-VLAN) with the
 * RX filter.  On any HW programming failure the software state (active_vlans
 * bitmap and double-VLAN count) is rolled back to its previous value.
 */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned int num_double_vlans;
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	/* 802.1ad entries are counted so double-VLAN mode is enabled only
	 * while at least one S-VLAN is registered.
	 */
	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	num_double_vlans = priv->num_double_vlans + is_double;
	ret = stmmac_vlan_update(priv, num_double_vlans);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		goto err_pm_put;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret) {
			/* Undo the bitmap change and reprogram the old hash */
			clear_bit(vid, priv->active_vlans);
			stmmac_vlan_update(priv, priv->num_double_vlans);
			goto err_pm_put;
		}
	}

	/* Commit the new double-VLAN count only after full success */
	priv->num_double_vlans = num_double_vlans;

err_pm_put:
	pm_runtime_put(priv->device);

	return ret;
}
6883
6884 /* FIXME: This may need RXC to be running, but it may be called with BH
6885 * disabled, which means we can't call phylink_rx_clk_stop*().
6886 */
/* .ndo_vlan_rx_kill_vid: remove @vid from the RX filter.  Mirrors
 * stmmac_vlan_rx_add_vid(): software state is restored if the HW update
 * fails at any step.
 */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned int num_double_vlans;
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);
	num_double_vlans = priv->num_double_vlans - is_double;
	ret = stmmac_vlan_update(priv, num_double_vlans);
	if (ret) {
		/* Keep the VID marked active: it is still in the filter */
		set_bit(vid, priv->active_vlans);
		goto del_vlan_error;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret) {
			set_bit(vid, priv->active_vlans);
			stmmac_vlan_update(priv, priv->num_double_vlans);
			goto del_vlan_error;
		}
	}

	/* Commit the new double-VLAN count only after full success */
	priv->num_double_vlans = num_double_vlans;

del_vlan_error:
	pm_runtime_put(priv->device);

	return ret;
}
6925
stmmac_vlan_restore(struct stmmac_priv * priv)6926 static void stmmac_vlan_restore(struct stmmac_priv *priv)
6927 {
6928 if (!(priv->dev->features & NETIF_F_VLAN_FEATURES))
6929 return;
6930
6931 if (priv->hw->num_vlan)
6932 stmmac_restore_hw_vlan_rx_fltr(priv, priv->dev, priv->hw);
6933
6934 stmmac_vlan_update(priv, priv->num_double_vlans);
6935 }
6936
stmmac_bpf(struct net_device * dev,struct netdev_bpf * bpf)6937 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6938 {
6939 struct stmmac_priv *priv = netdev_priv(dev);
6940
6941 switch (bpf->command) {
6942 case XDP_SETUP_PROG:
6943 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6944 case XDP_SETUP_XSK_POOL:
6945 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6946 bpf->xsk.queue_id);
6947 default:
6948 return -EOPNOTSUPP;
6949 }
6950 }
6951
/* .ndo_xdp_xmit: transmit a batch of XDP frames on the TX queue mapped to
 * the current CPU.  Returns the number of frames queued (which may be less
 * than @num_frames if the ring fills), or a negative errno.
 */
static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
			   struct xdp_frame **frames, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int i, nxmit = 0;
	int queue;

	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	/* The TX queue is shared with the regular stack path, so take the
	 * netdev TX lock for the whole batch.
	 */
	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	for (i = 0; i < num_frames; i++) {
		int res;

		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
		if (res == STMMAC_XDP_CONSUMED)
			break;

		nxmit++;
	}

	/* Kick the DMA only once per batch when the caller requests it */
	if (flags & XDP_XMIT_FLUSH) {
		stmmac_flush_tx_descriptors(priv, queue);
		stmmac_tx_timer_arm(priv, queue);
	}

	__netif_tx_unlock(nq);

	return nxmit;
}
6993
/* Quiesce one RX queue: mask its DMA interrupt, stop the RX DMA channel
 * and free its descriptor resources.  Counterpart of
 * stmmac_enable_rx_queue().
 */
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_rx_dma(priv, queue);
	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}
7006
/* Bring one RX queue back up: allocate and initialize its descriptor
 * ring, reprogram the DMA channel, start DMA and unmask its interrupt.
 * Errors are logged and the queue is left disabled.
 */
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
		return;
	}

	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
	if (ret) {
		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init RX desc.\n");
		return;
	}

	stmmac_reset_rx_queue(priv, queue);
	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    rx_q->dma_rx_phy, queue);

	/* Publish the number of buffers handed to HW via the tail pointer */
	stmmac_set_queue_rx_tail_ptr(priv, rx_q, queue, rx_q->buf_alloc_num);

	stmmac_set_queue_rx_buf_size(priv, rx_q, queue);

	stmmac_start_rx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);
}
7043
/* Quiesce one TX queue: mask its DMA interrupt, stop the TX DMA channel
 * and free its descriptor resources.  Counterpart of
 * stmmac_enable_tx_queue().
 */
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_tx_dma(priv, queue);
	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}
7056
/* Bring one TX queue back up: allocate and initialize its descriptor
 * ring, reprogram the DMA channel (including TBS if available), start
 * DMA and unmask its interrupt.  Errors are logged and the queue is
 * left disabled.
 */
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
		return;
	}

	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
	if (ret) {
		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init TX desc.\n");
		return;
	}

	stmmac_reset_tx_queue(priv, queue);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, queue);

	/* Re-enable time-based scheduling if this queue supports it */
	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		stmmac_enable_tbs(priv, priv->ioaddr, 1, queue);

	stmmac_set_queue_tx_tail_ptr(priv, tx_q, queue, 0);

	stmmac_start_tx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);
}
7094
/* Tear the datapath down for an XDP reconfiguration: stop TX, NAPI,
 * timers, IRQs and DMA, free descriptor resources and disable the MAC.
 * Counterpart of stmmac_xdp_open().
 */
void stmmac_xdp_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 chan;

	/* Ensure tx function is not running */
	netif_tx_disable(dev);

	/* Disable NAPI process */
	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA channels */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	/* set trans_start so we don't get spurious
	 * watchdogs during reset
	 */
	netif_trans_update(dev);
	netif_carrier_off(dev);
}
7127
/* Rebuild and restart the datapath after an XDP reconfiguration:
 * allocate/init descriptor rings, program all DMA channels, enable the
 * MAC, request IRQs and restart NAPI/TX.  Counterpart of
 * stmmac_xdp_release().  Returns 0 or a negative errno; on failure all
 * partially-acquired resources are released.
 */
int stmmac_xdp_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 rx_cnt = priv->plat->rx_queues_to_use;
	u8 tx_cnt = priv->plat->tx_queues_to_use;
	u8 dma_csr_ch = max(rx_cnt, tx_cnt);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	bool sph_en;
	u8 chan;
	int ret;

	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	stmmac_reset_queues_param(priv);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++) {
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
		/* IRQs stay masked until stmmac_enable_all_dma_irq() below */
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
	}

	/* Adjust Split header */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_cnt; chan++) {
		rx_q = &priv->dma_conf.rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		stmmac_set_queue_rx_tail_ptr(priv, rx_q, chan,
					     rx_q->buf_alloc_num);

		stmmac_set_queue_rx_buf_size(priv, rx_q, chan);

		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_cnt; chan++) {
		tx_q = &priv->dma_conf.tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		stmmac_set_queue_tx_tail_ptr(priv, tx_q, chan, 0);

		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Start Rx & Tx DMA Channels */
	stmmac_start_all_dma(priv);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	/* Enable NAPI process*/
	stmmac_enable_all_queues(priv);
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

irq_error:
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

init_error:
	free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
	return ret;
}
7219
/* .ndo_xsk_wakeup: kick the rxtx NAPI for an AF_XDP zero-copy queue so
 * pending XSK RX/TX work gets processed.  Returns -ENETDOWN/-EINVAL when
 * the device or queue cannot service the request.
 */
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	struct stmmac_channel *ch;

	if (test_bit(STMMAC_DOWN, &priv->state) ||
	    !netif_carrier_ok(priv->dev))
		return -ENETDOWN;

	if (!stmmac_xdp_is_enabled(priv))
		return -EINVAL;

	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	rx_q = &priv->dma_conf.rx_queue[queue];
	tx_q = &priv->dma_conf.tx_queue[queue];
	ch = &priv->channel[queue];

	/* The queue must be bound to an XSK pool on at least one side */
	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
		/* EQoS does not have per-DMA channel SW interrupt,
		 * so we schedule RX Napi straight-away.
		 */
		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
			__napi_schedule(&ch->rxtx_napi);
	}

	return 0;
}
7255
/* .ndo_get_stats64: aggregate per-queue packet/byte counters (read under
 * their u64_stats seqcount retry loops so 64-bit reads are consistent on
 * 32-bit hosts) and copy the error counters from xstats.
 */
static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 tx_cnt = priv->plat->tx_queues_to_use;
	u8 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int start;
	u8 q;

	for (q = 0; q < tx_cnt; q++) {
		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
		u64 tx_packets;
		u64 tx_bytes;

		/* tx_bytes is updated from the xmit path (q_syncp), while
		 * tx_packets is updated from NAPI completion (napi_syncp),
		 * hence the two separate retry loops.
		 */
		do {
			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
			tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
		do {
			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	for (q = 0; q < rx_cnt; q++) {
		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
		u64 rx_packets;
		u64 rx_bytes;

		do {
			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
			rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}

	stats->rx_dropped = priv->xstats.rx_dropped;
	stats->rx_errors = priv->xstats.rx_errors;
	stats->tx_dropped = priv->xstats.tx_dropped;
	stats->tx_errors = priv->xstats.tx_errors;
	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
	stats->rx_length_errors = priv->xstats.rx_length;
	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
}
7308
/* Netdev operations shared by all stmmac-based devices */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_features_check = stmmac_features_check,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_eth_ioctl = stmmac_ioctl,
	.ndo_get_stats64 = stmmac_get_stats64,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
	.ndo_bpf = stmmac_bpf,
	.ndo_xdp_xmit = stmmac_xdp_xmit,
	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
};
7332
/* Service-task subtask: when a reset has been requested (and the device
 * is not already down), bounce the interface with dev_close()/dev_open()
 * under RTNL to recover the adapter.
 */
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	/* Serialize against any other resetter of this device */
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}
7354
/* Deferred service work: currently only runs the reset subtask, then
 * clears the scheduled flag so the task can be queued again.
 */
static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
			service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}
7363
stmmac_print_actphyif(struct stmmac_priv * priv)7364 static void stmmac_print_actphyif(struct stmmac_priv *priv)
7365 {
7366 const char **phyif_table;
7367 const char *actphyif_str;
7368 size_t phyif_table_size;
7369
7370 switch (priv->plat->core_type) {
7371 case DWMAC_CORE_MAC100:
7372 return;
7373
7374 case DWMAC_CORE_GMAC:
7375 case DWMAC_CORE_GMAC4:
7376 phyif_table = stmmac_dwmac_actphyif;
7377 phyif_table_size = ARRAY_SIZE(stmmac_dwmac_actphyif);
7378 break;
7379
7380 case DWMAC_CORE_XGMAC:
7381 phyif_table = stmmac_dwxgmac_phyif;
7382 phyif_table_size = ARRAY_SIZE(stmmac_dwxgmac_phyif);
7383 break;
7384 }
7385
7386 if (priv->dma_cap.actphyif < phyif_table_size)
7387 actphyif_str = phyif_table[priv->dma_cap.actphyif];
7388 else
7389 actphyif_str = NULL;
7390
7391 if (!actphyif_str)
7392 actphyif_str = "unknown";
7393
7394 dev_info(priv->device, "Active PHY interface: %s (%u)\n",
7395 actphyif_str, priv->dma_cap.actphyif);
7396 }
7397
7398 /**
7399 * stmmac_hw_init - Init the MAC device
7400 * @priv: driver private structure
7401 * Description: this function is to configure the MAC device according to
7402 * some platform parameters or the HW capability register. It prepares the
7403 * driver to use either ring or chain modes and to setup either enhanced or
7404 * normal descriptors.
7405 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only work in chain mode */
	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
		chain_mode = 1;
	priv->chain_mode = !!chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
		if (priv->dma_cap.hash_tb_sz) {
			/* hash_tb_sz encodes the table size as a power of
			 * two: bins = 2^hash_tb_sz * 32
			 */
			priv->hw->multicast_filter_bins =
					(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
					ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = false;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

		stmmac_print_actphyif(priv);
	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
		devm_pm_set_wake_irq(priv->device, priv->wol_irq);
	}

	/* Clamp the platform queue/FIFO configuration to what the HW
	 * capability register says the core actually implements.
	 */
	if (priv->dma_cap.number_rx_queues &&
	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
		dev_warn(priv->device,
			 "Number of Rx queues (%u) exceeds dma capability\n",
			 priv->plat->rx_queues_to_use);
		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
	}
	if (priv->dma_cap.number_tx_queues &&
	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
		dev_warn(priv->device,
			 "Number of Tx queues (%u) exceeds dma capability\n",
			 priv->plat->tx_queues_to_use);
		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
	}

	if (priv->dma_cap.rx_fifo_size &&
	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
		dev_warn(priv->device,
			 "Rx FIFO size (%u) exceeds dma capability\n",
			 priv->plat->rx_fifo_size);
		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
	}
	if (priv->dma_cap.tx_fifo_size &&
	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
		dev_warn(priv->device,
			 "Tx FIFO size (%u) exceeds dma capability\n",
			 priv->plat->tx_fifo_size);
		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
	}

	priv->hw->vlan_fail_q_en =
		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Set alternate descriptor size (which tells the hardware that
	 * descriptors are 8 32-bit words) when using extended descriptors
	 * with ring mode. Only applicable for pre-v4.0 cores. Platform glue
	 * is not expected to change this.
	 */
	priv->plat->dma_cfg->atds = priv->extend_desc &&
				    priv->descriptor_mode == STMMAC_RING_MODE;

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disable and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50 ||
	     priv->plat->core_type == DWMAC_CORE_XGMAC) &&
	    !priv->plat->riwt_off) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	/* Unimplemented PCS init (as indicated by stmmac_do_callback()
	 * perversely returning -EINVAL) is non-fatal.
	 */
	ret = stmmac_mac_pcs_init(priv);
	if (ret != -EINVAL)
		return ret;

	return 0;
}
7545
stmmac_napi_add(struct net_device * dev)7546 static void stmmac_napi_add(struct net_device *dev)
7547 {
7548 struct stmmac_priv *priv = netdev_priv(dev);
7549 u8 queue, maxq;
7550
7551 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7552
7553 for (queue = 0; queue < maxq; queue++) {
7554 struct stmmac_channel *ch = &priv->channel[queue];
7555
7556 ch->priv_data = priv;
7557 ch->index = queue;
7558 spin_lock_init(&ch->lock);
7559
7560 if (queue < priv->plat->rx_queues_to_use) {
7561 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7562 }
7563 if (queue < priv->plat->tx_queues_to_use) {
7564 netif_napi_add_tx(dev, &ch->tx_napi,
7565 stmmac_napi_poll_tx);
7566 }
7567 if (queue < priv->plat->rx_queues_to_use &&
7568 queue < priv->plat->tx_queues_to_use) {
7569 netif_napi_add(dev, &ch->rxtx_napi,
7570 stmmac_napi_poll_rxtx);
7571 }
7572 }
7573 }
7574
stmmac_napi_del(struct net_device * dev)7575 static void stmmac_napi_del(struct net_device *dev)
7576 {
7577 struct stmmac_priv *priv = netdev_priv(dev);
7578 u8 queue, maxq;
7579
7580 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7581
7582 for (queue = 0; queue < maxq; queue++) {
7583 struct stmmac_channel *ch = &priv->channel[queue];
7584
7585 if (queue < priv->plat->rx_queues_to_use)
7586 netif_napi_del(&ch->rx_napi);
7587 if (queue < priv->plat->tx_queues_to_use)
7588 netif_napi_del(&ch->tx_napi);
7589 if (queue < priv->plat->rx_queues_to_use &&
7590 queue < priv->plat->tx_queues_to_use) {
7591 netif_napi_del(&ch->rxtx_napi);
7592 }
7593 }
7594 }
7595
/* Change the number of active RX/TX queues (ethtool -L): take the
 * interface down if needed, rebuild the NAPI contexts and default RSS
 * table for the new layout, and bring it back up.
 */
int stmmac_reinit_queues(struct net_device *dev, u8 rx_cnt, u8 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0, i;

	if (netif_running(dev))
		stmmac_release(dev);

	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;
	/* Only regenerate the RSS table if the user has not set one */
	if (!netif_is_rxfh_configured(dev))
		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
									rx_cnt);

	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
7620
/* Change the RX/TX descriptor ring sizes (ethtool -G), bouncing the
 * interface if it is currently up so the rings are re-allocated.
 */
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	bool was_running = netif_running(dev);

	if (was_running)
		stmmac_release(dev);

	priv->dma_conf.dma_rx_size = rx_size;
	priv->dma_conf.dma_tx_size = tx_size;

	return was_running ? stmmac_open(dev) : 0;
}
7637
/* XDP metadata op: fetch the HW RX timestamp for an XDP buffer.
 * Returns 0 with *timestamp set, or -ENODATA when RX timestamping is
 * disabled or no timestamp was captured for this packet.
 */
static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
	struct dma_desc *desc_contains_ts = ctx->desc;
	struct stmmac_priv *priv = ctx->priv;
	struct dma_desc *ndesc = ctx->ndesc;
	struct dma_desc *desc = ctx->desc;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return -ENODATA;

	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (dwmac_is_xmac(priv->plat->core_type))
		desc_contains_ts = ndesc;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
		/* Compensate the clock-domain-crossing error */
		ns -= priv->plat->cdc_error_adj;
		*timestamp = ns_to_ktime(ns);
		return 0;
	}

	return -ENODATA;
}
7664
/* XDP metadata kfuncs implemented by this driver (RX timestamp only) */
static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
};
7668
/* Devlink "phc_coarse_adj" set handler: switch the PTP timestamping
 * counter between fine (PTP_TCR_TSCFUPDT set) and coarse update mode and
 * reprogram the subsecond increment accordingly.
 */
static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
	struct stmmac_priv *priv = dl_priv->stmmac_priv;

	priv->tsfupdt_coarse = ctx->val.vbool;

	if (priv->tsfupdt_coarse)
		priv->systime_flags &= ~PTP_TCR_TSCFUPDT;
	else
		priv->systime_flags |= PTP_TCR_TSCFUPDT;

	/* In Coarse mode, we can use a smaller subsecond increment, let's
	 * reconfigure the systime, subsecond increment and addend.
	 */
	stmmac_update_subsecond_increment(priv);

	return 0;
}
7690
/* Devlink "phc_coarse_adj" get handler: report the current update mode */
static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
	struct stmmac_priv *priv = dl_priv->stmmac_priv;

	ctx->val.vbool = priv->tsfupdt_coarse;

	return 0;
}
7702
/* Driver-specific devlink runtime parameters (PTP coarse adjust mode) */
static const struct devlink_param stmmac_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "phc_coarse_adj",
			     DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     stmmac_dl_ts_coarse_get,
			     stmmac_dl_ts_coarse_set, NULL),
};

/* None of the generic devlink parameters are implemented */
static const struct devlink_ops stmmac_devlink_ops = {};
7713
stmmac_register_devlink(struct stmmac_priv * priv)7714 static int stmmac_register_devlink(struct stmmac_priv *priv)
7715 {
7716 struct stmmac_devlink_priv *dl_priv;
7717 int ret;
7718
7719 /* For now, what is exposed over devlink is only relevant when
7720 * timestamping is available and we have a valid ptp clock rate
7721 */
7722 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) ||
7723 !priv->plat->clk_ptp_rate)
7724 return 0;
7725
7726 priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv),
7727 priv->device);
7728 if (!priv->devlink)
7729 return -ENOMEM;
7730
7731 dl_priv = devlink_priv(priv->devlink);
7732 dl_priv->stmmac_priv = priv;
7733
7734 ret = devlink_params_register(priv->devlink, stmmac_devlink_params,
7735 ARRAY_SIZE(stmmac_devlink_params));
7736 if (ret)
7737 goto dl_free;
7738
7739 devlink_register(priv->devlink);
7740 return 0;
7741
7742 dl_free:
7743 devlink_free(priv->devlink);
7744
7745 return ret;
7746 }
7747
stmmac_unregister_devlink(struct stmmac_priv * priv)7748 static void stmmac_unregister_devlink(struct stmmac_priv *priv)
7749 {
7750 if (!priv->devlink)
7751 return;
7752
7753 devlink_unregister(priv->devlink);
7754 devlink_params_unregister(priv->devlink, stmmac_devlink_params,
7755 ARRAY_SIZE(stmmac_devlink_params));
7756 devlink_free(priv->devlink);
7757 }
7758
stmmac_plat_dat_alloc(struct device * dev)7759 struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev)
7760 {
7761 struct plat_stmmacenet_data *plat_dat;
7762 int i;
7763
7764 plat_dat = devm_kzalloc(dev, sizeof(*plat_dat), GFP_KERNEL);
7765 if (!plat_dat)
7766 return NULL;
7767
7768 plat_dat->dma_cfg = &plat_dat->__dma_cfg;
7769
7770 /* Set the defaults:
7771 * - phy autodetection
7772 * - determine GMII_Address CR field from CSR clock
7773 * - allow MTU up to JUMBO_LEN
7774 * - hash table size
7775 * - one unicast filter entry
7776 */
7777 plat_dat->phy_addr = -1;
7778 plat_dat->clk_csr = -1;
7779 plat_dat->maxmtu = JUMBO_LEN;
7780 plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
7781 plat_dat->unicast_filter_entries = 1;
7782
7783 /* Set the mtl defaults */
7784 plat_dat->tx_queues_to_use = 1;
7785 plat_dat->rx_queues_to_use = 1;
7786
7787 /* Setup the default RX queue channel map */
7788 for (i = 0; i < ARRAY_SIZE(plat_dat->rx_queues_cfg); i++)
7789 plat_dat->rx_queues_cfg[i].chan = i;
7790
7791 return plat_dat;
7792 }
7793 EXPORT_SYMBOL_GPL(stmmac_plat_dat_alloc);
7794
__stmmac_dvr_probe(struct device * device,struct plat_stmmacenet_data * plat_dat,struct stmmac_resources * res)7795 static int __stmmac_dvr_probe(struct device *device,
7796 struct plat_stmmacenet_data *plat_dat,
7797 struct stmmac_resources *res)
7798 {
7799 struct net_device *ndev = NULL;
7800 struct stmmac_priv *priv;
7801 int i, ret = 0;
7802 u8 rxq;
7803
7804 if (!plat_dat->dma_cfg || !plat_dat->dma_cfg->pbl) {
7805 dev_err(device, "invalid DMA configuration\n");
7806 return -EINVAL;
7807 }
7808
7809 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7810 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7811 if (!ndev)
7812 return -ENOMEM;
7813
7814 SET_NETDEV_DEV(ndev, device);
7815
7816 priv = netdev_priv(ndev);
7817 priv->device = device;
7818 priv->dev = ndev;
7819
7820 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7821 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7822 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7823 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7824 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7825 }
7826
7827 priv->xstats.pcpu_stats =
7828 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7829 if (!priv->xstats.pcpu_stats)
7830 return -ENOMEM;
7831
7832 stmmac_set_ethtool_ops(ndev);
7833 priv->pause_time = pause;
7834 priv->plat = plat_dat;
7835 priv->ioaddr = res->addr;
7836 priv->dev->base_addr = (unsigned long)res->addr;
7837 priv->plat->dma_cfg->multi_msi_en =
7838 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7839
7840 priv->dev->irq = res->irq;
7841 priv->wol_irq = res->wol_irq;
7842 priv->sfty_irq = res->sfty_irq;
7843
7844 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
7845 ret = stmmac_msi_init(priv, res);
7846 if (ret)
7847 return ret;
7848 }
7849
7850 if (!is_zero_ether_addr(res->mac))
7851 eth_hw_addr_set(priv->dev, res->mac);
7852
7853 dev_set_drvdata(device, priv->dev);
7854
7855 /* Verify driver arguments */
7856 stmmac_verify_args();
7857
7858 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7859 if (!priv->af_xdp_zc_qps)
7860 return -ENOMEM;
7861
7862 /* Allocate workqueue */
7863 priv->wq = create_singlethread_workqueue("stmmac_wq");
7864 if (!priv->wq) {
7865 dev_err(priv->device, "failed to create workqueue\n");
7866 ret = -ENOMEM;
7867 goto error_wq_init;
7868 }
7869
7870 INIT_WORK(&priv->service_task, stmmac_service_task);
7871
7872 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
7873
7874 /* Override with kernel parameters if supplied XXX CRS XXX
7875 * this needs to have multiple instances
7876 */
7877 if ((phyaddr >= 0) && (phyaddr <= 31))
7878 priv->plat->phy_addr = phyaddr;
7879
7880 if (priv->plat->stmmac_rst) {
7881 ret = reset_control_assert(priv->plat->stmmac_rst);
7882 reset_control_deassert(priv->plat->stmmac_rst);
7883 /* Some reset controllers have only reset callback instead of
7884 * assert + deassert callbacks pair.
7885 */
7886 if (ret == -ENOTSUPP)
7887 reset_control_reset(priv->plat->stmmac_rst);
7888 }
7889
7890 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7891 if (ret == -ENOTSUPP)
7892 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7893 ERR_PTR(ret));
7894
7895 /* Wait a bit for the reset to take effect */
7896 udelay(10);
7897
7898 /* Init MAC and get the capabilities */
7899 ret = stmmac_hw_init(priv);
7900 if (ret)
7901 goto error_hw_init;
7902
7903 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7904 */
7905 if (priv->synopsys_id < DWMAC_CORE_5_20)
7906 priv->plat->dma_cfg->dche = false;
7907
7908 stmmac_check_ether_addr(priv);
7909
7910 ndev->netdev_ops = &stmmac_netdev_ops;
7911
7912 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7913 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7914
7915 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7916 NETIF_F_RXCSUM;
7917 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7918 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7919
7920 ret = stmmac_tc_init(priv, priv);
7921 if (!ret) {
7922 ndev->hw_features |= NETIF_F_HW_TC;
7923 }
7924
7925 stmmac_set_gso_features(ndev);
7926
7927 if (priv->dma_cap.sphen &&
7928 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7929 ndev->hw_features |= NETIF_F_GRO;
7930 priv->sph_capable = true;
7931 priv->sph_active = priv->sph_capable;
7932 dev_info(priv->device, "SPH feature enabled\n");
7933 }
7934
7935 /* Ideally our host DMA address width is the same as for the
7936 * device. However, it may differ and then we have to use our
7937 * host DMA width for allocation and the device DMA width for
7938 * register handling.
7939 */
7940 if (priv->plat->host_dma_width)
7941 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7942 else
7943 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7944
7945 if (priv->dma_cap.host_dma_width) {
7946 ret = dma_set_mask_and_coherent(device,
7947 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7948 if (!ret) {
7949 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7950 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7951
7952 /*
7953 * If more than 32 bits can be addressed, make sure to
7954 * enable enhanced addressing mode.
7955 */
7956 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7957 priv->plat->dma_cfg->eame = true;
7958 } else {
7959 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7960 if (ret) {
7961 dev_err(priv->device, "Failed to set DMA Mask\n");
7962 goto error_hw_init;
7963 }
7964
7965 priv->dma_cap.host_dma_width = 32;
7966 }
7967 }
7968
7969 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7970 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7971 #ifdef STMMAC_VLAN_TAG_USED
7972 /* Both mac100 and gmac support receive VLAN tag detection */
7973 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7974 if (dwmac_is_xmac(priv->plat->core_type)) {
7975 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7976 priv->hw->hw_vlan_en = true;
7977 }
7978 if (priv->dma_cap.vlhash) {
7979 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7980 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7981 }
7982 if (priv->dma_cap.vlins)
7983 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7984 #endif
7985 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7986
7987 priv->xstats.threshold = tc;
7988
7989 /* Initialize RSS */
7990 rxq = priv->plat->rx_queues_to_use;
7991 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7992 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7993 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7994
7995 if (priv->dma_cap.rssen && priv->plat->rss_en)
7996 ndev->features |= NETIF_F_RXHASH;
7997
7998 ndev->vlan_features |= ndev->features;
7999
8000 /* MTU range: 46 - hw-specific max */
8001 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
8002
8003 if (priv->plat->core_type == DWMAC_CORE_XGMAC)
8004 ndev->max_mtu = XGMAC_JUMBO_LEN;
8005 else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00)
8006 ndev->max_mtu = JUMBO_LEN;
8007 else
8008 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
8009
8010 /* Warn if the platform's maxmtu is smaller than the minimum MTU,
8011 * otherwise clamp the maximum MTU above to the platform's maxmtu.
8012 */
8013 if (priv->plat->maxmtu < ndev->min_mtu)
8014 dev_warn(priv->device,
8015 "%s: warning: maxmtu having invalid value (%d)\n",
8016 __func__, priv->plat->maxmtu);
8017 else if (priv->plat->maxmtu < ndev->max_mtu)
8018 ndev->max_mtu = priv->plat->maxmtu;
8019
8020 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
8021
8022 /* Setup channels NAPI */
8023 stmmac_napi_add(ndev);
8024
8025 mutex_init(&priv->lock);
8026
8027 stmmac_fpe_init(priv);
8028
8029 stmmac_check_pcs_mode(priv);
8030
8031 pm_runtime_get_noresume(device);
8032 pm_runtime_set_active(device);
8033 if (!pm_runtime_enabled(device))
8034 pm_runtime_enable(device);
8035
8036 ret = stmmac_mdio_register(ndev);
8037 if (ret < 0) {
8038 dev_err_probe(priv->device, ret,
8039 "MDIO bus (id: %d) registration failed\n",
8040 priv->plat->bus_id);
8041 goto error_mdio_register;
8042 }
8043
8044 ret = stmmac_pcs_setup(ndev);
8045 if (ret)
8046 goto error_pcs_setup;
8047
8048 ret = stmmac_phylink_setup(priv);
8049 if (ret) {
8050 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
8051 goto error_phy_setup;
8052 }
8053
8054 ret = stmmac_register_devlink(priv);
8055 if (ret)
8056 goto error_devlink_setup;
8057
8058 ret = register_netdev(ndev);
8059 if (ret) {
8060 dev_err(priv->device, "%s: ERROR %i registering the device\n",
8061 __func__, ret);
8062 goto error_netdev_register;
8063 }
8064
8065 #ifdef CONFIG_DEBUG_FS
8066 stmmac_init_fs(ndev);
8067 #endif
8068
8069 if (priv->plat->dump_debug_regs)
8070 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
8071
8072 /* Let pm_runtime_put() disable the clocks.
8073 * If CONFIG_PM is not enabled, the clocks will stay powered.
8074 */
8075 pm_runtime_put(device);
8076
8077 return ret;
8078
8079 error_netdev_register:
8080 stmmac_unregister_devlink(priv);
8081 error_devlink_setup:
8082 phylink_destroy(priv->phylink);
8083 error_phy_setup:
8084 stmmac_pcs_clean(ndev);
8085 error_pcs_setup:
8086 stmmac_mdio_unregister(ndev);
8087 error_mdio_register:
8088 stmmac_napi_del(ndev);
8089 error_hw_init:
8090 destroy_workqueue(priv->wq);
8091 error_wq_init:
8092 bitmap_free(priv->af_xdp_zc_qps);
8093
8094 return ret;
8095 }
8096
8097 /**
8098 * stmmac_dvr_probe
8099 * @dev: device pointer
8100 * @plat_dat: platform data pointer
8101 * @res: stmmac resource pointer
8102 * Description: this is the main probe function used to
8103 * call the alloc_etherdev, allocate the priv structure.
8104 * Return:
8105 * returns 0 on success, otherwise errno.
8106 */
stmmac_dvr_probe(struct device * dev,struct plat_stmmacenet_data * plat_dat,struct stmmac_resources * res)8107 int stmmac_dvr_probe(struct device *dev, struct plat_stmmacenet_data *plat_dat,
8108 struct stmmac_resources *res)
8109 {
8110 int ret;
8111
8112 if (plat_dat->init) {
8113 ret = plat_dat->init(dev, plat_dat->bsp_priv);
8114 if (ret)
8115 return ret;
8116 }
8117
8118 ret = __stmmac_dvr_probe(dev, plat_dat, res);
8119 if (ret && plat_dat->exit)
8120 plat_dat->exit(dev, plat_dat->bsp_priv);
8121
8122 return ret;
8123 }
8124 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
8125
8126 /**
8127 * stmmac_dvr_remove
8128 * @dev: device pointer
8129 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
8130 * changes the link status, releases the DMA descriptor rings.
8131 */
void stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	/* Keep the device resumed for the whole teardown */
	pm_runtime_get_sync(dev);

	unregister_netdev(ndev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	stmmac_unregister_devlink(priv);

	phylink_destroy(priv->phylink);
	/* Put the MAC back into reset */
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);

	stmmac_pcs_clean(ndev);
	stmmac_mdio_unregister(ndev);

	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	/* Balance the probe-time pm_runtime_get_noresume()/set_active()/
	 * enable() sequence.
	 */
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	/* Finally let the platform glue power its side down */
	if (priv->plat->exit)
		priv->plat->exit(dev, priv->plat->bsp_priv);
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
8167
8168 /**
8169 * stmmac_suspend - suspend callback
8170 * @dev: device pointer
8171 * Description: this is the function to suspend the device and it is called
8172 * by the platform driver to stop the network queue, release the resources,
8173 * program the PMT register (for WoL), clean and release driver resources.
8174 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u8 chan;

	/* NOTE(review): priv is derived before the !ndev check below and is
	 * dereferenced at suspend_bsp even on that path - this relies on
	 * dev_get_drvdata() never returning NULL here; confirm.
	 */
	if (!ndev || !netif_running(ndev))
		goto suspend_bsp;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	/* Cancel any pending per-queue TX coalescing timers */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	/* Stop the software EEE timer and leave LPI mode */
	if (priv->eee_sw_timer_en) {
		priv->tx_path_in_lpi_mode = false;
		timer_delete_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	stmmac_legacy_serdes_power_down(priv);

	/* Enable Power down mode by programming the PMT regs */
	if (priv->wolopts) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}

	mutex_unlock(&priv->lock);

	rtnl_lock();
	phylink_suspend(priv->phylink, !!priv->wolopts);
	rtnl_unlock();

	if (stmmac_fpe_supported(priv))
		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);

suspend_bsp:
	/* Always give the platform glue a chance to suspend its side */
	if (priv->plat->suspend)
		return priv->plat->suspend(dev, priv->plat->bsp_priv);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
8228
/* Rewind the RX ring indices of @queue to their initial state. */
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	priv->dma_conf.rx_queue[queue].cur_rx = 0;
	priv->dma_conf.rx_queue[queue].dirty_rx = 0;
}
8236
/* Rewind the TX ring indices of @queue and reset its BQL accounting. */
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *txq = &priv->dma_conf.tx_queue[queue];
	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);

	txq->cur_tx = 0;
	txq->dirty_tx = 0;
	txq->mss = 0;

	netdev_tx_reset_queue(nq);
}
8247
8248 /**
8249 * stmmac_reset_queues_param - reset queue parameters
8250 * @priv: device pointer
8251 */
stmmac_reset_queues_param(struct stmmac_priv * priv)8252 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
8253 {
8254 u8 rx_cnt = priv->plat->rx_queues_to_use;
8255 u8 tx_cnt = priv->plat->tx_queues_to_use;
8256 u8 queue;
8257
8258 for (queue = 0; queue < rx_cnt; queue++)
8259 stmmac_reset_rx_queue(priv, queue);
8260
8261 for (queue = 0; queue < tx_cnt; queue++)
8262 stmmac_reset_tx_queue(priv, queue);
8263 }
8264
8265 /**
8266 * stmmac_resume - resume callback
8267 * @dev: device pointer
8268 * Description: when resume this function is invoked to setup the DMA and CORE
8269 * in a usable state.
8270 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	/* Resume the platform glue first so clocks/power are available */
	if (priv->plat->resume) {
		ret = priv->plat->resume(dev, priv->plat->bsp_priv);
		if (ret)
			return ret;
	}

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console).
	 */
	if (priv->wolopts) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)) {
		ret = stmmac_legacy_serdes_power_up(priv);
		if (ret < 0)
			return ret;
	}

	rtnl_lock();

	/* Prepare the PHY to resume, ensuring that its clocks which are
	 * necessary for the MAC DMA reset to complete are running
	 */
	phylink_prepare_resume(priv->phylink);

	mutex_lock(&priv->lock);

	/* Rewind ring indices, drop in-flight TX skbs and reinitialise the
	 * descriptors before reprogramming the hardware.
	 */
	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv, &priv->dma_conf);

	ret = stmmac_hw_setup(ndev);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		stmmac_legacy_serdes_power_down(priv);
		mutex_unlock(&priv->lock);
		rtnl_unlock();
		return ret;
	}

	stmmac_init_timestamping(priv);

	stmmac_init_coalesce(priv);
	phylink_rx_clk_stop_block(priv->phylink);
	stmmac_set_rx_mode(ndev);
	phylink_rx_clk_stop_unblock(priv->phylink);

	stmmac_vlan_restore(priv);

	stmmac_enable_all_queues(priv);
	stmmac_enable_all_dma_irq(priv);

	mutex_unlock(&priv->lock);

	/* phylink_resume() must be called after the hardware has been
	 * initialised because it may bring the link up immediately in a
	 * workqueue thread, which will race with initialisation.
	 */
	phylink_resume(priv->phylink);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
8359
/* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n
 * (presumably that variant would drop the export in that configuration,
 * while the symbol must remain available to glue drivers - confirm
 * against include/linux/pm.h).
 */
DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
8363
8364 #ifndef MODULE
stmmac_cmdline_opt(char * str)8365 static int __init stmmac_cmdline_opt(char *str)
8366 {
8367 char *opt;
8368
8369 if (!str || !*str)
8370 return 1;
8371 while ((opt = strsep(&str, ",")) != NULL) {
8372 if (!strncmp(opt, "debug:", 6)) {
8373 if (kstrtoint(opt + 6, 0, &debug))
8374 goto err;
8375 } else if (!strncmp(opt, "phyaddr:", 8)) {
8376 if (kstrtoint(opt + 8, 0, &phyaddr))
8377 goto err;
8378 } else if (!strncmp(opt, "tc:", 3)) {
8379 if (kstrtoint(opt + 3, 0, &tc))
8380 goto err;
8381 } else if (!strncmp(opt, "watchdog:", 9)) {
8382 if (kstrtoint(opt + 9, 0, &watchdog))
8383 goto err;
8384 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
8385 if (kstrtoint(opt + 10, 0, &flow_ctrl))
8386 goto err;
8387 } else if (!strncmp(opt, "pause:", 6)) {
8388 if (kstrtoint(opt + 6, 0, &pause))
8389 goto err;
8390 } else if (!strncmp(opt, "eee_timer:", 10)) {
8391 if (kstrtoint(opt + 10, 0, &eee_timer))
8392 goto err;
8393 } else if (!strncmp(opt, "chain_mode:", 11)) {
8394 if (kstrtoint(opt + 11, 0, &chain_mode))
8395 goto err;
8396 }
8397 }
8398 return 1;
8399
8400 err:
8401 pr_err("%s: ERROR broken module parameter conversion", __func__);
8402 return 1;
8403 }
8404
8405 __setup("stmmaceth=", stmmac_cmdline_opt);
8406 #endif /* MODULE */
8407
/* Module init: set up the shared debugfs root and the netdevice notifier
 * used by the debugfs support; nothing to do otherwise.
 */
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}
8419
/* Module exit: tear down the notifier and the debugfs tree created by
 * stmmac_init().
 */
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}
8427
8428 module_init(stmmac_init)
8429 module_exit(stmmac_exit)
8430
8431 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8432 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8433 MODULE_LICENSE("GPL");
8434