Lines matching "1588-2008" (stmmac driver, stmmac_main.c)
1 // SPDX-License-Identifier: GPL-2.0-only
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
29 #include <linux/dma-mapping.h>
55 * with fine resolution and binary rollover. This avoids non-monotonic behavior
62 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
70 static int debug = -1;
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 static int phyaddr = -1;
78 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
155 ret = clk_prepare_enable(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
158 ret = clk_prepare_enable(priv->plat->pclk); in stmmac_bus_clks_config()
160 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
163 if (priv->plat->clks_config) { in stmmac_bus_clks_config()
164 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); in stmmac_bus_clks_config()
166 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
167 clk_disable_unprepare(priv->plat->pclk); in stmmac_bus_clks_config()
172 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
173 clk_disable_unprepare(priv->plat->pclk); in stmmac_bus_clks_config()
174 if (priv->plat->clks_config) in stmmac_bus_clks_config()
175 priv->plat->clks_config(priv->plat->bsp_priv, enabled); in stmmac_bus_clks_config()
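The clock bring-up above uses the standard clk_prepare_enable()/clk_disable_unprepare() unwind: whatever was already enabled is disabled again, in reverse order, when a later step fails. A minimal sketch of that pattern (editor's illustration, not the driver's code; the clock names are placeholders):

#include <linux/clk.h>

/* Hypothetical example: enable a bus clock, then a peripheral clock,
 * undoing the first enable if the second one fails.
 */
static int example_bus_clks_enable(struct clk *bus_clk, struct clk *periph_clk)
{
	int ret;

	ret = clk_prepare_enable(bus_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(periph_clk);
	if (ret) {
		clk_disable_unprepare(bus_clk);	/* unwind in reverse order */
		return ret;
	}

	return 0;
}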
183 * stmmac_verify_args - verify the driver parameters.
205 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in __stmmac_disable_all_queues()
206 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in __stmmac_disable_all_queues()
211 struct stmmac_channel *ch = &priv->channel[queue]; in __stmmac_disable_all_queues()
214 test_bit(queue, priv->af_xdp_zc_qps)) { in __stmmac_disable_all_queues()
215 napi_disable(&ch->rxtx_napi); in __stmmac_disable_all_queues()
220 napi_disable(&ch->rx_napi); in __stmmac_disable_all_queues()
222 napi_disable(&ch->tx_napi); in __stmmac_disable_all_queues()
227 * stmmac_disable_all_queues - Disable all queues
232 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_disable_all_queues()
238 rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_disable_all_queues()
239 if (rx_q->xsk_pool) { in stmmac_disable_all_queues()
249 * stmmac_enable_all_queues - Enable all queues
254 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_enable_all_queues()
255 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in stmmac_enable_all_queues()
260 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_all_queues()
263 test_bit(queue, priv->af_xdp_zc_qps)) { in stmmac_enable_all_queues()
264 napi_enable(&ch->rxtx_napi); in stmmac_enable_all_queues()
269 napi_enable(&ch->rx_napi); in stmmac_enable_all_queues()
271 napi_enable(&ch->tx_napi); in stmmac_enable_all_queues()
277 if (!test_bit(STMMAC_DOWN, &priv->state) && in stmmac_service_event_schedule()
278 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) in stmmac_service_event_schedule()
279 queue_work(priv->wq, &priv->service_task); in stmmac_service_event_schedule()
284 netif_carrier_off(priv->dev); in stmmac_global_err()
285 set_bit(STMMAC_RESET_REQUESTED, &priv->state); in stmmac_global_err()
290 * stmmac_clk_csr_set - dynamically set the MDC clock
297 * changed at run-time and it is fixed (as reported in the driver
305 clk_rate = clk_get_rate(priv->plat->stmmac_clk); in stmmac_clk_csr_set()
314 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { in stmmac_clk_csr_set()
316 priv->clk_csr = STMMAC_CSR_20_35M; in stmmac_clk_csr_set()
318 priv->clk_csr = STMMAC_CSR_35_60M; in stmmac_clk_csr_set()
320 priv->clk_csr = STMMAC_CSR_60_100M; in stmmac_clk_csr_set()
322 priv->clk_csr = STMMAC_CSR_100_150M; in stmmac_clk_csr_set()
324 priv->clk_csr = STMMAC_CSR_150_250M; in stmmac_clk_csr_set()
326 priv->clk_csr = STMMAC_CSR_250_300M; in stmmac_clk_csr_set()
329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) { in stmmac_clk_csr_set()
331 priv->clk_csr = 0x03; in stmmac_clk_csr_set()
333 priv->clk_csr = 0x02; in stmmac_clk_csr_set()
335 priv->clk_csr = 0x01; in stmmac_clk_csr_set()
337 priv->clk_csr = 0; in stmmac_clk_csr_set()
340 if (priv->plat->has_xgmac) { in stmmac_clk_csr_set()
342 priv->clk_csr = 0x5; in stmmac_clk_csr_set()
344 priv->clk_csr = 0x4; in stmmac_clk_csr_set()
346 priv->clk_csr = 0x3; in stmmac_clk_csr_set()
348 priv->clk_csr = 0x2; in stmmac_clk_csr_set()
350 priv->clk_csr = 0x1; in stmmac_clk_csr_set()
352 priv->clk_csr = 0x0; in stmmac_clk_csr_set()
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_avail()
367 if (tx_q->dirty_tx > tx_q->cur_tx) in stmmac_tx_avail()
368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1; in stmmac_tx_avail()
370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; in stmmac_tx_avail()
376 * stmmac_rx_dirty - Get RX queue dirty
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_dirty()
385 if (rx_q->dirty_rx <= rx_q->cur_rx) in stmmac_rx_dirty()
386 dirty = rx_q->cur_rx - rx_q->dirty_rx; in stmmac_rx_dirty()
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; in stmmac_rx_dirty()
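stmmac_tx_avail() and stmmac_rx_dirty() above are plain circular-ring index arithmetic on the cur/dirty pointers. A standalone sketch of the same accounting (editor's illustration; the names are placeholders):

/* Free TX slots: one entry is kept unused so cur == dirty means "empty". */
static unsigned int ring_tx_avail(unsigned int dirty, unsigned int cur,
				  unsigned int ring_size)
{
	if (dirty > cur)
		return dirty - cur - 1;
	return ring_size - cur + dirty - 1;
}

/* RX entries already consumed by the driver and waiting to be refilled. */
static unsigned int ring_rx_dirty(unsigned int dirty, unsigned int cur,
				  unsigned int ring_size)
{
	if (dirty <= cur)
		return cur - dirty;
	return ring_size - dirty + cur;
}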
398 priv->eee_sw_timer_en = en ? 0 : 1; in stmmac_lpi_entry_timer_config()
399 tx_lpi_timer = en ? priv->tx_lpi_timer : 0; in stmmac_lpi_entry_timer_config()
400 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); in stmmac_lpi_entry_timer_config()
404 * stmmac_enable_eee_mode - check and enter LPI mode
411 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_enable_eee_mode()
416 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_enable_eee_mode()
418 if (tx_q->dirty_tx != tx_q->cur_tx) in stmmac_enable_eee_mode()
419 return -EBUSY; /* still unfinished work */ in stmmac_enable_eee_mode()
423 if (!priv->tx_path_in_lpi_mode) in stmmac_enable_eee_mode()
424 stmmac_set_eee_mode(priv, priv->hw, in stmmac_enable_eee_mode()
425 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING); in stmmac_enable_eee_mode()
430 * stmmac_disable_eee_mode - disable and exit from LPI mode
437 if (!priv->eee_sw_timer_en) { in stmmac_disable_eee_mode()
442 stmmac_reset_eee_mode(priv, priv->hw); in stmmac_disable_eee_mode()
443 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_disable_eee_mode()
444 priv->tx_path_in_lpi_mode = false; in stmmac_disable_eee_mode()
448 * stmmac_eee_ctrl_timer - EEE TX SW timer.
459 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_eee_ctrl_timer()
463 * stmmac_eee_init - init EEE
472 int eee_tw_timer = priv->eee_tw_timer; in stmmac_eee_init()
475 if (!priv->dma_cap.eee) in stmmac_eee_init()
478 mutex_lock(&priv->lock); in stmmac_eee_init()
481 if (!priv->eee_active) { in stmmac_eee_init()
482 if (priv->eee_enabled) { in stmmac_eee_init()
483 netdev_dbg(priv->dev, "disable EEE\n"); in stmmac_eee_init()
485 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_eee_init()
486 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); in stmmac_eee_init()
487 if (priv->hw->xpcs) in stmmac_eee_init()
488 xpcs_config_eee(priv->hw->xpcs, in stmmac_eee_init()
489 priv->plat->mult_fact_100ns, in stmmac_eee_init()
492 mutex_unlock(&priv->lock); in stmmac_eee_init()
496 if (priv->eee_active && !priv->eee_enabled) { in stmmac_eee_init()
497 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); in stmmac_eee_init()
498 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, in stmmac_eee_init()
500 if (priv->hw->xpcs) in stmmac_eee_init()
501 xpcs_config_eee(priv->hw->xpcs, in stmmac_eee_init()
502 priv->plat->mult_fact_100ns, in stmmac_eee_init()
506 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { in stmmac_eee_init()
507 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_eee_init()
508 priv->tx_path_in_lpi_mode = false; in stmmac_eee_init()
512 mod_timer(&priv->eee_ctrl_timer, in stmmac_eee_init()
513 STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_eee_init()
516 mutex_unlock(&priv->lock); in stmmac_eee_init()
517 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); in stmmac_eee_init()
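When the hardware LPI entry timer is not used, the driver arms a software timer and only enters LPI once TX has been idle for tx_lpi_timer microseconds. A sketch of that timer pattern (editor's illustration; the microseconds-to-jiffies conversion is an assumption about what STMMAC_LPI_T() does):

#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_lpi_timer(struct timer_list *t)
{
	/* called after the idle period elapses; enter LPI here */
}

static void example_arm_lpi_timer(struct timer_list *timer, unsigned int usecs)
{
	timer_setup(timer, example_lpi_timer, 0);
	mod_timer(timer, jiffies + usecs_to_jiffies(usecs));
}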
521 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
536 if (!priv->hwts_tx_en) in stmmac_get_tx_hwtstamp()
540 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) in stmmac_get_tx_hwtstamp()
545 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); in stmmac_get_tx_hwtstamp()
547 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { in stmmac_get_tx_hwtstamp()
552 ns -= priv->plat->cdc_error_adj; in stmmac_get_tx_hwtstamp()
557 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); in stmmac_get_tx_hwtstamp()
563 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
579 if (!priv->hwts_rx_en) in stmmac_get_rx_hwtstamp()
582 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) in stmmac_get_rx_hwtstamp()
586 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { in stmmac_get_rx_hwtstamp()
587 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); in stmmac_get_rx_hwtstamp()
589 ns -= priv->plat->cdc_error_adj; in stmmac_get_rx_hwtstamp()
591 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); in stmmac_get_rx_hwtstamp()
594 shhwtstamp->hwtstamp = ns_to_ktime(ns); in stmmac_get_rx_hwtstamp()
596 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); in stmmac_get_rx_hwtstamp()
601 * stmmac_hwtstamp_set - control hardware timestamping.
609 * 0 on success and an appropriate negative error code on failure.
624 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { in stmmac_hwtstamp_set()
625 netdev_alert(priv->dev, "No support for HW time stamping\n"); in stmmac_hwtstamp_set()
626 priv->hwts_tx_en = 0; in stmmac_hwtstamp_set()
627 priv->hwts_rx_en = 0; in stmmac_hwtstamp_set()
629 return -EOPNOTSUPP; in stmmac_hwtstamp_set()
632 if (copy_from_user(&config, ifr->ifr_data, in stmmac_hwtstamp_set()
634 return -EFAULT; in stmmac_hwtstamp_set()
636 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", in stmmac_hwtstamp_set()
641 return -ERANGE; in stmmac_hwtstamp_set()
643 if (priv->adv_ts) { in stmmac_hwtstamp_set()
724 if (priv->synopsys_id < DWMAC_CORE_4_10) in stmmac_hwtstamp_set()
764 return -ERANGE; in stmmac_hwtstamp_set()
777 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); in stmmac_hwtstamp_set()
778 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; in stmmac_hwtstamp_set()
780 priv->systime_flags = STMMAC_HWTS_ACTIVE; in stmmac_hwtstamp_set()
782 if (priv->hwts_tx_en || priv->hwts_rx_en) { in stmmac_hwtstamp_set()
783 priv->systime_flags |= tstamp_all | ptp_v2 | in stmmac_hwtstamp_set()
789 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); in stmmac_hwtstamp_set()
791 memcpy(&priv->tstamp_config, &config, sizeof(config)); in stmmac_hwtstamp_set()
793 return copy_to_user(ifr->ifr_data, &config, in stmmac_hwtstamp_set()
794 sizeof(config)) ? -EFAULT : 0; in stmmac_hwtstamp_set()
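stmmac_hwtstamp_set() services the SIOCSHWTSTAMP ioctl; the request originates in userspace as a struct hwtstamp_config passed through ifr_data. A minimal userspace sketch (editor's illustration; the socket, interface name and filter choice are example values):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int enable_hw_timestamping(int fd, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* on success the driver writes back the filter it actually applied */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}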
798 * stmmac_hwtstamp_get - read hardware timestamping.
809 struct hwtstamp_config *config = &priv->tstamp_config; in stmmac_hwtstamp_get()
811 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_hwtstamp_get()
812 return -EOPNOTSUPP; in stmmac_hwtstamp_get()
814 return copy_to_user(ifr->ifr_data, config, in stmmac_hwtstamp_get()
815 sizeof(*config)) ? -EFAULT : 0; in stmmac_hwtstamp_get()
819 * stmmac_init_tstamp_counter - init hardware timestamping counter
830 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_tstamp_counter()
835 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_init_tstamp_counter()
836 return -EOPNOTSUPP; in stmmac_init_tstamp_counter()
838 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); in stmmac_init_tstamp_counter()
839 priv->systime_flags = systime_flags; in stmmac_init_tstamp_counter()
842 stmmac_config_sub_second_increment(priv, priv->ptpaddr, in stmmac_init_tstamp_counter()
843 priv->plat->clk_ptp_rate, in stmmac_init_tstamp_counter()
848 priv->sub_second_inc = sec_inc; in stmmac_init_tstamp_counter()
856 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); in stmmac_init_tstamp_counter()
857 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); in stmmac_init_tstamp_counter()
863 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); in stmmac_init_tstamp_counter()
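The default addend programmed above implements the usual fine-correction scheme: the hardware adds the addend to a 32-bit accumulator on every PTP reference clock cycle and advances the timestamp by the sub-second increment on each overflow, so the effective rate is clk_ptp_rate * addend / 2^32. A hedged arithmetic sketch (editor's illustration; the rates are assumed example values, not taken from a real platform):

#include <linux/math64.h>

static u32 example_ptp_addend(u64 clk_ptp_rate, u32 sec_inc_ns)
{
	/* nominal counter rate implied by the chosen sub-second increment */
	u64 target_hz = div_u64(1000000000ULL, sec_inc_ns);

	/* addend = 2^32 * target_hz / clk_ptp_rate; this fits in 32 bits
	 * because target_hz is chosen below clk_ptp_rate
	 */
	return div_u64(target_hz << 32, clk_ptp_rate);
}

/* e.g. example_ptp_addend(62500000, 20) is roughly 0xCCCCCCCC (50/62.5 of 2^32) */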
870 * stmmac_init_ptp - init PTP
878 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_ptp()
881 if (priv->plat->ptp_clk_freq_config) in stmmac_init_ptp()
882 priv->plat->ptp_clk_freq_config(priv); in stmmac_init_ptp()
888 priv->adv_ts = 0; in stmmac_init_ptp()
890 if (xmac && priv->dma_cap.atime_stamp) in stmmac_init_ptp()
891 priv->adv_ts = 1; in stmmac_init_ptp()
893 else if (priv->extend_desc && priv->dma_cap.atime_stamp) in stmmac_init_ptp()
894 priv->adv_ts = 1; in stmmac_init_ptp()
896 if (priv->dma_cap.time_stamp) in stmmac_init_ptp()
897 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); in stmmac_init_ptp()
899 if (priv->adv_ts) in stmmac_init_ptp()
900 netdev_info(priv->dev, in stmmac_init_ptp()
901 "IEEE 1588-2008 Advanced Timestamp supported\n"); in stmmac_init_ptp()
903 priv->hwts_tx_en = 0; in stmmac_init_ptp()
904 priv->hwts_rx_en = 0; in stmmac_init_ptp()
906 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) in stmmac_init_ptp()
914 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_release_ptp()
919 * stmmac_mac_flow_ctrl - Configure flow control in all queues
926 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_mac_flow_ctrl()
928 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, in stmmac_mac_flow_ctrl()
929 priv->pause, tx_cnt); in stmmac_mac_flow_ctrl()
935 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_get_caps()
937 /* Refresh the MAC-specific capabilities */ in stmmac_mac_get_caps()
940 config->mac_capabilities = priv->hw->link.caps; in stmmac_mac_get_caps()
942 if (priv->plat->max_speed) in stmmac_mac_get_caps()
943 phylink_limit_mac_speed(config, priv->plat->max_speed); in stmmac_mac_get_caps()
945 return config->mac_capabilities; in stmmac_mac_get_caps()
951 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_select_pcs()
954 if (priv->plat->select_pcs) { in stmmac_mac_select_pcs()
955 pcs = priv->plat->select_pcs(priv, interface); in stmmac_mac_select_pcs()
971 struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg; in stmmac_fpe_link_state_handle()
974 timer_shutdown_sync(&fpe_cfg->verify_timer); in stmmac_fpe_link_state_handle()
976 spin_lock_irqsave(&fpe_cfg->lock, flags); in stmmac_fpe_link_state_handle()
978 if (is_up && fpe_cfg->pmac_enabled) { in stmmac_fpe_link_state_handle()
980 stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg, in stmmac_fpe_link_state_handle()
981 priv->plat->tx_queues_to_use, in stmmac_fpe_link_state_handle()
982 priv->plat->rx_queues_to_use, in stmmac_fpe_link_state_handle()
989 stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg, in stmmac_fpe_link_state_handle()
990 priv->plat->tx_queues_to_use, in stmmac_fpe_link_state_handle()
991 priv->plat->rx_queues_to_use, in stmmac_fpe_link_state_handle()
995 spin_unlock_irqrestore(&fpe_cfg->lock, flags); in stmmac_fpe_link_state_handle()
1001 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_down()
1003 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_mac_link_down()
1004 priv->eee_active = false; in stmmac_mac_link_down()
1005 priv->tx_lpi_enabled = false; in stmmac_mac_link_down()
1006 priv->eee_enabled = stmmac_eee_init(priv); in stmmac_mac_link_down()
1007 stmmac_set_eee_pls(priv, priv->hw, false); in stmmac_mac_link_down()
1009 if (priv->dma_cap.fpesel) in stmmac_mac_link_down()
1019 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_up()
1022 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in stmmac_mac_link_up()
1023 priv->plat->serdes_powerup) in stmmac_mac_link_up()
1024 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv); in stmmac_mac_link_up()
1026 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
1027 ctrl = old_ctrl & ~priv->hw->link.speed_mask; in stmmac_mac_link_up()
1032 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
1035 ctrl |= priv->hw->link.xgmii.speed5000; in stmmac_mac_link_up()
1038 ctrl |= priv->hw->link.xgmii.speed2500; in stmmac_mac_link_up()
1046 ctrl |= priv->hw->link.xlgmii.speed100000; in stmmac_mac_link_up()
1049 ctrl |= priv->hw->link.xlgmii.speed50000; in stmmac_mac_link_up()
1052 ctrl |= priv->hw->link.xlgmii.speed40000; in stmmac_mac_link_up()
1055 ctrl |= priv->hw->link.xlgmii.speed25000; in stmmac_mac_link_up()
1058 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
1061 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
1064 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
1072 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
1075 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
1078 ctrl |= priv->hw->link.speed100; in stmmac_mac_link_up()
1081 ctrl |= priv->hw->link.speed10; in stmmac_mac_link_up()
1088 priv->speed = speed; in stmmac_mac_link_up()
1090 if (priv->plat->fix_mac_speed) in stmmac_mac_link_up()
1091 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode); in stmmac_mac_link_up()
1094 ctrl &= ~priv->hw->link.duplex; in stmmac_mac_link_up()
1096 ctrl |= priv->hw->link.duplex; in stmmac_mac_link_up()
1100 priv->flow_ctrl = FLOW_AUTO; in stmmac_mac_link_up()
1102 priv->flow_ctrl = FLOW_RX; in stmmac_mac_link_up()
1104 priv->flow_ctrl = FLOW_TX; in stmmac_mac_link_up()
1106 priv->flow_ctrl = FLOW_OFF; in stmmac_mac_link_up()
1111 writel(ctrl, priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
1113 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_mac_link_up()
1114 if (phy && priv->dma_cap.eee) { in stmmac_mac_link_up()
1115 priv->eee_active = in stmmac_mac_link_up()
1116 phy_init_eee(phy, !(priv->plat->flags & in stmmac_mac_link_up()
1118 priv->eee_enabled = stmmac_eee_init(priv); in stmmac_mac_link_up()
1119 priv->tx_lpi_enabled = priv->eee_enabled; in stmmac_mac_link_up()
1120 stmmac_set_eee_pls(priv, priv->hw, true); in stmmac_mac_link_up()
1123 if (priv->dma_cap.fpesel) in stmmac_mac_link_up()
1126 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) in stmmac_mac_link_up()
1139 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1147 int interface = priv->plat->mac_interface; in stmmac_check_pcs_mode()
1149 if (priv->dma_cap.pcs) { in stmmac_check_pcs_mode()
1154 netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); in stmmac_check_pcs_mode()
1155 priv->hw->pcs = STMMAC_PCS_RGMII; in stmmac_check_pcs_mode()
1157 netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); in stmmac_check_pcs_mode()
1158 priv->hw->pcs = STMMAC_PCS_SGMII; in stmmac_check_pcs_mode()
1164 * stmmac_init_phy - PHY initialization
1178 if (!phylink_expects_phy(priv->phylink)) in stmmac_init_phy()
1181 fwnode = priv->plat->port_node; in stmmac_init_phy()
1183 fwnode = dev_fwnode(priv->device); in stmmac_init_phy()
1190 /* Some DT bindings do not set up the PHY handle. Let's try to in stmmac_init_phy()
1194 int addr = priv->plat->phy_addr; in stmmac_init_phy()
1198 netdev_err(priv->dev, "no phy found\n"); in stmmac_init_phy()
1199 return -ENODEV; in stmmac_init_phy()
1202 phydev = mdiobus_get_phy(priv->mii, addr); in stmmac_init_phy()
1204 netdev_err(priv->dev, "no phy at addr %d\n", addr); in stmmac_init_phy()
1205 return -ENODEV; in stmmac_init_phy()
1208 ret = phylink_connect_phy(priv->phylink, phydev); in stmmac_init_phy()
1211 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); in stmmac_init_phy()
1214 if (!priv->plat->pmt) { in stmmac_init_phy()
1217 phylink_ethtool_get_wol(priv->phylink, &wol); in stmmac_init_phy()
1218 device_set_wakeup_capable(priv->device, !!wol.supported); in stmmac_init_phy()
1219 device_set_wakeup_enable(priv->device, !!wol.wolopts); in stmmac_init_phy()
1228 int mode = priv->plat->phy_interface; in stmmac_phy_setup()
1232 priv->phylink_config.dev = &priv->dev->dev; in stmmac_phy_setup()
1233 priv->phylink_config.type = PHYLINK_NETDEV; in stmmac_phy_setup()
1234 priv->phylink_config.mac_managed_pm = true; in stmmac_phy_setup()
1237 priv->phylink_config.mac_requires_rxc = true; in stmmac_phy_setup()
1239 mdio_bus_data = priv->plat->mdio_bus_data; in stmmac_phy_setup()
1241 priv->phylink_config.default_an_inband = in stmmac_phy_setup()
1242 mdio_bus_data->default_an_inband; in stmmac_phy_setup()
1247 __set_bit(mode, priv->phylink_config.supported_interfaces); in stmmac_phy_setup()
1250 if (priv->hw->xpcs) in stmmac_phy_setup()
1251 xpcs_get_interfaces(priv->hw->xpcs, in stmmac_phy_setup()
1252 priv->phylink_config.supported_interfaces); in stmmac_phy_setup()
1254 fwnode = priv->plat->port_node; in stmmac_phy_setup()
1256 fwnode = dev_fwnode(priv->device); in stmmac_phy_setup()
1258 phylink = phylink_create(&priv->phylink_config, fwnode, in stmmac_phy_setup()
1263 priv->phylink = phylink; in stmmac_phy_setup()
1270 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_display_rx_rings()
1277 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_display_rx_rings()
1281 if (priv->extend_desc) { in stmmac_display_rx_rings()
1282 head_rx = (void *)rx_q->dma_erx; in stmmac_display_rx_rings()
1285 head_rx = (void *)rx_q->dma_rx; in stmmac_display_rx_rings()
1290 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, in stmmac_display_rx_rings()
1291 rx_q->dma_rx_phy, desc_size); in stmmac_display_rx_rings()
1298 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_display_tx_rings()
1305 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_display_tx_rings()
1309 if (priv->extend_desc) { in stmmac_display_tx_rings()
1310 head_tx = (void *)tx_q->dma_etx; in stmmac_display_tx_rings()
1312 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { in stmmac_display_tx_rings()
1313 head_tx = (void *)tx_q->dma_entx; in stmmac_display_tx_rings()
1316 head_tx = (void *)tx_q->dma_tx; in stmmac_display_tx_rings()
1320 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, in stmmac_display_tx_rings()
1321 tx_q->dma_tx_phy, desc_size); in stmmac_display_tx_rings()
1354 * stmmac_clear_rx_descriptors - clear RX descriptors
1365 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_clear_rx_descriptors()
1369 for (i = 0; i < dma_conf->dma_rx_size; i++) in stmmac_clear_rx_descriptors()
1370 if (priv->extend_desc) in stmmac_clear_rx_descriptors()
1371 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, in stmmac_clear_rx_descriptors()
1372 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1373 (i == dma_conf->dma_rx_size - 1), in stmmac_clear_rx_descriptors()
1374 dma_conf->dma_buf_sz); in stmmac_clear_rx_descriptors()
1376 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], in stmmac_clear_rx_descriptors()
1377 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1378 (i == dma_conf->dma_rx_size - 1), in stmmac_clear_rx_descriptors()
1379 dma_conf->dma_buf_sz); in stmmac_clear_rx_descriptors()
1383 * stmmac_clear_tx_descriptors - clear tx descriptors
1394 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_clear_tx_descriptors()
1398 for (i = 0; i < dma_conf->dma_tx_size; i++) { in stmmac_clear_tx_descriptors()
1399 int last = (i == (dma_conf->dma_tx_size - 1)); in stmmac_clear_tx_descriptors()
1402 if (priv->extend_desc) in stmmac_clear_tx_descriptors()
1403 p = &tx_q->dma_etx[i].basic; in stmmac_clear_tx_descriptors()
1404 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_clear_tx_descriptors()
1405 p = &tx_q->dma_entx[i].basic; in stmmac_clear_tx_descriptors()
1407 p = &tx_q->dma_tx[i]; in stmmac_clear_tx_descriptors()
1409 stmmac_init_tx_desc(priv, p, priv->mode, last); in stmmac_clear_tx_descriptors()
1414 * stmmac_clear_descriptors - clear descriptors
1423 u32 rx_queue_cnt = priv->plat->rx_queues_to_use; in stmmac_clear_descriptors()
1424 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_clear_descriptors()
1437 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1452 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_init_rx_buffers()
1453 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in stmmac_init_rx_buffers()
1456 if (priv->dma_cap.host_dma_width <= 32) in stmmac_init_rx_buffers()
1459 if (!buf->page) { in stmmac_init_rx_buffers()
1460 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_init_rx_buffers()
1461 if (!buf->page) in stmmac_init_rx_buffers()
1462 return -ENOMEM; in stmmac_init_rx_buffers()
1463 buf->page_offset = stmmac_rx_offset(priv); in stmmac_init_rx_buffers()
1466 if (priv->sph && !buf->sec_page) { in stmmac_init_rx_buffers()
1467 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_init_rx_buffers()
1468 if (!buf->sec_page) in stmmac_init_rx_buffers()
1469 return -ENOMEM; in stmmac_init_rx_buffers()
1471 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); in stmmac_init_rx_buffers()
1472 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); in stmmac_init_rx_buffers()
1474 buf->sec_page = NULL; in stmmac_init_rx_buffers()
1475 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); in stmmac_init_rx_buffers()
1478 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; in stmmac_init_rx_buffers()
1480 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_init_rx_buffers()
1481 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) in stmmac_init_rx_buffers()
1488 * stmmac_free_rx_buffer - free RX dma buffers
1497 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in stmmac_free_rx_buffer()
1499 if (buf->page) in stmmac_free_rx_buffer()
1500 page_pool_put_full_page(rx_q->page_pool, buf->page, false); in stmmac_free_rx_buffer()
1501 buf->page = NULL; in stmmac_free_rx_buffer()
1503 if (buf->sec_page) in stmmac_free_rx_buffer()
1504 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); in stmmac_free_rx_buffer()
1505 buf->sec_page = NULL; in stmmac_free_rx_buffer()
1509 * stmmac_free_tx_buffer - free TX dma buffers
1519 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_free_tx_buffer()
1521 if (tx_q->tx_skbuff_dma[i].buf && in stmmac_free_tx_buffer()
1522 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { in stmmac_free_tx_buffer()
1523 if (tx_q->tx_skbuff_dma[i].map_as_page) in stmmac_free_tx_buffer()
1524 dma_unmap_page(priv->device, in stmmac_free_tx_buffer()
1525 tx_q->tx_skbuff_dma[i].buf, in stmmac_free_tx_buffer()
1526 tx_q->tx_skbuff_dma[i].len, in stmmac_free_tx_buffer()
1529 dma_unmap_single(priv->device, in stmmac_free_tx_buffer()
1530 tx_q->tx_skbuff_dma[i].buf, in stmmac_free_tx_buffer()
1531 tx_q->tx_skbuff_dma[i].len, in stmmac_free_tx_buffer()
1535 if (tx_q->xdpf[i] && in stmmac_free_tx_buffer()
1536 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || in stmmac_free_tx_buffer()
1537 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { in stmmac_free_tx_buffer()
1538 xdp_return_frame(tx_q->xdpf[i]); in stmmac_free_tx_buffer()
1539 tx_q->xdpf[i] = NULL; in stmmac_free_tx_buffer()
1542 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) in stmmac_free_tx_buffer()
1543 tx_q->xsk_frames_done++; in stmmac_free_tx_buffer()
1545 if (tx_q->tx_skbuff[i] && in stmmac_free_tx_buffer()
1546 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_free_tx_buffer()
1547 dev_kfree_skb_any(tx_q->tx_skbuff[i]); in stmmac_free_tx_buffer()
1548 tx_q->tx_skbuff[i] = NULL; in stmmac_free_tx_buffer()
1551 tx_q->tx_skbuff_dma[i].buf = 0; in stmmac_free_tx_buffer()
1552 tx_q->tx_skbuff_dma[i].map_as_page = false; in stmmac_free_tx_buffer()
1556 * dma_free_rx_skbufs - free RX dma buffers
1565 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in dma_free_rx_skbufs()
1568 for (i = 0; i < dma_conf->dma_rx_size; i++) in dma_free_rx_skbufs()
1576 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_alloc_rx_buffers()
1579 for (i = 0; i < dma_conf->dma_rx_size; i++) { in stmmac_alloc_rx_buffers()
1583 if (priv->extend_desc) in stmmac_alloc_rx_buffers()
1584 p = &((rx_q->dma_erx + i)->basic); in stmmac_alloc_rx_buffers()
1586 p = rx_q->dma_rx + i; in stmmac_alloc_rx_buffers()
1593 rx_q->buf_alloc_num++; in stmmac_alloc_rx_buffers()
1600 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1609 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in dma_free_rx_xskbufs()
1612 for (i = 0; i < dma_conf->dma_rx_size; i++) { in dma_free_rx_xskbufs()
1613 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in dma_free_rx_xskbufs()
1615 if (!buf->xdp) in dma_free_rx_xskbufs()
1618 xsk_buff_free(buf->xdp); in dma_free_rx_xskbufs()
1619 buf->xdp = NULL; in dma_free_rx_xskbufs()
1627 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_alloc_rx_buffers_zc()
1636 for (i = 0; i < dma_conf->dma_rx_size; i++) { in stmmac_alloc_rx_buffers_zc()
1641 if (priv->extend_desc) in stmmac_alloc_rx_buffers_zc()
1642 p = (struct dma_desc *)(rx_q->dma_erx + i); in stmmac_alloc_rx_buffers_zc()
1644 p = rx_q->dma_rx + i; in stmmac_alloc_rx_buffers_zc()
1646 buf = &rx_q->buf_pool[i]; in stmmac_alloc_rx_buffers_zc()
1648 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_alloc_rx_buffers_zc()
1649 if (!buf->xdp) in stmmac_alloc_rx_buffers_zc()
1650 return -ENOMEM; in stmmac_alloc_rx_buffers_zc()
1652 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); in stmmac_alloc_rx_buffers_zc()
1654 rx_q->buf_alloc_num++; in stmmac_alloc_rx_buffers_zc()
1662 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) in stmmac_get_xsk_pool()
1665 return xsk_get_pool_from_qid(priv->dev, queue); in stmmac_get_xsk_pool()
1669 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1682 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __init_dma_rx_desc_rings()
1685 netif_dbg(priv, probe, priv->dev, in __init_dma_rx_desc_rings()
1687 (u32)rx_q->dma_rx_phy); in __init_dma_rx_desc_rings()
1691 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1693 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_rx_desc_rings()
1695 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1696 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, in __init_dma_rx_desc_rings()
1699 netdev_info(priv->dev, in __init_dma_rx_desc_rings()
1700 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", in __init_dma_rx_desc_rings()
1701 rx_q->queue_index); in __init_dma_rx_desc_rings()
1702 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1704 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, in __init_dma_rx_desc_rings()
1706 rx_q->page_pool)); in __init_dma_rx_desc_rings()
1707 netdev_info(priv->dev, in __init_dma_rx_desc_rings()
1708 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", in __init_dma_rx_desc_rings()
1709 rx_q->queue_index); in __init_dma_rx_desc_rings()
1712 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1714 * xdpsock TX-only. in __init_dma_rx_desc_rings()
1720 return -ENOMEM; in __init_dma_rx_desc_rings()
1724 if (priv->mode == STMMAC_CHAIN_MODE) { in __init_dma_rx_desc_rings()
1725 if (priv->extend_desc) in __init_dma_rx_desc_rings()
1726 stmmac_mode_init(priv, rx_q->dma_erx, in __init_dma_rx_desc_rings()
1727 rx_q->dma_rx_phy, in __init_dma_rx_desc_rings()
1728 dma_conf->dma_rx_size, 1); in __init_dma_rx_desc_rings()
1730 stmmac_mode_init(priv, rx_q->dma_rx, in __init_dma_rx_desc_rings()
1731 rx_q->dma_rx_phy, in __init_dma_rx_desc_rings()
1732 dma_conf->dma_rx_size, 0); in __init_dma_rx_desc_rings()
1743 u32 rx_count = priv->plat->rx_queues_to_use; in init_dma_rx_desc_rings()
1748 netif_dbg(priv, probe, priv->dev, in init_dma_rx_desc_rings()
1761 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in init_dma_rx_desc_rings()
1763 if (rx_q->xsk_pool) in init_dma_rx_desc_rings()
1768 rx_q->buf_alloc_num = 0; in init_dma_rx_desc_rings()
1769 rx_q->xsk_pool = NULL; in init_dma_rx_desc_rings()
1771 queue--; in init_dma_rx_desc_rings()
1778 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1790 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __init_dma_tx_desc_rings()
1793 netif_dbg(priv, probe, priv->dev, in __init_dma_tx_desc_rings()
1795 (u32)tx_q->dma_tx_phy); in __init_dma_tx_desc_rings()
1798 if (priv->mode == STMMAC_CHAIN_MODE) { in __init_dma_tx_desc_rings()
1799 if (priv->extend_desc) in __init_dma_tx_desc_rings()
1800 stmmac_mode_init(priv, tx_q->dma_etx, in __init_dma_tx_desc_rings()
1801 tx_q->dma_tx_phy, in __init_dma_tx_desc_rings()
1802 dma_conf->dma_tx_size, 1); in __init_dma_tx_desc_rings()
1803 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) in __init_dma_tx_desc_rings()
1804 stmmac_mode_init(priv, tx_q->dma_tx, in __init_dma_tx_desc_rings()
1805 tx_q->dma_tx_phy, in __init_dma_tx_desc_rings()
1806 dma_conf->dma_tx_size, 0); in __init_dma_tx_desc_rings()
1809 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_tx_desc_rings()
1811 for (i = 0; i < dma_conf->dma_tx_size; i++) { in __init_dma_tx_desc_rings()
1814 if (priv->extend_desc) in __init_dma_tx_desc_rings()
1815 p = &((tx_q->dma_etx + i)->basic); in __init_dma_tx_desc_rings()
1816 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __init_dma_tx_desc_rings()
1817 p = &((tx_q->dma_entx + i)->basic); in __init_dma_tx_desc_rings()
1819 p = tx_q->dma_tx + i; in __init_dma_tx_desc_rings()
1823 tx_q->tx_skbuff_dma[i].buf = 0; in __init_dma_tx_desc_rings()
1824 tx_q->tx_skbuff_dma[i].map_as_page = false; in __init_dma_tx_desc_rings()
1825 tx_q->tx_skbuff_dma[i].len = 0; in __init_dma_tx_desc_rings()
1826 tx_q->tx_skbuff_dma[i].last_segment = false; in __init_dma_tx_desc_rings()
1827 tx_q->tx_skbuff[i] = NULL; in __init_dma_tx_desc_rings()
1840 tx_queue_cnt = priv->plat->tx_queues_to_use; in init_dma_tx_desc_rings()
1849 * init_dma_desc_rings - init the RX/TX descriptor rings
1879 * dma_free_tx_skbufs - free TX dma buffers
1888 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in dma_free_tx_skbufs()
1891 tx_q->xsk_frames_done = 0; in dma_free_tx_skbufs()
1893 for (i = 0; i < dma_conf->dma_tx_size; i++) in dma_free_tx_skbufs()
1896 if (tx_q->xsk_pool && tx_q->xsk_frames_done) { in dma_free_tx_skbufs()
1897 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); in dma_free_tx_skbufs()
1898 tx_q->xsk_frames_done = 0; in dma_free_tx_skbufs()
1899 tx_q->xsk_pool = NULL; in dma_free_tx_skbufs()
1904 * stmmac_free_tx_skbufs - free TX skb buffers
1909 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_free_tx_skbufs()
1913 dma_free_tx_skbufs(priv, &priv->dma_conf, queue); in stmmac_free_tx_skbufs()
1917 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1926 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __free_dma_rx_desc_resources()
1929 if (rx_q->xsk_pool) in __free_dma_rx_desc_resources()
1934 rx_q->buf_alloc_num = 0; in __free_dma_rx_desc_resources()
1935 rx_q->xsk_pool = NULL; in __free_dma_rx_desc_resources()
1938 if (!priv->extend_desc) in __free_dma_rx_desc_resources()
1939 dma_free_coherent(priv->device, dma_conf->dma_rx_size * in __free_dma_rx_desc_resources()
1941 rx_q->dma_rx, rx_q->dma_rx_phy); in __free_dma_rx_desc_resources()
1943 dma_free_coherent(priv->device, dma_conf->dma_rx_size * in __free_dma_rx_desc_resources()
1945 rx_q->dma_erx, rx_q->dma_rx_phy); in __free_dma_rx_desc_resources()
1947 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) in __free_dma_rx_desc_resources()
1948 xdp_rxq_info_unreg(&rx_q->xdp_rxq); in __free_dma_rx_desc_resources()
1950 kfree(rx_q->buf_pool); in __free_dma_rx_desc_resources()
1951 if (rx_q->page_pool) in __free_dma_rx_desc_resources()
1952 page_pool_destroy(rx_q->page_pool); in __free_dma_rx_desc_resources()
1958 u32 rx_count = priv->plat->rx_queues_to_use; in free_dma_rx_desc_resources()
1967 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1976 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __free_dma_tx_desc_resources()
1983 if (priv->extend_desc) { in __free_dma_tx_desc_resources()
1985 addr = tx_q->dma_etx; in __free_dma_tx_desc_resources()
1986 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { in __free_dma_tx_desc_resources()
1988 addr = tx_q->dma_entx; in __free_dma_tx_desc_resources()
1991 addr = tx_q->dma_tx; in __free_dma_tx_desc_resources()
1994 size *= dma_conf->dma_tx_size; in __free_dma_tx_desc_resources()
1996 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); in __free_dma_tx_desc_resources()
1998 kfree(tx_q->tx_skbuff_dma); in __free_dma_tx_desc_resources()
1999 kfree(tx_q->tx_skbuff); in __free_dma_tx_desc_resources()
2005 u32 tx_count = priv->plat->tx_queues_to_use; in free_dma_tx_desc_resources()
2014 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2020 * reception, for example, it pre-allocates the RX socket buffers in order to
2021 * allow the zero-copy mechanism.
2027 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __alloc_dma_rx_desc_resources()
2028 struct stmmac_channel *ch = &priv->channel[queue]; in __alloc_dma_rx_desc_resources()
2035 rx_q->queue_index = queue; in __alloc_dma_rx_desc_resources()
2036 rx_q->priv_data = priv; in __alloc_dma_rx_desc_resources()
2039 pp_params.pool_size = dma_conf->dma_rx_size; in __alloc_dma_rx_desc_resources()
2040 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); in __alloc_dma_rx_desc_resources()
2042 pp_params.nid = dev_to_node(priv->device); in __alloc_dma_rx_desc_resources()
2043 pp_params.dev = priv->device; in __alloc_dma_rx_desc_resources()
2048 rx_q->page_pool = page_pool_create(&pp_params); in __alloc_dma_rx_desc_resources()
2049 if (IS_ERR(rx_q->page_pool)) { in __alloc_dma_rx_desc_resources()
2050 ret = PTR_ERR(rx_q->page_pool); in __alloc_dma_rx_desc_resources()
2051 rx_q->page_pool = NULL; in __alloc_dma_rx_desc_resources()
2055 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, in __alloc_dma_rx_desc_resources()
2056 sizeof(*rx_q->buf_pool), in __alloc_dma_rx_desc_resources()
2058 if (!rx_q->buf_pool) in __alloc_dma_rx_desc_resources()
2059 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2061 if (priv->extend_desc) { in __alloc_dma_rx_desc_resources()
2062 rx_q->dma_erx = dma_alloc_coherent(priv->device, in __alloc_dma_rx_desc_resources()
2063 dma_conf->dma_rx_size * in __alloc_dma_rx_desc_resources()
2065 &rx_q->dma_rx_phy, in __alloc_dma_rx_desc_resources()
2067 if (!rx_q->dma_erx) in __alloc_dma_rx_desc_resources()
2068 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2071 rx_q->dma_rx = dma_alloc_coherent(priv->device, in __alloc_dma_rx_desc_resources()
2072 dma_conf->dma_rx_size * in __alloc_dma_rx_desc_resources()
2074 &rx_q->dma_rx_phy, in __alloc_dma_rx_desc_resources()
2076 if (!rx_q->dma_rx) in __alloc_dma_rx_desc_resources()
2077 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2081 test_bit(queue, priv->af_xdp_zc_qps)) in __alloc_dma_rx_desc_resources()
2082 napi_id = ch->rxtx_napi.napi_id; in __alloc_dma_rx_desc_resources()
2084 napi_id = ch->rx_napi.napi_id; in __alloc_dma_rx_desc_resources()
2086 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, in __alloc_dma_rx_desc_resources()
2087 rx_q->queue_index, in __alloc_dma_rx_desc_resources()
2090 netdev_err(priv->dev, "Failed to register xdp rxq info\n"); in __alloc_dma_rx_desc_resources()
2091 return -EINVAL; in __alloc_dma_rx_desc_resources()
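The RX ring above draws its buffers from a page_pool created with DMA mapping and device sync handled by the pool. A minimal, self-contained sketch of that setup (editor's illustration; the parameters are assumptions, not the driver's exact values, and the page_pool include path differs between kernel versions; recent trees use net/page_pool/helpers.h):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <net/page_pool/helpers.h>

static struct page_pool *example_rx_pool(struct device *dev,
					 unsigned int ring_size,
					 unsigned int buf_size)
{
	struct page_pool_params pp = {
		.flags	   = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order	   = 0,			/* one page per buffer (assumed) */
		.pool_size = ring_size,
		.nid	   = dev_to_node(dev),
		.dev	   = dev,
		.dma_dir   = DMA_FROM_DEVICE,
		.max_len   = buf_size,
	};

	return page_pool_create(&pp);		/* check with IS_ERR() */
}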
2100 u32 rx_count = priv->plat->rx_queues_to_use; in alloc_dma_rx_desc_resources()
2120 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2126 * reception, for example, it pre-allocates the RX socket buffers in order to
2127 * allow the zero-copy mechanism.
2133 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __alloc_dma_tx_desc_resources()
2137 tx_q->queue_index = queue; in __alloc_dma_tx_desc_resources()
2138 tx_q->priv_data = priv; in __alloc_dma_tx_desc_resources()
2140 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, in __alloc_dma_tx_desc_resources()
2141 sizeof(*tx_q->tx_skbuff_dma), in __alloc_dma_tx_desc_resources()
2143 if (!tx_q->tx_skbuff_dma) in __alloc_dma_tx_desc_resources()
2144 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2146 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, in __alloc_dma_tx_desc_resources()
2149 if (!tx_q->tx_skbuff) in __alloc_dma_tx_desc_resources()
2150 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2152 if (priv->extend_desc) in __alloc_dma_tx_desc_resources()
2154 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __alloc_dma_tx_desc_resources()
2159 size *= dma_conf->dma_tx_size; in __alloc_dma_tx_desc_resources()
2161 addr = dma_alloc_coherent(priv->device, size, in __alloc_dma_tx_desc_resources()
2162 &tx_q->dma_tx_phy, GFP_KERNEL); in __alloc_dma_tx_desc_resources()
2164 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2166 if (priv->extend_desc) in __alloc_dma_tx_desc_resources()
2167 tx_q->dma_etx = addr; in __alloc_dma_tx_desc_resources()
2168 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __alloc_dma_tx_desc_resources()
2169 tx_q->dma_entx = addr; in __alloc_dma_tx_desc_resources()
2171 tx_q->dma_tx = addr; in __alloc_dma_tx_desc_resources()
2179 u32 tx_count = priv->plat->tx_queues_to_use; in alloc_dma_tx_desc_resources()
2198 * alloc_dma_desc_resources - alloc TX/RX resources.
2203 * reception, for example, it pre-allocates the RX socket buffers in order to
2204 * allow the zero-copy mechanism.
2221 * free_dma_desc_resources - free dma desc resources
2238 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2244 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_enable_rx_queues()
2249 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; in stmmac_mac_enable_rx_queues()
2250 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); in stmmac_mac_enable_rx_queues()
2255 * stmmac_start_rx_dma - start RX DMA channel
2263 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); in stmmac_start_rx_dma()
2264 stmmac_start_rx(priv, priv->ioaddr, chan); in stmmac_start_rx_dma()
2268 * stmmac_start_tx_dma - start TX DMA channel
2276 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); in stmmac_start_tx_dma()
2277 stmmac_start_tx(priv, priv->ioaddr, chan); in stmmac_start_tx_dma()
2281 * stmmac_stop_rx_dma - stop RX DMA channel
2289 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); in stmmac_stop_rx_dma()
2290 stmmac_stop_rx(priv, priv->ioaddr, chan); in stmmac_stop_rx_dma()
2294 * stmmac_stop_tx_dma - stop TX DMA channel
2302 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); in stmmac_stop_tx_dma()
2303 stmmac_stop_tx(priv, priv->ioaddr, chan); in stmmac_stop_tx_dma()
2308 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_enable_all_dma_irq()
2309 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_enable_all_dma_irq()
2314 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_enable_all_dma_irq()
2317 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_all_dma_irq()
2318 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_enable_all_dma_irq()
2319 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_all_dma_irq()
2324 * stmmac_start_all_dma - start all RX and TX DMA channels
2331 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_start_all_dma()
2332 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_start_all_dma()
2343 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2350 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_stop_all_dma()
2351 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_stop_all_dma()
2362 * stmmac_dma_operation_mode - HW DMA operation mode
2365 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2369 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_dma_operation_mode()
2370 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_dma_operation_mode()
2371 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_dma_operation_mode()
2372 int txfifosz = priv->plat->tx_fifo_size; in stmmac_dma_operation_mode()
2379 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_dma_operation_mode()
2381 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_dma_operation_mode()
2384 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { in stmmac_dma_operation_mode()
2389 if (priv->plat->force_thresh_dma_mode) { in stmmac_dma_operation_mode()
2392 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { in stmmac_dma_operation_mode()
2402 priv->xstats.threshold = SF_DMA_MODE; in stmmac_dma_operation_mode()
2410 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_dma_operation_mode()
2413 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
2415 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, in stmmac_dma_operation_mode()
2418 if (rx_q->xsk_pool) { in stmmac_dma_operation_mode()
2419 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_dma_operation_mode()
2420 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_dma_operation_mode()
2424 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_dma_operation_mode()
2425 priv->dma_conf.dma_buf_sz, in stmmac_dma_operation_mode()
2431 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
2433 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, in stmmac_dma_operation_mode()
2442 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc); in stmmac_xsk_request_timestamp()
2443 *meta_req->set_ic = true; in stmmac_xsk_request_timestamp()
2449 struct stmmac_priv *priv = tx_compl->priv; in stmmac_xsk_fill_timestamp()
2450 struct dma_desc *desc = tx_compl->desc; in stmmac_xsk_fill_timestamp()
2454 if (!priv->hwts_tx_en) in stmmac_xsk_fill_timestamp()
2459 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); in stmmac_xsk_fill_timestamp()
2461 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { in stmmac_xsk_fill_timestamp()
2466 ns -= priv->plat->cdc_error_adj; in stmmac_xsk_fill_timestamp()
2480 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit_zc()
2481 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xdp_xmit_zc()
2482 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xdp_xmit_zc()
2483 struct xsk_buff_pool *pool = tx_q->xsk_pool; in stmmac_xdp_xmit_zc()
2484 unsigned int entry = tx_q->cur_tx; in stmmac_xdp_xmit_zc()
2490 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit_zc()
2495 while (budget-- > 0) { in stmmac_xdp_xmit_zc()
2505 !netif_carrier_ok(priv->dev)) { in stmmac_xdp_xmit_zc()
2513 if (priv->est && priv->est->enable && in stmmac_xdp_xmit_zc()
2514 priv->est->max_sdu[queue] && in stmmac_xdp_xmit_zc()
2515 xdp_desc.len > priv->est->max_sdu[queue]) { in stmmac_xdp_xmit_zc()
2516 priv->xstats.max_sdu_txq_drop[queue]++; in stmmac_xdp_xmit_zc()
2520 if (likely(priv->extend_desc)) in stmmac_xdp_xmit_zc()
2521 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xdp_xmit_zc()
2522 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xdp_xmit_zc()
2523 tx_desc = &tx_q->dma_entx[entry].basic; in stmmac_xdp_xmit_zc()
2525 tx_desc = tx_q->dma_tx + entry; in stmmac_xdp_xmit_zc()
2531 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; in stmmac_xdp_xmit_zc()
2537 tx_q->tx_skbuff_dma[entry].buf = 0; in stmmac_xdp_xmit_zc()
2538 tx_q->xdpf[entry] = NULL; in stmmac_xdp_xmit_zc()
2540 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_xdp_xmit_zc()
2541 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; in stmmac_xdp_xmit_zc()
2542 tx_q->tx_skbuff_dma[entry].last_segment = true; in stmmac_xdp_xmit_zc()
2543 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_xdp_xmit_zc()
2547 tx_q->tx_count_frames++; in stmmac_xdp_xmit_zc()
2549 if (!priv->tx_coal_frames[queue]) in stmmac_xdp_xmit_zc()
2551 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) in stmmac_xdp_xmit_zc()
2562 tx_q->tx_count_frames = 0; in stmmac_xdp_xmit_zc()
2568 true, priv->mode, true, true, in stmmac_xdp_xmit_zc()
2571 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); in stmmac_xdp_xmit_zc()
2574 &tx_q->tx_skbuff_dma[entry].xsk_meta); in stmmac_xdp_xmit_zc()
2576 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_xdp_xmit_zc()
2577 entry = tx_q->cur_tx; in stmmac_xdp_xmit_zc()
2579 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_xdp_xmit_zc()
2580 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit); in stmmac_xdp_xmit_zc()
2581 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_xdp_xmit_zc()
2598 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { in stmmac_bump_dma_threshold()
2601 if (priv->plat->force_thresh_dma_mode) in stmmac_bump_dma_threshold()
2607 priv->xstats.threshold = tc; in stmmac_bump_dma_threshold()
2612 * stmmac_tx_clean - to manage the transmission completion
2624 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_clean()
2625 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_tx_clean()
2630 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2632 tx_q->xsk_frames_done = 0; in stmmac_tx_clean()
2634 entry = tx_q->dirty_tx; in stmmac_tx_clean()
2637 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { in stmmac_tx_clean()
2643 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || in stmmac_tx_clean()
2644 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { in stmmac_tx_clean()
2645 xdpf = tx_q->xdpf[entry]; in stmmac_tx_clean()
2647 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_tx_clean()
2649 skb = tx_q->tx_skbuff[entry]; in stmmac_tx_clean()
2655 if (priv->extend_desc) in stmmac_tx_clean()
2656 p = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_tx_clean()
2657 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tx_clean()
2658 p = &tx_q->dma_entx[entry].basic; in stmmac_tx_clean()
2660 p = tx_q->dma_tx + entry; in stmmac_tx_clean()
2662 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr); in stmmac_tx_clean()
2686 } else if (tx_q->xsk_pool && in stmmac_tx_clean()
2687 xp_tx_metadata_enabled(tx_q->xsk_pool)) { in stmmac_tx_clean()
2693 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta, in stmmac_tx_clean()
2699 if (likely(tx_q->tx_skbuff_dma[entry].buf && in stmmac_tx_clean()
2700 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { in stmmac_tx_clean()
2701 if (tx_q->tx_skbuff_dma[entry].map_as_page) in stmmac_tx_clean()
2702 dma_unmap_page(priv->device, in stmmac_tx_clean()
2703 tx_q->tx_skbuff_dma[entry].buf, in stmmac_tx_clean()
2704 tx_q->tx_skbuff_dma[entry].len, in stmmac_tx_clean()
2707 dma_unmap_single(priv->device, in stmmac_tx_clean()
2708 tx_q->tx_skbuff_dma[entry].buf, in stmmac_tx_clean()
2709 tx_q->tx_skbuff_dma[entry].len, in stmmac_tx_clean()
2711 tx_q->tx_skbuff_dma[entry].buf = 0; in stmmac_tx_clean()
2712 tx_q->tx_skbuff_dma[entry].len = 0; in stmmac_tx_clean()
2713 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_tx_clean()
2718 tx_q->tx_skbuff_dma[entry].last_segment = false; in stmmac_tx_clean()
2719 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_tx_clean()
2722 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { in stmmac_tx_clean()
2724 tx_q->xdpf[entry] = NULL; in stmmac_tx_clean()
2728 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { in stmmac_tx_clean()
2730 tx_q->xdpf[entry] = NULL; in stmmac_tx_clean()
2733 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) in stmmac_tx_clean()
2734 tx_q->xsk_frames_done++; in stmmac_tx_clean()
2736 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_tx_clean()
2739 bytes_compl += skb->len; in stmmac_tx_clean()
2741 tx_q->tx_skbuff[entry] = NULL; in stmmac_tx_clean()
2745 stmmac_release_tx_desc(priv, p, priv->mode); in stmmac_tx_clean()
2747 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_tx_clean()
2749 tx_q->dirty_tx = entry; in stmmac_tx_clean()
2751 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), in stmmac_tx_clean()
2754 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, in stmmac_tx_clean()
2758 netif_dbg(priv, tx_done, priv->dev, in stmmac_tx_clean()
2760 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2763 if (tx_q->xsk_pool) { in stmmac_tx_clean()
2766 if (tx_q->xsk_frames_done) in stmmac_tx_clean()
2767 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); in stmmac_tx_clean()
2769 if (xsk_uses_need_wakeup(tx_q->xsk_pool)) in stmmac_tx_clean()
2770 xsk_set_tx_need_wakeup(tx_q->xsk_pool); in stmmac_tx_clean()
2774 * available), return "budget - 1" to reenable TX IRQ. in stmmac_tx_clean()
2780 xmits = budget - 1; in stmmac_tx_clean()
2785 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && in stmmac_tx_clean()
2786 priv->eee_sw_timer_en) { in stmmac_tx_clean()
2788 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_tx_clean()
2792 if (tx_q->dirty_tx != tx_q->cur_tx) in stmmac_tx_clean()
2795 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_tx_clean()
2796 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets); in stmmac_tx_clean()
2797 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets); in stmmac_tx_clean()
2798 u64_stats_inc(&txq_stats->napi.tx_clean); in stmmac_tx_clean()
2799 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_tx_clean()
2801 priv->xstats.tx_errors += tx_errors; in stmmac_tx_clean()
2803 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
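The clean loop above ends by reporting completed packets and bytes to BQL via netdev_tx_completed_queue(); the matching producer-side call is netdev_tx_sent_queue() on the xmit path. A simplified sketch of that pairing (editor's illustration; the xmit body is elided and the helpers shown are the generic netdev API, not the driver's own code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	netdev_tx_sent_queue(txq, skb->len);	/* producer: bytes handed to HW */
	/* ... descriptor setup and doorbell ... */
	return NETDEV_TX_OK;
}

static void example_tx_clean(struct net_device *dev, unsigned int queue,
			     unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	netdev_tx_completed_queue(txq, pkts, bytes);	/* consumer: BQL credit */
}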
2810 * stmmac_tx_err - to manage the tx error
2818 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_tx_err()
2820 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2823 dma_free_tx_skbufs(priv, &priv->dma_conf, chan); in stmmac_tx_err()
2824 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); in stmmac_tx_err()
2826 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_tx_err()
2827 tx_q->dma_tx_phy, chan); in stmmac_tx_err()
2830 priv->xstats.tx_errors++; in stmmac_tx_err()
2831 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2835 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2841 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2847 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2848 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2849 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_dma_operation_mode()
2850 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_dma_operation_mode()
2851 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_set_dma_operation_mode()
2852 int txfifosz = priv->plat->tx_fifo_size; in stmmac_set_dma_operation_mode()
2855 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_set_dma_operation_mode()
2857 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_set_dma_operation_mode()
2863 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); in stmmac_set_dma_operation_mode()
2864 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); in stmmac_set_dma_operation_mode()
2871 ret = stmmac_safety_feat_irq_status(priv, priv->dev, in stmmac_safety_feat_interrupt()
2872 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); in stmmac_safety_feat_interrupt()
2873 if (ret && (ret != -EINVAL)) { in stmmac_safety_feat_interrupt()
2883 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, in stmmac_napi_check()
2884 &priv->xstats, chan, dir); in stmmac_napi_check()
2885 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_napi_check()
2886 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_napi_check()
2887 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_napi_check()
2892 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; in stmmac_napi_check()
2893 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_napi_check()
2895 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { in stmmac_napi_check()
2897 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_check()
2898 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_check()
2899 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_check()
2904 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { in stmmac_napi_check()
2906 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_check()
2907 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_check()
2908 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_check()
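The napi_schedule_prep()/__napi_schedule() calls that bracket the locked interrupt-disable above are not shown in this listing; the overall pattern, as a hedged sketch (chan_irq_disable() stands in for stmmac_disable_dma_irq()):

/* Sketch: mask this channel's DMA interrupt under the channel lock, then
 * hand the work to NAPI; the poll routine re-enables the interrupt when it
 * completes under budget (see stmmac_napi_poll_rx() further down).
 */
if (napi_schedule_prep(napi)) {
	spin_lock_irqsave(&ch->lock, flags);
	chan_irq_disable(ch);			/* placeholder helper */
	spin_unlock_irqrestore(&ch->lock, flags);
	__napi_schedule(napi);
}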
2917 * stmmac_dma_interrupt - DMA ISR
2925 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_dma_interrupt()
2926 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_dma_interrupt()
2960 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); in stmmac_mmc_setup()
2962 if (priv->dma_cap.rmon) { in stmmac_mmc_setup()
2963 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); in stmmac_mmc_setup()
2964 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); in stmmac_mmc_setup()
2966 netdev_info(priv->dev, "No MAC Management Counters available\n"); in stmmac_mmc_setup()
2970 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2980 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; in stmmac_get_hw_features()
2984 * stmmac_check_ether_addr - check if the MAC addr is valid
2994 if (!is_valid_ether_addr(priv->dev->dev_addr)) { in stmmac_check_ether_addr()
2995 stmmac_get_umac_addr(priv, priv->hw, addr, 0); in stmmac_check_ether_addr()
2997 eth_hw_addr_set(priv->dev, addr); in stmmac_check_ether_addr()
2999 eth_hw_addr_random(priv->dev); in stmmac_check_ether_addr()
3000 dev_info(priv->device, "device MAC address %pM\n", in stmmac_check_ether_addr()
3001 priv->dev->dev_addr); in stmmac_check_ether_addr()
3006 * stmmac_init_dma_engine - DMA init.
3015 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_init_dma_engine()
3016 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_init_dma_engine()
3023 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { in stmmac_init_dma_engine()
3024 dev_err(priv->device, "Invalid DMA configuration\n"); in stmmac_init_dma_engine()
3025 return -EINVAL; in stmmac_init_dma_engine()
3028 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) in stmmac_init_dma_engine()
3029 priv->plat->dma_cfg->atds = 1; in stmmac_init_dma_engine()
3031 ret = stmmac_reset(priv, priv->ioaddr); in stmmac_init_dma_engine()
3033 dev_err(priv->device, "Failed to reset the dma\n"); in stmmac_init_dma_engine()
3038 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg); in stmmac_init_dma_engine()
3040 if (priv->plat->axi) in stmmac_init_dma_engine()
3041 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); in stmmac_init_dma_engine()
3045 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_init_dma_engine()
3046 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_init_dma_engine()
3051 rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_init_dma_engine()
3053 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
3054 rx_q->dma_rx_phy, chan); in stmmac_init_dma_engine()
3056 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_init_dma_engine()
3057 (rx_q->buf_alloc_num * in stmmac_init_dma_engine()
3059 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
3060 rx_q->rx_tail_addr, chan); in stmmac_init_dma_engine()
3065 tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_init_dma_engine()
3067 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
3068 tx_q->dma_tx_phy, chan); in stmmac_init_dma_engine()
3070 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_init_dma_engine()
3071 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
3072 tx_q->tx_tail_addr, chan); in stmmac_init_dma_engine()
3080 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_timer_arm()
3081 u32 tx_coal_timer = priv->tx_coal_timer[queue]; in stmmac_tx_timer_arm()
3088 ch = &priv->channel[tx_q->queue_index]; in stmmac_tx_timer_arm()
3089 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_tx_timer_arm()
3096 hrtimer_start(&tx_q->txtimer, in stmmac_tx_timer_arm()
3100 hrtimer_try_to_cancel(&tx_q->txtimer); in stmmac_tx_timer_arm()
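The timeout argument to hrtimer_start() is cut off in the listing above; assuming tx_coal_timer holds microseconds (the ethtool tx-usecs value), arming the mitigation timer boils down to:

/* Hedged sketch: convert the microsecond coalesce value to ktime and arm a
 * relative hrtimer; the driver's exact conversion macro is not shown here.
 */
hrtimer_start(&tx_q->txtimer,
	      ns_to_ktime((u64)tx_coal_timer * NSEC_PER_USEC),
	      HRTIMER_MODE_REL);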
3104 * stmmac_tx_timer - mitigation sw timer for tx.
3112 struct stmmac_priv *priv = tx_q->priv_data; in stmmac_tx_timer()
3116 ch = &priv->channel[tx_q->queue_index]; in stmmac_tx_timer()
3117 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_tx_timer()
3122 spin_lock_irqsave(&ch->lock, flags); in stmmac_tx_timer()
3123 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); in stmmac_tx_timer()
3124 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_tx_timer()
3132 * stmmac_init_coalesce - init mitigation options.
3141 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_init_coalesce()
3142 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_init_coalesce()
3146 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_init_coalesce()
3148 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; in stmmac_init_coalesce()
3149 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; in stmmac_init_coalesce()
3151 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in stmmac_init_coalesce()
3152 tx_q->txtimer.function = stmmac_tx_timer; in stmmac_init_coalesce()
3156 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; in stmmac_init_coalesce()
3161 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_rings_length()
3162 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_rings_length()
3167 stmmac_set_tx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
3168 (priv->dma_conf.dma_tx_size - 1), chan); in stmmac_set_rings_length()
3172 stmmac_set_rx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
3173 (priv->dma_conf.dma_rx_size - 1), chan); in stmmac_set_rings_length()
3177 * stmmac_set_tx_queue_weight - Set TX queue weight
3183 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_set_tx_queue_weight()
3188 weight = priv->plat->tx_queues_cfg[queue].weight; in stmmac_set_tx_queue_weight()
3189 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); in stmmac_set_tx_queue_weight()
3194 * stmmac_configure_cbs - Configure CBS in TX queue
3200 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_configure_cbs()
3206 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; in stmmac_configure_cbs()
3210 stmmac_config_cbs(priv, priv->hw, in stmmac_configure_cbs()
3211 priv->plat->tx_queues_cfg[queue].send_slope, in stmmac_configure_cbs()
3212 priv->plat->tx_queues_cfg[queue].idle_slope, in stmmac_configure_cbs()
3213 priv->plat->tx_queues_cfg[queue].high_credit, in stmmac_configure_cbs()
3214 priv->plat->tx_queues_cfg[queue].low_credit, in stmmac_configure_cbs()
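For the slopes handed to stmmac_config_cbs() above, the usual IEEE 802.1Qav relationships apply; the register encoding of these values is hardware-specific and not shown in this listing, so the numbers below are an illustrative calculation only:

/* Reserve 40% of a 1 Gbit/s port for this queue (values in kbit/s). */
long port_rate  = 1000000;
long idle_slope = port_rate * 40 / 100;		/* credit gained while waiting:  400000 */
long send_slope = idle_slope - port_rate;	/* credit lost while sending:   -600000 */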
3220 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3226 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_rx_queue_dma_chan_map()
3231 chan = priv->plat->rx_queues_cfg[queue].chan; in stmmac_rx_queue_dma_chan_map()
3232 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); in stmmac_rx_queue_dma_chan_map()
3237 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3243 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_prio()
3248 if (!priv->plat->rx_queues_cfg[queue].use_prio) in stmmac_mac_config_rx_queues_prio()
3251 prio = priv->plat->rx_queues_cfg[queue].prio; in stmmac_mac_config_rx_queues_prio()
3252 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_rx_queues_prio()
3257 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3263 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mac_config_tx_queues_prio()
3268 if (!priv->plat->tx_queues_cfg[queue].use_prio) in stmmac_mac_config_tx_queues_prio()
3271 prio = priv->plat->tx_queues_cfg[queue].prio; in stmmac_mac_config_tx_queues_prio()
3272 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_tx_queues_prio()
3277 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3283 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_routing()
3289 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) in stmmac_mac_config_rx_queues_routing()
3292 packet = priv->plat->rx_queues_cfg[queue].pkt_route; in stmmac_mac_config_rx_queues_routing()
3293 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); in stmmac_mac_config_rx_queues_routing()
3299 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { in stmmac_mac_config_rss()
3300 priv->rss.enable = false; in stmmac_mac_config_rss()
3304 if (priv->dev->features & NETIF_F_RXHASH) in stmmac_mac_config_rss()
3305 priv->rss.enable = true; in stmmac_mac_config_rss()
3307 priv->rss.enable = false; in stmmac_mac_config_rss()
3309 stmmac_rss_configure(priv, priv->hw, &priv->rss, in stmmac_mac_config_rss()
3310 priv->plat->rx_queues_to_use); in stmmac_mac_config_rss()
3314 * stmmac_mtl_configuration - Configure MTL
3320 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mtl_configuration()
3321 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mtl_configuration()
3328 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
3329 priv->plat->rx_sched_algorithm); in stmmac_mtl_configuration()
3333 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
3334 priv->plat->tx_sched_algorithm); in stmmac_mtl_configuration()
3365 if (priv->dma_cap.asp) { in stmmac_safety_feat_configuration()
3366 netdev_info(priv->dev, "Enabling Safety Features\n"); in stmmac_safety_feat_configuration()
3367 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, in stmmac_safety_feat_configuration()
3368 priv->plat->safety_feat_cfg); in stmmac_safety_feat_configuration()
3370 netdev_info(priv->dev, "No Safety Features support found\n"); in stmmac_safety_feat_configuration()
3375 * stmmac_hw_setup - setup mac in a usable state.
3384 * 0 on success or an appropriate negative errno value, as defined in errno.h in stmmac_hw_setup()
3390 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_hw_setup()
3391 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_hw_setup()
3397 if (priv->hw->phylink_pcs) in stmmac_hw_setup()
3398 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs); in stmmac_hw_setup()
3403 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", in stmmac_hw_setup()
3409 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); in stmmac_hw_setup()
3412 if (priv->hw->pcs) { in stmmac_hw_setup()
3413 int speed = priv->plat->mac_port_sel_speed; in stmmac_hw_setup()
3417 priv->hw->ps = speed; in stmmac_hw_setup()
3419 dev_warn(priv->device, "invalid port speed\n"); in stmmac_hw_setup()
3420 priv->hw->ps = 0; in stmmac_hw_setup()
3425 stmmac_core_init(priv, priv->hw, dev); in stmmac_hw_setup()
3433 ret = stmmac_rx_ipc(priv, priv->hw); in stmmac_hw_setup()
3435 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); in stmmac_hw_setup()
3436 priv->plat->rx_coe = STMMAC_RX_COE_NONE; in stmmac_hw_setup()
3437 priv->hw->rx_csum = 0; in stmmac_hw_setup()
3441 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_hw_setup()
3449 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); in stmmac_hw_setup()
3451 netdev_warn(priv->dev, in stmmac_hw_setup()
3457 if (ret == -EOPNOTSUPP) in stmmac_hw_setup()
3458 netdev_info(priv->dev, "PTP not supported by HW\n"); in stmmac_hw_setup()
3460 netdev_warn(priv->dev, "PTP init failed\n"); in stmmac_hw_setup()
3464 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; in stmmac_hw_setup()
3467 if (!priv->tx_lpi_timer) in stmmac_hw_setup()
3468 priv->tx_lpi_timer = eee_timer * 1000; in stmmac_hw_setup()
3470 if (priv->use_riwt) { in stmmac_hw_setup()
3474 if (!priv->rx_riwt[queue]) in stmmac_hw_setup()
3475 priv->rx_riwt[queue] = DEF_DMA_RIWT; in stmmac_hw_setup()
3477 stmmac_rx_watchdog(priv, priv->ioaddr, in stmmac_hw_setup()
3478 priv->rx_riwt[queue], queue); in stmmac_hw_setup()
3482 if (priv->hw->pcs) in stmmac_hw_setup()
3483 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); in stmmac_hw_setup()
3489 if (priv->tso) { in stmmac_hw_setup()
3491 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_hw_setup()
3493 /* TSO and TBS cannot co-exist */ in stmmac_hw_setup()
3494 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_hw_setup()
3497 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); in stmmac_hw_setup()
3502 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_hw_setup()
3504 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_hw_setup()
3508 if (priv->dma_cap.vlins) in stmmac_hw_setup()
3509 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); in stmmac_hw_setup()
3513 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_hw_setup()
3514 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; in stmmac_hw_setup()
3516 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); in stmmac_hw_setup()
3520 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); in stmmac_hw_setup()
3521 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); in stmmac_hw_setup()
3526 stmmac_set_hw_vlan_mode(priv, priv->hw); in stmmac_hw_setup()
3535 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_hw_teardown()
3546 irq_idx = priv->plat->tx_queues_to_use; in stmmac_free_irq()
3549 for (j = irq_idx - 1; j >= 0; j--) { in stmmac_free_irq()
3550 if (priv->tx_irq[j] > 0) { in stmmac_free_irq()
3551 irq_set_affinity_hint(priv->tx_irq[j], NULL); in stmmac_free_irq()
3552 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); in stmmac_free_irq()
3555 irq_idx = priv->plat->rx_queues_to_use; in stmmac_free_irq()
3558 for (j = irq_idx - 1; j >= 0; j--) { in stmmac_free_irq()
3559 if (priv->rx_irq[j] > 0) { in stmmac_free_irq()
3560 irq_set_affinity_hint(priv->rx_irq[j], NULL); in stmmac_free_irq()
3561 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); in stmmac_free_irq()
3565 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) in stmmac_free_irq()
3566 free_irq(priv->sfty_ue_irq, dev); in stmmac_free_irq()
3569 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) in stmmac_free_irq()
3570 free_irq(priv->sfty_ce_irq, dev); in stmmac_free_irq()
3573 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) in stmmac_free_irq()
3574 free_irq(priv->lpi_irq, dev); in stmmac_free_irq()
3577 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) in stmmac_free_irq()
3578 free_irq(priv->wol_irq, dev); in stmmac_free_irq()
3581 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) in stmmac_free_irq()
3582 free_irq(priv->sfty_irq, dev); in stmmac_free_irq()
3585 free_irq(dev->irq, dev); in stmmac_free_irq()
3605 int_name = priv->int_name_mac; in stmmac_request_irq_multi_msi()
3606 sprintf(int_name, "%s:%s", dev->name, "mac"); in stmmac_request_irq_multi_msi()
3607 ret = request_irq(dev->irq, stmmac_mac_interrupt, in stmmac_request_irq_multi_msi()
3610 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3612 __func__, dev->irq, ret); in stmmac_request_irq_multi_msi()
3620 priv->wol_irq_disabled = true; in stmmac_request_irq_multi_msi()
3621 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3622 int_name = priv->int_name_wol; in stmmac_request_irq_multi_msi()
3623 sprintf(int_name, "%s:%s", dev->name, "wol"); in stmmac_request_irq_multi_msi()
3624 ret = request_irq(priv->wol_irq, in stmmac_request_irq_multi_msi()
3628 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3630 __func__, priv->wol_irq, ret); in stmmac_request_irq_multi_msi()
3639 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3640 int_name = priv->int_name_lpi; in stmmac_request_irq_multi_msi()
3641 sprintf(int_name, "%s:%s", dev->name, "lpi"); in stmmac_request_irq_multi_msi()
3642 ret = request_irq(priv->lpi_irq, in stmmac_request_irq_multi_msi()
3646 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3648 __func__, priv->lpi_irq, ret); in stmmac_request_irq_multi_msi()
3657 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3658 int_name = priv->int_name_sfty; in stmmac_request_irq_multi_msi()
3659 sprintf(int_name, "%s:%s", dev->name, "safety"); in stmmac_request_irq_multi_msi()
3660 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, in stmmac_request_irq_multi_msi()
3663 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3665 __func__, priv->sfty_irq, ret); in stmmac_request_irq_multi_msi()
3674 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3675 int_name = priv->int_name_sfty_ce; in stmmac_request_irq_multi_msi()
3676 sprintf(int_name, "%s:%s", dev->name, "safety-ce"); in stmmac_request_irq_multi_msi()
3677 ret = request_irq(priv->sfty_ce_irq, in stmmac_request_irq_multi_msi()
3681 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3683 __func__, priv->sfty_ce_irq, ret); in stmmac_request_irq_multi_msi()
3692 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3693 int_name = priv->int_name_sfty_ue; in stmmac_request_irq_multi_msi()
3694 sprintf(int_name, "%s:%s", dev->name, "safety-ue"); in stmmac_request_irq_multi_msi()
3695 ret = request_irq(priv->sfty_ue_irq, in stmmac_request_irq_multi_msi()
3699 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3701 __func__, priv->sfty_ue_irq, ret); in stmmac_request_irq_multi_msi()
3708 for (i = 0; i < priv->plat->rx_queues_to_use; i++) { in stmmac_request_irq_multi_msi()
3711 if (priv->rx_irq[i] == 0) in stmmac_request_irq_multi_msi()
3714 int_name = priv->int_name_rx_irq[i]; in stmmac_request_irq_multi_msi()
3715 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); in stmmac_request_irq_multi_msi()
3716 ret = request_irq(priv->rx_irq[i], in stmmac_request_irq_multi_msi()
3718 0, int_name, &priv->dma_conf.rx_queue[i]); in stmmac_request_irq_multi_msi()
3720 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3721 "%s: alloc rx-%d MSI %d (error: %d)\n", in stmmac_request_irq_multi_msi()
3722 __func__, i, priv->rx_irq[i], ret); in stmmac_request_irq_multi_msi()
3729 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); in stmmac_request_irq_multi_msi()
3733 for (i = 0; i < priv->plat->tx_queues_to_use; i++) { in stmmac_request_irq_multi_msi()
3736 if (priv->tx_irq[i] == 0) in stmmac_request_irq_multi_msi()
3739 int_name = priv->int_name_tx_irq[i]; in stmmac_request_irq_multi_msi()
3740 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); in stmmac_request_irq_multi_msi()
3741 ret = request_irq(priv->tx_irq[i], in stmmac_request_irq_multi_msi()
3743 0, int_name, &priv->dma_conf.tx_queue[i]); in stmmac_request_irq_multi_msi()
3745 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3746 "%s: alloc tx-%d MSI %d (error: %d)\n", in stmmac_request_irq_multi_msi()
3747 __func__, i, priv->tx_irq[i], ret); in stmmac_request_irq_multi_msi()
3754 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); in stmmac_request_irq_multi_msi()
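The cpu_mask used for the affinity hints above is assembled a few lines earlier and not shown in this listing; a hedged sketch of one common way to spread the per-queue vectors across CPUs:

cpumask_t cpu_mask;

cpumask_clear(&cpu_mask);
cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);	/* queue i -> CPU (i mod N) */
irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);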
3770 ret = request_irq(dev->irq, stmmac_interrupt, in stmmac_request_irq_single()
3771 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3773 netdev_err(priv->dev, in stmmac_request_irq_single()
3775 __func__, dev->irq, ret); in stmmac_request_irq_single()
3783 priv->wol_irq_disabled = true; in stmmac_request_irq_single()
3784 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { in stmmac_request_irq_single()
3785 ret = request_irq(priv->wol_irq, stmmac_interrupt, in stmmac_request_irq_single()
3786 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3788 netdev_err(priv->dev, in stmmac_request_irq_single()
3790 __func__, priv->wol_irq, ret); in stmmac_request_irq_single()
3797 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { in stmmac_request_irq_single()
3798 ret = request_irq(priv->lpi_irq, stmmac_interrupt, in stmmac_request_irq_single()
3799 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3801 netdev_err(priv->dev, in stmmac_request_irq_single()
3803 __func__, priv->lpi_irq, ret); in stmmac_request_irq_single()
3812 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { in stmmac_request_irq_single()
3813 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, in stmmac_request_irq_single()
3814 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3816 netdev_err(priv->dev, in stmmac_request_irq_single()
3818 __func__, priv->sfty_irq, ret); in stmmac_request_irq_single()
3837 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) in stmmac_request_irq()
3846 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3862 netdev_err(priv->dev, "%s: DMA conf allocation failed\n", in stmmac_setup_dma_desc()
3864 return ERR_PTR(-ENOMEM); in stmmac_setup_dma_desc()
3874 dma_conf->dma_buf_sz = bfsize; in stmmac_setup_dma_desc()
3878 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; in stmmac_setup_dma_desc()
3879 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; in stmmac_setup_dma_desc()
3881 if (!dma_conf->dma_tx_size) in stmmac_setup_dma_desc()
3882 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; in stmmac_setup_dma_desc()
3883 if (!dma_conf->dma_rx_size) in stmmac_setup_dma_desc()
3884 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; in stmmac_setup_dma_desc()
3887 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { in stmmac_setup_dma_desc()
3888 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; in stmmac_setup_dma_desc()
3889 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; in stmmac_setup_dma_desc()
3891 /* Setup per-TXQ tbs flag before TX descriptor alloc */ in stmmac_setup_dma_desc()
3892 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; in stmmac_setup_dma_desc()
3897 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", in stmmac_setup_dma_desc()
3902 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); in stmmac_setup_dma_desc()
3904 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", in stmmac_setup_dma_desc()
3919 * __stmmac_open - open entry point of the driver
3925 * 0 on success or an appropriate negative errno value, as defined in errno.h in __stmmac_open()
3932 int mode = priv->plat->phy_interface; in __stmmac_open()
3936 ret = pm_runtime_resume_and_get(priv->device); in __stmmac_open()
3940 if ((!priv->hw->xpcs || in __stmmac_open()
3941 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { in __stmmac_open()
3944 netdev_err(priv->dev, in __stmmac_open()
3951 priv->rx_copybreak = STMMAC_RX_COPYBREAK; in __stmmac_open()
3953 buf_sz = dma_conf->dma_buf_sz; in __stmmac_open()
3955 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) in __stmmac_open()
3956 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; in __stmmac_open()
3957 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); in __stmmac_open()
3961 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in __stmmac_open()
3962 priv->plat->serdes_powerup) { in __stmmac_open()
3963 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); in __stmmac_open()
3965 netdev_err(priv->dev, "%s: Serdes powerup failed\n", in __stmmac_open()
3973 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); in __stmmac_open()
3979 phylink_start(priv->phylink); in __stmmac_open()
3981 phylink_speed_up(priv->phylink); in __stmmac_open()
3988 netif_tx_start_all_queues(priv->dev); in __stmmac_open()
3994 phylink_stop(priv->phylink); in __stmmac_open()
3996 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in __stmmac_open()
3997 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in __stmmac_open()
4001 phylink_disconnect_phy(priv->phylink); in __stmmac_open()
4003 pm_runtime_put(priv->device); in __stmmac_open()
4013 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); in stmmac_open()
4026 * stmmac_release - close entry point of the driver
4036 if (device_may_wakeup(priv->device)) in stmmac_release()
4037 phylink_speed_down(priv->phylink, false); in stmmac_release()
4039 phylink_stop(priv->phylink); in stmmac_release()
4040 phylink_disconnect_phy(priv->phylink); in stmmac_release()
4044 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_release()
4045 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_release()
4052 if (priv->eee_enabled) { in stmmac_release()
4053 priv->tx_path_in_lpi_mode = false; in stmmac_release()
4054 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_release()
4061 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_release()
4064 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_release()
4067 if (priv->plat->serdes_powerdown) in stmmac_release()
4068 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); in stmmac_release()
4072 if (priv->dma_cap.fpesel) in stmmac_release()
4073 timer_shutdown_sync(&priv->fpe_cfg.verify_timer); in stmmac_release()
4075 pm_runtime_put(priv->device); in stmmac_release()
4087 if (!priv->dma_cap.vlins) in stmmac_vlan_insert()
4091 if (skb->vlan_proto == htons(ETH_P_8021AD)) { in stmmac_vlan_insert()
4098 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_vlan_insert()
4099 p = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_vlan_insert()
4101 p = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_vlan_insert()
4107 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_vlan_insert()
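STMMAC_GET_ENTRY() advances a ring index with wrap-around; its definition is not part of this listing, but assuming power-of-two ring sizes the wrap can be expressed as a mask rather than a modulo:

/* Hypothetical equivalent, illustration only: */
#define RING_NEXT(x, size)	(((x) + 1) & ((size) - 1))
/* e.g. RING_NEXT(511, 512) == 0, RING_NEXT(0, 512) == 1 */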
4112 * stmmac_tso_allocator - fill TSO descriptors for a payload buffer in stmmac_vlan_insert()
4125 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tso_allocator()
4135 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, in stmmac_tso_allocator()
4136 priv->dma_conf.dma_tx_size); in stmmac_tso_allocator()
4137 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); in stmmac_tso_allocator()
4139 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_allocator()
4140 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_allocator()
4142 desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_allocator()
4144 curr_addr = des + (total_len - tmp_len); in stmmac_tso_allocator()
4145 if (priv->dma_cap.addr64 <= 32) in stmmac_tso_allocator()
4146 desc->des0 = cpu_to_le32(curr_addr); in stmmac_tso_allocator()
4158 tmp_len -= TSO_MAX_BUFF_SIZE; in stmmac_tso_allocator()
4164 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_flush_tx_descriptors()
4167 if (likely(priv->extend_desc)) in stmmac_flush_tx_descriptors()
4169 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_flush_tx_descriptors()
4180 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); in stmmac_flush_tx_descriptors()
4181 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); in stmmac_flush_tx_descriptors()
4185 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4193 * --------
4194 * | DES0 |---> buffer1 = L2/L3/L4 header
4195 * | DES1 |---> TCP Payload (can continue on next descr...)
4196 * | DES2 |---> buffer 1 and 2 len
4197 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4198 * --------
4202 * --------
4203 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4204 * | DES1 | --|
4205 * | DES2 | --> buffer 1 and 2 len
4207 * --------
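A standalone example of the descriptor estimate used in the queue-availability check just below; TSO_MAX_BUFF_SIZE is assumed here to be the per-descriptor buffer limit (16K-1 in this illustration), and the header/payload split follows the diagram above:

#include <stdio.h>

#define TSO_MAX_BUFF_SIZE (16 * 1024 - 1)	/* assumed per-descriptor limit */

int main(void)
{
	unsigned int skb_len = 65226;		/* example GSO super-frame        */
	unsigned int proto_hdr_len = 66;	/* e.g. Ethernet + IPv4 + TCP     */
	unsigned int payload = skb_len - proto_hdr_len;

	/* Same estimate as the availability check in the function below. */
	unsigned int ndesc = payload / TSO_MAX_BUFF_SIZE + 1;

	printf("payload %u bytes -> about %u descriptors\n", payload, ndesc);
	return 0;
}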
4228 * TSO engine will be un-tagged by mistake. in stmmac_tso_xmit()
4233 priv->xstats.tx_dropped++; in stmmac_tso_xmit()
4238 nfrags = skb_shinfo(skb)->nr_frags; in stmmac_tso_xmit()
4241 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tso_xmit()
4242 txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_tso_xmit()
4243 first_tx = tx_q->cur_tx; in stmmac_tso_xmit()
4246 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in stmmac_tso_xmit()
4256 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { in stmmac_tso_xmit()
4258 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_tso_xmit()
4261 netdev_err(priv->dev, in stmmac_tso_xmit()
4268 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ in stmmac_tso_xmit()
4270 mss = skb_shinfo(skb)->gso_size; in stmmac_tso_xmit()
4273 if (mss != tx_q->mss) { in stmmac_tso_xmit()
4274 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4275 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_xmit()
4277 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_xmit()
4280 tx_q->mss = mss; in stmmac_tso_xmit()
4281 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, in stmmac_tso_xmit()
4282 priv->dma_conf.dma_tx_size); in stmmac_tso_xmit()
4283 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); in stmmac_tso_xmit()
4289 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, in stmmac_tso_xmit()
4290 skb->data_len); in stmmac_tso_xmit()
4293 first_entry = tx_q->cur_tx; in stmmac_tso_xmit()
4294 WARN_ON(tx_q->tx_skbuff[first_entry]); in stmmac_tso_xmit()
4296 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4297 desc = &tx_q->dma_entx[first_entry].basic; in stmmac_tso_xmit()
4299 desc = &tx_q->dma_tx[first_entry]; in stmmac_tso_xmit()
4303 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), in stmmac_tso_xmit()
4305 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
4308 if (priv->dma_cap.addr64 <= 32) { in stmmac_tso_xmit()
4309 first->des0 = cpu_to_le32(des); in stmmac_tso_xmit()
4313 first->des1 = cpu_to_le32(des + proto_hdr_len); in stmmac_tso_xmit()
4316 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; in stmmac_tso_xmit()
4327 * non-paged SKB data, the DMA buffer address should be saved to in stmmac_tso_xmit()
4328 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor, in stmmac_tso_xmit()
4329 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee in stmmac_tso_xmit()
4333 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf in stmmac_tso_xmit()
4338 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; in stmmac_tso_xmit()
4339 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb); in stmmac_tso_xmit()
4340 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false; in stmmac_tso_xmit()
4341 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4345 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in stmmac_tso_xmit()
4347 des = skb_frag_dma_map(priv->device, frag, 0, in stmmac_tso_xmit()
4350 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
4354 (i == nfrags - 1), queue); in stmmac_tso_xmit()
4356 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; in stmmac_tso_xmit()
4357 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); in stmmac_tso_xmit()
4358 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; in stmmac_tso_xmit()
4359 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4362 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; in stmmac_tso_xmit()
4365 tx_q->tx_skbuff[tx_q->cur_tx] = skb; in stmmac_tso_xmit()
4366 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4369 tx_packets = (tx_q->cur_tx + 1) - first_tx; in stmmac_tso_xmit()
4370 tx_q->tx_count_frames += tx_packets; in stmmac_tso_xmit()
4372 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_tso_xmit()
4374 else if (!priv->tx_coal_frames[queue]) in stmmac_tso_xmit()
4376 else if (tx_packets > priv->tx_coal_frames[queue]) in stmmac_tso_xmit()
4378 else if ((tx_q->tx_count_frames % in stmmac_tso_xmit()
4379 priv->tx_coal_frames[queue]) < tx_packets) in stmmac_tso_xmit()
4385 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4386 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_xmit()
4388 desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_xmit()
4390 tx_q->tx_count_frames = 0; in stmmac_tso_xmit()
4399 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_tso_xmit()
4402 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", in stmmac_tso_xmit()
4404 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tso_xmit()
4407 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_tso_xmit()
4408 u64_stats_add(&txq_stats->q.tx_bytes, skb->len); in stmmac_tso_xmit()
4409 u64_stats_inc(&txq_stats->q.tx_tso_frames); in stmmac_tso_xmit()
4410 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags); in stmmac_tso_xmit()
4412 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_tso_xmit()
4413 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_tso_xmit()
4415 if (priv->sarc_type) in stmmac_tso_xmit()
4416 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_tso_xmit()
4420 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in stmmac_tso_xmit()
4421 priv->hwts_tx_en)) { in stmmac_tso_xmit()
4423 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in stmmac_tso_xmit()
4431 1, tx_q->tx_skbuff_dma[first_entry].last_segment, in stmmac_tso_xmit()
4432 hdr / 4, (skb->len - proto_hdr_len)); in stmmac_tso_xmit()
4447 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, in stmmac_tso_xmit()
4448 tx_q->cur_tx, first, nfrags); in stmmac_tso_xmit()
4450 print_pkt(skb->data, skb_headlen(skb)); in stmmac_tso_xmit()
4453 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); in stmmac_tso_xmit()
4461 dev_err(priv->device, "Tx dma map failed\n"); in stmmac_tso_xmit()
4463 priv->xstats.tx_dropped++; in stmmac_tso_xmit()
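The interrupt-on-completion decision above (mirrored later in the plain stmmac_xmit() path) reads as a small predicate. The outcome of each branch is not visible in this listing, so the polarity below -- in particular treating a zero coalesce-frames setting as "defer to the TX timer" -- is a hedged reading, not a quote of the driver:

#include <stdbool.h>

static bool want_tx_irq(bool hw_tstamp, unsigned int coal_frames,
			unsigned int tx_packets, unsigned int count_frames)
{
	if (hw_tstamp)			/* hardware-timestamped frame        */
		return true;
	if (!coal_frames)		/* frame coalescing off: use timer   */
		return false;
	if (tx_packets > coal_frames)	/* burst exceeds the coalesce budget */
		return true;
	/* counter just crossed a multiple of the coalesce budget */
	return (count_frames % coal_frames) < tx_packets;
}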
4468 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4490 * stmmac_xmit - Tx entry point of the driver
4504 int nfrags = skb_shinfo(skb)->nr_frags; in stmmac_xmit()
4505 int gso = skb_shinfo(skb)->gso_type; in stmmac_xmit()
4514 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xmit()
4515 txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xmit()
4516 first_tx = tx_q->cur_tx; in stmmac_xmit()
4518 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) in stmmac_xmit()
4522 if (skb_is_gso(skb) && priv->tso) { in stmmac_xmit()
4525 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) in stmmac_xmit()
4529 if (priv->est && priv->est->enable && in stmmac_xmit()
4530 priv->est->max_sdu[queue] && in stmmac_xmit()
4531 skb->len > priv->est->max_sdu[queue]) { in stmmac_xmit()
4532 priv->xstats.max_sdu_txq_drop[queue]++; in stmmac_xmit()
4538 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_xmit()
4541 netdev_err(priv->dev, in stmmac_xmit()
4551 entry = tx_q->cur_tx; in stmmac_xmit()
4553 WARN_ON(tx_q->tx_skbuff[first_entry]); in stmmac_xmit()
4555 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); in stmmac_xmit()
4560 * Packets that won't trigger the COE e.g. most DSA-tagged packets will in stmmac_xmit()
4564 (priv->plat->tx_queues_cfg[queue].coe_unsupported || in stmmac_xmit()
4571 if (likely(priv->extend_desc)) in stmmac_xmit()
4572 desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xmit()
4573 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4574 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4576 desc = tx_q->dma_tx + entry; in stmmac_xmit()
4583 enh_desc = priv->plat->enh_desc; in stmmac_xmit()
4586 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); in stmmac_xmit()
4590 if (unlikely(entry < 0) && (entry != -EINVAL)) in stmmac_xmit()
4595 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in stmmac_xmit()
4597 bool last_segment = (i == (nfrags - 1)); in stmmac_xmit()
4599 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xmit()
4600 WARN_ON(tx_q->tx_skbuff[entry]); in stmmac_xmit()
4602 if (likely(priv->extend_desc)) in stmmac_xmit()
4603 desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xmit()
4604 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4605 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4607 desc = tx_q->dma_tx + entry; in stmmac_xmit()
4609 des = skb_frag_dma_map(priv->device, frag, 0, len, in stmmac_xmit()
4611 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
4614 tx_q->tx_skbuff_dma[entry].buf = des; in stmmac_xmit()
4618 tx_q->tx_skbuff_dma[entry].map_as_page = true; in stmmac_xmit()
4619 tx_q->tx_skbuff_dma[entry].len = len; in stmmac_xmit()
4620 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; in stmmac_xmit()
4621 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4625 priv->mode, 1, last_segment, skb->len); in stmmac_xmit()
4629 tx_q->tx_skbuff[entry] = skb; in stmmac_xmit()
4630 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4633 * segment is reset and the timer re-started to clean the tx status. in stmmac_xmit()
4637 tx_packets = (entry + 1) - first_tx; in stmmac_xmit()
4638 tx_q->tx_count_frames += tx_packets; in stmmac_xmit()
4640 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_xmit()
4642 else if (!priv->tx_coal_frames[queue]) in stmmac_xmit()
4644 else if (tx_packets > priv->tx_coal_frames[queue]) in stmmac_xmit()
4646 else if ((tx_q->tx_count_frames % in stmmac_xmit()
4647 priv->tx_coal_frames[queue]) < tx_packets) in stmmac_xmit()
4653 if (likely(priv->extend_desc)) in stmmac_xmit()
4654 desc = &tx_q->dma_etx[entry].basic; in stmmac_xmit()
4655 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4656 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4658 desc = &tx_q->dma_tx[entry]; in stmmac_xmit()
4660 tx_q->tx_count_frames = 0; in stmmac_xmit()
4669 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xmit()
4670 tx_q->cur_tx = entry; in stmmac_xmit()
4673 netdev_dbg(priv->dev, in stmmac_xmit()
4675 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, in stmmac_xmit()
4678 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); in stmmac_xmit()
4679 print_pkt(skb->data, skb->len); in stmmac_xmit()
4683 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", in stmmac_xmit()
4685 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_xmit()
4688 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_xmit()
4689 u64_stats_add(&txq_stats->q.tx_bytes, skb->len); in stmmac_xmit()
4691 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_xmit()
4692 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_xmit()
4694 if (priv->sarc_type) in stmmac_xmit()
4695 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_xmit()
4706 des = dma_map_single(priv->device, skb->data, in stmmac_xmit()
4708 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
4711 tx_q->tx_skbuff_dma[first_entry].buf = des; in stmmac_xmit()
4712 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4713 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; in stmmac_xmit()
4717 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; in stmmac_xmit()
4718 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; in stmmac_xmit()
4720 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in stmmac_xmit()
4721 priv->hwts_tx_en)) { in stmmac_xmit()
4723 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in stmmac_xmit()
4729 csum_insertion, priv->mode, 0, last_segment, in stmmac_xmit()
4730 skb->len); in stmmac_xmit()
4733 if (tx_q->tbs & STMMAC_TBS_EN) { in stmmac_xmit()
4734 struct timespec64 ts = ns_to_timespec64(skb->tstamp); in stmmac_xmit()
4736 tbs_desc = &tx_q->dma_entx[first_entry]; in stmmac_xmit()
4742 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); in stmmac_xmit()
4744 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); in stmmac_xmit()
4752 netdev_err(priv->dev, "Tx DMA map failed\n"); in stmmac_xmit()
4755 priv->xstats.tx_dropped++; in stmmac_xmit()
4762 __be16 vlan_proto = veth->h_vlan_proto; in stmmac_rx_vlan()
4766 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || in stmmac_rx_vlan()
4768 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { in stmmac_rx_vlan()
4770 vlanid = ntohs(veth->h_vlan_TCI); in stmmac_rx_vlan()
4771 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); in stmmac_rx_vlan()
4778 * stmmac_rx_refill - refill used skb preallocated buffers
4782 * that is based on zero-copy.
4786 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_refill()
4788 unsigned int entry = rx_q->dirty_rx; in stmmac_rx_refill()
4791 if (priv->dma_cap.host_dma_width <= 32) in stmmac_rx_refill()
4794 while (dirty-- > 0) { in stmmac_rx_refill()
4795 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; in stmmac_rx_refill()
4799 if (priv->extend_desc) in stmmac_rx_refill()
4800 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_refill()
4802 p = rx_q->dma_rx + entry; in stmmac_rx_refill()
4804 if (!buf->page) { in stmmac_rx_refill()
4805 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_rx_refill()
4806 if (!buf->page) in stmmac_rx_refill()
4810 if (priv->sph && !buf->sec_page) { in stmmac_rx_refill()
4811 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_rx_refill()
4812 if (!buf->sec_page) in stmmac_rx_refill()
4815 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); in stmmac_rx_refill()
4818 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; in stmmac_rx_refill()
4820 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_rx_refill()
4821 if (priv->sph) in stmmac_rx_refill()
4822 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); in stmmac_rx_refill()
4824 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); in stmmac_rx_refill()
4827 rx_q->rx_count_frames++; in stmmac_rx_refill()
4828 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; in stmmac_rx_refill()
4829 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) in stmmac_rx_refill()
4830 rx_q->rx_count_frames = 0; in stmmac_rx_refill()
4832 use_rx_wd = !priv->rx_coal_frames[queue]; in stmmac_rx_refill()
4833 use_rx_wd |= rx_q->rx_count_frames > 0; in stmmac_rx_refill()
4834 if (!priv->use_riwt) in stmmac_rx_refill()
4840 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); in stmmac_rx_refill()
4842 rx_q->dirty_rx = entry; in stmmac_rx_refill()
4843 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_rx_refill()
4844 (rx_q->dirty_rx * sizeof(struct dma_desc)); in stmmac_rx_refill()
4845 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill()
4853 int coe = priv->hw->rx_csum; in stmmac_rx_buf1_len()
4856 if (priv->sph && len) in stmmac_rx_buf1_len()
4861 if (priv->sph && hlen) { in stmmac_rx_buf1_len()
4862 priv->xstats.rx_split_hdr_pkt_n++; in stmmac_rx_buf1_len()
4868 return priv->dma_conf.dma_buf_sz; in stmmac_rx_buf1_len()
4873 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); in stmmac_rx_buf1_len()
4880 int coe = priv->hw->rx_csum; in stmmac_rx_buf2_len()
4884 if (!priv->sph) in stmmac_rx_buf2_len()
4889 return priv->dma_conf.dma_buf_sz; in stmmac_rx_buf2_len()
4894 return plen - len; in stmmac_rx_buf2_len()
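A worked example of the split-header lengths computed by the two helpers above, assuming a frame that fits in a single descriptor (so the "last descriptor" branches apply):

#include <stdio.h>

int main(void)
{
	unsigned int plen = 1518;	/* frame length reported by the RX descriptor */
	unsigned int hlen = 66;		/* header bytes the MAC placed in buffer 1    */

	unsigned int buf1_len = hlen;			/* split header: buffer 1 = header only */
	unsigned int buf2_len = plen - buf1_len;	/* remaining payload lands in buffer 2  */

	printf("buf1=%u buf2=%u\n", buf1_len, buf2_len);
	return 0;
}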
4900 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xdp_xmit_xdpf()
4901 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xdp_xmit_xdpf()
4902 unsigned int entry = tx_q->cur_tx; in stmmac_xdp_xmit_xdpf()
4910 if (priv->est && priv->est->enable && in stmmac_xdp_xmit_xdpf()
4911 priv->est->max_sdu[queue] && in stmmac_xdp_xmit_xdpf()
4912 xdpf->len > priv->est->max_sdu[queue]) { in stmmac_xdp_xmit_xdpf()
4913 priv->xstats.max_sdu_txq_drop[queue]++; in stmmac_xdp_xmit_xdpf()
4917 if (likely(priv->extend_desc)) in stmmac_xdp_xmit_xdpf()
4918 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xdp_xmit_xdpf()
4919 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xdp_xmit_xdpf()
4920 tx_desc = &tx_q->dma_entx[entry].basic; in stmmac_xdp_xmit_xdpf()
4922 tx_desc = tx_q->dma_tx + entry; in stmmac_xdp_xmit_xdpf()
4925 dma_addr = dma_map_single(priv->device, xdpf->data, in stmmac_xdp_xmit_xdpf()
4926 xdpf->len, DMA_TO_DEVICE); in stmmac_xdp_xmit_xdpf()
4927 if (dma_mapping_error(priv->device, dma_addr)) in stmmac_xdp_xmit_xdpf()
4930 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; in stmmac_xdp_xmit_xdpf()
4932 struct page *page = virt_to_page(xdpf->data); in stmmac_xdp_xmit_xdpf()
4935 xdpf->headroom; in stmmac_xdp_xmit_xdpf()
4936 dma_sync_single_for_device(priv->device, dma_addr, in stmmac_xdp_xmit_xdpf()
4937 xdpf->len, DMA_BIDIRECTIONAL); in stmmac_xdp_xmit_xdpf()
4939 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; in stmmac_xdp_xmit_xdpf()
4942 tx_q->tx_skbuff_dma[entry].buf = dma_addr; in stmmac_xdp_xmit_xdpf()
4943 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_xdp_xmit_xdpf()
4944 tx_q->tx_skbuff_dma[entry].len = xdpf->len; in stmmac_xdp_xmit_xdpf()
4945 tx_q->tx_skbuff_dma[entry].last_segment = true; in stmmac_xdp_xmit_xdpf()
4946 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_xdp_xmit_xdpf()
4948 tx_q->xdpf[entry] = xdpf; in stmmac_xdp_xmit_xdpf()
4952 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, in stmmac_xdp_xmit_xdpf()
4953 true, priv->mode, true, true, in stmmac_xdp_xmit_xdpf()
4954 xdpf->len); in stmmac_xdp_xmit_xdpf()
4956 tx_q->tx_count_frames++; in stmmac_xdp_xmit_xdpf()
4958 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) in stmmac_xdp_xmit_xdpf()
4964 tx_q->tx_count_frames = 0; in stmmac_xdp_xmit_xdpf()
4966 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_xdp_xmit_xdpf()
4967 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_xdp_xmit_xdpf()
4968 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_xdp_xmit_xdpf()
4971 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); in stmmac_xdp_xmit_xdpf()
4973 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xdp_xmit_xdpf()
4974 tx_q->cur_tx = entry; in stmmac_xdp_xmit_xdpf()
4987 while (index >= priv->plat->tx_queues_to_use) in stmmac_xdp_get_tx_queue()
4988 index -= priv->plat->tx_queues_to_use; in stmmac_xdp_get_tx_queue()
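The subtraction loop above simply reduces an arbitrary index (typically derived from the current CPU) into the valid TX-queue range; a standalone equivalent:

static unsigned int xdp_tx_queue(unsigned int index, unsigned int tx_queues)
{
	return index % tx_queues;	/* same result as the while loop above */
}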
5006 nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit_back()
5009 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit_back()
5037 if (xdp_do_redirect(priv->dev, xdp, prog) < 0) in __stmmac_xdp_run_prog()
5043 bpf_warn_invalid_xdp_action(priv->dev, prog, act); in __stmmac_xdp_run_prog()
5046 trace_xdp_exception(priv->dev, prog, act); in __stmmac_xdp_run_prog()
5062 prog = READ_ONCE(priv->xdp_prog); in stmmac_xdp_run_prog()
5070 return ERR_PTR(-res); in stmmac_xdp_run_prog()
5091 unsigned int metasize = xdp->data - xdp->data_meta; in stmmac_construct_skb_zc()
5092 unsigned int datasize = xdp->data_end - xdp->data; in stmmac_construct_skb_zc()
5095 skb = napi_alloc_skb(&ch->rxtx_napi, in stmmac_construct_skb_zc()
5096 xdp->data_end - xdp->data_hard_start); in stmmac_construct_skb_zc()
5100 skb_reserve(skb, xdp->data - xdp->data_hard_start); in stmmac_construct_skb_zc()
5101 memcpy(__skb_put(skb, datasize), xdp->data, datasize); in stmmac_construct_skb_zc()
5112 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_dispatch_skb_zc()
5113 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_dispatch_skb_zc()
5114 unsigned int len = xdp->data_end - xdp->data; in stmmac_dispatch_skb_zc()
5116 int coe = priv->hw->rx_csum; in stmmac_dispatch_skb_zc()
5122 priv->xstats.rx_dropped++; in stmmac_dispatch_skb_zc()
5127 if (priv->hw->hw_vlan_en) in stmmac_dispatch_skb_zc()
5129 stmmac_rx_hw_vlan(priv, priv->hw, p, skb); in stmmac_dispatch_skb_zc()
5132 stmmac_rx_vlan(priv->dev, skb); in stmmac_dispatch_skb_zc()
5133 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_dispatch_skb_zc()
5138 skb->ip_summed = CHECKSUM_UNNECESSARY; in stmmac_dispatch_skb_zc()
5144 napi_gro_receive(&ch->rxtx_napi, skb); in stmmac_dispatch_skb_zc()
5146 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_dispatch_skb_zc()
5147 u64_stats_inc(&rxq_stats->napi.rx_pkt_n); in stmmac_dispatch_skb_zc()
5148 u64_stats_add(&rxq_stats->napi.rx_bytes, len); in stmmac_dispatch_skb_zc()
5149 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_dispatch_skb_zc()
5154 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_refill_zc()
5155 unsigned int entry = rx_q->dirty_rx; in stmmac_rx_refill_zc()
5161 while (budget-- > 0 && entry != rx_q->cur_rx) { in stmmac_rx_refill_zc()
5162 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; in stmmac_rx_refill_zc()
5166 if (!buf->xdp) { in stmmac_rx_refill_zc()
5167 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_rx_refill_zc()
5168 if (!buf->xdp) { in stmmac_rx_refill_zc()
5174 if (priv->extend_desc) in stmmac_rx_refill_zc()
5175 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_refill_zc()
5177 rx_desc = rx_q->dma_rx + entry; in stmmac_rx_refill_zc()
5179 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); in stmmac_rx_refill_zc()
5184 rx_q->rx_count_frames++; in stmmac_rx_refill_zc()
5185 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; in stmmac_rx_refill_zc()
5186 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) in stmmac_rx_refill_zc()
5187 rx_q->rx_count_frames = 0; in stmmac_rx_refill_zc()
5189 use_rx_wd = !priv->rx_coal_frames[queue]; in stmmac_rx_refill_zc()
5190 use_rx_wd |= rx_q->rx_count_frames > 0; in stmmac_rx_refill_zc()
5191 if (!priv->use_riwt) in stmmac_rx_refill_zc()
5197 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); in stmmac_rx_refill_zc()
5201 rx_q->dirty_rx = entry; in stmmac_rx_refill_zc()
5202 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_rx_refill_zc()
5203 (rx_q->dirty_rx * sizeof(struct dma_desc)); in stmmac_rx_refill_zc()
5204 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill_zc()
5222 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_rx_zc()
5223 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_zc()
5226 unsigned int next_entry = rx_q->cur_rx; in stmmac_rx_zc()
5237 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx_zc()
5238 if (priv->extend_desc) { in stmmac_rx_zc()
5239 rx_head = (void *)rx_q->dma_erx; in stmmac_rx_zc()
5242 rx_head = (void *)rx_q->dma_rx; in stmmac_rx_zc()
5246 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, in stmmac_rx_zc()
5247 rx_q->dma_rx_phy, desc_size); in stmmac_rx_zc()
5257 if (!count && rx_q->state_saved) { in stmmac_rx_zc()
5258 error = rx_q->state.error; in stmmac_rx_zc()
5259 len = rx_q->state.len; in stmmac_rx_zc()
5261 rx_q->state_saved = false; in stmmac_rx_zc()
5272 buf = &rx_q->buf_pool[entry]; in stmmac_rx_zc()
5280 if (priv->extend_desc) in stmmac_rx_zc()
5281 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_zc()
5283 p = rx_q->dma_rx + entry; in stmmac_rx_zc()
5286 status = stmmac_rx_status(priv, &priv->xstats, p); in stmmac_rx_zc()
5292 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, in stmmac_rx_zc()
5293 priv->dma_conf.dma_rx_size); in stmmac_rx_zc()
5294 next_entry = rx_q->cur_rx; in stmmac_rx_zc()
5296 if (priv->extend_desc) in stmmac_rx_zc()
5297 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); in stmmac_rx_zc()
5299 np = rx_q->dma_rx + next_entry; in stmmac_rx_zc()
5304 if (!buf->xdp) in stmmac_rx_zc()
5307 if (priv->extend_desc) in stmmac_rx_zc()
5308 stmmac_rx_extended_status(priv, &priv->xstats, in stmmac_rx_zc()
5309 rx_q->dma_erx + entry); in stmmac_rx_zc()
5311 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5312 buf->xdp = NULL; in stmmac_rx_zc()
5315 if (!priv->hwts_rx_en) in stmmac_rx_zc()
5328 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5329 buf->xdp = NULL; in stmmac_rx_zc()
5335 ctx = xsk_buff_to_stmmac_ctx(buf->xdp); in stmmac_rx_zc()
5336 ctx->priv = priv; in stmmac_rx_zc()
5337 ctx->desc = p; in stmmac_rx_zc()
5338 ctx->ndesc = np; in stmmac_rx_zc()
5346 buf1_len -= ETH_FCS_LEN; in stmmac_rx_zc()
5347 len -= ETH_FCS_LEN; in stmmac_rx_zc()
5351 buf->xdp->data_end = buf->xdp->data + buf1_len; in stmmac_rx_zc()
5352 xsk_buff_dma_sync_for_cpu(buf->xdp); in stmmac_rx_zc()
5354 prog = READ_ONCE(priv->xdp_prog); in stmmac_rx_zc()
5355 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); in stmmac_rx_zc()
5359 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); in stmmac_rx_zc()
5360 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5363 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5372 buf->xdp = NULL; in stmmac_rx_zc()
5378 rx_q->state_saved = true; in stmmac_rx_zc()
5379 rx_q->state.error = error; in stmmac_rx_zc()
5380 rx_q->state.len = len; in stmmac_rx_zc()
5385 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_rx_zc()
5386 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); in stmmac_rx_zc()
5387 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_rx_zc()
5389 priv->xstats.rx_dropped += rx_dropped; in stmmac_rx_zc()
5390 priv->xstats.rx_errors += rx_errors; in stmmac_rx_zc()
5392 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { in stmmac_rx_zc()
5394 xsk_set_rx_need_wakeup(rx_q->xsk_pool); in stmmac_rx_zc()
5396 xsk_clear_rx_need_wakeup(rx_q->xsk_pool); in stmmac_rx_zc()
5405 * stmmac_rx - manage the receive process
5415 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_rx()
5416 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx()
5417 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_rx()
5419 int status = 0, coe = priv->hw->rx_csum; in stmmac_rx()
5420 unsigned int next_entry = rx_q->cur_rx; in stmmac_rx()
5428 dma_dir = page_pool_get_dma_dir(rx_q->page_pool); in stmmac_rx()
5429 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; in stmmac_rx()
5430 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); in stmmac_rx()
5435 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx()
5436 if (priv->extend_desc) { in stmmac_rx()
5437 rx_head = (void *)rx_q->dma_erx; in stmmac_rx()
5440 rx_head = (void *)rx_q->dma_rx; in stmmac_rx()
5444 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, in stmmac_rx()
5445 rx_q->dma_rx_phy, desc_size); in stmmac_rx()
5455 if (!count && rx_q->state_saved) { in stmmac_rx()
5456 skb = rx_q->state.skb; in stmmac_rx()
5457 error = rx_q->state.error; in stmmac_rx()
5458 len = rx_q->state.len; in stmmac_rx()
5460 rx_q->state_saved = false; in stmmac_rx()
5473 buf = &rx_q->buf_pool[entry]; in stmmac_rx()
5475 if (priv->extend_desc) in stmmac_rx()
5476 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx()
5478 p = rx_q->dma_rx + entry; in stmmac_rx()
5481 status = stmmac_rx_status(priv, &priv->xstats, p); in stmmac_rx()
5486 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, in stmmac_rx()
5487 priv->dma_conf.dma_rx_size); in stmmac_rx()
5488 next_entry = rx_q->cur_rx; in stmmac_rx()
5490 if (priv->extend_desc) in stmmac_rx()
5491 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); in stmmac_rx()
5493 np = rx_q->dma_rx + next_entry; in stmmac_rx()
5497 if (priv->extend_desc) in stmmac_rx()
5498 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); in stmmac_rx()
5500 page_pool_recycle_direct(rx_q->page_pool, buf->page); in stmmac_rx()
5501 buf->page = NULL; in stmmac_rx()
5503 if (!priv->hwts_rx_en) in stmmac_rx()
5518 prefetch(page_address(buf->page) + buf->page_offset); in stmmac_rx()
5519 if (buf->sec_page) in stmmac_rx()
5520 prefetch(page_address(buf->sec_page)); in stmmac_rx()
5530 buf2_len -= ETH_FCS_LEN; in stmmac_rx()
5531 len -= ETH_FCS_LEN; in stmmac_rx()
5533 buf1_len -= ETH_FCS_LEN; in stmmac_rx()
5534 len -= ETH_FCS_LEN; in stmmac_rx()
5541 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
5544 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq); in stmmac_rx()
5545 xdp_prepare_buff(&ctx.xdp, page_address(buf->page), in stmmac_rx()
5546 buf->page_offset, buf1_len, true); in stmmac_rx()
5548 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - in stmmac_rx()
5549 buf->page_offset; in stmmac_rx()
5559 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - in stmmac_rx()
5560 buf->page_offset; in stmmac_rx()
5565 unsigned int xdp_res = -PTR_ERR(skb); in stmmac_rx()
5568 page_pool_put_page(rx_q->page_pool, in stmmac_rx()
5571 buf->page = NULL; in stmmac_rx()
5587 buf->page = NULL; in stmmac_rx()
5597 buf1_len = ctx.xdp.data_end - ctx.xdp.data; in stmmac_rx()
5599 skb = napi_alloc_skb(&ch->rx_napi, buf1_len); in stmmac_rx()
5611 page_pool_recycle_direct(rx_q->page_pool, buf->page); in stmmac_rx()
5612 buf->page = NULL; in stmmac_rx()
5614 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
5616 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in stmmac_rx()
5617 buf->page, buf->page_offset, buf1_len, in stmmac_rx()
5618 priv->dma_conf.dma_buf_sz); in stmmac_rx()
5622 buf->page = NULL; in stmmac_rx()
5626 dma_sync_single_for_cpu(priv->device, buf->sec_addr, in stmmac_rx()
5628 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in stmmac_rx()
5629 buf->sec_page, 0, buf2_len, in stmmac_rx()
5630 priv->dma_conf.dma_buf_sz); in stmmac_rx()
5634 buf->sec_page = NULL; in stmmac_rx()
5647 if (priv->hw->hw_vlan_en) in stmmac_rx()
5649 stmmac_rx_hw_vlan(priv, priv->hw, p, skb); in stmmac_rx()
5652 stmmac_rx_vlan(priv->dev, skb); in stmmac_rx()
5654 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_rx()
5659 skb->ip_summed = CHECKSUM_UNNECESSARY; in stmmac_rx()
5665 napi_gro_receive(&ch->rx_napi, skb); in stmmac_rx()
5674 rx_q->state_saved = true; in stmmac_rx()
5675 rx_q->state.skb = skb; in stmmac_rx()
5676 rx_q->state.error = error; in stmmac_rx()
5677 rx_q->state.len = len; in stmmac_rx()
5684 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_rx()
5685 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets); in stmmac_rx()
5686 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes); in stmmac_rx()
5687 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); in stmmac_rx()
5688 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_rx()
5690 priv->xstats.rx_dropped += rx_dropped; in stmmac_rx()
5691 priv->xstats.rx_errors += rx_errors; in stmmac_rx()
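/*
 * stmmac_rx() above processes at most "budget" frames per NAPI poll: it walks
 * the RX ring, runs the attached XDP program on the first buffer, and
 * otherwise builds an skb from buf->page (plus buf->sec_page when Split
 * Header placed the payload in a second buffer) via skb_add_rx_frag(). If the
 * budget runs out in the middle of a multi-descriptor frame, the partially
 * built skb and the accumulated error/length are parked in rx_q->state and
 * restored on the next poll (the state_saved handling at the top). Per-queue
 * counters are flushed once per poll under napi_syncp, and rx_dropped /
 * rx_errors are summed into priv->xstats at the end.
 */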
5700 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rx()
5702 u32 chan = ch->index; in stmmac_napi_poll_rx()
5705 rxq_stats = &priv->xstats.rxq_stats[chan]; in stmmac_napi_poll_rx()
5706 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_napi_poll_rx()
5707 u64_stats_inc(&rxq_stats->napi.poll); in stmmac_napi_poll_rx()
5708 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_napi_poll_rx()
5714 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_rx()
5715 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_poll_rx()
5716 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_rx()
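/*
 * The per-channel RX DMA interrupt was masked when this NAPI was scheduled;
 * it is only unmasked here, once the poll completed under budget, and the
 * enable is done under ch->lock so it cannot race with the interrupt handler
 * or with the TX/rxtx poll routines touching the same channel.
 */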
5726 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_tx()
5729 u32 chan = ch->index; in stmmac_napi_poll_tx()
5732 txq_stats = &priv->xstats.txq_stats[chan]; in stmmac_napi_poll_tx()
5733 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_napi_poll_tx()
5734 u64_stats_inc(&txq_stats->napi.poll); in stmmac_napi_poll_tx()
5735 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_napi_poll_tx()
5743 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_tx()
5744 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_poll_tx()
5745 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_tx()
5759 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rxtx()
5764 u32 chan = ch->index; in stmmac_napi_poll_rxtx()
5766 rxq_stats = &priv->xstats.rxq_stats[chan]; in stmmac_napi_poll_rxtx()
5767 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5768 u64_stats_inc(&rxq_stats->napi.poll); in stmmac_napi_poll_rxtx()
5769 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5771 txq_stats = &priv->xstats.txq_stats[chan]; in stmmac_napi_poll_rxtx()
5772 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5773 u64_stats_inc(&txq_stats->napi.poll); in stmmac_napi_poll_rxtx()
5774 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5793 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_rxtx()
5797 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_napi_poll_rxtx()
5798 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_rxtx()
5805 return min(rxtx_done, budget - 1); in stmmac_napi_poll_rxtx()
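/*
 * Returning at most budget - 1 matters here: on this path the poll has
 * already completed NAPI, and the core treats a return value equal to the
 * full budget as "more work pending", which would contradict that. Clamping
 * to budget - 1 keeps the combined rx/tx poll within the NAPI contract.
 */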
5825 * stmmac_set_rx_mode - entry point for multicast addressing
5837 stmmac_set_filter(priv, priv->hw, dev); in stmmac_set_rx_mode()
5841 * stmmac_change_mtu - entry point to change MTU size for the device.
5848 * 0 on success and an appropriate negative integer as defined in errno.h in stmmac_change_mtu()
5854 int txfifosz = priv->plat->tx_fifo_size; in stmmac_change_mtu()
5860 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_change_mtu()
5862 txfifosz /= priv->plat->tx_queues_to_use; in stmmac_change_mtu()
5865 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); in stmmac_change_mtu()
5866 return -EINVAL; in stmmac_change_mtu()
5873 return -EINVAL; in stmmac_change_mtu()
5876 netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); in stmmac_change_mtu()
5880 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", in stmmac_change_mtu()
5891 netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); in stmmac_change_mtu()
5900 WRITE_ONCE(dev->mtu, mtu); in stmmac_change_mtu()
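/*
 * An MTU change on a running interface is implemented as a restart: a new
 * dma_conf sized for the new buffers is allocated first, the interface is
 * torn down and reopened with it, and only then is dev->mtu updated with
 * WRITE_ONCE(). Jumbo MTUs are refused while an XDP program is attached, and
 * the requested size is also checked against the per-queue TX FIFO size
 * computed above.
 */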
5911 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) in stmmac_fix_features()
5914 if (!priv->plat->tx_coe) in stmmac_fix_features()
5922 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) in stmmac_fix_features()
5926 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { in stmmac_fix_features()
5928 priv->tso = true; in stmmac_fix_features()
5930 priv->tso = false; in stmmac_fix_features()
5943 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_set_features()
5945 priv->hw->rx_csum = 0; in stmmac_set_features()
5949 stmmac_rx_ipc(priv, priv->hw); in stmmac_set_features()
5951 if (priv->sph_cap) { in stmmac_set_features()
5952 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_set_features()
5955 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) in stmmac_set_features()
5956 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_set_features()
5960 priv->hw->hw_vlan_en = true; in stmmac_set_features()
5962 priv->hw->hw_vlan_en = false; in stmmac_set_features()
5964 stmmac_set_hw_vlan_mode(priv, priv->hw); in stmmac_set_features()
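/*
 * Toggling NETIF_F_RXCSUM has a side effect on Split Header: SPH is only kept
 * enabled while RX checksum offload is active, so it is re-evaluated and
 * reprogrammed on every RX channel here. NETIF_F_HW_VLAN_CTAG_RX likewise
 * switches the MAC between hardware and software VLAN tag stripping via
 * stmmac_set_hw_vlan_mode().
 */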
5971 struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg; in stmmac_fpe_event_status()
5974 spin_lock(&fpe_cfg->lock); in stmmac_fpe_event_status()
5976 if (!fpe_cfg->pmac_enabled || status == FPE_EVENT_UNKNOWN) in stmmac_fpe_event_status()
5981 stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg, in stmmac_fpe_event_status()
5986 fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) in stmmac_fpe_event_status()
5987 fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING; in stmmac_fpe_event_status()
5991 fpe_cfg->status == ETHTOOL_MM_VERIFY_STATUS_VERIFYING) in stmmac_fpe_event_status()
5992 fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED; in stmmac_fpe_event_status()
5995 spin_unlock(&fpe_cfg->lock); in stmmac_fpe_event_status()
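/*
 * stmmac_fpe_event_status() drives the MAC Merge verification state machine
 * from interrupt context, entirely under fpe_cfg->lock and only while the
 * preemptible MAC is enabled: a verify mPacket received from the link partner
 * is answered with a response mPacket, a locally transmitted verify moves the
 * status to VERIFYING, and a received response while VERIFYING marks the
 * handshake SUCCEEDED.
 */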
6000 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_common_interrupt()
6001 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_common_interrupt()
6006 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_common_interrupt()
6009 if (priv->irq_wake) in stmmac_common_interrupt()
6010 pm_wakeup_event(priv->device, 0); in stmmac_common_interrupt()
6012 if (priv->dma_cap.estsel) in stmmac_common_interrupt()
6013 stmmac_est_irq_status(priv, priv, priv->dev, in stmmac_common_interrupt()
6014 &priv->xstats, tx_cnt); in stmmac_common_interrupt()
6016 if (priv->dma_cap.fpesel) { in stmmac_common_interrupt()
6017 int status = stmmac_fpe_irq_status(priv, priv->ioaddr, in stmmac_common_interrupt()
6018 priv->dev); in stmmac_common_interrupt()
6024 if ((priv->plat->has_gmac) || xmac) { in stmmac_common_interrupt()
6025 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); in stmmac_common_interrupt()
6030 priv->tx_path_in_lpi_mode = true; in stmmac_common_interrupt()
6032 priv->tx_path_in_lpi_mode = false; in stmmac_common_interrupt()
6036 stmmac_host_mtl_irq_status(priv, priv->hw, queue); in stmmac_common_interrupt()
6039 if (priv->hw->pcs && in stmmac_common_interrupt()
6040 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { in stmmac_common_interrupt()
6041 if (priv->xstats.pcs_link) in stmmac_common_interrupt()
6042 netif_carrier_on(priv->dev); in stmmac_common_interrupt()
6044 netif_carrier_off(priv->dev); in stmmac_common_interrupt()
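/*
 * stmmac_common_interrupt() collects the non-DMA interrupt sources shared by
 * all queues: PM wake events, EST and FPE status when those features exist,
 * the GMAC/XGMAC core and per-queue MTL status (which also tracks whether the
 * TX path entered or left LPI), and, when the MAC uses its own PCS rather
 * than an integrated one, the PCS link state used to set the netif carrier.
 */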
6052 * stmmac_interrupt - main ISR
6059 * o Core interrupts to manage: remote wake-up, management counter, LPI
6068 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_interrupt()
6072 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv)) in stmmac_interrupt()
6090 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_mac_interrupt()
6105 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_safety_interrupt()
6118 int chan = tx_q->queue_index; in stmmac_msi_intr_tx()
6126 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_msi_intr_tx()
6145 int chan = rx_q->queue_index; in stmmac_msi_intr_rx()
6152 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_msi_intr_rx()
6161 * stmmac_ioctl - Entry point for the Ioctl
6172 int ret = -EOPNOTSUPP; in stmmac_ioctl()
6175 return -EINVAL; in stmmac_ioctl()
6181 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); in stmmac_ioctl()
6200 int ret = -EOPNOTSUPP; in stmmac_setup_tc_block_cb()
6202 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) in stmmac_setup_tc_block_cb()
6246 return -EOPNOTSUPP; in stmmac_setup_tc()
6253 int gso = skb_shinfo(skb)->gso_type; in stmmac_select_queue()
6265 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; in stmmac_select_queue()
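/*
 * There is no capability bit saying which TX queues are TSO/USO capable, so
 * GSO packets are steered to queue 0, which is guaranteed to support TSO when
 * the feature is enabled at all; everything else falls back to
 * netdev_pick_tx() folded onto the real TX queue count.
 */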
6273 ret = pm_runtime_resume_and_get(priv->device); in stmmac_set_mac_address()
6281 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); in stmmac_set_mac_address()
6284 pm_runtime_put(priv->device); in stmmac_set_mac_address()
6306 le32_to_cpu(p->des0), le32_to_cpu(p->des1), in sysfs_display_ring()
6307 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); in sysfs_display_ring()
6309 p = &(++ep)->basic; in sysfs_display_ring()
6317 struct net_device *dev = seq->private; in stmmac_rings_status_show()
6319 u32 rx_count = priv->plat->rx_queues_to_use; in stmmac_rings_status_show()
6320 u32 tx_count = priv->plat->tx_queues_to_use; in stmmac_rings_status_show()
6323 if ((dev->flags & IFF_UP) == 0) in stmmac_rings_status_show()
6327 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rings_status_show()
6331 if (priv->extend_desc) { in stmmac_rings_status_show()
6333 sysfs_display_ring((void *)rx_q->dma_erx, in stmmac_rings_status_show()
6334 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); in stmmac_rings_status_show()
6337 sysfs_display_ring((void *)rx_q->dma_rx, in stmmac_rings_status_show()
6338 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); in stmmac_rings_status_show()
6343 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_rings_status_show()
6347 if (priv->extend_desc) { in stmmac_rings_status_show()
6349 sysfs_display_ring((void *)tx_q->dma_etx, in stmmac_rings_status_show()
6350 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); in stmmac_rings_status_show()
6351 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { in stmmac_rings_status_show()
6353 sysfs_display_ring((void *)tx_q->dma_tx, in stmmac_rings_status_show()
6354 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); in stmmac_rings_status_show()
6380 struct net_device *dev = seq->private; in stmmac_dma_cap_show()
6383 if (!priv->hw_cap_support) { in stmmac_dma_cap_show()
6393 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); in stmmac_dma_cap_show()
6395 (priv->dma_cap.mbps_1000) ? "Y" : "N"); in stmmac_dma_cap_show()
6397 (priv->dma_cap.half_duplex) ? "Y" : "N"); in stmmac_dma_cap_show()
6398 if (priv->plat->has_xgmac) { in stmmac_dma_cap_show()
6401 priv->dma_cap.multi_addr); in stmmac_dma_cap_show()
6404 (priv->dma_cap.hash_filter) ? "Y" : "N"); in stmmac_dma_cap_show()
6406 (priv->dma_cap.multi_addr) ? "Y" : "N"); in stmmac_dma_cap_show()
6409 (priv->dma_cap.pcs) ? "Y" : "N"); in stmmac_dma_cap_show()
6411 (priv->dma_cap.sma_mdio) ? "Y" : "N"); in stmmac_dma_cap_show()
6413 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); in stmmac_dma_cap_show()
6415 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); in stmmac_dma_cap_show()
6417 (priv->dma_cap.rmon) ? "Y" : "N"); in stmmac_dma_cap_show()
6418 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", in stmmac_dma_cap_show()
6419 (priv->dma_cap.time_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
6420 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", in stmmac_dma_cap_show()
6421 (priv->dma_cap.atime_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
6422 if (priv->plat->has_xgmac) in stmmac_dma_cap_show()
6424 dwxgmac_timestamp_source[priv->dma_cap.tssrc]); in stmmac_dma_cap_show()
6425 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", in stmmac_dma_cap_show()
6426 (priv->dma_cap.eee) ? "Y" : "N"); in stmmac_dma_cap_show()
6427 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); in stmmac_dma_cap_show()
6429 (priv->dma_cap.tx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
6430 if (priv->synopsys_id >= DWMAC_CORE_4_00 || in stmmac_dma_cap_show()
6431 priv->plat->has_xgmac) { in stmmac_dma_cap_show()
6433 (priv->dma_cap.rx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
6436 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); in stmmac_dma_cap_show()
6438 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); in stmmac_dma_cap_show()
6440 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); in stmmac_dma_cap_show()
6443 priv->dma_cap.number_rx_channel); in stmmac_dma_cap_show()
6445 priv->dma_cap.number_tx_channel); in stmmac_dma_cap_show()
6447 priv->dma_cap.number_rx_queues); in stmmac_dma_cap_show()
6449 priv->dma_cap.number_tx_queues); in stmmac_dma_cap_show()
6451 (priv->dma_cap.enh_desc) ? "Y" : "N"); in stmmac_dma_cap_show()
6452 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); in stmmac_dma_cap_show()
6453 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); in stmmac_dma_cap_show()
6454 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ? in stmmac_dma_cap_show()
6455 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0); in stmmac_dma_cap_show()
6456 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); in stmmac_dma_cap_show()
6458 priv->dma_cap.pps_out_num); in stmmac_dma_cap_show()
6460 dwxgmac_safety_feature_desc[priv->dma_cap.asp]); in stmmac_dma_cap_show()
6462 priv->dma_cap.frpsel ? "Y" : "N"); in stmmac_dma_cap_show()
6464 priv->dma_cap.host_dma_width); in stmmac_dma_cap_show()
6466 priv->dma_cap.rssen ? "Y" : "N"); in stmmac_dma_cap_show()
6468 priv->dma_cap.vlhash ? "Y" : "N"); in stmmac_dma_cap_show()
6470 priv->dma_cap.sphen ? "Y" : "N"); in stmmac_dma_cap_show()
6472 priv->dma_cap.vlins ? "Y" : "N"); in stmmac_dma_cap_show()
6474 priv->dma_cap.dvlan ? "Y" : "N"); in stmmac_dma_cap_show()
6476 priv->dma_cap.l3l4fnum); in stmmac_dma_cap_show()
6478 priv->dma_cap.arpoffsel ? "Y" : "N"); in stmmac_dma_cap_show()
6480 priv->dma_cap.estsel ? "Y" : "N"); in stmmac_dma_cap_show()
6482 priv->dma_cap.fpesel ? "Y" : "N"); in stmmac_dma_cap_show()
6483 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", in stmmac_dma_cap_show()
6484 priv->dma_cap.tbssel ? "Y" : "N"); in stmmac_dma_cap_show()
6486 priv->dma_cap.tbs_ch_num); in stmmac_dma_cap_show()
6487 seq_printf(seq, "\tPer-Stream Filtering: %s\n", in stmmac_dma_cap_show()
6488 priv->dma_cap.sgfsel ? "Y" : "N"); in stmmac_dma_cap_show()
6490 BIT(priv->dma_cap.ttsfd) >> 1); in stmmac_dma_cap_show()
6492 priv->dma_cap.numtc); in stmmac_dma_cap_show()
6494 priv->dma_cap.dcben ? "Y" : "N"); in stmmac_dma_cap_show()
6495 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n", in stmmac_dma_cap_show()
6496 priv->dma_cap.advthword ? "Y" : "N"); in stmmac_dma_cap_show()
6498 priv->dma_cap.ptoen ? "Y" : "N"); in stmmac_dma_cap_show()
6499 seq_printf(seq, "\tOne-Step Timestamping: %s\n", in stmmac_dma_cap_show()
6500 priv->dma_cap.osten ? "Y" : "N"); in stmmac_dma_cap_show()
6501 seq_printf(seq, "\tPriority-Based Flow Control: %s\n", in stmmac_dma_cap_show()
6502 priv->dma_cap.pfcen ? "Y" : "N"); in stmmac_dma_cap_show()
6504 BIT(priv->dma_cap.frpes) << 6); in stmmac_dma_cap_show()
6506 BIT(priv->dma_cap.frpbs) << 6); in stmmac_dma_cap_show()
6508 priv->dma_cap.frppipe_num); in stmmac_dma_cap_show()
6510 priv->dma_cap.nrvf_num ? in stmmac_dma_cap_show()
6511 (BIT(priv->dma_cap.nrvf_num) << 1) : 0); in stmmac_dma_cap_show()
6513 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0); in stmmac_dma_cap_show()
6515 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0); in stmmac_dma_cap_show()
6516 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n", in stmmac_dma_cap_show()
6517 priv->dma_cap.cbtisel ? "Y" : "N"); in stmmac_dma_cap_show()
6519 priv->dma_cap.aux_snapshot_n); in stmmac_dma_cap_show()
6520 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n", in stmmac_dma_cap_show()
6521 priv->dma_cap.pou_ost_en ? "Y" : "N"); in stmmac_dma_cap_show()
6523 priv->dma_cap.edma ? "Y" : "N"); in stmmac_dma_cap_show()
6525 priv->dma_cap.ediffc ? "Y" : "N"); in stmmac_dma_cap_show()
6527 priv->dma_cap.vxn ? "Y" : "N"); in stmmac_dma_cap_show()
6529 priv->dma_cap.dbgmem ? "Y" : "N"); in stmmac_dma_cap_show()
6531 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0); in stmmac_dma_cap_show()
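/*
 * Several sizes printed above are stored as log2-style encodings in the HW
 * feature registers and expanded here, e.g. the hash table size is reported
 * as BIT(hash_tb_sz) << 5 entries and the flexible RX parser buffer as
 * BIT(frpbs) << 6 bytes. As an illustration (values chosen only as an
 * example): hash_tb_sz == 2 decodes to (1 << 2) << 5 = 128 hash bins, and
 * frpes == 1 decodes to (1 << 1) << 6 = 128 parser entries.
 */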
6544 if (dev->netdev_ops != &stmmac_netdev_ops) in stmmac_device_event()
6549 if (priv->dbgfs_dir) in stmmac_device_event()
6550 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, in stmmac_device_event()
6551 priv->dbgfs_dir, in stmmac_device_event()
6553 dev->name); in stmmac_device_event()
6571 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); in stmmac_init_fs()
6574 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
6578 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
6588 debugfs_remove_recursive(priv->dbgfs_dir); in stmmac_exit_fs()
6623 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { in stmmac_vlan_update()
6630 if (!priv->dma_cap.vlhash) { in stmmac_vlan_update()
6632 return -EOPNOTSUPP; in stmmac_vlan_update()
6638 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); in stmmac_vlan_update()
6647 ret = pm_runtime_resume_and_get(priv->device); in stmmac_vlan_rx_add_vid()
6654 set_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
6657 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
6661 if (priv->hw->num_vlan) { in stmmac_vlan_rx_add_vid()
6662 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_add_vid()
6667 pm_runtime_put(priv->device); in stmmac_vlan_rx_add_vid()
6678 ret = pm_runtime_resume_and_get(priv->device); in stmmac_vlan_rx_kill_vid()
6685 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_kill_vid()
6687 if (priv->hw->num_vlan) { in stmmac_vlan_rx_kill_vid()
6688 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_kill_vid()
6696 pm_runtime_put(priv->device); in stmmac_vlan_rx_kill_vid()
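/*
 * VLAN RX filtering keeps the set of active VIDs in priv->active_vlans and
 * rebuilds the hardware state from it: stmmac_vlan_update() recomputes the
 * VLAN hash filter (rejected with -EOPNOTSUPP when the MAC lacks vlhash),
 * while MACs that provide perfect-match VLAN registers (hw->num_vlan) also
 * get an explicit filter entry added or removed per VID. Register access is
 * bracketed by runtime-PM get/put.
 */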
6705 switch (bpf->command) { in stmmac_bpf()
6707 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); in stmmac_bpf()
6709 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, in stmmac_bpf()
6710 bpf->xsk.queue_id); in stmmac_bpf()
6712 return -EOPNOTSUPP; in stmmac_bpf()
6725 if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) in stmmac_xdp_xmit()
6726 return -ENETDOWN; in stmmac_xdp_xmit()
6729 return -EINVAL; in stmmac_xdp_xmit()
6732 nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit()
6735 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit()
6760 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_rx_queue()
6763 spin_lock_irqsave(&ch->lock, flags); in stmmac_disable_rx_queue()
6764 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); in stmmac_disable_rx_queue()
6765 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_disable_rx_queue()
6768 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_disable_rx_queue()
6773 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_enable_rx_queue()
6774 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_rx_queue()
6779 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6781 netdev_err(priv->dev, "Failed to alloc RX desc.\n"); in stmmac_enable_rx_queue()
6785 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); in stmmac_enable_rx_queue()
6787 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6788 netdev_err(priv->dev, "Failed to init RX desc.\n"); in stmmac_enable_rx_queue()
6793 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6795 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_enable_rx_queue()
6796 rx_q->dma_rx_phy, rx_q->queue_index); in stmmac_enable_rx_queue()
6798 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * in stmmac_enable_rx_queue()
6800 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6801 rx_q->rx_tail_addr, rx_q->queue_index); in stmmac_enable_rx_queue()
6803 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { in stmmac_enable_rx_queue()
6804 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_enable_rx_queue()
6805 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6807 rx_q->queue_index); in stmmac_enable_rx_queue()
6809 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6810 priv->dma_conf.dma_buf_sz, in stmmac_enable_rx_queue()
6811 rx_q->queue_index); in stmmac_enable_rx_queue()
6816 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_rx_queue()
6817 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); in stmmac_enable_rx_queue()
6818 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_rx_queue()
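/*
 * stmmac_enable_rx_queue() is the per-queue (re)initialisation path used when
 * an AF_XDP pool is attached or detached at runtime: it reallocates and
 * reinitialises the descriptors for this queue only, reprograms the RX DMA
 * channel and its tail pointer past the pre-filled buffers, picks the DMA
 * buffer size from the XSK pool frame size when zero-copy is in use (falling
 * back to dma_buf_sz otherwise), and finally re-enables the RX DMA interrupt
 * that stmmac_disable_rx_queue() masked. The TX counterpart below does the
 * same for a TX queue and additionally re-enables TBS where available.
 */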
6823 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_tx_queue()
6826 spin_lock_irqsave(&ch->lock, flags); in stmmac_disable_tx_queue()
6827 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); in stmmac_disable_tx_queue()
6828 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_disable_tx_queue()
6831 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_disable_tx_queue()
6836 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_enable_tx_queue()
6837 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_tx_queue()
6841 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6843 netdev_err(priv->dev, "Failed to alloc TX desc.\n"); in stmmac_enable_tx_queue()
6847 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6849 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6850 netdev_err(priv->dev, "Failed to init TX desc.\n"); in stmmac_enable_tx_queue()
6855 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6857 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_enable_tx_queue()
6858 tx_q->dma_tx_phy, tx_q->queue_index); in stmmac_enable_tx_queue()
6860 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_enable_tx_queue()
6861 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); in stmmac_enable_tx_queue()
6863 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_enable_tx_queue()
6864 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_enable_tx_queue()
6865 tx_q->tx_tail_addr, tx_q->queue_index); in stmmac_enable_tx_queue()
6869 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_tx_queue()
6870 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); in stmmac_enable_tx_queue()
6871 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_tx_queue()
6885 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_xdp_release()
6886 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_xdp_release()
6895 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_release()
6898 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_xdp_release()
6910 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_xdp_open()
6911 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_xdp_open()
6920 ret = alloc_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_open()
6927 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); in stmmac_xdp_open()
6938 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_xdp_open()
6939 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_xdp_open()
6943 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_xdp_open()
6947 rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_xdp_open()
6949 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_xdp_open()
6950 rx_q->dma_rx_phy, chan); in stmmac_xdp_open()
6952 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_xdp_open()
6953 (rx_q->buf_alloc_num * in stmmac_xdp_open()
6955 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_xdp_open()
6956 rx_q->rx_tail_addr, chan); in stmmac_xdp_open()
6958 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { in stmmac_xdp_open()
6959 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_xdp_open()
6960 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_xdp_open()
6962 rx_q->queue_index); in stmmac_xdp_open()
6964 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_xdp_open()
6965 priv->dma_conf.dma_buf_sz, in stmmac_xdp_open()
6966 rx_q->queue_index); in stmmac_xdp_open()
6969 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_xdp_open()
6974 tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_xdp_open()
6976 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_xdp_open()
6977 tx_q->dma_tx_phy, chan); in stmmac_xdp_open()
6979 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_xdp_open()
6980 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_xdp_open()
6981 tx_q->tx_tail_addr, chan); in stmmac_xdp_open()
6983 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in stmmac_xdp_open()
6984 tx_q->txtimer.function = stmmac_tx_timer; in stmmac_xdp_open()
6988 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_xdp_open()
7006 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_xdp_open()
7007 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_xdp_open()
7011 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_open()
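/*
 * stmmac_xdp_open() is a trimmed-down reopen used when switching XDP programs
 * or XSK pools: DMA descriptor resources are reallocated and the rings
 * reinitialised, every RX and TX DMA channel is reprogrammed (including
 * per-channel Split Header and, where available, TBS settings), the TX
 * coalescing hrtimers are rearmed and the MAC is switched back on. The error
 * path unwinds in reverse, cancelling the timers and freeing the descriptor
 * resources.
 */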
7023 if (test_bit(STMMAC_DOWN, &priv->state) || in stmmac_xsk_wakeup()
7024 !netif_carrier_ok(priv->dev)) in stmmac_xsk_wakeup()
7025 return -ENETDOWN; in stmmac_xsk_wakeup()
7028 return -EINVAL; in stmmac_xsk_wakeup()
7030 if (queue >= priv->plat->rx_queues_to_use || in stmmac_xsk_wakeup()
7031 queue >= priv->plat->tx_queues_to_use) in stmmac_xsk_wakeup()
7032 return -EINVAL; in stmmac_xsk_wakeup()
7034 rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_xsk_wakeup()
7035 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xsk_wakeup()
7036 ch = &priv->channel[queue]; in stmmac_xsk_wakeup()
7038 if (!rx_q->xsk_pool && !tx_q->xsk_pool) in stmmac_xsk_wakeup()
7039 return -EINVAL; in stmmac_xsk_wakeup()
7041 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { in stmmac_xsk_wakeup()
7042 /* EQoS does not have per-DMA channel SW interrupt, in stmmac_xsk_wakeup()
7043 * so we schedule RX Napi straight-away. in stmmac_xsk_wakeup()
7045 if (likely(napi_schedule_prep(&ch->rxtx_napi))) in stmmac_xsk_wakeup()
7046 __napi_schedule(&ch->rxtx_napi); in stmmac_xsk_wakeup()
7055 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_get_stats64()
7056 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_get_stats64()
7061 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; in stmmac_get_stats64()
7066 start = u64_stats_fetch_begin(&txq_stats->q_syncp); in stmmac_get_stats64()
7067 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes); in stmmac_get_stats64()
7068 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); in stmmac_get_stats64()
7070 start = u64_stats_fetch_begin(&txq_stats->napi_syncp); in stmmac_get_stats64()
7071 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets); in stmmac_get_stats64()
7072 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); in stmmac_get_stats64()
7074 stats->tx_packets += tx_packets; in stmmac_get_stats64()
7075 stats->tx_bytes += tx_bytes; in stmmac_get_stats64()
7079 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; in stmmac_get_stats64()
7084 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); in stmmac_get_stats64()
7085 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets); in stmmac_get_stats64()
7086 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes); in stmmac_get_stats64()
7087 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); in stmmac_get_stats64()
7089 stats->rx_packets += rx_packets; in stmmac_get_stats64()
7090 stats->rx_bytes += rx_bytes; in stmmac_get_stats64()
7093 stats->rx_dropped = priv->xstats.rx_dropped; in stmmac_get_stats64()
7094 stats->rx_errors = priv->xstats.rx_errors; in stmmac_get_stats64()
7095 stats->tx_dropped = priv->xstats.tx_dropped; in stmmac_get_stats64()
7096 stats->tx_errors = priv->xstats.tx_errors; in stmmac_get_stats64()
7097 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier; in stmmac_get_stats64()
7098 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision; in stmmac_get_stats64()
7099 stats->rx_length_errors = priv->xstats.rx_length; in stmmac_get_stats64()
7100 stats->rx_crc_errors = priv->xstats.rx_crc_errors; in stmmac_get_stats64()
7101 stats->rx_over_errors = priv->xstats.rx_overflow_cntr; in stmmac_get_stats64()
7102 stats->rx_missed_errors = priv->xstats.rx_missed_cntr; in stmmac_get_stats64()
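/*
 * The per-queue counters are written from hot paths under the q_syncp /
 * napi_syncp u64_stats seqcounts, so this reader uses the usual
 * fetch_begin/fetch_retry loops to obtain torn-free 64-bit values even on
 * 32-bit hosts; the loop only retries if an update raced with the read. The
 * remaining fields are plain error counters taken straight from priv->xstats.
 */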
7128 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) in stmmac_reset_subtask()
7130 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_reset_subtask()
7133 netdev_err(priv->dev, "Reset adapter.\n"); in stmmac_reset_subtask()
7136 netif_trans_update(priv->dev); in stmmac_reset_subtask()
7137 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) in stmmac_reset_subtask()
7140 set_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
7141 dev_close(priv->dev); in stmmac_reset_subtask()
7142 dev_open(priv->dev, NULL); in stmmac_reset_subtask()
7143 clear_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
7144 clear_bit(STMMAC_RESETING, &priv->state); in stmmac_reset_subtask()
7154 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); in stmmac_service_task()
7158 * stmmac_hw_init - Init the MAC device
7169 /* dwmac-sun8i only works in chain mode */ in stmmac_hw_init()
7170 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) in stmmac_hw_init()
7172 priv->chain_mode = chain_mode; in stmmac_hw_init()
7180 priv->hw_cap_support = stmmac_get_hw_features(priv); in stmmac_hw_init()
7181 if (priv->hw_cap_support) { in stmmac_hw_init()
7182 dev_info(priv->device, "DMA HW capability register supported\n"); in stmmac_hw_init()
7189 priv->plat->enh_desc = priv->dma_cap.enh_desc; in stmmac_hw_init()
7190 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && in stmmac_hw_init()
7191 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL); in stmmac_hw_init()
7192 priv->hw->pmt = priv->plat->pmt; in stmmac_hw_init()
7193 if (priv->dma_cap.hash_tb_sz) { in stmmac_hw_init()
7194 priv->hw->multicast_filter_bins = in stmmac_hw_init()
7195 (BIT(priv->dma_cap.hash_tb_sz) << 5); in stmmac_hw_init()
7196 priv->hw->mcast_bits_log2 = in stmmac_hw_init()
7197 ilog2(priv->hw->multicast_filter_bins); in stmmac_hw_init()
7201 if (priv->plat->force_thresh_dma_mode) in stmmac_hw_init()
7202 priv->plat->tx_coe = 0; in stmmac_hw_init()
7204 priv->plat->tx_coe = priv->dma_cap.tx_coe; in stmmac_hw_init()
7207 priv->plat->rx_coe = priv->dma_cap.rx_coe; in stmmac_hw_init()
7209 if (priv->dma_cap.rx_coe_type2) in stmmac_hw_init()
7210 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; in stmmac_hw_init()
7211 else if (priv->dma_cap.rx_coe_type1) in stmmac_hw_init()
7212 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; in stmmac_hw_init()
7215 dev_info(priv->device, "No HW DMA feature register supported\n"); in stmmac_hw_init()
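/*
 * When the DMA HW capability register is implemented, its contents take
 * precedence over the platform data: enhanced descriptors, remote-wake PMT
 * (unless the platform insists on PHY-based WoL), the multicast hash size and
 * the TX/RX checksum offload type are all derived from dma_cap here. Without
 * the register, the platform-provided values are used unchanged.
 */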
7218 if (priv->plat->rx_coe) { in stmmac_hw_init()
7219 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_hw_init()
7220 dev_info(priv->device, "RX Checksum Offload Engine supported\n"); in stmmac_hw_init()
7221 if (priv->synopsys_id < DWMAC_CORE_4_00) in stmmac_hw_init()
7222 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); in stmmac_hw_init()
7224 if (priv->plat->tx_coe) in stmmac_hw_init()
7225 dev_info(priv->device, "TX Checksum insertion supported\n"); in stmmac_hw_init()
7227 if (priv->plat->pmt) { in stmmac_hw_init()
7228 dev_info(priv->device, "Wake-Up On Lan supported\n"); in stmmac_hw_init()
7229 device_set_wakeup_capable(priv->device, 1); in stmmac_hw_init()
7232 if (priv->dma_cap.tsoen) in stmmac_hw_init()
7233 dev_info(priv->device, "TSO supported\n"); in stmmac_hw_init()
7235 priv->hw->vlan_fail_q_en = in stmmac_hw_init()
7236 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); in stmmac_hw_init()
7237 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; in stmmac_hw_init()
7240 if (priv->hwif_quirks) { in stmmac_hw_init()
7241 ret = priv->hwif_quirks(priv); in stmmac_hw_init()
7251 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || in stmmac_hw_init()
7252 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { in stmmac_hw_init()
7253 priv->use_riwt = 1; in stmmac_hw_init()
7254 dev_info(priv->device, in stmmac_hw_init()
7266 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_add()
7269 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_add()
7271 ch->priv_data = priv; in stmmac_napi_add()
7272 ch->index = queue; in stmmac_napi_add()
7273 spin_lock_init(&ch->lock); in stmmac_napi_add()
7275 if (queue < priv->plat->rx_queues_to_use) { in stmmac_napi_add()
7276 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); in stmmac_napi_add()
7278 if (queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
7279 netif_napi_add_tx(dev, &ch->tx_napi, in stmmac_napi_add()
7282 if (queue < priv->plat->rx_queues_to_use && in stmmac_napi_add()
7283 queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
7284 netif_napi_add(dev, &ch->rxtx_napi, in stmmac_napi_add()
7295 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_del()
7298 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_del()
7300 if (queue < priv->plat->rx_queues_to_use) in stmmac_napi_del()
7301 netif_napi_del(&ch->rx_napi); in stmmac_napi_del()
7302 if (queue < priv->plat->tx_queues_to_use) in stmmac_napi_del()
7303 netif_napi_del(&ch->tx_napi); in stmmac_napi_del()
7304 if (queue < priv->plat->rx_queues_to_use && in stmmac_napi_del()
7305 queue < priv->plat->tx_queues_to_use) { in stmmac_napi_del()
7306 netif_napi_del(&ch->rxtx_napi); in stmmac_napi_del()
7321 priv->plat->rx_queues_to_use = rx_cnt; in stmmac_reinit_queues()
7322 priv->plat->tx_queues_to_use = tx_cnt; in stmmac_reinit_queues()
7324 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) in stmmac_reinit_queues()
7325 priv->rss.table[i] = ethtool_rxfh_indir_default(i, in stmmac_reinit_queues()
7344 priv->dma_conf.dma_rx_size = rx_size; in stmmac_reinit_ringparam()
7345 priv->dma_conf.dma_tx_size = tx_size; in stmmac_reinit_ringparam()
7354 * stmmac_fpe_verify_timer - Timer for MAC Merge verification
7369 spin_lock_irqsave(&fpe_cfg->lock, flags); in stmmac_fpe_verify_timer()
7371 switch (fpe_cfg->status) { in stmmac_fpe_verify_timer()
7374 if (fpe_cfg->verify_retries != 0) { in stmmac_fpe_verify_timer()
7375 stmmac_fpe_send_mpacket(priv, priv->ioaddr, in stmmac_fpe_verify_timer()
7379 fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_FAILED; in stmmac_fpe_verify_timer()
7382 fpe_cfg->verify_retries--; in stmmac_fpe_verify_timer()
7386 stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg, in stmmac_fpe_verify_timer()
7387 priv->plat->tx_queues_to_use, in stmmac_fpe_verify_timer()
7388 priv->plat->rx_queues_to_use, in stmmac_fpe_verify_timer()
7397 mod_timer(&fpe_cfg->verify_timer, in stmmac_fpe_verify_timer()
7398 jiffies + msecs_to_jiffies(fpe_cfg->verify_time)); in stmmac_fpe_verify_timer()
7401 spin_unlock_irqrestore(&fpe_cfg->lock, flags); in stmmac_fpe_verify_timer()
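/*
 * The verify timer implements the MAC Merge verification retries: while no
 * verdict has been reached it resends a verify mPacket up to verify_retries
 * times, spaced verify_time milliseconds apart, and marks the status FAILED
 * once the retries are exhausted; once verification has succeeded,
 * stmmac_fpe_configure() programs the enable bits into the hardware.
 * stmmac_fpe_verify_timer_arm() below only arms the timer when the pMAC and
 * TX preemption are enabled, verification is requested, and the status is
 * neither FAILED nor SUCCEEDED yet.
 */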
7406 if (fpe_cfg->pmac_enabled && fpe_cfg->tx_enabled && in stmmac_fpe_verify_timer_arm()
7407 fpe_cfg->verify_enabled && in stmmac_fpe_verify_timer_arm()
7408 fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_FAILED && in stmmac_fpe_verify_timer_arm()
7409 fpe_cfg->status != ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED) { in stmmac_fpe_verify_timer_arm()
7410 timer_setup(&fpe_cfg->verify_timer, stmmac_fpe_verify_timer, 0); in stmmac_fpe_verify_timer_arm()
7411 mod_timer(&fpe_cfg->verify_timer, jiffies); in stmmac_fpe_verify_timer_arm()
7417 struct stmmac_fpe_cfg *fpe_cfg = &priv->fpe_cfg; in stmmac_fpe_apply()
7422 if (!fpe_cfg->verify_enabled) { in stmmac_fpe_apply()
7423 stmmac_fpe_configure(priv, priv->ioaddr, fpe_cfg, in stmmac_fpe_apply()
7424 priv->plat->tx_queues_to_use, in stmmac_fpe_apply()
7425 priv->plat->rx_queues_to_use, in stmmac_fpe_apply()
7426 fpe_cfg->tx_enabled, in stmmac_fpe_apply()
7427 fpe_cfg->pmac_enabled); in stmmac_fpe_apply()
7429 fpe_cfg->status = ETHTOOL_MM_VERIFY_STATUS_INITIAL; in stmmac_fpe_apply()
7430 fpe_cfg->verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES; in stmmac_fpe_apply()
7432 if (netif_running(priv->dev)) in stmmac_fpe_apply()
7440 struct dma_desc *desc_contains_ts = ctx->desc; in stmmac_xdp_rx_timestamp()
7441 struct stmmac_priv *priv = ctx->priv; in stmmac_xdp_rx_timestamp()
7442 struct dma_desc *ndesc = ctx->ndesc; in stmmac_xdp_rx_timestamp()
7443 struct dma_desc *desc = ctx->desc; in stmmac_xdp_rx_timestamp()
7446 if (!priv->hwts_rx_en) in stmmac_xdp_rx_timestamp()
7447 return -ENODATA; in stmmac_xdp_rx_timestamp()
7450 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) in stmmac_xdp_rx_timestamp()
7454 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) { in stmmac_xdp_rx_timestamp()
7455 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns); in stmmac_xdp_rx_timestamp()
7456 ns -= priv->plat->cdc_error_adj; in stmmac_xdp_rx_timestamp()
7461 return -ENODATA; in stmmac_xdp_rx_timestamp()
7490 return -ENOMEM; in stmmac_dvr_probe()
7495 priv->device = device; in stmmac_dvr_probe()
7496 priv->dev = ndev; in stmmac_dvr_probe()
7499 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp); in stmmac_dvr_probe()
7501 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp); in stmmac_dvr_probe()
7502 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp); in stmmac_dvr_probe()
7505 priv->xstats.pcpu_stats = in stmmac_dvr_probe()
7507 if (!priv->xstats.pcpu_stats) in stmmac_dvr_probe()
7508 return -ENOMEM; in stmmac_dvr_probe()
7511 priv->pause = pause; in stmmac_dvr_probe()
7512 priv->plat = plat_dat; in stmmac_dvr_probe()
7513 priv->ioaddr = res->addr; in stmmac_dvr_probe()
7514 priv->dev->base_addr = (unsigned long)res->addr; in stmmac_dvr_probe()
7515 priv->plat->dma_cfg->multi_msi_en = in stmmac_dvr_probe()
7516 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN); in stmmac_dvr_probe()
7518 priv->dev->irq = res->irq; in stmmac_dvr_probe()
7519 priv->wol_irq = res->wol_irq; in stmmac_dvr_probe()
7520 priv->lpi_irq = res->lpi_irq; in stmmac_dvr_probe()
7521 priv->sfty_irq = res->sfty_irq; in stmmac_dvr_probe()
7522 priv->sfty_ce_irq = res->sfty_ce_irq; in stmmac_dvr_probe()
7523 priv->sfty_ue_irq = res->sfty_ue_irq; in stmmac_dvr_probe()
7525 priv->rx_irq[i] = res->rx_irq[i]; in stmmac_dvr_probe()
7527 priv->tx_irq[i] = res->tx_irq[i]; in stmmac_dvr_probe()
7529 if (!is_zero_ether_addr(res->mac)) in stmmac_dvr_probe()
7530 eth_hw_addr_set(priv->dev, res->mac); in stmmac_dvr_probe()
7532 dev_set_drvdata(device, priv->dev); in stmmac_dvr_probe()
7537 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); in stmmac_dvr_probe()
7538 if (!priv->af_xdp_zc_qps) in stmmac_dvr_probe()
7539 return -ENOMEM; in stmmac_dvr_probe()
7542 priv->wq = create_singlethread_workqueue("stmmac_wq"); in stmmac_dvr_probe()
7543 if (!priv->wq) { in stmmac_dvr_probe()
7544 dev_err(priv->device, "failed to create workqueue\n"); in stmmac_dvr_probe()
7545 ret = -ENOMEM; in stmmac_dvr_probe()
7549 INIT_WORK(&priv->service_task, stmmac_service_task); in stmmac_dvr_probe()
7555 priv->plat->phy_addr = phyaddr; in stmmac_dvr_probe()
7557 if (priv->plat->stmmac_rst) { in stmmac_dvr_probe()
7558 ret = reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7559 reset_control_deassert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7563 if (ret == -ENOTSUPP) in stmmac_dvr_probe()
7564 reset_control_reset(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7567 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); in stmmac_dvr_probe()
7568 if (ret == -ENOTSUPP) in stmmac_dvr_probe()
7569 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", in stmmac_dvr_probe()
7582 if (priv->synopsys_id < DWMAC_CORE_5_20) in stmmac_dvr_probe()
7583 priv->plat->dma_cfg->dche = false; in stmmac_dvr_probe()
7587 ndev->netdev_ops = &stmmac_netdev_ops; in stmmac_dvr_probe()
7589 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops; in stmmac_dvr_probe()
7590 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops; in stmmac_dvr_probe()
7592 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in stmmac_dvr_probe()
7594 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in stmmac_dvr_probe()
7599 ndev->hw_features |= NETIF_F_HW_TC; in stmmac_dvr_probe()
7602 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { in stmmac_dvr_probe()
7603 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; in stmmac_dvr_probe()
7604 if (priv->plat->has_gmac4) in stmmac_dvr_probe()
7605 ndev->hw_features |= NETIF_F_GSO_UDP_L4; in stmmac_dvr_probe()
7606 priv->tso = true; in stmmac_dvr_probe()
7607 dev_info(priv->device, "TSO feature enabled\n"); in stmmac_dvr_probe()
7610 if (priv->dma_cap.sphen && in stmmac_dvr_probe()
7611 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) { in stmmac_dvr_probe()
7612 ndev->hw_features |= NETIF_F_GRO; in stmmac_dvr_probe()
7613 priv->sph_cap = true; in stmmac_dvr_probe()
7614 priv->sph = priv->sph_cap; in stmmac_dvr_probe()
7615 dev_info(priv->device, "SPH feature enabled\n"); in stmmac_dvr_probe()
7623 if (priv->plat->host_dma_width) in stmmac_dvr_probe()
7624 priv->dma_cap.host_dma_width = priv->plat->host_dma_width; in stmmac_dvr_probe()
7626 priv->dma_cap.host_dma_width = priv->dma_cap.addr64; in stmmac_dvr_probe()
7628 if (priv->dma_cap.host_dma_width) { in stmmac_dvr_probe()
7630 DMA_BIT_MASK(priv->dma_cap.host_dma_width)); in stmmac_dvr_probe()
7632 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n", in stmmac_dvr_probe()
7633 priv->dma_cap.host_dma_width, priv->dma_cap.addr64); in stmmac_dvr_probe()
7640 priv->plat->dma_cfg->eame = true; in stmmac_dvr_probe()
7644 dev_err(priv->device, "Failed to set DMA Mask\n"); in stmmac_dvr_probe()
7648 priv->dma_cap.host_dma_width = 32; in stmmac_dvr_probe()
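/*
 * The DMA addressing width is taken from the hardware capabilities (or
 * clamped by a platform-provided host_dma_width) and used for the DMA mask;
 * masks wider than 32 bits also enable the enhanced addressing mode (eame)
 * needed for >32-bit descriptor addresses. If the wide mask cannot be
 * satisfied, the driver retries with a 32-bit mask and records
 * host_dma_width = 32 accordingly.
 */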
7652 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; in stmmac_dvr_probe()
7653 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); in stmmac_dvr_probe()
7656 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; in stmmac_dvr_probe()
7657 if (priv->plat->has_gmac4) { in stmmac_dvr_probe()
7658 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; in stmmac_dvr_probe()
7659 priv->hw->hw_vlan_en = true; in stmmac_dvr_probe()
7661 if (priv->dma_cap.vlhash) { in stmmac_dvr_probe()
7662 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in stmmac_dvr_probe()
7663 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; in stmmac_dvr_probe()
7665 if (priv->dma_cap.vlins) { in stmmac_dvr_probe()
7666 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; in stmmac_dvr_probe()
7667 if (priv->dma_cap.dvlan) in stmmac_dvr_probe()
7668 ndev->features |= NETIF_F_HW_VLAN_STAG_TX; in stmmac_dvr_probe()
7671 priv->msg_enable = netif_msg_init(debug, default_msg_level); in stmmac_dvr_probe()
7673 priv->xstats.threshold = tc; in stmmac_dvr_probe()
7676 rxq = priv->plat->rx_queues_to_use; in stmmac_dvr_probe()
7677 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); in stmmac_dvr_probe()
7678 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) in stmmac_dvr_probe()
7679 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); in stmmac_dvr_probe()
7681 if (priv->dma_cap.rssen && priv->plat->rss_en) in stmmac_dvr_probe()
7682 ndev->features |= NETIF_F_RXHASH; in stmmac_dvr_probe()
7684 ndev->vlan_features |= ndev->features; in stmmac_dvr_probe()
7686 /* MTU range: 46 - hw-specific max */ in stmmac_dvr_probe()
7687 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; in stmmac_dvr_probe()
7688 if (priv->plat->has_xgmac) in stmmac_dvr_probe()
7689 ndev->max_mtu = XGMAC_JUMBO_LEN; in stmmac_dvr_probe()
7690 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) in stmmac_dvr_probe()
7691 ndev->max_mtu = JUMBO_LEN; in stmmac_dvr_probe()
7693 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); in stmmac_dvr_probe()
7694 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu in stmmac_dvr_probe()
7695 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range. in stmmac_dvr_probe()
7697 if ((priv->plat->maxmtu < ndev->max_mtu) && in stmmac_dvr_probe()
7698 (priv->plat->maxmtu >= ndev->min_mtu)) in stmmac_dvr_probe()
7699 ndev->max_mtu = priv->plat->maxmtu; in stmmac_dvr_probe()
7700 else if (priv->plat->maxmtu < ndev->min_mtu) in stmmac_dvr_probe()
7701 dev_warn(priv->device, in stmmac_dvr_probe()
7703 __func__, priv->plat->maxmtu); in stmmac_dvr_probe()
7706 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ in stmmac_dvr_probe()
7708 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in stmmac_dvr_probe()
7713 mutex_init(&priv->lock); in stmmac_dvr_probe()
7715 priv->fpe_cfg.verify_retries = STMMAC_FPE_MM_MAX_VERIFY_RETRIES; in stmmac_dvr_probe()
7716 priv->fpe_cfg.verify_time = STMMAC_FPE_MM_MAX_VERIFY_TIME_MS; in stmmac_dvr_probe()
7717 priv->fpe_cfg.status = ETHTOOL_MM_VERIFY_STATUS_DISABLED; in stmmac_dvr_probe()
7718 timer_setup(&priv->fpe_cfg.verify_timer, stmmac_fpe_verify_timer, 0); in stmmac_dvr_probe()
7719 spin_lock_init(&priv->fpe_cfg.lock); in stmmac_dvr_probe()
7723 * changed at run-time and it is fixed. Otherwise the driver will try to in stmmac_dvr_probe()
7727 if (priv->plat->clk_csr >= 0) in stmmac_dvr_probe()
7728 priv->clk_csr = priv->plat->clk_csr; in stmmac_dvr_probe()
7741 dev_err_probe(priv->device, ret, in stmmac_dvr_probe()
7743 priv->plat->bus_id); in stmmac_dvr_probe()
7747 if (priv->plat->speed_mode_2500) in stmmac_dvr_probe()
7748 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); in stmmac_dvr_probe()
7762 dev_err(priv->device, "%s: ERROR %i registering the device\n", in stmmac_dvr_probe()
7771 if (priv->plat->dump_debug_regs) in stmmac_dvr_probe()
7772 priv->plat->dump_debug_regs(priv->plat->bsp_priv); in stmmac_dvr_probe()
7782 phylink_destroy(priv->phylink); in stmmac_dvr_probe()
7790 destroy_workqueue(priv->wq); in stmmac_dvr_probe()
7792 bitmap_free(priv->af_xdp_zc_qps); in stmmac_dvr_probe()
7809 netdev_info(priv->dev, "%s: removing driver", __func__); in stmmac_dvr_remove()
7814 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_dvr_remove()
7820 phylink_destroy(priv->phylink); in stmmac_dvr_remove()
7821 if (priv->plat->stmmac_rst) in stmmac_dvr_remove()
7822 reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_remove()
7823 reset_control_assert(priv->plat->stmmac_ahb_rst); in stmmac_dvr_remove()
7828 destroy_workqueue(priv->wq); in stmmac_dvr_remove()
7829 mutex_destroy(&priv->lock); in stmmac_dvr_remove()
7830 bitmap_free(priv->af_xdp_zc_qps); in stmmac_dvr_remove()
7838 * stmmac_suspend - suspend callback
7853 mutex_lock(&priv->lock); in stmmac_suspend()
7859 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_suspend()
7860 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_suspend()
7862 if (priv->eee_enabled) { in stmmac_suspend()
7863 priv->tx_path_in_lpi_mode = false; in stmmac_suspend()
7864 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_suspend()
7870 if (priv->plat->serdes_powerdown) in stmmac_suspend()
7871 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); in stmmac_suspend()
7874 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_suspend()
7875 stmmac_pmt(priv, priv->hw, priv->wolopts); in stmmac_suspend()
7876 priv->irq_wake = 1; in stmmac_suspend()
7878 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_suspend()
7879 pinctrl_pm_select_sleep_state(priv->device); in stmmac_suspend()
7882 mutex_unlock(&priv->lock); in stmmac_suspend()
7885 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_suspend()
7886 phylink_suspend(priv->phylink, true); in stmmac_suspend()
7888 if (device_may_wakeup(priv->device)) in stmmac_suspend()
7889 phylink_speed_down(priv->phylink, false); in stmmac_suspend()
7890 phylink_suspend(priv->phylink, false); in stmmac_suspend()
7894 if (priv->dma_cap.fpesel) in stmmac_suspend()
7895 timer_shutdown_sync(&priv->fpe_cfg.verify_timer); in stmmac_suspend()
7897 priv->speed = SPEED_UNKNOWN; in stmmac_suspend()
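/*
 * Suspend stops the datapath (queues, NAPI, TX coalescing timers, EEE timer)
 * and powers down the SerDes if the platform provides a hook, then branches
 * on Wake-on-LAN: with PMT-based WoL the MAC stays powered and is armed via
 * stmmac_pmt() while phylink is suspended with wakeup enabled; otherwise the
 * MAC is switched off, the sleep pinctrl state is selected and the link may
 * be ramped down before phylink is suspended. The FPE verify timer is shut
 * down and the cached speed is invalidated so that resume renegotiates.
 */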
7904 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_reset_rx_queue()
7906 rx_q->cur_rx = 0; in stmmac_reset_rx_queue()
7907 rx_q->dirty_rx = 0; in stmmac_reset_rx_queue()
7912 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_reset_tx_queue()
7914 tx_q->cur_tx = 0; in stmmac_reset_tx_queue()
7915 tx_q->dirty_tx = 0; in stmmac_reset_tx_queue()
7916 tx_q->mss = 0; in stmmac_reset_tx_queue()
7918 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_reset_tx_queue()
7922 * stmmac_reset_queues_param - reset queue parameters
7927 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_reset_queues_param()
7928 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_reset_queues_param()
7939 * stmmac_resume - resume callback
7954 * automatically as soon as a magic packet or a Wake-up frame in stmmac_resume()
7959 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_resume()
7960 mutex_lock(&priv->lock); in stmmac_resume()
7961 stmmac_pmt(priv, priv->hw, 0); in stmmac_resume()
7962 mutex_unlock(&priv->lock); in stmmac_resume()
7963 priv->irq_wake = 0; in stmmac_resume()
7965 pinctrl_pm_select_default_state(priv->device); in stmmac_resume()
7967 if (priv->mii) in stmmac_resume()
7968 stmmac_mdio_reset(priv->mii); in stmmac_resume()
7971 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in stmmac_resume()
7972 priv->plat->serdes_powerup) { in stmmac_resume()
7973 ret = priv->plat->serdes_powerup(ndev, in stmmac_resume()
7974 priv->plat->bsp_priv); in stmmac_resume()
7981 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_resume()
7982 phylink_resume(priv->phylink); in stmmac_resume()
7984 phylink_resume(priv->phylink); in stmmac_resume()
7985 if (device_may_wakeup(priv->device)) in stmmac_resume()
7986 phylink_speed_up(priv->phylink); in stmmac_resume()
7991 mutex_lock(&priv->lock); in stmmac_resume()
7996 stmmac_clear_descriptors(priv, &priv->dma_conf); in stmmac_resume()
8002 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); in stmmac_resume()
8007 mutex_unlock(&priv->lock); in stmmac_resume()
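/*
 * Resume mirrors the suspend path: PMT wake is disarmed (or the default
 * pinctrl state is restored and the MDIO bus reset when not waking from WoL),
 * the SerDes is powered back up where needed, phylink is resumed, and then,
 * under priv->lock, the queue indices and descriptors are reset, the MAC and
 * DMA are reprogrammed and the hardware VLAN RX filter is restored before the
 * queues and NAPI instances are re-enabled.
 */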