Lines matching "coe-unsupported"
1 // SPDX-License-Identifier: GPL-2.0-only
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
29 #include <linux/dma-mapping.h>
57 * with fine resolution and binary rollover. This avoids non-monotonic behavior
64 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
72 static int debug = -1;
74 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
76 static int phyaddr = -1;
80 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
151 * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
160 * the plat_data->set_clk_tx_rate method directly, call it via their own
164 * plat_data->clk_tx_i must be filled in.
171 /* Silently ignore unsupported speeds as rgmii_clock() only in stmmac_set_clk_tx_rate()
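A minimal sketch of how a platform glue driver can wire up the generic helper documented above (the probe context, the "pdev"/"plat_dat" variables and the clock name "tx" are illustrative assumptions, not taken from this listing):

	/* Obtain the MAC transmit clock and let the stmmac core retune it
	 * on every link-up via stmmac_set_clk_tx_rate().
	 */
	plat_dat->clk_tx_i = devm_clk_get(&pdev->dev, "tx");
	if (IS_ERR(plat_dat->clk_tx_i))
		return PTR_ERR(plat_dat->clk_tx_i);
	plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate;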
183 * stmmac_verify_args - verify the driver parameters.
195 pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n"); in stmmac_verify_args()
200 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in __stmmac_disable_all_queues()
201 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in __stmmac_disable_all_queues()
206 struct stmmac_channel *ch = &priv->channel[queue]; in __stmmac_disable_all_queues()
209 test_bit(queue, priv->af_xdp_zc_qps)) { in __stmmac_disable_all_queues()
210 napi_disable(&ch->rxtx_napi); in __stmmac_disable_all_queues()
215 napi_disable(&ch->rx_napi); in __stmmac_disable_all_queues()
217 napi_disable(&ch->tx_napi); in __stmmac_disable_all_queues()
222 * stmmac_disable_all_queues - Disable all queues
227 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_disable_all_queues()
233 rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_disable_all_queues()
234 if (rx_q->xsk_pool) { in stmmac_disable_all_queues()
244 * stmmac_enable_all_queues - Enable all queues
249 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_enable_all_queues()
250 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in stmmac_enable_all_queues()
255 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_all_queues()
258 test_bit(queue, priv->af_xdp_zc_qps)) { in stmmac_enable_all_queues()
259 napi_enable(&ch->rxtx_napi); in stmmac_enable_all_queues()
264 napi_enable(&ch->rx_napi); in stmmac_enable_all_queues()
266 napi_enable(&ch->tx_napi); in stmmac_enable_all_queues()
272 if (!test_bit(STMMAC_DOWN, &priv->state) && in stmmac_service_event_schedule()
273 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) in stmmac_service_event_schedule()
274 queue_work(priv->wq, &priv->service_task); in stmmac_service_event_schedule()
279 netif_carrier_off(priv->dev); in stmmac_global_err()
280 set_bit(STMMAC_RESET_REQUESTED, &priv->state); in stmmac_global_err()
292 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_avail()
295 if (tx_q->dirty_tx > tx_q->cur_tx) in stmmac_tx_avail()
296 avail = tx_q->dirty_tx - tx_q->cur_tx - 1; in stmmac_tx_avail()
298 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; in stmmac_tx_avail()
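A quick worked example of the ring arithmetic above: with dma_tx_size = 512, cur_tx = 500 and dirty_tx = 10 the wrap branch yields avail = 512 - 500 + 10 - 1 = 21 free descriptors, while dirty_tx = 10 and cur_tx = 5 yields avail = 10 - 5 - 1 = 4. The trailing "- 1" keeps one slot permanently unused so that cur_tx == dirty_tx can only mean an empty ring, never a full one.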
304 * stmmac_rx_dirty - Get RX queue dirty
310 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_dirty()
313 if (rx_q->dirty_rx <= rx_q->cur_rx) in stmmac_rx_dirty()
314 dirty = rx_q->cur_rx - rx_q->dirty_rx; in stmmac_rx_dirty()
316 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; in stmmac_rx_dirty()
323 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_eee_tx_busy()
328 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_eee_tx_busy()
330 if (tx_q->dirty_tx != tx_q->cur_tx) in stmmac_eee_tx_busy()
339 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_restart_sw_lpi_timer()
343 * stmmac_try_to_start_sw_lpi - check and enter LPI mode
356 if (!priv->tx_path_in_lpi_mode) in stmmac_try_to_start_sw_lpi()
357 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED, in stmmac_try_to_start_sw_lpi()
358 priv->tx_lpi_clk_stop, 0); in stmmac_try_to_start_sw_lpi()
362 * stmmac_stop_sw_lpi - stop transmitting LPI
364 * Description: When using software-controlled LPI, stop transmitting LPI state.
368 timer_delete_sync(&priv->eee_ctrl_timer); in stmmac_stop_sw_lpi()
369 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0); in stmmac_stop_sw_lpi()
370 priv->tx_path_in_lpi_mode = false; in stmmac_stop_sw_lpi()
374 * stmmac_eee_ctrl_timer - EEE TX SW timer.
387 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
402 if (!priv->hwts_tx_en) in stmmac_get_tx_hwtstamp()
406 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) in stmmac_get_tx_hwtstamp()
411 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); in stmmac_get_tx_hwtstamp()
413 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { in stmmac_get_tx_hwtstamp()
418 ns -= priv->plat->cdc_error_adj; in stmmac_get_tx_hwtstamp()
423 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); in stmmac_get_tx_hwtstamp()
429 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
445 if (!priv->hwts_rx_en) in stmmac_get_rx_hwtstamp()
448 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) in stmmac_get_rx_hwtstamp()
452 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { in stmmac_get_rx_hwtstamp()
453 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); in stmmac_get_rx_hwtstamp()
455 ns -= priv->plat->cdc_error_adj; in stmmac_get_rx_hwtstamp()
457 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); in stmmac_get_rx_hwtstamp()
460 shhwtstamp->hwtstamp = ns_to_ktime(ns); in stmmac_get_rx_hwtstamp()
462 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); in stmmac_get_rx_hwtstamp()
467 * stmmac_hwtstamp_set - control hardware timestamping.
475 * 0 on success and an appropriate -ve integer on failure.
491 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { in stmmac_hwtstamp_set()
493 priv->hwts_tx_en = 0; in stmmac_hwtstamp_set()
494 priv->hwts_rx_en = 0; in stmmac_hwtstamp_set()
496 return -EOPNOTSUPP; in stmmac_hwtstamp_set()
502 return -ENODEV; in stmmac_hwtstamp_set()
505 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", in stmmac_hwtstamp_set()
506 __func__, config->flags, config->tx_type, config->rx_filter); in stmmac_hwtstamp_set()
508 if (config->tx_type != HWTSTAMP_TX_OFF && in stmmac_hwtstamp_set()
509 config->tx_type != HWTSTAMP_TX_ON) in stmmac_hwtstamp_set()
510 return -ERANGE; in stmmac_hwtstamp_set()
512 if (priv->adv_ts) { in stmmac_hwtstamp_set()
513 switch (config->rx_filter) { in stmmac_hwtstamp_set()
516 config->rx_filter = HWTSTAMP_FILTER_NONE; in stmmac_hwtstamp_set()
521 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; in stmmac_hwtstamp_set()
535 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; in stmmac_hwtstamp_set()
545 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; in stmmac_hwtstamp_set()
556 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; in stmmac_hwtstamp_set()
567 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; in stmmac_hwtstamp_set()
578 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; in stmmac_hwtstamp_set()
590 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; in stmmac_hwtstamp_set()
593 if (priv->synopsys_id < DWMAC_CORE_4_10) in stmmac_hwtstamp_set()
602 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; in stmmac_hwtstamp_set()
614 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; in stmmac_hwtstamp_set()
628 config->rx_filter = HWTSTAMP_FILTER_ALL; in stmmac_hwtstamp_set()
633 return -ERANGE; in stmmac_hwtstamp_set()
636 switch (config->rx_filter) { in stmmac_hwtstamp_set()
638 config->rx_filter = HWTSTAMP_FILTER_NONE; in stmmac_hwtstamp_set()
642 config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; in stmmac_hwtstamp_set()
646 priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE; in stmmac_hwtstamp_set()
647 priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON; in stmmac_hwtstamp_set()
649 priv->systime_flags = STMMAC_HWTS_ACTIVE; in stmmac_hwtstamp_set()
651 if (priv->hwts_tx_en || priv->hwts_rx_en) { in stmmac_hwtstamp_set()
652 priv->systime_flags |= tstamp_all | ptp_v2 | in stmmac_hwtstamp_set()
658 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); in stmmac_hwtstamp_set()
660 priv->tstamp_config = *config; in stmmac_hwtstamp_set()
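For context, the request that lands in stmmac_hwtstamp_set() is the standard SIOCSHWTSTAMP ioctl rather than anything driver specific. A minimal user-space sketch (interface name, filter choice and the surrounding socket setup are illustrative assumptions):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/net_tstamp.h>

	static int enable_hw_tstamp(int fd)	/* fd: any AF_INET datagram socket */
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr = { 0 };

		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
	}

On success the driver calls stmmac_config_hw_tstamping() as seen above and mirrors the accepted configuration back into priv->tstamp_config.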
666 * stmmac_hwtstamp_get - read hardware timestamping.
678 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_hwtstamp_get()
679 return -EOPNOTSUPP; in stmmac_hwtstamp_get()
681 *config = priv->tstamp_config; in stmmac_hwtstamp_get()
687 * stmmac_init_tstamp_counter - init hardware timestamping counter
699 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_tstamp_counter()
704 if (!priv->plat->clk_ptp_rate) { in stmmac_init_tstamp_counter()
705 netdev_err(priv->dev, "Invalid PTP clock rate"); in stmmac_init_tstamp_counter()
706 return -EINVAL; in stmmac_init_tstamp_counter()
709 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); in stmmac_init_tstamp_counter()
710 priv->systime_flags = systime_flags; in stmmac_init_tstamp_counter()
713 stmmac_config_sub_second_increment(priv, priv->ptpaddr, in stmmac_init_tstamp_counter()
714 priv->plat->clk_ptp_rate, in stmmac_init_tstamp_counter()
719 priv->sub_second_inc = sec_inc; in stmmac_init_tstamp_counter()
727 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); in stmmac_init_tstamp_counter()
728 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); in stmmac_init_tstamp_counter()
734 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); in stmmac_init_tstamp_counter()
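Although the intermediate lines are elided in this listing, the relation implemented here is the usual DWMAC fine-adjustment scheme (stated as background, not quoted from the source): default_addend is added to a 32-bit accumulator on every clk_ptp_rate cycle, and each accumulator overflow advances the system time by sub_second_inc nanoseconds, so default_addend ≈ 2^32 * (10^9 / sub_second_inc) / clk_ptp_rate, which is what the div_u64() by clk_ptp_rate above computes.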
740 * stmmac_init_timestamping - initialise timestamping
748 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_timestamping()
751 if (priv->plat->ptp_clk_freq_config) in stmmac_init_timestamping()
752 priv->plat->ptp_clk_freq_config(priv); in stmmac_init_timestamping()
754 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) { in stmmac_init_timestamping()
755 netdev_info(priv->dev, "PTP not supported by HW\n"); in stmmac_init_timestamping()
756 return -EOPNOTSUPP; in stmmac_init_timestamping()
761 netdev_warn(priv->dev, "PTP init failed\n"); in stmmac_init_timestamping()
765 priv->adv_ts = 0; in stmmac_init_timestamping()
767 if (xmac && priv->dma_cap.atime_stamp) in stmmac_init_timestamping()
768 priv->adv_ts = 1; in stmmac_init_timestamping()
770 else if (priv->extend_desc && priv->dma_cap.atime_stamp) in stmmac_init_timestamping()
771 priv->adv_ts = 1; in stmmac_init_timestamping()
773 if (priv->dma_cap.time_stamp) in stmmac_init_timestamping()
774 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); in stmmac_init_timestamping()
776 if (priv->adv_ts) in stmmac_init_timestamping()
777 netdev_info(priv->dev, in stmmac_init_timestamping()
778 "IEEE 1588-2008 Advanced Timestamp supported\n"); in stmmac_init_timestamping()
780 priv->hwts_tx_en = 0; in stmmac_init_timestamping()
781 priv->hwts_rx_en = 0; in stmmac_init_timestamping()
783 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) in stmmac_init_timestamping()
793 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); in stmmac_setup_ptp()
795 netdev_warn(priv->dev, in stmmac_setup_ptp()
806 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_release_ptp()
810 * stmmac_mac_flow_ctrl - Configure flow control in all queues
819 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_mac_flow_ctrl()
821 stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time, in stmmac_mac_flow_ctrl()
828 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_get_caps()
830 /* Refresh the MAC-specific capabilities */ in stmmac_mac_get_caps()
833 config->mac_capabilities = priv->hw->link.caps; in stmmac_mac_get_caps()
835 if (priv->plat->max_speed) in stmmac_mac_get_caps()
836 phylink_limit_mac_speed(config, priv->plat->max_speed); in stmmac_mac_get_caps()
838 return config->mac_capabilities; in stmmac_mac_get_caps()
844 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_select_pcs()
847 if (priv->plat->select_pcs) { in stmmac_mac_select_pcs()
848 pcs = priv->plat->select_pcs(priv, interface); in stmmac_mac_select_pcs()
865 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_down()
867 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_mac_link_down()
868 if (priv->dma_cap.eee) in stmmac_mac_link_down()
869 stmmac_set_eee_pls(priv, priv->hw, false); in stmmac_mac_link_down()
872 ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false); in stmmac_mac_link_down()
881 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_up()
886 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in stmmac_mac_link_up()
887 priv->plat->serdes_powerup) in stmmac_mac_link_up()
888 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv); in stmmac_mac_link_up()
890 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
891 ctrl = old_ctrl & ~priv->hw->link.speed_mask; in stmmac_mac_link_up()
896 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
899 ctrl |= priv->hw->link.xgmii.speed5000; in stmmac_mac_link_up()
902 ctrl |= priv->hw->link.xgmii.speed2500; in stmmac_mac_link_up()
910 ctrl |= priv->hw->link.xlgmii.speed100000; in stmmac_mac_link_up()
913 ctrl |= priv->hw->link.xlgmii.speed50000; in stmmac_mac_link_up()
916 ctrl |= priv->hw->link.xlgmii.speed40000; in stmmac_mac_link_up()
919 ctrl |= priv->hw->link.xlgmii.speed25000; in stmmac_mac_link_up()
922 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
925 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
928 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
936 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
939 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
942 ctrl |= priv->hw->link.speed100; in stmmac_mac_link_up()
945 ctrl |= priv->hw->link.speed10; in stmmac_mac_link_up()
952 if (priv->plat->fix_mac_speed) in stmmac_mac_link_up()
953 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode); in stmmac_mac_link_up()
956 ctrl &= ~priv->hw->link.duplex; in stmmac_mac_link_up()
958 ctrl |= priv->hw->link.duplex; in stmmac_mac_link_up()
973 writel(ctrl, priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
975 if (priv->plat->set_clk_tx_rate) { in stmmac_mac_link_up()
976 ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv, in stmmac_mac_link_up()
977 priv->plat->clk_tx_i, in stmmac_mac_link_up()
980 netdev_err(priv->dev, in stmmac_mac_link_up()
985 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_mac_link_up()
986 if (priv->dma_cap.eee) in stmmac_mac_link_up()
987 stmmac_set_eee_pls(priv, priv->hw, true); in stmmac_mac_link_up()
990 ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true); in stmmac_mac_link_up()
992 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) in stmmac_mac_link_up()
998 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_disable_tx_lpi()
1000 priv->eee_active = false; in stmmac_mac_disable_tx_lpi()
1002 mutex_lock(&priv->lock); in stmmac_mac_disable_tx_lpi()
1004 priv->eee_enabled = false; in stmmac_mac_disable_tx_lpi()
1006 netdev_dbg(priv->dev, "disable EEE\n"); in stmmac_mac_disable_tx_lpi()
1007 priv->eee_sw_timer_en = false; in stmmac_mac_disable_tx_lpi()
1008 timer_delete_sync(&priv->eee_ctrl_timer); in stmmac_mac_disable_tx_lpi()
1009 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0); in stmmac_mac_disable_tx_lpi()
1010 priv->tx_path_in_lpi_mode = false; in stmmac_mac_disable_tx_lpi()
1012 stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS); in stmmac_mac_disable_tx_lpi()
1013 mutex_unlock(&priv->lock); in stmmac_mac_disable_tx_lpi()
1019 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_enable_tx_lpi()
1022 priv->tx_lpi_timer = timer; in stmmac_mac_enable_tx_lpi()
1023 priv->eee_active = true; in stmmac_mac_enable_tx_lpi()
1025 mutex_lock(&priv->lock); in stmmac_mac_enable_tx_lpi()
1027 priv->eee_enabled = true; in stmmac_mac_enable_tx_lpi()
1032 if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP) in stmmac_mac_enable_tx_lpi()
1033 priv->tx_lpi_clk_stop = tx_clk_stop; in stmmac_mac_enable_tx_lpi()
1035 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, in stmmac_mac_enable_tx_lpi()
1039 ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER, in stmmac_mac_enable_tx_lpi()
1040 priv->tx_lpi_clk_stop, priv->tx_lpi_timer); in stmmac_mac_enable_tx_lpi()
1046 priv->eee_sw_timer_en = true; in stmmac_mac_enable_tx_lpi()
1050 mutex_unlock(&priv->lock); in stmmac_mac_enable_tx_lpi()
1051 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); in stmmac_mac_enable_tx_lpi()
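As a usage note (interface name assumed for illustration): something like "ethtool --set-eee eth0 eee on tx-lpi on tx-timer 1000" is the typical way to reach this path. Phylink resolves the negotiated EEE state and calls stmmac_mac_enable_tx_lpi() with the LPI timer in microseconds; the MAC then either arms its hardware LPI timer (STMMAC_LPI_TIMER) or, if that fails, falls back to the eee_ctrl_timer software timer handled by stmmac_try_to_start_sw_lpi() above.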
1059 struct net_device *ndev = to_net_dev(config->dev); in stmmac_mac_finish()
1062 if (priv->plat->mac_finish) in stmmac_mac_finish()
1063 priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface); in stmmac_mac_finish()
1080 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1088 int interface = priv->plat->phy_interface; in stmmac_check_pcs_mode()
1090 if (priv->dma_cap.pcs) { in stmmac_check_pcs_mode()
1095 netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); in stmmac_check_pcs_mode()
1096 priv->hw->pcs = STMMAC_PCS_RGMII; in stmmac_check_pcs_mode()
1098 netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); in stmmac_check_pcs_mode()
1099 priv->hw->pcs = STMMAC_PCS_SGMII; in stmmac_check_pcs_mode()
1105 * stmmac_init_phy - PHY initialization
1115 int mode = priv->plat->phy_interface; in stmmac_init_phy()
1121 if (!phylink_expects_phy(priv->phylink)) in stmmac_init_phy()
1124 if (priv->hw->xpcs && in stmmac_init_phy()
1125 xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73) in stmmac_init_phy()
1128 fwnode = priv->plat->port_node; in stmmac_init_phy()
1130 fwnode = dev_fwnode(priv->device); in stmmac_init_phy()
1137 /* Some DT bindings do not set up the PHY handle. Let's try to in stmmac_init_phy()
1141 int addr = priv->plat->phy_addr; in stmmac_init_phy()
1145 netdev_err(priv->dev, "no phy found\n"); in stmmac_init_phy()
1146 return -ENODEV; in stmmac_init_phy()
1149 phydev = mdiobus_get_phy(priv->mii, addr); in stmmac_init_phy()
1151 netdev_err(priv->dev, "no phy at addr %d\n", addr); in stmmac_init_phy()
1152 return -ENODEV; in stmmac_init_phy()
1155 ret = phylink_connect_phy(priv->phylink, phydev); in stmmac_init_phy()
1158 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); in stmmac_init_phy()
1162 netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n", in stmmac_init_phy()
1172 if (!phylink_ethtool_get_eee(priv->phylink, &eee)) { in stmmac_init_phy()
1173 eee.tx_lpi_timer = priv->tx_lpi_timer; in stmmac_init_phy()
1174 phylink_ethtool_set_eee(priv->phylink, &eee); in stmmac_init_phy()
1177 if (!priv->plat->pmt) { in stmmac_init_phy()
1180 phylink_ethtool_get_wol(priv->phylink, &wol); in stmmac_init_phy()
1181 device_set_wakeup_capable(priv->device, !!wol.supported); in stmmac_init_phy()
1182 device_set_wakeup_enable(priv->device, !!wol.wolopts); in stmmac_init_phy()
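In other words, when the platform has no MAC-level PMT block (priv->plat->pmt is zero), Wake-on-LAN is delegated to the PHY: whatever the PHY reports through phylink_ethtool_get_wol() determines whether the net device is marked wakeup-capable, and a request such as "ethtool -s eth0 wol g" (interface name assumed) is then serviced by the PHY rather than by the GMAC's own power-management unit.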
1196 config = &priv->phylink_config; in stmmac_phy_setup()
1198 config->dev = &priv->dev->dev; in stmmac_phy_setup()
1199 config->type = PHYLINK_NETDEV; in stmmac_phy_setup()
1200 config->mac_managed_pm = true; in stmmac_phy_setup()
1203 config->mac_requires_rxc = true; in stmmac_phy_setup()
1205 if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) in stmmac_phy_setup()
1206 config->eee_rx_clk_stop_enable = true; in stmmac_phy_setup()
1209 priv->tx_lpi_clk_stop = priv->plat->flags & in stmmac_phy_setup()
1212 mdio_bus_data = priv->plat->mdio_bus_data; in stmmac_phy_setup()
1214 config->default_an_inband = mdio_bus_data->default_an_inband; in stmmac_phy_setup()
1219 if (priv->plat->get_interfaces) in stmmac_phy_setup()
1220 priv->plat->get_interfaces(priv, priv->plat->bsp_priv, in stmmac_phy_setup()
1221 config->supported_interfaces); in stmmac_phy_setup()
1227 if (phy_interface_empty(config->supported_interfaces)) in stmmac_phy_setup()
1228 __set_bit(priv->plat->phy_interface, in stmmac_phy_setup()
1229 config->supported_interfaces); in stmmac_phy_setup()
1232 if (priv->hw->xpcs) in stmmac_phy_setup()
1233 pcs = xpcs_to_phylink_pcs(priv->hw->xpcs); in stmmac_phy_setup()
1235 pcs = priv->hw->phylink_pcs; in stmmac_phy_setup()
1238 phy_interface_or(config->supported_interfaces, in stmmac_phy_setup()
1239 config->supported_interfaces, in stmmac_phy_setup()
1240 pcs->supported_interfaces); in stmmac_phy_setup()
1242 if (priv->dma_cap.eee) { in stmmac_phy_setup()
1244 memcpy(config->lpi_interfaces, config->supported_interfaces, in stmmac_phy_setup()
1245 sizeof(config->lpi_interfaces)); in stmmac_phy_setup()
1248 config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD; in stmmac_phy_setup()
1249 config->lpi_timer_default = eee_timer * 1000; in stmmac_phy_setup()
1250 config->eee_enabled_default = true; in stmmac_phy_setup()
1253 fwnode = priv->plat->port_node; in stmmac_phy_setup()
1255 fwnode = dev_fwnode(priv->device); in stmmac_phy_setup()
1257 phylink = phylink_create(config, fwnode, priv->plat->phy_interface, in stmmac_phy_setup()
1262 priv->phylink = phylink; in stmmac_phy_setup()
1269 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_display_rx_rings()
1276 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_display_rx_rings()
1280 if (priv->extend_desc) { in stmmac_display_rx_rings()
1281 head_rx = (void *)rx_q->dma_erx; in stmmac_display_rx_rings()
1284 head_rx = (void *)rx_q->dma_rx; in stmmac_display_rx_rings()
1289 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, in stmmac_display_rx_rings()
1290 rx_q->dma_rx_phy, desc_size); in stmmac_display_rx_rings()
1297 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_display_tx_rings()
1304 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_display_tx_rings()
1308 if (priv->extend_desc) { in stmmac_display_tx_rings()
1309 head_tx = (void *)tx_q->dma_etx; in stmmac_display_tx_rings()
1311 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { in stmmac_display_tx_rings()
1312 head_tx = (void *)tx_q->dma_entx; in stmmac_display_tx_rings()
1315 head_tx = (void *)tx_q->dma_tx; in stmmac_display_tx_rings()
1319 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, in stmmac_display_tx_rings()
1320 tx_q->dma_tx_phy, desc_size); in stmmac_display_tx_rings()
1361 * stmmac_clear_rx_descriptors - clear RX descriptors
1372 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_clear_rx_descriptors()
1376 for (i = 0; i < dma_conf->dma_rx_size; i++) in stmmac_clear_rx_descriptors()
1377 if (priv->extend_desc) in stmmac_clear_rx_descriptors()
1378 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, in stmmac_clear_rx_descriptors()
1379 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1380 (i == dma_conf->dma_rx_size - 1), in stmmac_clear_rx_descriptors()
1381 dma_conf->dma_buf_sz); in stmmac_clear_rx_descriptors()
1383 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], in stmmac_clear_rx_descriptors()
1384 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1385 (i == dma_conf->dma_rx_size - 1), in stmmac_clear_rx_descriptors()
1386 dma_conf->dma_buf_sz); in stmmac_clear_rx_descriptors()
1390 * stmmac_clear_tx_descriptors - clear tx descriptors
1401 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_clear_tx_descriptors()
1405 for (i = 0; i < dma_conf->dma_tx_size; i++) { in stmmac_clear_tx_descriptors()
1406 int last = (i == (dma_conf->dma_tx_size - 1)); in stmmac_clear_tx_descriptors()
1409 if (priv->extend_desc) in stmmac_clear_tx_descriptors()
1410 p = &tx_q->dma_etx[i].basic; in stmmac_clear_tx_descriptors()
1411 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_clear_tx_descriptors()
1412 p = &tx_q->dma_entx[i].basic; in stmmac_clear_tx_descriptors()
1414 p = &tx_q->dma_tx[i]; in stmmac_clear_tx_descriptors()
1416 stmmac_init_tx_desc(priv, p, priv->mode, last); in stmmac_clear_tx_descriptors()
1421 * stmmac_clear_descriptors - clear descriptors
1430 u32 rx_queue_cnt = priv->plat->rx_queues_to_use; in stmmac_clear_descriptors()
1431 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_clear_descriptors()
1444 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1459 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_init_rx_buffers()
1460 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in stmmac_init_rx_buffers()
1463 if (priv->dma_cap.host_dma_width <= 32) in stmmac_init_rx_buffers()
1466 if (!buf->page) { in stmmac_init_rx_buffers()
1467 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_init_rx_buffers()
1468 if (!buf->page) in stmmac_init_rx_buffers()
1469 return -ENOMEM; in stmmac_init_rx_buffers()
1470 buf->page_offset = stmmac_rx_offset(priv); in stmmac_init_rx_buffers()
1473 if (priv->sph && !buf->sec_page) { in stmmac_init_rx_buffers()
1474 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_init_rx_buffers()
1475 if (!buf->sec_page) in stmmac_init_rx_buffers()
1476 return -ENOMEM; in stmmac_init_rx_buffers()
1478 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); in stmmac_init_rx_buffers()
1479 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); in stmmac_init_rx_buffers()
1481 buf->sec_page = NULL; in stmmac_init_rx_buffers()
1482 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); in stmmac_init_rx_buffers()
1485 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; in stmmac_init_rx_buffers()
1487 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_init_rx_buffers()
1488 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) in stmmac_init_rx_buffers()
1495 * stmmac_free_rx_buffer - free RX dma buffers
1504 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in stmmac_free_rx_buffer()
1506 if (buf->page) in stmmac_free_rx_buffer()
1507 page_pool_put_full_page(rx_q->page_pool, buf->page, false); in stmmac_free_rx_buffer()
1508 buf->page = NULL; in stmmac_free_rx_buffer()
1510 if (buf->sec_page) in stmmac_free_rx_buffer()
1511 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); in stmmac_free_rx_buffer()
1512 buf->sec_page = NULL; in stmmac_free_rx_buffer()
1516 * stmmac_free_tx_buffer - free TX dma buffers
1526 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_free_tx_buffer()
1528 if (tx_q->tx_skbuff_dma[i].buf && in stmmac_free_tx_buffer()
1529 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { in stmmac_free_tx_buffer()
1530 if (tx_q->tx_skbuff_dma[i].map_as_page) in stmmac_free_tx_buffer()
1531 dma_unmap_page(priv->device, in stmmac_free_tx_buffer()
1532 tx_q->tx_skbuff_dma[i].buf, in stmmac_free_tx_buffer()
1533 tx_q->tx_skbuff_dma[i].len, in stmmac_free_tx_buffer()
1536 dma_unmap_single(priv->device, in stmmac_free_tx_buffer()
1537 tx_q->tx_skbuff_dma[i].buf, in stmmac_free_tx_buffer()
1538 tx_q->tx_skbuff_dma[i].len, in stmmac_free_tx_buffer()
1542 if (tx_q->xdpf[i] && in stmmac_free_tx_buffer()
1543 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || in stmmac_free_tx_buffer()
1544 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { in stmmac_free_tx_buffer()
1545 xdp_return_frame(tx_q->xdpf[i]); in stmmac_free_tx_buffer()
1546 tx_q->xdpf[i] = NULL; in stmmac_free_tx_buffer()
1549 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) in stmmac_free_tx_buffer()
1550 tx_q->xsk_frames_done++; in stmmac_free_tx_buffer()
1552 if (tx_q->tx_skbuff[i] && in stmmac_free_tx_buffer()
1553 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_free_tx_buffer()
1554 dev_kfree_skb_any(tx_q->tx_skbuff[i]); in stmmac_free_tx_buffer()
1555 tx_q->tx_skbuff[i] = NULL; in stmmac_free_tx_buffer()
1558 tx_q->tx_skbuff_dma[i].buf = 0; in stmmac_free_tx_buffer()
1559 tx_q->tx_skbuff_dma[i].map_as_page = false; in stmmac_free_tx_buffer()
1563 * dma_free_rx_skbufs - free RX dma buffers
1572 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in dma_free_rx_skbufs()
1575 for (i = 0; i < dma_conf->dma_rx_size; i++) in dma_free_rx_skbufs()
1583 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_alloc_rx_buffers()
1586 for (i = 0; i < dma_conf->dma_rx_size; i++) { in stmmac_alloc_rx_buffers()
1590 if (priv->extend_desc) in stmmac_alloc_rx_buffers()
1591 p = &((rx_q->dma_erx + i)->basic); in stmmac_alloc_rx_buffers()
1593 p = rx_q->dma_rx + i; in stmmac_alloc_rx_buffers()
1600 rx_q->buf_alloc_num++; in stmmac_alloc_rx_buffers()
1607 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1616 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in dma_free_rx_xskbufs()
1619 for (i = 0; i < dma_conf->dma_rx_size; i++) { in dma_free_rx_xskbufs()
1620 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in dma_free_rx_xskbufs()
1622 if (!buf->xdp) in dma_free_rx_xskbufs()
1625 xsk_buff_free(buf->xdp); in dma_free_rx_xskbufs()
1626 buf->xdp = NULL; in dma_free_rx_xskbufs()
1634 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_alloc_rx_buffers_zc()
1643 for (i = 0; i < dma_conf->dma_rx_size; i++) { in stmmac_alloc_rx_buffers_zc()
1648 if (priv->extend_desc) in stmmac_alloc_rx_buffers_zc()
1649 p = (struct dma_desc *)(rx_q->dma_erx + i); in stmmac_alloc_rx_buffers_zc()
1651 p = rx_q->dma_rx + i; in stmmac_alloc_rx_buffers_zc()
1653 buf = &rx_q->buf_pool[i]; in stmmac_alloc_rx_buffers_zc()
1655 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_alloc_rx_buffers_zc()
1656 if (!buf->xdp) in stmmac_alloc_rx_buffers_zc()
1657 return -ENOMEM; in stmmac_alloc_rx_buffers_zc()
1659 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); in stmmac_alloc_rx_buffers_zc()
1661 rx_q->buf_alloc_num++; in stmmac_alloc_rx_buffers_zc()
1669 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) in stmmac_get_xsk_pool()
1672 return xsk_get_pool_from_qid(priv->dev, queue); in stmmac_get_xsk_pool()
1676 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1689 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __init_dma_rx_desc_rings()
1692 netif_dbg(priv, probe, priv->dev, in __init_dma_rx_desc_rings()
1694 (u32)rx_q->dma_rx_phy); in __init_dma_rx_desc_rings()
1698 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1700 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_rx_desc_rings()
1702 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1703 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, in __init_dma_rx_desc_rings()
1706 netdev_info(priv->dev, in __init_dma_rx_desc_rings()
1707 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", in __init_dma_rx_desc_rings()
1708 rx_q->queue_index); in __init_dma_rx_desc_rings()
1709 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1711 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, in __init_dma_rx_desc_rings()
1713 rx_q->page_pool)); in __init_dma_rx_desc_rings()
1714 netdev_info(priv->dev, in __init_dma_rx_desc_rings()
1715 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", in __init_dma_rx_desc_rings()
1716 rx_q->queue_index); in __init_dma_rx_desc_rings()
1719 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1721 * xdpsock TX-only. in __init_dma_rx_desc_rings()
1727 return -ENOMEM; in __init_dma_rx_desc_rings()
1731 if (priv->mode == STMMAC_CHAIN_MODE) { in __init_dma_rx_desc_rings()
1732 if (priv->extend_desc) in __init_dma_rx_desc_rings()
1733 stmmac_mode_init(priv, rx_q->dma_erx, in __init_dma_rx_desc_rings()
1734 rx_q->dma_rx_phy, in __init_dma_rx_desc_rings()
1735 dma_conf->dma_rx_size, 1); in __init_dma_rx_desc_rings()
1737 stmmac_mode_init(priv, rx_q->dma_rx, in __init_dma_rx_desc_rings()
1738 rx_q->dma_rx_phy, in __init_dma_rx_desc_rings()
1739 dma_conf->dma_rx_size, 0); in __init_dma_rx_desc_rings()
1750 u32 rx_count = priv->plat->rx_queues_to_use; in init_dma_rx_desc_rings()
1755 netif_dbg(priv, probe, priv->dev, in init_dma_rx_desc_rings()
1768 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in init_dma_rx_desc_rings()
1770 if (rx_q->xsk_pool) in init_dma_rx_desc_rings()
1775 rx_q->buf_alloc_num = 0; in init_dma_rx_desc_rings()
1776 rx_q->xsk_pool = NULL; in init_dma_rx_desc_rings()
1778 queue--; in init_dma_rx_desc_rings()
1785 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1797 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __init_dma_tx_desc_rings()
1800 netif_dbg(priv, probe, priv->dev, in __init_dma_tx_desc_rings()
1802 (u32)tx_q->dma_tx_phy); in __init_dma_tx_desc_rings()
1805 if (priv->mode == STMMAC_CHAIN_MODE) { in __init_dma_tx_desc_rings()
1806 if (priv->extend_desc) in __init_dma_tx_desc_rings()
1807 stmmac_mode_init(priv, tx_q->dma_etx, in __init_dma_tx_desc_rings()
1808 tx_q->dma_tx_phy, in __init_dma_tx_desc_rings()
1809 dma_conf->dma_tx_size, 1); in __init_dma_tx_desc_rings()
1810 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) in __init_dma_tx_desc_rings()
1811 stmmac_mode_init(priv, tx_q->dma_tx, in __init_dma_tx_desc_rings()
1812 tx_q->dma_tx_phy, in __init_dma_tx_desc_rings()
1813 dma_conf->dma_tx_size, 0); in __init_dma_tx_desc_rings()
1816 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_tx_desc_rings()
1818 for (i = 0; i < dma_conf->dma_tx_size; i++) { in __init_dma_tx_desc_rings()
1821 if (priv->extend_desc) in __init_dma_tx_desc_rings()
1822 p = &((tx_q->dma_etx + i)->basic); in __init_dma_tx_desc_rings()
1823 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __init_dma_tx_desc_rings()
1824 p = &((tx_q->dma_entx + i)->basic); in __init_dma_tx_desc_rings()
1826 p = tx_q->dma_tx + i; in __init_dma_tx_desc_rings()
1830 tx_q->tx_skbuff_dma[i].buf = 0; in __init_dma_tx_desc_rings()
1831 tx_q->tx_skbuff_dma[i].map_as_page = false; in __init_dma_tx_desc_rings()
1832 tx_q->tx_skbuff_dma[i].len = 0; in __init_dma_tx_desc_rings()
1833 tx_q->tx_skbuff_dma[i].last_segment = false; in __init_dma_tx_desc_rings()
1834 tx_q->tx_skbuff[i] = NULL; in __init_dma_tx_desc_rings()
1847 tx_queue_cnt = priv->plat->tx_queues_to_use; in init_dma_tx_desc_rings()
1856 * init_dma_desc_rings - init the RX/TX descriptor rings
1886 * dma_free_tx_skbufs - free TX dma buffers
1895 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in dma_free_tx_skbufs()
1898 tx_q->xsk_frames_done = 0; in dma_free_tx_skbufs()
1900 for (i = 0; i < dma_conf->dma_tx_size; i++) in dma_free_tx_skbufs()
1903 if (tx_q->xsk_pool && tx_q->xsk_frames_done) { in dma_free_tx_skbufs()
1904 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); in dma_free_tx_skbufs()
1905 tx_q->xsk_frames_done = 0; in dma_free_tx_skbufs()
1906 tx_q->xsk_pool = NULL; in dma_free_tx_skbufs()
1911 * stmmac_free_tx_skbufs - free TX skb buffers
1916 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_free_tx_skbufs()
1920 dma_free_tx_skbufs(priv, &priv->dma_conf, queue); in stmmac_free_tx_skbufs()
1924 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1933 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __free_dma_rx_desc_resources()
1936 if (rx_q->xsk_pool) in __free_dma_rx_desc_resources()
1941 rx_q->buf_alloc_num = 0; in __free_dma_rx_desc_resources()
1942 rx_q->xsk_pool = NULL; in __free_dma_rx_desc_resources()
1945 if (!priv->extend_desc) in __free_dma_rx_desc_resources()
1946 dma_free_coherent(priv->device, dma_conf->dma_rx_size * in __free_dma_rx_desc_resources()
1948 rx_q->dma_rx, rx_q->dma_rx_phy); in __free_dma_rx_desc_resources()
1950 dma_free_coherent(priv->device, dma_conf->dma_rx_size * in __free_dma_rx_desc_resources()
1952 rx_q->dma_erx, rx_q->dma_rx_phy); in __free_dma_rx_desc_resources()
1954 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) in __free_dma_rx_desc_resources()
1955 xdp_rxq_info_unreg(&rx_q->xdp_rxq); in __free_dma_rx_desc_resources()
1957 kfree(rx_q->buf_pool); in __free_dma_rx_desc_resources()
1958 if (rx_q->page_pool) in __free_dma_rx_desc_resources()
1959 page_pool_destroy(rx_q->page_pool); in __free_dma_rx_desc_resources()
1965 u32 rx_count = priv->plat->rx_queues_to_use; in free_dma_rx_desc_resources()
1974 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1983 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __free_dma_tx_desc_resources()
1990 if (priv->extend_desc) { in __free_dma_tx_desc_resources()
1992 addr = tx_q->dma_etx; in __free_dma_tx_desc_resources()
1993 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { in __free_dma_tx_desc_resources()
1995 addr = tx_q->dma_entx; in __free_dma_tx_desc_resources()
1998 addr = tx_q->dma_tx; in __free_dma_tx_desc_resources()
2001 size *= dma_conf->dma_tx_size; in __free_dma_tx_desc_resources()
2003 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); in __free_dma_tx_desc_resources()
2005 kfree(tx_q->tx_skbuff_dma); in __free_dma_tx_desc_resources()
2006 kfree(tx_q->tx_skbuff); in __free_dma_tx_desc_resources()
2012 u32 tx_count = priv->plat->tx_queues_to_use; in free_dma_tx_desc_resources()
2021 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2027 * reception, for example, it pre-allocates the RX socket buffer in order to
2028 * allow the zero-copy mechanism.
2034 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __alloc_dma_rx_desc_resources()
2035 struct stmmac_channel *ch = &priv->channel[queue]; in __alloc_dma_rx_desc_resources()
2042 dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz + in __alloc_dma_rx_desc_resources()
2046 rx_q->queue_index = queue; in __alloc_dma_rx_desc_resources()
2047 rx_q->priv_data = priv; in __alloc_dma_rx_desc_resources()
2048 rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE; in __alloc_dma_rx_desc_resources()
2051 pp_params.pool_size = dma_conf->dma_rx_size; in __alloc_dma_rx_desc_resources()
2053 pp_params.nid = dev_to_node(priv->device); in __alloc_dma_rx_desc_resources()
2054 pp_params.dev = priv->device; in __alloc_dma_rx_desc_resources()
2057 pp_params.max_len = dma_conf->dma_buf_sz; in __alloc_dma_rx_desc_resources()
2059 if (priv->sph) { in __alloc_dma_rx_desc_resources()
2064 rx_q->page_pool = page_pool_create(&pp_params); in __alloc_dma_rx_desc_resources()
2065 if (IS_ERR(rx_q->page_pool)) { in __alloc_dma_rx_desc_resources()
2066 ret = PTR_ERR(rx_q->page_pool); in __alloc_dma_rx_desc_resources()
2067 rx_q->page_pool = NULL; in __alloc_dma_rx_desc_resources()
2071 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, in __alloc_dma_rx_desc_resources()
2072 sizeof(*rx_q->buf_pool), in __alloc_dma_rx_desc_resources()
2074 if (!rx_q->buf_pool) in __alloc_dma_rx_desc_resources()
2075 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2077 if (priv->extend_desc) { in __alloc_dma_rx_desc_resources()
2078 rx_q->dma_erx = dma_alloc_coherent(priv->device, in __alloc_dma_rx_desc_resources()
2079 dma_conf->dma_rx_size * in __alloc_dma_rx_desc_resources()
2081 &rx_q->dma_rx_phy, in __alloc_dma_rx_desc_resources()
2083 if (!rx_q->dma_erx) in __alloc_dma_rx_desc_resources()
2084 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2087 rx_q->dma_rx = dma_alloc_coherent(priv->device, in __alloc_dma_rx_desc_resources()
2088 dma_conf->dma_rx_size * in __alloc_dma_rx_desc_resources()
2090 &rx_q->dma_rx_phy, in __alloc_dma_rx_desc_resources()
2092 if (!rx_q->dma_rx) in __alloc_dma_rx_desc_resources()
2093 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2097 test_bit(queue, priv->af_xdp_zc_qps)) in __alloc_dma_rx_desc_resources()
2098 napi_id = ch->rxtx_napi.napi_id; in __alloc_dma_rx_desc_resources()
2100 napi_id = ch->rx_napi.napi_id; in __alloc_dma_rx_desc_resources()
2102 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, in __alloc_dma_rx_desc_resources()
2103 rx_q->queue_index, in __alloc_dma_rx_desc_resources()
2106 netdev_err(priv->dev, "Failed to register xdp rxq info\n"); in __alloc_dma_rx_desc_resources()
2107 return -EINVAL; in __alloc_dma_rx_desc_resources()
2116 u32 rx_count = priv->plat->rx_queues_to_use; in alloc_dma_rx_desc_resources()
2136 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2142 * reception, for example, it pre-allocates the RX socket buffer in order to
2143 * allow the zero-copy mechanism.
2149 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __alloc_dma_tx_desc_resources()
2153 tx_q->queue_index = queue; in __alloc_dma_tx_desc_resources()
2154 tx_q->priv_data = priv; in __alloc_dma_tx_desc_resources()
2156 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, in __alloc_dma_tx_desc_resources()
2157 sizeof(*tx_q->tx_skbuff_dma), in __alloc_dma_tx_desc_resources()
2159 if (!tx_q->tx_skbuff_dma) in __alloc_dma_tx_desc_resources()
2160 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2162 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, in __alloc_dma_tx_desc_resources()
2165 if (!tx_q->tx_skbuff) in __alloc_dma_tx_desc_resources()
2166 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2168 if (priv->extend_desc) in __alloc_dma_tx_desc_resources()
2170 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __alloc_dma_tx_desc_resources()
2175 size *= dma_conf->dma_tx_size; in __alloc_dma_tx_desc_resources()
2177 addr = dma_alloc_coherent(priv->device, size, in __alloc_dma_tx_desc_resources()
2178 &tx_q->dma_tx_phy, GFP_KERNEL); in __alloc_dma_tx_desc_resources()
2180 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2182 if (priv->extend_desc) in __alloc_dma_tx_desc_resources()
2183 tx_q->dma_etx = addr; in __alloc_dma_tx_desc_resources()
2184 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __alloc_dma_tx_desc_resources()
2185 tx_q->dma_entx = addr; in __alloc_dma_tx_desc_resources()
2187 tx_q->dma_tx = addr; in __alloc_dma_tx_desc_resources()
2195 u32 tx_count = priv->plat->tx_queues_to_use; in alloc_dma_tx_desc_resources()
2214 * alloc_dma_desc_resources - alloc TX/RX resources.
2219 * reception, for example, it pre-allocates the RX socket buffer in order to
2220 * allow the zero-copy mechanism.
2237 * free_dma_desc_resources - free dma desc resources
2254 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2260 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_enable_rx_queues()
2265 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; in stmmac_mac_enable_rx_queues()
2266 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); in stmmac_mac_enable_rx_queues()
2271 * stmmac_start_rx_dma - start RX DMA channel
2279 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); in stmmac_start_rx_dma()
2280 stmmac_start_rx(priv, priv->ioaddr, chan); in stmmac_start_rx_dma()
2284 * stmmac_start_tx_dma - start TX DMA channel
2292 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); in stmmac_start_tx_dma()
2293 stmmac_start_tx(priv, priv->ioaddr, chan); in stmmac_start_tx_dma()
2297 * stmmac_stop_rx_dma - stop RX DMA channel
2305 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); in stmmac_stop_rx_dma()
2306 stmmac_stop_rx(priv, priv->ioaddr, chan); in stmmac_stop_rx_dma()
2310 * stmmac_stop_tx_dma - stop TX DMA channel
2318 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); in stmmac_stop_tx_dma()
2319 stmmac_stop_tx(priv, priv->ioaddr, chan); in stmmac_stop_tx_dma()
2324 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_enable_all_dma_irq()
2325 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_enable_all_dma_irq()
2330 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_enable_all_dma_irq()
2333 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_all_dma_irq()
2334 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_enable_all_dma_irq()
2335 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_all_dma_irq()
2340 * stmmac_start_all_dma - start all RX and TX DMA channels
2347 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_start_all_dma()
2348 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_start_all_dma()
2359 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2366 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_stop_all_dma()
2367 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_stop_all_dma()
2378 * stmmac_dma_operation_mode - HW DMA operation mode
2381 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2385 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_dma_operation_mode()
2386 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_dma_operation_mode()
2387 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_dma_operation_mode()
2388 int txfifosz = priv->plat->tx_fifo_size; in stmmac_dma_operation_mode()
2395 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_dma_operation_mode()
2397 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_dma_operation_mode()
2400 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { in stmmac_dma_operation_mode()
2405 if (priv->plat->force_thresh_dma_mode) { in stmmac_dma_operation_mode()
2408 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { in stmmac_dma_operation_mode()
2411 * to perform the TX COE in HW. This depends on: in stmmac_dma_operation_mode()
2412 * 1) TX COE if actually supported in stmmac_dma_operation_mode()
2418 priv->xstats.threshold = SF_DMA_MODE; in stmmac_dma_operation_mode()
2426 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_dma_operation_mode()
2429 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
2431 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, in stmmac_dma_operation_mode()
2434 if (rx_q->xsk_pool) { in stmmac_dma_operation_mode()
2435 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_dma_operation_mode()
2436 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_dma_operation_mode()
2440 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_dma_operation_mode()
2441 priv->dma_conf.dma_buf_sz, in stmmac_dma_operation_mode()
2447 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
2449 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, in stmmac_dma_operation_mode()
2458 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc); in stmmac_xsk_request_timestamp()
2459 *meta_req->set_ic = true; in stmmac_xsk_request_timestamp()
2465 struct stmmac_priv *priv = tx_compl->priv; in stmmac_xsk_fill_timestamp()
2466 struct dma_desc *desc = tx_compl->desc; in stmmac_xsk_fill_timestamp()
2470 if (!priv->hwts_tx_en) in stmmac_xsk_fill_timestamp()
2475 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); in stmmac_xsk_fill_timestamp()
2477 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { in stmmac_xsk_fill_timestamp()
2482 ns -= priv->plat->cdc_error_adj; in stmmac_xsk_fill_timestamp()
2494 if (meta_req->tbs & STMMAC_TBS_EN) in stmmac_xsk_request_launch_time()
2495 stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec, in stmmac_xsk_request_launch_time()
2507 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit_zc()
2508 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xdp_xmit_zc()
2509 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xdp_xmit_zc()
2510 bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported; in stmmac_xdp_xmit_zc()
2511 struct xsk_buff_pool *pool = tx_q->xsk_pool; in stmmac_xdp_xmit_zc()
2512 unsigned int entry = tx_q->cur_tx; in stmmac_xdp_xmit_zc()
2518 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit_zc()
2523 for (; budget > 0; budget--) { in stmmac_xdp_xmit_zc()
2533 !netif_carrier_ok(priv->dev)) { in stmmac_xdp_xmit_zc()
2541 if (priv->est && priv->est->enable && in stmmac_xdp_xmit_zc()
2542 priv->est->max_sdu[queue] && in stmmac_xdp_xmit_zc()
2543 xdp_desc.len > priv->est->max_sdu[queue]) { in stmmac_xdp_xmit_zc()
2544 priv->xstats.max_sdu_txq_drop[queue]++; in stmmac_xdp_xmit_zc()
2548 if (likely(priv->extend_desc)) in stmmac_xdp_xmit_zc()
2549 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xdp_xmit_zc()
2550 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xdp_xmit_zc()
2551 tx_desc = &tx_q->dma_entx[entry].basic; in stmmac_xdp_xmit_zc()
2553 tx_desc = tx_q->dma_tx + entry; in stmmac_xdp_xmit_zc()
2559 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; in stmmac_xdp_xmit_zc()
2565 tx_q->tx_skbuff_dma[entry].buf = 0; in stmmac_xdp_xmit_zc()
2566 tx_q->xdpf[entry] = NULL; in stmmac_xdp_xmit_zc()
2568 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_xdp_xmit_zc()
2569 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; in stmmac_xdp_xmit_zc()
2570 tx_q->tx_skbuff_dma[entry].last_segment = true; in stmmac_xdp_xmit_zc()
2571 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_xdp_xmit_zc()
2575 tx_q->tx_count_frames++; in stmmac_xdp_xmit_zc()
2577 if (!priv->tx_coal_frames[queue]) in stmmac_xdp_xmit_zc()
2579 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) in stmmac_xdp_xmit_zc()
2587 meta_req.tbs = tx_q->tbs; in stmmac_xdp_xmit_zc()
2588 meta_req.edesc = &tx_q->dma_entx[entry]; in stmmac_xdp_xmit_zc()
2592 tx_q->tx_count_frames = 0; in stmmac_xdp_xmit_zc()
2598 csum, priv->mode, true, true, in stmmac_xdp_xmit_zc()
2601 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); in stmmac_xdp_xmit_zc()
2604 &tx_q->tx_skbuff_dma[entry].xsk_meta); in stmmac_xdp_xmit_zc()
2606 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_xdp_xmit_zc()
2607 entry = tx_q->cur_tx; in stmmac_xdp_xmit_zc()
2609 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_xdp_xmit_zc()
2610 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit); in stmmac_xdp_xmit_zc()
2611 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_xdp_xmit_zc()
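Returning to the search term itself: the coe_unsupported flag tested at the top of stmmac_xdp_xmit_zc() (and in the other TX paths) is a per-TX-queue boolean that platform code reads from the device tree. A minimal sketch of how it is typically populated (the node variable and the exact binding name follow the snps,dwmac convention and should be treated as assumptions here):

	/* Inside the per-queue loop parsing the "snps,mtl-tx-config" child node */
	plat->tx_queues_cfg[queue].coe_unsupported =
		of_property_read_bool(q_node, "snps,coe-unsupported");

Queues flagged this way end up with csum == false in the descriptor preparation above, so checksum insertion is left to software for those queues only.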
2628 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { in stmmac_bump_dma_threshold()
2631 if (priv->plat->force_thresh_dma_mode) in stmmac_bump_dma_threshold()
2637 priv->xstats.threshold = tc; in stmmac_bump_dma_threshold()
2642 * stmmac_tx_clean - to manage the transmission completion
2654 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_clean()
2655 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_tx_clean()
2660 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2662 tx_q->xsk_frames_done = 0; in stmmac_tx_clean()
2664 entry = tx_q->dirty_tx; in stmmac_tx_clean()
2667 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { in stmmac_tx_clean()
2673 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || in stmmac_tx_clean()
2674 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { in stmmac_tx_clean()
2675 xdpf = tx_q->xdpf[entry]; in stmmac_tx_clean()
2677 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_tx_clean()
2679 skb = tx_q->tx_skbuff[entry]; in stmmac_tx_clean()
2685 if (priv->extend_desc) in stmmac_tx_clean()
2686 p = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_tx_clean()
2687 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tx_clean()
2688 p = &tx_q->dma_entx[entry].basic; in stmmac_tx_clean()
2690 p = tx_q->dma_tx + entry; in stmmac_tx_clean()
2692 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr); in stmmac_tx_clean()
2716 } else if (tx_q->xsk_pool && in stmmac_tx_clean()
2717 xp_tx_metadata_enabled(tx_q->xsk_pool)) { in stmmac_tx_clean()
2723 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta, in stmmac_tx_clean()
2729 if (likely(tx_q->tx_skbuff_dma[entry].buf && in stmmac_tx_clean()
2730 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { in stmmac_tx_clean()
2731 if (tx_q->tx_skbuff_dma[entry].map_as_page) in stmmac_tx_clean()
2732 dma_unmap_page(priv->device, in stmmac_tx_clean()
2733 tx_q->tx_skbuff_dma[entry].buf, in stmmac_tx_clean()
2734 tx_q->tx_skbuff_dma[entry].len, in stmmac_tx_clean()
2737 dma_unmap_single(priv->device, in stmmac_tx_clean()
2738 tx_q->tx_skbuff_dma[entry].buf, in stmmac_tx_clean()
2739 tx_q->tx_skbuff_dma[entry].len, in stmmac_tx_clean()
2741 tx_q->tx_skbuff_dma[entry].buf = 0; in stmmac_tx_clean()
2742 tx_q->tx_skbuff_dma[entry].len = 0; in stmmac_tx_clean()
2743 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_tx_clean()
2748 tx_q->tx_skbuff_dma[entry].last_segment = false; in stmmac_tx_clean()
2749 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_tx_clean()
2752 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { in stmmac_tx_clean()
2754 tx_q->xdpf[entry] = NULL; in stmmac_tx_clean()
2758 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { in stmmac_tx_clean()
2760 tx_q->xdpf[entry] = NULL; in stmmac_tx_clean()
2763 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) in stmmac_tx_clean()
2764 tx_q->xsk_frames_done++; in stmmac_tx_clean()
2766 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_tx_clean()
2769 bytes_compl += skb->len; in stmmac_tx_clean()
2771 tx_q->tx_skbuff[entry] = NULL; in stmmac_tx_clean()
2775 stmmac_release_tx_desc(priv, p, priv->mode); in stmmac_tx_clean()
2777 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_tx_clean()
2779 tx_q->dirty_tx = entry; in stmmac_tx_clean()
2781 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), in stmmac_tx_clean()
2784 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, in stmmac_tx_clean()
2788 netif_dbg(priv, tx_done, priv->dev, in stmmac_tx_clean()
2790 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2793 if (tx_q->xsk_pool) { in stmmac_tx_clean()
2796 if (tx_q->xsk_frames_done) in stmmac_tx_clean()
2797 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); in stmmac_tx_clean()
2799 if (xsk_uses_need_wakeup(tx_q->xsk_pool)) in stmmac_tx_clean()
2800 xsk_set_tx_need_wakeup(tx_q->xsk_pool); in stmmac_tx_clean()
2804 * available), return "budget - 1" to reenable TX IRQ. in stmmac_tx_clean()
2810 xmits = budget - 1; in stmmac_tx_clean()
2815 if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode) in stmmac_tx_clean()
2819 if (tx_q->dirty_tx != tx_q->cur_tx) in stmmac_tx_clean()
2822 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_tx_clean()
2823 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets); in stmmac_tx_clean()
2824 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets); in stmmac_tx_clean()
2825 u64_stats_inc(&txq_stats->napi.tx_clean); in stmmac_tx_clean()
2826 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_tx_clean()
2828 priv->xstats.tx_errors += tx_errors; in stmmac_tx_clean()
2830 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2837 * stmmac_tx_err - to manage the tx error
2845 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_tx_err()
2847 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2850 dma_free_tx_skbufs(priv, &priv->dma_conf, chan); in stmmac_tx_err()
2851 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); in stmmac_tx_err()
2853 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_tx_err()
2854 tx_q->dma_tx_phy, chan); in stmmac_tx_err()
2857 priv->xstats.tx_errors++; in stmmac_tx_err()
2858 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2862 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2868 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2874 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2875 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2876 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_dma_operation_mode()
2877 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_dma_operation_mode()
2878 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_set_dma_operation_mode()
2879 int txfifosz = priv->plat->tx_fifo_size; in stmmac_set_dma_operation_mode()
2882 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_set_dma_operation_mode()
2884 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_set_dma_operation_mode()
2890 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); in stmmac_set_dma_operation_mode()
2891 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); in stmmac_set_dma_operation_mode()
2898 ret = stmmac_safety_feat_irq_status(priv, priv->dev, in stmmac_safety_feat_interrupt()
2899 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); in stmmac_safety_feat_interrupt()
2900 if (ret && (ret != -EINVAL)) { in stmmac_safety_feat_interrupt()
2910 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, in stmmac_napi_check()
2911 &priv->xstats, chan, dir); in stmmac_napi_check()
2912 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_napi_check()
2913 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_napi_check()
2914 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_napi_check()
2919 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; in stmmac_napi_check()
2920 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_napi_check()
2922 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { in stmmac_napi_check()
2924 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_check()
2925 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_check()
2926 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_check()
2931 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { in stmmac_napi_check()
2933 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_check()
2934 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_check()
2935 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_check()
2944 * stmmac_dma_interrupt - DMA ISR
2952 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_dma_interrupt()
2953 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_dma_interrupt()
2987 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); in stmmac_mmc_setup()
2989 if (priv->dma_cap.rmon) { in stmmac_mmc_setup()
2990 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); in stmmac_mmc_setup()
2991 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); in stmmac_mmc_setup()
2993 netdev_info(priv->dev, "No MAC Management Counters available\n"); in stmmac_mmc_setup()
2997 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3007 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; in stmmac_get_hw_features()
3011 * stmmac_check_ether_addr - check if the MAC addr is valid
3021 if (!is_valid_ether_addr(priv->dev->dev_addr)) { in stmmac_check_ether_addr()
3022 stmmac_get_umac_addr(priv, priv->hw, addr, 0); in stmmac_check_ether_addr()
3024 eth_hw_addr_set(priv->dev, addr); in stmmac_check_ether_addr()
3026 eth_hw_addr_random(priv->dev); in stmmac_check_ether_addr()
3027 dev_info(priv->device, "device MAC address %pM\n", in stmmac_check_ether_addr()
3028 priv->dev->dev_addr); in stmmac_check_ether_addr()
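
A rough, stand-alone model of the address check above: the MAC read back from the hardware is accepted only if it is a plausible unicast address, otherwise a fallback (random in the driver, fixed here) is used. Helper names are hypothetical.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int mac_is_valid(const uint8_t mac[6])
{
	static const uint8_t zero[6];

	return memcmp(mac, zero, 6) != 0 &&	/* not all-zero */
	       !(mac[0] & 0x01);		/* not multicast/broadcast */
}

int main(void)
{
	uint8_t from_hw[6] = { 0, 0, 0, 0, 0, 0 };
	uint8_t fallback[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t *mac = mac_is_valid(from_hw) ? from_hw : fallback;

	printf("using %02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
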
3033 * stmmac_init_dma_engine - DMA init.
3042 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_init_dma_engine()
3043 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_init_dma_engine()
3050 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { in stmmac_init_dma_engine()
3051 netdev_err(priv->dev, "Invalid DMA configuration\n"); in stmmac_init_dma_engine()
3052 return -EINVAL; in stmmac_init_dma_engine()
3055 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) in stmmac_init_dma_engine()
3056 priv->plat->dma_cfg->atds = 1; in stmmac_init_dma_engine()
3058 ret = stmmac_reset(priv, priv->ioaddr); in stmmac_init_dma_engine()
3060 netdev_err(priv->dev, "Failed to reset the dma\n"); in stmmac_init_dma_engine()
3065 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg); in stmmac_init_dma_engine()
3067 if (priv->plat->axi) in stmmac_init_dma_engine()
3068 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); in stmmac_init_dma_engine()
3072 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_init_dma_engine()
3073 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_init_dma_engine()
3078 rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_init_dma_engine()
3080 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
3081 rx_q->dma_rx_phy, chan); in stmmac_init_dma_engine()
3083 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_init_dma_engine()
3084 (rx_q->buf_alloc_num * in stmmac_init_dma_engine()
3086 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
3087 rx_q->rx_tail_addr, chan); in stmmac_init_dma_engine()
3092 tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_init_dma_engine()
3094 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
3095 tx_q->dma_tx_phy, chan); in stmmac_init_dma_engine()
3097 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_init_dma_engine()
3098 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
3099 tx_q->tx_tail_addr, chan); in stmmac_init_dma_engine()
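
The tail pointers programmed above are plain offsets into the descriptor ring; a minimal sketch of that arithmetic follows (descriptor size chosen arbitrarily, the real layout depends on the DWMAC variant).

#include <stdio.h>
#include <stdint.h>

#define DESC_SIZE	16U	/* e.g. a basic 4-word descriptor */

static uint64_t tail_addr(uint64_t ring_phys_base, unsigned int index)
{
	/* pointer written to the HW = ring base + index * descriptor size */
	return ring_phys_base + (uint64_t)index * DESC_SIZE;
}

int main(void)
{
	uint64_t base = 0x80000000ULL;

	printf("tail for index 0:   0x%llx\n", (unsigned long long)tail_addr(base, 0));
	printf("tail for index 511: 0x%llx\n", (unsigned long long)tail_addr(base, 511));
	return 0;
}
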
3107 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_timer_arm()
3108 u32 tx_coal_timer = priv->tx_coal_timer[queue]; in stmmac_tx_timer_arm()
3115 ch = &priv->channel[tx_q->queue_index]; in stmmac_tx_timer_arm()
3116 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_tx_timer_arm()
3123 hrtimer_start(&tx_q->txtimer, in stmmac_tx_timer_arm()
3127 hrtimer_try_to_cancel(&tx_q->txtimer); in stmmac_tx_timer_arm()
3131 * stmmac_tx_timer - mitigation sw timer for tx.
3139 struct stmmac_priv *priv = tx_q->priv_data; in stmmac_tx_timer()
3143 ch = &priv->channel[tx_q->queue_index]; in stmmac_tx_timer()
3144 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_tx_timer()
3149 spin_lock_irqsave(&ch->lock, flags); in stmmac_tx_timer()
3150 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); in stmmac_tx_timer()
3151 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_tx_timer()
3159 * stmmac_init_coalesce - init mitigation options.
3168 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_init_coalesce()
3169 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_init_coalesce()
3173 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_init_coalesce()
3175 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; in stmmac_init_coalesce()
3176 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; in stmmac_init_coalesce()
3178 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in stmmac_init_coalesce()
3182 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; in stmmac_init_coalesce()
3187 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_rings_length()
3188 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_rings_length()
3193 stmmac_set_tx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
3194 (priv->dma_conf.dma_tx_size - 1), chan); in stmmac_set_rings_length()
3198 stmmac_set_rx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
3199 (priv->dma_conf.dma_rx_size - 1), chan); in stmmac_set_rings_length()
3203 * stmmac_set_tx_queue_weight - Set TX queue weight
3209 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_set_tx_queue_weight()
3214 weight = priv->plat->tx_queues_cfg[queue].weight; in stmmac_set_tx_queue_weight()
3215 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); in stmmac_set_tx_queue_weight()
3220 * stmmac_configure_cbs - Configure CBS in TX queue
3226 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_configure_cbs()
3232 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; in stmmac_configure_cbs()
3236 stmmac_config_cbs(priv, priv->hw, in stmmac_configure_cbs()
3237 priv->plat->tx_queues_cfg[queue].send_slope, in stmmac_configure_cbs()
3238 priv->plat->tx_queues_cfg[queue].idle_slope, in stmmac_configure_cbs()
3239 priv->plat->tx_queues_cfg[queue].high_credit, in stmmac_configure_cbs()
3240 priv->plat->tx_queues_cfg[queue].low_credit, in stmmac_configure_cbs()
3246 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3252 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_rx_queue_dma_chan_map()
3257 chan = priv->plat->rx_queues_cfg[queue].chan; in stmmac_rx_queue_dma_chan_map()
3258 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); in stmmac_rx_queue_dma_chan_map()
3263 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3269 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_prio()
3274 if (!priv->plat->rx_queues_cfg[queue].use_prio) in stmmac_mac_config_rx_queues_prio()
3277 prio = priv->plat->rx_queues_cfg[queue].prio; in stmmac_mac_config_rx_queues_prio()
3278 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_rx_queues_prio()
3283 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3289 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mac_config_tx_queues_prio()
3294 if (!priv->plat->tx_queues_cfg[queue].use_prio) in stmmac_mac_config_tx_queues_prio()
3297 prio = priv->plat->tx_queues_cfg[queue].prio; in stmmac_mac_config_tx_queues_prio()
3298 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_tx_queues_prio()
3303 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3309 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_routing()
3315 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) in stmmac_mac_config_rx_queues_routing()
3318 packet = priv->plat->rx_queues_cfg[queue].pkt_route; in stmmac_mac_config_rx_queues_routing()
3319 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); in stmmac_mac_config_rx_queues_routing()
3325 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { in stmmac_mac_config_rss()
3326 priv->rss.enable = false; in stmmac_mac_config_rss()
3330 if (priv->dev->features & NETIF_F_RXHASH) in stmmac_mac_config_rss()
3331 priv->rss.enable = true; in stmmac_mac_config_rss()
3333 priv->rss.enable = false; in stmmac_mac_config_rss()
3335 stmmac_rss_configure(priv, priv->hw, &priv->rss, in stmmac_mac_config_rss()
3336 priv->plat->rx_queues_to_use); in stmmac_mac_config_rss()
3340 * stmmac_mtl_configuration - Configure MTL
3346 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mtl_configuration()
3347 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mtl_configuration()
3354 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
3355 priv->plat->rx_sched_algorithm); in stmmac_mtl_configuration()
3359 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
3360 priv->plat->tx_sched_algorithm); in stmmac_mtl_configuration()
3391 if (priv->dma_cap.asp) { in stmmac_safety_feat_configuration()
3392 netdev_info(priv->dev, "Enabling Safety Features\n"); in stmmac_safety_feat_configuration()
3393 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, in stmmac_safety_feat_configuration()
3394 priv->plat->safety_feat_cfg); in stmmac_safety_feat_configuration()
3396 netdev_info(priv->dev, "No Safety Features support found\n"); in stmmac_safety_feat_configuration()
3401 * stmmac_hw_setup - setup mac in a usable state.
3409  * 0 on success or a negative errno value on failure.
3415 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_hw_setup()
3416 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_hw_setup()
3422 if (priv->hw->phylink_pcs) in stmmac_hw_setup()
3423 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs); in stmmac_hw_setup()
3431 phylink_rx_clk_stop_block(priv->phylink); in stmmac_hw_setup()
3436 phylink_rx_clk_stop_unblock(priv->phylink); in stmmac_hw_setup()
3437 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", in stmmac_hw_setup()
3443 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); in stmmac_hw_setup()
3444 phylink_rx_clk_stop_unblock(priv->phylink); in stmmac_hw_setup()
3447 if (priv->hw->pcs) { in stmmac_hw_setup()
3448 int speed = priv->plat->mac_port_sel_speed; in stmmac_hw_setup()
3452 priv->hw->ps = speed; in stmmac_hw_setup()
3454 dev_warn(priv->device, "invalid port speed\n"); in stmmac_hw_setup()
3455 priv->hw->ps = 0; in stmmac_hw_setup()
3460 stmmac_core_init(priv, priv->hw, dev); in stmmac_hw_setup()
3468 ret = stmmac_rx_ipc(priv, priv->hw); in stmmac_hw_setup()
3470 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); in stmmac_hw_setup()
3471 priv->plat->rx_coe = STMMAC_RX_COE_NONE; in stmmac_hw_setup()
3472 priv->hw->rx_csum = 0; in stmmac_hw_setup()
3476 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_hw_setup()
3478 /* Set the HW DMA mode and the COE */ in stmmac_hw_setup()
3483 if (priv->use_riwt) { in stmmac_hw_setup()
3487 if (!priv->rx_riwt[queue]) in stmmac_hw_setup()
3488 priv->rx_riwt[queue] = DEF_DMA_RIWT; in stmmac_hw_setup()
3490 stmmac_rx_watchdog(priv, priv->ioaddr, in stmmac_hw_setup()
3491 priv->rx_riwt[queue], queue); in stmmac_hw_setup()
3495 if (priv->hw->pcs) in stmmac_hw_setup()
3496 stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0); in stmmac_hw_setup()
3502 if (priv->tso) { in stmmac_hw_setup()
3504 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_hw_setup()
3506 /* TSO and TBS cannot co-exist */ in stmmac_hw_setup()
3507 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_hw_setup()
3510 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); in stmmac_hw_setup()
3515 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_hw_setup()
3517 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_hw_setup()
3521 if (priv->dma_cap.vlins) in stmmac_hw_setup()
3522 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); in stmmac_hw_setup()
3526 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_hw_setup()
3527 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; in stmmac_hw_setup()
3529 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); in stmmac_hw_setup()
3533 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); in stmmac_hw_setup()
3534 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); in stmmac_hw_setup()
3539 phylink_rx_clk_stop_block(priv->phylink); in stmmac_hw_setup()
3540 stmmac_set_hw_vlan_mode(priv, priv->hw); in stmmac_hw_setup()
3541 phylink_rx_clk_stop_unblock(priv->phylink); in stmmac_hw_setup()
3554 irq_idx = priv->plat->tx_queues_to_use; in stmmac_free_irq()
3557 for (j = irq_idx - 1; j >= 0; j--) { in stmmac_free_irq()
3558 if (priv->tx_irq[j] > 0) { in stmmac_free_irq()
3559 irq_set_affinity_hint(priv->tx_irq[j], NULL); in stmmac_free_irq()
3560 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); in stmmac_free_irq()
3563 irq_idx = priv->plat->rx_queues_to_use; in stmmac_free_irq()
3566 for (j = irq_idx - 1; j >= 0; j--) { in stmmac_free_irq()
3567 if (priv->rx_irq[j] > 0) { in stmmac_free_irq()
3568 irq_set_affinity_hint(priv->rx_irq[j], NULL); in stmmac_free_irq()
3569 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); in stmmac_free_irq()
3573 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) in stmmac_free_irq()
3574 free_irq(priv->sfty_ue_irq, dev); in stmmac_free_irq()
3577 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) in stmmac_free_irq()
3578 free_irq(priv->sfty_ce_irq, dev); in stmmac_free_irq()
3581 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) in stmmac_free_irq()
3582 free_irq(priv->lpi_irq, dev); in stmmac_free_irq()
3585 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) in stmmac_free_irq()
3586 free_irq(priv->wol_irq, dev); in stmmac_free_irq()
3589 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) in stmmac_free_irq()
3590 free_irq(priv->sfty_irq, dev); in stmmac_free_irq()
3593 free_irq(dev->irq, dev); in stmmac_free_irq()
3612 int_name = priv->int_name_mac; in stmmac_request_irq_multi_msi()
3613 sprintf(int_name, "%s:%s", dev->name, "mac"); in stmmac_request_irq_multi_msi()
3614 ret = request_irq(dev->irq, stmmac_mac_interrupt, in stmmac_request_irq_multi_msi()
3617 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3619 __func__, dev->irq, ret); in stmmac_request_irq_multi_msi()
3627 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3628 int_name = priv->int_name_wol; in stmmac_request_irq_multi_msi()
3629 sprintf(int_name, "%s:%s", dev->name, "wol"); in stmmac_request_irq_multi_msi()
3630 ret = request_irq(priv->wol_irq, in stmmac_request_irq_multi_msi()
3634 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3636 __func__, priv->wol_irq, ret); in stmmac_request_irq_multi_msi()
3645 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3646 int_name = priv->int_name_lpi; in stmmac_request_irq_multi_msi()
3647 sprintf(int_name, "%s:%s", dev->name, "lpi"); in stmmac_request_irq_multi_msi()
3648 ret = request_irq(priv->lpi_irq, in stmmac_request_irq_multi_msi()
3652 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3654 __func__, priv->lpi_irq, ret); in stmmac_request_irq_multi_msi()
3663 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3664 int_name = priv->int_name_sfty; in stmmac_request_irq_multi_msi()
3665 sprintf(int_name, "%s:%s", dev->name, "safety"); in stmmac_request_irq_multi_msi()
3666 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, in stmmac_request_irq_multi_msi()
3669 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3671 __func__, priv->sfty_irq, ret); in stmmac_request_irq_multi_msi()
3680 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3681 int_name = priv->int_name_sfty_ce; in stmmac_request_irq_multi_msi()
3682 sprintf(int_name, "%s:%s", dev->name, "safety-ce"); in stmmac_request_irq_multi_msi()
3683 ret = request_irq(priv->sfty_ce_irq, in stmmac_request_irq_multi_msi()
3687 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3689 __func__, priv->sfty_ce_irq, ret); in stmmac_request_irq_multi_msi()
3698 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3699 int_name = priv->int_name_sfty_ue; in stmmac_request_irq_multi_msi()
3700 sprintf(int_name, "%s:%s", dev->name, "safety-ue"); in stmmac_request_irq_multi_msi()
3701 ret = request_irq(priv->sfty_ue_irq, in stmmac_request_irq_multi_msi()
3705 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3707 __func__, priv->sfty_ue_irq, ret); in stmmac_request_irq_multi_msi()
3714 for (i = 0; i < priv->plat->rx_queues_to_use; i++) { in stmmac_request_irq_multi_msi()
3717 if (priv->rx_irq[i] == 0) in stmmac_request_irq_multi_msi()
3720 int_name = priv->int_name_rx_irq[i]; in stmmac_request_irq_multi_msi()
3721 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); in stmmac_request_irq_multi_msi()
3722 ret = request_irq(priv->rx_irq[i], in stmmac_request_irq_multi_msi()
3724 0, int_name, &priv->dma_conf.rx_queue[i]); in stmmac_request_irq_multi_msi()
3726 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3727 "%s: alloc rx-%d MSI %d (error: %d)\n", in stmmac_request_irq_multi_msi()
3728 __func__, i, priv->rx_irq[i], ret); in stmmac_request_irq_multi_msi()
3733 irq_set_affinity_hint(priv->rx_irq[i], in stmmac_request_irq_multi_msi()
3738 for (i = 0; i < priv->plat->tx_queues_to_use; i++) { in stmmac_request_irq_multi_msi()
3741 if (priv->tx_irq[i] == 0) in stmmac_request_irq_multi_msi()
3744 int_name = priv->int_name_tx_irq[i]; in stmmac_request_irq_multi_msi()
3745 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); in stmmac_request_irq_multi_msi()
3746 ret = request_irq(priv->tx_irq[i], in stmmac_request_irq_multi_msi()
3748 0, int_name, &priv->dma_conf.tx_queue[i]); in stmmac_request_irq_multi_msi()
3750 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3751 "%s: alloc tx-%d MSI %d (error: %d)\n", in stmmac_request_irq_multi_msi()
3752 __func__, i, priv->tx_irq[i], ret); in stmmac_request_irq_multi_msi()
3757 irq_set_affinity_hint(priv->tx_irq[i], in stmmac_request_irq_multi_msi()
3774 ret = request_irq(dev->irq, stmmac_interrupt, in stmmac_request_irq_single()
3775 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3777 netdev_err(priv->dev, in stmmac_request_irq_single()
3779 __func__, dev->irq, ret); in stmmac_request_irq_single()
3787 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { in stmmac_request_irq_single()
3788 ret = request_irq(priv->wol_irq, stmmac_interrupt, in stmmac_request_irq_single()
3789 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3791 netdev_err(priv->dev, in stmmac_request_irq_single()
3793 __func__, priv->wol_irq, ret); in stmmac_request_irq_single()
3800 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { in stmmac_request_irq_single()
3801 ret = request_irq(priv->lpi_irq, stmmac_interrupt, in stmmac_request_irq_single()
3802 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3804 netdev_err(priv->dev, in stmmac_request_irq_single()
3806 __func__, priv->lpi_irq, ret); in stmmac_request_irq_single()
3815 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { in stmmac_request_irq_single()
3816 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, in stmmac_request_irq_single()
3817 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3819 netdev_err(priv->dev, in stmmac_request_irq_single()
3821 __func__, priv->sfty_irq, ret); in stmmac_request_irq_single()
3840 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) in stmmac_request_irq()
3849 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3865 netdev_err(priv->dev, "%s: DMA conf allocation failed\n", in stmmac_setup_dma_desc()
3867 return ERR_PTR(-ENOMEM); in stmmac_setup_dma_desc()
3877 dma_conf->dma_buf_sz = bfsize; in stmmac_setup_dma_desc()
3881 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; in stmmac_setup_dma_desc()
3882 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; in stmmac_setup_dma_desc()
3884 if (!dma_conf->dma_tx_size) in stmmac_setup_dma_desc()
3885 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; in stmmac_setup_dma_desc()
3886 if (!dma_conf->dma_rx_size) in stmmac_setup_dma_desc()
3887 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; in stmmac_setup_dma_desc()
3890 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { in stmmac_setup_dma_desc()
3891 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; in stmmac_setup_dma_desc()
3892 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; in stmmac_setup_dma_desc()
3894 /* Setup per-TXQ tbs flag before TX descriptor alloc */ in stmmac_setup_dma_desc()
3895 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; in stmmac_setup_dma_desc()
3900 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", in stmmac_setup_dma_desc()
3905 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); in stmmac_setup_dma_desc()
3907 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", in stmmac_setup_dma_desc()
3922 * __stmmac_open - open entry point of the driver
3928  * 0 on success or a negative errno value on failure.
3939 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) in __stmmac_open()
3940 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; in __stmmac_open()
3941 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); in __stmmac_open()
3945 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in __stmmac_open()
3946 priv->plat->serdes_powerup) { in __stmmac_open()
3947 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); in __stmmac_open()
3949 netdev_err(priv->dev, "%s: Serdes powerup failed\n", in __stmmac_open()
3957 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); in __stmmac_open()
3965 phylink_start(priv->phylink); in __stmmac_open()
3967 phylink_speed_up(priv->phylink); in __stmmac_open()
3974 netif_tx_start_all_queues(priv->dev); in __stmmac_open()
3980 phylink_stop(priv->phylink); in __stmmac_open()
3982 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in __stmmac_open()
3983 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in __stmmac_open()
3997 if (!priv->tx_lpi_timer) in stmmac_open()
3998 priv->tx_lpi_timer = eee_timer * 1000; in stmmac_open()
4000 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); in stmmac_open()
4004 ret = pm_runtime_resume_and_get(priv->device); in stmmac_open()
4021 phylink_disconnect_phy(priv->phylink); in stmmac_open()
4023 pm_runtime_put(priv->device); in stmmac_open()
4039 if (device_may_wakeup(priv->device)) in __stmmac_release()
4040 phylink_speed_down(priv->phylink, false); in __stmmac_release()
4043 phylink_stop(priv->phylink); in __stmmac_release()
4047 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in __stmmac_release()
4048 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in __stmmac_release()
4059 free_dma_desc_resources(priv, &priv->dma_conf); in __stmmac_release()
4062 if (priv->plat->serdes_powerdown) in __stmmac_release()
4063 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); in __stmmac_release()
4068 ethtool_mmsv_stop(&priv->fpe_cfg.mmsv); in __stmmac_release()
4072 * stmmac_release - close entry point of the driver
4083 phylink_disconnect_phy(priv->phylink); in stmmac_release()
4084 pm_runtime_put(priv->device); in stmmac_release()
4096 if (!priv->dma_cap.vlins) in stmmac_vlan_insert()
4100 if (skb->vlan_proto == htons(ETH_P_8021AD)) { in stmmac_vlan_insert()
4107 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_vlan_insert()
4108 p = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_vlan_insert()
4110 p = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_vlan_insert()
4116 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_vlan_insert()
4121  * stmmac_tso_allocator - fill the TX descriptors for a TSO payload in stmmac_tso_allocator()
4134 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tso_allocator()
4144 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, in stmmac_tso_allocator()
4145 priv->dma_conf.dma_tx_size); in stmmac_tso_allocator()
4146 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); in stmmac_tso_allocator()
4148 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_allocator()
4149 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_allocator()
4151 desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_allocator()
4153 curr_addr = des + (total_len - tmp_len); in stmmac_tso_allocator()
4163 tmp_len -= TSO_MAX_BUFF_SIZE; in stmmac_tso_allocator()
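
A self-contained sketch of the splitting loop above: the payload is consumed in chunks of at most one per-buffer limit per descriptor, and each descriptor's buffer address is the payload start plus the bytes already consumed. MAX_BUF is an arbitrary stand-in for the hardware limit.

#include <stdio.h>

#define MAX_BUF	16383U

static void split_payload(unsigned long payload_start, unsigned int total_len)
{
	unsigned int tmp_len = total_len;

	while (tmp_len) {
		unsigned int chunk = tmp_len > MAX_BUF ? MAX_BUF : tmp_len;
		unsigned long curr = payload_start + (total_len - tmp_len);

		printf("desc: addr=0x%lx len=%u\n", curr, chunk);
		tmp_len -= chunk;
	}
}

int main(void)
{
	split_payload(0x1000, 40000);	/* needs three descriptors */
	return 0;
}
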
4169 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_flush_tx_descriptors()
4172 if (likely(priv->extend_desc)) in stmmac_flush_tx_descriptors()
4174 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_flush_tx_descriptors()
4185 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); in stmmac_flush_tx_descriptors()
4186 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); in stmmac_flush_tx_descriptors()
4190 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4198 * --------
4199 * | DES0 |---> buffer1 = L2/L3/L4 header
4200 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4201 * | | width is 32-bit, but we never use it.
4202 * | | Also can be used as the most-significant 8-bits or 16-bits of
4203 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4204 * | | or 48-bit, and we always use it.
4205 * | DES2 |---> buffer1 len
4206 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4207 * --------
4208 * --------
4209 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4210 * | DES1 |---> same as the First Descriptor
4211 * | DES2 |---> buffer1 len
4213 * --------
4217 * --------
4218 * | DES0 |---> buffer1 = Split TCP Payload
4219 * | DES1 |---> same as the First Descriptor
4220 * | DES2 |---> buffer1 len
4222 * --------
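
A rough estimate, mirroring the availability check performed before such a frame is queued, of how many payload descriptors the linear part of a TSO frame needs (MAX_BUF again stands in for the per-buffer hardware limit):

#include <stdio.h>

#define MAX_BUF	16383U

static unsigned int tso_desc_estimate(unsigned int skb_len, unsigned int hdr_len)
{
	/* one slot per MAX_BUF bytes of payload after the L2/L3/L4 header */
	return (skb_len - hdr_len) / MAX_BUF + 1;
}

int main(void)
{
	printf("64KiB frame, 54B headers -> %u descriptors\n",
	       tso_desc_estimate(65536, 54));
	return 0;
}
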
4242 * TSO engine will be un-tagged by mistake. in stmmac_tso_xmit()
4247 priv->xstats.tx_dropped++; in stmmac_tso_xmit()
4252 nfrags = skb_shinfo(skb)->nr_frags; in stmmac_tso_xmit()
4255 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tso_xmit()
4256 txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_tso_xmit()
4257 first_tx = tx_q->cur_tx; in stmmac_tso_xmit()
4260 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in stmmac_tso_xmit()
4270 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { in stmmac_tso_xmit()
4272 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_tso_xmit()
4275 netdev_err(priv->dev, in stmmac_tso_xmit()
4282 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ in stmmac_tso_xmit()
4284 mss = skb_shinfo(skb)->gso_size; in stmmac_tso_xmit()
4287 if (mss != tx_q->mss) { in stmmac_tso_xmit()
4288 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4289 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_xmit()
4291 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_xmit()
4294 tx_q->mss = mss; in stmmac_tso_xmit()
4295 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, in stmmac_tso_xmit()
4296 priv->dma_conf.dma_tx_size); in stmmac_tso_xmit()
4297 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); in stmmac_tso_xmit()
4303 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, in stmmac_tso_xmit()
4304 skb->data_len); in stmmac_tso_xmit()
4307 first_entry = tx_q->cur_tx; in stmmac_tso_xmit()
4308 WARN_ON(tx_q->tx_skbuff[first_entry]); in stmmac_tso_xmit()
4310 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4311 desc = &tx_q->dma_entx[first_entry].basic; in stmmac_tso_xmit()
4313 desc = &tx_q->dma_tx[first_entry]; in stmmac_tso_xmit()
4317 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), in stmmac_tso_xmit()
4319 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
4327 * non-paged SKB data, the DMA buffer address should be saved to in stmmac_tso_xmit()
4328 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor, in stmmac_tso_xmit()
4329 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee in stmmac_tso_xmit()
4333 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf in stmmac_tso_xmit()
4338 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; in stmmac_tso_xmit()
4339 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb); in stmmac_tso_xmit()
4340 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false; in stmmac_tso_xmit()
4341 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4345 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in stmmac_tso_xmit()
4347 des = skb_frag_dma_map(priv->device, frag, 0, in stmmac_tso_xmit()
4350 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
4354 (i == nfrags - 1), queue); in stmmac_tso_xmit()
4356 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; in stmmac_tso_xmit()
4357 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); in stmmac_tso_xmit()
4358 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; in stmmac_tso_xmit()
4359 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4362 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; in stmmac_tso_xmit()
4365 tx_q->tx_skbuff[tx_q->cur_tx] = skb; in stmmac_tso_xmit()
4366 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4369 tx_packets = (tx_q->cur_tx + 1) - first_tx; in stmmac_tso_xmit()
4370 tx_q->tx_count_frames += tx_packets; in stmmac_tso_xmit()
4372 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_tso_xmit()
4374 else if (!priv->tx_coal_frames[queue]) in stmmac_tso_xmit()
4376 else if (tx_packets > priv->tx_coal_frames[queue]) in stmmac_tso_xmit()
4378 else if ((tx_q->tx_count_frames % in stmmac_tso_xmit()
4379 priv->tx_coal_frames[queue]) < tx_packets) in stmmac_tso_xmit()
4385 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4386 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_xmit()
4388 desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_xmit()
4390 tx_q->tx_count_frames = 0; in stmmac_tso_xmit()
4399 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_tso_xmit()
4402 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", in stmmac_tso_xmit()
4404 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tso_xmit()
4407 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_tso_xmit()
4408 u64_stats_add(&txq_stats->q.tx_bytes, skb->len); in stmmac_tso_xmit()
4409 u64_stats_inc(&txq_stats->q.tx_tso_frames); in stmmac_tso_xmit()
4410 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags); in stmmac_tso_xmit()
4412 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_tso_xmit()
4413 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_tso_xmit()
4415 if (priv->sarc_type) in stmmac_tso_xmit()
4416 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_tso_xmit()
4418 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in stmmac_tso_xmit()
4419 priv->hwts_tx_en)) { in stmmac_tso_xmit()
4421 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in stmmac_tso_xmit()
4427 tx_q->tx_skbuff_dma[first_entry].last_segment, in stmmac_tso_xmit()
4428 hdr / 4, (skb->len - proto_hdr_len)); in stmmac_tso_xmit()
4443 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, in stmmac_tso_xmit()
4444 tx_q->cur_tx, first, nfrags); in stmmac_tso_xmit()
4446 print_pkt(skb->data, skb_headlen(skb)); in stmmac_tso_xmit()
4449 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); in stmmac_tso_xmit()
4458 dev_err(priv->device, "Tx dma map failed\n"); in stmmac_tso_xmit()
4460 priv->xstats.tx_dropped++; in stmmac_tso_xmit()
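
A condensed sketch of the interrupt-on-completion decision used when queueing frames: raise IC when coalescing is disabled, when this submission alone exceeds the threshold, or when the running frame count crosses a multiple of it (the hardware-timestamp trigger handled by the driver is left out here). Names are illustrative.

#include <stdio.h>
#include <stdbool.h>

static bool want_completion_irq(unsigned int tx_packets,
				unsigned int count_frames,
				unsigned int coal_frames)
{
	if (!coal_frames)
		return true;			/* no coalescing configured */
	if (tx_packets > coal_frames)
		return true;			/* this burst alone exceeds it */
	return (count_frames % coal_frames) < tx_packets;
}

int main(void)
{
	unsigned int count = 0, coal = 25;

	for (unsigned int i = 1; i <= 60; i++) {
		count += 1;			/* one packet per submission */
		if (want_completion_irq(1, count, coal))
			printf("IC set on packet %u\n", i);	/* 25, 50 */
	}
	return 0;
}
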
4465 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4487 * stmmac_xmit - Tx entry point of the driver
4501 int nfrags = skb_shinfo(skb)->nr_frags; in stmmac_xmit()
4502 int gso = skb_shinfo(skb)->gso_type; in stmmac_xmit()
4511 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xmit()
4512 txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xmit()
4513 first_tx = tx_q->cur_tx; in stmmac_xmit()
4515 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) in stmmac_xmit()
4519 if (skb_is_gso(skb) && priv->tso) { in stmmac_xmit()
4522 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) in stmmac_xmit()
4526 if (priv->est && priv->est->enable && in stmmac_xmit()
4527 priv->est->max_sdu[queue] && in stmmac_xmit()
4528 	    skb->len > priv->est->max_sdu[queue]) { in stmmac_xmit()
4529 priv->xstats.max_sdu_txq_drop[queue]++; in stmmac_xmit()
4535 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_xmit()
4538 netdev_err(priv->dev, in stmmac_xmit()
4548 entry = tx_q->cur_tx; in stmmac_xmit()
4550 WARN_ON(tx_q->tx_skbuff[first_entry]); in stmmac_xmit()
4552 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); in stmmac_xmit()
4553 /* DWMAC IPs can be synthesized to support tx coe only for a few tx in stmmac_xmit()
4555	 * support tx coe needs to fall back to software checksum calculation. in stmmac_xmit()
4557 * Packets that won't trigger the COE e.g. most DSA-tagged packets will in stmmac_xmit()
4561 (priv->plat->tx_queues_cfg[queue].coe_unsupported || in stmmac_xmit()
4568 if (likely(priv->extend_desc)) in stmmac_xmit()
4569 desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xmit()
4570 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4571 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4573 desc = tx_q->dma_tx + entry; in stmmac_xmit()
4580 enh_desc = priv->plat->enh_desc; in stmmac_xmit()
4583 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); in stmmac_xmit()
4587 if (unlikely(entry < 0) && (entry != -EINVAL)) in stmmac_xmit()
4592 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in stmmac_xmit()
4594 bool last_segment = (i == (nfrags - 1)); in stmmac_xmit()
4596 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xmit()
4597 WARN_ON(tx_q->tx_skbuff[entry]); in stmmac_xmit()
4599 if (likely(priv->extend_desc)) in stmmac_xmit()
4600 desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xmit()
4601 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4602 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4604 desc = tx_q->dma_tx + entry; in stmmac_xmit()
4606 des = skb_frag_dma_map(priv->device, frag, 0, len, in stmmac_xmit()
4608 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
4611 tx_q->tx_skbuff_dma[entry].buf = des; in stmmac_xmit()
4615 tx_q->tx_skbuff_dma[entry].map_as_page = true; in stmmac_xmit()
4616 tx_q->tx_skbuff_dma[entry].len = len; in stmmac_xmit()
4617 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; in stmmac_xmit()
4618 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4622 priv->mode, 1, last_segment, skb->len); in stmmac_xmit()
4626 tx_q->tx_skbuff[entry] = skb; in stmmac_xmit()
4627 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4630 * segment is reset and the timer re-started to clean the tx status. in stmmac_xmit()
4634 tx_packets = (entry + 1) - first_tx; in stmmac_xmit()
4635 tx_q->tx_count_frames += tx_packets; in stmmac_xmit()
4637 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_xmit()
4639 else if (!priv->tx_coal_frames[queue]) in stmmac_xmit()
4641 else if (tx_packets > priv->tx_coal_frames[queue]) in stmmac_xmit()
4643 else if ((tx_q->tx_count_frames % in stmmac_xmit()
4644 priv->tx_coal_frames[queue]) < tx_packets) in stmmac_xmit()
4650 if (likely(priv->extend_desc)) in stmmac_xmit()
4651 desc = &tx_q->dma_etx[entry].basic; in stmmac_xmit()
4652 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4653 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4655 desc = &tx_q->dma_tx[entry]; in stmmac_xmit()
4657 tx_q->tx_count_frames = 0; in stmmac_xmit()
4666 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xmit()
4667 tx_q->cur_tx = entry; in stmmac_xmit()
4670 netdev_dbg(priv->dev, in stmmac_xmit()
4672 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, in stmmac_xmit()
4675 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); in stmmac_xmit()
4676 print_pkt(skb->data, skb->len); in stmmac_xmit()
4680 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", in stmmac_xmit()
4682 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_xmit()
4685 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_xmit()
4686 u64_stats_add(&txq_stats->q.tx_bytes, skb->len); in stmmac_xmit()
4688 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_xmit()
4689 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_xmit()
4691 if (priv->sarc_type) in stmmac_xmit()
4692 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_xmit()
4701 des = dma_map_single(priv->device, skb->data, in stmmac_xmit()
4703 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
4706 tx_q->tx_skbuff_dma[first_entry].buf = des; in stmmac_xmit()
4707 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4708 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; in stmmac_xmit()
4712 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; in stmmac_xmit()
4713 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; in stmmac_xmit()
4715 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in stmmac_xmit()
4716 priv->hwts_tx_en)) { in stmmac_xmit()
4718 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in stmmac_xmit()
4724 csum_insertion, priv->mode, 0, last_segment, in stmmac_xmit()
4725 skb->len); in stmmac_xmit()
4728 if (tx_q->tbs & STMMAC_TBS_EN) { in stmmac_xmit()
4729 struct timespec64 ts = ns_to_timespec64(skb->tstamp); in stmmac_xmit()
4731 tbs_desc = &tx_q->dma_entx[first_entry]; in stmmac_xmit()
4737 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); in stmmac_xmit()
4739 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); in stmmac_xmit()
4747 netdev_err(priv->dev, "Tx DMA map failed\n"); in stmmac_xmit()
4750 priv->xstats.tx_dropped++; in stmmac_xmit()
4757 __be16 vlan_proto = veth->h_vlan_proto; in stmmac_rx_vlan()
4761 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || in stmmac_rx_vlan()
4763 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { in stmmac_rx_vlan()
4765 vlanid = ntohs(veth->h_vlan_TCI); in stmmac_rx_vlan()
4766 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); in stmmac_rx_vlan()
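
A stand-alone model of the software VLAN strip above, operating on a raw frame buffer instead of an skb: the destination and source MACs are slid forward over the 4-byte tag, and the untagged frame then starts VLAN_HLEN bytes later.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN	6
#define VLAN_HLEN	4

static uint8_t *strip_vlan(uint8_t *frame, size_t *len, uint16_t *tci)
{
	/* TCI sits right after the 802.1Q ethertype, at offsets 14..15 */
	*tci = (uint16_t)(frame[14] << 8 | frame[15]);

	/* slide DA+SA (12 bytes) forward over the tag */
	memmove(frame + VLAN_HLEN, frame, ETH_ALEN * 2);

	*len -= VLAN_HLEN;
	return frame + VLAN_HLEN;	/* untagged frame now starts here */
}

int main(void)
{
	uint8_t f[64] = {
		/* DA */ 1, 2, 3, 4, 5, 6, /* SA */ 7, 8, 9, 10, 11, 12,
		/* TPID */ 0x81, 0x00, /* TCI: prio 0, VID 100 */ 0x00, 0x64,
		/* inner ethertype (IPv4) */ 0x08, 0x00,
	};
	size_t len = 64;
	uint16_t tci;
	uint8_t *untagged = strip_vlan(f, &len, &tci);

	printf("vid=%u, inner ethertype=%02x%02x, len=%zu\n",
	       tci & 0x0fff, untagged[12], untagged[13], len);
	return 0;
}
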
4773 * stmmac_rx_refill - refill used skb preallocated buffers
4777 * that is based on zero-copy.
4781 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_refill()
4783 unsigned int entry = rx_q->dirty_rx; in stmmac_rx_refill()
4786 if (priv->dma_cap.host_dma_width <= 32) in stmmac_rx_refill()
4789 while (dirty-- > 0) { in stmmac_rx_refill()
4790 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; in stmmac_rx_refill()
4794 if (priv->extend_desc) in stmmac_rx_refill()
4795 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_refill()
4797 p = rx_q->dma_rx + entry; in stmmac_rx_refill()
4799 if (!buf->page) { in stmmac_rx_refill()
4800 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_rx_refill()
4801 if (!buf->page) in stmmac_rx_refill()
4805 if (priv->sph && !buf->sec_page) { in stmmac_rx_refill()
4806 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_rx_refill()
4807 if (!buf->sec_page) in stmmac_rx_refill()
4810 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); in stmmac_rx_refill()
4813 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; in stmmac_rx_refill()
4815 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_rx_refill()
4816 if (priv->sph) in stmmac_rx_refill()
4817 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); in stmmac_rx_refill()
4819 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); in stmmac_rx_refill()
4822 rx_q->rx_count_frames++; in stmmac_rx_refill()
4823 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; in stmmac_rx_refill()
4824 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) in stmmac_rx_refill()
4825 rx_q->rx_count_frames = 0; in stmmac_rx_refill()
4827 use_rx_wd = !priv->rx_coal_frames[queue]; in stmmac_rx_refill()
4828 use_rx_wd |= rx_q->rx_count_frames > 0; in stmmac_rx_refill()
4829 if (!priv->use_riwt) in stmmac_rx_refill()
4835 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); in stmmac_rx_refill()
4837 rx_q->dirty_rx = entry; in stmmac_rx_refill()
4838 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_rx_refill()
4839 (rx_q->dirty_rx * sizeof(struct dma_desc)); in stmmac_rx_refill()
4840 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill()
4848 int coe = priv->hw->rx_csum; in stmmac_rx_buf1_len() local
4851 if (priv->sph && len) in stmmac_rx_buf1_len()
4856 if (priv->sph && hlen) { in stmmac_rx_buf1_len()
4857 priv->xstats.rx_split_hdr_pkt_n++; in stmmac_rx_buf1_len()
4863 return priv->dma_conf.dma_buf_sz; in stmmac_rx_buf1_len()
4865 plen = stmmac_get_rx_frame_len(priv, p, coe); in stmmac_rx_buf1_len()
4868 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); in stmmac_rx_buf1_len()
4875 int coe = priv->hw->rx_csum; in stmmac_rx_buf2_len() local
4879 if (!priv->sph) in stmmac_rx_buf2_len()
4884 return priv->dma_conf.dma_buf_sz; in stmmac_rx_buf2_len()
4886 plen = stmmac_get_rx_frame_len(priv, p, coe); in stmmac_rx_buf2_len()
4889 return plen - len; in stmmac_rx_buf2_len()
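
A loose, illustrative model of the buffer-length bookkeeping with Split Header enabled: the first buffer carries only the header, intermediate buffers are assumed full, and the final buffer receives whatever remains of the frame length reported by the hardware. Constants and names are made up for the example.

#include <stdio.h>

#define BUF_SZ	2048U

static void split_lengths(unsigned int frame_len, unsigned int hdr_len)
{
	unsigned int consumed = hdr_len;	/* buffer 1: split header */

	printf("buf1 (header) = %u\n", hdr_len);
	while (frame_len - consumed > BUF_SZ) {
		printf("full buffer   = %u\n", BUF_SZ);
		consumed += BUF_SZ;
	}
	printf("last buffer   = %u\n", frame_len - consumed);
}

int main(void)
{
	split_lengths(3000, 54);	/* 54 + 2048 + 898 */
	return 0;
}
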
4895 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xdp_xmit_xdpf()
4896 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xdp_xmit_xdpf()
4897 bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported; in stmmac_xdp_xmit_xdpf()
4898 unsigned int entry = tx_q->cur_tx; in stmmac_xdp_xmit_xdpf()
4906 if (priv->est && priv->est->enable && in stmmac_xdp_xmit_xdpf()
4907 priv->est->max_sdu[queue] && in stmmac_xdp_xmit_xdpf()
4908 xdpf->len > priv->est->max_sdu[queue]) { in stmmac_xdp_xmit_xdpf()
4909 priv->xstats.max_sdu_txq_drop[queue]++; in stmmac_xdp_xmit_xdpf()
4913 if (likely(priv->extend_desc)) in stmmac_xdp_xmit_xdpf()
4914 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xdp_xmit_xdpf()
4915 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xdp_xmit_xdpf()
4916 tx_desc = &tx_q->dma_entx[entry].basic; in stmmac_xdp_xmit_xdpf()
4918 tx_desc = tx_q->dma_tx + entry; in stmmac_xdp_xmit_xdpf()
4921 dma_addr = dma_map_single(priv->device, xdpf->data, in stmmac_xdp_xmit_xdpf()
4922 xdpf->len, DMA_TO_DEVICE); in stmmac_xdp_xmit_xdpf()
4923 if (dma_mapping_error(priv->device, dma_addr)) in stmmac_xdp_xmit_xdpf()
4926 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; in stmmac_xdp_xmit_xdpf()
4928 struct page *page = virt_to_page(xdpf->data); in stmmac_xdp_xmit_xdpf()
4931 xdpf->headroom; in stmmac_xdp_xmit_xdpf()
4932 dma_sync_single_for_device(priv->device, dma_addr, in stmmac_xdp_xmit_xdpf()
4933 xdpf->len, DMA_BIDIRECTIONAL); in stmmac_xdp_xmit_xdpf()
4935 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; in stmmac_xdp_xmit_xdpf()
4938 tx_q->tx_skbuff_dma[entry].buf = dma_addr; in stmmac_xdp_xmit_xdpf()
4939 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_xdp_xmit_xdpf()
4940 tx_q->tx_skbuff_dma[entry].len = xdpf->len; in stmmac_xdp_xmit_xdpf()
4941 tx_q->tx_skbuff_dma[entry].last_segment = true; in stmmac_xdp_xmit_xdpf()
4942 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_xdp_xmit_xdpf()
4944 tx_q->xdpf[entry] = xdpf; in stmmac_xdp_xmit_xdpf()
4948 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, in stmmac_xdp_xmit_xdpf()
4949 csum, priv->mode, true, true, in stmmac_xdp_xmit_xdpf()
4950 xdpf->len); in stmmac_xdp_xmit_xdpf()
4952 tx_q->tx_count_frames++; in stmmac_xdp_xmit_xdpf()
4954 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) in stmmac_xdp_xmit_xdpf()
4960 tx_q->tx_count_frames = 0; in stmmac_xdp_xmit_xdpf()
4962 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_xdp_xmit_xdpf()
4963 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_xdp_xmit_xdpf()
4964 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_xdp_xmit_xdpf()
4967 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); in stmmac_xdp_xmit_xdpf()
4969 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xdp_xmit_xdpf()
4970 tx_q->cur_tx = entry; in stmmac_xdp_xmit_xdpf()
4983 while (index >= priv->plat->tx_queues_to_use) in stmmac_xdp_get_tx_queue()
4984 index -= priv->plat->tx_queues_to_use; in stmmac_xdp_get_tx_queue()
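
A tiny sketch of the wrap above: an arbitrary index (such as the CPU id) is folded onto the number of usable TX queues by repeated subtraction, which is equivalent to a modulo for non-negative values.

#include <stdio.h>

static unsigned int wrap_queue(unsigned int index, unsigned int nr_queues)
{
	while (index >= nr_queues)
		index -= nr_queues;
	return index;
}

int main(void)
{
	printf("cpu 5 on 4 queues -> queue %u\n", wrap_queue(5, 4));	/* 1 */
	printf("cpu 2 on 4 queues -> queue %u\n", wrap_queue(2, 4));	/* 2 */
	return 0;
}
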
5002 nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit_back()
5005 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit_back()
5033 if (xdp_do_redirect(priv->dev, xdp, prog) < 0) in __stmmac_xdp_run_prog()
5039 bpf_warn_invalid_xdp_action(priv->dev, prog, act); in __stmmac_xdp_run_prog()
5042 trace_xdp_exception(priv->dev, prog, act); in __stmmac_xdp_run_prog()
5058 prog = READ_ONCE(priv->xdp_prog); in stmmac_xdp_run_prog()
5066 return ERR_PTR(-res); in stmmac_xdp_run_prog()
5087 unsigned int metasize = xdp->data - xdp->data_meta; in stmmac_construct_skb_zc()
5088 unsigned int datasize = xdp->data_end - xdp->data; in stmmac_construct_skb_zc()
5091 skb = napi_alloc_skb(&ch->rxtx_napi, in stmmac_construct_skb_zc()
5092 xdp->data_end - xdp->data_hard_start); in stmmac_construct_skb_zc()
5096 skb_reserve(skb, xdp->data - xdp->data_hard_start); in stmmac_construct_skb_zc()
5097 memcpy(__skb_put(skb, datasize), xdp->data, datasize); in stmmac_construct_skb_zc()
5108 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_dispatch_skb_zc()
5109 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_dispatch_skb_zc()
5110 unsigned int len = xdp->data_end - xdp->data; in stmmac_dispatch_skb_zc()
5112 int coe = priv->hw->rx_csum; in stmmac_dispatch_skb_zc() local
5118 priv->xstats.rx_dropped++; in stmmac_dispatch_skb_zc()
5123 if (priv->hw->hw_vlan_en) in stmmac_dispatch_skb_zc()
5125 stmmac_rx_hw_vlan(priv, priv->hw, p, skb); in stmmac_dispatch_skb_zc()
5128 stmmac_rx_vlan(priv->dev, skb); in stmmac_dispatch_skb_zc()
5129 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_dispatch_skb_zc()
5131 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb)) in stmmac_dispatch_skb_zc()
5134 skb->ip_summed = CHECKSUM_UNNECESSARY; in stmmac_dispatch_skb_zc()
5140 napi_gro_receive(&ch->rxtx_napi, skb); in stmmac_dispatch_skb_zc()
5142 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_dispatch_skb_zc()
5143 u64_stats_inc(&rxq_stats->napi.rx_pkt_n); in stmmac_dispatch_skb_zc()
5144 u64_stats_add(&rxq_stats->napi.rx_bytes, len); in stmmac_dispatch_skb_zc()
5145 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_dispatch_skb_zc()
5150 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_refill_zc()
5151 unsigned int entry = rx_q->dirty_rx; in stmmac_rx_refill_zc()
5157 while (budget-- > 0 && entry != rx_q->cur_rx) { in stmmac_rx_refill_zc()
5158 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; in stmmac_rx_refill_zc()
5162 if (!buf->xdp) { in stmmac_rx_refill_zc()
5163 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_rx_refill_zc()
5164 if (!buf->xdp) { in stmmac_rx_refill_zc()
5170 if (priv->extend_desc) in stmmac_rx_refill_zc()
5171 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_refill_zc()
5173 rx_desc = rx_q->dma_rx + entry; in stmmac_rx_refill_zc()
5175 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); in stmmac_rx_refill_zc()
5180 rx_q->rx_count_frames++; in stmmac_rx_refill_zc()
5181 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; in stmmac_rx_refill_zc()
5182 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) in stmmac_rx_refill_zc()
5183 rx_q->rx_count_frames = 0; in stmmac_rx_refill_zc()
5185 use_rx_wd = !priv->rx_coal_frames[queue]; in stmmac_rx_refill_zc()
5186 use_rx_wd |= rx_q->rx_count_frames > 0; in stmmac_rx_refill_zc()
5187 if (!priv->use_riwt) in stmmac_rx_refill_zc()
5193 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); in stmmac_rx_refill_zc()
5197 rx_q->dirty_rx = entry; in stmmac_rx_refill_zc()
5198 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_rx_refill_zc()
5199 (rx_q->dirty_rx * sizeof(struct dma_desc)); in stmmac_rx_refill_zc()
5200 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill_zc()
5218 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_rx_zc()
5219 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_zc()
5222 unsigned int next_entry = rx_q->cur_rx; in stmmac_rx_zc()
5233 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx_zc()
5234 if (priv->extend_desc) { in stmmac_rx_zc()
5235 rx_head = (void *)rx_q->dma_erx; in stmmac_rx_zc()
5238 rx_head = (void *)rx_q->dma_rx; in stmmac_rx_zc()
5242 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, in stmmac_rx_zc()
5243 rx_q->dma_rx_phy, desc_size); in stmmac_rx_zc()
5253 if (!count && rx_q->state_saved) { in stmmac_rx_zc()
5254 error = rx_q->state.error; in stmmac_rx_zc()
5255 len = rx_q->state.len; in stmmac_rx_zc()
5257 rx_q->state_saved = false; in stmmac_rx_zc()
5268 buf = &rx_q->buf_pool[entry]; in stmmac_rx_zc()
5276 if (priv->extend_desc) in stmmac_rx_zc()
5277 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_zc()
5279 p = rx_q->dma_rx + entry; in stmmac_rx_zc()
5282 status = stmmac_rx_status(priv, &priv->xstats, p); in stmmac_rx_zc()
5288 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, in stmmac_rx_zc()
5289 priv->dma_conf.dma_rx_size); in stmmac_rx_zc()
5290 next_entry = rx_q->cur_rx; in stmmac_rx_zc()
5292 if (priv->extend_desc) in stmmac_rx_zc()
5293 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); in stmmac_rx_zc()
5295 np = rx_q->dma_rx + next_entry; in stmmac_rx_zc()
5300 if (!buf->xdp) in stmmac_rx_zc()
5303 if (priv->extend_desc) in stmmac_rx_zc()
5304 stmmac_rx_extended_status(priv, &priv->xstats, in stmmac_rx_zc()
5305 rx_q->dma_erx + entry); in stmmac_rx_zc()
5307 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5308 buf->xdp = NULL; in stmmac_rx_zc()
5311 if (!priv->hwts_rx_en) in stmmac_rx_zc()
5324 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5325 buf->xdp = NULL; in stmmac_rx_zc()
5331 ctx = xsk_buff_to_stmmac_ctx(buf->xdp); in stmmac_rx_zc()
5332 ctx->priv = priv; in stmmac_rx_zc()
5333 ctx->desc = p; in stmmac_rx_zc()
5334 ctx->ndesc = np; in stmmac_rx_zc()
5342 buf1_len -= ETH_FCS_LEN; in stmmac_rx_zc()
5343 len -= ETH_FCS_LEN; in stmmac_rx_zc()
5347 buf->xdp->data_end = buf->xdp->data + buf1_len; in stmmac_rx_zc()
5348 xsk_buff_dma_sync_for_cpu(buf->xdp); in stmmac_rx_zc()
5350 prog = READ_ONCE(priv->xdp_prog); in stmmac_rx_zc()
5351 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); in stmmac_rx_zc()
5355 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); in stmmac_rx_zc()
5356 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5359 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5368 buf->xdp = NULL; in stmmac_rx_zc()
5374 rx_q->state_saved = true; in stmmac_rx_zc()
5375 rx_q->state.error = error; in stmmac_rx_zc()
5376 rx_q->state.len = len; in stmmac_rx_zc()
5381 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_rx_zc()
5382 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); in stmmac_rx_zc()
5383 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_rx_zc()
5385 priv->xstats.rx_dropped += rx_dropped; in stmmac_rx_zc()
5386 priv->xstats.rx_errors += rx_errors; in stmmac_rx_zc()
5388 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { in stmmac_rx_zc()
5390 xsk_set_rx_need_wakeup(rx_q->xsk_pool); in stmmac_rx_zc()
5392 xsk_clear_rx_need_wakeup(rx_q->xsk_pool); in stmmac_rx_zc()
5401 * stmmac_rx - manage the receive process
5411 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_rx()
5412 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx()
5413 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_rx()
5415 int status = 0, coe = priv->hw->rx_csum; in stmmac_rx() local
5416 unsigned int next_entry = rx_q->cur_rx; in stmmac_rx()
5424 dma_dir = page_pool_get_dma_dir(rx_q->page_pool); in stmmac_rx()
5425 bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; in stmmac_rx()
5426 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); in stmmac_rx()
5431 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx()
5432 if (priv->extend_desc) { in stmmac_rx()
5433 rx_head = (void *)rx_q->dma_erx; in stmmac_rx()
5436 rx_head = (void *)rx_q->dma_rx; in stmmac_rx()
5440 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, in stmmac_rx()
5441 rx_q->dma_rx_phy, desc_size); in stmmac_rx()
5451 if (!count && rx_q->state_saved) { in stmmac_rx()
5452 skb = rx_q->state.skb; in stmmac_rx()
5453 error = rx_q->state.error; in stmmac_rx()
5454 len = rx_q->state.len; in stmmac_rx()
5456 rx_q->state_saved = false; in stmmac_rx()
5469 buf = &rx_q->buf_pool[entry]; in stmmac_rx()
5471 if (priv->extend_desc) in stmmac_rx()
5472 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx()
5474 p = rx_q->dma_rx + entry; in stmmac_rx()
5477 status = stmmac_rx_status(priv, &priv->xstats, p); in stmmac_rx()
5482 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, in stmmac_rx()
5483 priv->dma_conf.dma_rx_size); in stmmac_rx()
5484 next_entry = rx_q->cur_rx; in stmmac_rx()
5486 if (priv->extend_desc) in stmmac_rx()
5487 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); in stmmac_rx()
5489 np = rx_q->dma_rx + next_entry; in stmmac_rx()
5493 if (priv->extend_desc) in stmmac_rx()
5494 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); in stmmac_rx()
5496 page_pool_put_page(rx_q->page_pool, buf->page, 0, true); in stmmac_rx()
5497 buf->page = NULL; in stmmac_rx()
5499 if (!priv->hwts_rx_en) in stmmac_rx()
5522 buf2_len -= ETH_FCS_LEN; in stmmac_rx()
5523 len -= ETH_FCS_LEN; in stmmac_rx()
5525 buf1_len -= ETH_FCS_LEN; in stmmac_rx()
5526 len -= ETH_FCS_LEN; in stmmac_rx()
5533 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
5535 net_prefetch(page_address(buf->page) + in stmmac_rx()
5536 buf->page_offset); in stmmac_rx()
5538 xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq); in stmmac_rx()
5539 xdp_prepare_buff(&ctx.xdp, page_address(buf->page), in stmmac_rx()
5540 buf->page_offset, buf1_len, true); in stmmac_rx()
5542 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - in stmmac_rx()
5543 buf->page_offset; in stmmac_rx()
5553 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - in stmmac_rx()
5554 buf->page_offset; in stmmac_rx()
5559 unsigned int xdp_res = -PTR_ERR(skb); in stmmac_rx()
5562 page_pool_put_page(rx_q->page_pool, in stmmac_rx()
5565 buf->page = NULL; in stmmac_rx()
5581 buf->page = NULL; in stmmac_rx()
5593 buf1_len = ctx.xdp.data_end - ctx.xdp.data; in stmmac_rx()
5595 skb = napi_build_skb(page_address(buf->page), in stmmac_rx()
5596 rx_q->napi_skb_frag_size); in stmmac_rx()
5598 page_pool_recycle_direct(rx_q->page_pool, in stmmac_rx()
5599 buf->page); in stmmac_rx()
5606 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start; in stmmac_rx()
5610 buf->page = NULL; in stmmac_rx()
5612 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
5614 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in stmmac_rx()
5615 buf->page, buf->page_offset, buf1_len, in stmmac_rx()
5616 priv->dma_conf.dma_buf_sz); in stmmac_rx()
5617 buf->page = NULL; in stmmac_rx()
5621 dma_sync_single_for_cpu(priv->device, buf->sec_addr, in stmmac_rx()
5623 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in stmmac_rx()
5624 buf->sec_page, 0, buf2_len, in stmmac_rx()
5625 priv->dma_conf.dma_buf_sz); in stmmac_rx()
5626 buf->sec_page = NULL; in stmmac_rx()
5639 if (priv->hw->hw_vlan_en) in stmmac_rx()
5641 stmmac_rx_hw_vlan(priv, priv->hw, p, skb); in stmmac_rx()
5644 stmmac_rx_vlan(priv->dev, skb); in stmmac_rx()
5646 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_rx()
5648 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) || in stmmac_rx()
5652 skb->ip_summed = CHECKSUM_UNNECESSARY; in stmmac_rx()
5658 napi_gro_receive(&ch->rx_napi, skb); in stmmac_rx()
5667 rx_q->state_saved = true; in stmmac_rx()
5668 rx_q->state.skb = skb; in stmmac_rx()
5669 rx_q->state.error = error; in stmmac_rx()
5670 rx_q->state.len = len; in stmmac_rx()
5677 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_rx()
5678 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets); in stmmac_rx()
5679 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes); in stmmac_rx()
5680 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); in stmmac_rx()
5681 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_rx()
5683 priv->xstats.rx_dropped += rx_dropped; in stmmac_rx()
5684 priv->xstats.rx_errors += rx_errors; in stmmac_rx()
5693 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rx()
5695 u32 chan = ch->index; in stmmac_napi_poll_rx()
5698 rxq_stats = &priv->xstats.rxq_stats[chan]; in stmmac_napi_poll_rx()
5699 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_napi_poll_rx()
5700 u64_stats_inc(&rxq_stats->napi.poll); in stmmac_napi_poll_rx()
5701 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_napi_poll_rx()
5707 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_rx()
5708 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_poll_rx()
5709 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_rx()
5719 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_tx()
5722 u32 chan = ch->index; in stmmac_napi_poll_tx()
5725 txq_stats = &priv->xstats.txq_stats[chan]; in stmmac_napi_poll_tx()
5726 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_napi_poll_tx()
5727 u64_stats_inc(&txq_stats->napi.poll); in stmmac_napi_poll_tx()
5728 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_napi_poll_tx()
5736 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_tx()
5737 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_poll_tx()
5738 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_tx()
5752 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rxtx()
5757 u32 chan = ch->index; in stmmac_napi_poll_rxtx()
5759 rxq_stats = &priv->xstats.rxq_stats[chan]; in stmmac_napi_poll_rxtx()
5760 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5761 u64_stats_inc(&rxq_stats->napi.poll); in stmmac_napi_poll_rxtx()
5762 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5764 txq_stats = &priv->xstats.txq_stats[chan]; in stmmac_napi_poll_rxtx()
5765 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5766 u64_stats_inc(&txq_stats->napi.poll); in stmmac_napi_poll_rxtx()
5767 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5786 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_rxtx()
5790 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_napi_poll_rxtx()
5791 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_rxtx()
5798 return min(rxtx_done, budget - 1); in stmmac_napi_poll_rxtx()
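
The three poll handlers above share one completion shape: count the work done and, only when it stays below the budget, call napi_complete_done() and re-arm the per-channel DMA interrupt under the channel lock. A minimal sketch of that pattern follows; stmmac_rx(), stmmac_enable_dma_irq() and the stmmac_channel fields are taken from the listing, everything else is illustrative and not the driver's exact code:

	static int example_poll_rx(struct napi_struct *napi, int budget)
	{
		struct stmmac_channel *ch = container_of(napi, struct stmmac_channel, rx_napi);
		struct stmmac_priv *priv = ch->priv_data;
		unsigned long flags;
		int work_done;

		/* Process up to @budget received frames on this channel. */
		work_done = stmmac_rx(priv, budget, ch->index);

		/* Re-arm the DMA interrupt only when the budget was not exhausted. */
		if (work_done < budget && napi_complete_done(napi, work_done)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_enable_dma_irq(priv, priv->ioaddr, ch->index, 1, 0);
			spin_unlock_irqrestore(&ch->lock, flags);
		}

		return work_done;
	}
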
5818 * stmmac_set_rx_mode - entry point for multicast addressing
5833 stmmac_set_filter(priv, priv->hw, dev); in stmmac_set_rx_mode()
5837 * stmmac_change_mtu - entry point to change MTU size for the device.
5844 * 0 on success or a negative errno value on failure, as defined in errno.h in stmmac_change_mtu()
5850 int txfifosz = priv->plat->tx_fifo_size; in stmmac_change_mtu()
5856 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_change_mtu()
5858 txfifosz /= priv->plat->tx_queues_to_use; in stmmac_change_mtu()
5861 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); in stmmac_change_mtu()
5862 return -EINVAL; in stmmac_change_mtu()
5869 return -EINVAL; in stmmac_change_mtu()
5872 netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); in stmmac_change_mtu()
5876 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", in stmmac_change_mtu()
5887 netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); in stmmac_change_mtu()
5896 WRITE_ONCE(dev->mtu, mtu); in stmmac_change_mtu()
5907 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) in stmmac_fix_features()
5910 if (!priv->plat->tx_coe) in stmmac_fix_features()
5914 * needs to have the Tx COE disabled for oversized frames in stmmac_fix_features()
5918 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) in stmmac_fix_features()
5922 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { in stmmac_fix_features()
5924 priv->tso = true; in stmmac_fix_features()
5926 priv->tso = false; in stmmac_fix_features()
5937 /* Keep the COE type when checksum offload is supported */ in stmmac_set_features()
5939 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_set_features()
5941 priv->hw->rx_csum = 0; in stmmac_set_features()
5945 stmmac_rx_ipc(priv, priv->hw); in stmmac_set_features()
5947 if (priv->sph_cap) { in stmmac_set_features()
5948 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_set_features()
5951 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) in stmmac_set_features()
5952 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_set_features()
5956 priv->hw->hw_vlan_en = true; in stmmac_set_features()
5958 priv->hw->hw_vlan_en = false; in stmmac_set_features()
5960 phylink_rx_clk_stop_block(priv->phylink); in stmmac_set_features()
5961 stmmac_set_hw_vlan_mode(priv, priv->hw); in stmmac_set_features()
5962 phylink_rx_clk_stop_unblock(priv->phylink); in stmmac_set_features()
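
stmmac_fix_features()/stmmac_set_features() above gate the advertised offloads on what the hardware actually provides (RX/TX COE, TSO, split header, HW VLAN stripping). A reduced, illustrative ndo_fix_features sketch of that gating; the priv->plat field names and STMMAC_RX_COE_NONE come from the listing, the rest is a sketch rather than the driver's exact code:

	static netdev_features_t example_fix_features(struct net_device *dev,
						      netdev_features_t features)
	{
		struct stmmac_priv *priv = netdev_priv(dev);

		/* No RX checksum engine: do not advertise NETIF_F_RXCSUM. */
		if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
			features &= ~NETIF_F_RXCSUM;

		/* No TX checksum engine: drop all TX checksum offloads. */
		if (!priv->plat->tx_coe)
			features &= ~NETIF_F_CSUM_MASK;

		/* Buggy jumbo support: cannot checksum oversized frames. */
		if (priv->plat->bugged_jumbo && dev->mtu > ETH_DATA_LEN)
			features &= ~NETIF_F_CSUM_MASK;

		return features;
	}
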
5969 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_common_interrupt()
5970 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_common_interrupt()
5975 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_common_interrupt()
5978 if (priv->irq_wake) in stmmac_common_interrupt()
5979 pm_wakeup_event(priv->device, 0); in stmmac_common_interrupt()
5981 if (priv->dma_cap.estsel) in stmmac_common_interrupt()
5982 stmmac_est_irq_status(priv, priv, priv->dev, in stmmac_common_interrupt()
5983 &priv->xstats, tx_cnt); in stmmac_common_interrupt()
5989 if ((priv->plat->has_gmac) || xmac) { in stmmac_common_interrupt()
5990 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); in stmmac_common_interrupt()
5995 priv->tx_path_in_lpi_mode = true; in stmmac_common_interrupt()
5997 priv->tx_path_in_lpi_mode = false; in stmmac_common_interrupt()
6001 stmmac_host_mtl_irq_status(priv, priv->hw, queue); in stmmac_common_interrupt()
6004 if (priv->hw->pcs && in stmmac_common_interrupt()
6005 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { in stmmac_common_interrupt()
6006 if (priv->xstats.pcs_link) in stmmac_common_interrupt()
6007 netif_carrier_on(priv->dev); in stmmac_common_interrupt()
6009 netif_carrier_off(priv->dev); in stmmac_common_interrupt()
6017 * stmmac_interrupt - main ISR
6024 * o Core interrupts to manage: remote wake-up, management counter, LPI
6033 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_interrupt()
6037 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv)) in stmmac_interrupt()
6055 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_mac_interrupt()
6070 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_safety_interrupt()
6083 int chan = tx_q->queue_index; in stmmac_msi_intr_tx()
6091 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_msi_intr_tx()
6110 int chan = rx_q->queue_index; in stmmac_msi_intr_rx()
6117 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_msi_intr_rx()
6126 * stmmac_ioctl - Entry point for the ioctl in stmmac_ioctl()
6137 int ret = -EOPNOTSUPP; in stmmac_ioctl()
6140 return -EINVAL; in stmmac_ioctl()
6146 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); in stmmac_ioctl()
6159 int ret = -EOPNOTSUPP; in stmmac_setup_tc_block_cb()
6161 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) in stmmac_setup_tc_block_cb()
6205 return -EOPNOTSUPP; in stmmac_setup_tc()
6212 int gso = skb_shinfo(skb)->gso_type; in stmmac_select_queue()
6224 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; in stmmac_select_queue()
6232 ret = pm_runtime_resume_and_get(priv->device); in stmmac_set_mac_address()
6240 phylink_rx_clk_stop_block(priv->phylink); in stmmac_set_mac_address()
6241 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); in stmmac_set_mac_address()
6242 phylink_rx_clk_stop_unblock(priv->phylink); in stmmac_set_mac_address()
6245 pm_runtime_put(priv->device); in stmmac_set_mac_address()
6267 le32_to_cpu(p->des0), le32_to_cpu(p->des1), in sysfs_display_ring()
6268 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); in sysfs_display_ring()
6270 p = &(++ep)->basic; in sysfs_display_ring()
6278 struct net_device *dev = seq->private; in stmmac_rings_status_show()
6280 u32 rx_count = priv->plat->rx_queues_to_use; in stmmac_rings_status_show()
6281 u32 tx_count = priv->plat->tx_queues_to_use; in stmmac_rings_status_show()
6284 if ((dev->flags & IFF_UP) == 0) in stmmac_rings_status_show()
6288 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rings_status_show()
6292 if (priv->extend_desc) { in stmmac_rings_status_show()
6294 sysfs_display_ring((void *)rx_q->dma_erx, in stmmac_rings_status_show()
6295 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); in stmmac_rings_status_show()
6298 sysfs_display_ring((void *)rx_q->dma_rx, in stmmac_rings_status_show()
6299 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); in stmmac_rings_status_show()
6304 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_rings_status_show()
6308 if (priv->extend_desc) { in stmmac_rings_status_show()
6310 sysfs_display_ring((void *)tx_q->dma_etx, in stmmac_rings_status_show()
6311 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); in stmmac_rings_status_show()
6312 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { in stmmac_rings_status_show()
6314 sysfs_display_ring((void *)tx_q->dma_tx, in stmmac_rings_status_show()
6315 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); in stmmac_rings_status_show()
6341 struct net_device *dev = seq->private; in stmmac_dma_cap_show()
6344 if (!priv->hw_cap_support) { in stmmac_dma_cap_show()
6354 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); in stmmac_dma_cap_show()
6356 (priv->dma_cap.mbps_1000) ? "Y" : "N"); in stmmac_dma_cap_show()
6358 (priv->dma_cap.half_duplex) ? "Y" : "N"); in stmmac_dma_cap_show()
6359 if (priv->plat->has_xgmac) { in stmmac_dma_cap_show()
6362 priv->dma_cap.multi_addr); in stmmac_dma_cap_show()
6365 (priv->dma_cap.hash_filter) ? "Y" : "N"); in stmmac_dma_cap_show()
6367 (priv->dma_cap.multi_addr) ? "Y" : "N"); in stmmac_dma_cap_show()
6370 (priv->dma_cap.pcs) ? "Y" : "N"); in stmmac_dma_cap_show()
6372 (priv->dma_cap.sma_mdio) ? "Y" : "N"); in stmmac_dma_cap_show()
6374 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); in stmmac_dma_cap_show()
6376 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); in stmmac_dma_cap_show()
6378 (priv->dma_cap.rmon) ? "Y" : "N"); in stmmac_dma_cap_show()
6379 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", in stmmac_dma_cap_show()
6380 (priv->dma_cap.time_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
6381 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", in stmmac_dma_cap_show()
6382 (priv->dma_cap.atime_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
6383 if (priv->plat->has_xgmac) in stmmac_dma_cap_show()
6385 dwxgmac_timestamp_source[priv->dma_cap.tssrc]); in stmmac_dma_cap_show()
6386 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", in stmmac_dma_cap_show()
6387 (priv->dma_cap.eee) ? "Y" : "N"); in stmmac_dma_cap_show()
6388 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); in stmmac_dma_cap_show()
6390 (priv->dma_cap.tx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
6391 if (priv->synopsys_id >= DWMAC_CORE_4_00 || in stmmac_dma_cap_show()
6392 priv->plat->has_xgmac) { in stmmac_dma_cap_show()
6394 (priv->dma_cap.rx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
6397 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); in stmmac_dma_cap_show()
6399 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); in stmmac_dma_cap_show()
6401 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); in stmmac_dma_cap_show()
6404 priv->dma_cap.number_rx_channel); in stmmac_dma_cap_show()
6406 priv->dma_cap.number_tx_channel); in stmmac_dma_cap_show()
6408 priv->dma_cap.number_rx_queues); in stmmac_dma_cap_show()
6410 priv->dma_cap.number_tx_queues); in stmmac_dma_cap_show()
6412 (priv->dma_cap.enh_desc) ? "Y" : "N"); in stmmac_dma_cap_show()
6413 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); in stmmac_dma_cap_show()
6414 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); in stmmac_dma_cap_show()
6415 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ? in stmmac_dma_cap_show()
6416 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0); in stmmac_dma_cap_show()
6417 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); in stmmac_dma_cap_show()
6419 priv->dma_cap.pps_out_num); in stmmac_dma_cap_show()
6421 dwxgmac_safety_feature_desc[priv->dma_cap.asp]); in stmmac_dma_cap_show()
6423 priv->dma_cap.frpsel ? "Y" : "N"); in stmmac_dma_cap_show()
6425 priv->dma_cap.host_dma_width); in stmmac_dma_cap_show()
6427 priv->dma_cap.rssen ? "Y" : "N"); in stmmac_dma_cap_show()
6429 priv->dma_cap.vlhash ? "Y" : "N"); in stmmac_dma_cap_show()
6431 priv->dma_cap.sphen ? "Y" : "N"); in stmmac_dma_cap_show()
6433 priv->dma_cap.vlins ? "Y" : "N"); in stmmac_dma_cap_show()
6435 priv->dma_cap.dvlan ? "Y" : "N"); in stmmac_dma_cap_show()
6437 priv->dma_cap.l3l4fnum); in stmmac_dma_cap_show()
6439 priv->dma_cap.arpoffsel ? "Y" : "N"); in stmmac_dma_cap_show()
6441 priv->dma_cap.estsel ? "Y" : "N"); in stmmac_dma_cap_show()
6443 priv->dma_cap.fpesel ? "Y" : "N"); in stmmac_dma_cap_show()
6444 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", in stmmac_dma_cap_show()
6445 priv->dma_cap.tbssel ? "Y" : "N"); in stmmac_dma_cap_show()
6447 priv->dma_cap.tbs_ch_num); in stmmac_dma_cap_show()
6448 seq_printf(seq, "\tPer-Stream Filtering: %s\n", in stmmac_dma_cap_show()
6449 priv->dma_cap.sgfsel ? "Y" : "N"); in stmmac_dma_cap_show()
6451 BIT(priv->dma_cap.ttsfd) >> 1); in stmmac_dma_cap_show()
6453 priv->dma_cap.numtc); in stmmac_dma_cap_show()
6455 priv->dma_cap.dcben ? "Y" : "N"); in stmmac_dma_cap_show()
6457 priv->dma_cap.advthword ? "Y" : "N"); in stmmac_dma_cap_show()
6459 priv->dma_cap.ptoen ? "Y" : "N"); in stmmac_dma_cap_show()
6460 seq_printf(seq, "\tOne-Step Timestamping: %s\n", in stmmac_dma_cap_show()
6461 priv->dma_cap.osten ? "Y" : "N"); in stmmac_dma_cap_show()
6462 seq_printf(seq, "\tPriority-Based Flow Control: %s\n", in stmmac_dma_cap_show()
6463 priv->dma_cap.pfcen ? "Y" : "N"); in stmmac_dma_cap_show()
6465 BIT(priv->dma_cap.frpes) << 6); in stmmac_dma_cap_show()
6467 BIT(priv->dma_cap.frpbs) << 6); in stmmac_dma_cap_show()
6469 priv->dma_cap.frppipe_num); in stmmac_dma_cap_show()
6471 priv->dma_cap.nrvf_num ? in stmmac_dma_cap_show()
6472 (BIT(priv->dma_cap.nrvf_num) << 1) : 0); in stmmac_dma_cap_show()
6474 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0); in stmmac_dma_cap_show()
6476 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0); in stmmac_dma_cap_show()
6477 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n", in stmmac_dma_cap_show()
6478 priv->dma_cap.cbtisel ? "Y" : "N"); in stmmac_dma_cap_show()
6480 priv->dma_cap.aux_snapshot_n); in stmmac_dma_cap_show()
6481 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n", in stmmac_dma_cap_show()
6482 priv->dma_cap.pou_ost_en ? "Y" : "N"); in stmmac_dma_cap_show()
6484 priv->dma_cap.edma ? "Y" : "N"); in stmmac_dma_cap_show()
6486 priv->dma_cap.ediffc ? "Y" : "N"); in stmmac_dma_cap_show()
6488 priv->dma_cap.vxn ? "Y" : "N"); in stmmac_dma_cap_show()
6490 priv->dma_cap.dbgmem ? "Y" : "N"); in stmmac_dma_cap_show()
6492 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0); in stmmac_dma_cap_show()
6505 if (dev->netdev_ops != &stmmac_netdev_ops) in stmmac_device_event()
6510 debugfs_change_name(priv->dbgfs_dir, "%s", dev->name); in stmmac_device_event()
6528 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); in stmmac_init_fs()
6531 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
6535 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
6545 debugfs_remove_recursive(priv->dbgfs_dir); in stmmac_exit_fs()
6580 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { in stmmac_vlan_update()
6587 if (!priv->dma_cap.vlhash) { in stmmac_vlan_update()
6589 return -EOPNOTSUPP; in stmmac_vlan_update()
6595 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); in stmmac_vlan_update()
6607 ret = pm_runtime_resume_and_get(priv->device); in stmmac_vlan_rx_add_vid()
6614 set_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
6617 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
6621 if (priv->hw->num_vlan) { in stmmac_vlan_rx_add_vid()
6622 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_add_vid()
6627 pm_runtime_put(priv->device); in stmmac_vlan_rx_add_vid()
6641 ret = pm_runtime_resume_and_get(priv->device); in stmmac_vlan_rx_kill_vid()
6648 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_kill_vid()
6650 if (priv->hw->num_vlan) { in stmmac_vlan_rx_kill_vid()
6651 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_kill_vid()
6659 pm_runtime_put(priv->device); in stmmac_vlan_rx_kill_vid()
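
stmmac_set_mac_address() and the two VLAN filter callbacks above all follow the same runtime-PM bracket: take a PM reference (resuming the hardware if it was runtime-suspended) before touching MAC registers, and drop it afterwards. A minimal sketch of that bracket, with the actual register programming elided as a comment (this is not a driver function):

	static int example_update_hw_filter(struct stmmac_priv *priv)
	{
		int ret;

		/* Resume the hardware if needed and take a PM reference. */
		ret = pm_runtime_resume_and_get(priv->device);
		if (ret < 0)
			return ret;

		/* ... program the MAC address / VLAN filter registers here ... */

		/* Drop the reference; the device may runtime-suspend again. */
		pm_runtime_put(priv->device);

		return 0;
	}
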
6668 switch (bpf->command) { in stmmac_bpf()
6670 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); in stmmac_bpf()
6672 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, in stmmac_bpf()
6673 bpf->xsk.queue_id); in stmmac_bpf()
6675 return -EOPNOTSUPP; in stmmac_bpf()
6688 if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) in stmmac_xdp_xmit()
6689 return -ENETDOWN; in stmmac_xdp_xmit()
6692 return -EINVAL; in stmmac_xdp_xmit()
6695 nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit()
6698 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit()
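
stmmac_xdp_xmit() transmits on a queue that the regular stack may also be using, so it locks the netdev TX queue and refreshes its trans_start so the TX watchdog does not fire, as the comment above notes. An illustrative sketch of that locking discipline; frame mapping and ring placement are elided, and only the queue handling mirrors the listing:

	static int example_xdp_xmit(struct net_device *dev, int n,
				    struct xdp_frame **frames, u32 flags)
	{
		struct stmmac_priv *priv = netdev_priv(dev);
		int cpu = smp_processor_id();
		int queue = cpu % priv->plat->tx_queues_to_use;
		struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);
		int i, nxmit = 0;

		if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
			return -ENETDOWN;

		__netif_tx_lock(nq, cpu);
		/* Avoids TX time-out as we are sharing with slow path */
		txq_trans_cond_update(nq);

		for (i = 0; i < n; i++) {
			/* ... map frames[i] and place it on the TX ring ... */
			nxmit++;
		}

		__netif_tx_unlock(nq);
		return nxmit;
	}
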
6723 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_rx_queue()
6726 spin_lock_irqsave(&ch->lock, flags); in stmmac_disable_rx_queue()
6727 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); in stmmac_disable_rx_queue()
6728 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_disable_rx_queue()
6731 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_disable_rx_queue()
6736 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_enable_rx_queue()
6737 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_rx_queue()
6742 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6744 netdev_err(priv->dev, "Failed to alloc RX desc.\n"); in stmmac_enable_rx_queue()
6748 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); in stmmac_enable_rx_queue()
6750 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6751 netdev_err(priv->dev, "Failed to init RX desc.\n"); in stmmac_enable_rx_queue()
6756 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6758 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_enable_rx_queue()
6759 rx_q->dma_rx_phy, rx_q->queue_index); in stmmac_enable_rx_queue()
6761 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * in stmmac_enable_rx_queue()
6763 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6764 rx_q->rx_tail_addr, rx_q->queue_index); in stmmac_enable_rx_queue()
6766 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { in stmmac_enable_rx_queue()
6767 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_enable_rx_queue()
6768 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6770 rx_q->queue_index); in stmmac_enable_rx_queue()
6772 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6773 priv->dma_conf.dma_buf_sz, in stmmac_enable_rx_queue()
6774 rx_q->queue_index); in stmmac_enable_rx_queue()
6779 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_rx_queue()
6780 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); in stmmac_enable_rx_queue()
6781 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_rx_queue()
6786 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_tx_queue()
6789 spin_lock_irqsave(&ch->lock, flags); in stmmac_disable_tx_queue()
6790 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); in stmmac_disable_tx_queue()
6791 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_disable_tx_queue()
6794 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_disable_tx_queue()
6799 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_enable_tx_queue()
6800 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_tx_queue()
6804 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6806 netdev_err(priv->dev, "Failed to alloc TX desc.\n"); in stmmac_enable_tx_queue()
6810 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6812 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6813 netdev_err(priv->dev, "Failed to init TX desc.\n"); in stmmac_enable_tx_queue()
6818 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6820 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_enable_tx_queue()
6821 tx_q->dma_tx_phy, tx_q->queue_index); in stmmac_enable_tx_queue()
6823 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_enable_tx_queue()
6824 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); in stmmac_enable_tx_queue()
6826 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_enable_tx_queue()
6827 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_enable_tx_queue()
6828 tx_q->tx_tail_addr, tx_q->queue_index); in stmmac_enable_tx_queue()
6832 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_tx_queue()
6833 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); in stmmac_enable_tx_queue()
6834 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_tx_queue()
6848 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_xdp_release()
6849 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_xdp_release()
6858 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_release()
6861 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_xdp_release()
6873 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_xdp_open()
6874 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_xdp_open()
6883 ret = alloc_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_open()
6890 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); in stmmac_xdp_open()
6901 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_xdp_open()
6902 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_xdp_open()
6906 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_xdp_open()
6910 rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_xdp_open()
6912 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_xdp_open()
6913 rx_q->dma_rx_phy, chan); in stmmac_xdp_open()
6915 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_xdp_open()
6916 (rx_q->buf_alloc_num * in stmmac_xdp_open()
6918 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_xdp_open()
6919 rx_q->rx_tail_addr, chan); in stmmac_xdp_open()
6921 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { in stmmac_xdp_open()
6922 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_xdp_open()
6923 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_xdp_open()
6925 rx_q->queue_index); in stmmac_xdp_open()
6927 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_xdp_open()
6928 priv->dma_conf.dma_buf_sz, in stmmac_xdp_open()
6929 rx_q->queue_index); in stmmac_xdp_open()
6932 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_xdp_open()
6937 tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_xdp_open()
6939 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_xdp_open()
6940 tx_q->dma_tx_phy, chan); in stmmac_xdp_open()
6942 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_xdp_open()
6943 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_xdp_open()
6944 tx_q->tx_tail_addr, chan); in stmmac_xdp_open()
6946 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in stmmac_xdp_open()
6950 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_xdp_open()
6968 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_xdp_open()
6969 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_xdp_open()
6972 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_open()
6984 if (test_bit(STMMAC_DOWN, &priv->state) || in stmmac_xsk_wakeup()
6985 !netif_carrier_ok(priv->dev)) in stmmac_xsk_wakeup()
6986 return -ENETDOWN; in stmmac_xsk_wakeup()
6989 return -EINVAL; in stmmac_xsk_wakeup()
6991 if (queue >= priv->plat->rx_queues_to_use || in stmmac_xsk_wakeup()
6992 queue >= priv->plat->tx_queues_to_use) in stmmac_xsk_wakeup()
6993 return -EINVAL; in stmmac_xsk_wakeup()
6995 rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_xsk_wakeup()
6996 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xsk_wakeup()
6997 ch = &priv->channel[queue]; in stmmac_xsk_wakeup()
6999 if (!rx_q->xsk_pool && !tx_q->xsk_pool) in stmmac_xsk_wakeup()
7000 return -EINVAL; in stmmac_xsk_wakeup()
7002 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { in stmmac_xsk_wakeup()
7003 /* EQoS does not have per-DMA channel SW interrupt, in stmmac_xsk_wakeup()
7004 * so we schedule the RX NAPI straight away. in stmmac_xsk_wakeup()
7006 if (likely(napi_schedule_prep(&ch->rxtx_napi))) in stmmac_xsk_wakeup()
7007 __napi_schedule(&ch->rxtx_napi); in stmmac_xsk_wakeup()
7016 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_get_stats64()
7017 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_get_stats64()
7022 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; in stmmac_get_stats64()
7027 start = u64_stats_fetch_begin(&txq_stats->q_syncp); in stmmac_get_stats64()
7028 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes); in stmmac_get_stats64()
7029 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); in stmmac_get_stats64()
7031 start = u64_stats_fetch_begin(&txq_stats->napi_syncp); in stmmac_get_stats64()
7032 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets); in stmmac_get_stats64()
7033 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); in stmmac_get_stats64()
7035 stats->tx_packets += tx_packets; in stmmac_get_stats64()
7036 stats->tx_bytes += tx_bytes; in stmmac_get_stats64()
7040 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; in stmmac_get_stats64()
7045 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); in stmmac_get_stats64()
7046 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets); in stmmac_get_stats64()
7047 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes); in stmmac_get_stats64()
7048 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); in stmmac_get_stats64()
7050 stats->rx_packets += rx_packets; in stmmac_get_stats64()
7051 stats->rx_bytes += rx_bytes; in stmmac_get_stats64()
7054 stats->rx_dropped = priv->xstats.rx_dropped; in stmmac_get_stats64()
7055 stats->rx_errors = priv->xstats.rx_errors; in stmmac_get_stats64()
7056 stats->tx_dropped = priv->xstats.tx_dropped; in stmmac_get_stats64()
7057 stats->tx_errors = priv->xstats.tx_errors; in stmmac_get_stats64()
7058 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier; in stmmac_get_stats64()
7059 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision; in stmmac_get_stats64()
7060 stats->rx_length_errors = priv->xstats.rx_length; in stmmac_get_stats64()
7061 stats->rx_crc_errors = priv->xstats.rx_crc_errors; in stmmac_get_stats64()
7062 stats->rx_over_errors = priv->xstats.rx_overflow_cntr; in stmmac_get_stats64()
7063 stats->rx_missed_errors = priv->xstats.rx_missed_cntr; in stmmac_get_stats64()
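
The per-queue counters written on the NAPI paths with u64_stats_add()/u64_stats_inc() under u64_stats_update_begin()/_end() are folded back here with the matching fetch/retry loop, which keeps 64-bit counters consistent on 32-bit machines. A reduced sketch of the reader side for one TX queue; the stmmac_txq_stats field names follow the listing, and the real function also folds in the RX queues and the xstats error counters shown above:

	static void example_fold_txq(struct stmmac_txq_stats *txq_stats,
				     struct rtnl_link_stats64 *stats)
	{
		unsigned int start;
		u64 packets, bytes;

		/* Retry until a consistent snapshot of the NAPI-side counters is read. */
		do {
			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
			packets = u64_stats_read(&txq_stats->napi.tx_packets);
		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));

		/* tx_bytes is updated on the xmit path under the q_syncp seqcount. */
		do {
			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
			bytes = u64_stats_read(&txq_stats->q.tx_bytes);
		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}
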
7091 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) in stmmac_reset_subtask()
7093 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_reset_subtask()
7096 netdev_err(priv->dev, "Reset adapter.\n"); in stmmac_reset_subtask()
7099 netif_trans_update(priv->dev); in stmmac_reset_subtask()
7100 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) in stmmac_reset_subtask()
7103 set_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
7104 dev_close(priv->dev); in stmmac_reset_subtask()
7105 dev_open(priv->dev, NULL); in stmmac_reset_subtask()
7106 clear_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
7107 clear_bit(STMMAC_RESETING, &priv->state); in stmmac_reset_subtask()
7117 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); in stmmac_service_task()
7121 * stmmac_hw_init - Init the MAC device
7132 /* dwmac-sun8i only works in chain mode */ in stmmac_hw_init()
7133 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) in stmmac_hw_init()
7135 priv->chain_mode = chain_mode; in stmmac_hw_init()
7143 priv->hw_cap_support = stmmac_get_hw_features(priv); in stmmac_hw_init()
7144 if (priv->hw_cap_support) { in stmmac_hw_init()
7145 dev_info(priv->device, "DMA HW capability register supported\n"); in stmmac_hw_init()
7152 priv->plat->enh_desc = priv->dma_cap.enh_desc; in stmmac_hw_init()
7153 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && in stmmac_hw_init()
7154 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL); in stmmac_hw_init()
7155 if (priv->dma_cap.hash_tb_sz) { in stmmac_hw_init()
7156 priv->hw->multicast_filter_bins = in stmmac_hw_init()
7157 (BIT(priv->dma_cap.hash_tb_sz) << 5); in stmmac_hw_init()
7158 priv->hw->mcast_bits_log2 = in stmmac_hw_init()
7159 ilog2(priv->hw->multicast_filter_bins); in stmmac_hw_init()
7163 if (priv->plat->force_thresh_dma_mode) in stmmac_hw_init()
7164 priv->plat->tx_coe = 0; in stmmac_hw_init()
7166 priv->plat->tx_coe = priv->dma_cap.tx_coe; in stmmac_hw_init()
7169 priv->plat->rx_coe = priv->dma_cap.rx_coe; in stmmac_hw_init()
7171 if (priv->dma_cap.rx_coe_type2) in stmmac_hw_init()
7172 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; in stmmac_hw_init()
7173 else if (priv->dma_cap.rx_coe_type1) in stmmac_hw_init()
7174 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; in stmmac_hw_init()
7177 dev_info(priv->device, "No HW DMA feature register supported\n"); in stmmac_hw_init()
7180 if (priv->plat->rx_coe) { in stmmac_hw_init()
7181 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_hw_init()
7182 dev_info(priv->device, "RX Checksum Offload Engine supported\n"); in stmmac_hw_init()
7183 if (priv->synopsys_id < DWMAC_CORE_4_00) in stmmac_hw_init()
7184 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); in stmmac_hw_init()
7186 if (priv->plat->tx_coe) in stmmac_hw_init()
7187 dev_info(priv->device, "TX Checksum insertion supported\n"); in stmmac_hw_init()
7189 if (priv->plat->pmt) { in stmmac_hw_init()
7190 dev_info(priv->device, "Wake-Up On LAN supported\n"); in stmmac_hw_init()
7191 device_set_wakeup_capable(priv->device, 1); in stmmac_hw_init()
7192 devm_pm_set_wake_irq(priv->device, priv->wol_irq); in stmmac_hw_init()
7195 if (priv->dma_cap.tsoen) in stmmac_hw_init()
7196 dev_info(priv->device, "TSO supported\n"); in stmmac_hw_init()
7198 if (priv->dma_cap.number_rx_queues && in stmmac_hw_init()
7199 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) { in stmmac_hw_init()
7200 dev_warn(priv->device, in stmmac_hw_init()
7202 priv->plat->rx_queues_to_use); in stmmac_hw_init()
7203 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues; in stmmac_hw_init()
7205 if (priv->dma_cap.number_tx_queues && in stmmac_hw_init()
7206 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) { in stmmac_hw_init()
7207 dev_warn(priv->device, in stmmac_hw_init()
7209 priv->plat->tx_queues_to_use); in stmmac_hw_init()
7210 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues; in stmmac_hw_init()
7213 if (priv->dma_cap.rx_fifo_size && in stmmac_hw_init()
7214 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) { in stmmac_hw_init()
7215 dev_warn(priv->device, in stmmac_hw_init()
7217 priv->plat->rx_fifo_size); in stmmac_hw_init()
7218 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size; in stmmac_hw_init()
7220 if (priv->dma_cap.tx_fifo_size && in stmmac_hw_init()
7221 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) { in stmmac_hw_init()
7222 dev_warn(priv->device, in stmmac_hw_init()
7224 priv->plat->tx_fifo_size); in stmmac_hw_init()
7225 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size; in stmmac_hw_init()
7228 priv->hw->vlan_fail_q_en = in stmmac_hw_init()
7229 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); in stmmac_hw_init()
7230 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; in stmmac_hw_init()
7233 if (priv->hwif_quirks) { in stmmac_hw_init()
7234 ret = priv->hwif_quirks(priv); in stmmac_hw_init()
7244 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || in stmmac_hw_init()
7245 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { in stmmac_hw_init()
7246 priv->use_riwt = 1; in stmmac_hw_init()
7247 dev_info(priv->device, in stmmac_hw_init()
7259 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_add()
7262 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_add()
7264 ch->priv_data = priv; in stmmac_napi_add()
7265 ch->index = queue; in stmmac_napi_add()
7266 spin_lock_init(&ch->lock); in stmmac_napi_add()
7268 if (queue < priv->plat->rx_queues_to_use) { in stmmac_napi_add()
7269 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); in stmmac_napi_add()
7271 if (queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
7272 netif_napi_add_tx(dev, &ch->tx_napi, in stmmac_napi_add()
7275 if (queue < priv->plat->rx_queues_to_use && in stmmac_napi_add()
7276 queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
7277 netif_napi_add(dev, &ch->rxtx_napi, in stmmac_napi_add()
7288 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_del()
7291 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_del()
7293 if (queue < priv->plat->rx_queues_to_use) in stmmac_napi_del()
7294 netif_napi_del(&ch->rx_napi); in stmmac_napi_del()
7295 if (queue < priv->plat->tx_queues_to_use) in stmmac_napi_del()
7296 netif_napi_del(&ch->tx_napi); in stmmac_napi_del()
7297 if (queue < priv->plat->rx_queues_to_use && in stmmac_napi_del()
7298 queue < priv->plat->tx_queues_to_use) { in stmmac_napi_del()
7299 netif_napi_del(&ch->rxtx_napi); in stmmac_napi_del()
7314 priv->plat->rx_queues_to_use = rx_cnt; in stmmac_reinit_queues()
7315 priv->plat->tx_queues_to_use = tx_cnt; in stmmac_reinit_queues()
7317 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) in stmmac_reinit_queues()
7318 priv->rss.table[i] = ethtool_rxfh_indir_default(i, in stmmac_reinit_queues()
7337 priv->dma_conf.dma_rx_size = rx_size; in stmmac_reinit_ringparam()
7338 priv->dma_conf.dma_tx_size = tx_size; in stmmac_reinit_ringparam()
7349 struct dma_desc *desc_contains_ts = ctx->desc; in stmmac_xdp_rx_timestamp()
7350 struct stmmac_priv *priv = ctx->priv; in stmmac_xdp_rx_timestamp()
7351 struct dma_desc *ndesc = ctx->ndesc; in stmmac_xdp_rx_timestamp()
7352 struct dma_desc *desc = ctx->desc; in stmmac_xdp_rx_timestamp()
7355 if (!priv->hwts_rx_en) in stmmac_xdp_rx_timestamp()
7356 return -ENODATA; in stmmac_xdp_rx_timestamp()
7359 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) in stmmac_xdp_rx_timestamp()
7363 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) { in stmmac_xdp_rx_timestamp()
7364 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns); in stmmac_xdp_rx_timestamp()
7365 ns -= priv->plat->cdc_error_adj; in stmmac_xdp_rx_timestamp()
7370 return -ENODATA; in stmmac_xdp_rx_timestamp()
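
stmmac_xdp_rx_timestamp() above is the XDP RX metadata hook for hardware timestamps; it becomes visible to BPF programs once it is registered in the device's xdp_metadata_ops (the assignment to ndev->xdp_metadata_ops appears further down, in stmmac_dvr_probe()). A sketch of how such an ops table is wired up, assuming a handler with the kernel's xmo_rx_timestamp signature; the names are illustrative, not the driver's:

	static int example_xmo_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
	{
		/* Pull the timestamp out of the RX descriptor tied to @ctx;
		 * return -ENODATA when RX timestamping is off or no stamp exists. */
		return -ENODATA;
	}

	static const struct xdp_metadata_ops example_xdp_metadata_ops = {
		.xmo_rx_timestamp = example_xmo_rx_timestamp,
	};

	/* In probe: ndev->xdp_metadata_ops = &example_xdp_metadata_ops; */
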
7399 return -ENOMEM; in stmmac_dvr_probe()
7404 priv->device = device; in stmmac_dvr_probe()
7405 priv->dev = ndev; in stmmac_dvr_probe()
7408 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp); in stmmac_dvr_probe()
7410 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp); in stmmac_dvr_probe()
7411 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp); in stmmac_dvr_probe()
7414 priv->xstats.pcpu_stats = in stmmac_dvr_probe()
7416 if (!priv->xstats.pcpu_stats) in stmmac_dvr_probe()
7417 return -ENOMEM; in stmmac_dvr_probe()
7420 priv->pause_time = pause; in stmmac_dvr_probe()
7421 priv->plat = plat_dat; in stmmac_dvr_probe()
7422 priv->ioaddr = res->addr; in stmmac_dvr_probe()
7423 priv->dev->base_addr = (unsigned long)res->addr; in stmmac_dvr_probe()
7424 priv->plat->dma_cfg->multi_msi_en = in stmmac_dvr_probe()
7425 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN); in stmmac_dvr_probe()
7427 priv->dev->irq = res->irq; in stmmac_dvr_probe()
7428 priv->wol_irq = res->wol_irq; in stmmac_dvr_probe()
7429 priv->lpi_irq = res->lpi_irq; in stmmac_dvr_probe()
7430 priv->sfty_irq = res->sfty_irq; in stmmac_dvr_probe()
7431 priv->sfty_ce_irq = res->sfty_ce_irq; in stmmac_dvr_probe()
7432 priv->sfty_ue_irq = res->sfty_ue_irq; in stmmac_dvr_probe()
7434 priv->rx_irq[i] = res->rx_irq[i]; in stmmac_dvr_probe()
7436 priv->tx_irq[i] = res->tx_irq[i]; in stmmac_dvr_probe()
7438 if (!is_zero_ether_addr(res->mac)) in stmmac_dvr_probe()
7439 eth_hw_addr_set(priv->dev, res->mac); in stmmac_dvr_probe()
7441 dev_set_drvdata(device, priv->dev); in stmmac_dvr_probe()
7446 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); in stmmac_dvr_probe()
7447 if (!priv->af_xdp_zc_qps) in stmmac_dvr_probe()
7448 return -ENOMEM; in stmmac_dvr_probe()
7451 priv->wq = create_singlethread_workqueue("stmmac_wq"); in stmmac_dvr_probe()
7452 if (!priv->wq) { in stmmac_dvr_probe()
7453 dev_err(priv->device, "failed to create workqueue\n"); in stmmac_dvr_probe()
7454 ret = -ENOMEM; in stmmac_dvr_probe()
7458 INIT_WORK(&priv->service_task, stmmac_service_task); in stmmac_dvr_probe()
7460 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); in stmmac_dvr_probe()
7466 priv->plat->phy_addr = phyaddr; in stmmac_dvr_probe()
7468 if (priv->plat->stmmac_rst) { in stmmac_dvr_probe()
7469 ret = reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7470 reset_control_deassert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7474 if (ret == -ENOTSUPP) in stmmac_dvr_probe()
7475 reset_control_reset(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7478 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); in stmmac_dvr_probe()
7479 if (ret == -ENOTSUPP) in stmmac_dvr_probe()
7480 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", in stmmac_dvr_probe()
7493 if (priv->synopsys_id < DWMAC_CORE_5_20) in stmmac_dvr_probe()
7494 priv->plat->dma_cfg->dche = false; in stmmac_dvr_probe()
7498 ndev->netdev_ops = &stmmac_netdev_ops; in stmmac_dvr_probe()
7500 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops; in stmmac_dvr_probe()
7501 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops; in stmmac_dvr_probe()
7503 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in stmmac_dvr_probe()
7505 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in stmmac_dvr_probe()
7510 ndev->hw_features |= NETIF_F_HW_TC; in stmmac_dvr_probe()
7513 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { in stmmac_dvr_probe()
7514 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; in stmmac_dvr_probe()
7515 if (priv->plat->has_gmac4) in stmmac_dvr_probe()
7516 ndev->hw_features |= NETIF_F_GSO_UDP_L4; in stmmac_dvr_probe()
7517 priv->tso = true; in stmmac_dvr_probe()
7518 dev_info(priv->device, "TSO feature enabled\n"); in stmmac_dvr_probe()
7521 if (priv->dma_cap.sphen && in stmmac_dvr_probe()
7522 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) { in stmmac_dvr_probe()
7523 ndev->hw_features |= NETIF_F_GRO; in stmmac_dvr_probe()
7524 priv->sph_cap = true; in stmmac_dvr_probe()
7525 priv->sph = priv->sph_cap; in stmmac_dvr_probe()
7526 dev_info(priv->device, "SPH feature enabled\n"); in stmmac_dvr_probe()
7534 if (priv->plat->host_dma_width) in stmmac_dvr_probe()
7535 priv->dma_cap.host_dma_width = priv->plat->host_dma_width; in stmmac_dvr_probe()
7537 priv->dma_cap.host_dma_width = priv->dma_cap.addr64; in stmmac_dvr_probe()
7539 if (priv->dma_cap.host_dma_width) { in stmmac_dvr_probe()
7541 DMA_BIT_MASK(priv->dma_cap.host_dma_width)); in stmmac_dvr_probe()
7543 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n", in stmmac_dvr_probe()
7544 priv->dma_cap.host_dma_width, priv->dma_cap.addr64); in stmmac_dvr_probe()
7551 priv->plat->dma_cfg->eame = true; in stmmac_dvr_probe()
7555 dev_err(priv->device, "Failed to set DMA Mask\n"); in stmmac_dvr_probe()
7559 priv->dma_cap.host_dma_width = 32; in stmmac_dvr_probe()
7563 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; in stmmac_dvr_probe()
7564 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); in stmmac_dvr_probe()
7567 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; in stmmac_dvr_probe()
7568 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { in stmmac_dvr_probe()
7569 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; in stmmac_dvr_probe()
7570 priv->hw->hw_vlan_en = true; in stmmac_dvr_probe()
7572 if (priv->dma_cap.vlhash) { in stmmac_dvr_probe()
7573 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in stmmac_dvr_probe()
7574 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; in stmmac_dvr_probe()
7576 if (priv->dma_cap.vlins) { in stmmac_dvr_probe()
7577 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; in stmmac_dvr_probe()
7578 if (priv->dma_cap.dvlan) in stmmac_dvr_probe()
7579 ndev->features |= NETIF_F_HW_VLAN_STAG_TX; in stmmac_dvr_probe()
7582 priv->msg_enable = netif_msg_init(debug, default_msg_level); in stmmac_dvr_probe()
7584 priv->xstats.threshold = tc; in stmmac_dvr_probe()
7587 rxq = priv->plat->rx_queues_to_use; in stmmac_dvr_probe()
7588 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); in stmmac_dvr_probe()
7589 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) in stmmac_dvr_probe()
7590 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); in stmmac_dvr_probe()
7592 if (priv->dma_cap.rssen && priv->plat->rss_en) in stmmac_dvr_probe()
7593 ndev->features |= NETIF_F_RXHASH; in stmmac_dvr_probe()
7595 ndev->vlan_features |= ndev->features; in stmmac_dvr_probe()
7597 /* MTU range: 46 - hw-specific max */ in stmmac_dvr_probe()
7598 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; in stmmac_dvr_probe()
7599 if (priv->plat->has_xgmac) in stmmac_dvr_probe()
7600 ndev->max_mtu = XGMAC_JUMBO_LEN; in stmmac_dvr_probe()
7601 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) in stmmac_dvr_probe()
7602 ndev->max_mtu = JUMBO_LEN; in stmmac_dvr_probe()
7604 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); in stmmac_dvr_probe()
7605 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu, in stmmac_dvr_probe()
7606 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range. in stmmac_dvr_probe()
7608 if ((priv->plat->maxmtu < ndev->max_mtu) && in stmmac_dvr_probe()
7609 (priv->plat->maxmtu >= ndev->min_mtu)) in stmmac_dvr_probe()
7610 ndev->max_mtu = priv->plat->maxmtu; in stmmac_dvr_probe()
7611 else if (priv->plat->maxmtu < ndev->min_mtu) in stmmac_dvr_probe()
7612 dev_warn(priv->device, in stmmac_dvr_probe()
7614 __func__, priv->plat->maxmtu); in stmmac_dvr_probe()
7616 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in stmmac_dvr_probe()
7621 mutex_init(&priv->lock); in stmmac_dvr_probe()
7634 dev_err_probe(priv->device, ret, in stmmac_dvr_probe()
7636 priv->plat->bus_id); in stmmac_dvr_probe()
7652 dev_err(priv->device, "%s: ERROR %i registering the device\n", in stmmac_dvr_probe()
7661 if (priv->plat->dump_debug_regs) in stmmac_dvr_probe()
7662 priv->plat->dump_debug_regs(priv->plat->bsp_priv); in stmmac_dvr_probe()
7672 phylink_destroy(priv->phylink); in stmmac_dvr_probe()
7680 destroy_workqueue(priv->wq); in stmmac_dvr_probe()
7682 bitmap_free(priv->af_xdp_zc_qps); in stmmac_dvr_probe()
7699 netdev_info(priv->dev, "%s: removing driver\n", __func__); in stmmac_dvr_remove()
7708 phylink_destroy(priv->phylink); in stmmac_dvr_remove()
7709 if (priv->plat->stmmac_rst) in stmmac_dvr_remove()
7710 reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_remove()
7711 reset_control_assert(priv->plat->stmmac_ahb_rst); in stmmac_dvr_remove()
7716 destroy_workqueue(priv->wq); in stmmac_dvr_remove()
7717 mutex_destroy(&priv->lock); in stmmac_dvr_remove()
7718 bitmap_free(priv->af_xdp_zc_qps); in stmmac_dvr_remove()
7726 * stmmac_suspend - suspend callback
7741 mutex_lock(&priv->lock); in stmmac_suspend()
7747 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_suspend()
7748 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_suspend()
7750 if (priv->eee_sw_timer_en) { in stmmac_suspend()
7751 priv->tx_path_in_lpi_mode = false; in stmmac_suspend()
7752 timer_delete_sync(&priv->eee_ctrl_timer); in stmmac_suspend()
7758 if (priv->plat->serdes_powerdown) in stmmac_suspend()
7759 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); in stmmac_suspend()
7763 stmmac_pmt(priv, priv->hw, priv->wolopts); in stmmac_suspend()
7764 priv->irq_wake = 1; in stmmac_suspend()
7766 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_suspend()
7767 pinctrl_pm_select_sleep_state(priv->device); in stmmac_suspend()
7770 mutex_unlock(&priv->lock); in stmmac_suspend()
7774 phylink_speed_down(priv->phylink, false); in stmmac_suspend()
7776 phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv)); in stmmac_suspend()
7780 ethtool_mmsv_stop(&priv->fpe_cfg.mmsv); in stmmac_suspend()
7782 if (priv->plat->suspend) in stmmac_suspend()
7783 return priv->plat->suspend(dev, priv->plat->bsp_priv); in stmmac_suspend()
7791 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_reset_rx_queue()
7793 rx_q->cur_rx = 0; in stmmac_reset_rx_queue()
7794 rx_q->dirty_rx = 0; in stmmac_reset_rx_queue()
7799 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_reset_tx_queue()
7801 tx_q->cur_tx = 0; in stmmac_reset_tx_queue()
7802 tx_q->dirty_tx = 0; in stmmac_reset_tx_queue()
7803 tx_q->mss = 0; in stmmac_reset_tx_queue()
7805 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_reset_tx_queue()
7809 * stmmac_reset_queues_param - reset queue parameters
7814 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_reset_queues_param()
7815 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_reset_queues_param()
7826 * stmmac_resume - resume callback
7837 if (priv->plat->resume) { in stmmac_resume()
7838 ret = priv->plat->resume(dev, priv->plat->bsp_priv); in stmmac_resume()
7847 * automatically as soon as a magic packet or a Wake-up frame in stmmac_resume()
7853 mutex_lock(&priv->lock); in stmmac_resume()
7854 stmmac_pmt(priv, priv->hw, 0); in stmmac_resume()
7855 mutex_unlock(&priv->lock); in stmmac_resume()
7856 priv->irq_wake = 0; in stmmac_resume()
7858 pinctrl_pm_select_default_state(priv->device); in stmmac_resume()
7860 if (priv->mii) in stmmac_resume()
7861 stmmac_mdio_reset(priv->mii); in stmmac_resume()
7864 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in stmmac_resume()
7865 priv->plat->serdes_powerup) { in stmmac_resume()
7866 ret = priv->plat->serdes_powerup(ndev, in stmmac_resume()
7867 priv->plat->bsp_priv); in stmmac_resume()
7878 phylink_prepare_resume(priv->phylink); in stmmac_resume()
7880 mutex_lock(&priv->lock); in stmmac_resume()
7885 stmmac_clear_descriptors(priv, &priv->dma_conf); in stmmac_resume()
7889 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); in stmmac_resume()
7890 mutex_unlock(&priv->lock); in stmmac_resume()
7898 phylink_rx_clk_stop_block(priv->phylink); in stmmac_resume()
7901 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); in stmmac_resume()
7902 phylink_rx_clk_stop_unblock(priv->phylink); in stmmac_resume()
7907 mutex_unlock(&priv->lock); in stmmac_resume()
7913 phylink_resume(priv->phylink); in stmmac_resume()
7915 phylink_speed_up(priv->phylink); in stmmac_resume()