// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

#define STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_conf.dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
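
/* For illustration (hedged editor's note, not part of the original sources):
 * these module parameters can be overridden when the module is loaded, e.g.
 * "modprobe stmmac eee_timer=2000 buf_sz=4096", or, for a built-in driver,
 * on the kernel command line as "stmmac.eee_timer=2000". Out-of-range values
 * fall back to safe defaults in stmmac_verify_args() below.
 */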
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
static void stmmac_reset_queues_param(struct stmmac_priv *priv);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_disable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	u32 queue;

	/* synchronize_rcu() needed for pending XDP buffers to drain */
	for (queue = 0; queue < rx_queues_cnt; queue++) {
		rx_q = &priv->dma_conf.rx_queue[queue];
		if (rx_q->xsk_pool) {
			synchronize_rcu();
			break;
		}
	}

	__stmmac_disable_all_queues(priv);
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_enable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}
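
/* Illustrative example (hedged editor's note, not from the original sources):
 * on a platform whose csr clock runs at, say, 75 MHz and which does not pass
 * a fixed clk_csr value, the 60-100 MHz branch above selects
 * STMMAC_CSR_60_100M. The MAC then applies the divider associated with that
 * range so that the resulting MDC frequency stays within the roughly 2.5 MHz
 * limit that IEEE 802.3 specifies for MDIO accesses.
 */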
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}
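
/* Worked example (editor's note, not from the original sources), assuming a
 * TX ring of dma_tx_size = 512 entries:
 *   cur_tx = 510, dirty_tx = 5   ->  avail = 512 - 510 + 5 - 1 = 6 free slots
 *   cur_tx = 10,  dirty_tx = 200 ->  avail = 200 - 10 - 1 = 189 free slots
 * One slot is always kept unused so that cur_tx == dirty_tx unambiguously
 * means "ring empty". stmmac_rx_dirty() is the RX-side counterpart: it counts
 * how many ring entries still need their buffers refilled.
 */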
static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter LPI mode in case of
 * EEE.
 */
static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return -EBUSY; /* still unfinished work */
	}

	/* Check and enter LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
			priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
	return 0;
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE when the LPI state
 * is active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing private info
 * Description:
 * if there is no data transfer and if we are not in LPI state,
 * then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	if (stmmac_enable_eee_mode(priv))
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
 * can also manage EEE, this function enables the LPI state and starts the
 * related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		ns -= priv->plat->cdc_error_adj;

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		ns -= priv->plat->cdc_error_adj;

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing (TX)
 * and incoming (RX) packet timestamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtains the current hardware timestamping settings
 * as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}
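
/* Illustrative example (hedged editor's note, not from the original sources):
 * a SIOCSHWTSTAMP request with tx_type = HWTSTAMP_TX_ON and
 * rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT ends up programming PTP_TCR with
 * STMMAC_HWTS_ACTIVE (TSENA | TSCFUPDT | TSCTRLSSR) plus TSVER2ENA,
 * SNAPTYPSEL_1, TSIPV4ENA, TSIPV6ENA and TSIPENA, so PTPv2 event messages are
 * timestamped over both the UDP and Ethernet transports.
 */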
/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * Will be rerun after resuming from suspend, case in which the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
 */
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 now;
	u32 sec_inc = 0;
	u64 temp = 0;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * formula is :
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
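
/* Worked example (hedged editor's note, not from the original sources): with
 * clk_ptp_rate = 100 MHz and a sub-second increment of sec_inc = 20 ns,
 * temp = 1e9 / 20 = 50,000,000, so
 * default_addend = (50e6 << 32) / 100e6 = 2^31 (0x80000000).
 * The fine-update accumulator then overflows once every two PTP clock
 * cycles, advancing the counter by sec_inc each time, which matches the
 * fine-correction method enabled by PTP_TCR_TSCFUPDT in STMMAC_HWTS_ACTIVE.
 */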
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	int ret;

	if (priv->plat->ptp_clk_freq_config)
		priv->plat->ptp_clk_freq_config(priv);

	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
	if (ret)
		return ret;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}

static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
						 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	if (priv->hw->xpcs)
		return &priv->hw->xpcs->pcs;

	if (priv->hw->lynx_pcs)
		return priv->hw->lynx_pcs;

	return NULL;
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}

static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
	} else {
		*lo_state = FPE_STATE_OFF;
		*lp_state = FPE_STATE_OFF;
	}
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 old_ctrl, ctrl;

	if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup)
		priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);

	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl = old_ctrl & ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_AUTO;
	else if (rx_pause && !tx_pause)
		priv->flow_ctrl = FLOW_RX;
	else if (!rx_pause && tx_pause)
		priv->flow_ctrl = FLOW_TX;
	else
		priv->flow_ctrl = FLOW_OFF;

	stmmac_mac_flow_ctrl(priv, duplex);

	if (ctrl != old_ctrl)
		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active =
			phy_init_eee(phy, !(priv->plat->flags &
				STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.mac_select_pcs = stmmac_mac_select_pcs,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->mac_interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct fwnode_handle *phy_fwnode;
	struct fwnode_handle *fwnode;
	int ret;

	if (!phylink_expects_phy(priv->phylink))
		return 0;

	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	if (fwnode)
		phy_fwnode = fwnode_get_phy_node(fwnode);
	else
		phy_fwnode = NULL;

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		if (addr < 0) {
			netdev_err(priv->dev, "no phy found\n");
			return -ENODEV;
		}

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	} else {
		fwnode_handle_put(phy_fwnode);
		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
	}

	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
		device_set_wakeup_enable(priv->device, !!wol.wolopts);
	}

	return ret;
}

static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data;
	int mode = priv->plat->phy_interface;
	struct fwnode_handle *fwnode;
	struct phylink *phylink;
	int max_speed;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.mac_managed_pm = true;

	mdio_bus_data = priv->plat->mdio_bus_data;
	if (mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	/* Set the platform/firmware specified interface mode. Note, phylink
	 * deals with the PHY interface mode, not the MAC interface mode.
	 */
	__set_bit(mode, priv->phylink_config.supported_interfaces);

	/* If we have an xpcs, it defines which PHY interfaces are supported. */
	if (priv->hw->xpcs)
		xpcs_get_interfaces(priv->hw->xpcs,
				    priv->phylink_config.supported_interfaces);

	priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
						MAC_10FD | MAC_100FD |
						MAC_1000FD;

	/* Half-Duplex can only work with single queue */
	if (priv->plat->tx_queues_to_use <= 1)
		priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
							 MAC_1000HD;

	/* Get the MAC specific capabilities */
	stmmac_mac_phylink_get_caps(priv);

	max_speed = priv->plat->max_speed;
	if (max_speed)
		phylink_limit_mac_speed(&priv->phylink_config, max_speed);

	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}
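
/* Illustrative example (hedged editor's note, not from the original sources):
 * a platform with four TX queues and plat->max_speed = 100 would advertise
 * only MAC_10FD | MAC_100FD plus the pause capabilities: the half-duplex
 * bits are skipped because more than one TX queue is in use, and
 * phylink_limit_mac_speed() masks out everything faster than 100 Mb/s.
 */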
static void stmmac_display_rx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv,
				 struct stmmac_dma_conf *dma_conf)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv, dma_conf);

	/* Display TX ring */
	stmmac_display_tx_rings(priv, dma_conf);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}
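
/* Worked example (editor's note, not from the original sources): an MTU of
 * 3000 bytes is >= BUF_SIZE_2KiB but < BUF_SIZE_4KiB, so stmmac_set_bfsize()
 * returns BUF_SIZE_4KiB, while the default 1500-byte MTU keeps the
 * DEFAULT_BUFSIZE (1536 byte) DMA buffers.
 */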
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
					struct stmmac_dma_conf *dma_conf,
					u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < dma_conf->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == dma_conf->dma_rx_size - 1),
					    dma_conf->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == dma_conf->dma_rx_size - 1),
					    dma_conf->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
					struct stmmac_dma_conf *dma_conf,
					u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		int last = (i == (dma_conf->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv,
				     struct stmmac_dma_conf *dma_conf)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, dma_conf, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @rx_q: RX queue
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
				  struct stmmac_rx_queue *rx_q,
				  int i)
{
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv,
			       struct stmmac_dma_conf *dma_conf,
			       u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, rx_q, i);
}

static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
				   struct stmmac_dma_conf *dma_conf,
				   u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct dma_desc *p;
		int ret;

		if (priv->extend_desc)
			p = &((rx_q->dma_erx + i)->basic);
		else
			p = rx_q->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
					     queue);
		if (ret)
			return ret;

		rx_q->buf_alloc_num++;
	}

	return 0;
}

/**
 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 */
static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
				struct stmmac_dma_conf *dma_conf,
				u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

		if (!buf->xdp)
			continue;

		xsk_buff_free(buf->xdp);
		buf->xdp = NULL;
	}
}

static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
				      struct stmmac_dma_conf *dma_conf,
				      u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
	 * use this macro to make sure no size violations.
	 */
	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + i);
		else
			p = rx_q->dma_rx + i;

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);
		rx_q->buf_alloc_num++;
	}

	return 0;
}

static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(priv->dev, queue);
}

/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 0);
	}

	return 0;
}

static int init_dma_rx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf,
				  gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int queue;
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, dma_conf, queue);
		else
			dma_free_rx_skbufs(priv, dma_conf, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		queue--;
	}

	return ret;
}

/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 1);
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((tx_q->dma_etx + i)->basic);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &((tx_q->dma_entx + i)->basic);
		else
			p = tx_q->dma_tx + i;

		stmmac_clear_desc(priv, p);

		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].last_segment = false;
		tx_q->tx_skbuff[i] = NULL;
	}

	return 0;
}

static int init_dma_tx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt;
	u32 queue;

	tx_queue_cnt = priv->plat->tx_queues_to_use;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		__init_dma_tx_desc_rings(priv, dma_conf, queue);

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
1845 */ 1846 static int init_dma_desc_rings(struct net_device *dev, 1847 struct stmmac_dma_conf *dma_conf, 1848 gfp_t flags) 1849 { 1850 struct stmmac_priv *priv = netdev_priv(dev); 1851 int ret; 1852 1853 ret = init_dma_rx_desc_rings(dev, dma_conf, flags); 1854 if (ret) 1855 return ret; 1856 1857 ret = init_dma_tx_desc_rings(dev, dma_conf); 1858 1859 stmmac_clear_descriptors(priv, dma_conf); 1860 1861 if (netif_msg_hw(priv)) 1862 stmmac_display_rings(priv, dma_conf); 1863 1864 return ret; 1865 } 1866 1867 /** 1868 * dma_free_tx_skbufs - free TX dma buffers 1869 * @priv: private structure 1870 * @dma_conf: structure to take the dma data 1871 * @queue: TX queue index 1872 */ 1873 static void dma_free_tx_skbufs(struct stmmac_priv *priv, 1874 struct stmmac_dma_conf *dma_conf, 1875 u32 queue) 1876 { 1877 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 1878 int i; 1879 1880 tx_q->xsk_frames_done = 0; 1881 1882 for (i = 0; i < dma_conf->dma_tx_size; i++) 1883 stmmac_free_tx_buffer(priv, dma_conf, queue, i); 1884 1885 if (tx_q->xsk_pool && tx_q->xsk_frames_done) { 1886 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 1887 tx_q->xsk_frames_done = 0; 1888 tx_q->xsk_pool = NULL; 1889 } 1890 } 1891 1892 /** 1893 * stmmac_free_tx_skbufs - free TX skb buffers 1894 * @priv: private structure 1895 */ 1896 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) 1897 { 1898 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 1899 u32 queue; 1900 1901 for (queue = 0; queue < tx_queue_cnt; queue++) 1902 dma_free_tx_skbufs(priv, &priv->dma_conf, queue); 1903 } 1904 1905 /** 1906 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue) 1907 * @priv: private structure 1908 * @dma_conf: structure to take the dma data 1909 * @queue: RX queue index 1910 */ 1911 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, 1912 struct stmmac_dma_conf *dma_conf, 1913 u32 queue) 1914 { 1915 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; 1916 1917 /* Release the DMA RX socket buffers */ 1918 if (rx_q->xsk_pool) 1919 dma_free_rx_xskbufs(priv, dma_conf, queue); 1920 else 1921 dma_free_rx_skbufs(priv, dma_conf, queue); 1922 1923 rx_q->buf_alloc_num = 0; 1924 rx_q->xsk_pool = NULL; 1925 1926 /* Free DMA regions of consistent memory previously allocated */ 1927 if (!priv->extend_desc) 1928 dma_free_coherent(priv->device, dma_conf->dma_rx_size * 1929 sizeof(struct dma_desc), 1930 rx_q->dma_rx, rx_q->dma_rx_phy); 1931 else 1932 dma_free_coherent(priv->device, dma_conf->dma_rx_size * 1933 sizeof(struct dma_extended_desc), 1934 rx_q->dma_erx, rx_q->dma_rx_phy); 1935 1936 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) 1937 xdp_rxq_info_unreg(&rx_q->xdp_rxq); 1938 1939 kfree(rx_q->buf_pool); 1940 if (rx_q->page_pool) 1941 page_pool_destroy(rx_q->page_pool); 1942 } 1943 1944 static void free_dma_rx_desc_resources(struct stmmac_priv *priv, 1945 struct stmmac_dma_conf *dma_conf) 1946 { 1947 u32 rx_count = priv->plat->rx_queues_to_use; 1948 u32 queue; 1949 1950 /* Free RX queue resources */ 1951 for (queue = 0; queue < rx_count; queue++) 1952 __free_dma_rx_desc_resources(priv, dma_conf, queue); 1953 } 1954 1955 /** 1956 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue) 1957 * @priv: private structure 1958 * @dma_conf: structure to take the dma data 1959 * @queue: TX queue index 1960 */ 1961 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, 1962 struct stmmac_dma_conf *dma_conf, 1963 u32 queue) 1964 { 1965 struct stmmac_tx_queue *tx_q = 
&dma_conf->tx_queue[queue];
1966 size_t size;
1967 void *addr;
1968
1969 /* Release the DMA TX socket buffers */
1970 dma_free_tx_skbufs(priv, dma_conf, queue);
1971
1972 if (priv->extend_desc) {
1973 size = sizeof(struct dma_extended_desc);
1974 addr = tx_q->dma_etx;
1975 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1976 size = sizeof(struct dma_edesc);
1977 addr = tx_q->dma_entx;
1978 } else {
1979 size = sizeof(struct dma_desc);
1980 addr = tx_q->dma_tx;
1981 }
1982
1983 size *= dma_conf->dma_tx_size;
1984
1985 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1986
1987 kfree(tx_q->tx_skbuff_dma);
1988 kfree(tx_q->tx_skbuff);
1989 }
1990
1991 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
1992 struct stmmac_dma_conf *dma_conf)
1993 {
1994 u32 tx_count = priv->plat->tx_queues_to_use;
1995 u32 queue;
1996
1997 /* Free TX queue resources */
1998 for (queue = 0; queue < tx_count; queue++)
1999 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2000 }
2001
2002 /**
2003 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2004 * @priv: private structure
2005 * @dma_conf: structure to take the dma data
2006 * @queue: RX queue index
2007 * Description: according to which descriptor can be used (extended or basic),
2008 * this function allocates the RX resources for a single queue: the page pool
2009 * used to pre-allocate the RX buffers (allowing a zero-copy mechanism), the
2010 * buf_pool bookkeeping array, the descriptor ring and the XDP RxQ info.
2011 */
2012 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2013 struct stmmac_dma_conf *dma_conf,
2014 u32 queue)
2015 {
2016 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2017 struct stmmac_channel *ch = &priv->channel[queue];
2018 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2019 struct page_pool_params pp_params = { 0 };
2020 unsigned int num_pages;
2021 unsigned int napi_id;
2022 int ret;
2023
2024 rx_q->queue_index = queue;
2025 rx_q->priv_data = priv;
2026
2027 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2028 pp_params.pool_size = dma_conf->dma_rx_size;
2029 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2030 pp_params.order = ilog2(num_pages);
2031 pp_params.nid = dev_to_node(priv->device);
2032 pp_params.dev = priv->device;
2033 pp_params.dma_dir = xdp_prog ?
DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2034 pp_params.offset = stmmac_rx_offset(priv);
2035 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2036
2037 rx_q->page_pool = page_pool_create(&pp_params);
2038 if (IS_ERR(rx_q->page_pool)) {
2039 ret = PTR_ERR(rx_q->page_pool);
2040 rx_q->page_pool = NULL;
2041 return ret;
2042 }
2043
2044 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2045 sizeof(*rx_q->buf_pool),
2046 GFP_KERNEL);
2047 if (!rx_q->buf_pool)
2048 return -ENOMEM;
2049
2050 if (priv->extend_desc) {
2051 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2052 dma_conf->dma_rx_size *
2053 sizeof(struct dma_extended_desc),
2054 &rx_q->dma_rx_phy,
2055 GFP_KERNEL);
2056 if (!rx_q->dma_erx)
2057 return -ENOMEM;
2058
2059 } else {
2060 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2061 dma_conf->dma_rx_size *
2062 sizeof(struct dma_desc),
2063 &rx_q->dma_rx_phy,
2064 GFP_KERNEL);
2065 if (!rx_q->dma_rx)
2066 return -ENOMEM;
2067 }
2068
2069 if (stmmac_xdp_is_enabled(priv) &&
2070 test_bit(queue, priv->af_xdp_zc_qps))
2071 napi_id = ch->rxtx_napi.napi_id;
2072 else
2073 napi_id = ch->rx_napi.napi_id;
2074
2075 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2076 rx_q->queue_index,
2077 napi_id);
2078 if (ret) {
2079 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2080 return -EINVAL;
2081 }
2082
2083 return 0;
2084 }
2085
2086 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2087 struct stmmac_dma_conf *dma_conf)
2088 {
2089 u32 rx_count = priv->plat->rx_queues_to_use;
2090 u32 queue;
2091 int ret;
2092
2093 /* RX queues buffers and DMA */
2094 for (queue = 0; queue < rx_count; queue++) {
2095 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2096 if (ret)
2097 goto err_dma;
2098 }
2099
2100 return 0;
2101
2102 err_dma:
2103 free_dma_rx_desc_resources(priv, dma_conf);
2104
2105 return ret;
2106 }
2107
2108 /**
2109 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2110 * @priv: private structure
2111 * @dma_conf: structure to take the dma data
2112 * @queue: TX queue index
2113 * Description: according to which descriptor can be used (extended, enhanced
2114 * for TBS, or basic), this function allocates the TX resources for a single
2115 * queue: the tx_skbuff/tx_skbuff_dma bookkeeping arrays and the coherent
2116 * descriptor ring.
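 * On failure, the caller (alloc_dma_tx_desc_resources) releases whatever was
 * already allocated through free_dma_tx_desc_resources().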
2117 */ 2118 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, 2119 struct stmmac_dma_conf *dma_conf, 2120 u32 queue) 2121 { 2122 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; 2123 size_t size; 2124 void *addr; 2125 2126 tx_q->queue_index = queue; 2127 tx_q->priv_data = priv; 2128 2129 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, 2130 sizeof(*tx_q->tx_skbuff_dma), 2131 GFP_KERNEL); 2132 if (!tx_q->tx_skbuff_dma) 2133 return -ENOMEM; 2134 2135 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, 2136 sizeof(struct sk_buff *), 2137 GFP_KERNEL); 2138 if (!tx_q->tx_skbuff) 2139 return -ENOMEM; 2140 2141 if (priv->extend_desc) 2142 size = sizeof(struct dma_extended_desc); 2143 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2144 size = sizeof(struct dma_edesc); 2145 else 2146 size = sizeof(struct dma_desc); 2147 2148 size *= dma_conf->dma_tx_size; 2149 2150 addr = dma_alloc_coherent(priv->device, size, 2151 &tx_q->dma_tx_phy, GFP_KERNEL); 2152 if (!addr) 2153 return -ENOMEM; 2154 2155 if (priv->extend_desc) 2156 tx_q->dma_etx = addr; 2157 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2158 tx_q->dma_entx = addr; 2159 else 2160 tx_q->dma_tx = addr; 2161 2162 return 0; 2163 } 2164 2165 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv, 2166 struct stmmac_dma_conf *dma_conf) 2167 { 2168 u32 tx_count = priv->plat->tx_queues_to_use; 2169 u32 queue; 2170 int ret; 2171 2172 /* TX queues buffers and DMA */ 2173 for (queue = 0; queue < tx_count; queue++) { 2174 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); 2175 if (ret) 2176 goto err_dma; 2177 } 2178 2179 return 0; 2180 2181 err_dma: 2182 free_dma_tx_desc_resources(priv, dma_conf); 2183 return ret; 2184 } 2185 2186 /** 2187 * alloc_dma_desc_resources - alloc TX/RX resources. 2188 * @priv: private structure 2189 * @dma_conf: structure to take the dma data 2190 * Description: according to which descriptor can be used (extend or basic) 2191 * this function allocates the resources for TX and RX paths. In case of 2192 * reception, for example, it pre-allocated the RX socket buffer in order to 2193 * allow zero-copy mechanism. 2194 */ 2195 static int alloc_dma_desc_resources(struct stmmac_priv *priv, 2196 struct stmmac_dma_conf *dma_conf) 2197 { 2198 /* RX Allocation */ 2199 int ret = alloc_dma_rx_desc_resources(priv, dma_conf); 2200 2201 if (ret) 2202 return ret; 2203 2204 ret = alloc_dma_tx_desc_resources(priv, dma_conf); 2205 2206 return ret; 2207 } 2208 2209 /** 2210 * free_dma_desc_resources - free dma desc resources 2211 * @priv: private structure 2212 * @dma_conf: structure to take the dma data 2213 */ 2214 static void free_dma_desc_resources(struct stmmac_priv *priv, 2215 struct stmmac_dma_conf *dma_conf) 2216 { 2217 /* Release the DMA TX socket buffers */ 2218 free_dma_tx_desc_resources(priv, dma_conf); 2219 2220 /* Release the DMA RX socket buffers later 2221 * to ensure all pending XDP_TX buffers are returned. 
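 * i.e. the TX path is torn down first because XDP_TX frames still queued on
 * the TX rings may reference pages owned by the RX page_pool.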
2222 */ 2223 free_dma_rx_desc_resources(priv, dma_conf); 2224 } 2225 2226 /** 2227 * stmmac_mac_enable_rx_queues - Enable MAC rx queues 2228 * @priv: driver private structure 2229 * Description: It is used for enabling the rx queues in the MAC 2230 */ 2231 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 2232 { 2233 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2234 int queue; 2235 u8 mode; 2236 2237 for (queue = 0; queue < rx_queues_count; queue++) { 2238 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 2239 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 2240 } 2241 } 2242 2243 /** 2244 * stmmac_start_rx_dma - start RX DMA channel 2245 * @priv: driver private structure 2246 * @chan: RX channel index 2247 * Description: 2248 * This starts a RX DMA channel 2249 */ 2250 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 2251 { 2252 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 2253 stmmac_start_rx(priv, priv->ioaddr, chan); 2254 } 2255 2256 /** 2257 * stmmac_start_tx_dma - start TX DMA channel 2258 * @priv: driver private structure 2259 * @chan: TX channel index 2260 * Description: 2261 * This starts a TX DMA channel 2262 */ 2263 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 2264 { 2265 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 2266 stmmac_start_tx(priv, priv->ioaddr, chan); 2267 } 2268 2269 /** 2270 * stmmac_stop_rx_dma - stop RX DMA channel 2271 * @priv: driver private structure 2272 * @chan: RX channel index 2273 * Description: 2274 * This stops a RX DMA channel 2275 */ 2276 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 2277 { 2278 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 2279 stmmac_stop_rx(priv, priv->ioaddr, chan); 2280 } 2281 2282 /** 2283 * stmmac_stop_tx_dma - stop TX DMA channel 2284 * @priv: driver private structure 2285 * @chan: TX channel index 2286 * Description: 2287 * This stops a TX DMA channel 2288 */ 2289 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 2290 { 2291 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 2292 stmmac_stop_tx(priv, priv->ioaddr, chan); 2293 } 2294 2295 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv) 2296 { 2297 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2298 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2299 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2300 u32 chan; 2301 2302 for (chan = 0; chan < dma_csr_ch; chan++) { 2303 struct stmmac_channel *ch = &priv->channel[chan]; 2304 unsigned long flags; 2305 2306 spin_lock_irqsave(&ch->lock, flags); 2307 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 2308 spin_unlock_irqrestore(&ch->lock, flags); 2309 } 2310 } 2311 2312 /** 2313 * stmmac_start_all_dma - start all RX and TX DMA channels 2314 * @priv: driver private structure 2315 * Description: 2316 * This starts all the RX and TX DMA channels 2317 */ 2318 static void stmmac_start_all_dma(struct stmmac_priv *priv) 2319 { 2320 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2321 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2322 u32 chan = 0; 2323 2324 for (chan = 0; chan < rx_channels_count; chan++) 2325 stmmac_start_rx_dma(priv, chan); 2326 2327 for (chan = 0; chan < tx_channels_count; chan++) 2328 stmmac_start_tx_dma(priv, chan); 2329 } 2330 2331 /** 2332 * stmmac_stop_all_dma - stop all RX and TX DMA channels 2333 * @priv: driver private structure 2334 * 
Description: 2335 * This stops the RX and TX DMA channels 2336 */ 2337 static void stmmac_stop_all_dma(struct stmmac_priv *priv) 2338 { 2339 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2340 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2341 u32 chan = 0; 2342 2343 for (chan = 0; chan < rx_channels_count; chan++) 2344 stmmac_stop_rx_dma(priv, chan); 2345 2346 for (chan = 0; chan < tx_channels_count; chan++) 2347 stmmac_stop_tx_dma(priv, chan); 2348 } 2349 2350 /** 2351 * stmmac_dma_operation_mode - HW DMA operation mode 2352 * @priv: driver private structure 2353 * Description: it is used for configuring the DMA operation mode register in 2354 * order to program the tx/rx DMA thresholds or Store-And-Forward mode. 2355 */ 2356 static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 2357 { 2358 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2359 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2360 int rxfifosz = priv->plat->rx_fifo_size; 2361 int txfifosz = priv->plat->tx_fifo_size; 2362 u32 txmode = 0; 2363 u32 rxmode = 0; 2364 u32 chan = 0; 2365 u8 qmode = 0; 2366 2367 if (rxfifosz == 0) 2368 rxfifosz = priv->dma_cap.rx_fifo_size; 2369 if (txfifosz == 0) 2370 txfifosz = priv->dma_cap.tx_fifo_size; 2371 2372 /* Adjust for real per queue fifo size */ 2373 rxfifosz /= rx_channels_count; 2374 txfifosz /= tx_channels_count; 2375 2376 if (priv->plat->force_thresh_dma_mode) { 2377 txmode = tc; 2378 rxmode = tc; 2379 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 2380 /* 2381 * In case of GMAC, SF mode can be enabled 2382 * to perform the TX COE in HW. This depends on: 2383 * 1) TX COE if actually supported 2384 * 2) There is no bugged Jumbo frame support 2385 * that needs to not insert csum in the TDES. 
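 * In that case both directions are programmed for Store-And-Forward and the
 * recorded threshold is set to SF_DMA_MODE.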
2386 */ 2387 txmode = SF_DMA_MODE; 2388 rxmode = SF_DMA_MODE; 2389 priv->xstats.threshold = SF_DMA_MODE; 2390 } else { 2391 txmode = tc; 2392 rxmode = SF_DMA_MODE; 2393 } 2394 2395 /* configure all channels */ 2396 for (chan = 0; chan < rx_channels_count; chan++) { 2397 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; 2398 u32 buf_size; 2399 2400 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2401 2402 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 2403 rxfifosz, qmode); 2404 2405 if (rx_q->xsk_pool) { 2406 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 2407 stmmac_set_dma_bfsize(priv, priv->ioaddr, 2408 buf_size, 2409 chan); 2410 } else { 2411 stmmac_set_dma_bfsize(priv, priv->ioaddr, 2412 priv->dma_conf.dma_buf_sz, 2413 chan); 2414 } 2415 } 2416 2417 for (chan = 0; chan < tx_channels_count; chan++) { 2418 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2419 2420 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 2421 txfifosz, qmode); 2422 } 2423 } 2424 2425 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 2426 { 2427 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); 2428 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 2429 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; 2430 struct xsk_buff_pool *pool = tx_q->xsk_pool; 2431 unsigned int entry = tx_q->cur_tx; 2432 struct dma_desc *tx_desc = NULL; 2433 struct xdp_desc xdp_desc; 2434 bool work_done = true; 2435 u32 tx_set_ic_bit = 0; 2436 unsigned long flags; 2437 2438 /* Avoids TX time-out as we are sharing with slow path */ 2439 txq_trans_cond_update(nq); 2440 2441 budget = min(budget, stmmac_tx_avail(priv, queue)); 2442 2443 while (budget-- > 0) { 2444 dma_addr_t dma_addr; 2445 bool set_ic; 2446 2447 /* We are sharing with slow path and stop XSK TX desc submission when 2448 * available TX ring is less than threshold. 2449 */ 2450 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || 2451 !netif_carrier_ok(priv->dev)) { 2452 work_done = false; 2453 break; 2454 } 2455 2456 if (!xsk_tx_peek_desc(pool, &xdp_desc)) 2457 break; 2458 2459 if (likely(priv->extend_desc)) 2460 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 2461 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2462 tx_desc = &tx_q->dma_entx[entry].basic; 2463 else 2464 tx_desc = tx_q->dma_tx + entry; 2465 2466 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr); 2467 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len); 2468 2469 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; 2470 2471 /* To return XDP buffer to XSK pool, we simple call 2472 * xsk_tx_completed(), so we don't need to fill up 2473 * 'buf' and 'xdpf'. 
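 * The completion accounting for these frames is done through xsk_frames_done
 * in stmmac_tx_clean().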
2474 */
2475 tx_q->tx_skbuff_dma[entry].buf = 0;
2476 tx_q->xdpf[entry] = NULL;
2477
2478 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2479 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2480 tx_q->tx_skbuff_dma[entry].last_segment = true;
2481 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2482
2483 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2484
2485 tx_q->tx_count_frames++;
2486
2487 if (!priv->tx_coal_frames[queue])
2488 set_ic = false;
2489 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2490 set_ic = true;
2491 else
2492 set_ic = false;
2493
2494 if (set_ic) {
2495 tx_q->tx_count_frames = 0;
2496 stmmac_set_tx_ic(priv, tx_desc);
2497 tx_set_ic_bit++;
2498 }
2499
2500 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2501 true, priv->mode, true, true,
2502 xdp_desc.len);
2503
2504 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2505
2506 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2507 entry = tx_q->cur_tx;
2508 }
2509 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
2510 txq_stats->tx_set_ic_bit += tx_set_ic_bit;
2511 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
2512
2513 if (tx_desc) {
2514 stmmac_flush_tx_descriptors(priv, queue);
2515 xsk_tx_release(pool);
2516 }
2517
2518 /* Return true only if both of the following conditions are met:
2519 * a) TX budget is still available
2520 * b) work_done == true, i.e. the XSK TX desc peek found no more
2521 * pending XSK TX descriptors to transmit
2522 */
2523 return !!budget && work_done;
2524 }
2525
2526 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2527 {
2528 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2529 tc += 64;
2530
2531 if (priv->plat->force_thresh_dma_mode)
2532 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2533 else
2534 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2535 chan);
2536
2537 priv->xstats.threshold = tc;
2538 }
2539 }
2540
2541 /**
2542 * stmmac_tx_clean - to manage the transmission completion
2543 * @priv: driver private structure
2544 * @budget: napi budget limiting this function's packet handling
2545 * @queue: TX queue index
2546 * Description: it reclaims the transmit resources after transmission completes.
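 * The return value combines the number of cleaned descriptors with the XSK
 * TX decision so that the NAPI caller can decide whether to re-arm the
 * interrupt.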
2547 */ 2548 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) 2549 { 2550 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 2551 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; 2552 unsigned int bytes_compl = 0, pkts_compl = 0; 2553 unsigned int entry, xmits = 0, count = 0; 2554 u32 tx_packets = 0, tx_errors = 0; 2555 unsigned long flags; 2556 2557 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); 2558 2559 tx_q->xsk_frames_done = 0; 2560 2561 entry = tx_q->dirty_tx; 2562 2563 /* Try to clean all TX complete frame in 1 shot */ 2564 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { 2565 struct xdp_frame *xdpf; 2566 struct sk_buff *skb; 2567 struct dma_desc *p; 2568 int status; 2569 2570 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || 2571 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 2572 xdpf = tx_q->xdpf[entry]; 2573 skb = NULL; 2574 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2575 xdpf = NULL; 2576 skb = tx_q->tx_skbuff[entry]; 2577 } else { 2578 xdpf = NULL; 2579 skb = NULL; 2580 } 2581 2582 if (priv->extend_desc) 2583 p = (struct dma_desc *)(tx_q->dma_etx + entry); 2584 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2585 p = &tx_q->dma_entx[entry].basic; 2586 else 2587 p = tx_q->dma_tx + entry; 2588 2589 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr); 2590 /* Check if the descriptor is owned by the DMA */ 2591 if (unlikely(status & tx_dma_own)) 2592 break; 2593 2594 count++; 2595 2596 /* Make sure descriptor fields are read after reading 2597 * the own bit. 2598 */ 2599 dma_rmb(); 2600 2601 /* Just consider the last segment and ...*/ 2602 if (likely(!(status & tx_not_ls))) { 2603 /* ... 
verify the status error condition */ 2604 if (unlikely(status & tx_err)) { 2605 tx_errors++; 2606 if (unlikely(status & tx_err_bump_tc)) 2607 stmmac_bump_dma_threshold(priv, queue); 2608 } else { 2609 tx_packets++; 2610 } 2611 if (skb) 2612 stmmac_get_tx_hwtstamp(priv, p, skb); 2613 } 2614 2615 if (likely(tx_q->tx_skbuff_dma[entry].buf && 2616 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { 2617 if (tx_q->tx_skbuff_dma[entry].map_as_page) 2618 dma_unmap_page(priv->device, 2619 tx_q->tx_skbuff_dma[entry].buf, 2620 tx_q->tx_skbuff_dma[entry].len, 2621 DMA_TO_DEVICE); 2622 else 2623 dma_unmap_single(priv->device, 2624 tx_q->tx_skbuff_dma[entry].buf, 2625 tx_q->tx_skbuff_dma[entry].len, 2626 DMA_TO_DEVICE); 2627 tx_q->tx_skbuff_dma[entry].buf = 0; 2628 tx_q->tx_skbuff_dma[entry].len = 0; 2629 tx_q->tx_skbuff_dma[entry].map_as_page = false; 2630 } 2631 2632 stmmac_clean_desc3(priv, tx_q, p); 2633 2634 tx_q->tx_skbuff_dma[entry].last_segment = false; 2635 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2636 2637 if (xdpf && 2638 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { 2639 xdp_return_frame_rx_napi(xdpf); 2640 tx_q->xdpf[entry] = NULL; 2641 } 2642 2643 if (xdpf && 2644 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { 2645 xdp_return_frame(xdpf); 2646 tx_q->xdpf[entry] = NULL; 2647 } 2648 2649 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) 2650 tx_q->xsk_frames_done++; 2651 2652 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { 2653 if (likely(skb)) { 2654 pkts_compl++; 2655 bytes_compl += skb->len; 2656 dev_consume_skb_any(skb); 2657 tx_q->tx_skbuff[entry] = NULL; 2658 } 2659 } 2660 2661 stmmac_release_tx_desc(priv, p, priv->mode); 2662 2663 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 2664 } 2665 tx_q->dirty_tx = entry; 2666 2667 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), 2668 pkts_compl, bytes_compl); 2669 2670 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, 2671 queue))) && 2672 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { 2673 2674 netif_dbg(priv, tx_done, priv->dev, 2675 "%s: restart transmit\n", __func__); 2676 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); 2677 } 2678 2679 if (tx_q->xsk_pool) { 2680 bool work_done; 2681 2682 if (tx_q->xsk_frames_done) 2683 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); 2684 2685 if (xsk_uses_need_wakeup(tx_q->xsk_pool)) 2686 xsk_set_tx_need_wakeup(tx_q->xsk_pool); 2687 2688 /* For XSK TX, we try to send as many as possible. 2689 * If XSK work done (XSK TX desc empty and budget still 2690 * available), return "budget - 1" to reenable TX IRQ. 2691 * Else, return "budget" to make NAPI continue polling. 
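 * (i.e. xmits is set to "budget - 1" when the XSK work is done and to
 * "budget" otherwise.)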
2692 */ 2693 work_done = stmmac_xdp_xmit_zc(priv, queue, 2694 STMMAC_XSK_TX_BUDGET_MAX); 2695 if (work_done) 2696 xmits = budget - 1; 2697 else 2698 xmits = budget; 2699 } 2700 2701 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && 2702 priv->eee_sw_timer_en) { 2703 if (stmmac_enable_eee_mode(priv)) 2704 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 2705 } 2706 2707 /* We still have pending packets, let's call for a new scheduling */ 2708 if (tx_q->dirty_tx != tx_q->cur_tx) 2709 stmmac_tx_timer_arm(priv, queue); 2710 2711 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 2712 txq_stats->tx_packets += tx_packets; 2713 txq_stats->tx_pkt_n += tx_packets; 2714 txq_stats->tx_clean++; 2715 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 2716 2717 priv->xstats.tx_errors += tx_errors; 2718 2719 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); 2720 2721 /* Combine decisions from TX clean and XSK TX */ 2722 return max(count, xmits); 2723 } 2724 2725 /** 2726 * stmmac_tx_err - to manage the tx error 2727 * @priv: driver private structure 2728 * @chan: channel index 2729 * Description: it cleans the descriptors and restarts the transmission 2730 * in case of transmission errors. 2731 */ 2732 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) 2733 { 2734 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 2735 2736 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); 2737 2738 stmmac_stop_tx_dma(priv, chan); 2739 dma_free_tx_skbufs(priv, &priv->dma_conf, chan); 2740 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); 2741 stmmac_reset_tx_queue(priv, chan); 2742 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2743 tx_q->dma_tx_phy, chan); 2744 stmmac_start_tx_dma(priv, chan); 2745 2746 priv->xstats.tx_errors++; 2747 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); 2748 } 2749 2750 /** 2751 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel 2752 * @priv: driver private structure 2753 * @txmode: TX operating mode 2754 * @rxmode: RX operating mode 2755 * @chan: channel index 2756 * Description: it is used for configuring of the DMA operation mode in 2757 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward 2758 * mode. 
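 * The per-channel FIFO sizes are derived by splitting the platform (or HW
 * capability) FIFO size evenly across the channels in use.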
2759 */ 2760 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 2761 u32 rxmode, u32 chan) 2762 { 2763 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2764 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2765 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2766 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2767 int rxfifosz = priv->plat->rx_fifo_size; 2768 int txfifosz = priv->plat->tx_fifo_size; 2769 2770 if (rxfifosz == 0) 2771 rxfifosz = priv->dma_cap.rx_fifo_size; 2772 if (txfifosz == 0) 2773 txfifosz = priv->dma_cap.tx_fifo_size; 2774 2775 /* Adjust for real per queue fifo size */ 2776 rxfifosz /= rx_channels_count; 2777 txfifosz /= tx_channels_count; 2778 2779 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); 2780 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); 2781 } 2782 2783 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 2784 { 2785 int ret; 2786 2787 ret = stmmac_safety_feat_irq_status(priv, priv->dev, 2788 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2789 if (ret && (ret != -EINVAL)) { 2790 stmmac_global_err(priv); 2791 return true; 2792 } 2793 2794 return false; 2795 } 2796 2797 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir) 2798 { 2799 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, 2800 &priv->xstats, chan, dir); 2801 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; 2802 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 2803 struct stmmac_channel *ch = &priv->channel[chan]; 2804 struct napi_struct *rx_napi; 2805 struct napi_struct *tx_napi; 2806 unsigned long flags; 2807 2808 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; 2809 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 2810 2811 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { 2812 if (napi_schedule_prep(rx_napi)) { 2813 spin_lock_irqsave(&ch->lock, flags); 2814 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 2815 spin_unlock_irqrestore(&ch->lock, flags); 2816 __napi_schedule(rx_napi); 2817 } 2818 } 2819 2820 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { 2821 if (napi_schedule_prep(tx_napi)) { 2822 spin_lock_irqsave(&ch->lock, flags); 2823 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 2824 spin_unlock_irqrestore(&ch->lock, flags); 2825 __napi_schedule(tx_napi); 2826 } 2827 } 2828 2829 return status; 2830 } 2831 2832 /** 2833 * stmmac_dma_interrupt - DMA ISR 2834 * @priv: driver private structure 2835 * Description: this is the DMA ISR. It is called by the main ISR. 2836 * It calls the dwmac dma routine and schedule poll method in case of some 2837 * work can be done. 2838 */ 2839 static void stmmac_dma_interrupt(struct stmmac_priv *priv) 2840 { 2841 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2842 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2843 u32 channels_to_check = tx_channel_count > rx_channel_count ? 2844 tx_channel_count : rx_channel_count; 2845 u32 chan; 2846 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; 2847 2848 /* Make sure we never check beyond our status buffer. 
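 * (the status array is sized for the larger of MTL_MAX_TX_QUEUES and
 * MTL_MAX_RX_QUEUES).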
*/
2849 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2850 channels_to_check = ARRAY_SIZE(status);
2851
2852 for (chan = 0; chan < channels_to_check; chan++)
2853 status[chan] = stmmac_napi_check(priv, chan,
2854 DMA_DIR_RXTX);
2855
2856 for (chan = 0; chan < tx_channel_count; chan++) {
2857 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2858 /* Try to bump up the dma threshold on this failure */
2859 stmmac_bump_dma_threshold(priv, chan);
2860 } else if (unlikely(status[chan] == tx_hard_error)) {
2861 stmmac_tx_err(priv, chan);
2862 }
2863 }
2864 }
2865
2866 /**
2867 * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
2868 * @priv: driver private structure
2869 * Description: this masks the MMC irq since the counters are managed in SW.
2870 */
2871 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2872 {
2873 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2874 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2875
2876 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2877
2878 if (priv->dma_cap.rmon) {
2879 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2880 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2881 } else
2882 netdev_info(priv->dev, "No MAC Management Counters available\n");
2883 }
2884
2885 /**
2886 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2887 * @priv: driver private structure
2888 * Description:
2889 * new GMAC chip generations have a new register to indicate the
2890 * presence of the optional feature/functions.
2891 * This can also be used to override the value passed through the
2892 * platform and is necessary for old MAC10/100 and GMAC chips.
2893 */
2894 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2895 {
2896 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2897 }
2898
2899 /**
2900 * stmmac_check_ether_addr - check if the MAC addr is valid
2901 * @priv: driver private structure
2902 * Description:
2903 * it verifies that the MAC address is valid and, in case of failure, it
2904 * generates a random MAC address
2905 */
2906 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2907 {
2908 u8 addr[ETH_ALEN];
2909
2910 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2911 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2912 if (is_valid_ether_addr(addr))
2913 eth_hw_addr_set(priv->dev, addr);
2914 else
2915 eth_hw_addr_random(priv->dev);
2916 dev_info(priv->device, "device MAC address %pM\n",
2917 priv->dev->dev_addr);
2918 }
2919 }
2920
2921 /**
2922 * stmmac_init_dma_engine - DMA init.
2923 * @priv: driver private structure
2924 * Description:
2925 * It inits the DMA invoking the specific MAC/GMAC callback.
2926 * Some DMA parameters can be passed from the platform;
2927 * in case these are not passed, a default is kept for the MAC or GMAC.
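 * The DMA is soft-reset, the CSR channels are configured with their
 * interrupts masked and the RX/TX tail pointers are programmed per channel.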
2928 */ 2929 static int stmmac_init_dma_engine(struct stmmac_priv *priv) 2930 { 2931 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2932 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2933 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2934 struct stmmac_rx_queue *rx_q; 2935 struct stmmac_tx_queue *tx_q; 2936 u32 chan = 0; 2937 int atds = 0; 2938 int ret = 0; 2939 2940 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { 2941 dev_err(priv->device, "Invalid DMA configuration\n"); 2942 return -EINVAL; 2943 } 2944 2945 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2946 atds = 1; 2947 2948 ret = stmmac_reset(priv, priv->ioaddr); 2949 if (ret) { 2950 dev_err(priv->device, "Failed to reset the dma\n"); 2951 return ret; 2952 } 2953 2954 /* DMA Configuration */ 2955 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); 2956 2957 if (priv->plat->axi) 2958 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 2959 2960 /* DMA CSR Channel configuration */ 2961 for (chan = 0; chan < dma_csr_ch; chan++) { 2962 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 2963 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 2964 } 2965 2966 /* DMA RX Channel Configuration */ 2967 for (chan = 0; chan < rx_channels_count; chan++) { 2968 rx_q = &priv->dma_conf.rx_queue[chan]; 2969 2970 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2971 rx_q->dma_rx_phy, chan); 2972 2973 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2974 (rx_q->buf_alloc_num * 2975 sizeof(struct dma_desc)); 2976 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2977 rx_q->rx_tail_addr, chan); 2978 } 2979 2980 /* DMA TX Channel Configuration */ 2981 for (chan = 0; chan < tx_channels_count; chan++) { 2982 tx_q = &priv->dma_conf.tx_queue[chan]; 2983 2984 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2985 tx_q->dma_tx_phy, chan); 2986 2987 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 2988 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2989 tx_q->tx_tail_addr, chan); 2990 } 2991 2992 return ret; 2993 } 2994 2995 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) 2996 { 2997 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 2998 u32 tx_coal_timer = priv->tx_coal_timer[queue]; 2999 3000 if (!tx_coal_timer) 3001 return; 3002 3003 hrtimer_start(&tx_q->txtimer, 3004 STMMAC_COAL_TIMER(tx_coal_timer), 3005 HRTIMER_MODE_REL); 3006 } 3007 3008 /** 3009 * stmmac_tx_timer - mitigation sw timer for tx. 3010 * @t: data pointer 3011 * Description: 3012 * This is the timer handler to directly invoke the stmmac_tx_clean. 3013 */ 3014 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) 3015 { 3016 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); 3017 struct stmmac_priv *priv = tx_q->priv_data; 3018 struct stmmac_channel *ch; 3019 struct napi_struct *napi; 3020 3021 ch = &priv->channel[tx_q->queue_index]; 3022 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; 3023 3024 if (likely(napi_schedule_prep(napi))) { 3025 unsigned long flags; 3026 3027 spin_lock_irqsave(&ch->lock, flags); 3028 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); 3029 spin_unlock_irqrestore(&ch->lock, flags); 3030 __napi_schedule(napi); 3031 } 3032 3033 return HRTIMER_NORESTART; 3034 } 3035 3036 /** 3037 * stmmac_init_coalesce - init mitigation options. 3038 * @priv: driver private structure 3039 * Description: 3040 * This inits the coalesce parameters: i.e. 
timer rate, 3041 * timer handler and default threshold used for enabling the 3042 * interrupt on completion bit. 3043 */ 3044 static void stmmac_init_coalesce(struct stmmac_priv *priv) 3045 { 3046 u32 tx_channel_count = priv->plat->tx_queues_to_use; 3047 u32 rx_channel_count = priv->plat->rx_queues_to_use; 3048 u32 chan; 3049 3050 for (chan = 0; chan < tx_channel_count; chan++) { 3051 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3052 3053 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; 3054 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; 3055 3056 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 3057 tx_q->txtimer.function = stmmac_tx_timer; 3058 } 3059 3060 for (chan = 0; chan < rx_channel_count; chan++) 3061 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; 3062 } 3063 3064 static void stmmac_set_rings_length(struct stmmac_priv *priv) 3065 { 3066 u32 rx_channels_count = priv->plat->rx_queues_to_use; 3067 u32 tx_channels_count = priv->plat->tx_queues_to_use; 3068 u32 chan; 3069 3070 /* set TX ring length */ 3071 for (chan = 0; chan < tx_channels_count; chan++) 3072 stmmac_set_tx_ring_len(priv, priv->ioaddr, 3073 (priv->dma_conf.dma_tx_size - 1), chan); 3074 3075 /* set RX ring length */ 3076 for (chan = 0; chan < rx_channels_count; chan++) 3077 stmmac_set_rx_ring_len(priv, priv->ioaddr, 3078 (priv->dma_conf.dma_rx_size - 1), chan); 3079 } 3080 3081 /** 3082 * stmmac_set_tx_queue_weight - Set TX queue weight 3083 * @priv: driver private structure 3084 * Description: It is used for setting TX queues weight 3085 */ 3086 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 3087 { 3088 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3089 u32 weight; 3090 u32 queue; 3091 3092 for (queue = 0; queue < tx_queues_count; queue++) { 3093 weight = priv->plat->tx_queues_cfg[queue].weight; 3094 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 3095 } 3096 } 3097 3098 /** 3099 * stmmac_configure_cbs - Configure CBS in TX queue 3100 * @priv: driver private structure 3101 * Description: It is used for configuring CBS in AVB TX queues 3102 */ 3103 static void stmmac_configure_cbs(struct stmmac_priv *priv) 3104 { 3105 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3106 u32 mode_to_use; 3107 u32 queue; 3108 3109 /* queue 0 is reserved for legacy traffic */ 3110 for (queue = 1; queue < tx_queues_count; queue++) { 3111 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 3112 if (mode_to_use == MTL_QUEUE_DCB) 3113 continue; 3114 3115 stmmac_config_cbs(priv, priv->hw, 3116 priv->plat->tx_queues_cfg[queue].send_slope, 3117 priv->plat->tx_queues_cfg[queue].idle_slope, 3118 priv->plat->tx_queues_cfg[queue].high_credit, 3119 priv->plat->tx_queues_cfg[queue].low_credit, 3120 queue); 3121 } 3122 } 3123 3124 /** 3125 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 3126 * @priv: driver private structure 3127 * Description: It is used for mapping RX queues to RX dma channels 3128 */ 3129 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 3130 { 3131 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3132 u32 queue; 3133 u32 chan; 3134 3135 for (queue = 0; queue < rx_queues_count; queue++) { 3136 chan = priv->plat->rx_queues_cfg[queue].chan; 3137 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 3138 } 3139 } 3140 3141 /** 3142 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 3143 * @priv: driver private structure 3144 * Description: It is used for configuring the RX Queue Priority 3145 */ 3146 
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 3147 { 3148 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3149 u32 queue; 3150 u32 prio; 3151 3152 for (queue = 0; queue < rx_queues_count; queue++) { 3153 if (!priv->plat->rx_queues_cfg[queue].use_prio) 3154 continue; 3155 3156 prio = priv->plat->rx_queues_cfg[queue].prio; 3157 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 3158 } 3159 } 3160 3161 /** 3162 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 3163 * @priv: driver private structure 3164 * Description: It is used for configuring the TX Queue Priority 3165 */ 3166 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 3167 { 3168 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3169 u32 queue; 3170 u32 prio; 3171 3172 for (queue = 0; queue < tx_queues_count; queue++) { 3173 if (!priv->plat->tx_queues_cfg[queue].use_prio) 3174 continue; 3175 3176 prio = priv->plat->tx_queues_cfg[queue].prio; 3177 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 3178 } 3179 } 3180 3181 /** 3182 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 3183 * @priv: driver private structure 3184 * Description: It is used for configuring the RX queue routing 3185 */ 3186 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 3187 { 3188 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3189 u32 queue; 3190 u8 packet; 3191 3192 for (queue = 0; queue < rx_queues_count; queue++) { 3193 /* no specific packet type routing specified for the queue */ 3194 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 3195 continue; 3196 3197 packet = priv->plat->rx_queues_cfg[queue].pkt_route; 3198 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 3199 } 3200 } 3201 3202 static void stmmac_mac_config_rss(struct stmmac_priv *priv) 3203 { 3204 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { 3205 priv->rss.enable = false; 3206 return; 3207 } 3208 3209 if (priv->dev->features & NETIF_F_RXHASH) 3210 priv->rss.enable = true; 3211 else 3212 priv->rss.enable = false; 3213 3214 stmmac_rss_configure(priv, priv->hw, &priv->rss, 3215 priv->plat->rx_queues_to_use); 3216 } 3217 3218 /** 3219 * stmmac_mtl_configuration - Configure MTL 3220 * @priv: driver private structure 3221 * Description: It is used for configurring MTL 3222 */ 3223 static void stmmac_mtl_configuration(struct stmmac_priv *priv) 3224 { 3225 u32 rx_queues_count = priv->plat->rx_queues_to_use; 3226 u32 tx_queues_count = priv->plat->tx_queues_to_use; 3227 3228 if (tx_queues_count > 1) 3229 stmmac_set_tx_queue_weight(priv); 3230 3231 /* Configure MTL RX algorithms */ 3232 if (rx_queues_count > 1) 3233 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 3234 priv->plat->rx_sched_algorithm); 3235 3236 /* Configure MTL TX algorithms */ 3237 if (tx_queues_count > 1) 3238 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 3239 priv->plat->tx_sched_algorithm); 3240 3241 /* Configure CBS in AVB TX queues */ 3242 if (tx_queues_count > 1) 3243 stmmac_configure_cbs(priv); 3244 3245 /* Map RX MTL to DMA channels */ 3246 stmmac_rx_queue_dma_chan_map(priv); 3247 3248 /* Enable MAC RX Queues */ 3249 stmmac_mac_enable_rx_queues(priv); 3250 3251 /* Set RX priorities */ 3252 if (rx_queues_count > 1) 3253 stmmac_mac_config_rx_queues_prio(priv); 3254 3255 /* Set TX priorities */ 3256 if (tx_queues_count > 1) 3257 stmmac_mac_config_tx_queues_prio(priv); 3258 3259 /* Set RX routing */ 3260 if (rx_queues_count > 1) 3261 stmmac_mac_config_rx_queues_routing(priv); 3262 3263 /* Receive Side 
Scaling */ 3264 if (rx_queues_count > 1) 3265 stmmac_mac_config_rss(priv); 3266 } 3267 3268 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) 3269 { 3270 if (priv->dma_cap.asp) { 3271 netdev_info(priv->dev, "Enabling Safety Features\n"); 3272 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, 3273 priv->plat->safety_feat_cfg); 3274 } else { 3275 netdev_info(priv->dev, "No Safety Features support found\n"); 3276 } 3277 } 3278 3279 static int stmmac_fpe_start_wq(struct stmmac_priv *priv) 3280 { 3281 char *name; 3282 3283 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 3284 clear_bit(__FPE_REMOVING, &priv->fpe_task_state); 3285 3286 name = priv->wq_name; 3287 sprintf(name, "%s-fpe", priv->dev->name); 3288 3289 priv->fpe_wq = create_singlethread_workqueue(name); 3290 if (!priv->fpe_wq) { 3291 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); 3292 3293 return -ENOMEM; 3294 } 3295 netdev_info(priv->dev, "FPE workqueue start"); 3296 3297 return 0; 3298 } 3299 3300 /** 3301 * stmmac_hw_setup - setup mac in a usable state. 3302 * @dev : pointer to the device structure. 3303 * @ptp_register: register PTP if set 3304 * Description: 3305 * this is the main function to setup the HW in a usable state because the 3306 * dma engine is reset, the core registers are configured (e.g. AXI, 3307 * Checksum features, timers). The DMA is ready to start receiving and 3308 * transmitting. 3309 * Return value: 3310 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3311 * file on failure. 3312 */ 3313 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) 3314 { 3315 struct stmmac_priv *priv = netdev_priv(dev); 3316 u32 rx_cnt = priv->plat->rx_queues_to_use; 3317 u32 tx_cnt = priv->plat->tx_queues_to_use; 3318 bool sph_en; 3319 u32 chan; 3320 int ret; 3321 3322 /* DMA initialization and SW reset */ 3323 ret = stmmac_init_dma_engine(priv); 3324 if (ret < 0) { 3325 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 3326 __func__); 3327 return ret; 3328 } 3329 3330 /* Copy the MAC addr into the HW */ 3331 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 3332 3333 /* PS and related bits will be programmed according to the speed */ 3334 if (priv->hw->pcs) { 3335 int speed = priv->plat->mac_port_sel_speed; 3336 3337 if ((speed == SPEED_10) || (speed == SPEED_100) || 3338 (speed == SPEED_1000)) { 3339 priv->hw->ps = speed; 3340 } else { 3341 dev_warn(priv->device, "invalid port speed\n"); 3342 priv->hw->ps = 0; 3343 } 3344 } 3345 3346 /* Initialize the MAC Core */ 3347 stmmac_core_init(priv, priv->hw, dev); 3348 3349 /* Initialize MTL*/ 3350 stmmac_mtl_configuration(priv); 3351 3352 /* Initialize Safety Features */ 3353 stmmac_safety_feat_configuration(priv); 3354 3355 ret = stmmac_rx_ipc(priv, priv->hw); 3356 if (!ret) { 3357 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 3358 priv->plat->rx_coe = STMMAC_RX_COE_NONE; 3359 priv->hw->rx_csum = 0; 3360 } 3361 3362 /* Enable the MAC Rx/Tx */ 3363 stmmac_mac_set(priv, priv->ioaddr, true); 3364 3365 /* Set the HW DMA mode and the COE */ 3366 stmmac_dma_operation_mode(priv); 3367 3368 stmmac_mmc_setup(priv); 3369 3370 if (ptp_register) { 3371 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 3372 if (ret < 0) 3373 netdev_warn(priv->dev, 3374 "failed to enable PTP reference clock: %pe\n", 3375 ERR_PTR(ret)); 3376 } 3377 3378 ret = stmmac_init_ptp(priv); 3379 if (ret == -EOPNOTSUPP) 3380 netdev_info(priv->dev, "PTP not supported by HW\n"); 3381 else if (ret) 
3382 netdev_warn(priv->dev, "PTP init failed\n"); 3383 else if (ptp_register) 3384 stmmac_ptp_register(priv); 3385 3386 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; 3387 3388 /* Convert the timer from msec to usec */ 3389 if (!priv->tx_lpi_timer) 3390 priv->tx_lpi_timer = eee_timer * 1000; 3391 3392 if (priv->use_riwt) { 3393 u32 queue; 3394 3395 for (queue = 0; queue < rx_cnt; queue++) { 3396 if (!priv->rx_riwt[queue]) 3397 priv->rx_riwt[queue] = DEF_DMA_RIWT; 3398 3399 stmmac_rx_watchdog(priv, priv->ioaddr, 3400 priv->rx_riwt[queue], queue); 3401 } 3402 } 3403 3404 if (priv->hw->pcs) 3405 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); 3406 3407 /* set TX and RX rings length */ 3408 stmmac_set_rings_length(priv); 3409 3410 /* Enable TSO */ 3411 if (priv->tso) { 3412 for (chan = 0; chan < tx_cnt; chan++) { 3413 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3414 3415 /* TSO and TBS cannot co-exist */ 3416 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3417 continue; 3418 3419 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 3420 } 3421 } 3422 3423 /* Enable Split Header */ 3424 sph_en = (priv->hw->rx_csum > 0) && priv->sph; 3425 for (chan = 0; chan < rx_cnt; chan++) 3426 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 3427 3428 3429 /* VLAN Tag Insertion */ 3430 if (priv->dma_cap.vlins) 3431 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); 3432 3433 /* TBS */ 3434 for (chan = 0; chan < tx_cnt; chan++) { 3435 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; 3436 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; 3437 3438 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); 3439 } 3440 3441 /* Configure real RX and TX queues */ 3442 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); 3443 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); 3444 3445 /* Start the ball rolling... 
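 * All RX and TX DMA channels are started here; the FPE workqueue is only
 * created when the hardware advertises fpesel.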
*/ 3446 stmmac_start_all_dma(priv); 3447 3448 if (priv->dma_cap.fpesel) { 3449 stmmac_fpe_start_wq(priv); 3450 3451 if (priv->plat->fpe_cfg->enable) 3452 stmmac_fpe_handshake(priv, true); 3453 } 3454 3455 return 0; 3456 } 3457 3458 static void stmmac_hw_teardown(struct net_device *dev) 3459 { 3460 struct stmmac_priv *priv = netdev_priv(dev); 3461 3462 clk_disable_unprepare(priv->plat->clk_ptp_ref); 3463 } 3464 3465 static void stmmac_free_irq(struct net_device *dev, 3466 enum request_irq_err irq_err, int irq_idx) 3467 { 3468 struct stmmac_priv *priv = netdev_priv(dev); 3469 int j; 3470 3471 switch (irq_err) { 3472 case REQ_IRQ_ERR_ALL: 3473 irq_idx = priv->plat->tx_queues_to_use; 3474 fallthrough; 3475 case REQ_IRQ_ERR_TX: 3476 for (j = irq_idx - 1; j >= 0; j--) { 3477 if (priv->tx_irq[j] > 0) { 3478 irq_set_affinity_hint(priv->tx_irq[j], NULL); 3479 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); 3480 } 3481 } 3482 irq_idx = priv->plat->rx_queues_to_use; 3483 fallthrough; 3484 case REQ_IRQ_ERR_RX: 3485 for (j = irq_idx - 1; j >= 0; j--) { 3486 if (priv->rx_irq[j] > 0) { 3487 irq_set_affinity_hint(priv->rx_irq[j], NULL); 3488 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); 3489 } 3490 } 3491 3492 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) 3493 free_irq(priv->sfty_ue_irq, dev); 3494 fallthrough; 3495 case REQ_IRQ_ERR_SFTY_UE: 3496 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) 3497 free_irq(priv->sfty_ce_irq, dev); 3498 fallthrough; 3499 case REQ_IRQ_ERR_SFTY_CE: 3500 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) 3501 free_irq(priv->lpi_irq, dev); 3502 fallthrough; 3503 case REQ_IRQ_ERR_LPI: 3504 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) 3505 free_irq(priv->wol_irq, dev); 3506 fallthrough; 3507 case REQ_IRQ_ERR_WOL: 3508 free_irq(dev->irq, dev); 3509 fallthrough; 3510 case REQ_IRQ_ERR_MAC: 3511 case REQ_IRQ_ERR_NO: 3512 /* If MAC IRQ request error, no more IRQ to free */ 3513 break; 3514 } 3515 } 3516 3517 static int stmmac_request_irq_multi_msi(struct net_device *dev) 3518 { 3519 struct stmmac_priv *priv = netdev_priv(dev); 3520 enum request_irq_err irq_err; 3521 cpumask_t cpu_mask; 3522 int irq_idx = 0; 3523 char *int_name; 3524 int ret; 3525 int i; 3526 3527 /* For common interrupt */ 3528 int_name = priv->int_name_mac; 3529 sprintf(int_name, "%s:%s", dev->name, "mac"); 3530 ret = request_irq(dev->irq, stmmac_mac_interrupt, 3531 0, int_name, dev); 3532 if (unlikely(ret < 0)) { 3533 netdev_err(priv->dev, 3534 "%s: alloc mac MSI %d (error: %d)\n", 3535 __func__, dev->irq, ret); 3536 irq_err = REQ_IRQ_ERR_MAC; 3537 goto irq_error; 3538 } 3539 3540 /* Request the Wake IRQ in case of another line 3541 * is used for WoL 3542 */ 3543 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { 3544 int_name = priv->int_name_wol; 3545 sprintf(int_name, "%s:%s", dev->name, "wol"); 3546 ret = request_irq(priv->wol_irq, 3547 stmmac_mac_interrupt, 3548 0, int_name, dev); 3549 if (unlikely(ret < 0)) { 3550 netdev_err(priv->dev, 3551 "%s: alloc wol MSI %d (error: %d)\n", 3552 __func__, priv->wol_irq, ret); 3553 irq_err = REQ_IRQ_ERR_WOL; 3554 goto irq_error; 3555 } 3556 } 3557 3558 /* Request the LPI IRQ in case of another line 3559 * is used for LPI 3560 */ 3561 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 3562 int_name = priv->int_name_lpi; 3563 sprintf(int_name, "%s:%s", dev->name, "lpi"); 3564 ret = request_irq(priv->lpi_irq, 3565 stmmac_mac_interrupt, 3566 0, int_name, dev); 3567 if (unlikely(ret < 0)) { 3568 netdev_err(priv->dev, 
3569 "%s: alloc lpi MSI %d (error: %d)\n", 3570 __func__, priv->lpi_irq, ret); 3571 irq_err = REQ_IRQ_ERR_LPI; 3572 goto irq_error; 3573 } 3574 } 3575 3576 /* Request the Safety Feature Correctible Error line in 3577 * case of another line is used 3578 */ 3579 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { 3580 int_name = priv->int_name_sfty_ce; 3581 sprintf(int_name, "%s:%s", dev->name, "safety-ce"); 3582 ret = request_irq(priv->sfty_ce_irq, 3583 stmmac_safety_interrupt, 3584 0, int_name, dev); 3585 if (unlikely(ret < 0)) { 3586 netdev_err(priv->dev, 3587 "%s: alloc sfty ce MSI %d (error: %d)\n", 3588 __func__, priv->sfty_ce_irq, ret); 3589 irq_err = REQ_IRQ_ERR_SFTY_CE; 3590 goto irq_error; 3591 } 3592 } 3593 3594 /* Request the Safety Feature Uncorrectible Error line in 3595 * case of another line is used 3596 */ 3597 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { 3598 int_name = priv->int_name_sfty_ue; 3599 sprintf(int_name, "%s:%s", dev->name, "safety-ue"); 3600 ret = request_irq(priv->sfty_ue_irq, 3601 stmmac_safety_interrupt, 3602 0, int_name, dev); 3603 if (unlikely(ret < 0)) { 3604 netdev_err(priv->dev, 3605 "%s: alloc sfty ue MSI %d (error: %d)\n", 3606 __func__, priv->sfty_ue_irq, ret); 3607 irq_err = REQ_IRQ_ERR_SFTY_UE; 3608 goto irq_error; 3609 } 3610 } 3611 3612 /* Request Rx MSI irq */ 3613 for (i = 0; i < priv->plat->rx_queues_to_use; i++) { 3614 if (i >= MTL_MAX_RX_QUEUES) 3615 break; 3616 if (priv->rx_irq[i] == 0) 3617 continue; 3618 3619 int_name = priv->int_name_rx_irq[i]; 3620 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); 3621 ret = request_irq(priv->rx_irq[i], 3622 stmmac_msi_intr_rx, 3623 0, int_name, &priv->dma_conf.rx_queue[i]); 3624 if (unlikely(ret < 0)) { 3625 netdev_err(priv->dev, 3626 "%s: alloc rx-%d MSI %d (error: %d)\n", 3627 __func__, i, priv->rx_irq[i], ret); 3628 irq_err = REQ_IRQ_ERR_RX; 3629 irq_idx = i; 3630 goto irq_error; 3631 } 3632 cpumask_clear(&cpu_mask); 3633 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 3634 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); 3635 } 3636 3637 /* Request Tx MSI irq */ 3638 for (i = 0; i < priv->plat->tx_queues_to_use; i++) { 3639 if (i >= MTL_MAX_TX_QUEUES) 3640 break; 3641 if (priv->tx_irq[i] == 0) 3642 continue; 3643 3644 int_name = priv->int_name_tx_irq[i]; 3645 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); 3646 ret = request_irq(priv->tx_irq[i], 3647 stmmac_msi_intr_tx, 3648 0, int_name, &priv->dma_conf.tx_queue[i]); 3649 if (unlikely(ret < 0)) { 3650 netdev_err(priv->dev, 3651 "%s: alloc tx-%d MSI %d (error: %d)\n", 3652 __func__, i, priv->tx_irq[i], ret); 3653 irq_err = REQ_IRQ_ERR_TX; 3654 irq_idx = i; 3655 goto irq_error; 3656 } 3657 cpumask_clear(&cpu_mask); 3658 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); 3659 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); 3660 } 3661 3662 return 0; 3663 3664 irq_error: 3665 stmmac_free_irq(dev, irq_err, irq_idx); 3666 return ret; 3667 } 3668 3669 static int stmmac_request_irq_single(struct net_device *dev) 3670 { 3671 struct stmmac_priv *priv = netdev_priv(dev); 3672 enum request_irq_err irq_err; 3673 int ret; 3674 3675 ret = request_irq(dev->irq, stmmac_interrupt, 3676 IRQF_SHARED, dev->name, dev); 3677 if (unlikely(ret < 0)) { 3678 netdev_err(priv->dev, 3679 "%s: ERROR: allocating the IRQ %d (error: %d)\n", 3680 __func__, dev->irq, ret); 3681 irq_err = REQ_IRQ_ERR_MAC; 3682 goto irq_error; 3683 } 3684 3685 /* Request the Wake IRQ in case of another line 3686 * is used for WoL 3687 */ 3688 if (priv->wol_irq > 0 
&& priv->wol_irq != dev->irq) { 3689 ret = request_irq(priv->wol_irq, stmmac_interrupt, 3690 IRQF_SHARED, dev->name, dev); 3691 if (unlikely(ret < 0)) { 3692 netdev_err(priv->dev, 3693 "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 3694 __func__, priv->wol_irq, ret); 3695 irq_err = REQ_IRQ_ERR_WOL; 3696 goto irq_error; 3697 } 3698 } 3699 3700 /* Request the IRQ lines */ 3701 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { 3702 ret = request_irq(priv->lpi_irq, stmmac_interrupt, 3703 IRQF_SHARED, dev->name, dev); 3704 if (unlikely(ret < 0)) { 3705 netdev_err(priv->dev, 3706 "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 3707 __func__, priv->lpi_irq, ret); 3708 irq_err = REQ_IRQ_ERR_LPI; 3709 goto irq_error; 3710 } 3711 } 3712 3713 return 0; 3714 3715 irq_error: 3716 stmmac_free_irq(dev, irq_err, 0); 3717 return ret; 3718 } 3719 3720 static int stmmac_request_irq(struct net_device *dev) 3721 { 3722 struct stmmac_priv *priv = netdev_priv(dev); 3723 int ret; 3724 3725 /* Request the IRQ lines */ 3726 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) 3727 ret = stmmac_request_irq_multi_msi(dev); 3728 else 3729 ret = stmmac_request_irq_single(dev); 3730 3731 return ret; 3732 } 3733 3734 /** 3735 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue 3736 * @priv: driver private structure 3737 * @mtu: MTU to setup the dma queue and buf with 3738 * Description: Allocate and generate a dma_conf based on the provided MTU. 3739 * Allocate the Tx/Rx DMA queue and init them. 3740 * Return value: 3741 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure. 3742 */ 3743 static struct stmmac_dma_conf * 3744 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu) 3745 { 3746 struct stmmac_dma_conf *dma_conf; 3747 int chan, bfsize, ret; 3748 3749 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL); 3750 if (!dma_conf) { 3751 netdev_err(priv->dev, "%s: DMA conf allocation failed\n", 3752 __func__); 3753 return ERR_PTR(-ENOMEM); 3754 } 3755 3756 bfsize = stmmac_set_16kib_bfsize(priv, mtu); 3757 if (bfsize < 0) 3758 bfsize = 0; 3759 3760 if (bfsize < BUF_SIZE_16KiB) 3761 bfsize = stmmac_set_bfsize(mtu, 0); 3762 3763 dma_conf->dma_buf_sz = bfsize; 3764 /* Chose the tx/rx size from the already defined one in the 3765 * priv struct. (if defined) 3766 */ 3767 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; 3768 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; 3769 3770 if (!dma_conf->dma_tx_size) 3771 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; 3772 if (!dma_conf->dma_rx_size) 3773 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; 3774 3775 /* Earlier check for TBS */ 3776 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { 3777 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; 3778 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; 3779 3780 /* Setup per-TXQ tbs flag before TX descriptor alloc */ 3781 tx_q->tbs |= tbs_en ? 
STMMAC_TBS_AVAIL : 0; 3782 } 3783 3784 ret = alloc_dma_desc_resources(priv, dma_conf); 3785 if (ret < 0) { 3786 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", 3787 __func__); 3788 goto alloc_error; 3789 } 3790 3791 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); 3792 if (ret < 0) { 3793 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", 3794 __func__); 3795 goto init_error; 3796 } 3797 3798 return dma_conf; 3799 3800 init_error: 3801 free_dma_desc_resources(priv, dma_conf); 3802 alloc_error: 3803 kfree(dma_conf); 3804 return ERR_PTR(ret); 3805 } 3806 3807 /** 3808 * __stmmac_open - open entry point of the driver 3809 * @dev : pointer to the device structure. 3810 * @dma_conf : structure to take the dma data 3811 * Description: 3812 * This function is the open entry point of the driver. 3813 * Return value: 3814 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3815 * file on failure. 3816 */ 3817 static int __stmmac_open(struct net_device *dev, 3818 struct stmmac_dma_conf *dma_conf) 3819 { 3820 struct stmmac_priv *priv = netdev_priv(dev); 3821 int mode = priv->plat->phy_interface; 3822 u32 chan; 3823 int ret; 3824 3825 ret = pm_runtime_resume_and_get(priv->device); 3826 if (ret < 0) 3827 return ret; 3828 3829 if (priv->hw->pcs != STMMAC_PCS_TBI && 3830 priv->hw->pcs != STMMAC_PCS_RTBI && 3831 (!priv->hw->xpcs || 3832 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) && 3833 !priv->hw->lynx_pcs) { 3834 ret = stmmac_init_phy(dev); 3835 if (ret) { 3836 netdev_err(priv->dev, 3837 "%s: Cannot attach to PHY (error: %d)\n", 3838 __func__, ret); 3839 goto init_phy_error; 3840 } 3841 } 3842 3843 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 3844 3845 buf_sz = dma_conf->dma_buf_sz; 3846 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); 3847 3848 stmmac_reset_queues_param(priv); 3849 3850 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && 3851 priv->plat->serdes_powerup) { 3852 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); 3853 if (ret < 0) { 3854 netdev_err(priv->dev, "%s: Serdes powerup failed\n", 3855 __func__); 3856 goto init_error; 3857 } 3858 } 3859 3860 ret = stmmac_hw_setup(dev, true); 3861 if (ret < 0) { 3862 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); 3863 goto init_error; 3864 } 3865 3866 stmmac_init_coalesce(priv); 3867 3868 phylink_start(priv->phylink); 3869 /* We may have called phylink_speed_down before */ 3870 phylink_speed_up(priv->phylink); 3871 3872 ret = stmmac_request_irq(dev); 3873 if (ret) 3874 goto irq_error; 3875 3876 stmmac_enable_all_queues(priv); 3877 netif_tx_start_all_queues(priv->dev); 3878 stmmac_enable_all_dma_irq(priv); 3879 3880 return 0; 3881 3882 irq_error: 3883 phylink_stop(priv->phylink); 3884 3885 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3886 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 3887 3888 stmmac_hw_teardown(dev); 3889 init_error: 3890 phylink_disconnect_phy(priv->phylink); 3891 init_phy_error: 3892 pm_runtime_put(priv->device); 3893 return ret; 3894 } 3895 3896 static int stmmac_open(struct net_device *dev) 3897 { 3898 struct stmmac_priv *priv = netdev_priv(dev); 3899 struct stmmac_dma_conf *dma_conf; 3900 int ret; 3901 3902 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); 3903 if (IS_ERR(dma_conf)) 3904 return PTR_ERR(dma_conf); 3905 3906 ret = __stmmac_open(dev, dma_conf); 3907 if (ret) 3908 free_dma_desc_resources(priv, dma_conf); 3909 3910 kfree(dma_conf); 3911 return ret; 3912 } 3913 3914 static void 
stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
	set_bit(__FPE_REMOVING, &priv->fpe_task_state);

	if (priv->fpe_wq)
		destroy_workqueue(priv->fpe_wq);

	netdev_info(priv->dev, "FPE workqueue stop");
}

/**
 * stmmac_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);
	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	netif_tx_disable(dev);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	/* Power down the SerDes, if one is present */
	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	pm_runtime_put(priv->device);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_stop_wq(priv);

	return 0;
}

static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
	else
		p = &tx_q->dma_tx[tx_q->cur_tx];

	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
	return true;
}

/**
 * stmmac_tso_allocator - allocate and fill the TSO payload descriptors
 * @priv: driver private structure
 * @des: buffer start address
 * @total_len: total length to fill in descriptors
 * @last_segment: condition for the last descriptor
 * @queue: TX queue index
 * Description:
 * This function fills the descriptors and requests new descriptors
 * according to the remaining buffer length to fill.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		curr_addr = des + (total_len - tmp_len);
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
					   0, 1,
					   (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
					   0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	int desc_size;

	if (likely(priv->extend_desc))
		desc_size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	/* The own bit must be the latest setting done when preparing the
	 * descriptor, and then a barrier is needed to make sure that
	 * all is coherent before granting control to the DMA engine.
	 */
	wmb();

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
}

/**
 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 * @skb : the socket buffer
 * @dev : device pointer
 * Description: this is the transmit function that is called on TSO frames
 * (support available on GMAC4 and newer chips).
 * The diagram below shows the ring programming in the case of TSO frames:
 *
 * First Descriptor
 *  --------
 *  | DES0 |---> buffer1 = L2/L3/L4 header
 *  | DES1 |---> TCP Payload (can continue on next descr...)
 *  | DES2 |---> buffer 1 and 2 len
 *  | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *  --------
 *	|
 *     ...
 *	|
 *  --------
 *  | DES0 | --| Split TCP Payload on Buffers 1 and 2
 *  | DES1 | --|
 *  | DES2 | --> buffer 1 and 2 len
 *  | DES3 |
 *  --------
 *
 * The MSS is fixed while TSO is enabled, so the TDES3 context field does not
 * need to be reprogrammed for every frame.
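 *
 * Any payload not carried by the first descriptor is handed to
 * stmmac_tso_allocator(), which slices it into chunks of at most
 * TSO_MAX_BUFF_SIZE bytes, one descriptor per chunk; for example, a
 * 28000-byte remainder takes two descriptors of 16383 and 11617 bytes.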
4111 */ 4112 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) 4113 { 4114 struct dma_desc *desc, *first, *mss_desc = NULL; 4115 struct stmmac_priv *priv = netdev_priv(dev); 4116 int nfrags = skb_shinfo(skb)->nr_frags; 4117 u32 queue = skb_get_queue_mapping(skb); 4118 unsigned int first_entry, tx_packets; 4119 struct stmmac_txq_stats *txq_stats; 4120 int tmp_pay_len = 0, first_tx; 4121 struct stmmac_tx_queue *tx_q; 4122 bool has_vlan, set_ic; 4123 u8 proto_hdr_len, hdr; 4124 unsigned long flags; 4125 u32 pay_len, mss; 4126 dma_addr_t des; 4127 int i; 4128 4129 tx_q = &priv->dma_conf.tx_queue[queue]; 4130 txq_stats = &priv->xstats.txq_stats[queue]; 4131 first_tx = tx_q->cur_tx; 4132 4133 /* Compute header lengths */ 4134 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 4135 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr); 4136 hdr = sizeof(struct udphdr); 4137 } else { 4138 proto_hdr_len = skb_tcp_all_headers(skb); 4139 hdr = tcp_hdrlen(skb); 4140 } 4141 4142 /* Desc availability based on threshold should be enough safe */ 4143 if (unlikely(stmmac_tx_avail(priv, queue) < 4144 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { 4145 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4146 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 4147 queue)); 4148 /* This is a hard error, log it. */ 4149 netdev_err(priv->dev, 4150 "%s: Tx Ring full when queue awake\n", 4151 __func__); 4152 } 4153 return NETDEV_TX_BUSY; 4154 } 4155 4156 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ 4157 4158 mss = skb_shinfo(skb)->gso_size; 4159 4160 /* set new MSS value if needed */ 4161 if (mss != tx_q->mss) { 4162 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4163 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 4164 else 4165 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; 4166 4167 stmmac_set_mss(priv, mss_desc, mss); 4168 tx_q->mss = mss; 4169 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 4170 priv->dma_conf.dma_tx_size); 4171 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 4172 } 4173 4174 if (netif_msg_tx_queued(priv)) { 4175 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n", 4176 __func__, hdr, proto_hdr_len, pay_len, mss); 4177 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, 4178 skb->data_len); 4179 } 4180 4181 /* Check if VLAN can be inserted by HW */ 4182 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 4183 4184 first_entry = tx_q->cur_tx; 4185 WARN_ON(tx_q->tx_skbuff[first_entry]); 4186 4187 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4188 desc = &tx_q->dma_entx[first_entry].basic; 4189 else 4190 desc = &tx_q->dma_tx[first_entry]; 4191 first = desc; 4192 4193 if (has_vlan) 4194 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 4195 4196 /* first descriptor: fill Headers on Buf1 */ 4197 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), 4198 DMA_TO_DEVICE); 4199 if (dma_mapping_error(priv->device, des)) 4200 goto dma_map_err; 4201 4202 tx_q->tx_skbuff_dma[first_entry].buf = des; 4203 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 4204 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 4205 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4206 4207 if (priv->dma_cap.addr64 <= 32) { 4208 first->des0 = cpu_to_le32(des); 4209 4210 /* Fill start of payload in buff2 of first descriptor */ 4211 if (pay_len) 4212 first->des1 = cpu_to_le32(des + proto_hdr_len); 4213 4214 /* If needed take extra descriptors to fill the remaining payload */ 4215 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; 4216 } else { 4217 
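		/* Wider than 32-bit DMA addressing: buffer2 of the first
		 * descriptor is not used for the payload here, so the whole
		 * payload (starting right after the headers) is mapped by
		 * stmmac_tso_allocator() below.
		 */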
stmmac_set_desc_addr(priv, first, des); 4218 tmp_pay_len = pay_len; 4219 des += proto_hdr_len; 4220 pay_len = 0; 4221 } 4222 4223 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 4224 4225 /* Prepare fragments */ 4226 for (i = 0; i < nfrags; i++) { 4227 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4228 4229 des = skb_frag_dma_map(priv->device, frag, 0, 4230 skb_frag_size(frag), 4231 DMA_TO_DEVICE); 4232 if (dma_mapping_error(priv->device, des)) 4233 goto dma_map_err; 4234 4235 stmmac_tso_allocator(priv, des, skb_frag_size(frag), 4236 (i == nfrags - 1), queue); 4237 4238 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; 4239 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); 4240 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; 4241 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; 4242 } 4243 4244 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 4245 4246 /* Only the last descriptor gets to point to the skb. */ 4247 tx_q->tx_skbuff[tx_q->cur_tx] = skb; 4248 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; 4249 4250 /* Manage tx mitigation */ 4251 tx_packets = (tx_q->cur_tx + 1) - first_tx; 4252 tx_q->tx_count_frames += tx_packets; 4253 4254 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 4255 set_ic = true; 4256 else if (!priv->tx_coal_frames[queue]) 4257 set_ic = false; 4258 else if (tx_packets > priv->tx_coal_frames[queue]) 4259 set_ic = true; 4260 else if ((tx_q->tx_count_frames % 4261 priv->tx_coal_frames[queue]) < tx_packets) 4262 set_ic = true; 4263 else 4264 set_ic = false; 4265 4266 if (set_ic) { 4267 if (tx_q->tbs & STMMAC_TBS_AVAIL) 4268 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 4269 else 4270 desc = &tx_q->dma_tx[tx_q->cur_tx]; 4271 4272 tx_q->tx_count_frames = 0; 4273 stmmac_set_tx_ic(priv, desc); 4274 } 4275 4276 /* We've used all descriptors we need for this skb, however, 4277 * advance cur_tx so that it references a fresh descriptor. 4278 * ndo_start_xmit will fill this descriptor the next time it's 4279 * called and stmmac_tx_clean may clean up to this descriptor. 4280 */ 4281 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); 4282 4283 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 4284 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 4285 __func__); 4286 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 4287 } 4288 4289 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 4290 txq_stats->tx_bytes += skb->len; 4291 txq_stats->tx_tso_frames++; 4292 txq_stats->tx_tso_nfrags += nfrags; 4293 if (set_ic) 4294 txq_stats->tx_set_ic_bit++; 4295 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 4296 4297 if (priv->sarc_type) 4298 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 4299 4300 skb_tx_timestamp(skb); 4301 4302 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4303 priv->hwts_tx_en)) { 4304 /* declare that device is doing timestamping */ 4305 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4306 stmmac_enable_tx_timestamp(priv, first); 4307 } 4308 4309 /* Complete the first descriptor before granting the DMA */ 4310 stmmac_prepare_tso_tx_desc(priv, first, 1, 4311 proto_hdr_len, 4312 pay_len, 4313 1, tx_q->tx_skbuff_dma[first_entry].last_segment, 4314 hdr / 4, (skb->len - proto_hdr_len)); 4315 4316 /* If context desc is used to change MSS */ 4317 if (mss_desc) { 4318 /* Make sure that first descriptor has been completely 4319 * written, including its own bit. 
This is because MSS is 4320 * actually before first descriptor, so we need to make 4321 * sure that MSS's own bit is the last thing written. 4322 */ 4323 dma_wmb(); 4324 stmmac_set_tx_owner(priv, mss_desc); 4325 } 4326 4327 if (netif_msg_pktdata(priv)) { 4328 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", 4329 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 4330 tx_q->cur_tx, first, nfrags); 4331 pr_info(">>> frame to be transmitted: "); 4332 print_pkt(skb->data, skb_headlen(skb)); 4333 } 4334 4335 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4336 4337 stmmac_flush_tx_descriptors(priv, queue); 4338 stmmac_tx_timer_arm(priv, queue); 4339 4340 return NETDEV_TX_OK; 4341 4342 dma_map_err: 4343 dev_err(priv->device, "Tx dma map failed\n"); 4344 dev_kfree_skb(skb); 4345 priv->xstats.tx_dropped++; 4346 return NETDEV_TX_OK; 4347 } 4348 4349 /** 4350 * stmmac_xmit - Tx entry point of the driver 4351 * @skb : the socket buffer 4352 * @dev : device pointer 4353 * Description : this is the tx entry point of the driver. 4354 * It programs the chain or the ring and supports oversized frames 4355 * and SG feature. 4356 */ 4357 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 4358 { 4359 unsigned int first_entry, tx_packets, enh_desc; 4360 struct stmmac_priv *priv = netdev_priv(dev); 4361 unsigned int nopaged_len = skb_headlen(skb); 4362 int i, csum_insertion = 0, is_jumbo = 0; 4363 u32 queue = skb_get_queue_mapping(skb); 4364 int nfrags = skb_shinfo(skb)->nr_frags; 4365 int gso = skb_shinfo(skb)->gso_type; 4366 struct stmmac_txq_stats *txq_stats; 4367 struct dma_edesc *tbs_desc = NULL; 4368 struct dma_desc *desc, *first; 4369 struct stmmac_tx_queue *tx_q; 4370 bool has_vlan, set_ic; 4371 int entry, first_tx; 4372 unsigned long flags; 4373 dma_addr_t des; 4374 4375 tx_q = &priv->dma_conf.tx_queue[queue]; 4376 txq_stats = &priv->xstats.txq_stats[queue]; 4377 first_tx = tx_q->cur_tx; 4378 4379 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) 4380 stmmac_disable_eee_mode(priv); 4381 4382 /* Manage oversized TCP frames for GMAC4 device */ 4383 if (skb_is_gso(skb) && priv->tso) { 4384 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 4385 return stmmac_tso_xmit(skb, dev); 4386 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) 4387 return stmmac_tso_xmit(skb, dev); 4388 } 4389 4390 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 4391 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 4392 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 4393 queue)); 4394 /* This is a hard error, log it. */ 4395 netdev_err(priv->dev, 4396 "%s: Tx Ring full when queue awake\n", 4397 __func__); 4398 } 4399 return NETDEV_TX_BUSY; 4400 } 4401 4402 /* Check if VLAN can be inserted by HW */ 4403 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 4404 4405 entry = tx_q->cur_tx; 4406 first_entry = entry; 4407 WARN_ON(tx_q->tx_skbuff[first_entry]); 4408 4409 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 4410 /* DWMAC IPs can be synthesized to support tx coe only for a few tx 4411 * queues. In that case, checksum offloading for those queues that don't 4412 * support tx coe needs to fallback to software checksum calculation. 
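	 * The fallback is done just below by calling skb_checksum_help().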
4413 */ 4414 if (csum_insertion && 4415 priv->plat->tx_queues_cfg[queue].coe_unsupported) { 4416 if (unlikely(skb_checksum_help(skb))) 4417 goto dma_map_err; 4418 csum_insertion = !csum_insertion; 4419 } 4420 4421 if (likely(priv->extend_desc)) 4422 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4423 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4424 desc = &tx_q->dma_entx[entry].basic; 4425 else 4426 desc = tx_q->dma_tx + entry; 4427 4428 first = desc; 4429 4430 if (has_vlan) 4431 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 4432 4433 enh_desc = priv->plat->enh_desc; 4434 /* To program the descriptors according to the size of the frame */ 4435 if (enh_desc) 4436 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 4437 4438 if (unlikely(is_jumbo)) { 4439 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 4440 if (unlikely(entry < 0) && (entry != -EINVAL)) 4441 goto dma_map_err; 4442 } 4443 4444 for (i = 0; i < nfrags; i++) { 4445 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4446 int len = skb_frag_size(frag); 4447 bool last_segment = (i == (nfrags - 1)); 4448 4449 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4450 WARN_ON(tx_q->tx_skbuff[entry]); 4451 4452 if (likely(priv->extend_desc)) 4453 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4454 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4455 desc = &tx_q->dma_entx[entry].basic; 4456 else 4457 desc = tx_q->dma_tx + entry; 4458 4459 des = skb_frag_dma_map(priv->device, frag, 0, len, 4460 DMA_TO_DEVICE); 4461 if (dma_mapping_error(priv->device, des)) 4462 goto dma_map_err; /* should reuse desc w/o issues */ 4463 4464 tx_q->tx_skbuff_dma[entry].buf = des; 4465 4466 stmmac_set_desc_addr(priv, desc, des); 4467 4468 tx_q->tx_skbuff_dma[entry].map_as_page = true; 4469 tx_q->tx_skbuff_dma[entry].len = len; 4470 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 4471 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 4472 4473 /* Prepare the descriptor and set the own bit too */ 4474 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 4475 priv->mode, 1, last_segment, skb->len); 4476 } 4477 4478 /* Only the last descriptor gets to point to the skb. */ 4479 tx_q->tx_skbuff[entry] = skb; 4480 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; 4481 4482 /* According to the coalesce parameter the IC bit for the latest 4483 * segment is reset and the timer re-started to clean the tx status. 4484 * This approach takes care about the fragments: desc is the first 4485 * element in case of no SG. 4486 */ 4487 tx_packets = (entry + 1) - first_tx; 4488 tx_q->tx_count_frames += tx_packets; 4489 4490 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 4491 set_ic = true; 4492 else if (!priv->tx_coal_frames[queue]) 4493 set_ic = false; 4494 else if (tx_packets > priv->tx_coal_frames[queue]) 4495 set_ic = true; 4496 else if ((tx_q->tx_count_frames % 4497 priv->tx_coal_frames[queue]) < tx_packets) 4498 set_ic = true; 4499 else 4500 set_ic = false; 4501 4502 if (set_ic) { 4503 if (likely(priv->extend_desc)) 4504 desc = &tx_q->dma_etx[entry].basic; 4505 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4506 desc = &tx_q->dma_entx[entry].basic; 4507 else 4508 desc = &tx_q->dma_tx[entry]; 4509 4510 tx_q->tx_count_frames = 0; 4511 stmmac_set_tx_ic(priv, desc); 4512 } 4513 4514 /* We've used all descriptors we need for this skb, however, 4515 * advance cur_tx so that it references a fresh descriptor. 
4516 * ndo_start_xmit will fill this descriptor the next time it's 4517 * called and stmmac_tx_clean may clean up to this descriptor. 4518 */ 4519 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4520 tx_q->cur_tx = entry; 4521 4522 if (netif_msg_pktdata(priv)) { 4523 netdev_dbg(priv->dev, 4524 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 4525 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 4526 entry, first, nfrags); 4527 4528 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 4529 print_pkt(skb->data, skb->len); 4530 } 4531 4532 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 4533 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 4534 __func__); 4535 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 4536 } 4537 4538 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 4539 txq_stats->tx_bytes += skb->len; 4540 if (set_ic) 4541 txq_stats->tx_set_ic_bit++; 4542 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 4543 4544 if (priv->sarc_type) 4545 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 4546 4547 skb_tx_timestamp(skb); 4548 4549 /* Ready to fill the first descriptor and set the OWN bit w/o any 4550 * problems because all the descriptors are actually ready to be 4551 * passed to the DMA engine. 4552 */ 4553 if (likely(!is_jumbo)) { 4554 bool last_segment = (nfrags == 0); 4555 4556 des = dma_map_single(priv->device, skb->data, 4557 nopaged_len, DMA_TO_DEVICE); 4558 if (dma_mapping_error(priv->device, des)) 4559 goto dma_map_err; 4560 4561 tx_q->tx_skbuff_dma[first_entry].buf = des; 4562 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; 4563 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; 4564 4565 stmmac_set_desc_addr(priv, first, des); 4566 4567 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 4568 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 4569 4570 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4571 priv->hwts_tx_en)) { 4572 /* declare that device is doing timestamping */ 4573 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4574 stmmac_enable_tx_timestamp(priv, first); 4575 } 4576 4577 /* Prepare the first descriptor setting the OWN bit too */ 4578 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 4579 csum_insertion, priv->mode, 0, last_segment, 4580 skb->len); 4581 } 4582 4583 if (tx_q->tbs & STMMAC_TBS_EN) { 4584 struct timespec64 ts = ns_to_timespec64(skb->tstamp); 4585 4586 tbs_desc = &tx_q->dma_entx[first_entry]; 4587 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); 4588 } 4589 4590 stmmac_set_tx_owner(priv, first); 4591 4592 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 4593 4594 stmmac_enable_dma_transmission(priv, priv->ioaddr); 4595 4596 stmmac_flush_tx_descriptors(priv, queue); 4597 stmmac_tx_timer_arm(priv, queue); 4598 4599 return NETDEV_TX_OK; 4600 4601 dma_map_err: 4602 netdev_err(priv->dev, "Tx DMA map failed\n"); 4603 dev_kfree_skb(skb); 4604 priv->xstats.tx_dropped++; 4605 return NETDEV_TX_OK; 4606 } 4607 4608 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 4609 { 4610 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb); 4611 __be16 vlan_proto = veth->h_vlan_proto; 4612 u16 vlanid; 4613 4614 if ((vlan_proto == htons(ETH_P_8021Q) && 4615 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 4616 (vlan_proto == htons(ETH_P_8021AD) && 4617 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 4618 /* pop the vlan tag */ 4619 vlanid = ntohs(veth->h_vlan_TCI); 4620 memmove(skb->data + 
VLAN_HLEN, veth, ETH_ALEN * 2); 4621 skb_pull(skb, VLAN_HLEN); 4622 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); 4623 } 4624 } 4625 4626 /** 4627 * stmmac_rx_refill - refill used skb preallocated buffers 4628 * @priv: driver private structure 4629 * @queue: RX queue index 4630 * Description : this is to reallocate the skb for the reception process 4631 * that is based on zero-copy. 4632 */ 4633 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) 4634 { 4635 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 4636 int dirty = stmmac_rx_dirty(priv, queue); 4637 unsigned int entry = rx_q->dirty_rx; 4638 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); 4639 4640 if (priv->dma_cap.host_dma_width <= 32) 4641 gfp |= GFP_DMA32; 4642 4643 while (dirty-- > 0) { 4644 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 4645 struct dma_desc *p; 4646 bool use_rx_wd; 4647 4648 if (priv->extend_desc) 4649 p = (struct dma_desc *)(rx_q->dma_erx + entry); 4650 else 4651 p = rx_q->dma_rx + entry; 4652 4653 if (!buf->page) { 4654 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); 4655 if (!buf->page) 4656 break; 4657 } 4658 4659 if (priv->sph && !buf->sec_page) { 4660 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); 4661 if (!buf->sec_page) 4662 break; 4663 4664 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); 4665 } 4666 4667 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; 4668 4669 stmmac_set_desc_addr(priv, p, buf->addr); 4670 if (priv->sph) 4671 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); 4672 else 4673 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); 4674 stmmac_refill_desc3(priv, rx_q, p); 4675 4676 rx_q->rx_count_frames++; 4677 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; 4678 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) 4679 rx_q->rx_count_frames = 0; 4680 4681 use_rx_wd = !priv->rx_coal_frames[queue]; 4682 use_rx_wd |= rx_q->rx_count_frames > 0; 4683 if (!priv->use_riwt) 4684 use_rx_wd = false; 4685 4686 dma_wmb(); 4687 stmmac_set_rx_owner(priv, p, use_rx_wd); 4688 4689 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); 4690 } 4691 rx_q->dirty_rx = entry; 4692 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 4693 (rx_q->dirty_rx * sizeof(struct dma_desc)); 4694 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 4695 } 4696 4697 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv, 4698 struct dma_desc *p, 4699 int status, unsigned int len) 4700 { 4701 unsigned int plen = 0, hlen = 0; 4702 int coe = priv->hw->rx_csum; 4703 4704 /* Not first descriptor, buffer is always zero */ 4705 if (priv->sph && len) 4706 return 0; 4707 4708 /* First descriptor, get split header length */ 4709 stmmac_get_rx_header_len(priv, p, &hlen); 4710 if (priv->sph && hlen) { 4711 priv->xstats.rx_split_hdr_pkt_n++; 4712 return hlen; 4713 } 4714 4715 /* First descriptor, not last descriptor and not split header */ 4716 if (status & rx_not_ls) 4717 return priv->dma_conf.dma_buf_sz; 4718 4719 plen = stmmac_get_rx_frame_len(priv, p, coe); 4720 4721 /* First descriptor and last descriptor and not split header */ 4722 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); 4723 } 4724 4725 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv, 4726 struct dma_desc *p, 4727 int status, unsigned int len) 4728 { 4729 int coe = priv->hw->rx_csum; 4730 unsigned int plen = 0; 4731 4732 /* Not split header, buffer is not available */ 4733 if (!priv->sph) 4734 return 0; 4735 4736 /* 
Not last descriptor */ 4737 if (status & rx_not_ls) 4738 return priv->dma_conf.dma_buf_sz; 4739 4740 plen = stmmac_get_rx_frame_len(priv, p, coe); 4741 4742 /* Last descriptor */ 4743 return plen - len; 4744 } 4745 4746 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, 4747 struct xdp_frame *xdpf, bool dma_map) 4748 { 4749 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; 4750 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 4751 unsigned int entry = tx_q->cur_tx; 4752 struct dma_desc *tx_desc; 4753 dma_addr_t dma_addr; 4754 bool set_ic; 4755 4756 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) 4757 return STMMAC_XDP_CONSUMED; 4758 4759 if (likely(priv->extend_desc)) 4760 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); 4761 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 4762 tx_desc = &tx_q->dma_entx[entry].basic; 4763 else 4764 tx_desc = tx_q->dma_tx + entry; 4765 4766 if (dma_map) { 4767 dma_addr = dma_map_single(priv->device, xdpf->data, 4768 xdpf->len, DMA_TO_DEVICE); 4769 if (dma_mapping_error(priv->device, dma_addr)) 4770 return STMMAC_XDP_CONSUMED; 4771 4772 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; 4773 } else { 4774 struct page *page = virt_to_page(xdpf->data); 4775 4776 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) + 4777 xdpf->headroom; 4778 dma_sync_single_for_device(priv->device, dma_addr, 4779 xdpf->len, DMA_BIDIRECTIONAL); 4780 4781 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; 4782 } 4783 4784 tx_q->tx_skbuff_dma[entry].buf = dma_addr; 4785 tx_q->tx_skbuff_dma[entry].map_as_page = false; 4786 tx_q->tx_skbuff_dma[entry].len = xdpf->len; 4787 tx_q->tx_skbuff_dma[entry].last_segment = true; 4788 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 4789 4790 tx_q->xdpf[entry] = xdpf; 4791 4792 stmmac_set_desc_addr(priv, tx_desc, dma_addr); 4793 4794 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, 4795 true, priv->mode, true, true, 4796 xdpf->len); 4797 4798 tx_q->tx_count_frames++; 4799 4800 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) 4801 set_ic = true; 4802 else 4803 set_ic = false; 4804 4805 if (set_ic) { 4806 unsigned long flags; 4807 tx_q->tx_count_frames = 0; 4808 stmmac_set_tx_ic(priv, tx_desc); 4809 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 4810 txq_stats->tx_set_ic_bit++; 4811 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 4812 } 4813 4814 stmmac_enable_dma_transmission(priv, priv->ioaddr); 4815 4816 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); 4817 tx_q->cur_tx = entry; 4818 4819 return STMMAC_XDP_TX; 4820 } 4821 4822 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv, 4823 int cpu) 4824 { 4825 int index = cpu; 4826 4827 if (unlikely(index < 0)) 4828 index = 0; 4829 4830 while (index >= priv->plat->tx_queues_to_use) 4831 index -= priv->plat->tx_queues_to_use; 4832 4833 return index; 4834 } 4835 4836 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv, 4837 struct xdp_buff *xdp) 4838 { 4839 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); 4840 int cpu = smp_processor_id(); 4841 struct netdev_queue *nq; 4842 int queue; 4843 int res; 4844 4845 if (unlikely(!xdpf)) 4846 return STMMAC_XDP_CONSUMED; 4847 4848 queue = stmmac_xdp_get_tx_queue(priv, cpu); 4849 nq = netdev_get_tx_queue(priv->dev, queue); 4850 4851 __netif_tx_lock(nq, cpu); 4852 /* Avoids TX time-out as we are sharing with slow path */ 4853 txq_trans_cond_update(nq); 4854 4855 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, 
false); 4856 if (res == STMMAC_XDP_TX) 4857 stmmac_flush_tx_descriptors(priv, queue); 4858 4859 __netif_tx_unlock(nq); 4860 4861 return res; 4862 } 4863 4864 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv, 4865 struct bpf_prog *prog, 4866 struct xdp_buff *xdp) 4867 { 4868 u32 act; 4869 int res; 4870 4871 act = bpf_prog_run_xdp(prog, xdp); 4872 switch (act) { 4873 case XDP_PASS: 4874 res = STMMAC_XDP_PASS; 4875 break; 4876 case XDP_TX: 4877 res = stmmac_xdp_xmit_back(priv, xdp); 4878 break; 4879 case XDP_REDIRECT: 4880 if (xdp_do_redirect(priv->dev, xdp, prog) < 0) 4881 res = STMMAC_XDP_CONSUMED; 4882 else 4883 res = STMMAC_XDP_REDIRECT; 4884 break; 4885 default: 4886 bpf_warn_invalid_xdp_action(priv->dev, prog, act); 4887 fallthrough; 4888 case XDP_ABORTED: 4889 trace_xdp_exception(priv->dev, prog, act); 4890 fallthrough; 4891 case XDP_DROP: 4892 res = STMMAC_XDP_CONSUMED; 4893 break; 4894 } 4895 4896 return res; 4897 } 4898 4899 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv, 4900 struct xdp_buff *xdp) 4901 { 4902 struct bpf_prog *prog; 4903 int res; 4904 4905 prog = READ_ONCE(priv->xdp_prog); 4906 if (!prog) { 4907 res = STMMAC_XDP_PASS; 4908 goto out; 4909 } 4910 4911 res = __stmmac_xdp_run_prog(priv, prog, xdp); 4912 out: 4913 return ERR_PTR(-res); 4914 } 4915 4916 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv, 4917 int xdp_status) 4918 { 4919 int cpu = smp_processor_id(); 4920 int queue; 4921 4922 queue = stmmac_xdp_get_tx_queue(priv, cpu); 4923 4924 if (xdp_status & STMMAC_XDP_TX) 4925 stmmac_tx_timer_arm(priv, queue); 4926 4927 if (xdp_status & STMMAC_XDP_REDIRECT) 4928 xdp_do_flush(); 4929 } 4930 4931 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, 4932 struct xdp_buff *xdp) 4933 { 4934 unsigned int metasize = xdp->data - xdp->data_meta; 4935 unsigned int datasize = xdp->data_end - xdp->data; 4936 struct sk_buff *skb; 4937 4938 skb = __napi_alloc_skb(&ch->rxtx_napi, 4939 xdp->data_end - xdp->data_hard_start, 4940 GFP_ATOMIC | __GFP_NOWARN); 4941 if (unlikely(!skb)) 4942 return NULL; 4943 4944 skb_reserve(skb, xdp->data - xdp->data_hard_start); 4945 memcpy(__skb_put(skb, datasize), xdp->data, datasize); 4946 if (metasize) 4947 skb_metadata_set(skb, metasize); 4948 4949 return skb; 4950 } 4951 4952 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, 4953 struct dma_desc *p, struct dma_desc *np, 4954 struct xdp_buff *xdp) 4955 { 4956 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; 4957 struct stmmac_channel *ch = &priv->channel[queue]; 4958 unsigned int len = xdp->data_end - xdp->data; 4959 enum pkt_hash_types hash_type; 4960 int coe = priv->hw->rx_csum; 4961 unsigned long flags; 4962 struct sk_buff *skb; 4963 u32 hash; 4964 4965 skb = stmmac_construct_skb_zc(ch, xdp); 4966 if (!skb) { 4967 priv->xstats.rx_dropped++; 4968 return; 4969 } 4970 4971 stmmac_get_rx_hwtstamp(priv, p, np, skb); 4972 stmmac_rx_vlan(priv->dev, skb); 4973 skb->protocol = eth_type_trans(skb, priv->dev); 4974 4975 if (unlikely(!coe)) 4976 skb_checksum_none_assert(skb); 4977 else 4978 skb->ip_summed = CHECKSUM_UNNECESSARY; 4979 4980 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 4981 skb_set_hash(skb, hash, hash_type); 4982 4983 skb_record_rx_queue(skb, queue); 4984 napi_gro_receive(&ch->rxtx_napi, skb); 4985 4986 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 4987 rxq_stats->rx_pkt_n++; 4988 rxq_stats->rx_bytes += len; 4989 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 
4990 } 4991 4992 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) 4993 { 4994 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 4995 unsigned int entry = rx_q->dirty_rx; 4996 struct dma_desc *rx_desc = NULL; 4997 bool ret = true; 4998 4999 budget = min(budget, stmmac_rx_dirty(priv, queue)); 5000 5001 while (budget-- > 0 && entry != rx_q->cur_rx) { 5002 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 5003 dma_addr_t dma_addr; 5004 bool use_rx_wd; 5005 5006 if (!buf->xdp) { 5007 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); 5008 if (!buf->xdp) { 5009 ret = false; 5010 break; 5011 } 5012 } 5013 5014 if (priv->extend_desc) 5015 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); 5016 else 5017 rx_desc = rx_q->dma_rx + entry; 5018 5019 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); 5020 stmmac_set_desc_addr(priv, rx_desc, dma_addr); 5021 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false); 5022 stmmac_refill_desc3(priv, rx_q, rx_desc); 5023 5024 rx_q->rx_count_frames++; 5025 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; 5026 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) 5027 rx_q->rx_count_frames = 0; 5028 5029 use_rx_wd = !priv->rx_coal_frames[queue]; 5030 use_rx_wd |= rx_q->rx_count_frames > 0; 5031 if (!priv->use_riwt) 5032 use_rx_wd = false; 5033 5034 dma_wmb(); 5035 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd); 5036 5037 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); 5038 } 5039 5040 if (rx_desc) { 5041 rx_q->dirty_rx = entry; 5042 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 5043 (rx_q->dirty_rx * sizeof(struct dma_desc)); 5044 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 5045 } 5046 5047 return ret; 5048 } 5049 5050 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp) 5051 { 5052 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used 5053 * to represent incoming packet, whereas cb field in the same structure 5054 * is used to store driver specific info. Thus, struct stmmac_xdp_buff 5055 * is laid on top of xdp and cb fields of struct xdp_buff_xsk. 
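	 *
	 * Note that this cast is only valid for buffers allocated from an
	 * XSK pool (xsk_buff_alloc()), which are backed by struct
	 * xdp_buff_xsk.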
5056 */ 5057 return (struct stmmac_xdp_buff *)xdp; 5058 } 5059 5060 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) 5061 { 5062 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; 5063 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 5064 unsigned int count = 0, error = 0, len = 0; 5065 int dirty = stmmac_rx_dirty(priv, queue); 5066 unsigned int next_entry = rx_q->cur_rx; 5067 u32 rx_errors = 0, rx_dropped = 0; 5068 unsigned int desc_size; 5069 struct bpf_prog *prog; 5070 bool failure = false; 5071 unsigned long flags; 5072 int xdp_status = 0; 5073 int status = 0; 5074 5075 if (netif_msg_rx_status(priv)) { 5076 void *rx_head; 5077 5078 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 5079 if (priv->extend_desc) { 5080 rx_head = (void *)rx_q->dma_erx; 5081 desc_size = sizeof(struct dma_extended_desc); 5082 } else { 5083 rx_head = (void *)rx_q->dma_rx; 5084 desc_size = sizeof(struct dma_desc); 5085 } 5086 5087 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, 5088 rx_q->dma_rx_phy, desc_size); 5089 } 5090 while (count < limit) { 5091 struct stmmac_rx_buffer *buf; 5092 struct stmmac_xdp_buff *ctx; 5093 unsigned int buf1_len = 0; 5094 struct dma_desc *np, *p; 5095 int entry; 5096 int res; 5097 5098 if (!count && rx_q->state_saved) { 5099 error = rx_q->state.error; 5100 len = rx_q->state.len; 5101 } else { 5102 rx_q->state_saved = false; 5103 error = 0; 5104 len = 0; 5105 } 5106 5107 if (count >= limit) 5108 break; 5109 5110 read_again: 5111 buf1_len = 0; 5112 entry = next_entry; 5113 buf = &rx_q->buf_pool[entry]; 5114 5115 if (dirty >= STMMAC_RX_FILL_BATCH) { 5116 failure = failure || 5117 !stmmac_rx_refill_zc(priv, queue, dirty); 5118 dirty = 0; 5119 } 5120 5121 if (priv->extend_desc) 5122 p = (struct dma_desc *)(rx_q->dma_erx + entry); 5123 else 5124 p = rx_q->dma_rx + entry; 5125 5126 /* read the status of the incoming frame */ 5127 status = stmmac_rx_status(priv, &priv->xstats, p); 5128 /* check if managed by the DMA otherwise go ahead */ 5129 if (unlikely(status & dma_own)) 5130 break; 5131 5132 /* Prefetch the next RX descriptor */ 5133 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 5134 priv->dma_conf.dma_rx_size); 5135 next_entry = rx_q->cur_rx; 5136 5137 if (priv->extend_desc) 5138 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 5139 else 5140 np = rx_q->dma_rx + next_entry; 5141 5142 prefetch(np); 5143 5144 /* Ensure a valid XSK buffer before proceed */ 5145 if (!buf->xdp) 5146 break; 5147 5148 if (priv->extend_desc) 5149 stmmac_rx_extended_status(priv, &priv->xstats, 5150 rx_q->dma_erx + entry); 5151 if (unlikely(status == discard_frame)) { 5152 xsk_buff_free(buf->xdp); 5153 buf->xdp = NULL; 5154 dirty++; 5155 error = 1; 5156 if (!priv->hwts_rx_en) 5157 rx_errors++; 5158 } 5159 5160 if (unlikely(error && (status & rx_not_ls))) 5161 goto read_again; 5162 if (unlikely(error)) { 5163 count++; 5164 continue; 5165 } 5166 5167 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ 5168 if (likely(status & rx_not_ls)) { 5169 xsk_buff_free(buf->xdp); 5170 buf->xdp = NULL; 5171 dirty++; 5172 count++; 5173 goto read_again; 5174 } 5175 5176 ctx = xsk_buff_to_stmmac_ctx(buf->xdp); 5177 ctx->priv = priv; 5178 ctx->desc = p; 5179 ctx->ndesc = np; 5180 5181 /* XDP ZC Frame only support primary buffers for now */ 5182 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 5183 len += buf1_len; 5184 5185 /* ACS is disabled; strip manually. 
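		 * The 4-byte FCS is still present in the buffer and is
		 * removed here on the last descriptor of the frame.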
		 */
		if (likely(!(status & rx_not_ls))) {
			buf1_len -= ETH_FCS_LEN;
			len -= ETH_FCS_LEN;
		}

		/* RX buffer is good and fits into an XSK pool buffer */
		buf->xdp->data_end = buf->xdp->data + buf1_len;
		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);

		prog = READ_ONCE(priv->xdp_prog);
		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);

		switch (res) {
		case STMMAC_XDP_PASS:
			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
			xsk_buff_free(buf->xdp);
			break;
		case STMMAC_XDP_CONSUMED:
			xsk_buff_free(buf->xdp);
			rx_dropped++;
			break;
		case STMMAC_XDP_TX:
		case STMMAC_XDP_REDIRECT:
			xdp_status |= res;
			break;
		}

		buf->xdp = NULL;
		dirty++;
		count++;
	}

	if (status & rx_not_ls) {
		rx_q->state_saved = true;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_finalize_xdp_rx(priv, xdp_status);

	flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
	rxq_stats->rx_pkt_n += count;
	u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);

	priv->xstats.rx_dropped += rx_dropped;
	priv->xstats.rx_errors += rx_errors;

	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
		if (failure || stmmac_rx_dirty(priv, queue) > 0)
			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);

		return (int)count;
	}

	return failure ? limit : (int)count;
}

/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
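 * If the napi budget runs out in the middle of a frame, the partially
 * reassembled state (skb, error, len) is saved in the rx queue and
 * restored on the next poll.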
5252 */ 5253 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 5254 { 5255 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0; 5256 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; 5257 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 5258 struct stmmac_channel *ch = &priv->channel[queue]; 5259 unsigned int count = 0, error = 0, len = 0; 5260 int status = 0, coe = priv->hw->rx_csum; 5261 unsigned int next_entry = rx_q->cur_rx; 5262 enum dma_data_direction dma_dir; 5263 unsigned int desc_size; 5264 struct sk_buff *skb = NULL; 5265 struct stmmac_xdp_buff ctx; 5266 unsigned long flags; 5267 int xdp_status = 0; 5268 int buf_sz; 5269 5270 dma_dir = page_pool_get_dma_dir(rx_q->page_pool); 5271 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; 5272 5273 if (netif_msg_rx_status(priv)) { 5274 void *rx_head; 5275 5276 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 5277 if (priv->extend_desc) { 5278 rx_head = (void *)rx_q->dma_erx; 5279 desc_size = sizeof(struct dma_extended_desc); 5280 } else { 5281 rx_head = (void *)rx_q->dma_rx; 5282 desc_size = sizeof(struct dma_desc); 5283 } 5284 5285 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, 5286 rx_q->dma_rx_phy, desc_size); 5287 } 5288 while (count < limit) { 5289 unsigned int buf1_len = 0, buf2_len = 0; 5290 enum pkt_hash_types hash_type; 5291 struct stmmac_rx_buffer *buf; 5292 struct dma_desc *np, *p; 5293 int entry; 5294 u32 hash; 5295 5296 if (!count && rx_q->state_saved) { 5297 skb = rx_q->state.skb; 5298 error = rx_q->state.error; 5299 len = rx_q->state.len; 5300 } else { 5301 rx_q->state_saved = false; 5302 skb = NULL; 5303 error = 0; 5304 len = 0; 5305 } 5306 5307 if (count >= limit) 5308 break; 5309 5310 read_again: 5311 buf1_len = 0; 5312 buf2_len = 0; 5313 entry = next_entry; 5314 buf = &rx_q->buf_pool[entry]; 5315 5316 if (priv->extend_desc) 5317 p = (struct dma_desc *)(rx_q->dma_erx + entry); 5318 else 5319 p = rx_q->dma_rx + entry; 5320 5321 /* read the status of the incoming frame */ 5322 status = stmmac_rx_status(priv, &priv->xstats, p); 5323 /* check if managed by the DMA otherwise go ahead */ 5324 if (unlikely(status & dma_own)) 5325 break; 5326 5327 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 5328 priv->dma_conf.dma_rx_size); 5329 next_entry = rx_q->cur_rx; 5330 5331 if (priv->extend_desc) 5332 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 5333 else 5334 np = rx_q->dma_rx + next_entry; 5335 5336 prefetch(np); 5337 5338 if (priv->extend_desc) 5339 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); 5340 if (unlikely(status == discard_frame)) { 5341 page_pool_recycle_direct(rx_q->page_pool, buf->page); 5342 buf->page = NULL; 5343 error = 1; 5344 if (!priv->hwts_rx_en) 5345 rx_errors++; 5346 } 5347 5348 if (unlikely(error && (status & rx_not_ls))) 5349 goto read_again; 5350 if (unlikely(error)) { 5351 dev_kfree_skb(skb); 5352 skb = NULL; 5353 count++; 5354 continue; 5355 } 5356 5357 /* Buffer is good. Go on. */ 5358 5359 prefetch(page_address(buf->page) + buf->page_offset); 5360 if (buf->sec_page) 5361 prefetch(page_address(buf->sec_page)); 5362 5363 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 5364 len += buf1_len; 5365 buf2_len = stmmac_rx_buf2_len(priv, p, status, len); 5366 len += buf2_len; 5367 5368 /* ACS is disabled; strip manually. 
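		 * On the last descriptor the 4-byte FCS is removed from
		 * buffer2 when it holds data (split header case), otherwise
		 * from buffer1.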
*/ 5369 if (likely(!(status & rx_not_ls))) { 5370 if (buf2_len) { 5371 buf2_len -= ETH_FCS_LEN; 5372 len -= ETH_FCS_LEN; 5373 } else if (buf1_len) { 5374 buf1_len -= ETH_FCS_LEN; 5375 len -= ETH_FCS_LEN; 5376 } 5377 } 5378 5379 if (!skb) { 5380 unsigned int pre_len, sync_len; 5381 5382 dma_sync_single_for_cpu(priv->device, buf->addr, 5383 buf1_len, dma_dir); 5384 5385 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq); 5386 xdp_prepare_buff(&ctx.xdp, page_address(buf->page), 5387 buf->page_offset, buf1_len, true); 5388 5389 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - 5390 buf->page_offset; 5391 5392 ctx.priv = priv; 5393 ctx.desc = p; 5394 ctx.ndesc = np; 5395 5396 skb = stmmac_xdp_run_prog(priv, &ctx.xdp); 5397 /* Due xdp_adjust_tail: DMA sync for_device 5398 * cover max len CPU touch 5399 */ 5400 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - 5401 buf->page_offset; 5402 sync_len = max(sync_len, pre_len); 5403 5404 /* For Not XDP_PASS verdict */ 5405 if (IS_ERR(skb)) { 5406 unsigned int xdp_res = -PTR_ERR(skb); 5407 5408 if (xdp_res & STMMAC_XDP_CONSUMED) { 5409 page_pool_put_page(rx_q->page_pool, 5410 virt_to_head_page(ctx.xdp.data), 5411 sync_len, true); 5412 buf->page = NULL; 5413 rx_dropped++; 5414 5415 /* Clear skb as it was set as 5416 * status by XDP program. 5417 */ 5418 skb = NULL; 5419 5420 if (unlikely((status & rx_not_ls))) 5421 goto read_again; 5422 5423 count++; 5424 continue; 5425 } else if (xdp_res & (STMMAC_XDP_TX | 5426 STMMAC_XDP_REDIRECT)) { 5427 xdp_status |= xdp_res; 5428 buf->page = NULL; 5429 skb = NULL; 5430 count++; 5431 continue; 5432 } 5433 } 5434 } 5435 5436 if (!skb) { 5437 /* XDP program may expand or reduce tail */ 5438 buf1_len = ctx.xdp.data_end - ctx.xdp.data; 5439 5440 skb = napi_alloc_skb(&ch->rx_napi, buf1_len); 5441 if (!skb) { 5442 rx_dropped++; 5443 count++; 5444 goto drain_data; 5445 } 5446 5447 /* XDP program may adjust header */ 5448 skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len); 5449 skb_put(skb, buf1_len); 5450 5451 /* Data payload copied into SKB, page ready for recycle */ 5452 page_pool_recycle_direct(rx_q->page_pool, buf->page); 5453 buf->page = NULL; 5454 } else if (buf1_len) { 5455 dma_sync_single_for_cpu(priv->device, buf->addr, 5456 buf1_len, dma_dir); 5457 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 5458 buf->page, buf->page_offset, buf1_len, 5459 priv->dma_conf.dma_buf_sz); 5460 5461 /* Data payload appended into SKB */ 5462 skb_mark_for_recycle(skb); 5463 buf->page = NULL; 5464 } 5465 5466 if (buf2_len) { 5467 dma_sync_single_for_cpu(priv->device, buf->sec_addr, 5468 buf2_len, dma_dir); 5469 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 5470 buf->sec_page, 0, buf2_len, 5471 priv->dma_conf.dma_buf_sz); 5472 5473 /* Data payload appended into SKB */ 5474 skb_mark_for_recycle(skb); 5475 buf->sec_page = NULL; 5476 } 5477 5478 drain_data: 5479 if (likely(status & rx_not_ls)) 5480 goto read_again; 5481 if (!skb) 5482 continue; 5483 5484 /* Got entire packet into SKB. Finish it. 
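		 * The RX timestamp, VLAN tag, checksum status and RSS hash
		 * are propagated to the skb before it is passed up via GRO.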
*/ 5485 5486 stmmac_get_rx_hwtstamp(priv, p, np, skb); 5487 stmmac_rx_vlan(priv->dev, skb); 5488 skb->protocol = eth_type_trans(skb, priv->dev); 5489 5490 if (unlikely(!coe)) 5491 skb_checksum_none_assert(skb); 5492 else 5493 skb->ip_summed = CHECKSUM_UNNECESSARY; 5494 5495 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 5496 skb_set_hash(skb, hash, hash_type); 5497 5498 skb_record_rx_queue(skb, queue); 5499 napi_gro_receive(&ch->rx_napi, skb); 5500 skb = NULL; 5501 5502 rx_packets++; 5503 rx_bytes += len; 5504 count++; 5505 } 5506 5507 if (status & rx_not_ls || skb) { 5508 rx_q->state_saved = true; 5509 rx_q->state.skb = skb; 5510 rx_q->state.error = error; 5511 rx_q->state.len = len; 5512 } 5513 5514 stmmac_finalize_xdp_rx(priv, xdp_status); 5515 5516 stmmac_rx_refill(priv, queue); 5517 5518 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5519 rxq_stats->rx_packets += rx_packets; 5520 rxq_stats->rx_bytes += rx_bytes; 5521 rxq_stats->rx_pkt_n += count; 5522 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5523 5524 priv->xstats.rx_dropped += rx_dropped; 5525 priv->xstats.rx_errors += rx_errors; 5526 5527 return count; 5528 } 5529 5530 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) 5531 { 5532 struct stmmac_channel *ch = 5533 container_of(napi, struct stmmac_channel, rx_napi); 5534 struct stmmac_priv *priv = ch->priv_data; 5535 struct stmmac_rxq_stats *rxq_stats; 5536 u32 chan = ch->index; 5537 unsigned long flags; 5538 int work_done; 5539 5540 rxq_stats = &priv->xstats.rxq_stats[chan]; 5541 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5542 rxq_stats->napi_poll++; 5543 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); 5544 5545 work_done = stmmac_rx(priv, budget, chan); 5546 if (work_done < budget && napi_complete_done(napi, work_done)) { 5547 unsigned long flags; 5548 5549 spin_lock_irqsave(&ch->lock, flags); 5550 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 5551 spin_unlock_irqrestore(&ch->lock, flags); 5552 } 5553 5554 return work_done; 5555 } 5556 5557 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 5558 { 5559 struct stmmac_channel *ch = 5560 container_of(napi, struct stmmac_channel, tx_napi); 5561 struct stmmac_priv *priv = ch->priv_data; 5562 struct stmmac_txq_stats *txq_stats; 5563 u32 chan = ch->index; 5564 unsigned long flags; 5565 int work_done; 5566 5567 txq_stats = &priv->xstats.txq_stats[chan]; 5568 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); 5569 txq_stats->napi_poll++; 5570 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); 5571 5572 work_done = stmmac_tx_clean(priv, budget, chan); 5573 work_done = min(work_done, budget); 5574 5575 if (work_done < budget && napi_complete_done(napi, work_done)) { 5576 unsigned long flags; 5577 5578 spin_lock_irqsave(&ch->lock, flags); 5579 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 5580 spin_unlock_irqrestore(&ch->lock, flags); 5581 } 5582 5583 return work_done; 5584 } 5585 5586 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) 5587 { 5588 struct stmmac_channel *ch = 5589 container_of(napi, struct stmmac_channel, rxtx_napi); 5590 struct stmmac_priv *priv = ch->priv_data; 5591 int rx_done, tx_done, rxtx_done; 5592 struct stmmac_rxq_stats *rxq_stats; 5593 struct stmmac_txq_stats *txq_stats; 5594 u32 chan = ch->index; 5595 unsigned long flags; 5596 5597 rxq_stats = &priv->xstats.rxq_stats[chan]; 5598 flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); 5599 rxq_stats->napi_poll++; 
5600 u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
5601
5602 txq_stats = &priv->xstats.txq_stats[chan];
5603 flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
5604 txq_stats->napi_poll++;
5605 u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
5606
5607 tx_done = stmmac_tx_clean(priv, budget, chan);
5608 tx_done = min(tx_done, budget);
5609
5610 rx_done = stmmac_rx_zc(priv, budget, chan);
5611
5612 rxtx_done = max(tx_done, rx_done);
5613
5614 /* If either TX or RX work is not complete, return budget
5615 * and keep polling
5616 */
5617 if (rxtx_done >= budget)
5618 return budget;
5619
5620 /* all work done, exit the polling mode */
5621 if (napi_complete_done(napi, rxtx_done)) {
5622 unsigned long flags;
5623
5624 spin_lock_irqsave(&ch->lock, flags);
5625 /* Both RX and TX work are complete,
5626 * so enable both RX & TX IRQs.
5627 */
5628 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5629 spin_unlock_irqrestore(&ch->lock, flags);
5630 }
5631
5632 return min(rxtx_done, budget - 1);
5633 }
5634
5635 /**
5636 * stmmac_tx_timeout
5637 * @dev : Pointer to net device structure
5638 * @txqueue: the index of the hanging transmit queue
5639 * Description: this function is called when a packet transmission fails to
5640 * complete within a reasonable time. The driver will mark the error in the
5641 * netdev structure and arrange for the device to be reset to a sane state
5642 * in order to transmit a new packet.
5643 */
5644 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5645 {
5646 struct stmmac_priv *priv = netdev_priv(dev);
5647
5648 stmmac_global_err(priv);
5649 }
5650
5651 /**
5652 * stmmac_set_rx_mode - entry point for multicast addressing
5653 * @dev : pointer to the device structure
5654 * Description:
5655 * This function is a driver entry point which gets called by the kernel
5656 * whenever multicast addresses must be enabled/disabled.
5657 * Return value:
5658 * void.
5659 */
5660 static void stmmac_set_rx_mode(struct net_device *dev)
5661 {
5662 struct stmmac_priv *priv = netdev_priv(dev);
5663
5664 stmmac_set_filter(priv, priv->hw, dev);
5665 }
5666
5667 /**
5668 * stmmac_change_mtu - entry point to change MTU size for the device.
5669 * @dev : device pointer.
5670 * @new_mtu : the new MTU size for the device.
5671 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5672 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5673 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5674 * Return value:
5675 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5676 * file on failure.
5677 */ 5678 static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 5679 { 5680 struct stmmac_priv *priv = netdev_priv(dev); 5681 int txfifosz = priv->plat->tx_fifo_size; 5682 struct stmmac_dma_conf *dma_conf; 5683 const int mtu = new_mtu; 5684 int ret; 5685 5686 if (txfifosz == 0) 5687 txfifosz = priv->dma_cap.tx_fifo_size; 5688 5689 txfifosz /= priv->plat->tx_queues_to_use; 5690 5691 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) { 5692 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); 5693 return -EINVAL; 5694 } 5695 5696 new_mtu = STMMAC_ALIGN(new_mtu); 5697 5698 /* If condition true, FIFO is too small or MTU too large */ 5699 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) 5700 return -EINVAL; 5701 5702 if (netif_running(dev)) { 5703 netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); 5704 /* Try to allocate the new DMA conf with the new mtu */ 5705 dma_conf = stmmac_setup_dma_desc(priv, mtu); 5706 if (IS_ERR(dma_conf)) { 5707 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", 5708 mtu); 5709 return PTR_ERR(dma_conf); 5710 } 5711 5712 stmmac_release(dev); 5713 5714 ret = __stmmac_open(dev, dma_conf); 5715 if (ret) { 5716 free_dma_desc_resources(priv, dma_conf); 5717 kfree(dma_conf); 5718 netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); 5719 return ret; 5720 } 5721 5722 kfree(dma_conf); 5723 5724 stmmac_set_rx_mode(dev); 5725 } 5726 5727 dev->mtu = mtu; 5728 netdev_update_features(dev); 5729 5730 return 0; 5731 } 5732 5733 static netdev_features_t stmmac_fix_features(struct net_device *dev, 5734 netdev_features_t features) 5735 { 5736 struct stmmac_priv *priv = netdev_priv(dev); 5737 5738 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 5739 features &= ~NETIF_F_RXCSUM; 5740 5741 if (!priv->plat->tx_coe) 5742 features &= ~NETIF_F_CSUM_MASK; 5743 5744 /* Some GMAC devices have a bugged Jumbo frame support that 5745 * needs to have the Tx COE disabled for oversized frames 5746 * (due to limited buffer sizes). In this case we disable 5747 * the TX csum insertion in the TDES and not use SF. 5748 */ 5749 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 5750 features &= ~NETIF_F_CSUM_MASK; 5751 5752 /* Disable tso if asked by ethtool */ 5753 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { 5754 if (features & NETIF_F_TSO) 5755 priv->tso = true; 5756 else 5757 priv->tso = false; 5758 } 5759 5760 return features; 5761 } 5762 5763 static int stmmac_set_features(struct net_device *netdev, 5764 netdev_features_t features) 5765 { 5766 struct stmmac_priv *priv = netdev_priv(netdev); 5767 5768 /* Keep the COE Type in case of csum is supporting */ 5769 if (features & NETIF_F_RXCSUM) 5770 priv->hw->rx_csum = priv->plat->rx_coe; 5771 else 5772 priv->hw->rx_csum = 0; 5773 /* No check needed because rx_coe has been set before and it will be 5774 * fixed in case of issue. 
5775 */ 5776 stmmac_rx_ipc(priv, priv->hw); 5777 5778 if (priv->sph_cap) { 5779 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; 5780 u32 chan; 5781 5782 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) 5783 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 5784 } 5785 5786 return 0; 5787 } 5788 5789 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status) 5790 { 5791 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 5792 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 5793 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 5794 bool *hs_enable = &fpe_cfg->hs_enable; 5795 5796 if (status == FPE_EVENT_UNKNOWN || !*hs_enable) 5797 return; 5798 5799 /* If LP has sent verify mPacket, LP is FPE capable */ 5800 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) { 5801 if (*lp_state < FPE_STATE_CAPABLE) 5802 *lp_state = FPE_STATE_CAPABLE; 5803 5804 /* If user has requested FPE enable, quickly response */ 5805 if (*hs_enable) 5806 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 5807 MPACKET_RESPONSE); 5808 } 5809 5810 /* If Local has sent verify mPacket, Local is FPE capable */ 5811 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) { 5812 if (*lo_state < FPE_STATE_CAPABLE) 5813 *lo_state = FPE_STATE_CAPABLE; 5814 } 5815 5816 /* If LP has sent response mPacket, LP is entering FPE ON */ 5817 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP) 5818 *lp_state = FPE_STATE_ENTERING_ON; 5819 5820 /* If Local has sent response mPacket, Local is entering FPE ON */ 5821 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP) 5822 *lo_state = FPE_STATE_ENTERING_ON; 5823 5824 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && 5825 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && 5826 priv->fpe_wq) { 5827 queue_work(priv->fpe_wq, &priv->fpe_task); 5828 } 5829 } 5830 5831 static void stmmac_common_interrupt(struct stmmac_priv *priv) 5832 { 5833 u32 rx_cnt = priv->plat->rx_queues_to_use; 5834 u32 tx_cnt = priv->plat->tx_queues_to_use; 5835 u32 queues_count; 5836 u32 queue; 5837 bool xmac; 5838 5839 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 5840 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; 5841 5842 if (priv->irq_wake) 5843 pm_wakeup_event(priv->device, 0); 5844 5845 if (priv->dma_cap.estsel) 5846 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev, 5847 &priv->xstats, tx_cnt); 5848 5849 if (priv->dma_cap.fpesel) { 5850 int status = stmmac_fpe_irq_status(priv, priv->ioaddr, 5851 priv->dev); 5852 5853 stmmac_fpe_event_status(priv, status); 5854 } 5855 5856 /* To handle GMAC own interrupts */ 5857 if ((priv->plat->has_gmac) || xmac) { 5858 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); 5859 5860 if (unlikely(status)) { 5861 /* For LPI we need to save the tx status */ 5862 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) 5863 priv->tx_path_in_lpi_mode = true; 5864 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 5865 priv->tx_path_in_lpi_mode = false; 5866 } 5867 5868 for (queue = 0; queue < queues_count; queue++) { 5869 status = stmmac_host_mtl_irq_status(priv, priv->hw, 5870 queue); 5871 } 5872 5873 /* PCS link status */ 5874 if (priv->hw->pcs && 5875 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { 5876 if (priv->xstats.pcs_link) 5877 netif_carrier_on(priv->dev); 5878 else 5879 netif_carrier_off(priv->dev); 5880 } 5881 5882 stmmac_timestamp_interrupt(priv, priv); 5883 } 5884 } 5885 5886 /** 5887 * stmmac_interrupt - main ISR 5888 * @irq: interrupt number. 5889 * @dev_id: to pass the net device pointer. 
5890 * Description: this is the main driver interrupt service routine. 5891 * It can call: 5892 * o DMA service routine (to manage incoming frame reception and transmission 5893 * status) 5894 * o Core interrupts to manage: remote wake-up, management counter, LPI 5895 * interrupts. 5896 */ 5897 static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 5898 { 5899 struct net_device *dev = (struct net_device *)dev_id; 5900 struct stmmac_priv *priv = netdev_priv(dev); 5901 5902 /* Check if adapter is up */ 5903 if (test_bit(STMMAC_DOWN, &priv->state)) 5904 return IRQ_HANDLED; 5905 5906 /* Check if a fatal error happened */ 5907 if (stmmac_safety_feat_interrupt(priv)) 5908 return IRQ_HANDLED; 5909 5910 /* To handle Common interrupts */ 5911 stmmac_common_interrupt(priv); 5912 5913 /* To handle DMA interrupts */ 5914 stmmac_dma_interrupt(priv); 5915 5916 return IRQ_HANDLED; 5917 } 5918 5919 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) 5920 { 5921 struct net_device *dev = (struct net_device *)dev_id; 5922 struct stmmac_priv *priv = netdev_priv(dev); 5923 5924 if (unlikely(!dev)) { 5925 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5926 return IRQ_NONE; 5927 } 5928 5929 /* Check if adapter is up */ 5930 if (test_bit(STMMAC_DOWN, &priv->state)) 5931 return IRQ_HANDLED; 5932 5933 /* To handle Common interrupts */ 5934 stmmac_common_interrupt(priv); 5935 5936 return IRQ_HANDLED; 5937 } 5938 5939 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) 5940 { 5941 struct net_device *dev = (struct net_device *)dev_id; 5942 struct stmmac_priv *priv = netdev_priv(dev); 5943 5944 if (unlikely(!dev)) { 5945 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5946 return IRQ_NONE; 5947 } 5948 5949 /* Check if adapter is up */ 5950 if (test_bit(STMMAC_DOWN, &priv->state)) 5951 return IRQ_HANDLED; 5952 5953 /* Check if a fatal error happened */ 5954 stmmac_safety_feat_interrupt(priv); 5955 5956 return IRQ_HANDLED; 5957 } 5958 5959 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data) 5960 { 5961 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data; 5962 struct stmmac_dma_conf *dma_conf; 5963 int chan = tx_q->queue_index; 5964 struct stmmac_priv *priv; 5965 int status; 5966 5967 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); 5968 priv = container_of(dma_conf, struct stmmac_priv, dma_conf); 5969 5970 if (unlikely(!data)) { 5971 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 5972 return IRQ_NONE; 5973 } 5974 5975 /* Check if adapter is up */ 5976 if (test_bit(STMMAC_DOWN, &priv->state)) 5977 return IRQ_HANDLED; 5978 5979 status = stmmac_napi_check(priv, chan, DMA_DIR_TX); 5980 5981 if (unlikely(status & tx_hard_error_bump_tc)) { 5982 /* Try to bump up the dma threshold on this failure */ 5983 stmmac_bump_dma_threshold(priv, chan); 5984 } else if (unlikely(status == tx_hard_error)) { 5985 stmmac_tx_err(priv, chan); 5986 } 5987 5988 return IRQ_HANDLED; 5989 } 5990 5991 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) 5992 { 5993 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data; 5994 struct stmmac_dma_conf *dma_conf; 5995 int chan = rx_q->queue_index; 5996 struct stmmac_priv *priv; 5997 5998 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); 5999 priv = container_of(dma_conf, struct stmmac_priv, dma_conf); 6000 6001 if (unlikely(!data)) { 6002 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 6003 return IRQ_NONE; 6004 } 6005 6006 /* Check if adapter is 
up */ 6007 if (test_bit(STMMAC_DOWN, &priv->state)) 6008 return IRQ_HANDLED; 6009 6010 stmmac_napi_check(priv, chan, DMA_DIR_RX); 6011 6012 return IRQ_HANDLED; 6013 } 6014 6015 #ifdef CONFIG_NET_POLL_CONTROLLER 6016 /* Polling receive - used by NETCONSOLE and other diagnostic tools 6017 * to allow network I/O with interrupts disabled. 6018 */ 6019 static void stmmac_poll_controller(struct net_device *dev) 6020 { 6021 struct stmmac_priv *priv = netdev_priv(dev); 6022 int i; 6023 6024 /* If adapter is down, do nothing */ 6025 if (test_bit(STMMAC_DOWN, &priv->state)) 6026 return; 6027 6028 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) { 6029 for (i = 0; i < priv->plat->rx_queues_to_use; i++) 6030 stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]); 6031 6032 for (i = 0; i < priv->plat->tx_queues_to_use; i++) 6033 stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]); 6034 } else { 6035 disable_irq(dev->irq); 6036 stmmac_interrupt(dev->irq, dev); 6037 enable_irq(dev->irq); 6038 } 6039 } 6040 #endif 6041 6042 /** 6043 * stmmac_ioctl - Entry point for the Ioctl 6044 * @dev: Device pointer. 6045 * @rq: An IOCTL specefic structure, that can contain a pointer to 6046 * a proprietary structure used to pass information to the driver. 6047 * @cmd: IOCTL command 6048 * Description: 6049 * Currently it supports the phy_mii_ioctl(...) and HW time stamping. 6050 */ 6051 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 6052 { 6053 struct stmmac_priv *priv = netdev_priv (dev); 6054 int ret = -EOPNOTSUPP; 6055 6056 if (!netif_running(dev)) 6057 return -EINVAL; 6058 6059 switch (cmd) { 6060 case SIOCGMIIPHY: 6061 case SIOCGMIIREG: 6062 case SIOCSMIIREG: 6063 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); 6064 break; 6065 case SIOCSHWTSTAMP: 6066 ret = stmmac_hwtstamp_set(dev, rq); 6067 break; 6068 case SIOCGHWTSTAMP: 6069 ret = stmmac_hwtstamp_get(dev, rq); 6070 break; 6071 default: 6072 break; 6073 } 6074 6075 return ret; 6076 } 6077 6078 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 6079 void *cb_priv) 6080 { 6081 struct stmmac_priv *priv = cb_priv; 6082 int ret = -EOPNOTSUPP; 6083 6084 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) 6085 return ret; 6086 6087 __stmmac_disable_all_queues(priv); 6088 6089 switch (type) { 6090 case TC_SETUP_CLSU32: 6091 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); 6092 break; 6093 case TC_SETUP_CLSFLOWER: 6094 ret = stmmac_tc_setup_cls(priv, priv, type_data); 6095 break; 6096 default: 6097 break; 6098 } 6099 6100 stmmac_enable_all_queues(priv); 6101 return ret; 6102 } 6103 6104 static LIST_HEAD(stmmac_block_cb_list); 6105 6106 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, 6107 void *type_data) 6108 { 6109 struct stmmac_priv *priv = netdev_priv(ndev); 6110 6111 switch (type) { 6112 case TC_QUERY_CAPS: 6113 return stmmac_tc_query_caps(priv, priv, type_data); 6114 case TC_SETUP_BLOCK: 6115 return flow_block_cb_setup_simple(type_data, 6116 &stmmac_block_cb_list, 6117 stmmac_setup_tc_block_cb, 6118 priv, priv, true); 6119 case TC_SETUP_QDISC_CBS: 6120 return stmmac_tc_setup_cbs(priv, priv, type_data); 6121 case TC_SETUP_QDISC_TAPRIO: 6122 return stmmac_tc_setup_taprio(priv, priv, type_data); 6123 case TC_SETUP_QDISC_ETF: 6124 return stmmac_tc_setup_etf(priv, priv, type_data); 6125 default: 6126 return -EOPNOTSUPP; 6127 } 6128 } 6129 6130 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, 6131 struct net_device *sb_dev) 6132 { 6133 int gso = 
skb_shinfo(skb)->gso_type; 6134 6135 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { 6136 /* 6137 * There is no way to determine the number of TSO/USO 6138 * capable Queues. Let's use always the Queue 0 6139 * because if TSO/USO is supported then at least this 6140 * one will be capable. 6141 */ 6142 return 0; 6143 } 6144 6145 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; 6146 } 6147 6148 static int stmmac_set_mac_address(struct net_device *ndev, void *addr) 6149 { 6150 struct stmmac_priv *priv = netdev_priv(ndev); 6151 int ret = 0; 6152 6153 ret = pm_runtime_resume_and_get(priv->device); 6154 if (ret < 0) 6155 return ret; 6156 6157 ret = eth_mac_addr(ndev, addr); 6158 if (ret) 6159 goto set_mac_error; 6160 6161 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); 6162 6163 set_mac_error: 6164 pm_runtime_put(priv->device); 6165 6166 return ret; 6167 } 6168 6169 #ifdef CONFIG_DEBUG_FS 6170 static struct dentry *stmmac_fs_dir; 6171 6172 static void sysfs_display_ring(void *head, int size, int extend_desc, 6173 struct seq_file *seq, dma_addr_t dma_phy_addr) 6174 { 6175 int i; 6176 struct dma_extended_desc *ep = (struct dma_extended_desc *)head; 6177 struct dma_desc *p = (struct dma_desc *)head; 6178 dma_addr_t dma_addr; 6179 6180 for (i = 0; i < size; i++) { 6181 if (extend_desc) { 6182 dma_addr = dma_phy_addr + i * sizeof(*ep); 6183 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 6184 i, &dma_addr, 6185 le32_to_cpu(ep->basic.des0), 6186 le32_to_cpu(ep->basic.des1), 6187 le32_to_cpu(ep->basic.des2), 6188 le32_to_cpu(ep->basic.des3)); 6189 ep++; 6190 } else { 6191 dma_addr = dma_phy_addr + i * sizeof(*p); 6192 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 6193 i, &dma_addr, 6194 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 6195 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 6196 p++; 6197 } 6198 seq_printf(seq, "\n"); 6199 } 6200 } 6201 6202 static int stmmac_rings_status_show(struct seq_file *seq, void *v) 6203 { 6204 struct net_device *dev = seq->private; 6205 struct stmmac_priv *priv = netdev_priv(dev); 6206 u32 rx_count = priv->plat->rx_queues_to_use; 6207 u32 tx_count = priv->plat->tx_queues_to_use; 6208 u32 queue; 6209 6210 if ((dev->flags & IFF_UP) == 0) 6211 return 0; 6212 6213 for (queue = 0; queue < rx_count; queue++) { 6214 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 6215 6216 seq_printf(seq, "RX Queue %d:\n", queue); 6217 6218 if (priv->extend_desc) { 6219 seq_printf(seq, "Extended descriptor ring:\n"); 6220 sysfs_display_ring((void *)rx_q->dma_erx, 6221 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); 6222 } else { 6223 seq_printf(seq, "Descriptor ring:\n"); 6224 sysfs_display_ring((void *)rx_q->dma_rx, 6225 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); 6226 } 6227 } 6228 6229 for (queue = 0; queue < tx_count; queue++) { 6230 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 6231 6232 seq_printf(seq, "TX Queue %d:\n", queue); 6233 6234 if (priv->extend_desc) { 6235 seq_printf(seq, "Extended descriptor ring:\n"); 6236 sysfs_display_ring((void *)tx_q->dma_etx, 6237 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); 6238 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { 6239 seq_printf(seq, "Descriptor ring:\n"); 6240 sysfs_display_ring((void *)tx_q->dma_tx, 6241 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); 6242 } 6243 } 6244 6245 return 0; 6246 } 6247 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); 6248 6249 static int stmmac_dma_cap_show(struct seq_file *seq, void *v) 6250 { 6251 
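/* Render the DMA HW capability (feature) register in human-readable form.
 * This backs the per-device debugfs "dma_cap" entry created in
 * stmmac_init_fs() via DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap).
 */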
static const char * const dwxgmac_timestamp_source[] = { 6252 "None", 6253 "Internal", 6254 "External", 6255 "Both", 6256 }; 6257 static const char * const dwxgmac_safety_feature_desc[] = { 6258 "No", 6259 "All Safety Features with ECC and Parity", 6260 "All Safety Features without ECC or Parity", 6261 "All Safety Features with Parity Only", 6262 "ECC Only", 6263 "UNDEFINED", 6264 "UNDEFINED", 6265 "UNDEFINED", 6266 }; 6267 struct net_device *dev = seq->private; 6268 struct stmmac_priv *priv = netdev_priv(dev); 6269 6270 if (!priv->hw_cap_support) { 6271 seq_printf(seq, "DMA HW features not supported\n"); 6272 return 0; 6273 } 6274 6275 seq_printf(seq, "==============================\n"); 6276 seq_printf(seq, "\tDMA HW features\n"); 6277 seq_printf(seq, "==============================\n"); 6278 6279 seq_printf(seq, "\t10/100 Mbps: %s\n", 6280 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); 6281 seq_printf(seq, "\t1000 Mbps: %s\n", 6282 (priv->dma_cap.mbps_1000) ? "Y" : "N"); 6283 seq_printf(seq, "\tHalf duplex: %s\n", 6284 (priv->dma_cap.half_duplex) ? "Y" : "N"); 6285 if (priv->plat->has_xgmac) { 6286 seq_printf(seq, 6287 "\tNumber of Additional MAC address registers: %d\n", 6288 priv->dma_cap.multi_addr); 6289 } else { 6290 seq_printf(seq, "\tHash Filter: %s\n", 6291 (priv->dma_cap.hash_filter) ? "Y" : "N"); 6292 seq_printf(seq, "\tMultiple MAC address registers: %s\n", 6293 (priv->dma_cap.multi_addr) ? "Y" : "N"); 6294 } 6295 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", 6296 (priv->dma_cap.pcs) ? "Y" : "N"); 6297 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", 6298 (priv->dma_cap.sma_mdio) ? "Y" : "N"); 6299 seq_printf(seq, "\tPMT Remote wake up: %s\n", 6300 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); 6301 seq_printf(seq, "\tPMT Magic Frame: %s\n", 6302 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); 6303 seq_printf(seq, "\tRMON module: %s\n", 6304 (priv->dma_cap.rmon) ? "Y" : "N"); 6305 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", 6306 (priv->dma_cap.time_stamp) ? "Y" : "N"); 6307 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", 6308 (priv->dma_cap.atime_stamp) ? "Y" : "N"); 6309 if (priv->plat->has_xgmac) 6310 seq_printf(seq, "\tTimestamp System Time Source: %s\n", 6311 dwxgmac_timestamp_source[priv->dma_cap.tssrc]); 6312 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", 6313 (priv->dma_cap.eee) ? "Y" : "N"); 6314 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 6315 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 6316 (priv->dma_cap.tx_coe) ? "Y" : "N"); 6317 if (priv->synopsys_id >= DWMAC_CORE_4_00 || 6318 priv->plat->has_xgmac) { 6319 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", 6320 (priv->dma_cap.rx_coe) ? "Y" : "N"); 6321 } else { 6322 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 6323 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 6324 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 6325 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 6326 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 6327 (priv->dma_cap.rxfifo_over_2048) ? 
"Y" : "N"); 6328 } 6329 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 6330 priv->dma_cap.number_rx_channel); 6331 seq_printf(seq, "\tNumber of Additional TX channel: %d\n", 6332 priv->dma_cap.number_tx_channel); 6333 seq_printf(seq, "\tNumber of Additional RX queues: %d\n", 6334 priv->dma_cap.number_rx_queues); 6335 seq_printf(seq, "\tNumber of Additional TX queues: %d\n", 6336 priv->dma_cap.number_tx_queues); 6337 seq_printf(seq, "\tEnhanced descriptors: %s\n", 6338 (priv->dma_cap.enh_desc) ? "Y" : "N"); 6339 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); 6340 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); 6341 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ? 6342 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0); 6343 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); 6344 seq_printf(seq, "\tNumber of PPS Outputs: %d\n", 6345 priv->dma_cap.pps_out_num); 6346 seq_printf(seq, "\tSafety Features: %s\n", 6347 dwxgmac_safety_feature_desc[priv->dma_cap.asp]); 6348 seq_printf(seq, "\tFlexible RX Parser: %s\n", 6349 priv->dma_cap.frpsel ? "Y" : "N"); 6350 seq_printf(seq, "\tEnhanced Addressing: %d\n", 6351 priv->dma_cap.host_dma_width); 6352 seq_printf(seq, "\tReceive Side Scaling: %s\n", 6353 priv->dma_cap.rssen ? "Y" : "N"); 6354 seq_printf(seq, "\tVLAN Hash Filtering: %s\n", 6355 priv->dma_cap.vlhash ? "Y" : "N"); 6356 seq_printf(seq, "\tSplit Header: %s\n", 6357 priv->dma_cap.sphen ? "Y" : "N"); 6358 seq_printf(seq, "\tVLAN TX Insertion: %s\n", 6359 priv->dma_cap.vlins ? "Y" : "N"); 6360 seq_printf(seq, "\tDouble VLAN: %s\n", 6361 priv->dma_cap.dvlan ? "Y" : "N"); 6362 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", 6363 priv->dma_cap.l3l4fnum); 6364 seq_printf(seq, "\tARP Offloading: %s\n", 6365 priv->dma_cap.arpoffsel ? "Y" : "N"); 6366 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", 6367 priv->dma_cap.estsel ? "Y" : "N"); 6368 seq_printf(seq, "\tFrame Preemption (FPE): %s\n", 6369 priv->dma_cap.fpesel ? "Y" : "N"); 6370 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", 6371 priv->dma_cap.tbssel ? "Y" : "N"); 6372 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n", 6373 priv->dma_cap.tbs_ch_num); 6374 seq_printf(seq, "\tPer-Stream Filtering: %s\n", 6375 priv->dma_cap.sgfsel ? "Y" : "N"); 6376 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n", 6377 BIT(priv->dma_cap.ttsfd) >> 1); 6378 seq_printf(seq, "\tNumber of Traffic Classes: %d\n", 6379 priv->dma_cap.numtc); 6380 seq_printf(seq, "\tDCB Feature: %s\n", 6381 priv->dma_cap.dcben ? "Y" : "N"); 6382 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n", 6383 priv->dma_cap.advthword ? "Y" : "N"); 6384 seq_printf(seq, "\tPTP Offload: %s\n", 6385 priv->dma_cap.ptoen ? "Y" : "N"); 6386 seq_printf(seq, "\tOne-Step Timestamping: %s\n", 6387 priv->dma_cap.osten ? "Y" : "N"); 6388 seq_printf(seq, "\tPriority-Based Flow Control: %s\n", 6389 priv->dma_cap.pfcen ? "Y" : "N"); 6390 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n", 6391 BIT(priv->dma_cap.frpes) << 6); 6392 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n", 6393 BIT(priv->dma_cap.frpbs) << 6); 6394 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n", 6395 priv->dma_cap.frppipe_num); 6396 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n", 6397 priv->dma_cap.nrvf_num ? 
6398 (BIT(priv->dma_cap.nrvf_num) << 1) : 0); 6399 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n", 6400 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0); 6401 seq_printf(seq, "\tDepth of GCL: %lu\n", 6402 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0); 6403 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n", 6404 priv->dma_cap.cbtisel ? "Y" : "N"); 6405 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n", 6406 priv->dma_cap.aux_snapshot_n); 6407 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n", 6408 priv->dma_cap.pou_ost_en ? "Y" : "N"); 6409 seq_printf(seq, "\tEnhanced DMA: %s\n", 6410 priv->dma_cap.edma ? "Y" : "N"); 6411 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n", 6412 priv->dma_cap.ediffc ? "Y" : "N"); 6413 seq_printf(seq, "\tVxLAN/NVGRE: %s\n", 6414 priv->dma_cap.vxn ? "Y" : "N"); 6415 seq_printf(seq, "\tDebug Memory Interface: %s\n", 6416 priv->dma_cap.dbgmem ? "Y" : "N"); 6417 seq_printf(seq, "\tNumber of Policing Counters: %lu\n", 6418 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0); 6419 return 0; 6420 } 6421 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); 6422 6423 /* Use network device events to rename debugfs file entries. 6424 */ 6425 static int stmmac_device_event(struct notifier_block *unused, 6426 unsigned long event, void *ptr) 6427 { 6428 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 6429 struct stmmac_priv *priv = netdev_priv(dev); 6430 6431 if (dev->netdev_ops != &stmmac_netdev_ops) 6432 goto done; 6433 6434 switch (event) { 6435 case NETDEV_CHANGENAME: 6436 if (priv->dbgfs_dir) 6437 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, 6438 priv->dbgfs_dir, 6439 stmmac_fs_dir, 6440 dev->name); 6441 break; 6442 } 6443 done: 6444 return NOTIFY_DONE; 6445 } 6446 6447 static struct notifier_block stmmac_notifier = { 6448 .notifier_call = stmmac_device_event, 6449 }; 6450 6451 static void stmmac_init_fs(struct net_device *dev) 6452 { 6453 struct stmmac_priv *priv = netdev_priv(dev); 6454 6455 rtnl_lock(); 6456 6457 /* Create per netdev entries */ 6458 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); 6459 6460 /* Entry to report DMA RX/TX rings */ 6461 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, 6462 &stmmac_rings_status_fops); 6463 6464 /* Entry to report the DMA HW features */ 6465 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, 6466 &stmmac_dma_cap_fops); 6467 6468 rtnl_unlock(); 6469 } 6470 6471 static void stmmac_exit_fs(struct net_device *dev) 6472 { 6473 struct stmmac_priv *priv = netdev_priv(dev); 6474 6475 debugfs_remove_recursive(priv->dbgfs_dir); 6476 } 6477 #endif /* CONFIG_DEBUG_FS */ 6478 6479 static u32 stmmac_vid_crc32_le(__le16 vid_le) 6480 { 6481 unsigned char *data = (unsigned char *)&vid_le; 6482 unsigned char data_byte = 0; 6483 u32 crc = ~0x0; 6484 u32 temp = 0; 6485 int i, bits; 6486 6487 bits = get_bitmask_order(VLAN_VID_MASK); 6488 for (i = 0; i < bits; i++) { 6489 if ((i % 8) == 0) 6490 data_byte = data[i / 8]; 6491 6492 temp = ((crc & 1) ^ data_byte) & 1; 6493 crc >>= 1; 6494 data_byte >>= 1; 6495 6496 if (temp) 6497 crc ^= 0xedb88320; 6498 } 6499 6500 return crc; 6501 } 6502 6503 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) 6504 { 6505 u32 crc, hash = 0; 6506 __le16 pmatch = 0; 6507 int count = 0; 6508 u16 vid = 0; 6509 6510 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { 6511 __le16 vid_le = cpu_to_le16(vid); 6512 crc = 
bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; 6513 hash |= (1 << crc); 6514 count++; 6515 } 6516 6517 if (!priv->dma_cap.vlhash) { 6518 if (count > 2) /* VID = 0 always passes filter */ 6519 return -EOPNOTSUPP; 6520 6521 pmatch = cpu_to_le16(vid); 6522 hash = 0; 6523 } 6524 6525 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); 6526 } 6527 6528 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) 6529 { 6530 struct stmmac_priv *priv = netdev_priv(ndev); 6531 bool is_double = false; 6532 int ret; 6533 6534 ret = pm_runtime_resume_and_get(priv->device); 6535 if (ret < 0) 6536 return ret; 6537 6538 if (be16_to_cpu(proto) == ETH_P_8021AD) 6539 is_double = true; 6540 6541 set_bit(vid, priv->active_vlans); 6542 ret = stmmac_vlan_update(priv, is_double); 6543 if (ret) { 6544 clear_bit(vid, priv->active_vlans); 6545 goto err_pm_put; 6546 } 6547 6548 if (priv->hw->num_vlan) { 6549 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 6550 if (ret) 6551 goto err_pm_put; 6552 } 6553 err_pm_put: 6554 pm_runtime_put(priv->device); 6555 6556 return ret; 6557 } 6558 6559 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) 6560 { 6561 struct stmmac_priv *priv = netdev_priv(ndev); 6562 bool is_double = false; 6563 int ret; 6564 6565 ret = pm_runtime_resume_and_get(priv->device); 6566 if (ret < 0) 6567 return ret; 6568 6569 if (be16_to_cpu(proto) == ETH_P_8021AD) 6570 is_double = true; 6571 6572 clear_bit(vid, priv->active_vlans); 6573 6574 if (priv->hw->num_vlan) { 6575 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 6576 if (ret) 6577 goto del_vlan_error; 6578 } 6579 6580 ret = stmmac_vlan_update(priv, is_double); 6581 6582 del_vlan_error: 6583 pm_runtime_put(priv->device); 6584 6585 return ret; 6586 } 6587 6588 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) 6589 { 6590 struct stmmac_priv *priv = netdev_priv(dev); 6591 6592 switch (bpf->command) { 6593 case XDP_SETUP_PROG: 6594 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); 6595 case XDP_SETUP_XSK_POOL: 6596 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, 6597 bpf->xsk.queue_id); 6598 default: 6599 return -EOPNOTSUPP; 6600 } 6601 } 6602 6603 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames, 6604 struct xdp_frame **frames, u32 flags) 6605 { 6606 struct stmmac_priv *priv = netdev_priv(dev); 6607 int cpu = smp_processor_id(); 6608 struct netdev_queue *nq; 6609 int i, nxmit = 0; 6610 int queue; 6611 6612 if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) 6613 return -ENETDOWN; 6614 6615 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 6616 return -EINVAL; 6617 6618 queue = stmmac_xdp_get_tx_queue(priv, cpu); 6619 nq = netdev_get_tx_queue(priv->dev, queue); 6620 6621 __netif_tx_lock(nq, cpu); 6622 /* Avoids TX time-out as we are sharing with slow path */ 6623 txq_trans_cond_update(nq); 6624 6625 for (i = 0; i < num_frames; i++) { 6626 int res; 6627 6628 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); 6629 if (res == STMMAC_XDP_CONSUMED) 6630 break; 6631 6632 nxmit++; 6633 } 6634 6635 if (flags & XDP_XMIT_FLUSH) { 6636 stmmac_flush_tx_descriptors(priv, queue); 6637 stmmac_tx_timer_arm(priv, queue); 6638 } 6639 6640 __netif_tx_unlock(nq); 6641 6642 return nxmit; 6643 } 6644 6645 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) 6646 { 6647 struct stmmac_channel *ch = &priv->channel[queue]; 6648 unsigned long flags; 6649 6650 spin_lock_irqsave(&ch->lock, flags); 6651 
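/* Mask the RX interrupt of this DMA channel while the queue is torn
 * down; stmmac_enable_rx_queue() re-enables it once the ring has been
 * rebuilt.
 */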
stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6652 spin_unlock_irqrestore(&ch->lock, flags); 6653 6654 stmmac_stop_rx_dma(priv, queue); 6655 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6656 } 6657 6658 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) 6659 { 6660 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 6661 struct stmmac_channel *ch = &priv->channel[queue]; 6662 unsigned long flags; 6663 u32 buf_size; 6664 int ret; 6665 6666 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6667 if (ret) { 6668 netdev_err(priv->dev, "Failed to alloc RX desc.\n"); 6669 return; 6670 } 6671 6672 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); 6673 if (ret) { 6674 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); 6675 netdev_err(priv->dev, "Failed to init RX desc.\n"); 6676 return; 6677 } 6678 6679 stmmac_reset_rx_queue(priv, queue); 6680 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); 6681 6682 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6683 rx_q->dma_rx_phy, rx_q->queue_index); 6684 6685 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * 6686 sizeof(struct dma_desc)); 6687 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6688 rx_q->rx_tail_addr, rx_q->queue_index); 6689 6690 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6691 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6692 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6693 buf_size, 6694 rx_q->queue_index); 6695 } else { 6696 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6697 priv->dma_conf.dma_buf_sz, 6698 rx_q->queue_index); 6699 } 6700 6701 stmmac_start_rx_dma(priv, queue); 6702 6703 spin_lock_irqsave(&ch->lock, flags); 6704 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); 6705 spin_unlock_irqrestore(&ch->lock, flags); 6706 } 6707 6708 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) 6709 { 6710 struct stmmac_channel *ch = &priv->channel[queue]; 6711 unsigned long flags; 6712 6713 spin_lock_irqsave(&ch->lock, flags); 6714 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); 6715 spin_unlock_irqrestore(&ch->lock, flags); 6716 6717 stmmac_stop_tx_dma(priv, queue); 6718 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6719 } 6720 6721 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) 6722 { 6723 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 6724 struct stmmac_channel *ch = &priv->channel[queue]; 6725 unsigned long flags; 6726 int ret; 6727 6728 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6729 if (ret) { 6730 netdev_err(priv->dev, "Failed to alloc TX desc.\n"); 6731 return; 6732 } 6733 6734 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); 6735 if (ret) { 6736 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); 6737 netdev_err(priv->dev, "Failed to init TX desc.\n"); 6738 return; 6739 } 6740 6741 stmmac_reset_tx_queue(priv, queue); 6742 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); 6743 6744 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6745 tx_q->dma_tx_phy, tx_q->queue_index); 6746 6747 if (tx_q->tbs & STMMAC_TBS_AVAIL) 6748 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); 6749 6750 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 6751 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6752 tx_q->tx_tail_addr, tx_q->queue_index); 6753 6754 stmmac_start_tx_dma(priv, queue); 6755 6756 spin_lock_irqsave(&ch->lock, flags); 6757 stmmac_enable_dma_irq(priv, priv->ioaddr, 
queue, 0, 1); 6758 spin_unlock_irqrestore(&ch->lock, flags); 6759 } 6760 6761 void stmmac_xdp_release(struct net_device *dev) 6762 { 6763 struct stmmac_priv *priv = netdev_priv(dev); 6764 u32 chan; 6765 6766 /* Ensure tx function is not running */ 6767 netif_tx_disable(dev); 6768 6769 /* Disable NAPI process */ 6770 stmmac_disable_all_queues(priv); 6771 6772 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 6773 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 6774 6775 /* Free the IRQ lines */ 6776 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0); 6777 6778 /* Stop TX/RX DMA channels */ 6779 stmmac_stop_all_dma(priv); 6780 6781 /* Release and free the Rx/Tx resources */ 6782 free_dma_desc_resources(priv, &priv->dma_conf); 6783 6784 /* Disable the MAC Rx/Tx */ 6785 stmmac_mac_set(priv, priv->ioaddr, false); 6786 6787 /* set trans_start so we don't get spurious 6788 * watchdogs during reset 6789 */ 6790 netif_trans_update(dev); 6791 netif_carrier_off(dev); 6792 } 6793 6794 int stmmac_xdp_open(struct net_device *dev) 6795 { 6796 struct stmmac_priv *priv = netdev_priv(dev); 6797 u32 rx_cnt = priv->plat->rx_queues_to_use; 6798 u32 tx_cnt = priv->plat->tx_queues_to_use; 6799 u32 dma_csr_ch = max(rx_cnt, tx_cnt); 6800 struct stmmac_rx_queue *rx_q; 6801 struct stmmac_tx_queue *tx_q; 6802 u32 buf_size; 6803 bool sph_en; 6804 u32 chan; 6805 int ret; 6806 6807 ret = alloc_dma_desc_resources(priv, &priv->dma_conf); 6808 if (ret < 0) { 6809 netdev_err(dev, "%s: DMA descriptors allocation failed\n", 6810 __func__); 6811 goto dma_desc_error; 6812 } 6813 6814 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); 6815 if (ret < 0) { 6816 netdev_err(dev, "%s: DMA descriptors initialization failed\n", 6817 __func__); 6818 goto init_error; 6819 } 6820 6821 stmmac_reset_queues_param(priv); 6822 6823 /* DMA CSR Channel configuration */ 6824 for (chan = 0; chan < dma_csr_ch; chan++) { 6825 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 6826 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); 6827 } 6828 6829 /* Adjust Split header */ 6830 sph_en = (priv->hw->rx_csum > 0) && priv->sph; 6831 6832 /* DMA RX Channel Configuration */ 6833 for (chan = 0; chan < rx_cnt; chan++) { 6834 rx_q = &priv->dma_conf.rx_queue[chan]; 6835 6836 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6837 rx_q->dma_rx_phy, chan); 6838 6839 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 6840 (rx_q->buf_alloc_num * 6841 sizeof(struct dma_desc)); 6842 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 6843 rx_q->rx_tail_addr, chan); 6844 6845 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { 6846 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); 6847 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6848 buf_size, 6849 rx_q->queue_index); 6850 } else { 6851 stmmac_set_dma_bfsize(priv, priv->ioaddr, 6852 priv->dma_conf.dma_buf_sz, 6853 rx_q->queue_index); 6854 } 6855 6856 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); 6857 } 6858 6859 /* DMA TX Channel Configuration */ 6860 for (chan = 0; chan < tx_cnt; chan++) { 6861 tx_q = &priv->dma_conf.tx_queue[chan]; 6862 6863 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 6864 tx_q->dma_tx_phy, chan); 6865 6866 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 6867 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 6868 tx_q->tx_tail_addr, chan); 6869 6870 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 6871 tx_q->txtimer.function = stmmac_tx_timer; 6872 } 6873 6874 /* Enable the MAC Rx/Tx */ 6875 stmmac_mac_set(priv, priv->ioaddr, true); 6876 6877 /* Start 
Rx & Tx DMA Channels */ 6878 stmmac_start_all_dma(priv); 6879 6880 ret = stmmac_request_irq(dev); 6881 if (ret) 6882 goto irq_error; 6883 6884 /* Enable NAPI process*/ 6885 stmmac_enable_all_queues(priv); 6886 netif_carrier_on(dev); 6887 netif_tx_start_all_queues(dev); 6888 stmmac_enable_all_dma_irq(priv); 6889 6890 return 0; 6891 6892 irq_error: 6893 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 6894 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 6895 6896 stmmac_hw_teardown(dev); 6897 init_error: 6898 free_dma_desc_resources(priv, &priv->dma_conf); 6899 dma_desc_error: 6900 return ret; 6901 } 6902 6903 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) 6904 { 6905 struct stmmac_priv *priv = netdev_priv(dev); 6906 struct stmmac_rx_queue *rx_q; 6907 struct stmmac_tx_queue *tx_q; 6908 struct stmmac_channel *ch; 6909 6910 if (test_bit(STMMAC_DOWN, &priv->state) || 6911 !netif_carrier_ok(priv->dev)) 6912 return -ENETDOWN; 6913 6914 if (!stmmac_xdp_is_enabled(priv)) 6915 return -EINVAL; 6916 6917 if (queue >= priv->plat->rx_queues_to_use || 6918 queue >= priv->plat->tx_queues_to_use) 6919 return -EINVAL; 6920 6921 rx_q = &priv->dma_conf.rx_queue[queue]; 6922 tx_q = &priv->dma_conf.tx_queue[queue]; 6923 ch = &priv->channel[queue]; 6924 6925 if (!rx_q->xsk_pool && !tx_q->xsk_pool) 6926 return -EINVAL; 6927 6928 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { 6929 /* EQoS does not have per-DMA channel SW interrupt, 6930 * so we schedule RX Napi straight-away. 6931 */ 6932 if (likely(napi_schedule_prep(&ch->rxtx_napi))) 6933 __napi_schedule(&ch->rxtx_napi); 6934 } 6935 6936 return 0; 6937 } 6938 6939 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) 6940 { 6941 struct stmmac_priv *priv = netdev_priv(dev); 6942 u32 tx_cnt = priv->plat->tx_queues_to_use; 6943 u32 rx_cnt = priv->plat->rx_queues_to_use; 6944 unsigned int start; 6945 int q; 6946 6947 for (q = 0; q < tx_cnt; q++) { 6948 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; 6949 u64 tx_packets; 6950 u64 tx_bytes; 6951 6952 do { 6953 start = u64_stats_fetch_begin(&txq_stats->syncp); 6954 tx_packets = txq_stats->tx_packets; 6955 tx_bytes = txq_stats->tx_bytes; 6956 } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); 6957 6958 stats->tx_packets += tx_packets; 6959 stats->tx_bytes += tx_bytes; 6960 } 6961 6962 for (q = 0; q < rx_cnt; q++) { 6963 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; 6964 u64 rx_packets; 6965 u64 rx_bytes; 6966 6967 do { 6968 start = u64_stats_fetch_begin(&rxq_stats->syncp); 6969 rx_packets = rxq_stats->rx_packets; 6970 rx_bytes = rxq_stats->rx_bytes; 6971 } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); 6972 6973 stats->rx_packets += rx_packets; 6974 stats->rx_bytes += rx_bytes; 6975 } 6976 6977 stats->rx_dropped = priv->xstats.rx_dropped; 6978 stats->rx_errors = priv->xstats.rx_errors; 6979 stats->tx_dropped = priv->xstats.tx_dropped; 6980 stats->tx_errors = priv->xstats.tx_errors; 6981 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier; 6982 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision; 6983 stats->rx_length_errors = priv->xstats.rx_length; 6984 stats->rx_crc_errors = priv->xstats.rx_crc_errors; 6985 stats->rx_over_errors = priv->xstats.rx_overflow_cntr; 6986 stats->rx_missed_errors = priv->xstats.rx_missed_cntr; 6987 } 6988 6989 static const struct net_device_ops stmmac_netdev_ops = { 6990 .ndo_open = stmmac_open, 6991 
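/* stmmac_open()/stmmac_release() are also reused internally when the
 * ring sizes, queue counts or MTU change, since those paths fully
 * restart the interface.
 */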
.ndo_start_xmit = stmmac_xmit, 6992 .ndo_stop = stmmac_release, 6993 .ndo_change_mtu = stmmac_change_mtu, 6994 .ndo_fix_features = stmmac_fix_features, 6995 .ndo_set_features = stmmac_set_features, 6996 .ndo_set_rx_mode = stmmac_set_rx_mode, 6997 .ndo_tx_timeout = stmmac_tx_timeout, 6998 .ndo_eth_ioctl = stmmac_ioctl, 6999 .ndo_get_stats64 = stmmac_get_stats64, 7000 .ndo_setup_tc = stmmac_setup_tc, 7001 .ndo_select_queue = stmmac_select_queue, 7002 #ifdef CONFIG_NET_POLL_CONTROLLER 7003 .ndo_poll_controller = stmmac_poll_controller, 7004 #endif 7005 .ndo_set_mac_address = stmmac_set_mac_address, 7006 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, 7007 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, 7008 .ndo_bpf = stmmac_bpf, 7009 .ndo_xdp_xmit = stmmac_xdp_xmit, 7010 .ndo_xsk_wakeup = stmmac_xsk_wakeup, 7011 }; 7012 7013 static void stmmac_reset_subtask(struct stmmac_priv *priv) 7014 { 7015 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) 7016 return; 7017 if (test_bit(STMMAC_DOWN, &priv->state)) 7018 return; 7019 7020 netdev_err(priv->dev, "Reset adapter.\n"); 7021 7022 rtnl_lock(); 7023 netif_trans_update(priv->dev); 7024 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) 7025 usleep_range(1000, 2000); 7026 7027 set_bit(STMMAC_DOWN, &priv->state); 7028 dev_close(priv->dev); 7029 dev_open(priv->dev, NULL); 7030 clear_bit(STMMAC_DOWN, &priv->state); 7031 clear_bit(STMMAC_RESETING, &priv->state); 7032 rtnl_unlock(); 7033 } 7034 7035 static void stmmac_service_task(struct work_struct *work) 7036 { 7037 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 7038 service_task); 7039 7040 stmmac_reset_subtask(priv); 7041 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); 7042 } 7043 7044 /** 7045 * stmmac_hw_init - Init the MAC device 7046 * @priv: driver private structure 7047 * Description: this function is to configure the MAC device according to 7048 * some platform parameters or the HW capability register. It prepares the 7049 * driver to use either ring or chain modes and to setup either enhanced or 7050 * normal descriptors. 7051 */ 7052 static int stmmac_hw_init(struct stmmac_priv *priv) 7053 { 7054 int ret; 7055 7056 /* dwmac-sun8i only work in chain mode */ 7057 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) 7058 chain_mode = 1; 7059 priv->chain_mode = chain_mode; 7060 7061 /* Initialize HW Interface */ 7062 ret = stmmac_hwif_init(priv); 7063 if (ret) 7064 return ret; 7065 7066 /* Get the HW capability (new GMAC newer than 3.50a) */ 7067 priv->hw_cap_support = stmmac_get_hw_features(priv); 7068 if (priv->hw_cap_support) { 7069 dev_info(priv->device, "DMA HW capability register supported\n"); 7070 7071 /* We can override some gmac/dma configuration fields: e.g. 7072 * enh_desc, tx_coe (e.g. that are passed through the 7073 * platform) with the values from the HW capability 7074 * register (if supported). 7075 */ 7076 priv->plat->enh_desc = priv->dma_cap.enh_desc; 7077 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && 7078 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL); 7079 priv->hw->pmt = priv->plat->pmt; 7080 if (priv->dma_cap.hash_tb_sz) { 7081 priv->hw->multicast_filter_bins = 7082 (BIT(priv->dma_cap.hash_tb_sz) << 5); 7083 priv->hw->mcast_bits_log2 = 7084 ilog2(priv->hw->multicast_filter_bins); 7085 } 7086 7087 /* TXCOE doesn't work in thresh DMA mode */ 7088 if (priv->plat->force_thresh_dma_mode) 7089 priv->plat->tx_coe = 0; 7090 else 7091 priv->plat->tx_coe = priv->dma_cap.tx_coe; 7092 7093 /* In case of GMAC4 rx_coe is from HW cap register. 
*/ 7094 priv->plat->rx_coe = priv->dma_cap.rx_coe; 7095 7096 if (priv->dma_cap.rx_coe_type2) 7097 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; 7098 else if (priv->dma_cap.rx_coe_type1) 7099 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; 7100 7101 } else { 7102 dev_info(priv->device, "No HW DMA feature register supported\n"); 7103 } 7104 7105 if (priv->plat->rx_coe) { 7106 priv->hw->rx_csum = priv->plat->rx_coe; 7107 dev_info(priv->device, "RX Checksum Offload Engine supported\n"); 7108 if (priv->synopsys_id < DWMAC_CORE_4_00) 7109 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); 7110 } 7111 if (priv->plat->tx_coe) 7112 dev_info(priv->device, "TX Checksum insertion supported\n"); 7113 7114 if (priv->plat->pmt) { 7115 dev_info(priv->device, "Wake-Up On Lan supported\n"); 7116 device_set_wakeup_capable(priv->device, 1); 7117 } 7118 7119 if (priv->dma_cap.tsoen) 7120 dev_info(priv->device, "TSO supported\n"); 7121 7122 priv->hw->vlan_fail_q_en = 7123 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); 7124 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; 7125 7126 /* Run HW quirks, if any */ 7127 if (priv->hwif_quirks) { 7128 ret = priv->hwif_quirks(priv); 7129 if (ret) 7130 return ret; 7131 } 7132 7133 /* Rx Watchdog is available in the COREs newer than the 3.40. 7134 * In some case, for example on bugged HW this feature 7135 * has to be disable and this can be done by passing the 7136 * riwt_off field from the platform. 7137 */ 7138 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || 7139 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { 7140 priv->use_riwt = 1; 7141 dev_info(priv->device, 7142 "Enable RX Mitigation via HW Watchdog Timer\n"); 7143 } 7144 7145 return 0; 7146 } 7147 7148 static void stmmac_napi_add(struct net_device *dev) 7149 { 7150 struct stmmac_priv *priv = netdev_priv(dev); 7151 u32 queue, maxq; 7152 7153 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 7154 7155 for (queue = 0; queue < maxq; queue++) { 7156 struct stmmac_channel *ch = &priv->channel[queue]; 7157 7158 ch->priv_data = priv; 7159 ch->index = queue; 7160 spin_lock_init(&ch->lock); 7161 7162 if (queue < priv->plat->rx_queues_to_use) { 7163 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); 7164 } 7165 if (queue < priv->plat->tx_queues_to_use) { 7166 netif_napi_add_tx(dev, &ch->tx_napi, 7167 stmmac_napi_poll_tx); 7168 } 7169 if (queue < priv->plat->rx_queues_to_use && 7170 queue < priv->plat->tx_queues_to_use) { 7171 netif_napi_add(dev, &ch->rxtx_napi, 7172 stmmac_napi_poll_rxtx); 7173 } 7174 } 7175 } 7176 7177 static void stmmac_napi_del(struct net_device *dev) 7178 { 7179 struct stmmac_priv *priv = netdev_priv(dev); 7180 u32 queue, maxq; 7181 7182 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 7183 7184 for (queue = 0; queue < maxq; queue++) { 7185 struct stmmac_channel *ch = &priv->channel[queue]; 7186 7187 if (queue < priv->plat->rx_queues_to_use) 7188 netif_napi_del(&ch->rx_napi); 7189 if (queue < priv->plat->tx_queues_to_use) 7190 netif_napi_del(&ch->tx_napi); 7191 if (queue < priv->plat->rx_queues_to_use && 7192 queue < priv->plat->tx_queues_to_use) { 7193 netif_napi_del(&ch->rxtx_napi); 7194 } 7195 } 7196 } 7197 7198 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) 7199 { 7200 struct stmmac_priv *priv = netdev_priv(dev); 7201 int ret = 0, i; 7202 7203 if (netif_running(dev)) 7204 stmmac_release(dev); 7205 7206 stmmac_napi_del(dev); 7207 7208 priv->plat->rx_queues_to_use = rx_cnt; 7209 priv->plat->tx_queues_to_use = tx_cnt; 
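/* Regenerate the default RSS indirection table for the new RX queue
 * count, unless user space has already installed its own mapping.
 */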
7210 if (!netif_is_rxfh_configured(dev)) 7211 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 7212 priv->rss.table[i] = ethtool_rxfh_indir_default(i, 7213 rx_cnt); 7214 7215 stmmac_napi_add(dev); 7216 7217 if (netif_running(dev)) 7218 ret = stmmac_open(dev); 7219 7220 return ret; 7221 } 7222 7223 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) 7224 { 7225 struct stmmac_priv *priv = netdev_priv(dev); 7226 int ret = 0; 7227 7228 if (netif_running(dev)) 7229 stmmac_release(dev); 7230 7231 priv->dma_conf.dma_rx_size = rx_size; 7232 priv->dma_conf.dma_tx_size = tx_size; 7233 7234 if (netif_running(dev)) 7235 ret = stmmac_open(dev); 7236 7237 return ret; 7238 } 7239 7240 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n" 7241 static void stmmac_fpe_lp_task(struct work_struct *work) 7242 { 7243 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 7244 fpe_task); 7245 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; 7246 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; 7247 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; 7248 bool *hs_enable = &fpe_cfg->hs_enable; 7249 bool *enable = &fpe_cfg->enable; 7250 int retries = 20; 7251 7252 while (retries-- > 0) { 7253 /* Bail out immediately if FPE handshake is OFF */ 7254 if (*lo_state == FPE_STATE_OFF || !*hs_enable) 7255 break; 7256 7257 if (*lo_state == FPE_STATE_ENTERING_ON && 7258 *lp_state == FPE_STATE_ENTERING_ON) { 7259 stmmac_fpe_configure(priv, priv->ioaddr, 7260 priv->plat->tx_queues_to_use, 7261 priv->plat->rx_queues_to_use, 7262 *enable); 7263 7264 netdev_info(priv->dev, "configured FPE\n"); 7265 7266 *lo_state = FPE_STATE_ON; 7267 *lp_state = FPE_STATE_ON; 7268 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n"); 7269 break; 7270 } 7271 7272 if ((*lo_state == FPE_STATE_CAPABLE || 7273 *lo_state == FPE_STATE_ENTERING_ON) && 7274 *lp_state != FPE_STATE_ON) { 7275 netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, 7276 *lo_state, *lp_state); 7277 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 7278 MPACKET_VERIFY); 7279 } 7280 /* Sleep then retry */ 7281 msleep(500); 7282 } 7283 7284 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); 7285 } 7286 7287 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable) 7288 { 7289 if (priv->plat->fpe_cfg->hs_enable != enable) { 7290 if (enable) { 7291 stmmac_fpe_send_mpacket(priv, priv->ioaddr, 7292 MPACKET_VERIFY); 7293 } else { 7294 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; 7295 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; 7296 } 7297 7298 priv->plat->fpe_cfg->hs_enable = enable; 7299 } 7300 } 7301 7302 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) 7303 { 7304 const struct stmmac_xdp_buff *ctx = (void *)_ctx; 7305 struct dma_desc *desc_contains_ts = ctx->desc; 7306 struct stmmac_priv *priv = ctx->priv; 7307 struct dma_desc *ndesc = ctx->ndesc; 7308 struct dma_desc *desc = ctx->desc; 7309 u64 ns = 0; 7310 7311 if (!priv->hwts_rx_en) 7312 return -ENODATA; 7313 7314 /* For GMAC4, the valid timestamp is from CTX next desc. 
*/ 7315 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) 7316 desc_contains_ts = ndesc; 7317 7318 /* Check if timestamp is available */ 7319 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) { 7320 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns); 7321 ns -= priv->plat->cdc_error_adj; 7322 *timestamp = ns_to_ktime(ns); 7323 return 0; 7324 } 7325 7326 return -ENODATA; 7327 } 7328 7329 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = { 7330 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp, 7331 }; 7332 7333 /** 7334 * stmmac_dvr_probe 7335 * @device: device pointer 7336 * @plat_dat: platform data pointer 7337 * @res: stmmac resource pointer 7338 * Description: this is the main probe function used to 7339 * call the alloc_etherdev, allocate the priv structure. 7340 * Return: 7341 * returns 0 on success, otherwise errno. 7342 */ 7343 int stmmac_dvr_probe(struct device *device, 7344 struct plat_stmmacenet_data *plat_dat, 7345 struct stmmac_resources *res) 7346 { 7347 struct net_device *ndev = NULL; 7348 struct stmmac_priv *priv; 7349 u32 rxq; 7350 int i, ret = 0; 7351 7352 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), 7353 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); 7354 if (!ndev) 7355 return -ENOMEM; 7356 7357 SET_NETDEV_DEV(ndev, device); 7358 7359 priv = netdev_priv(ndev); 7360 priv->device = device; 7361 priv->dev = ndev; 7362 7363 for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 7364 u64_stats_init(&priv->xstats.rxq_stats[i].syncp); 7365 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 7366 u64_stats_init(&priv->xstats.txq_stats[i].syncp); 7367 7368 stmmac_set_ethtool_ops(ndev); 7369 priv->pause = pause; 7370 priv->plat = plat_dat; 7371 priv->ioaddr = res->addr; 7372 priv->dev->base_addr = (unsigned long)res->addr; 7373 priv->plat->dma_cfg->multi_msi_en = 7374 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN); 7375 7376 priv->dev->irq = res->irq; 7377 priv->wol_irq = res->wol_irq; 7378 priv->lpi_irq = res->lpi_irq; 7379 priv->sfty_ce_irq = res->sfty_ce_irq; 7380 priv->sfty_ue_irq = res->sfty_ue_irq; 7381 for (i = 0; i < MTL_MAX_RX_QUEUES; i++) 7382 priv->rx_irq[i] = res->rx_irq[i]; 7383 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) 7384 priv->tx_irq[i] = res->tx_irq[i]; 7385 7386 if (!is_zero_ether_addr(res->mac)) 7387 eth_hw_addr_set(priv->dev, res->mac); 7388 7389 dev_set_drvdata(device, priv->dev); 7390 7391 /* Verify driver arguments */ 7392 stmmac_verify_args(); 7393 7394 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); 7395 if (!priv->af_xdp_zc_qps) 7396 return -ENOMEM; 7397 7398 /* Allocate workqueue */ 7399 priv->wq = create_singlethread_workqueue("stmmac_wq"); 7400 if (!priv->wq) { 7401 dev_err(priv->device, "failed to create workqueue\n"); 7402 ret = -ENOMEM; 7403 goto error_wq_init; 7404 } 7405 7406 INIT_WORK(&priv->service_task, stmmac_service_task); 7407 7408 /* Initialize Link Partner FPE workqueue */ 7409 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); 7410 7411 /* Override with kernel parameters if supplied XXX CRS XXX 7412 * this needs to have multiple instances 7413 */ 7414 if ((phyaddr >= 0) && (phyaddr <= 31)) 7415 priv->plat->phy_addr = phyaddr; 7416 7417 if (priv->plat->stmmac_rst) { 7418 ret = reset_control_assert(priv->plat->stmmac_rst); 7419 reset_control_deassert(priv->plat->stmmac_rst); 7420 /* Some reset controllers have only reset callback instead of 7421 * assert + deassert callbacks pair. 
/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to allocate the
 * network device (alloc_etherdev) and set up the private structure.
 * Return: 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		u64_stats_init(&priv->xstats.txq_stats[i].syncp);

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;
	priv->plat->dma_cfg->multi_msi_en =
		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;
	priv->sfty_ce_irq = res->sfty_ce_irq;
	priv->sfty_ue_irq = res->sfty_ue_irq;
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		priv->rx_irq[i] = res->rx_irq[i];
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		priv->tx_irq[i] = res->tx_irq[i];

	if (!is_zero_ether_addr(res->mac))
		eth_hw_addr_set(priv->dev, res->mac);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
	if (!priv->af_xdp_zc_qps)
		return -ENOMEM;

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto error_wq_init;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Initialize Link Partner FPE workqueue */
	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
	if (ret == -ENOTSUPP)
		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
			ERR_PTR(ret));

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Only DWMAC core version 5.20 onwards supports HW descriptor
	 * prefetch.
	 */
	if (priv->synopsys_id < DWMAC_CORE_5_20)
		priv->plat->dma_cfg->dche = false;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_XSK_ZEROCOPY;

	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;

	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	if (priv->dma_cap.sphen &&
	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph_cap = true;
		priv->sph = priv->sph_cap;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* Ideally our host DMA address width is the same as for the
	 * device. However, it may differ and then we have to use our
	 * host DMA width for allocation and the device DMA width for
	 * register handling.
	 */
	if (priv->plat->host_dma_width)
		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
	else
		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;

	if (priv->dma_cap.host_dma_width) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
		if (!ret) {
			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.host_dma_width = 32;
		}
	}
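	/* Worked example (illustrative values, not taken from any specific
	 * platform): with dma_cap.addr64 = 40 and no host_dma_width override,
	 * the code above requests DMA_BIT_MASK(40); if the platform cannot
	 * satisfy that mask, it falls back to a 32-bit mask and records
	 * host_dma_width = 32 so buffer allocation stays within the range
	 * the host can actually reach.
	 */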
7494 */ 7495 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)) 7496 priv->plat->dma_cfg->eame = true; 7497 } else { 7498 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32)); 7499 if (ret) { 7500 dev_err(priv->device, "Failed to set DMA Mask\n"); 7501 goto error_hw_init; 7502 } 7503 7504 priv->dma_cap.host_dma_width = 32; 7505 } 7506 } 7507 7508 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; 7509 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 7510 #ifdef STMMAC_VLAN_TAG_USED 7511 /* Both mac100 and gmac support receive VLAN tag detection */ 7512 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; 7513 if (priv->dma_cap.vlhash) { 7514 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 7515 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; 7516 } 7517 if (priv->dma_cap.vlins) { 7518 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; 7519 if (priv->dma_cap.dvlan) 7520 ndev->features |= NETIF_F_HW_VLAN_STAG_TX; 7521 } 7522 #endif 7523 priv->msg_enable = netif_msg_init(debug, default_msg_level); 7524 7525 priv->xstats.threshold = tc; 7526 7527 /* Initialize RSS */ 7528 rxq = priv->plat->rx_queues_to_use; 7529 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); 7530 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) 7531 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); 7532 7533 if (priv->dma_cap.rssen && priv->plat->rss_en) 7534 ndev->features |= NETIF_F_RXHASH; 7535 7536 ndev->vlan_features |= ndev->features; 7537 /* TSO doesn't work on VLANs yet */ 7538 ndev->vlan_features &= ~NETIF_F_TSO; 7539 7540 /* MTU range: 46 - hw-specific max */ 7541 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; 7542 if (priv->plat->has_xgmac) 7543 ndev->max_mtu = XGMAC_JUMBO_LEN; 7544 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) 7545 ndev->max_mtu = JUMBO_LEN; 7546 else 7547 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); 7548 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu 7549 * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. 7550 */ 7551 if ((priv->plat->maxmtu < ndev->max_mtu) && 7552 (priv->plat->maxmtu >= ndev->min_mtu)) 7553 ndev->max_mtu = priv->plat->maxmtu; 7554 else if (priv->plat->maxmtu < ndev->min_mtu) 7555 dev_warn(priv->device, 7556 "%s: warning: maxmtu having invalid value (%d)\n", 7557 __func__, priv->plat->maxmtu); 7558 7559 if (flow_ctrl) 7560 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 7561 7562 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 7563 7564 /* Setup channels NAPI */ 7565 stmmac_napi_add(ndev); 7566 7567 mutex_init(&priv->lock); 7568 7569 /* If a specific clk_csr value is passed from the platform 7570 * this means that the CSR Clock Range selection cannot be 7571 * changed at run-time and it is fixed. Viceversa the driver'll try to 7572 * set the MDC clock dynamically according to the csr actual 7573 * clock input. 
7574 */ 7575 if (priv->plat->clk_csr >= 0) 7576 priv->clk_csr = priv->plat->clk_csr; 7577 else 7578 stmmac_clk_csr_set(priv); 7579 7580 stmmac_check_pcs_mode(priv); 7581 7582 pm_runtime_get_noresume(device); 7583 pm_runtime_set_active(device); 7584 if (!pm_runtime_enabled(device)) 7585 pm_runtime_enable(device); 7586 7587 if (priv->hw->pcs != STMMAC_PCS_TBI && 7588 priv->hw->pcs != STMMAC_PCS_RTBI) { 7589 /* MDIO bus Registration */ 7590 ret = stmmac_mdio_register(ndev); 7591 if (ret < 0) { 7592 dev_err_probe(priv->device, ret, 7593 "%s: MDIO bus (id: %d) registration failed\n", 7594 __func__, priv->plat->bus_id); 7595 goto error_mdio_register; 7596 } 7597 } 7598 7599 if (priv->plat->speed_mode_2500) 7600 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); 7601 7602 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { 7603 ret = stmmac_xpcs_setup(priv->mii); 7604 if (ret) 7605 goto error_xpcs_setup; 7606 } 7607 7608 ret = stmmac_phy_setup(priv); 7609 if (ret) { 7610 netdev_err(ndev, "failed to setup phy (%d)\n", ret); 7611 goto error_phy_setup; 7612 } 7613 7614 ret = register_netdev(ndev); 7615 if (ret) { 7616 dev_err(priv->device, "%s: ERROR %i registering the device\n", 7617 __func__, ret); 7618 goto error_netdev_register; 7619 } 7620 7621 #ifdef CONFIG_DEBUG_FS 7622 stmmac_init_fs(ndev); 7623 #endif 7624 7625 if (priv->plat->dump_debug_regs) 7626 priv->plat->dump_debug_regs(priv->plat->bsp_priv); 7627 7628 /* Let pm_runtime_put() disable the clocks. 7629 * If CONFIG_PM is not enabled, the clocks will stay powered. 7630 */ 7631 pm_runtime_put(device); 7632 7633 return ret; 7634 7635 error_netdev_register: 7636 phylink_destroy(priv->phylink); 7637 error_xpcs_setup: 7638 error_phy_setup: 7639 if (priv->hw->pcs != STMMAC_PCS_TBI && 7640 priv->hw->pcs != STMMAC_PCS_RTBI) 7641 stmmac_mdio_unregister(ndev); 7642 error_mdio_register: 7643 stmmac_napi_del(ndev); 7644 error_hw_init: 7645 destroy_workqueue(priv->wq); 7646 error_wq_init: 7647 bitmap_free(priv->af_xdp_zc_qps); 7648 7649 return ret; 7650 } 7651 EXPORT_SYMBOL_GPL(stmmac_dvr_probe); 7652 7653 /** 7654 * stmmac_dvr_remove 7655 * @dev: device pointer 7656 * Description: this function resets the TX/RX processes, disables the MAC RX/TX 7657 * changes the link status, releases the DMA descriptor rings. 
7658 */ 7659 void stmmac_dvr_remove(struct device *dev) 7660 { 7661 struct net_device *ndev = dev_get_drvdata(dev); 7662 struct stmmac_priv *priv = netdev_priv(ndev); 7663 7664 netdev_info(priv->dev, "%s: removing driver", __func__); 7665 7666 pm_runtime_get_sync(dev); 7667 7668 stmmac_stop_all_dma(priv); 7669 stmmac_mac_set(priv, priv->ioaddr, false); 7670 netif_carrier_off(ndev); 7671 unregister_netdev(ndev); 7672 7673 #ifdef CONFIG_DEBUG_FS 7674 stmmac_exit_fs(ndev); 7675 #endif 7676 phylink_destroy(priv->phylink); 7677 if (priv->plat->stmmac_rst) 7678 reset_control_assert(priv->plat->stmmac_rst); 7679 reset_control_assert(priv->plat->stmmac_ahb_rst); 7680 if (priv->hw->pcs != STMMAC_PCS_TBI && 7681 priv->hw->pcs != STMMAC_PCS_RTBI) 7682 stmmac_mdio_unregister(ndev); 7683 destroy_workqueue(priv->wq); 7684 mutex_destroy(&priv->lock); 7685 bitmap_free(priv->af_xdp_zc_qps); 7686 7687 pm_runtime_disable(dev); 7688 pm_runtime_put_noidle(dev); 7689 } 7690 EXPORT_SYMBOL_GPL(stmmac_dvr_remove); 7691 7692 /** 7693 * stmmac_suspend - suspend callback 7694 * @dev: device pointer 7695 * Description: this is the function to suspend the device and it is called 7696 * by the platform driver to stop the network queue, release the resources, 7697 * program the PMT register (for WoL), clean and release driver resources. 7698 */ 7699 int stmmac_suspend(struct device *dev) 7700 { 7701 struct net_device *ndev = dev_get_drvdata(dev); 7702 struct stmmac_priv *priv = netdev_priv(ndev); 7703 u32 chan; 7704 7705 if (!ndev || !netif_running(ndev)) 7706 return 0; 7707 7708 mutex_lock(&priv->lock); 7709 7710 netif_device_detach(ndev); 7711 7712 stmmac_disable_all_queues(priv); 7713 7714 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 7715 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); 7716 7717 if (priv->eee_enabled) { 7718 priv->tx_path_in_lpi_mode = false; 7719 del_timer_sync(&priv->eee_ctrl_timer); 7720 } 7721 7722 /* Stop TX/RX DMA */ 7723 stmmac_stop_all_dma(priv); 7724 7725 if (priv->plat->serdes_powerdown) 7726 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); 7727 7728 /* Enable Power down mode by programming the PMT regs */ 7729 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7730 stmmac_pmt(priv, priv->hw, priv->wolopts); 7731 priv->irq_wake = 1; 7732 } else { 7733 stmmac_mac_set(priv, priv->ioaddr, false); 7734 pinctrl_pm_select_sleep_state(priv->device); 7735 } 7736 7737 mutex_unlock(&priv->lock); 7738 7739 rtnl_lock(); 7740 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 7741 phylink_suspend(priv->phylink, true); 7742 } else { 7743 if (device_may_wakeup(priv->device)) 7744 phylink_speed_down(priv->phylink, false); 7745 phylink_suspend(priv->phylink, false); 7746 } 7747 rtnl_unlock(); 7748 7749 if (priv->dma_cap.fpesel) { 7750 /* Disable FPE */ 7751 stmmac_fpe_configure(priv, priv->ioaddr, 7752 priv->plat->tx_queues_to_use, 7753 priv->plat->rx_queues_to_use, false); 7754 7755 stmmac_fpe_handshake(priv, false); 7756 stmmac_fpe_stop_wq(priv); 7757 } 7758 7759 priv->speed = SPEED_UNKNOWN; 7760 return 0; 7761 } 7762 EXPORT_SYMBOL_GPL(stmmac_suspend); 7763 7764 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) 7765 { 7766 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; 7767 7768 rx_q->cur_rx = 0; 7769 rx_q->dirty_rx = 0; 7770 } 7771 7772 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) 7773 { 7774 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; 7775 7776 tx_q->cur_tx = 0; 7777 
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;
}

static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

	tx_q->cur_tx = 0;
	tx_q->dirty_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++)
		stmmac_reset_rx_queue(priv, queue);

	for (queue = 0; queue < tx_cnt; queue++)
		stmmac_reset_tx_queue(priv, queue);
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resuming, this function is invoked to set up the DMA
 * and CORE in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from other devices (e.g. a serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
	    priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);

		if (ret < 0)
			return ret;
	}

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv, &priv->dma_conf);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);
	stmmac_enable_all_dma_irq(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
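/*
 * Usage note (illustrative values): when the driver is built in, the parser
 * below accepts a comma-separated list on the kernel command line, e.g.
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:4000,eee_timer:500
 *
 * When built as a module, the same knobs are the module parameters declared
 * at the top of this file (debug=, phyaddr=, watchdog=, ...).
 */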
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return 1;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 1;

err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
	return 1;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");