// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
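/* Note: the writable (0644) parameters above are also exposed at runtime
 * under /sys/module/stmmac/parameters/.  As a purely illustrative example
 * (the values shown are hypothetical), they can be overridden at load time
 * with:
 *
 *	modprobe stmmac eee_timer=2000 chain_mode=1
 */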
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}
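/* Background for the CSR ranges selected below: the MDC clock is derived
 * from the CSR clock input (clk_csr_i) by a fixed divider chosen per range,
 * so that MDC stays at or below the 2.5 MHz allowed by IEEE 802.3.  As a
 * rough, databook-dependent example, a 75 MHz CSR clock falls in the
 * 60-100 MHz range, whose divider (typically /42 on these cores) yields an
 * MDC of about 1.8 MHz.
 */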
/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider because the frequency
	 * of clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}
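/* Worked example for the two ring helpers above (numbers are hypothetical):
 * with dma_tx_size = 512, cur_tx = 500 and dirty_tx = 10, the ring has
 * wrapped, so avail = 512 - 500 + 10 - 1 = 21 free TX descriptors.  The
 * "- 1" keeps one slot unused so that cur_tx == dirty_tx always means
 * "ring empty" rather than "ring full".
 */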
static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function checks that all TX queues are idle and, in that
 * case, enters LPI mode when EEE is in use.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits the LPI state when it is
 * active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t:  timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
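/* Note on the two LPI entry mechanisms used here: on GMAC4 cores with a
 * tx_lpi_timer small enough for the hardware LPI entry timer (see the
 * STMMAC_ET_MAX check in stmmac_eee_init() below), entry into LPI is fully
 * delegated to the MAC via stmmac_lpi_entry_timer_config(); otherwise the
 * software eee_ctrl_timer above re-arms itself every tx_lpi_timer usecs and
 * tries to enter LPI once all TX queues are idle.
 */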
/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	s64 adjust = 0;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		/* Correct the clk domain crossing(CDC) error */
		if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
			adjust += -(2 * (NSEC_PER_SEC /
					 priv->plat->clk_ptp_rate));
			ns += adjust;
		}

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor,
 * performs some sanity checks and then passes it to the stack.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 adjust = 0;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc.
*/ 524 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) 525 desc = np; 526 527 /* Check if timestamp is available */ 528 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { 529 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); 530 531 /* Correct the clk domain crossing(CDC) error */ 532 if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) { 533 adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate); 534 ns -= adjust; 535 } 536 537 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); 538 shhwtstamp = skb_hwtstamps(skb); 539 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); 540 shhwtstamp->hwtstamp = ns_to_ktime(ns); 541 } else { 542 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); 543 } 544 } 545 546 /** 547 * stmmac_hwtstamp_set - control hardware timestamping. 548 * @dev: device pointer. 549 * @ifr: An IOCTL specific structure, that can contain a pointer to 550 * a proprietary structure used to pass information to the driver. 551 * Description: 552 * This function configures the MAC to enable/disable both outgoing(TX) 553 * and incoming(RX) packets time stamping based on user input. 554 * Return Value: 555 * 0 on success and an appropriate -ve integer on failure. 556 */ 557 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) 558 { 559 struct stmmac_priv *priv = netdev_priv(dev); 560 struct hwtstamp_config config; 561 struct timespec64 now; 562 u64 temp = 0; 563 u32 ptp_v2 = 0; 564 u32 tstamp_all = 0; 565 u32 ptp_over_ipv4_udp = 0; 566 u32 ptp_over_ipv6_udp = 0; 567 u32 ptp_over_ethernet = 0; 568 u32 snap_type_sel = 0; 569 u32 ts_master_en = 0; 570 u32 ts_event_en = 0; 571 u32 sec_inc = 0; 572 u32 value = 0; 573 bool xmac; 574 575 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 576 577 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { 578 netdev_alert(priv->dev, "No support for HW time stamping\n"); 579 priv->hwts_tx_en = 0; 580 priv->hwts_rx_en = 0; 581 582 return -EOPNOTSUPP; 583 } 584 585 if (copy_from_user(&config, ifr->ifr_data, 586 sizeof(config))) 587 return -EFAULT; 588 589 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", 590 __func__, config.flags, config.tx_type, config.rx_filter); 591 592 /* reserved for future extensions */ 593 if (config.flags) 594 return -EINVAL; 595 596 if (config.tx_type != HWTSTAMP_TX_OFF && 597 config.tx_type != HWTSTAMP_TX_ON) 598 return -ERANGE; 599 600 if (priv->adv_ts) { 601 switch (config.rx_filter) { 602 case HWTSTAMP_FILTER_NONE: 603 /* time stamp no incoming packet at all */ 604 config.rx_filter = HWTSTAMP_FILTER_NONE; 605 break; 606 607 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 608 /* PTP v1, UDP, any kind of event packet */ 609 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 610 /* 'xmac' hardware can support Sync, Pdelay_Req and 611 * Pdelay_resp by setting bit14 and bits17/16 to 01 612 * This leaves Delay_Req timestamps out. 
613 * Enable all events *and* general purpose message 614 * timestamping 615 */ 616 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 617 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 618 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 619 break; 620 621 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 622 /* PTP v1, UDP, Sync packet */ 623 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; 624 /* take time stamp for SYNC messages only */ 625 ts_event_en = PTP_TCR_TSEVNTENA; 626 627 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 628 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 629 break; 630 631 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 632 /* PTP v1, UDP, Delay_req packet */ 633 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; 634 /* take time stamp for Delay_Req messages only */ 635 ts_master_en = PTP_TCR_TSMSTRENA; 636 ts_event_en = PTP_TCR_TSEVNTENA; 637 638 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 639 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 640 break; 641 642 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 643 /* PTP v2, UDP, any kind of event packet */ 644 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; 645 ptp_v2 = PTP_TCR_TSVER2ENA; 646 /* take time stamp for all event messages */ 647 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 648 649 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 650 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 651 break; 652 653 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 654 /* PTP v2, UDP, Sync packet */ 655 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; 656 ptp_v2 = PTP_TCR_TSVER2ENA; 657 /* take time stamp for SYNC messages only */ 658 ts_event_en = PTP_TCR_TSEVNTENA; 659 660 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 661 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 662 break; 663 664 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 665 /* PTP v2, UDP, Delay_req packet */ 666 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; 667 ptp_v2 = PTP_TCR_TSVER2ENA; 668 /* take time stamp for Delay_Req messages only */ 669 ts_master_en = PTP_TCR_TSMSTRENA; 670 ts_event_en = PTP_TCR_TSEVNTENA; 671 672 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 673 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 674 break; 675 676 case HWTSTAMP_FILTER_PTP_V2_EVENT: 677 /* PTP v2/802.AS1 any layer, any kind of event packet */ 678 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 679 ptp_v2 = PTP_TCR_TSVER2ENA; 680 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 681 if (priv->synopsys_id != DWMAC_CORE_5_10) 682 ts_event_en = PTP_TCR_TSEVNTENA; 683 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 684 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 685 ptp_over_ethernet = PTP_TCR_TSIPENA; 686 break; 687 688 case HWTSTAMP_FILTER_PTP_V2_SYNC: 689 /* PTP v2/802.AS1, any layer, Sync packet */ 690 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; 691 ptp_v2 = PTP_TCR_TSVER2ENA; 692 /* take time stamp for SYNC messages only */ 693 ts_event_en = PTP_TCR_TSEVNTENA; 694 695 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 696 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 697 ptp_over_ethernet = PTP_TCR_TSIPENA; 698 break; 699 700 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 701 /* PTP v2/802.AS1, any layer, Delay_req packet */ 702 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; 703 ptp_v2 = PTP_TCR_TSVER2ENA; 704 /* take time stamp for Delay_Req messages only */ 705 ts_master_en = PTP_TCR_TSMSTRENA; 706 ts_event_en = PTP_TCR_TSEVNTENA; 707 708 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 709 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 710 ptp_over_ethernet = PTP_TCR_TSIPENA; 711 break; 712 713 case HWTSTAMP_FILTER_NTP_ALL: 714 case HWTSTAMP_FILTER_ALL: 715 /* time stamp any incoming packet */ 716 config.rx_filter = HWTSTAMP_FILTER_ALL; 717 tstamp_all = 
PTP_TCR_TSENALL; 718 break; 719 720 default: 721 return -ERANGE; 722 } 723 } else { 724 switch (config.rx_filter) { 725 case HWTSTAMP_FILTER_NONE: 726 config.rx_filter = HWTSTAMP_FILTER_NONE; 727 break; 728 default: 729 /* PTP v1, UDP, any kind of event packet */ 730 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; 731 break; 732 } 733 } 734 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); 735 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; 736 737 if (!priv->hwts_tx_en && !priv->hwts_rx_en) 738 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); 739 else { 740 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | 741 tstamp_all | ptp_v2 | ptp_over_ethernet | 742 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | 743 ts_master_en | snap_type_sel); 744 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); 745 746 /* program Sub Second Increment reg */ 747 stmmac_config_sub_second_increment(priv, 748 priv->ptpaddr, priv->plat->clk_ptp_rate, 749 xmac, &sec_inc); 750 temp = div_u64(1000000000ULL, sec_inc); 751 752 /* Store sub second increment and flags for later use */ 753 priv->sub_second_inc = sec_inc; 754 priv->systime_flags = value; 755 756 /* calculate default added value: 757 * formula is : 758 * addend = (2^32)/freq_div_ratio; 759 * where, freq_div_ratio = 1e9ns/sec_inc 760 */ 761 temp = (u64)(temp << 32); 762 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); 763 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); 764 765 /* initialize system time */ 766 ktime_get_real_ts64(&now); 767 768 /* lower 32 bits of tv_sec are safe until y2106 */ 769 stmmac_init_systime(priv, priv->ptpaddr, 770 (u32)now.tv_sec, now.tv_nsec); 771 } 772 773 memcpy(&priv->tstamp_config, &config, sizeof(config)); 774 775 return copy_to_user(ifr->ifr_data, &config, 776 sizeof(config)) ? -EFAULT : 0; 777 } 778 779 /** 780 * stmmac_hwtstamp_get - read hardware timestamping. 781 * @dev: device pointer. 782 * @ifr: An IOCTL specific structure, that can contain a pointer to 783 * a proprietary structure used to pass information to the driver. 784 * Description: 785 * This function obtain the current hardware timestamping settings 786 * as requested. 787 */ 788 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) 789 { 790 struct stmmac_priv *priv = netdev_priv(dev); 791 struct hwtstamp_config *config = &priv->tstamp_config; 792 793 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 794 return -EOPNOTSUPP; 795 796 return copy_to_user(ifr->ifr_data, config, 797 sizeof(*config)) ? -EFAULT : 0; 798 } 799 800 /** 801 * stmmac_init_ptp - init PTP 802 * @priv: driver private structure 803 * Description: this is to verify if the HW supports the PTPv1 or PTPv2. 804 * This is done by looking at the HW cap. register. 805 * This function also registers the ptp driver. 
806 */ 807 static int stmmac_init_ptp(struct stmmac_priv *priv) 808 { 809 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 810 811 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 812 return -EOPNOTSUPP; 813 814 priv->adv_ts = 0; 815 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */ 816 if (xmac && priv->dma_cap.atime_stamp) 817 priv->adv_ts = 1; 818 /* Dwmac 3.x core with extend_desc can support adv_ts */ 819 else if (priv->extend_desc && priv->dma_cap.atime_stamp) 820 priv->adv_ts = 1; 821 822 if (priv->dma_cap.time_stamp) 823 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); 824 825 if (priv->adv_ts) 826 netdev_info(priv->dev, 827 "IEEE 1588-2008 Advanced Timestamp supported\n"); 828 829 priv->hwts_tx_en = 0; 830 priv->hwts_rx_en = 0; 831 832 stmmac_ptp_register(priv); 833 834 return 0; 835 } 836 837 static void stmmac_release_ptp(struct stmmac_priv *priv) 838 { 839 clk_disable_unprepare(priv->plat->clk_ptp_ref); 840 stmmac_ptp_unregister(priv); 841 } 842 843 /** 844 * stmmac_mac_flow_ctrl - Configure flow control in all queues 845 * @priv: driver private structure 846 * @duplex: duplex passed to the next function 847 * Description: It is used for configuring the flow control in all queues 848 */ 849 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) 850 { 851 u32 tx_cnt = priv->plat->tx_queues_to_use; 852 853 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, 854 priv->pause, tx_cnt); 855 } 856 857 static void stmmac_validate(struct phylink_config *config, 858 unsigned long *supported, 859 struct phylink_link_state *state) 860 { 861 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 862 __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, }; 863 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; 864 int tx_cnt = priv->plat->tx_queues_to_use; 865 int max_speed = priv->plat->max_speed; 866 867 phylink_set(mac_supported, 10baseT_Half); 868 phylink_set(mac_supported, 10baseT_Full); 869 phylink_set(mac_supported, 100baseT_Half); 870 phylink_set(mac_supported, 100baseT_Full); 871 phylink_set(mac_supported, 1000baseT_Half); 872 phylink_set(mac_supported, 1000baseT_Full); 873 phylink_set(mac_supported, 1000baseKX_Full); 874 875 phylink_set(mac_supported, Autoneg); 876 phylink_set(mac_supported, Pause); 877 phylink_set(mac_supported, Asym_Pause); 878 phylink_set_port_modes(mac_supported); 879 880 /* Cut down 1G if asked to */ 881 if ((max_speed > 0) && (max_speed < 1000)) { 882 phylink_set(mask, 1000baseT_Full); 883 phylink_set(mask, 1000baseX_Full); 884 } else if (priv->plat->has_xgmac) { 885 if (!max_speed || (max_speed >= 2500)) { 886 phylink_set(mac_supported, 2500baseT_Full); 887 phylink_set(mac_supported, 2500baseX_Full); 888 } 889 if (!max_speed || (max_speed >= 5000)) { 890 phylink_set(mac_supported, 5000baseT_Full); 891 } 892 if (!max_speed || (max_speed >= 10000)) { 893 phylink_set(mac_supported, 10000baseSR_Full); 894 phylink_set(mac_supported, 10000baseLR_Full); 895 phylink_set(mac_supported, 10000baseER_Full); 896 phylink_set(mac_supported, 10000baseLRM_Full); 897 phylink_set(mac_supported, 10000baseT_Full); 898 phylink_set(mac_supported, 10000baseKX4_Full); 899 phylink_set(mac_supported, 10000baseKR_Full); 900 } 901 if (!max_speed || (max_speed >= 25000)) { 902 phylink_set(mac_supported, 25000baseCR_Full); 903 phylink_set(mac_supported, 25000baseKR_Full); 904 phylink_set(mac_supported, 25000baseSR_Full); 905 } 906 if (!max_speed || (max_speed >= 40000)) { 907 phylink_set(mac_supported, 
40000baseKR4_Full); 908 phylink_set(mac_supported, 40000baseCR4_Full); 909 phylink_set(mac_supported, 40000baseSR4_Full); 910 phylink_set(mac_supported, 40000baseLR4_Full); 911 } 912 if (!max_speed || (max_speed >= 50000)) { 913 phylink_set(mac_supported, 50000baseCR2_Full); 914 phylink_set(mac_supported, 50000baseKR2_Full); 915 phylink_set(mac_supported, 50000baseSR2_Full); 916 phylink_set(mac_supported, 50000baseKR_Full); 917 phylink_set(mac_supported, 50000baseSR_Full); 918 phylink_set(mac_supported, 50000baseCR_Full); 919 phylink_set(mac_supported, 50000baseLR_ER_FR_Full); 920 phylink_set(mac_supported, 50000baseDR_Full); 921 } 922 if (!max_speed || (max_speed >= 100000)) { 923 phylink_set(mac_supported, 100000baseKR4_Full); 924 phylink_set(mac_supported, 100000baseSR4_Full); 925 phylink_set(mac_supported, 100000baseCR4_Full); 926 phylink_set(mac_supported, 100000baseLR4_ER4_Full); 927 phylink_set(mac_supported, 100000baseKR2_Full); 928 phylink_set(mac_supported, 100000baseSR2_Full); 929 phylink_set(mac_supported, 100000baseCR2_Full); 930 phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full); 931 phylink_set(mac_supported, 100000baseDR2_Full); 932 } 933 } 934 935 /* Half-Duplex can only work with single queue */ 936 if (tx_cnt > 1) { 937 phylink_set(mask, 10baseT_Half); 938 phylink_set(mask, 100baseT_Half); 939 phylink_set(mask, 1000baseT_Half); 940 } 941 942 linkmode_and(supported, supported, mac_supported); 943 linkmode_andnot(supported, supported, mask); 944 945 linkmode_and(state->advertising, state->advertising, mac_supported); 946 linkmode_andnot(state->advertising, state->advertising, mask); 947 948 /* If PCS is supported, check which modes it supports. */ 949 stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state); 950 } 951 952 static void stmmac_mac_pcs_get_state(struct phylink_config *config, 953 struct phylink_link_state *state) 954 { 955 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 956 957 state->link = 0; 958 stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state); 959 } 960 961 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, 962 const struct phylink_link_state *state) 963 { 964 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 965 966 stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state); 967 } 968 969 static void stmmac_mac_an_restart(struct phylink_config *config) 970 { 971 /* Not Supported */ 972 } 973 974 static void stmmac_mac_link_down(struct phylink_config *config, 975 unsigned int mode, phy_interface_t interface) 976 { 977 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 978 979 stmmac_mac_set(priv, priv->ioaddr, false); 980 priv->eee_active = false; 981 priv->tx_lpi_enabled = false; 982 stmmac_eee_init(priv); 983 stmmac_set_eee_pls(priv, priv->hw, false); 984 } 985 986 static void stmmac_mac_link_up(struct phylink_config *config, 987 struct phy_device *phy, 988 unsigned int mode, phy_interface_t interface, 989 int speed, int duplex, 990 bool tx_pause, bool rx_pause) 991 { 992 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); 993 u32 ctrl; 994 995 stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface); 996 997 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); 998 ctrl &= ~priv->hw->link.speed_mask; 999 1000 if (interface == PHY_INTERFACE_MODE_USXGMII) { 1001 switch (speed) { 1002 case SPEED_10000: 1003 ctrl |= priv->hw->link.xgmii.speed10000; 1004 break; 1005 case SPEED_5000: 1006 ctrl |= priv->hw->link.xgmii.speed5000; 1007 break; 1008 case 
SPEED_2500: 1009 ctrl |= priv->hw->link.xgmii.speed2500; 1010 break; 1011 default: 1012 return; 1013 } 1014 } else if (interface == PHY_INTERFACE_MODE_XLGMII) { 1015 switch (speed) { 1016 case SPEED_100000: 1017 ctrl |= priv->hw->link.xlgmii.speed100000; 1018 break; 1019 case SPEED_50000: 1020 ctrl |= priv->hw->link.xlgmii.speed50000; 1021 break; 1022 case SPEED_40000: 1023 ctrl |= priv->hw->link.xlgmii.speed40000; 1024 break; 1025 case SPEED_25000: 1026 ctrl |= priv->hw->link.xlgmii.speed25000; 1027 break; 1028 case SPEED_10000: 1029 ctrl |= priv->hw->link.xgmii.speed10000; 1030 break; 1031 case SPEED_2500: 1032 ctrl |= priv->hw->link.speed2500; 1033 break; 1034 case SPEED_1000: 1035 ctrl |= priv->hw->link.speed1000; 1036 break; 1037 default: 1038 return; 1039 } 1040 } else { 1041 switch (speed) { 1042 case SPEED_2500: 1043 ctrl |= priv->hw->link.speed2500; 1044 break; 1045 case SPEED_1000: 1046 ctrl |= priv->hw->link.speed1000; 1047 break; 1048 case SPEED_100: 1049 ctrl |= priv->hw->link.speed100; 1050 break; 1051 case SPEED_10: 1052 ctrl |= priv->hw->link.speed10; 1053 break; 1054 default: 1055 return; 1056 } 1057 } 1058 1059 priv->speed = speed; 1060 1061 if (priv->plat->fix_mac_speed) 1062 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed); 1063 1064 if (!duplex) 1065 ctrl &= ~priv->hw->link.duplex; 1066 else 1067 ctrl |= priv->hw->link.duplex; 1068 1069 /* Flow Control operation */ 1070 if (tx_pause && rx_pause) 1071 stmmac_mac_flow_ctrl(priv, duplex); 1072 1073 writel(ctrl, priv->ioaddr + MAC_CTRL_REG); 1074 1075 stmmac_mac_set(priv, priv->ioaddr, true); 1076 if (phy && priv->dma_cap.eee) { 1077 priv->eee_active = phy_init_eee(phy, 1) >= 0; 1078 priv->eee_enabled = stmmac_eee_init(priv); 1079 priv->tx_lpi_enabled = priv->eee_enabled; 1080 stmmac_set_eee_pls(priv, priv->hw, true); 1081 } 1082 } 1083 1084 static const struct phylink_mac_ops stmmac_phylink_mac_ops = { 1085 .validate = stmmac_validate, 1086 .mac_pcs_get_state = stmmac_mac_pcs_get_state, 1087 .mac_config = stmmac_mac_config, 1088 .mac_an_restart = stmmac_mac_an_restart, 1089 .mac_link_down = stmmac_mac_link_down, 1090 .mac_link_up = stmmac_mac_link_up, 1091 }; 1092 1093 /** 1094 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported 1095 * @priv: driver private structure 1096 * Description: this is to verify if the HW supports the PCS. 1097 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is 1098 * configured for the TBI, RTBI, or SGMII PHY interface. 1099 */ 1100 static void stmmac_check_pcs_mode(struct stmmac_priv *priv) 1101 { 1102 int interface = priv->plat->interface; 1103 1104 if (priv->dma_cap.pcs) { 1105 if ((interface == PHY_INTERFACE_MODE_RGMII) || 1106 (interface == PHY_INTERFACE_MODE_RGMII_ID) || 1107 (interface == PHY_INTERFACE_MODE_RGMII_RXID) || 1108 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { 1109 netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); 1110 priv->hw->pcs = STMMAC_PCS_RGMII; 1111 } else if (interface == PHY_INTERFACE_MODE_SGMII) { 1112 netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); 1113 priv->hw->pcs = STMMAC_PCS_SGMII; 1114 } 1115 } 1116 } 1117 1118 /** 1119 * stmmac_init_phy - PHY initialization 1120 * @dev: net device structure 1121 * Description: it initializes the driver's PHY state, and attaches the PHY 1122 * to the mac driver. 
1123 * Return value: 1124 * 0 on success 1125 */ 1126 static int stmmac_init_phy(struct net_device *dev) 1127 { 1128 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 1129 struct stmmac_priv *priv = netdev_priv(dev); 1130 struct device_node *node; 1131 int ret; 1132 1133 node = priv->plat->phylink_node; 1134 1135 if (node) 1136 ret = phylink_of_phy_connect(priv->phylink, node, 0); 1137 1138 /* Some DT bindings do not set-up the PHY handle. Let's try to 1139 * manually parse it 1140 */ 1141 if (!node || ret) { 1142 int addr = priv->plat->phy_addr; 1143 struct phy_device *phydev; 1144 1145 phydev = mdiobus_get_phy(priv->mii, addr); 1146 if (!phydev) { 1147 netdev_err(priv->dev, "no phy at addr %d\n", addr); 1148 return -ENODEV; 1149 } 1150 1151 ret = phylink_connect_phy(priv->phylink, phydev); 1152 } 1153 1154 phylink_ethtool_get_wol(priv->phylink, &wol); 1155 device_set_wakeup_capable(priv->device, !!wol.supported); 1156 1157 return ret; 1158 } 1159 1160 static int stmmac_phy_setup(struct stmmac_priv *priv) 1161 { 1162 struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); 1163 int mode = priv->plat->phy_interface; 1164 struct phylink *phylink; 1165 1166 priv->phylink_config.dev = &priv->dev->dev; 1167 priv->phylink_config.type = PHYLINK_NETDEV; 1168 priv->phylink_config.pcs_poll = true; 1169 priv->phylink_config.ovr_an_inband = 1170 priv->plat->mdio_bus_data->xpcs_an_inband; 1171 1172 if (!fwnode) 1173 fwnode = dev_fwnode(priv->device); 1174 1175 phylink = phylink_create(&priv->phylink_config, fwnode, 1176 mode, &stmmac_phylink_mac_ops); 1177 if (IS_ERR(phylink)) 1178 return PTR_ERR(phylink); 1179 1180 priv->phylink = phylink; 1181 return 0; 1182 } 1183 1184 static void stmmac_display_rx_rings(struct stmmac_priv *priv) 1185 { 1186 u32 rx_cnt = priv->plat->rx_queues_to_use; 1187 unsigned int desc_size; 1188 void *head_rx; 1189 u32 queue; 1190 1191 /* Display RX rings */ 1192 for (queue = 0; queue < rx_cnt; queue++) { 1193 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1194 1195 pr_info("\tRX Queue %u rings\n", queue); 1196 1197 if (priv->extend_desc) { 1198 head_rx = (void *)rx_q->dma_erx; 1199 desc_size = sizeof(struct dma_extended_desc); 1200 } else { 1201 head_rx = (void *)rx_q->dma_rx; 1202 desc_size = sizeof(struct dma_desc); 1203 } 1204 1205 /* Display RX ring */ 1206 stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true, 1207 rx_q->dma_rx_phy, desc_size); 1208 } 1209 } 1210 1211 static void stmmac_display_tx_rings(struct stmmac_priv *priv) 1212 { 1213 u32 tx_cnt = priv->plat->tx_queues_to_use; 1214 unsigned int desc_size; 1215 void *head_tx; 1216 u32 queue; 1217 1218 /* Display TX rings */ 1219 for (queue = 0; queue < tx_cnt; queue++) { 1220 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1221 1222 pr_info("\tTX Queue %d rings\n", queue); 1223 1224 if (priv->extend_desc) { 1225 head_tx = (void *)tx_q->dma_etx; 1226 desc_size = sizeof(struct dma_extended_desc); 1227 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { 1228 head_tx = (void *)tx_q->dma_entx; 1229 desc_size = sizeof(struct dma_edesc); 1230 } else { 1231 head_tx = (void *)tx_q->dma_tx; 1232 desc_size = sizeof(struct dma_desc); 1233 } 1234 1235 stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false, 1236 tx_q->dma_tx_phy, desc_size); 1237 } 1238 } 1239 1240 static void stmmac_display_rings(struct stmmac_priv *priv) 1241 { 1242 /* Display RX ring */ 1243 stmmac_display_rx_rings(priv); 1244 1245 /* Display TX ring */ 1246 stmmac_display_tx_rings(priv); 1247 } 1248 1249 static int 
stmmac_set_bfsize(int mtu, int bufsize) 1250 { 1251 int ret = bufsize; 1252 1253 if (mtu >= BUF_SIZE_8KiB) 1254 ret = BUF_SIZE_16KiB; 1255 else if (mtu >= BUF_SIZE_4KiB) 1256 ret = BUF_SIZE_8KiB; 1257 else if (mtu >= BUF_SIZE_2KiB) 1258 ret = BUF_SIZE_4KiB; 1259 else if (mtu > DEFAULT_BUFSIZE) 1260 ret = BUF_SIZE_2KiB; 1261 else 1262 ret = DEFAULT_BUFSIZE; 1263 1264 return ret; 1265 } 1266 1267 /** 1268 * stmmac_clear_rx_descriptors - clear RX descriptors 1269 * @priv: driver private structure 1270 * @queue: RX queue index 1271 * Description: this function is called to clear the RX descriptors 1272 * in case of both basic and extended descriptors are used. 1273 */ 1274 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) 1275 { 1276 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1277 int i; 1278 1279 /* Clear the RX descriptors */ 1280 for (i = 0; i < priv->dma_rx_size; i++) 1281 if (priv->extend_desc) 1282 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, 1283 priv->use_riwt, priv->mode, 1284 (i == priv->dma_rx_size - 1), 1285 priv->dma_buf_sz); 1286 else 1287 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], 1288 priv->use_riwt, priv->mode, 1289 (i == priv->dma_rx_size - 1), 1290 priv->dma_buf_sz); 1291 } 1292 1293 /** 1294 * stmmac_clear_tx_descriptors - clear tx descriptors 1295 * @priv: driver private structure 1296 * @queue: TX queue index. 1297 * Description: this function is called to clear the TX descriptors 1298 * in case of both basic and extended descriptors are used. 1299 */ 1300 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) 1301 { 1302 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1303 int i; 1304 1305 /* Clear the TX descriptors */ 1306 for (i = 0; i < priv->dma_tx_size; i++) { 1307 int last = (i == (priv->dma_tx_size - 1)); 1308 struct dma_desc *p; 1309 1310 if (priv->extend_desc) 1311 p = &tx_q->dma_etx[i].basic; 1312 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1313 p = &tx_q->dma_entx[i].basic; 1314 else 1315 p = &tx_q->dma_tx[i]; 1316 1317 stmmac_init_tx_desc(priv, p, priv->mode, last); 1318 } 1319 } 1320 1321 /** 1322 * stmmac_clear_descriptors - clear descriptors 1323 * @priv: driver private structure 1324 * Description: this function is called to clear the TX and RX descriptors 1325 * in case of both basic and extended descriptors are used. 1326 */ 1327 static void stmmac_clear_descriptors(struct stmmac_priv *priv) 1328 { 1329 u32 rx_queue_cnt = priv->plat->rx_queues_to_use; 1330 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 1331 u32 queue; 1332 1333 /* Clear the RX descriptors */ 1334 for (queue = 0; queue < rx_queue_cnt; queue++) 1335 stmmac_clear_rx_descriptors(priv, queue); 1336 1337 /* Clear the TX descriptors */ 1338 for (queue = 0; queue < tx_queue_cnt; queue++) 1339 stmmac_clear_tx_descriptors(priv, queue); 1340 } 1341 1342 /** 1343 * stmmac_init_rx_buffers - init the RX descriptor buffer. 1344 * @priv: driver private structure 1345 * @p: descriptor pointer 1346 * @i: descriptor index 1347 * @flags: gfp flag 1348 * @queue: RX queue index 1349 * Description: this function is called to allocate a receive buffer, perform 1350 * the DMA mapping and init the descriptor. 
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
	if (!buf->page)
		return -ENOMEM;

	if (priv->sph) {
		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page);
	stmmac_set_desc_addr(priv, p, buf->addr);
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

/**
 * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
 * @priv: driver private structure
 * Description: this function is called to re-allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
1438 */ 1439 static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv) 1440 { 1441 u32 rx_count = priv->plat->rx_queues_to_use; 1442 u32 queue; 1443 int i; 1444 1445 for (queue = 0; queue < rx_count; queue++) { 1446 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1447 1448 for (i = 0; i < priv->dma_rx_size; i++) { 1449 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; 1450 1451 if (buf->page) { 1452 page_pool_recycle_direct(rx_q->page_pool, buf->page); 1453 buf->page = NULL; 1454 } 1455 1456 if (priv->sph && buf->sec_page) { 1457 page_pool_recycle_direct(rx_q->page_pool, buf->sec_page); 1458 buf->sec_page = NULL; 1459 } 1460 } 1461 } 1462 1463 for (queue = 0; queue < rx_count; queue++) { 1464 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1465 1466 for (i = 0; i < priv->dma_rx_size; i++) { 1467 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; 1468 struct dma_desc *p; 1469 1470 if (priv->extend_desc) 1471 p = &((rx_q->dma_erx + i)->basic); 1472 else 1473 p = rx_q->dma_rx + i; 1474 1475 if (!buf->page) { 1476 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); 1477 if (!buf->page) 1478 goto err_reinit_rx_buffers; 1479 1480 buf->addr = page_pool_get_dma_addr(buf->page); 1481 } 1482 1483 if (priv->sph && !buf->sec_page) { 1484 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); 1485 if (!buf->sec_page) 1486 goto err_reinit_rx_buffers; 1487 1488 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); 1489 } 1490 1491 stmmac_set_desc_addr(priv, p, buf->addr); 1492 if (priv->sph) 1493 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); 1494 else 1495 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); 1496 if (priv->dma_buf_sz == BUF_SIZE_16KiB) 1497 stmmac_init_desc3(priv, p); 1498 } 1499 } 1500 1501 return; 1502 1503 err_reinit_rx_buffers: 1504 do { 1505 while (--i >= 0) 1506 stmmac_free_rx_buffer(priv, queue, i); 1507 1508 if (queue == 0) 1509 break; 1510 1511 i = priv->dma_rx_size; 1512 } while (queue-- > 0); 1513 } 1514 1515 /** 1516 * init_dma_rx_desc_rings - init the RX descriptor rings 1517 * @dev: net device structure 1518 * @flags: gfp flag. 1519 * Description: this function initializes the DMA RX descriptors 1520 * and allocates the socket buffers. It supports the chained and ring 1521 * modes. 
1522 */ 1523 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) 1524 { 1525 struct stmmac_priv *priv = netdev_priv(dev); 1526 u32 rx_count = priv->plat->rx_queues_to_use; 1527 int ret = -ENOMEM; 1528 int queue; 1529 int i; 1530 1531 /* RX INITIALIZATION */ 1532 netif_dbg(priv, probe, priv->dev, 1533 "SKB addresses:\nskb\t\tskb data\tdma data\n"); 1534 1535 for (queue = 0; queue < rx_count; queue++) { 1536 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1537 1538 netif_dbg(priv, probe, priv->dev, 1539 "(%s) dma_rx_phy=0x%08x\n", __func__, 1540 (u32)rx_q->dma_rx_phy); 1541 1542 stmmac_clear_rx_descriptors(priv, queue); 1543 1544 for (i = 0; i < priv->dma_rx_size; i++) { 1545 struct dma_desc *p; 1546 1547 if (priv->extend_desc) 1548 p = &((rx_q->dma_erx + i)->basic); 1549 else 1550 p = rx_q->dma_rx + i; 1551 1552 ret = stmmac_init_rx_buffers(priv, p, i, flags, 1553 queue); 1554 if (ret) 1555 goto err_init_rx_buffers; 1556 } 1557 1558 rx_q->cur_rx = 0; 1559 rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size); 1560 1561 /* Setup the chained descriptor addresses */ 1562 if (priv->mode == STMMAC_CHAIN_MODE) { 1563 if (priv->extend_desc) 1564 stmmac_mode_init(priv, rx_q->dma_erx, 1565 rx_q->dma_rx_phy, 1566 priv->dma_rx_size, 1); 1567 else 1568 stmmac_mode_init(priv, rx_q->dma_rx, 1569 rx_q->dma_rx_phy, 1570 priv->dma_rx_size, 0); 1571 } 1572 } 1573 1574 return 0; 1575 1576 err_init_rx_buffers: 1577 while (queue >= 0) { 1578 while (--i >= 0) 1579 stmmac_free_rx_buffer(priv, queue, i); 1580 1581 if (queue == 0) 1582 break; 1583 1584 i = priv->dma_rx_size; 1585 queue--; 1586 } 1587 1588 return ret; 1589 } 1590 1591 /** 1592 * init_dma_tx_desc_rings - init the TX descriptor rings 1593 * @dev: net device structure. 1594 * Description: this function initializes the DMA TX descriptors 1595 * and allocates the socket buffers. It supports the chained and ring 1596 * modes. 
1597 */ 1598 static int init_dma_tx_desc_rings(struct net_device *dev) 1599 { 1600 struct stmmac_priv *priv = netdev_priv(dev); 1601 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 1602 u32 queue; 1603 int i; 1604 1605 for (queue = 0; queue < tx_queue_cnt; queue++) { 1606 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1607 1608 netif_dbg(priv, probe, priv->dev, 1609 "(%s) dma_tx_phy=0x%08x\n", __func__, 1610 (u32)tx_q->dma_tx_phy); 1611 1612 /* Setup the chained descriptor addresses */ 1613 if (priv->mode == STMMAC_CHAIN_MODE) { 1614 if (priv->extend_desc) 1615 stmmac_mode_init(priv, tx_q->dma_etx, 1616 tx_q->dma_tx_phy, 1617 priv->dma_tx_size, 1); 1618 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) 1619 stmmac_mode_init(priv, tx_q->dma_tx, 1620 tx_q->dma_tx_phy, 1621 priv->dma_tx_size, 0); 1622 } 1623 1624 for (i = 0; i < priv->dma_tx_size; i++) { 1625 struct dma_desc *p; 1626 if (priv->extend_desc) 1627 p = &((tx_q->dma_etx + i)->basic); 1628 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1629 p = &((tx_q->dma_entx + i)->basic); 1630 else 1631 p = tx_q->dma_tx + i; 1632 1633 stmmac_clear_desc(priv, p); 1634 1635 tx_q->tx_skbuff_dma[i].buf = 0; 1636 tx_q->tx_skbuff_dma[i].map_as_page = false; 1637 tx_q->tx_skbuff_dma[i].len = 0; 1638 tx_q->tx_skbuff_dma[i].last_segment = false; 1639 tx_q->tx_skbuff[i] = NULL; 1640 } 1641 1642 tx_q->dirty_tx = 0; 1643 tx_q->cur_tx = 0; 1644 tx_q->mss = 0; 1645 1646 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); 1647 } 1648 1649 return 0; 1650 } 1651 1652 /** 1653 * init_dma_desc_rings - init the RX/TX descriptor rings 1654 * @dev: net device structure 1655 * @flags: gfp flag. 1656 * Description: this function initializes the DMA RX/TX descriptors 1657 * and allocates the socket buffers. It supports the chained and ring 1658 * modes. 
1659 */ 1660 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) 1661 { 1662 struct stmmac_priv *priv = netdev_priv(dev); 1663 int ret; 1664 1665 ret = init_dma_rx_desc_rings(dev, flags); 1666 if (ret) 1667 return ret; 1668 1669 ret = init_dma_tx_desc_rings(dev); 1670 1671 stmmac_clear_descriptors(priv); 1672 1673 if (netif_msg_hw(priv)) 1674 stmmac_display_rings(priv); 1675 1676 return ret; 1677 } 1678 1679 /** 1680 * dma_free_rx_skbufs - free RX dma buffers 1681 * @priv: private structure 1682 * @queue: RX queue index 1683 */ 1684 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) 1685 { 1686 int i; 1687 1688 for (i = 0; i < priv->dma_rx_size; i++) 1689 stmmac_free_rx_buffer(priv, queue, i); 1690 } 1691 1692 /** 1693 * dma_free_tx_skbufs - free TX dma buffers 1694 * @priv: private structure 1695 * @queue: TX queue index 1696 */ 1697 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) 1698 { 1699 int i; 1700 1701 for (i = 0; i < priv->dma_tx_size; i++) 1702 stmmac_free_tx_buffer(priv, queue, i); 1703 } 1704 1705 /** 1706 * stmmac_free_tx_skbufs - free TX skb buffers 1707 * @priv: private structure 1708 */ 1709 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv) 1710 { 1711 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; 1712 u32 queue; 1713 1714 for (queue = 0; queue < tx_queue_cnt; queue++) 1715 dma_free_tx_skbufs(priv, queue); 1716 } 1717 1718 /** 1719 * free_dma_rx_desc_resources - free RX dma desc resources 1720 * @priv: private structure 1721 */ 1722 static void free_dma_rx_desc_resources(struct stmmac_priv *priv) 1723 { 1724 u32 rx_count = priv->plat->rx_queues_to_use; 1725 u32 queue; 1726 1727 /* Free RX queue resources */ 1728 for (queue = 0; queue < rx_count; queue++) { 1729 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1730 1731 /* Release the DMA RX socket buffers */ 1732 dma_free_rx_skbufs(priv, queue); 1733 1734 /* Free DMA regions of consistent memory previously allocated */ 1735 if (!priv->extend_desc) 1736 dma_free_coherent(priv->device, priv->dma_rx_size * 1737 sizeof(struct dma_desc), 1738 rx_q->dma_rx, rx_q->dma_rx_phy); 1739 else 1740 dma_free_coherent(priv->device, priv->dma_rx_size * 1741 sizeof(struct dma_extended_desc), 1742 rx_q->dma_erx, rx_q->dma_rx_phy); 1743 1744 kfree(rx_q->buf_pool); 1745 if (rx_q->page_pool) 1746 page_pool_destroy(rx_q->page_pool); 1747 } 1748 } 1749 1750 /** 1751 * free_dma_tx_desc_resources - free TX dma desc resources 1752 * @priv: private structure 1753 */ 1754 static void free_dma_tx_desc_resources(struct stmmac_priv *priv) 1755 { 1756 u32 tx_count = priv->plat->tx_queues_to_use; 1757 u32 queue; 1758 1759 /* Free TX queue resources */ 1760 for (queue = 0; queue < tx_count; queue++) { 1761 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1762 size_t size; 1763 void *addr; 1764 1765 /* Release the DMA TX socket buffers */ 1766 dma_free_tx_skbufs(priv, queue); 1767 1768 if (priv->extend_desc) { 1769 size = sizeof(struct dma_extended_desc); 1770 addr = tx_q->dma_etx; 1771 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { 1772 size = sizeof(struct dma_edesc); 1773 addr = tx_q->dma_entx; 1774 } else { 1775 size = sizeof(struct dma_desc); 1776 addr = tx_q->dma_tx; 1777 } 1778 1779 size *= priv->dma_tx_size; 1780 1781 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); 1782 1783 kfree(tx_q->tx_skbuff_dma); 1784 kfree(tx_q->tx_skbuff); 1785 } 1786 } 1787 1788 /** 1789 * alloc_dma_rx_desc_resources - alloc RX resources. 
1790 * @priv: private structure 1791 * Description: according to which descriptor can be used (extend or basic) 1792 * this function allocates the resources for TX and RX paths. In case of 1793 * reception, for example, it pre-allocated the RX socket buffer in order to 1794 * allow zero-copy mechanism. 1795 */ 1796 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) 1797 { 1798 u32 rx_count = priv->plat->rx_queues_to_use; 1799 int ret = -ENOMEM; 1800 u32 queue; 1801 1802 /* RX queues buffers and DMA */ 1803 for (queue = 0; queue < rx_count; queue++) { 1804 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 1805 struct page_pool_params pp_params = { 0 }; 1806 unsigned int num_pages; 1807 1808 rx_q->queue_index = queue; 1809 rx_q->priv_data = priv; 1810 1811 pp_params.flags = PP_FLAG_DMA_MAP; 1812 pp_params.pool_size = priv->dma_rx_size; 1813 num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); 1814 pp_params.order = ilog2(num_pages); 1815 pp_params.nid = dev_to_node(priv->device); 1816 pp_params.dev = priv->device; 1817 pp_params.dma_dir = DMA_FROM_DEVICE; 1818 1819 rx_q->page_pool = page_pool_create(&pp_params); 1820 if (IS_ERR(rx_q->page_pool)) { 1821 ret = PTR_ERR(rx_q->page_pool); 1822 rx_q->page_pool = NULL; 1823 goto err_dma; 1824 } 1825 1826 rx_q->buf_pool = kcalloc(priv->dma_rx_size, 1827 sizeof(*rx_q->buf_pool), 1828 GFP_KERNEL); 1829 if (!rx_q->buf_pool) 1830 goto err_dma; 1831 1832 if (priv->extend_desc) { 1833 rx_q->dma_erx = dma_alloc_coherent(priv->device, 1834 priv->dma_rx_size * 1835 sizeof(struct dma_extended_desc), 1836 &rx_q->dma_rx_phy, 1837 GFP_KERNEL); 1838 if (!rx_q->dma_erx) 1839 goto err_dma; 1840 1841 } else { 1842 rx_q->dma_rx = dma_alloc_coherent(priv->device, 1843 priv->dma_rx_size * 1844 sizeof(struct dma_desc), 1845 &rx_q->dma_rx_phy, 1846 GFP_KERNEL); 1847 if (!rx_q->dma_rx) 1848 goto err_dma; 1849 } 1850 } 1851 1852 return 0; 1853 1854 err_dma: 1855 free_dma_rx_desc_resources(priv); 1856 1857 return ret; 1858 } 1859 1860 /** 1861 * alloc_dma_tx_desc_resources - alloc TX resources. 1862 * @priv: private structure 1863 * Description: according to which descriptor can be used (extend or basic) 1864 * this function allocates the resources for TX and RX paths. In case of 1865 * reception, for example, it pre-allocated the RX socket buffer in order to 1866 * allow zero-copy mechanism. 
1867 */ 1868 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) 1869 { 1870 u32 tx_count = priv->plat->tx_queues_to_use; 1871 int ret = -ENOMEM; 1872 u32 queue; 1873 1874 /* TX queues buffers and DMA */ 1875 for (queue = 0; queue < tx_count; queue++) { 1876 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 1877 size_t size; 1878 void *addr; 1879 1880 tx_q->queue_index = queue; 1881 tx_q->priv_data = priv; 1882 1883 tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, 1884 sizeof(*tx_q->tx_skbuff_dma), 1885 GFP_KERNEL); 1886 if (!tx_q->tx_skbuff_dma) 1887 goto err_dma; 1888 1889 tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, 1890 sizeof(struct sk_buff *), 1891 GFP_KERNEL); 1892 if (!tx_q->tx_skbuff) 1893 goto err_dma; 1894 1895 if (priv->extend_desc) 1896 size = sizeof(struct dma_extended_desc); 1897 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1898 size = sizeof(struct dma_edesc); 1899 else 1900 size = sizeof(struct dma_desc); 1901 1902 size *= priv->dma_tx_size; 1903 1904 addr = dma_alloc_coherent(priv->device, size, 1905 &tx_q->dma_tx_phy, GFP_KERNEL); 1906 if (!addr) 1907 goto err_dma; 1908 1909 if (priv->extend_desc) 1910 tx_q->dma_etx = addr; 1911 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 1912 tx_q->dma_entx = addr; 1913 else 1914 tx_q->dma_tx = addr; 1915 } 1916 1917 return 0; 1918 1919 err_dma: 1920 free_dma_tx_desc_resources(priv); 1921 return ret; 1922 } 1923 1924 /** 1925 * alloc_dma_desc_resources - alloc TX/RX resources. 1926 * @priv: private structure 1927 * Description: according to which descriptor can be used (extend or basic) 1928 * this function allocates the resources for TX and RX paths. In case of 1929 * reception, for example, it pre-allocated the RX socket buffer in order to 1930 * allow zero-copy mechanism. 1931 */ 1932 static int alloc_dma_desc_resources(struct stmmac_priv *priv) 1933 { 1934 /* RX Allocation */ 1935 int ret = alloc_dma_rx_desc_resources(priv); 1936 1937 if (ret) 1938 return ret; 1939 1940 ret = alloc_dma_tx_desc_resources(priv); 1941 1942 return ret; 1943 } 1944 1945 /** 1946 * free_dma_desc_resources - free dma desc resources 1947 * @priv: private structure 1948 */ 1949 static void free_dma_desc_resources(struct stmmac_priv *priv) 1950 { 1951 /* Release the DMA RX socket buffers */ 1952 free_dma_rx_desc_resources(priv); 1953 1954 /* Release the DMA TX socket buffers */ 1955 free_dma_tx_desc_resources(priv); 1956 } 1957 1958 /** 1959 * stmmac_mac_enable_rx_queues - Enable MAC rx queues 1960 * @priv: driver private structure 1961 * Description: It is used for enabling the rx queues in the MAC 1962 */ 1963 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) 1964 { 1965 u32 rx_queues_count = priv->plat->rx_queues_to_use; 1966 int queue; 1967 u8 mode; 1968 1969 for (queue = 0; queue < rx_queues_count; queue++) { 1970 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; 1971 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); 1972 } 1973 } 1974 1975 /** 1976 * stmmac_start_rx_dma - start RX DMA channel 1977 * @priv: driver private structure 1978 * @chan: RX channel index 1979 * Description: 1980 * This starts a RX DMA channel 1981 */ 1982 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) 1983 { 1984 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); 1985 stmmac_start_rx(priv, priv->ioaddr, chan); 1986 } 1987 1988 /** 1989 * stmmac_start_tx_dma - start TX DMA channel 1990 * @priv: driver private structure 1991 * @chan: TX channel index 1992 * Description: 1993 * This starts a TX DMA channel 1994 
*/ 1995 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) 1996 { 1997 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); 1998 stmmac_start_tx(priv, priv->ioaddr, chan); 1999 } 2000 2001 /** 2002 * stmmac_stop_rx_dma - stop RX DMA channel 2003 * @priv: driver private structure 2004 * @chan: RX channel index 2005 * Description: 2006 * This stops a RX DMA channel 2007 */ 2008 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) 2009 { 2010 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); 2011 stmmac_stop_rx(priv, priv->ioaddr, chan); 2012 } 2013 2014 /** 2015 * stmmac_stop_tx_dma - stop TX DMA channel 2016 * @priv: driver private structure 2017 * @chan: TX channel index 2018 * Description: 2019 * This stops a TX DMA channel 2020 */ 2021 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) 2022 { 2023 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); 2024 stmmac_stop_tx(priv, priv->ioaddr, chan); 2025 } 2026 2027 /** 2028 * stmmac_start_all_dma - start all RX and TX DMA channels 2029 * @priv: driver private structure 2030 * Description: 2031 * This starts all the RX and TX DMA channels 2032 */ 2033 static void stmmac_start_all_dma(struct stmmac_priv *priv) 2034 { 2035 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2036 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2037 u32 chan = 0; 2038 2039 for (chan = 0; chan < rx_channels_count; chan++) 2040 stmmac_start_rx_dma(priv, chan); 2041 2042 for (chan = 0; chan < tx_channels_count; chan++) 2043 stmmac_start_tx_dma(priv, chan); 2044 } 2045 2046 /** 2047 * stmmac_stop_all_dma - stop all RX and TX DMA channels 2048 * @priv: driver private structure 2049 * Description: 2050 * This stops the RX and TX DMA channels 2051 */ 2052 static void stmmac_stop_all_dma(struct stmmac_priv *priv) 2053 { 2054 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2055 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2056 u32 chan = 0; 2057 2058 for (chan = 0; chan < rx_channels_count; chan++) 2059 stmmac_stop_rx_dma(priv, chan); 2060 2061 for (chan = 0; chan < tx_channels_count; chan++) 2062 stmmac_stop_tx_dma(priv, chan); 2063 } 2064 2065 /** 2066 * stmmac_dma_operation_mode - HW DMA operation mode 2067 * @priv: driver private structure 2068 * Description: it is used for configuring the DMA operation mode register in 2069 * order to program the tx/rx DMA thresholds or Store-And-Forward mode. 2070 */ 2071 static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 2072 { 2073 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2074 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2075 int rxfifosz = priv->plat->rx_fifo_size; 2076 int txfifosz = priv->plat->tx_fifo_size; 2077 u32 txmode = 0; 2078 u32 rxmode = 0; 2079 u32 chan = 0; 2080 u8 qmode = 0; 2081 2082 if (rxfifosz == 0) 2083 rxfifosz = priv->dma_cap.rx_fifo_size; 2084 if (txfifosz == 0) 2085 txfifosz = priv->dma_cap.tx_fifo_size; 2086 2087 /* Adjust for real per queue fifo size */ 2088 rxfifosz /= rx_channels_count; 2089 txfifosz /= tx_channels_count; 2090 2091 if (priv->plat->force_thresh_dma_mode) { 2092 txmode = tc; 2093 rxmode = tc; 2094 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { 2095 /* 2096 * In case of GMAC, SF mode can be enabled 2097 * to perform the TX COE in HW. This depends on: 2098 * 1) TX COE if actually supported 2099 * 2) There is no bugged Jumbo frame support 2100 * that needs to not insert csum in the TDES. 
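 *
 * As a worked illustration of the selection done in this function: the
 * platform FIFO sizes are split evenly across the active queues; threshold
 * mode is used for both directions when force_thresh_dma_mode is set,
 * store-and-forward for both when it is forced or TX checksum offload is in
 * use, and otherwise TX runs in threshold mode while RX uses
 * store-and-forward. A standalone sketch, where EX_SF is a placeholder for
 * SF_DMA_MODE and all names are illustrative:
 *
 *	#define EX_SF	(-1)	// stand-in for SF_DMA_MODE
 *
 *	struct ex_mode { int txmode, rxmode, rxfifo_per_q, txfifo_per_q; };
 *
 *	static struct ex_mode ex_pick_mode(int force_thresh, int sf_or_tx_coe,
 *					   int tc, int rxfifosz, int txfifosz,
 *					   int rx_queues, int tx_queues)
 *	{
 *		struct ex_mode m;
 *
 *		m.rxfifo_per_q = rxfifosz / rx_queues;	// even split per queue
 *		m.txfifo_per_q = txfifosz / tx_queues;
 *
 *		if (force_thresh) {
 *			m.txmode = tc;			// threshold both ways
 *			m.rxmode = tc;
 *		} else if (sf_or_tx_coe) {
 *			m.txmode = EX_SF;		// store-and-forward both ways
 *			m.rxmode = EX_SF;
 *		} else {
 *			m.txmode = tc;			// threshold TX, SF RX
 *			m.rxmode = EX_SF;
 *		}
 *		return m;
 *	}
 *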
2101 */ 2102 txmode = SF_DMA_MODE; 2103 rxmode = SF_DMA_MODE; 2104 priv->xstats.threshold = SF_DMA_MODE; 2105 } else { 2106 txmode = tc; 2107 rxmode = SF_DMA_MODE; 2108 } 2109 2110 /* configure all channels */ 2111 for (chan = 0; chan < rx_channels_count; chan++) { 2112 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2113 2114 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 2115 rxfifosz, qmode); 2116 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz, 2117 chan); 2118 } 2119 2120 for (chan = 0; chan < tx_channels_count; chan++) { 2121 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2122 2123 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, 2124 txfifosz, qmode); 2125 } 2126 } 2127 2128 /** 2129 * stmmac_tx_clean - to manage the transmission completion 2130 * @priv: driver private structure 2131 * @budget: napi budget limiting this functions packet handling 2132 * @queue: TX queue index 2133 * Description: it reclaims the transmit resources after transmission completes. 2134 */ 2135 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) 2136 { 2137 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2138 unsigned int bytes_compl = 0, pkts_compl = 0; 2139 unsigned int entry, count = 0; 2140 2141 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); 2142 2143 priv->xstats.tx_clean++; 2144 2145 entry = tx_q->dirty_tx; 2146 while ((entry != tx_q->cur_tx) && (count < budget)) { 2147 struct sk_buff *skb = tx_q->tx_skbuff[entry]; 2148 struct dma_desc *p; 2149 int status; 2150 2151 if (priv->extend_desc) 2152 p = (struct dma_desc *)(tx_q->dma_etx + entry); 2153 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 2154 p = &tx_q->dma_entx[entry].basic; 2155 else 2156 p = tx_q->dma_tx + entry; 2157 2158 status = stmmac_tx_status(priv, &priv->dev->stats, 2159 &priv->xstats, p, priv->ioaddr); 2160 /* Check if the descriptor is owned by the DMA */ 2161 if (unlikely(status & tx_dma_own)) 2162 break; 2163 2164 count++; 2165 2166 /* Make sure descriptor fields are read after reading 2167 * the own bit. 2168 */ 2169 dma_rmb(); 2170 2171 /* Just consider the last segment and ...*/ 2172 if (likely(!(status & tx_not_ls))) { 2173 /* ... 
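 *
 * The reclaim loop in stmmac_tx_clean() above walks from dirty_tx towards
 * cur_tx, stops as soon as a descriptor is still owned by the DMA or the
 * NAPI budget is spent, and only then (after dma_rmb()) looks at the rest of
 * the descriptor. An illustrative ring-walk sketch, assuming a power-of-two
 * ring size so the advance is a simple mask, with owned() standing in for
 * the hardware own-bit test:
 *
 *	static unsigned int ex_reclaim(unsigned int *dirty, unsigned int cur,
 *				       unsigned int ring_size, int budget,
 *				       int (*owned)(unsigned int entry))
 *	{
 *		unsigned int entry = *dirty, count = 0;
 *
 *		while (entry != cur && (int)count < budget) {
 *			if (owned(entry))	// DMA not done with it yet
 *				break;
 *			// read barrier goes here before using other fields
 *			count++;
 *			entry = (entry + 1) & (ring_size - 1);	// wrap around
 *		}
 *		*dirty = entry;
 *		return count;
 *	}
 *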
verify the status error condition */ 2174 if (unlikely(status & tx_err)) { 2175 priv->dev->stats.tx_errors++; 2176 } else { 2177 priv->dev->stats.tx_packets++; 2178 priv->xstats.tx_pkt_n++; 2179 } 2180 stmmac_get_tx_hwtstamp(priv, p, skb); 2181 } 2182 2183 if (likely(tx_q->tx_skbuff_dma[entry].buf)) { 2184 if (tx_q->tx_skbuff_dma[entry].map_as_page) 2185 dma_unmap_page(priv->device, 2186 tx_q->tx_skbuff_dma[entry].buf, 2187 tx_q->tx_skbuff_dma[entry].len, 2188 DMA_TO_DEVICE); 2189 else 2190 dma_unmap_single(priv->device, 2191 tx_q->tx_skbuff_dma[entry].buf, 2192 tx_q->tx_skbuff_dma[entry].len, 2193 DMA_TO_DEVICE); 2194 tx_q->tx_skbuff_dma[entry].buf = 0; 2195 tx_q->tx_skbuff_dma[entry].len = 0; 2196 tx_q->tx_skbuff_dma[entry].map_as_page = false; 2197 } 2198 2199 stmmac_clean_desc3(priv, tx_q, p); 2200 2201 tx_q->tx_skbuff_dma[entry].last_segment = false; 2202 tx_q->tx_skbuff_dma[entry].is_jumbo = false; 2203 2204 if (likely(skb != NULL)) { 2205 pkts_compl++; 2206 bytes_compl += skb->len; 2207 dev_consume_skb_any(skb); 2208 tx_q->tx_skbuff[entry] = NULL; 2209 } 2210 2211 stmmac_release_tx_desc(priv, p, priv->mode); 2212 2213 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 2214 } 2215 tx_q->dirty_tx = entry; 2216 2217 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), 2218 pkts_compl, bytes_compl); 2219 2220 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, 2221 queue))) && 2222 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { 2223 2224 netif_dbg(priv, tx_done, priv->dev, 2225 "%s: restart transmit\n", __func__); 2226 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); 2227 } 2228 2229 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && 2230 priv->eee_sw_timer_en) { 2231 stmmac_enable_eee_mode(priv); 2232 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); 2233 } 2234 2235 /* We still have pending packets, let's call for a new scheduling */ 2236 if (tx_q->dirty_tx != tx_q->cur_tx) 2237 hrtimer_start(&tx_q->txtimer, 2238 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), 2239 HRTIMER_MODE_REL); 2240 2241 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); 2242 2243 return count; 2244 } 2245 2246 /** 2247 * stmmac_tx_err - to manage the tx error 2248 * @priv: driver private structure 2249 * @chan: channel index 2250 * Description: it cleans the descriptors and restarts the transmission 2251 * in case of transmission errors. 2252 */ 2253 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) 2254 { 2255 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 2256 2257 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); 2258 2259 stmmac_stop_tx_dma(priv, chan); 2260 dma_free_tx_skbufs(priv, chan); 2261 stmmac_clear_tx_descriptors(priv, chan); 2262 tx_q->dirty_tx = 0; 2263 tx_q->cur_tx = 0; 2264 tx_q->mss = 0; 2265 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); 2266 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2267 tx_q->dma_tx_phy, chan); 2268 stmmac_start_tx_dma(priv, chan); 2269 2270 priv->dev->stats.tx_errors++; 2271 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); 2272 } 2273 2274 /** 2275 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel 2276 * @priv: driver private structure 2277 * @txmode: TX operating mode 2278 * @rxmode: RX operating mode 2279 * @chan: channel index 2280 * Description: it is used for configuring of the DMA operation mode in 2281 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward 2282 * mode. 
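 *
 * Note that stmmac_tx_clean() only wakes a stopped queue once the number of
 * free descriptors has climbed back above a threshold, which avoids bouncing
 * the queue on every reclaimed entry. The free count itself is ordinary
 * producer/consumer ring arithmetic; the sketch below shows one common
 * formulation for illustration only (it is not copied from stmmac_tx_avail()):
 *
 *	static unsigned int ex_ring_avail(unsigned int dirty, unsigned int cur,
 *					  unsigned int ring_size)
 *	{
 *		// keep one slot unused so that "full" and "empty" differ
 *		if (dirty > cur)
 *			return dirty - cur - 1;
 *		return ring_size - cur + dirty - 1;
 *	}
 *
 *	static int ex_should_wake(int queue_stopped, unsigned int avail,
 *				  unsigned int thresh)
 *	{
 *		return queue_stopped && avail > thresh;	// hysteresis on restart
 *	}
 *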
2283 */ 2284 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 2285 u32 rxmode, u32 chan) 2286 { 2287 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 2288 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 2289 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2290 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2291 int rxfifosz = priv->plat->rx_fifo_size; 2292 int txfifosz = priv->plat->tx_fifo_size; 2293 2294 if (rxfifosz == 0) 2295 rxfifosz = priv->dma_cap.rx_fifo_size; 2296 if (txfifosz == 0) 2297 txfifosz = priv->dma_cap.tx_fifo_size; 2298 2299 /* Adjust for real per queue fifo size */ 2300 rxfifosz /= rx_channels_count; 2301 txfifosz /= tx_channels_count; 2302 2303 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); 2304 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); 2305 } 2306 2307 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 2308 { 2309 int ret; 2310 2311 ret = stmmac_safety_feat_irq_status(priv, priv->dev, 2312 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 2313 if (ret && (ret != -EINVAL)) { 2314 stmmac_global_err(priv); 2315 return true; 2316 } 2317 2318 return false; 2319 } 2320 2321 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) 2322 { 2323 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, 2324 &priv->xstats, chan); 2325 struct stmmac_channel *ch = &priv->channel[chan]; 2326 unsigned long flags; 2327 2328 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { 2329 if (napi_schedule_prep(&ch->rx_napi)) { 2330 spin_lock_irqsave(&ch->lock, flags); 2331 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 2332 spin_unlock_irqrestore(&ch->lock, flags); 2333 __napi_schedule(&ch->rx_napi); 2334 } 2335 } 2336 2337 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { 2338 if (napi_schedule_prep(&ch->tx_napi)) { 2339 spin_lock_irqsave(&ch->lock, flags); 2340 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 2341 spin_unlock_irqrestore(&ch->lock, flags); 2342 __napi_schedule(&ch->tx_napi); 2343 } 2344 } 2345 2346 return status; 2347 } 2348 2349 /** 2350 * stmmac_dma_interrupt - DMA ISR 2351 * @priv: driver private structure 2352 * Description: this is the DMA ISR. It is called by the main ISR. 2353 * It calls the dwmac dma routine and schedule poll method in case of some 2354 * work can be done. 2355 */ 2356 static void stmmac_dma_interrupt(struct stmmac_priv *priv) 2357 { 2358 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2359 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2360 u32 channels_to_check = tx_channel_count > rx_channel_count ? 2361 tx_channel_count : rx_channel_count; 2362 u32 chan; 2363 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; 2364 2365 /* Make sure we never check beyond our status buffer. 
*/ 2366 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) 2367 channels_to_check = ARRAY_SIZE(status); 2368 2369 for (chan = 0; chan < channels_to_check; chan++) 2370 status[chan] = stmmac_napi_check(priv, chan); 2371 2372 for (chan = 0; chan < tx_channel_count; chan++) { 2373 if (unlikely(status[chan] & tx_hard_error_bump_tc)) { 2374 /* Try to bump up the dma threshold on this failure */ 2375 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && 2376 (tc <= 256)) { 2377 tc += 64; 2378 if (priv->plat->force_thresh_dma_mode) 2379 stmmac_set_dma_operation_mode(priv, 2380 tc, 2381 tc, 2382 chan); 2383 else 2384 stmmac_set_dma_operation_mode(priv, 2385 tc, 2386 SF_DMA_MODE, 2387 chan); 2388 priv->xstats.threshold = tc; 2389 } 2390 } else if (unlikely(status[chan] == tx_hard_error)) { 2391 stmmac_tx_err(priv, chan); 2392 } 2393 } 2394 } 2395 2396 /** 2397 * stmmac_mmc_setup: setup the Mac Management Counters (MMC) 2398 * @priv: driver private structure 2399 * Description: this masks the MMC irq, in fact, the counters are managed in SW. 2400 */ 2401 static void stmmac_mmc_setup(struct stmmac_priv *priv) 2402 { 2403 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 2404 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 2405 2406 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); 2407 2408 if (priv->dma_cap.rmon) { 2409 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); 2410 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 2411 } else 2412 netdev_info(priv->dev, "No MAC Management Counters available\n"); 2413 } 2414 2415 /** 2416 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 2417 * @priv: driver private structure 2418 * Description: 2419 * new GMAC chip generations have a new register to indicate the 2420 * presence of the optional feature/functions. 2421 * This can be also used to override the value passed through the 2422 * platform and necessary for old MAC10/100 and GMAC chips. 2423 */ 2424 static int stmmac_get_hw_features(struct stmmac_priv *priv) 2425 { 2426 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2427 } 2428 2429 /** 2430 * stmmac_check_ether_addr - check if the MAC addr is valid 2431 * @priv: driver private structure 2432 * Description: 2433 * it is to verify if the MAC address is valid, in case of failures it 2434 * generates a random MAC address 2435 */ 2436 static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2437 { 2438 if (!is_valid_ether_addr(priv->dev->dev_addr)) { 2439 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); 2440 if (!is_valid_ether_addr(priv->dev->dev_addr)) 2441 eth_hw_addr_random(priv->dev); 2442 dev_info(priv->device, "device MAC address %pM\n", 2443 priv->dev->dev_addr); 2444 } 2445 } 2446 2447 /** 2448 * stmmac_init_dma_engine - DMA init. 2449 * @priv: driver private structure 2450 * Description: 2451 * It inits the DMA invoking the specific MAC/GMAC callback. 2452 * Some DMA parameters can be passed from the platform; 2453 * in case of these are not passed a default is kept for the MAC or GMAC. 
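 *
 * When a channel reports tx_hard_error_bump_tc, stmmac_dma_interrupt() above
 * escalates the threshold in steps of 64 (while the current value is still
 * not above 256 and the channel is not in store-and-forward mode) and then
 * reprograms the operation mode. The escalation itself reduces to the
 * following illustrative helper:
 *
 *	// returns the new threshold; ex_ names are illustrative only
 *	static int ex_bump_threshold(int tc, int in_sf_mode)
 *	{
 *		if (!in_sf_mode && tc <= 256)	// same bound as the code above
 *			tc += 64;		// e.g. 64 -> 128 -> 192 -> ...
 *		return tc;
 *	}
 *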
2454 */ 2455 static int stmmac_init_dma_engine(struct stmmac_priv *priv) 2456 { 2457 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2458 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2459 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2460 struct stmmac_rx_queue *rx_q; 2461 struct stmmac_tx_queue *tx_q; 2462 u32 chan = 0; 2463 int atds = 0; 2464 int ret = 0; 2465 2466 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { 2467 dev_err(priv->device, "Invalid DMA configuration\n"); 2468 return -EINVAL; 2469 } 2470 2471 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2472 atds = 1; 2473 2474 ret = stmmac_reset(priv, priv->ioaddr); 2475 if (ret) { 2476 dev_err(priv->device, "Failed to reset the dma\n"); 2477 return ret; 2478 } 2479 2480 /* DMA Configuration */ 2481 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); 2482 2483 if (priv->plat->axi) 2484 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 2485 2486 /* DMA CSR Channel configuration */ 2487 for (chan = 0; chan < dma_csr_ch; chan++) 2488 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 2489 2490 /* DMA RX Channel Configuration */ 2491 for (chan = 0; chan < rx_channels_count; chan++) { 2492 rx_q = &priv->rx_queue[chan]; 2493 2494 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2495 rx_q->dma_rx_phy, chan); 2496 2497 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2498 (priv->dma_rx_size * 2499 sizeof(struct dma_desc)); 2500 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2501 rx_q->rx_tail_addr, chan); 2502 } 2503 2504 /* DMA TX Channel Configuration */ 2505 for (chan = 0; chan < tx_channels_count; chan++) { 2506 tx_q = &priv->tx_queue[chan]; 2507 2508 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2509 tx_q->dma_tx_phy, chan); 2510 2511 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 2512 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2513 tx_q->tx_tail_addr, chan); 2514 } 2515 2516 return ret; 2517 } 2518 2519 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) 2520 { 2521 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2522 2523 hrtimer_start(&tx_q->txtimer, 2524 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]), 2525 HRTIMER_MODE_REL); 2526 } 2527 2528 /** 2529 * stmmac_tx_timer - mitigation sw timer for tx. 2530 * @t: data pointer 2531 * Description: 2532 * This is the timer handler to directly invoke the stmmac_tx_clean. 2533 */ 2534 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t) 2535 { 2536 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer); 2537 struct stmmac_priv *priv = tx_q->priv_data; 2538 struct stmmac_channel *ch; 2539 2540 ch = &priv->channel[tx_q->queue_index]; 2541 2542 if (likely(napi_schedule_prep(&ch->tx_napi))) { 2543 unsigned long flags; 2544 2545 spin_lock_irqsave(&ch->lock, flags); 2546 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); 2547 spin_unlock_irqrestore(&ch->lock, flags); 2548 __napi_schedule(&ch->tx_napi); 2549 } 2550 2551 return HRTIMER_NORESTART; 2552 } 2553 2554 /** 2555 * stmmac_init_coalesce - init mitigation options. 2556 * @priv: driver private structure 2557 * Description: 2558 * This inits the coalesce parameters: i.e. timer rate, 2559 * timer handler and default threshold used for enabling the 2560 * interrupt on completion bit. 
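 *
 * Note how the tail pointers are seeded in stmmac_init_dma_engine() above:
 * the RX tail is programmed to the end of the descriptor ring (base address
 * plus ring length times descriptor size) while the TX tail starts at the
 * ring base, since nothing has been queued yet. The address arithmetic in
 * isolation, with illustrative names and a caller-supplied descriptor size:
 *
 *	static unsigned long long ex_rx_tail(unsigned long long dma_rx_phy,
 *					     unsigned int dma_rx_size,
 *					     unsigned int desc_sz)
 *	{
 *		return dma_rx_phy + (unsigned long long)dma_rx_size * desc_sz;
 *	}
 *
 *	static unsigned long long ex_tx_tail(unsigned long long dma_tx_phy)
 *	{
 *		return dma_tx_phy;	// advanced later as frames are queued
 *	}
 *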
2561 */ 2562 static void stmmac_init_coalesce(struct stmmac_priv *priv) 2563 { 2564 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2565 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2566 u32 chan; 2567 2568 for (chan = 0; chan < tx_channel_count; chan++) { 2569 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 2570 2571 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; 2572 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; 2573 2574 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 2575 tx_q->txtimer.function = stmmac_tx_timer; 2576 } 2577 2578 for (chan = 0; chan < rx_channel_count; chan++) 2579 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; 2580 } 2581 2582 static void stmmac_set_rings_length(struct stmmac_priv *priv) 2583 { 2584 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2585 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2586 u32 chan; 2587 2588 /* set TX ring length */ 2589 for (chan = 0; chan < tx_channels_count; chan++) 2590 stmmac_set_tx_ring_len(priv, priv->ioaddr, 2591 (priv->dma_tx_size - 1), chan); 2592 2593 /* set RX ring length */ 2594 for (chan = 0; chan < rx_channels_count; chan++) 2595 stmmac_set_rx_ring_len(priv, priv->ioaddr, 2596 (priv->dma_rx_size - 1), chan); 2597 } 2598 2599 /** 2600 * stmmac_set_tx_queue_weight - Set TX queue weight 2601 * @priv: driver private structure 2602 * Description: It is used for setting TX queues weight 2603 */ 2604 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 2605 { 2606 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2607 u32 weight; 2608 u32 queue; 2609 2610 for (queue = 0; queue < tx_queues_count; queue++) { 2611 weight = priv->plat->tx_queues_cfg[queue].weight; 2612 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 2613 } 2614 } 2615 2616 /** 2617 * stmmac_configure_cbs - Configure CBS in TX queue 2618 * @priv: driver private structure 2619 * Description: It is used for configuring CBS in AVB TX queues 2620 */ 2621 static void stmmac_configure_cbs(struct stmmac_priv *priv) 2622 { 2623 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2624 u32 mode_to_use; 2625 u32 queue; 2626 2627 /* queue 0 is reserved for legacy traffic */ 2628 for (queue = 1; queue < tx_queues_count; queue++) { 2629 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 2630 if (mode_to_use == MTL_QUEUE_DCB) 2631 continue; 2632 2633 stmmac_config_cbs(priv, priv->hw, 2634 priv->plat->tx_queues_cfg[queue].send_slope, 2635 priv->plat->tx_queues_cfg[queue].idle_slope, 2636 priv->plat->tx_queues_cfg[queue].high_credit, 2637 priv->plat->tx_queues_cfg[queue].low_credit, 2638 queue); 2639 } 2640 } 2641 2642 /** 2643 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 2644 * @priv: driver private structure 2645 * Description: It is used for mapping RX queues to RX dma channels 2646 */ 2647 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 2648 { 2649 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2650 u32 queue; 2651 u32 chan; 2652 2653 for (queue = 0; queue < rx_queues_count; queue++) { 2654 chan = priv->plat->rx_queues_cfg[queue].chan; 2655 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 2656 } 2657 } 2658 2659 /** 2660 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 2661 * @priv: driver private structure 2662 * Description: It is used for configuring the RX Queue Priority 2663 */ 2664 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 2665 { 2666 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2667 u32 
queue; 2668 u32 prio; 2669 2670 for (queue = 0; queue < rx_queues_count; queue++) { 2671 if (!priv->plat->rx_queues_cfg[queue].use_prio) 2672 continue; 2673 2674 prio = priv->plat->rx_queues_cfg[queue].prio; 2675 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); 2676 } 2677 } 2678 2679 /** 2680 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 2681 * @priv: driver private structure 2682 * Description: It is used for configuring the TX Queue Priority 2683 */ 2684 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 2685 { 2686 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2687 u32 queue; 2688 u32 prio; 2689 2690 for (queue = 0; queue < tx_queues_count; queue++) { 2691 if (!priv->plat->tx_queues_cfg[queue].use_prio) 2692 continue; 2693 2694 prio = priv->plat->tx_queues_cfg[queue].prio; 2695 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 2696 } 2697 } 2698 2699 /** 2700 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 2701 * @priv: driver private structure 2702 * Description: It is used for configuring the RX queue routing 2703 */ 2704 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 2705 { 2706 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2707 u32 queue; 2708 u8 packet; 2709 2710 for (queue = 0; queue < rx_queues_count; queue++) { 2711 /* no specific packet type routing specified for the queue */ 2712 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 2713 continue; 2714 2715 packet = priv->plat->rx_queues_cfg[queue].pkt_route; 2716 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 2717 } 2718 } 2719 2720 static void stmmac_mac_config_rss(struct stmmac_priv *priv) 2721 { 2722 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { 2723 priv->rss.enable = false; 2724 return; 2725 } 2726 2727 if (priv->dev->features & NETIF_F_RXHASH) 2728 priv->rss.enable = true; 2729 else 2730 priv->rss.enable = false; 2731 2732 stmmac_rss_configure(priv, priv->hw, &priv->rss, 2733 priv->plat->rx_queues_to_use); 2734 } 2735 2736 /** 2737 * stmmac_mtl_configuration - Configure MTL 2738 * @priv: driver private structure 2739 * Description: It is used for configurring MTL 2740 */ 2741 static void stmmac_mtl_configuration(struct stmmac_priv *priv) 2742 { 2743 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2744 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2745 2746 if (tx_queues_count > 1) 2747 stmmac_set_tx_queue_weight(priv); 2748 2749 /* Configure MTL RX algorithms */ 2750 if (rx_queues_count > 1) 2751 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 2752 priv->plat->rx_sched_algorithm); 2753 2754 /* Configure MTL TX algorithms */ 2755 if (tx_queues_count > 1) 2756 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 2757 priv->plat->tx_sched_algorithm); 2758 2759 /* Configure CBS in AVB TX queues */ 2760 if (tx_queues_count > 1) 2761 stmmac_configure_cbs(priv); 2762 2763 /* Map RX MTL to DMA channels */ 2764 stmmac_rx_queue_dma_chan_map(priv); 2765 2766 /* Enable MAC RX Queues */ 2767 stmmac_mac_enable_rx_queues(priv); 2768 2769 /* Set RX priorities */ 2770 if (rx_queues_count > 1) 2771 stmmac_mac_config_rx_queues_prio(priv); 2772 2773 /* Set TX priorities */ 2774 if (tx_queues_count > 1) 2775 stmmac_mac_config_tx_queues_prio(priv); 2776 2777 /* Set RX routing */ 2778 if (rx_queues_count > 1) 2779 stmmac_mac_config_rx_queues_routing(priv); 2780 2781 /* Receive Side Scaling */ 2782 if (rx_queues_count > 1) 2783 stmmac_mac_config_rss(priv); 2784 } 2785 2786 static void stmmac_safety_feat_configuration(struct 
stmmac_priv *priv) 2787 { 2788 if (priv->dma_cap.asp) { 2789 netdev_info(priv->dev, "Enabling Safety Features\n"); 2790 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp); 2791 } else { 2792 netdev_info(priv->dev, "No Safety Features support found\n"); 2793 } 2794 } 2795 2796 /** 2797 * stmmac_hw_setup - setup mac in a usable state. 2798 * @dev : pointer to the device structure. 2799 * @init_ptp: initialize PTP if set 2800 * Description: 2801 * this is the main function to setup the HW in a usable state because the 2802 * dma engine is reset, the core registers are configured (e.g. AXI, 2803 * Checksum features, timers). The DMA is ready to start receiving and 2804 * transmitting. 2805 * Return value: 2806 * 0 on success and an appropriate (-)ve integer as defined in errno.h 2807 * file on failure. 2808 */ 2809 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) 2810 { 2811 struct stmmac_priv *priv = netdev_priv(dev); 2812 u32 rx_cnt = priv->plat->rx_queues_to_use; 2813 u32 tx_cnt = priv->plat->tx_queues_to_use; 2814 u32 chan; 2815 int ret; 2816 2817 /* DMA initialization and SW reset */ 2818 ret = stmmac_init_dma_engine(priv); 2819 if (ret < 0) { 2820 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 2821 __func__); 2822 return ret; 2823 } 2824 2825 /* Copy the MAC addr into the HW */ 2826 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 2827 2828 /* PS and related bits will be programmed according to the speed */ 2829 if (priv->hw->pcs) { 2830 int speed = priv->plat->mac_port_sel_speed; 2831 2832 if ((speed == SPEED_10) || (speed == SPEED_100) || 2833 (speed == SPEED_1000)) { 2834 priv->hw->ps = speed; 2835 } else { 2836 dev_warn(priv->device, "invalid port speed\n"); 2837 priv->hw->ps = 0; 2838 } 2839 } 2840 2841 /* Initialize the MAC Core */ 2842 stmmac_core_init(priv, priv->hw, dev); 2843 2844 /* Initialize MTL*/ 2845 stmmac_mtl_configuration(priv); 2846 2847 /* Initialize Safety Features */ 2848 stmmac_safety_feat_configuration(priv); 2849 2850 ret = stmmac_rx_ipc(priv, priv->hw); 2851 if (!ret) { 2852 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 2853 priv->plat->rx_coe = STMMAC_RX_COE_NONE; 2854 priv->hw->rx_csum = 0; 2855 } 2856 2857 /* Enable the MAC Rx/Tx */ 2858 stmmac_mac_set(priv, priv->ioaddr, true); 2859 2860 /* Set the HW DMA mode and the COE */ 2861 stmmac_dma_operation_mode(priv); 2862 2863 stmmac_mmc_setup(priv); 2864 2865 if (init_ptp) { 2866 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 2867 if (ret < 0) 2868 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); 2869 2870 ret = stmmac_init_ptp(priv); 2871 if (ret == -EOPNOTSUPP) 2872 netdev_warn(priv->dev, "PTP not supported by HW\n"); 2873 else if (ret) 2874 netdev_warn(priv->dev, "PTP init failed\n"); 2875 } 2876 2877 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; 2878 2879 /* Convert the timer from msec to usec */ 2880 if (!priv->tx_lpi_timer) 2881 priv->tx_lpi_timer = eee_timer * 1000; 2882 2883 if (priv->use_riwt) { 2884 u32 queue; 2885 2886 for (queue = 0; queue < rx_cnt; queue++) { 2887 if (!priv->rx_riwt[queue]) 2888 priv->rx_riwt[queue] = DEF_DMA_RIWT; 2889 2890 stmmac_rx_watchdog(priv, priv->ioaddr, 2891 priv->rx_riwt[queue], queue); 2892 } 2893 } 2894 2895 if (priv->hw->pcs) 2896 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); 2897 2898 /* set TX and RX rings length */ 2899 stmmac_set_rings_length(priv); 2900 2901 /* Enable TSO */ 2902 if (priv->tso) { 2903 for (chan = 0; chan < tx_cnt; chan++) 2904 
stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 2905 } 2906 2907 /* Enable Split Header */ 2908 if (priv->sph && priv->hw->rx_csum) { 2909 for (chan = 0; chan < rx_cnt; chan++) 2910 stmmac_enable_sph(priv, priv->ioaddr, 1, chan); 2911 } 2912 2913 /* VLAN Tag Insertion */ 2914 if (priv->dma_cap.vlins) 2915 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); 2916 2917 /* TBS */ 2918 for (chan = 0; chan < tx_cnt; chan++) { 2919 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 2920 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; 2921 2922 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); 2923 } 2924 2925 /* Configure real RX and TX queues */ 2926 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); 2927 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); 2928 2929 /* Start the ball rolling... */ 2930 stmmac_start_all_dma(priv); 2931 2932 return 0; 2933 } 2934 2935 static void stmmac_hw_teardown(struct net_device *dev) 2936 { 2937 struct stmmac_priv *priv = netdev_priv(dev); 2938 2939 clk_disable_unprepare(priv->plat->clk_ptp_ref); 2940 } 2941 2942 /** 2943 * stmmac_open - open entry point of the driver 2944 * @dev : pointer to the device structure. 2945 * Description: 2946 * This function is the open entry point of the driver. 2947 * Return value: 2948 * 0 on success and an appropriate (-)ve integer as defined in errno.h 2949 * file on failure. 2950 */ 2951 static int stmmac_open(struct net_device *dev) 2952 { 2953 struct stmmac_priv *priv = netdev_priv(dev); 2954 int bfsize = 0; 2955 u32 chan; 2956 int ret; 2957 2958 ret = pm_runtime_get_sync(priv->device); 2959 if (ret < 0) { 2960 pm_runtime_put_noidle(priv->device); 2961 return ret; 2962 } 2963 2964 if (priv->hw->pcs != STMMAC_PCS_TBI && 2965 priv->hw->pcs != STMMAC_PCS_RTBI && 2966 priv->hw->xpcs_args.an_mode != DW_AN_C73) { 2967 ret = stmmac_init_phy(dev); 2968 if (ret) { 2969 netdev_err(priv->dev, 2970 "%s: Cannot attach to PHY (error: %d)\n", 2971 __func__, ret); 2972 goto init_phy_error; 2973 } 2974 } 2975 2976 /* Extra statistics */ 2977 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); 2978 priv->xstats.threshold = tc; 2979 2980 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); 2981 if (bfsize < 0) 2982 bfsize = 0; 2983 2984 if (bfsize < BUF_SIZE_16KiB) 2985 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); 2986 2987 priv->dma_buf_sz = bfsize; 2988 buf_sz = bfsize; 2989 2990 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 2991 2992 if (!priv->dma_tx_size) 2993 priv->dma_tx_size = DMA_DEFAULT_TX_SIZE; 2994 if (!priv->dma_rx_size) 2995 priv->dma_rx_size = DMA_DEFAULT_RX_SIZE; 2996 2997 /* Earlier check for TBS */ 2998 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { 2999 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 3000 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; 3001 3002 tx_q->tbs |= tbs_en ? 
STMMAC_TBS_AVAIL : 0; 3003 if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan)) 3004 tx_q->tbs &= ~STMMAC_TBS_AVAIL; 3005 } 3006 3007 ret = alloc_dma_desc_resources(priv); 3008 if (ret < 0) { 3009 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", 3010 __func__); 3011 goto dma_desc_error; 3012 } 3013 3014 ret = init_dma_desc_rings(dev, GFP_KERNEL); 3015 if (ret < 0) { 3016 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", 3017 __func__); 3018 goto init_error; 3019 } 3020 3021 ret = stmmac_hw_setup(dev, true); 3022 if (ret < 0) { 3023 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); 3024 goto init_error; 3025 } 3026 3027 stmmac_init_coalesce(priv); 3028 3029 phylink_start(priv->phylink); 3030 /* We may have called phylink_speed_down before */ 3031 phylink_speed_up(priv->phylink); 3032 3033 /* Request the IRQ lines */ 3034 ret = request_irq(dev->irq, stmmac_interrupt, 3035 IRQF_SHARED, dev->name, dev); 3036 if (unlikely(ret < 0)) { 3037 netdev_err(priv->dev, 3038 "%s: ERROR: allocating the IRQ %d (error: %d)\n", 3039 __func__, dev->irq, ret); 3040 goto irq_error; 3041 } 3042 3043 /* Request the Wake IRQ in case of another line is used for WoL */ 3044 if (priv->wol_irq != dev->irq) { 3045 ret = request_irq(priv->wol_irq, stmmac_interrupt, 3046 IRQF_SHARED, dev->name, dev); 3047 if (unlikely(ret < 0)) { 3048 netdev_err(priv->dev, 3049 "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 3050 __func__, priv->wol_irq, ret); 3051 goto wolirq_error; 3052 } 3053 } 3054 3055 /* Request the IRQ lines */ 3056 if (priv->lpi_irq > 0) { 3057 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, 3058 dev->name, dev); 3059 if (unlikely(ret < 0)) { 3060 netdev_err(priv->dev, 3061 "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 3062 __func__, priv->lpi_irq, ret); 3063 goto lpiirq_error; 3064 } 3065 } 3066 3067 stmmac_enable_all_queues(priv); 3068 netif_tx_start_all_queues(priv->dev); 3069 3070 return 0; 3071 3072 lpiirq_error: 3073 if (priv->wol_irq != dev->irq) 3074 free_irq(priv->wol_irq, dev); 3075 wolirq_error: 3076 free_irq(dev->irq, dev); 3077 irq_error: 3078 phylink_stop(priv->phylink); 3079 3080 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3081 hrtimer_cancel(&priv->tx_queue[chan].txtimer); 3082 3083 stmmac_hw_teardown(dev); 3084 init_error: 3085 free_dma_desc_resources(priv); 3086 dma_desc_error: 3087 phylink_disconnect_phy(priv->phylink); 3088 init_phy_error: 3089 pm_runtime_put(priv->device); 3090 return ret; 3091 } 3092 3093 /** 3094 * stmmac_release - close entry point of the driver 3095 * @dev : device pointer. 3096 * Description: 3097 * This is the stop entry point of the driver. 
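 *
 * The error handling in stmmac_open() above follows the usual kernel unwind
 * pattern: every failure jumps to a label that undoes only the steps that
 * have already succeeded, in reverse order. A stripped-down sketch of the
 * shape; the ex_stage_/ex_undo_ names are placeholders, not driver functions:
 *
 *	static int ex_stage_a(void), ex_stage_b(void), ex_stage_c(void);
 *	static void ex_undo_a(void), ex_undo_b(void);
 *
 *	static int ex_open(void)
 *	{
 *		int ret;
 *
 *		ret = ex_stage_a();	// e.g. attach the PHY
 *		if (ret)
 *			goto err_a;
 *		ret = ex_stage_b();	// e.g. allocate DMA resources
 *		if (ret)
 *			goto err_b;
 *		ret = ex_stage_c();	// e.g. request the IRQ lines
 *		if (ret)
 *			goto err_c;
 *		return 0;
 *
 *	err_c:
 *		ex_undo_b();		// reverse order: newest work first
 *	err_b:
 *		ex_undo_a();
 *	err_a:
 *		return ret;
 *	}
 *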
3098 */ 3099 static int stmmac_release(struct net_device *dev) 3100 { 3101 struct stmmac_priv *priv = netdev_priv(dev); 3102 u32 chan; 3103 3104 if (device_may_wakeup(priv->device)) 3105 phylink_speed_down(priv->phylink, false); 3106 /* Stop and disconnect the PHY */ 3107 phylink_stop(priv->phylink); 3108 phylink_disconnect_phy(priv->phylink); 3109 3110 stmmac_disable_all_queues(priv); 3111 3112 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 3113 hrtimer_cancel(&priv->tx_queue[chan].txtimer); 3114 3115 /* Free the IRQ lines */ 3116 free_irq(dev->irq, dev); 3117 if (priv->wol_irq != dev->irq) 3118 free_irq(priv->wol_irq, dev); 3119 if (priv->lpi_irq > 0) 3120 free_irq(priv->lpi_irq, dev); 3121 3122 if (priv->eee_enabled) { 3123 priv->tx_path_in_lpi_mode = false; 3124 del_timer_sync(&priv->eee_ctrl_timer); 3125 } 3126 3127 /* Stop TX/RX DMA and clear the descriptors */ 3128 stmmac_stop_all_dma(priv); 3129 3130 /* Release and free the Rx/Tx resources */ 3131 free_dma_desc_resources(priv); 3132 3133 /* Disable the MAC Rx/Tx */ 3134 stmmac_mac_set(priv, priv->ioaddr, false); 3135 3136 netif_carrier_off(dev); 3137 3138 stmmac_release_ptp(priv); 3139 3140 pm_runtime_put(priv->device); 3141 3142 return 0; 3143 } 3144 3145 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, 3146 struct stmmac_tx_queue *tx_q) 3147 { 3148 u16 tag = 0x0, inner_tag = 0x0; 3149 u32 inner_type = 0x0; 3150 struct dma_desc *p; 3151 3152 if (!priv->dma_cap.vlins) 3153 return false; 3154 if (!skb_vlan_tag_present(skb)) 3155 return false; 3156 if (skb->vlan_proto == htons(ETH_P_8021AD)) { 3157 inner_tag = skb_vlan_tag_get(skb); 3158 inner_type = STMMAC_VLAN_INSERT; 3159 } 3160 3161 tag = skb_vlan_tag_get(skb); 3162 3163 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3164 p = &tx_q->dma_entx[tx_q->cur_tx].basic; 3165 else 3166 p = &tx_q->dma_tx[tx_q->cur_tx]; 3167 3168 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) 3169 return false; 3170 3171 stmmac_set_tx_owner(priv, p); 3172 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); 3173 return true; 3174 } 3175 3176 /** 3177 * stmmac_tso_allocator - close entry point of the driver 3178 * @priv: driver private structure 3179 * @des: buffer start address 3180 * @total_len: total length to fill in descriptors 3181 * @last_segment: condition for the last descriptor 3182 * @queue: TX queue index 3183 * Description: 3184 * This function fills descriptor and request new descriptors according to 3185 * buffer length to fill 3186 */ 3187 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, 3188 int total_len, bool last_segment, u32 queue) 3189 { 3190 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 3191 struct dma_desc *desc; 3192 u32 buff_size; 3193 int tmp_len; 3194 3195 tmp_len = total_len; 3196 3197 while (tmp_len > 0) { 3198 dma_addr_t curr_addr; 3199 3200 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 3201 priv->dma_tx_size); 3202 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 3203 3204 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3205 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 3206 else 3207 desc = &tx_q->dma_tx[tx_q->cur_tx]; 3208 3209 curr_addr = des + (total_len - tmp_len); 3210 if (priv->dma_cap.addr64 <= 32) 3211 desc->des0 = cpu_to_le32(curr_addr); 3212 else 3213 stmmac_set_desc_addr(priv, desc, curr_addr); 3214 3215 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? 
3216 TSO_MAX_BUFF_SIZE : tmp_len; 3217 3218 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, 3219 0, 1, 3220 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), 3221 0, 0); 3222 3223 tmp_len -= TSO_MAX_BUFF_SIZE; 3224 } 3225 } 3226 3227 /** 3228 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) 3229 * @skb : the socket buffer 3230 * @dev : device pointer 3231 * Description: this is the transmit function that is called on TSO frames 3232 * (support available on GMAC4 and newer chips). 3233 * Diagram below show the ring programming in case of TSO frames: 3234 * 3235 * First Descriptor 3236 * -------- 3237 * | DES0 |---> buffer1 = L2/L3/L4 header 3238 * | DES1 |---> TCP Payload (can continue on next descr...) 3239 * | DES2 |---> buffer 1 and 2 len 3240 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] 3241 * -------- 3242 * | 3243 * ... 3244 * | 3245 * -------- 3246 * | DES0 | --| Split TCP Payload on Buffers 1 and 2 3247 * | DES1 | --| 3248 * | DES2 | --> buffer 1 and 2 len 3249 * | DES3 | 3250 * -------- 3251 * 3252 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field. 3253 */ 3254 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) 3255 { 3256 struct dma_desc *desc, *first, *mss_desc = NULL; 3257 struct stmmac_priv *priv = netdev_priv(dev); 3258 int desc_size, tmp_pay_len = 0, first_tx; 3259 int nfrags = skb_shinfo(skb)->nr_frags; 3260 u32 queue = skb_get_queue_mapping(skb); 3261 unsigned int first_entry, tx_packets; 3262 struct stmmac_tx_queue *tx_q; 3263 bool has_vlan, set_ic; 3264 u8 proto_hdr_len, hdr; 3265 u32 pay_len, mss; 3266 dma_addr_t des; 3267 int i; 3268 3269 tx_q = &priv->tx_queue[queue]; 3270 first_tx = tx_q->cur_tx; 3271 3272 /* Compute header lengths */ 3273 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 3274 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr); 3275 hdr = sizeof(struct udphdr); 3276 } else { 3277 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3278 hdr = tcp_hdrlen(skb); 3279 } 3280 3281 /* Desc availability based on threshold should be enough safe */ 3282 if (unlikely(stmmac_tx_avail(priv, queue) < 3283 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { 3284 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 3285 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 3286 queue)); 3287 /* This is a hard error, log it. 
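 *
 * The availability check above reserves one descriptor for every
 * TSO_MAX_BUFF_SIZE chunk of payload (plus one), which matches how
 * stmmac_tso_allocator() slices a mapped buffer. The counts can be
 * sanity-checked with plain integer arithmetic (illustrative helpers only):
 *
 *	// worst case used by the queue-full test:
 *	// (skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1
 *	static unsigned int ex_tso_desc_needed(unsigned int payload_len,
 *					       unsigned int max_buff)
 *	{
 *		return payload_len / max_buff + 1;
 *	}
 *
 *	// descriptors stmmac_tso_allocator() consumes for one buffer of
 *	// 'len' bytes: a ceiling division
 *	static unsigned int ex_tso_chunks(unsigned int len, unsigned int max_buff)
 *	{
 *		return (len + max_buff - 1) / max_buff;
 *	}
 *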
*/ 3288 netdev_err(priv->dev, 3289 "%s: Tx Ring full when queue awake\n", 3290 __func__); 3291 } 3292 return NETDEV_TX_BUSY; 3293 } 3294 3295 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ 3296 3297 mss = skb_shinfo(skb)->gso_size; 3298 3299 /* set new MSS value if needed */ 3300 if (mss != tx_q->mss) { 3301 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3302 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 3303 else 3304 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; 3305 3306 stmmac_set_mss(priv, mss_desc, mss); 3307 tx_q->mss = mss; 3308 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, 3309 priv->dma_tx_size); 3310 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 3311 } 3312 3313 if (netif_msg_tx_queued(priv)) { 3314 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n", 3315 __func__, hdr, proto_hdr_len, pay_len, mss); 3316 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, 3317 skb->data_len); 3318 } 3319 3320 /* Check if VLAN can be inserted by HW */ 3321 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 3322 3323 first_entry = tx_q->cur_tx; 3324 WARN_ON(tx_q->tx_skbuff[first_entry]); 3325 3326 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3327 desc = &tx_q->dma_entx[first_entry].basic; 3328 else 3329 desc = &tx_q->dma_tx[first_entry]; 3330 first = desc; 3331 3332 if (has_vlan) 3333 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 3334 3335 /* first descriptor: fill Headers on Buf1 */ 3336 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), 3337 DMA_TO_DEVICE); 3338 if (dma_mapping_error(priv->device, des)) 3339 goto dma_map_err; 3340 3341 tx_q->tx_skbuff_dma[first_entry].buf = des; 3342 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 3343 3344 if (priv->dma_cap.addr64 <= 32) { 3345 first->des0 = cpu_to_le32(des); 3346 3347 /* Fill start of payload in buff2 of first descriptor */ 3348 if (pay_len) 3349 first->des1 = cpu_to_le32(des + proto_hdr_len); 3350 3351 /* If needed take extra descriptors to fill the remaining payload */ 3352 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; 3353 } else { 3354 stmmac_set_desc_addr(priv, first, des); 3355 tmp_pay_len = pay_len; 3356 des += proto_hdr_len; 3357 pay_len = 0; 3358 } 3359 3360 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 3361 3362 /* Prepare fragments */ 3363 for (i = 0; i < nfrags; i++) { 3364 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3365 3366 des = skb_frag_dma_map(priv->device, frag, 0, 3367 skb_frag_size(frag), 3368 DMA_TO_DEVICE); 3369 if (dma_mapping_error(priv->device, des)) 3370 goto dma_map_err; 3371 3372 stmmac_tso_allocator(priv, des, skb_frag_size(frag), 3373 (i == nfrags - 1), queue); 3374 3375 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; 3376 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); 3377 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; 3378 } 3379 3380 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 3381 3382 /* Only the last descriptor gets to point to the skb. 
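 *
 * On parts limited to 32-bit descriptor addresses (dma_cap.addr64 <= 32), the
 * first TSO descriptor above carries the headers in buffer 1 and the start of
 * the payload in buffer 2 (the mapped address plus the header length), and
 * only pay_len - TSO_MAX_BUFF_SIZE bytes are left for stmmac_tso_allocator().
 * With wider addressing the payload simply starts in the following
 * descriptors. A sketch of that bookkeeping, with illustrative names:
 *
 *	struct ex_first_desc {
 *		unsigned long long buf1;	// headers
 *		unsigned long long buf2;	// first payload slice (32-bit case)
 *		int remaining;			// handed to the allocator loop
 *	};
 *
 *	static struct ex_first_desc ex_split_first(unsigned long long dma_addr,
 *						   unsigned int hdr_len,
 *						   unsigned int pay_len,
 *						   unsigned int max_buff,
 *						   int narrow_addr)
 *	{
 *		struct ex_first_desc d;
 *
 *		d.buf1 = dma_addr;
 *		if (narrow_addr) {
 *			d.buf2 = pay_len ? dma_addr + hdr_len : 0;
 *			d.remaining = (int)pay_len - (int)max_buff;
 *		} else {
 *			d.buf2 = 0;		// unused, payload follows later
 *			d.remaining = (int)pay_len;
 *		}
 *		return d;
 *	}
 *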
*/ 3383 tx_q->tx_skbuff[tx_q->cur_tx] = skb; 3384 3385 /* Manage tx mitigation */ 3386 tx_packets = (tx_q->cur_tx + 1) - first_tx; 3387 tx_q->tx_count_frames += tx_packets; 3388 3389 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 3390 set_ic = true; 3391 else if (!priv->tx_coal_frames[queue]) 3392 set_ic = false; 3393 else if (tx_packets > priv->tx_coal_frames[queue]) 3394 set_ic = true; 3395 else if ((tx_q->tx_count_frames % 3396 priv->tx_coal_frames[queue]) < tx_packets) 3397 set_ic = true; 3398 else 3399 set_ic = false; 3400 3401 if (set_ic) { 3402 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3403 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; 3404 else 3405 desc = &tx_q->dma_tx[tx_q->cur_tx]; 3406 3407 tx_q->tx_count_frames = 0; 3408 stmmac_set_tx_ic(priv, desc); 3409 priv->xstats.tx_set_ic_bit++; 3410 } 3411 3412 /* We've used all descriptors we need for this skb, however, 3413 * advance cur_tx so that it references a fresh descriptor. 3414 * ndo_start_xmit will fill this descriptor the next time it's 3415 * called and stmmac_tx_clean may clean up to this descriptor. 3416 */ 3417 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); 3418 3419 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 3420 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 3421 __func__); 3422 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 3423 } 3424 3425 dev->stats.tx_bytes += skb->len; 3426 priv->xstats.tx_tso_frames++; 3427 priv->xstats.tx_tso_nfrags += nfrags; 3428 3429 if (priv->sarc_type) 3430 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 3431 3432 skb_tx_timestamp(skb); 3433 3434 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 3435 priv->hwts_tx_en)) { 3436 /* declare that device is doing timestamping */ 3437 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3438 stmmac_enable_tx_timestamp(priv, first); 3439 } 3440 3441 /* Complete the first descriptor before granting the DMA */ 3442 stmmac_prepare_tso_tx_desc(priv, first, 1, 3443 proto_hdr_len, 3444 pay_len, 3445 1, tx_q->tx_skbuff_dma[first_entry].last_segment, 3446 hdr / 4, (skb->len - proto_hdr_len)); 3447 3448 /* If context desc is used to change MSS */ 3449 if (mss_desc) { 3450 /* Make sure that first descriptor has been completely 3451 * written, including its own bit. This is because MSS is 3452 * actually before first descriptor, so we need to make 3453 * sure that MSS's own bit is the last thing written. 3454 */ 3455 dma_wmb(); 3456 stmmac_set_tx_owner(priv, mss_desc); 3457 } 3458 3459 /* The own bit must be the latest setting done when prepare the 3460 * descriptor and then barrier is needed to make sure that 3461 * all is coherent before granting the DMA engine. 
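 *
 * The interrupt-on-completion choice made just above reduces to: always
 * request an interrupt for hardware-timestamped frames, never when no frame
 * budget is configured, and otherwise whenever the running frame counter
 * crosses a multiple of tx_coal_frames. As a standalone predicate
 * (illustrative only); the OWN bit of the first descriptor is still written
 * last, behind the barrier described here, whatever this returns:
 *
 *	static int ex_want_tx_irq(int hw_tstamp, unsigned int coal_frames,
 *				  unsigned int tx_packets,
 *				  unsigned int count_frames)
 *	{
 *		if (hw_tstamp)
 *			return 1;	// complete timestamped frames promptly
 *		if (!coal_frames)
 *			return 0;	// frame-count coalescing disabled
 *		if (tx_packets > coal_frames)
 *			return 1;	// burst larger than the budget
 *		return (count_frames % coal_frames) < tx_packets;
 *	}
 *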
3462 */ 3463 wmb(); 3464 3465 if (netif_msg_pktdata(priv)) { 3466 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", 3467 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 3468 tx_q->cur_tx, first, nfrags); 3469 pr_info(">>> frame to be transmitted: "); 3470 print_pkt(skb->data, skb_headlen(skb)); 3471 } 3472 3473 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3474 3475 if (tx_q->tbs & STMMAC_TBS_AVAIL) 3476 desc_size = sizeof(struct dma_edesc); 3477 else 3478 desc_size = sizeof(struct dma_desc); 3479 3480 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); 3481 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 3482 stmmac_tx_timer_arm(priv, queue); 3483 3484 return NETDEV_TX_OK; 3485 3486 dma_map_err: 3487 dev_err(priv->device, "Tx dma map failed\n"); 3488 dev_kfree_skb(skb); 3489 priv->dev->stats.tx_dropped++; 3490 return NETDEV_TX_OK; 3491 } 3492 3493 /** 3494 * stmmac_xmit - Tx entry point of the driver 3495 * @skb : the socket buffer 3496 * @dev : device pointer 3497 * Description : this is the tx entry point of the driver. 3498 * It programs the chain or the ring and supports oversized frames 3499 * and SG feature. 3500 */ 3501 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 3502 { 3503 unsigned int first_entry, tx_packets, enh_desc; 3504 struct stmmac_priv *priv = netdev_priv(dev); 3505 unsigned int nopaged_len = skb_headlen(skb); 3506 int i, csum_insertion = 0, is_jumbo = 0; 3507 u32 queue = skb_get_queue_mapping(skb); 3508 int nfrags = skb_shinfo(skb)->nr_frags; 3509 int gso = skb_shinfo(skb)->gso_type; 3510 struct dma_edesc *tbs_desc = NULL; 3511 int entry, desc_size, first_tx; 3512 struct dma_desc *desc, *first; 3513 struct stmmac_tx_queue *tx_q; 3514 bool has_vlan, set_ic; 3515 dma_addr_t des; 3516 3517 tx_q = &priv->tx_queue[queue]; 3518 first_tx = tx_q->cur_tx; 3519 3520 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) 3521 stmmac_disable_eee_mode(priv); 3522 3523 /* Manage oversized TCP frames for GMAC4 device */ 3524 if (skb_is_gso(skb) && priv->tso) { 3525 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) 3526 return stmmac_tso_xmit(skb, dev); 3527 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) 3528 return stmmac_tso_xmit(skb, dev); 3529 } 3530 3531 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 3532 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 3533 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 3534 queue)); 3535 /* This is a hard error, log it. 
*/ 3536 netdev_err(priv->dev, 3537 "%s: Tx Ring full when queue awake\n", 3538 __func__); 3539 } 3540 return NETDEV_TX_BUSY; 3541 } 3542 3543 /* Check if VLAN can be inserted by HW */ 3544 has_vlan = stmmac_vlan_insert(priv, skb, tx_q); 3545 3546 entry = tx_q->cur_tx; 3547 first_entry = entry; 3548 WARN_ON(tx_q->tx_skbuff[first_entry]); 3549 3550 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 3551 3552 if (likely(priv->extend_desc)) 3553 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 3554 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 3555 desc = &tx_q->dma_entx[entry].basic; 3556 else 3557 desc = tx_q->dma_tx + entry; 3558 3559 first = desc; 3560 3561 if (has_vlan) 3562 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); 3563 3564 enh_desc = priv->plat->enh_desc; 3565 /* To program the descriptors according to the size of the frame */ 3566 if (enh_desc) 3567 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 3568 3569 if (unlikely(is_jumbo)) { 3570 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 3571 if (unlikely(entry < 0) && (entry != -EINVAL)) 3572 goto dma_map_err; 3573 } 3574 3575 for (i = 0; i < nfrags; i++) { 3576 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3577 int len = skb_frag_size(frag); 3578 bool last_segment = (i == (nfrags - 1)); 3579 3580 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 3581 WARN_ON(tx_q->tx_skbuff[entry]); 3582 3583 if (likely(priv->extend_desc)) 3584 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 3585 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 3586 desc = &tx_q->dma_entx[entry].basic; 3587 else 3588 desc = tx_q->dma_tx + entry; 3589 3590 des = skb_frag_dma_map(priv->device, frag, 0, len, 3591 DMA_TO_DEVICE); 3592 if (dma_mapping_error(priv->device, des)) 3593 goto dma_map_err; /* should reuse desc w/o issues */ 3594 3595 tx_q->tx_skbuff_dma[entry].buf = des; 3596 3597 stmmac_set_desc_addr(priv, desc, des); 3598 3599 tx_q->tx_skbuff_dma[entry].map_as_page = true; 3600 tx_q->tx_skbuff_dma[entry].len = len; 3601 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 3602 3603 /* Prepare the descriptor and set the own bit too */ 3604 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 3605 priv->mode, 1, last_segment, skb->len); 3606 } 3607 3608 /* Only the last descriptor gets to point to the skb. */ 3609 tx_q->tx_skbuff[entry] = skb; 3610 3611 /* According to the coalesce parameter the IC bit for the latest 3612 * segment is reset and the timer re-started to clean the tx status. 3613 * This approach takes care about the fragments: desc is the first 3614 * element in case of no SG. 
3615 */ 3616 tx_packets = (entry + 1) - first_tx; 3617 tx_q->tx_count_frames += tx_packets; 3618 3619 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) 3620 set_ic = true; 3621 else if (!priv->tx_coal_frames[queue]) 3622 set_ic = false; 3623 else if (tx_packets > priv->tx_coal_frames[queue]) 3624 set_ic = true; 3625 else if ((tx_q->tx_count_frames % 3626 priv->tx_coal_frames[queue]) < tx_packets) 3627 set_ic = true; 3628 else 3629 set_ic = false; 3630 3631 if (set_ic) { 3632 if (likely(priv->extend_desc)) 3633 desc = &tx_q->dma_etx[entry].basic; 3634 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 3635 desc = &tx_q->dma_entx[entry].basic; 3636 else 3637 desc = &tx_q->dma_tx[entry]; 3638 3639 tx_q->tx_count_frames = 0; 3640 stmmac_set_tx_ic(priv, desc); 3641 priv->xstats.tx_set_ic_bit++; 3642 } 3643 3644 /* We've used all descriptors we need for this skb, however, 3645 * advance cur_tx so that it references a fresh descriptor. 3646 * ndo_start_xmit will fill this descriptor the next time it's 3647 * called and stmmac_tx_clean may clean up to this descriptor. 3648 */ 3649 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); 3650 tx_q->cur_tx = entry; 3651 3652 if (netif_msg_pktdata(priv)) { 3653 netdev_dbg(priv->dev, 3654 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 3655 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 3656 entry, first, nfrags); 3657 3658 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 3659 print_pkt(skb->data, skb->len); 3660 } 3661 3662 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 3663 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 3664 __func__); 3665 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 3666 } 3667 3668 dev->stats.tx_bytes += skb->len; 3669 3670 if (priv->sarc_type) 3671 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 3672 3673 skb_tx_timestamp(skb); 3674 3675 /* Ready to fill the first descriptor and set the OWN bit w/o any 3676 * problems because all the descriptors are actually ready to be 3677 * passed to the DMA engine. 3678 */ 3679 if (likely(!is_jumbo)) { 3680 bool last_segment = (nfrags == 0); 3681 3682 des = dma_map_single(priv->device, skb->data, 3683 nopaged_len, DMA_TO_DEVICE); 3684 if (dma_mapping_error(priv->device, des)) 3685 goto dma_map_err; 3686 3687 tx_q->tx_skbuff_dma[first_entry].buf = des; 3688 3689 stmmac_set_desc_addr(priv, first, des); 3690 3691 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 3692 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 3693 3694 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 3695 priv->hwts_tx_en)) { 3696 /* declare that device is doing timestamping */ 3697 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3698 stmmac_enable_tx_timestamp(priv, first); 3699 } 3700 3701 /* Prepare the first descriptor setting the OWN bit too */ 3702 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 3703 csum_insertion, priv->mode, 0, last_segment, 3704 skb->len); 3705 } 3706 3707 if (tx_q->tbs & STMMAC_TBS_EN) { 3708 struct timespec64 ts = ns_to_timespec64(skb->tstamp); 3709 3710 tbs_desc = &tx_q->dma_entx[first_entry]; 3711 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); 3712 } 3713 3714 stmmac_set_tx_owner(priv, first); 3715 3716 /* The own bit must be the latest setting done when prepare the 3717 * descriptor and then barrier is needed to make sure that 3718 * all is coherent before granting the DMA engine. 
3719 */ 3720 wmb(); 3721 3722 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3723 3724 stmmac_enable_dma_transmission(priv, priv->ioaddr); 3725 3726 if (likely(priv->extend_desc)) 3727 desc_size = sizeof(struct dma_extended_desc); 3728 else if (tx_q->tbs & STMMAC_TBS_AVAIL) 3729 desc_size = sizeof(struct dma_edesc); 3730 else 3731 desc_size = sizeof(struct dma_desc); 3732 3733 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); 3734 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 3735 stmmac_tx_timer_arm(priv, queue); 3736 3737 return NETDEV_TX_OK; 3738 3739 dma_map_err: 3740 netdev_err(priv->dev, "Tx DMA map failed\n"); 3741 dev_kfree_skb(skb); 3742 priv->dev->stats.tx_dropped++; 3743 return NETDEV_TX_OK; 3744 } 3745 3746 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 3747 { 3748 struct vlan_ethhdr *veth; 3749 __be16 vlan_proto; 3750 u16 vlanid; 3751 3752 veth = (struct vlan_ethhdr *)skb->data; 3753 vlan_proto = veth->h_vlan_proto; 3754 3755 if ((vlan_proto == htons(ETH_P_8021Q) && 3756 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 3757 (vlan_proto == htons(ETH_P_8021AD) && 3758 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 3759 /* pop the vlan tag */ 3760 vlanid = ntohs(veth->h_vlan_TCI); 3761 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); 3762 skb_pull(skb, VLAN_HLEN); 3763 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); 3764 } 3765 } 3766 3767 /** 3768 * stmmac_rx_refill - refill used skb preallocated buffers 3769 * @priv: driver private structure 3770 * @queue: RX queue index 3771 * Description : this is to reallocate the skb for the reception process 3772 * that is based on zero-copy. 3773 */ 3774 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) 3775 { 3776 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3777 int len, dirty = stmmac_rx_dirty(priv, queue); 3778 unsigned int entry = rx_q->dirty_rx; 3779 3780 len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; 3781 3782 while (dirty-- > 0) { 3783 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; 3784 struct dma_desc *p; 3785 bool use_rx_wd; 3786 3787 if (priv->extend_desc) 3788 p = (struct dma_desc *)(rx_q->dma_erx + entry); 3789 else 3790 p = rx_q->dma_rx + entry; 3791 3792 if (!buf->page) { 3793 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); 3794 if (!buf->page) 3795 break; 3796 } 3797 3798 if (priv->sph && !buf->sec_page) { 3799 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); 3800 if (!buf->sec_page) 3801 break; 3802 3803 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); 3804 3805 dma_sync_single_for_device(priv->device, buf->sec_addr, 3806 len, DMA_FROM_DEVICE); 3807 } 3808 3809 buf->addr = page_pool_get_dma_addr(buf->page); 3810 3811 /* Sync whole allocation to device. This will invalidate old 3812 * data. 
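		 * Note that len covers the whole page pool buffer
		 * (dma_buf_sz rounded up to full pages), so the complete
		 * area handed to the device is synced.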
		 */
		dma_sync_single_for_device(priv->device, buf->addr, len,
					   DMA_FROM_DEVICE);

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			     (rx_q->dirty_rx * sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}

static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	unsigned int plen = 0, hlen = 0;
	int coe = priv->hw->rx_csum;

	/* Not first descriptor, buffer is always zero */
	if (priv->sph && len)
		return 0;

	/* First descriptor, get split header length */
	stmmac_get_rx_header_len(priv, p, &hlen);
	if (priv->sph && hlen) {
		priv->xstats.rx_split_hdr_pkt_n++;
		return hlen;
	}

	/* First descriptor, not last descriptor and not split header */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* First descriptor and last descriptor and not split header */
	return min_t(unsigned int, priv->dma_buf_sz, plen);
}

static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	int coe = priv->hw->rx_csum;
	unsigned int plen = 0;

	/* Not split header, buffer is not available */
	if (!priv->sph)
		return 0;

	/* Not last descriptor */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* Last descriptor */
	return plen - len;
}

/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: napi budget
 * @queue: RX queue index.
 * Description: this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
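 * Return value:
 * the number of frames processed from the ring, which is at most @limit.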
3901 */ 3902 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 3903 { 3904 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3905 struct stmmac_channel *ch = &priv->channel[queue]; 3906 unsigned int count = 0, error = 0, len = 0; 3907 int status = 0, coe = priv->hw->rx_csum; 3908 unsigned int next_entry = rx_q->cur_rx; 3909 unsigned int desc_size; 3910 struct sk_buff *skb = NULL; 3911 3912 if (netif_msg_rx_status(priv)) { 3913 void *rx_head; 3914 3915 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 3916 if (priv->extend_desc) { 3917 rx_head = (void *)rx_q->dma_erx; 3918 desc_size = sizeof(struct dma_extended_desc); 3919 } else { 3920 rx_head = (void *)rx_q->dma_rx; 3921 desc_size = sizeof(struct dma_desc); 3922 } 3923 3924 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true, 3925 rx_q->dma_rx_phy, desc_size); 3926 } 3927 while (count < limit) { 3928 unsigned int buf1_len = 0, buf2_len = 0; 3929 enum pkt_hash_types hash_type; 3930 struct stmmac_rx_buffer *buf; 3931 struct dma_desc *np, *p; 3932 int entry; 3933 u32 hash; 3934 3935 if (!count && rx_q->state_saved) { 3936 skb = rx_q->state.skb; 3937 error = rx_q->state.error; 3938 len = rx_q->state.len; 3939 } else { 3940 rx_q->state_saved = false; 3941 skb = NULL; 3942 error = 0; 3943 len = 0; 3944 } 3945 3946 if (count >= limit) 3947 break; 3948 3949 read_again: 3950 buf1_len = 0; 3951 buf2_len = 0; 3952 entry = next_entry; 3953 buf = &rx_q->buf_pool[entry]; 3954 3955 if (priv->extend_desc) 3956 p = (struct dma_desc *)(rx_q->dma_erx + entry); 3957 else 3958 p = rx_q->dma_rx + entry; 3959 3960 /* read the status of the incoming frame */ 3961 status = stmmac_rx_status(priv, &priv->dev->stats, 3962 &priv->xstats, p); 3963 /* check if managed by the DMA otherwise go ahead */ 3964 if (unlikely(status & dma_own)) 3965 break; 3966 3967 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, 3968 priv->dma_rx_size); 3969 next_entry = rx_q->cur_rx; 3970 3971 if (priv->extend_desc) 3972 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 3973 else 3974 np = rx_q->dma_rx + next_entry; 3975 3976 prefetch(np); 3977 3978 if (priv->extend_desc) 3979 stmmac_rx_extended_status(priv, &priv->dev->stats, 3980 &priv->xstats, rx_q->dma_erx + entry); 3981 if (unlikely(status == discard_frame)) { 3982 page_pool_recycle_direct(rx_q->page_pool, buf->page); 3983 buf->page = NULL; 3984 error = 1; 3985 if (!priv->hwts_rx_en) 3986 priv->dev->stats.rx_errors++; 3987 } 3988 3989 if (unlikely(error && (status & rx_not_ls))) 3990 goto read_again; 3991 if (unlikely(error)) { 3992 dev_kfree_skb(skb); 3993 skb = NULL; 3994 count++; 3995 continue; 3996 } 3997 3998 /* Buffer is good. Go on. */ 3999 4000 prefetch(page_address(buf->page)); 4001 if (buf->sec_page) 4002 prefetch(page_address(buf->sec_page)); 4003 4004 buf1_len = stmmac_rx_buf1_len(priv, p, status, len); 4005 len += buf1_len; 4006 buf2_len = stmmac_rx_buf2_len(priv, p, status, len); 4007 len += buf2_len; 4008 4009 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 4010 * Type frames (LLC/LLC-SNAP) 4011 * 4012 * llc_snap is never checked in GMAC >= 4, so this ACS 4013 * feature is always disabled and packets need to be 4014 * stripped manually. 
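		 * Hence, for cores >= 4.00 (or for non LLC/SNAP frames) the
		 * FCS length is trimmed from the last buffer of the frame
		 * just below.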
4015 */ 4016 if (likely(!(status & rx_not_ls)) && 4017 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || 4018 unlikely(status != llc_snap))) { 4019 if (buf2_len) 4020 buf2_len -= ETH_FCS_LEN; 4021 else 4022 buf1_len -= ETH_FCS_LEN; 4023 4024 len -= ETH_FCS_LEN; 4025 } 4026 4027 if (!skb) { 4028 skb = napi_alloc_skb(&ch->rx_napi, buf1_len); 4029 if (!skb) { 4030 priv->dev->stats.rx_dropped++; 4031 count++; 4032 goto drain_data; 4033 } 4034 4035 dma_sync_single_for_cpu(priv->device, buf->addr, 4036 buf1_len, DMA_FROM_DEVICE); 4037 skb_copy_to_linear_data(skb, page_address(buf->page), 4038 buf1_len); 4039 skb_put(skb, buf1_len); 4040 4041 /* Data payload copied into SKB, page ready for recycle */ 4042 page_pool_recycle_direct(rx_q->page_pool, buf->page); 4043 buf->page = NULL; 4044 } else if (buf1_len) { 4045 dma_sync_single_for_cpu(priv->device, buf->addr, 4046 buf1_len, DMA_FROM_DEVICE); 4047 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 4048 buf->page, 0, buf1_len, 4049 priv->dma_buf_sz); 4050 4051 /* Data payload appended into SKB */ 4052 page_pool_release_page(rx_q->page_pool, buf->page); 4053 buf->page = NULL; 4054 } 4055 4056 if (buf2_len) { 4057 dma_sync_single_for_cpu(priv->device, buf->sec_addr, 4058 buf2_len, DMA_FROM_DEVICE); 4059 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 4060 buf->sec_page, 0, buf2_len, 4061 priv->dma_buf_sz); 4062 4063 /* Data payload appended into SKB */ 4064 page_pool_release_page(rx_q->page_pool, buf->sec_page); 4065 buf->sec_page = NULL; 4066 } 4067 4068 drain_data: 4069 if (likely(status & rx_not_ls)) 4070 goto read_again; 4071 if (!skb) 4072 continue; 4073 4074 /* Got entire packet into SKB. Finish it. */ 4075 4076 stmmac_get_rx_hwtstamp(priv, p, np, skb); 4077 stmmac_rx_vlan(priv->dev, skb); 4078 skb->protocol = eth_type_trans(skb, priv->dev); 4079 4080 if (unlikely(!coe)) 4081 skb_checksum_none_assert(skb); 4082 else 4083 skb->ip_summed = CHECKSUM_UNNECESSARY; 4084 4085 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type)) 4086 skb_set_hash(skb, hash, hash_type); 4087 4088 skb_record_rx_queue(skb, queue); 4089 napi_gro_receive(&ch->rx_napi, skb); 4090 skb = NULL; 4091 4092 priv->dev->stats.rx_packets++; 4093 priv->dev->stats.rx_bytes += len; 4094 count++; 4095 } 4096 4097 if (status & rx_not_ls || skb) { 4098 rx_q->state_saved = true; 4099 rx_q->state.skb = skb; 4100 rx_q->state.error = error; 4101 rx_q->state.len = len; 4102 } 4103 4104 stmmac_rx_refill(priv, queue); 4105 4106 priv->xstats.rx_pkt_n += count; 4107 4108 return count; 4109 } 4110 4111 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) 4112 { 4113 struct stmmac_channel *ch = 4114 container_of(napi, struct stmmac_channel, rx_napi); 4115 struct stmmac_priv *priv = ch->priv_data; 4116 u32 chan = ch->index; 4117 int work_done; 4118 4119 priv->xstats.napi_poll++; 4120 4121 work_done = stmmac_rx(priv, budget, chan); 4122 if (work_done < budget && napi_complete_done(napi, work_done)) { 4123 unsigned long flags; 4124 4125 spin_lock_irqsave(&ch->lock, flags); 4126 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); 4127 spin_unlock_irqrestore(&ch->lock, flags); 4128 } 4129 4130 return work_done; 4131 } 4132 4133 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 4134 { 4135 struct stmmac_channel *ch = 4136 container_of(napi, struct stmmac_channel, tx_napi); 4137 struct stmmac_priv *priv = ch->priv_data; 4138 u32 chan = ch->index; 4139 int work_done; 4140 4141 priv->xstats.napi_poll++; 4142 4143 work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan); 
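	/* stmmac_tx_clean() runs with the full ring size as its budget and
	 * may therefore report more completions than the NAPI budget;
	 * clamp it so that napi_complete_done() is only attempted when the
	 * TX work actually fit within the budget.
	 */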
4144 work_done = min(work_done, budget); 4145 4146 if (work_done < budget && napi_complete_done(napi, work_done)) { 4147 unsigned long flags; 4148 4149 spin_lock_irqsave(&ch->lock, flags); 4150 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); 4151 spin_unlock_irqrestore(&ch->lock, flags); 4152 } 4153 4154 return work_done; 4155 } 4156 4157 /** 4158 * stmmac_tx_timeout 4159 * @dev : Pointer to net device structure 4160 * @txqueue: the index of the hanging transmit queue 4161 * Description: this function is called when a packet transmission fails to 4162 * complete within a reasonable time. The driver will mark the error in the 4163 * netdev structure and arrange for the device to be reset to a sane state 4164 * in order to transmit a new packet. 4165 */ 4166 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) 4167 { 4168 struct stmmac_priv *priv = netdev_priv(dev); 4169 4170 stmmac_global_err(priv); 4171 } 4172 4173 /** 4174 * stmmac_set_rx_mode - entry point for multicast addressing 4175 * @dev : pointer to the device structure 4176 * Description: 4177 * This function is a driver entry point which gets called by the kernel 4178 * whenever multicast addresses must be enabled/disabled. 4179 * Return value: 4180 * void. 4181 */ 4182 static void stmmac_set_rx_mode(struct net_device *dev) 4183 { 4184 struct stmmac_priv *priv = netdev_priv(dev); 4185 4186 stmmac_set_filter(priv, priv->hw, dev); 4187 } 4188 4189 /** 4190 * stmmac_change_mtu - entry point to change MTU size for the device. 4191 * @dev : device pointer. 4192 * @new_mtu : the new MTU size for the device. 4193 * Description: the Maximum Transfer Unit (MTU) is used by the network layer 4194 * to drive packet transmission. Ethernet has an MTU of 1500 octets 4195 * (ETH_DATA_LEN). This value can be changed with ifconfig. 4196 * Return value: 4197 * 0 on success and an appropriate (-)ve integer as defined in errno.h 4198 * file on failure. 4199 */ 4200 static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 4201 { 4202 struct stmmac_priv *priv = netdev_priv(dev); 4203 int txfifosz = priv->plat->tx_fifo_size; 4204 const int mtu = new_mtu; 4205 4206 if (txfifosz == 0) 4207 txfifosz = priv->dma_cap.tx_fifo_size; 4208 4209 txfifosz /= priv->plat->tx_queues_to_use; 4210 4211 if (netif_running(dev)) { 4212 netdev_err(priv->dev, "must be stopped to change its MTU\n"); 4213 return -EBUSY; 4214 } 4215 4216 new_mtu = STMMAC_ALIGN(new_mtu); 4217 4218 /* If condition true, FIFO is too small or MTU too large */ 4219 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB)) 4220 return -EINVAL; 4221 4222 dev->mtu = mtu; 4223 4224 netdev_update_features(dev); 4225 4226 return 0; 4227 } 4228 4229 static netdev_features_t stmmac_fix_features(struct net_device *dev, 4230 netdev_features_t features) 4231 { 4232 struct stmmac_priv *priv = netdev_priv(dev); 4233 4234 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 4235 features &= ~NETIF_F_RXCSUM; 4236 4237 if (!priv->plat->tx_coe) 4238 features &= ~NETIF_F_CSUM_MASK; 4239 4240 /* Some GMAC devices have a bugged Jumbo frame support that 4241 * needs to have the Tx COE disabled for oversized frames 4242 * (due to limited buffer sizes). In this case we disable 4243 * the TX csum insertion in the TDES and not use SF. 
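	 * ("SF" here refers to the Store-and-Forward DMA operating mode.)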
 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}

static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);
	bool sph_en;
	u32 chan;

	/* Keep the COE Type in case checksum offload is supported */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
	for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);

	return 0;
}

/**
 * stmmac_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer (must be valid).
 * Description: this is the main driver interrupt service routine.
 * It can call:
 * o DMA service routine (to manage incoming frame reception and transmission
 *   status)
 * o Core interrupts to manage: remote wake-up, management counter, LPI
 *   interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;
	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	if (priv->dma_cap.estsel)
		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
				      &priv->xstats, tx_cnt);

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
		int mtl_status;

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		for (queue = 0; queue < queues_count; queue++) {
			struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

			mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
								queue);
			if (mtl_status != -EINVAL)
				status |= mtl_status;

			if (status & CORE_IRQ_MTL_RX_OVERFLOW)
				stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
						       rx_q->rx_tail_addr,
						       queue);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 * stmmac_ioctl - Entry point for the Ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 * Description:
 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
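 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.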
4383 */ 4384 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 4385 { 4386 struct stmmac_priv *priv = netdev_priv (dev); 4387 int ret = -EOPNOTSUPP; 4388 4389 if (!netif_running(dev)) 4390 return -EINVAL; 4391 4392 switch (cmd) { 4393 case SIOCGMIIPHY: 4394 case SIOCGMIIREG: 4395 case SIOCSMIIREG: 4396 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); 4397 break; 4398 case SIOCSHWTSTAMP: 4399 ret = stmmac_hwtstamp_set(dev, rq); 4400 break; 4401 case SIOCGHWTSTAMP: 4402 ret = stmmac_hwtstamp_get(dev, rq); 4403 break; 4404 default: 4405 break; 4406 } 4407 4408 return ret; 4409 } 4410 4411 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 4412 void *cb_priv) 4413 { 4414 struct stmmac_priv *priv = cb_priv; 4415 int ret = -EOPNOTSUPP; 4416 4417 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) 4418 return ret; 4419 4420 stmmac_disable_all_queues(priv); 4421 4422 switch (type) { 4423 case TC_SETUP_CLSU32: 4424 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); 4425 break; 4426 case TC_SETUP_CLSFLOWER: 4427 ret = stmmac_tc_setup_cls(priv, priv, type_data); 4428 break; 4429 default: 4430 break; 4431 } 4432 4433 stmmac_enable_all_queues(priv); 4434 return ret; 4435 } 4436 4437 static LIST_HEAD(stmmac_block_cb_list); 4438 4439 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, 4440 void *type_data) 4441 { 4442 struct stmmac_priv *priv = netdev_priv(ndev); 4443 4444 switch (type) { 4445 case TC_SETUP_BLOCK: 4446 return flow_block_cb_setup_simple(type_data, 4447 &stmmac_block_cb_list, 4448 stmmac_setup_tc_block_cb, 4449 priv, priv, true); 4450 case TC_SETUP_QDISC_CBS: 4451 return stmmac_tc_setup_cbs(priv, priv, type_data); 4452 case TC_SETUP_QDISC_TAPRIO: 4453 return stmmac_tc_setup_taprio(priv, priv, type_data); 4454 case TC_SETUP_QDISC_ETF: 4455 return stmmac_tc_setup_etf(priv, priv, type_data); 4456 default: 4457 return -EOPNOTSUPP; 4458 } 4459 } 4460 4461 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb, 4462 struct net_device *sb_dev) 4463 { 4464 int gso = skb_shinfo(skb)->gso_type; 4465 4466 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) { 4467 /* 4468 * There is no way to determine the number of TSO/USO 4469 * capable Queues. Let's use always the Queue 0 4470 * because if TSO/USO is supported then at least this 4471 * one will be capable. 
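		 * All other traffic is spread by netdev_pick_tx() below,
		 * folded onto the real number of TX queues in use.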
4472 */ 4473 return 0; 4474 } 4475 4476 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; 4477 } 4478 4479 static int stmmac_set_mac_address(struct net_device *ndev, void *addr) 4480 { 4481 struct stmmac_priv *priv = netdev_priv(ndev); 4482 int ret = 0; 4483 4484 ret = eth_mac_addr(ndev, addr); 4485 if (ret) 4486 return ret; 4487 4488 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); 4489 4490 return ret; 4491 } 4492 4493 #ifdef CONFIG_DEBUG_FS 4494 static struct dentry *stmmac_fs_dir; 4495 4496 static void sysfs_display_ring(void *head, int size, int extend_desc, 4497 struct seq_file *seq, dma_addr_t dma_phy_addr) 4498 { 4499 int i; 4500 struct dma_extended_desc *ep = (struct dma_extended_desc *)head; 4501 struct dma_desc *p = (struct dma_desc *)head; 4502 dma_addr_t dma_addr; 4503 4504 for (i = 0; i < size; i++) { 4505 if (extend_desc) { 4506 dma_addr = dma_phy_addr + i * sizeof(*ep); 4507 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 4508 i, &dma_addr, 4509 le32_to_cpu(ep->basic.des0), 4510 le32_to_cpu(ep->basic.des1), 4511 le32_to_cpu(ep->basic.des2), 4512 le32_to_cpu(ep->basic.des3)); 4513 ep++; 4514 } else { 4515 dma_addr = dma_phy_addr + i * sizeof(*p); 4516 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n", 4517 i, &dma_addr, 4518 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 4519 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 4520 p++; 4521 } 4522 seq_printf(seq, "\n"); 4523 } 4524 } 4525 4526 static int stmmac_rings_status_show(struct seq_file *seq, void *v) 4527 { 4528 struct net_device *dev = seq->private; 4529 struct stmmac_priv *priv = netdev_priv(dev); 4530 u32 rx_count = priv->plat->rx_queues_to_use; 4531 u32 tx_count = priv->plat->tx_queues_to_use; 4532 u32 queue; 4533 4534 if ((dev->flags & IFF_UP) == 0) 4535 return 0; 4536 4537 for (queue = 0; queue < rx_count; queue++) { 4538 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 4539 4540 seq_printf(seq, "RX Queue %d:\n", queue); 4541 4542 if (priv->extend_desc) { 4543 seq_printf(seq, "Extended descriptor ring:\n"); 4544 sysfs_display_ring((void *)rx_q->dma_erx, 4545 priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy); 4546 } else { 4547 seq_printf(seq, "Descriptor ring:\n"); 4548 sysfs_display_ring((void *)rx_q->dma_rx, 4549 priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy); 4550 } 4551 } 4552 4553 for (queue = 0; queue < tx_count; queue++) { 4554 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 4555 4556 seq_printf(seq, "TX Queue %d:\n", queue); 4557 4558 if (priv->extend_desc) { 4559 seq_printf(seq, "Extended descriptor ring:\n"); 4560 sysfs_display_ring((void *)tx_q->dma_etx, 4561 priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy); 4562 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { 4563 seq_printf(seq, "Descriptor ring:\n"); 4564 sysfs_display_ring((void *)tx_q->dma_tx, 4565 priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy); 4566 } 4567 } 4568 4569 return 0; 4570 } 4571 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); 4572 4573 static int stmmac_dma_cap_show(struct seq_file *seq, void *v) 4574 { 4575 struct net_device *dev = seq->private; 4576 struct stmmac_priv *priv = netdev_priv(dev); 4577 4578 if (!priv->hw_cap_support) { 4579 seq_printf(seq, "DMA HW features not supported\n"); 4580 return 0; 4581 } 4582 4583 seq_printf(seq, "==============================\n"); 4584 seq_printf(seq, "\tDMA HW features\n"); 4585 seq_printf(seq, "==============================\n"); 4586 4587 seq_printf(seq, "\t10/100 Mbps: %s\n", 4588 (priv->dma_cap.mbps_10_100) ? 
"Y" : "N"); 4589 seq_printf(seq, "\t1000 Mbps: %s\n", 4590 (priv->dma_cap.mbps_1000) ? "Y" : "N"); 4591 seq_printf(seq, "\tHalf duplex: %s\n", 4592 (priv->dma_cap.half_duplex) ? "Y" : "N"); 4593 seq_printf(seq, "\tHash Filter: %s\n", 4594 (priv->dma_cap.hash_filter) ? "Y" : "N"); 4595 seq_printf(seq, "\tMultiple MAC address registers: %s\n", 4596 (priv->dma_cap.multi_addr) ? "Y" : "N"); 4597 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", 4598 (priv->dma_cap.pcs) ? "Y" : "N"); 4599 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", 4600 (priv->dma_cap.sma_mdio) ? "Y" : "N"); 4601 seq_printf(seq, "\tPMT Remote wake up: %s\n", 4602 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); 4603 seq_printf(seq, "\tPMT Magic Frame: %s\n", 4604 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); 4605 seq_printf(seq, "\tRMON module: %s\n", 4606 (priv->dma_cap.rmon) ? "Y" : "N"); 4607 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", 4608 (priv->dma_cap.time_stamp) ? "Y" : "N"); 4609 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", 4610 (priv->dma_cap.atime_stamp) ? "Y" : "N"); 4611 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", 4612 (priv->dma_cap.eee) ? "Y" : "N"); 4613 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 4614 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 4615 (priv->dma_cap.tx_coe) ? "Y" : "N"); 4616 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 4617 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", 4618 (priv->dma_cap.rx_coe) ? "Y" : "N"); 4619 } else { 4620 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 4621 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 4622 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 4623 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 4624 } 4625 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 4626 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); 4627 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 4628 priv->dma_cap.number_rx_channel); 4629 seq_printf(seq, "\tNumber of Additional TX channel: %d\n", 4630 priv->dma_cap.number_tx_channel); 4631 seq_printf(seq, "\tNumber of Additional RX queues: %d\n", 4632 priv->dma_cap.number_rx_queues); 4633 seq_printf(seq, "\tNumber of Additional TX queues: %d\n", 4634 priv->dma_cap.number_tx_queues); 4635 seq_printf(seq, "\tEnhanced descriptors: %s\n", 4636 (priv->dma_cap.enh_desc) ? "Y" : "N"); 4637 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); 4638 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); 4639 seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz); 4640 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); 4641 seq_printf(seq, "\tNumber of PPS Outputs: %d\n", 4642 priv->dma_cap.pps_out_num); 4643 seq_printf(seq, "\tSafety Features: %s\n", 4644 priv->dma_cap.asp ? "Y" : "N"); 4645 seq_printf(seq, "\tFlexible RX Parser: %s\n", 4646 priv->dma_cap.frpsel ? "Y" : "N"); 4647 seq_printf(seq, "\tEnhanced Addressing: %d\n", 4648 priv->dma_cap.addr64); 4649 seq_printf(seq, "\tReceive Side Scaling: %s\n", 4650 priv->dma_cap.rssen ? "Y" : "N"); 4651 seq_printf(seq, "\tVLAN Hash Filtering: %s\n", 4652 priv->dma_cap.vlhash ? "Y" : "N"); 4653 seq_printf(seq, "\tSplit Header: %s\n", 4654 priv->dma_cap.sphen ? "Y" : "N"); 4655 seq_printf(seq, "\tVLAN TX Insertion: %s\n", 4656 priv->dma_cap.vlins ? "Y" : "N"); 4657 seq_printf(seq, "\tDouble VLAN: %s\n", 4658 priv->dma_cap.dvlan ? 
"Y" : "N"); 4659 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n", 4660 priv->dma_cap.l3l4fnum); 4661 seq_printf(seq, "\tARP Offloading: %s\n", 4662 priv->dma_cap.arpoffsel ? "Y" : "N"); 4663 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n", 4664 priv->dma_cap.estsel ? "Y" : "N"); 4665 seq_printf(seq, "\tFrame Preemption (FPE): %s\n", 4666 priv->dma_cap.fpesel ? "Y" : "N"); 4667 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", 4668 priv->dma_cap.tbssel ? "Y" : "N"); 4669 return 0; 4670 } 4671 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); 4672 4673 /* Use network device events to rename debugfs file entries. 4674 */ 4675 static int stmmac_device_event(struct notifier_block *unused, 4676 unsigned long event, void *ptr) 4677 { 4678 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4679 struct stmmac_priv *priv = netdev_priv(dev); 4680 4681 if (dev->netdev_ops != &stmmac_netdev_ops) 4682 goto done; 4683 4684 switch (event) { 4685 case NETDEV_CHANGENAME: 4686 if (priv->dbgfs_dir) 4687 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, 4688 priv->dbgfs_dir, 4689 stmmac_fs_dir, 4690 dev->name); 4691 break; 4692 } 4693 done: 4694 return NOTIFY_DONE; 4695 } 4696 4697 static struct notifier_block stmmac_notifier = { 4698 .notifier_call = stmmac_device_event, 4699 }; 4700 4701 static void stmmac_init_fs(struct net_device *dev) 4702 { 4703 struct stmmac_priv *priv = netdev_priv(dev); 4704 4705 rtnl_lock(); 4706 4707 /* Create per netdev entries */ 4708 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); 4709 4710 /* Entry to report DMA RX/TX rings */ 4711 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, 4712 &stmmac_rings_status_fops); 4713 4714 /* Entry to report the DMA HW features */ 4715 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, 4716 &stmmac_dma_cap_fops); 4717 4718 rtnl_unlock(); 4719 } 4720 4721 static void stmmac_exit_fs(struct net_device *dev) 4722 { 4723 struct stmmac_priv *priv = netdev_priv(dev); 4724 4725 debugfs_remove_recursive(priv->dbgfs_dir); 4726 } 4727 #endif /* CONFIG_DEBUG_FS */ 4728 4729 static u32 stmmac_vid_crc32_le(__le16 vid_le) 4730 { 4731 unsigned char *data = (unsigned char *)&vid_le; 4732 unsigned char data_byte = 0; 4733 u32 crc = ~0x0; 4734 u32 temp = 0; 4735 int i, bits; 4736 4737 bits = get_bitmask_order(VLAN_VID_MASK); 4738 for (i = 0; i < bits; i++) { 4739 if ((i % 8) == 0) 4740 data_byte = data[i / 8]; 4741 4742 temp = ((crc & 1) ^ data_byte) & 1; 4743 crc >>= 1; 4744 data_byte >>= 1; 4745 4746 if (temp) 4747 crc ^= 0xedb88320; 4748 } 4749 4750 return crc; 4751 } 4752 4753 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) 4754 { 4755 u32 crc, hash = 0; 4756 __le16 pmatch = 0; 4757 int count = 0; 4758 u16 vid = 0; 4759 4760 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { 4761 __le16 vid_le = cpu_to_le16(vid); 4762 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28; 4763 hash |= (1 << crc); 4764 count++; 4765 } 4766 4767 if (!priv->dma_cap.vlhash) { 4768 if (count > 2) /* VID = 0 always passes filter */ 4769 return -EOPNOTSUPP; 4770 4771 pmatch = cpu_to_le16(vid); 4772 hash = 0; 4773 } 4774 4775 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); 4776 } 4777 4778 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) 4779 { 4780 struct stmmac_priv *priv = netdev_priv(ndev); 4781 bool is_double = false; 4782 int ret; 4783 4784 ret = pm_runtime_get_sync(priv->device); 4785 if (ret < 0) { 4786 
pm_runtime_put_noidle(priv->device); 4787 return ret; 4788 } 4789 4790 if (be16_to_cpu(proto) == ETH_P_8021AD) 4791 is_double = true; 4792 4793 set_bit(vid, priv->active_vlans); 4794 ret = stmmac_vlan_update(priv, is_double); 4795 if (ret) { 4796 clear_bit(vid, priv->active_vlans); 4797 return ret; 4798 } 4799 4800 if (priv->hw->num_vlan) { 4801 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 4802 if (ret) 4803 return ret; 4804 } 4805 4806 return 0; 4807 } 4808 4809 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) 4810 { 4811 struct stmmac_priv *priv = netdev_priv(ndev); 4812 bool is_double = false; 4813 int ret; 4814 4815 if (be16_to_cpu(proto) == ETH_P_8021AD) 4816 is_double = true; 4817 4818 clear_bit(vid, priv->active_vlans); 4819 4820 if (priv->hw->num_vlan) { 4821 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); 4822 if (ret) 4823 goto del_vlan_error; 4824 } 4825 4826 ret = stmmac_vlan_update(priv, is_double); 4827 4828 del_vlan_error: 4829 pm_runtime_put(priv->device); 4830 4831 return ret; 4832 } 4833 4834 static const struct net_device_ops stmmac_netdev_ops = { 4835 .ndo_open = stmmac_open, 4836 .ndo_start_xmit = stmmac_xmit, 4837 .ndo_stop = stmmac_release, 4838 .ndo_change_mtu = stmmac_change_mtu, 4839 .ndo_fix_features = stmmac_fix_features, 4840 .ndo_set_features = stmmac_set_features, 4841 .ndo_set_rx_mode = stmmac_set_rx_mode, 4842 .ndo_tx_timeout = stmmac_tx_timeout, 4843 .ndo_do_ioctl = stmmac_ioctl, 4844 .ndo_setup_tc = stmmac_setup_tc, 4845 .ndo_select_queue = stmmac_select_queue, 4846 #ifdef CONFIG_NET_POLL_CONTROLLER 4847 .ndo_poll_controller = stmmac_poll_controller, 4848 #endif 4849 .ndo_set_mac_address = stmmac_set_mac_address, 4850 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid, 4851 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid, 4852 }; 4853 4854 static void stmmac_reset_subtask(struct stmmac_priv *priv) 4855 { 4856 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) 4857 return; 4858 if (test_bit(STMMAC_DOWN, &priv->state)) 4859 return; 4860 4861 netdev_err(priv->dev, "Reset adapter.\n"); 4862 4863 rtnl_lock(); 4864 netif_trans_update(priv->dev); 4865 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) 4866 usleep_range(1000, 2000); 4867 4868 set_bit(STMMAC_DOWN, &priv->state); 4869 dev_close(priv->dev); 4870 dev_open(priv->dev, NULL); 4871 clear_bit(STMMAC_DOWN, &priv->state); 4872 clear_bit(STMMAC_RESETING, &priv->state); 4873 rtnl_unlock(); 4874 } 4875 4876 static void stmmac_service_task(struct work_struct *work) 4877 { 4878 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 4879 service_task); 4880 4881 stmmac_reset_subtask(priv); 4882 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); 4883 } 4884 4885 /** 4886 * stmmac_hw_init - Init the MAC device 4887 * @priv: driver private structure 4888 * Description: this function is to configure the MAC device according to 4889 * some platform parameters or the HW capability register. It prepares the 4890 * driver to use either ring or chain modes and to setup either enhanced or 4891 * normal descriptors. 
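 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.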
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only works in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
				(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
				ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than 3.40.
	 * In some cases, for example on buggy HW, this feature has to be
	 * disabled; this can be done by passing the riwt_off field from
	 * the platform.
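	 * When enabled (use_riwt), RX completion interrupts can be delayed
	 * by the HW watchdog timer, mitigating the interrupt rate.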
4976 */ 4977 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || 4978 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { 4979 priv->use_riwt = 1; 4980 dev_info(priv->device, 4981 "Enable RX Mitigation via HW Watchdog Timer\n"); 4982 } 4983 4984 return 0; 4985 } 4986 4987 static void stmmac_napi_add(struct net_device *dev) 4988 { 4989 struct stmmac_priv *priv = netdev_priv(dev); 4990 u32 queue, maxq; 4991 4992 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 4993 4994 for (queue = 0; queue < maxq; queue++) { 4995 struct stmmac_channel *ch = &priv->channel[queue]; 4996 4997 ch->priv_data = priv; 4998 ch->index = queue; 4999 spin_lock_init(&ch->lock); 5000 5001 if (queue < priv->plat->rx_queues_to_use) { 5002 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx, 5003 NAPI_POLL_WEIGHT); 5004 } 5005 if (queue < priv->plat->tx_queues_to_use) { 5006 netif_tx_napi_add(dev, &ch->tx_napi, 5007 stmmac_napi_poll_tx, 5008 NAPI_POLL_WEIGHT); 5009 } 5010 } 5011 } 5012 5013 static void stmmac_napi_del(struct net_device *dev) 5014 { 5015 struct stmmac_priv *priv = netdev_priv(dev); 5016 u32 queue, maxq; 5017 5018 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); 5019 5020 for (queue = 0; queue < maxq; queue++) { 5021 struct stmmac_channel *ch = &priv->channel[queue]; 5022 5023 if (queue < priv->plat->rx_queues_to_use) 5024 netif_napi_del(&ch->rx_napi); 5025 if (queue < priv->plat->tx_queues_to_use) 5026 netif_napi_del(&ch->tx_napi); 5027 } 5028 } 5029 5030 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt) 5031 { 5032 struct stmmac_priv *priv = netdev_priv(dev); 5033 int ret = 0; 5034 5035 if (netif_running(dev)) 5036 stmmac_release(dev); 5037 5038 stmmac_napi_del(dev); 5039 5040 priv->plat->rx_queues_to_use = rx_cnt; 5041 priv->plat->tx_queues_to_use = tx_cnt; 5042 5043 stmmac_napi_add(dev); 5044 5045 if (netif_running(dev)) 5046 ret = stmmac_open(dev); 5047 5048 return ret; 5049 } 5050 5051 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size) 5052 { 5053 struct stmmac_priv *priv = netdev_priv(dev); 5054 int ret = 0; 5055 5056 if (netif_running(dev)) 5057 stmmac_release(dev); 5058 5059 priv->dma_rx_size = rx_size; 5060 priv->dma_tx_size = tx_size; 5061 5062 if (netif_running(dev)) 5063 ret = stmmac_open(dev); 5064 5065 return ret; 5066 } 5067 5068 /** 5069 * stmmac_dvr_probe 5070 * @device: device pointer 5071 * @plat_dat: platform data pointer 5072 * @res: stmmac resource pointer 5073 * Description: this is the main probe function used to 5074 * call the alloc_etherdev, allocate the priv structure. 5075 * Return: 5076 * returns 0 on success, otherwise errno. 
5077 */ 5078 int stmmac_dvr_probe(struct device *device, 5079 struct plat_stmmacenet_data *plat_dat, 5080 struct stmmac_resources *res) 5081 { 5082 struct net_device *ndev = NULL; 5083 struct stmmac_priv *priv; 5084 u32 rxq; 5085 int i, ret = 0; 5086 5087 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), 5088 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); 5089 if (!ndev) 5090 return -ENOMEM; 5091 5092 SET_NETDEV_DEV(ndev, device); 5093 5094 priv = netdev_priv(ndev); 5095 priv->device = device; 5096 priv->dev = ndev; 5097 5098 stmmac_set_ethtool_ops(ndev); 5099 priv->pause = pause; 5100 priv->plat = plat_dat; 5101 priv->ioaddr = res->addr; 5102 priv->dev->base_addr = (unsigned long)res->addr; 5103 5104 priv->dev->irq = res->irq; 5105 priv->wol_irq = res->wol_irq; 5106 priv->lpi_irq = res->lpi_irq; 5107 5108 if (!IS_ERR_OR_NULL(res->mac)) 5109 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); 5110 5111 dev_set_drvdata(device, priv->dev); 5112 5113 /* Verify driver arguments */ 5114 stmmac_verify_args(); 5115 5116 /* Allocate workqueue */ 5117 priv->wq = create_singlethread_workqueue("stmmac_wq"); 5118 if (!priv->wq) { 5119 dev_err(priv->device, "failed to create workqueue\n"); 5120 return -ENOMEM; 5121 } 5122 5123 INIT_WORK(&priv->service_task, stmmac_service_task); 5124 5125 /* Override with kernel parameters if supplied XXX CRS XXX 5126 * this needs to have multiple instances 5127 */ 5128 if ((phyaddr >= 0) && (phyaddr <= 31)) 5129 priv->plat->phy_addr = phyaddr; 5130 5131 if (priv->plat->stmmac_rst) { 5132 ret = reset_control_assert(priv->plat->stmmac_rst); 5133 reset_control_deassert(priv->plat->stmmac_rst); 5134 /* Some reset controllers have only reset callback instead of 5135 * assert + deassert callbacks pair. 5136 */ 5137 if (ret == -ENOTSUPP) 5138 reset_control_reset(priv->plat->stmmac_rst); 5139 } 5140 5141 /* Init MAC and get the capabilities */ 5142 ret = stmmac_hw_init(priv); 5143 if (ret) 5144 goto error_hw_init; 5145 5146 stmmac_check_ether_addr(priv); 5147 5148 ndev->netdev_ops = &stmmac_netdev_ops; 5149 5150 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 5151 NETIF_F_RXCSUM; 5152 5153 ret = stmmac_tc_init(priv, priv); 5154 if (!ret) { 5155 ndev->hw_features |= NETIF_F_HW_TC; 5156 } 5157 5158 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 5159 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 5160 if (priv->plat->has_gmac4) 5161 ndev->hw_features |= NETIF_F_GSO_UDP_L4; 5162 priv->tso = true; 5163 dev_info(priv->device, "TSO feature enabled\n"); 5164 } 5165 5166 if (priv->dma_cap.sphen) { 5167 ndev->hw_features |= NETIF_F_GRO; 5168 priv->sph = true; 5169 dev_info(priv->device, "SPH feature enabled\n"); 5170 } 5171 5172 /* The current IP register MAC_HW_Feature1[ADDR64] only define 5173 * 32/40/64 bit width, but some SOC support others like i.MX8MP 5174 * support 34 bits but it map to 40 bits width in MAC_HW_Feature1[ADDR64]. 5175 * So overwrite dma_cap.addr64 according to HW real design. 5176 */ 5177 if (priv->plat->addr64) 5178 priv->dma_cap.addr64 = priv->plat->addr64; 5179 5180 if (priv->dma_cap.addr64) { 5181 ret = dma_set_mask_and_coherent(device, 5182 DMA_BIT_MASK(priv->dma_cap.addr64)); 5183 if (!ret) { 5184 dev_info(priv->device, "Using %d bits DMA width\n", 5185 priv->dma_cap.addr64); 5186 5187 /* 5188 * If more than 32 bits can be addressed, make sure to 5189 * enable enhanced addressing mode. 
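			 * Enhanced addressing (eame) is only turned on when
			 * dma_addr_t itself is 64 bit wide, hence the
			 * CONFIG_ARCH_DMA_ADDR_T_64BIT check below.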
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.addr64 = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise the driver will
	 * try to set the MDC clock dynamically according to the actual
	 * CSR clock input.
5262 */ 5263 if (priv->plat->clk_csr >= 0) 5264 priv->clk_csr = priv->plat->clk_csr; 5265 else 5266 stmmac_clk_csr_set(priv); 5267 5268 stmmac_check_pcs_mode(priv); 5269 5270 pm_runtime_get_noresume(device); 5271 pm_runtime_set_active(device); 5272 pm_runtime_enable(device); 5273 5274 if (priv->hw->pcs != STMMAC_PCS_TBI && 5275 priv->hw->pcs != STMMAC_PCS_RTBI) { 5276 /* MDIO bus Registration */ 5277 ret = stmmac_mdio_register(ndev); 5278 if (ret < 0) { 5279 dev_err(priv->device, 5280 "%s: MDIO bus (id: %d) registration failed", 5281 __func__, priv->plat->bus_id); 5282 goto error_mdio_register; 5283 } 5284 } 5285 5286 ret = stmmac_phy_setup(priv); 5287 if (ret) { 5288 netdev_err(ndev, "failed to setup phy (%d)\n", ret); 5289 goto error_phy_setup; 5290 } 5291 5292 ret = register_netdev(ndev); 5293 if (ret) { 5294 dev_err(priv->device, "%s: ERROR %i registering the device\n", 5295 __func__, ret); 5296 goto error_netdev_register; 5297 } 5298 5299 if (priv->plat->serdes_powerup) { 5300 ret = priv->plat->serdes_powerup(ndev, 5301 priv->plat->bsp_priv); 5302 5303 if (ret < 0) 5304 goto error_serdes_powerup; 5305 } 5306 5307 #ifdef CONFIG_DEBUG_FS 5308 stmmac_init_fs(ndev); 5309 #endif 5310 5311 /* Let pm_runtime_put() disable the clocks. 5312 * If CONFIG_PM is not enabled, the clocks will stay powered. 5313 */ 5314 pm_runtime_put(device); 5315 5316 return ret; 5317 5318 error_serdes_powerup: 5319 unregister_netdev(ndev); 5320 error_netdev_register: 5321 phylink_destroy(priv->phylink); 5322 error_phy_setup: 5323 if (priv->hw->pcs != STMMAC_PCS_TBI && 5324 priv->hw->pcs != STMMAC_PCS_RTBI) 5325 stmmac_mdio_unregister(ndev); 5326 error_mdio_register: 5327 stmmac_napi_del(ndev); 5328 error_hw_init: 5329 destroy_workqueue(priv->wq); 5330 stmmac_bus_clks_config(priv, false); 5331 5332 return ret; 5333 } 5334 EXPORT_SYMBOL_GPL(stmmac_dvr_probe); 5335 5336 /** 5337 * stmmac_dvr_remove 5338 * @dev: device pointer 5339 * Description: this function resets the TX/RX processes, disables the MAC RX/TX 5340 * changes the link status, releases the DMA descriptor rings. 5341 */ 5342 int stmmac_dvr_remove(struct device *dev) 5343 { 5344 struct net_device *ndev = dev_get_drvdata(dev); 5345 struct stmmac_priv *priv = netdev_priv(ndev); 5346 5347 netdev_info(priv->dev, "%s: removing driver", __func__); 5348 5349 stmmac_stop_all_dma(priv); 5350 stmmac_mac_set(priv, priv->ioaddr, false); 5351 netif_carrier_off(ndev); 5352 unregister_netdev(ndev); 5353 5354 /* Serdes power down needs to happen after VLAN filter 5355 * is deleted that is triggered by unregister_netdev(). 5356 */ 5357 if (priv->plat->serdes_powerdown) 5358 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); 5359 5360 #ifdef CONFIG_DEBUG_FS 5361 stmmac_exit_fs(ndev); 5362 #endif 5363 phylink_destroy(priv->phylink); 5364 if (priv->plat->stmmac_rst) 5365 reset_control_assert(priv->plat->stmmac_rst); 5366 pm_runtime_put(dev); 5367 pm_runtime_disable(dev); 5368 if (priv->hw->pcs != STMMAC_PCS_TBI && 5369 priv->hw->pcs != STMMAC_PCS_RTBI) 5370 stmmac_mdio_unregister(ndev); 5371 destroy_workqueue(priv->wq); 5372 mutex_destroy(&priv->lock); 5373 5374 return 0; 5375 } 5376 EXPORT_SYMBOL_GPL(stmmac_dvr_remove); 5377 5378 /** 5379 * stmmac_suspend - suspend callback 5380 * @dev: device pointer 5381 * Description: this is the function to suspend the device and it is called 5382 * by the platform driver to stop the network queue, release the resources, 5383 * program the PMT register (for WoL), clean and release driver resources. 
5384 */ 5385 int stmmac_suspend(struct device *dev) 5386 { 5387 struct net_device *ndev = dev_get_drvdata(dev); 5388 struct stmmac_priv *priv = netdev_priv(ndev); 5389 u32 chan; 5390 int ret; 5391 5392 if (!ndev || !netif_running(ndev)) 5393 return 0; 5394 5395 phylink_mac_change(priv->phylink, false); 5396 5397 mutex_lock(&priv->lock); 5398 5399 netif_device_detach(ndev); 5400 5401 stmmac_disable_all_queues(priv); 5402 5403 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 5404 hrtimer_cancel(&priv->tx_queue[chan].txtimer); 5405 5406 if (priv->eee_enabled) { 5407 priv->tx_path_in_lpi_mode = false; 5408 del_timer_sync(&priv->eee_ctrl_timer); 5409 } 5410 5411 /* Stop TX/RX DMA */ 5412 stmmac_stop_all_dma(priv); 5413 5414 if (priv->plat->serdes_powerdown) 5415 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); 5416 5417 /* Enable Power down mode by programming the PMT regs */ 5418 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 5419 stmmac_pmt(priv, priv->hw, priv->wolopts); 5420 priv->irq_wake = 1; 5421 } else { 5422 mutex_unlock(&priv->lock); 5423 rtnl_lock(); 5424 if (device_may_wakeup(priv->device)) 5425 phylink_speed_down(priv->phylink, false); 5426 phylink_stop(priv->phylink); 5427 rtnl_unlock(); 5428 mutex_lock(&priv->lock); 5429 5430 stmmac_mac_set(priv, priv->ioaddr, false); 5431 pinctrl_pm_select_sleep_state(priv->device); 5432 /* Disable clock in case of PWM is off */ 5433 clk_disable_unprepare(priv->plat->clk_ptp_ref); 5434 ret = pm_runtime_force_suspend(dev); 5435 if (ret) 5436 return ret; 5437 } 5438 mutex_unlock(&priv->lock); 5439 5440 priv->speed = SPEED_UNKNOWN; 5441 return 0; 5442 } 5443 EXPORT_SYMBOL_GPL(stmmac_suspend); 5444 5445 /** 5446 * stmmac_reset_queues_param - reset queue parameters 5447 * @priv: device pointer 5448 */ 5449 static void stmmac_reset_queues_param(struct stmmac_priv *priv) 5450 { 5451 u32 rx_cnt = priv->plat->rx_queues_to_use; 5452 u32 tx_cnt = priv->plat->tx_queues_to_use; 5453 u32 queue; 5454 5455 for (queue = 0; queue < rx_cnt; queue++) { 5456 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 5457 5458 rx_q->cur_rx = 0; 5459 rx_q->dirty_rx = 0; 5460 } 5461 5462 for (queue = 0; queue < tx_cnt; queue++) { 5463 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 5464 5465 tx_q->cur_tx = 0; 5466 tx_q->dirty_tx = 0; 5467 tx_q->mss = 0; 5468 5469 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); 5470 } 5471 } 5472 5473 /** 5474 * stmmac_resume - resume callback 5475 * @dev: device pointer 5476 * Description: when resume this function is invoked to setup the DMA and CORE 5477 * in a usable state. 5478 */ 5479 int stmmac_resume(struct device *dev) 5480 { 5481 struct net_device *ndev = dev_get_drvdata(dev); 5482 struct stmmac_priv *priv = netdev_priv(ndev); 5483 int ret; 5484 5485 if (!netif_running(ndev)) 5486 return 0; 5487 5488 /* Power Down bit, into the PM register, is cleared 5489 * automatically as soon as a magic packet or a Wake-up frame 5490 * is received. Anyway, it's better to manually clear 5491 * this bit because it can generate problems while resuming 5492 * from another devices (e.g. serial console). 
5493 */ 5494 if (device_may_wakeup(priv->device) && priv->plat->pmt) { 5495 mutex_lock(&priv->lock); 5496 stmmac_pmt(priv, priv->hw, 0); 5497 mutex_unlock(&priv->lock); 5498 priv->irq_wake = 0; 5499 } else { 5500 pinctrl_pm_select_default_state(priv->device); 5501 /* enable the clk previously disabled */ 5502 ret = pm_runtime_force_resume(dev); 5503 if (ret) 5504 return ret; 5505 if (priv->plat->clk_ptp_ref) 5506 clk_prepare_enable(priv->plat->clk_ptp_ref); 5507 /* reset the phy so that it's ready */ 5508 if (priv->mii) 5509 stmmac_mdio_reset(priv->mii); 5510 } 5511 5512 if (priv->plat->serdes_powerup) { 5513 ret = priv->plat->serdes_powerup(ndev, 5514 priv->plat->bsp_priv); 5515 5516 if (ret < 0) 5517 return ret; 5518 } 5519 5520 if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { 5521 rtnl_lock(); 5522 phylink_start(priv->phylink); 5523 /* We may have called phylink_speed_down before */ 5524 phylink_speed_up(priv->phylink); 5525 rtnl_unlock(); 5526 } 5527 5528 rtnl_lock(); 5529 mutex_lock(&priv->lock); 5530 5531 stmmac_reset_queues_param(priv); 5532 stmmac_reinit_rx_buffers(priv); 5533 stmmac_free_tx_skbufs(priv); 5534 stmmac_clear_descriptors(priv); 5535 5536 stmmac_hw_setup(ndev, false); 5537 stmmac_init_coalesce(priv); 5538 stmmac_set_rx_mode(ndev); 5539 5540 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); 5541 5542 stmmac_enable_all_queues(priv); 5543 5544 mutex_unlock(&priv->lock); 5545 rtnl_unlock(); 5546 5547 phylink_mac_change(priv->phylink, true); 5548 5549 netif_device_attach(ndev); 5550 5551 return 0; 5552 } 5553 EXPORT_SYMBOL_GPL(stmmac_resume); 5554 5555 #ifndef MODULE 5556 static int __init stmmac_cmdline_opt(char *str) 5557 { 5558 char *opt; 5559 5560 if (!str || !*str) 5561 return -EINVAL; 5562 while ((opt = strsep(&str, ",")) != NULL) { 5563 if (!strncmp(opt, "debug:", 6)) { 5564 if (kstrtoint(opt + 6, 0, &debug)) 5565 goto err; 5566 } else if (!strncmp(opt, "phyaddr:", 8)) { 5567 if (kstrtoint(opt + 8, 0, &phyaddr)) 5568 goto err; 5569 } else if (!strncmp(opt, "buf_sz:", 7)) { 5570 if (kstrtoint(opt + 7, 0, &buf_sz)) 5571 goto err; 5572 } else if (!strncmp(opt, "tc:", 3)) { 5573 if (kstrtoint(opt + 3, 0, &tc)) 5574 goto err; 5575 } else if (!strncmp(opt, "watchdog:", 9)) { 5576 if (kstrtoint(opt + 9, 0, &watchdog)) 5577 goto err; 5578 } else if (!strncmp(opt, "flow_ctrl:", 10)) { 5579 if (kstrtoint(opt + 10, 0, &flow_ctrl)) 5580 goto err; 5581 } else if (!strncmp(opt, "pause:", 6)) { 5582 if (kstrtoint(opt + 6, 0, &pause)) 5583 goto err; 5584 } else if (!strncmp(opt, "eee_timer:", 10)) { 5585 if (kstrtoint(opt + 10, 0, &eee_timer)) 5586 goto err; 5587 } else if (!strncmp(opt, "chain_mode:", 11)) { 5588 if (kstrtoint(opt + 11, 0, &chain_mode)) 5589 goto err; 5590 } 5591 } 5592 return 0; 5593 5594 err: 5595 pr_err("%s: ERROR broken module parameter conversion", __func__); 5596 return -EINVAL; 5597 } 5598 5599 __setup("stmmaceth=", stmmac_cmdline_opt); 5600 #endif /* MODULE */ 5601 5602 static int __init stmmac_init(void) 5603 { 5604 #ifdef CONFIG_DEBUG_FS 5605 /* Create debugfs main directory if it doesn't exist yet */ 5606 if (!stmmac_fs_dir) 5607 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); 5608 register_netdevice_notifier(&stmmac_notifier); 5609 #endif 5610 5611 return 0; 5612 } 5613 5614 static void __exit stmmac_exit(void) 5615 { 5616 #ifdef CONFIG_DEBUG_FS 5617 unregister_netdevice_notifier(&stmmac_notifier); 5618 debugfs_remove_recursive(stmmac_fs_dir); 5619 #endif 5620 } 5621 5622 module_init(stmmac_init) 5623 
module_exit(stmmac_exit) 5624 5625 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); 5626 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 5627 MODULE_LICENSE("GPL"); 5628