/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t : timer_list hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int tx_lpi_timer = priv->tx_lpi_timer;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active && priv->eee_enabled) {
		netdev_dbg(priv->dev, "disable EEE\n");
		del_timer_sync(&priv->eee_ctrl_timer);
		stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     tx_lpi_timer);
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor and pass it to
 * the stack. It also performs some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		/* get the valid tstamp */
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}

	return;
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 * Description:
 *  This function configures the MAC to enable/disable both outgoing (TX)
 *  and incoming (RX) packet time stamping based on user input.
 * Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				    (u32)now.tv_sec, now.tv_nsec);
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 * Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}

static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	bitmap_andnot(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_andnot(state->advertising, state->advertising, mask,
		      __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static int stmmac_mac_link_state(struct phylink_config *config,
				 struct phylink_link_state *state)
{
	return -EOPNOTSUPP;
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	switch (state->speed) {
	case SPEED_1000:
		ctrl |= priv->hw->link.speed1000;
		break;
	case SPEED_100:
		ctrl |= priv->hw->link.speed100;
		break;
	case SPEED_10:
		ctrl |= priv->hw->link.speed10;
		break;
	default:
		return;
	}

	priv->speed = state->speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);

	if (!state->duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (state->pause)
		stmmac_mac_flow_ctrl(priv, state->duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
}

static void stmmac_mac_an_restart(struct phylink_config *config)
{
	/* Not Supported */
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
			       unsigned int mode, phy_interface_t interface,
			       struct phy_device *phy)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		stmmac_set_eee_pls(priv, priv->hw, true);
	}
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_link_state = stmmac_mac_link_state,
	.mac_config = stmmac_mac_config,
	.mac_an_restart = stmmac_mac_an_restart,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node) {
		ret = phylink_of_phy_connect(priv->phylink, node, 0);
	} else {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	return ret;
}

static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct device_node *node = priv->plat->phylink_node;
	int mode = priv->plat->interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&priv->phylink_config, of_fwnode_handle(node),
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc)
			head_rx = (void *)rx_q->dma_erx;
		else
			head_rx = (void *)rx_q->dma_rx;

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc)
			head_tx = (void *)tx_q->dma_etx;
		else
			head_tx = (void *)tx_q->dma_tx;

		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					priv->use_riwt, priv->mode,
					(i == DMA_RX_SIZE - 1),
					priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					priv->use_riwt, priv->mode,
					(i == DMA_RX_SIZE - 1),
					priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
					priv->mode, (i == DMA_TX_SIZE - 1));
		else
			stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
					priv->mode, (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	rx_q->rx_skbuff[i] = skb;
	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);

	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	if (rx_q->rx_skbuff[i]) {
		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
	}
	rx_q->rx_skbuff[i] = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

/**
 * init_dma_rx_desc_rings - init the RX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	int bfsize = 0;
	int queue;
	int i;

	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_rx_phy=0x%08x\n", __func__,
			  (u32)rx_q->dma_rx_phy);

		for (i = 0; i < DMA_RX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((rx_q->dma_erx + i)->basic);
			else
				p = rx_q->dma_rx + i;

			ret = stmmac_init_rx_buffers(priv, p, i, flags,
						     queue);
			if (ret)
				goto err_init_rx_buffers;

			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
				  (unsigned int)rx_q->rx_skbuff_dma[i]);
		}

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);

		stmmac_clear_rx_descriptors(priv, queue);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, rx_q->dma_erx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
			else
				stmmac_mode_init(priv, rx_q->dma_rx,
						rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
		}
	}

	buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		while (--i >= 0)
			stmmac_free_rx_buffer(priv, queue, i);

		if (queue == 0)
			break;

		i = DMA_RX_SIZE;
		queue--;
	}

	return ret;
}

/**
 * init_dma_tx_desc_rings - init the TX descriptor rings
 * @dev: net device structure.
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;
	int i;

	for (queue = 0; queue < tx_queue_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		netif_dbg(priv, probe, priv->dev,
			  "(%s) dma_tx_phy=0x%08x\n", __func__,
			  (u32)tx_q->dma_tx_phy);

		/* Setup the chained descriptor addresses */
		if (priv->mode == STMMAC_CHAIN_MODE) {
			if (priv->extend_desc)
				stmmac_mode_init(priv, tx_q->dma_etx,
						tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
			else
				stmmac_mode_init(priv, tx_q->dma_tx,
						tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
		}

		for (i = 0; i < DMA_TX_SIZE; i++) {
			struct dma_desc *p;

			if (priv->extend_desc)
				p = &((tx_q->dma_etx + i)->basic);
			else
				p = tx_q->dma_tx + i;

			stmmac_clear_desc(priv, p);

			tx_q->tx_skbuff_dma[i].buf = 0;
			tx_q->tx_skbuff_dma[i].map_as_page = false;
			tx_q->tx_skbuff_dma[i].len = 0;
			tx_q->tx_skbuff_dma[i].last_segment = false;
			tx_q->tx_skbuff[i] = NULL;
		}

		tx_q->dirty_tx = 0;
		tx_q->cur_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}

/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++)
		stmmac_free_tx_buffer(priv, queue, i);
}

/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the DMA RX socket buffers */
		dma_free_rx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_RX_SIZE * sizeof(struct dma_desc),
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);

		kfree(rx_q->rx_skbuff_dma);
		kfree(rx_q->rx_skbuff);
	}
}

/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 */
static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		/* Release the DMA TX socket buffers */
		dma_free_tx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_TX_SIZE * sizeof(struct dma_desc),
					  tx_q->dma_tx, tx_q->dma_tx_phy);
		else
			dma_free_coherent(priv->device, DMA_TX_SIZE *
					  sizeof(struct dma_extended_desc),
					  tx_q->dma_etx, tx_q->dma_tx_phy);

		kfree(tx_q->tx_skbuff_dma);
		kfree(tx_q->tx_skbuff);
	}
}

/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->queue_index = queue;
		rx_q->priv_data = priv;

		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
						    sizeof(dma_addr_t),
						    GFP_KERNEL);
		if (!rx_q->rx_skbuff_dma)
			goto err_dma;

		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!rx_q->rx_skbuff)
			goto err_dma;

		if (priv->extend_desc) {
			rx_q->dma_erx = dma_alloc_coherent(priv->device,
							   DMA_RX_SIZE * sizeof(struct dma_extended_desc),
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
			if (!rx_q->dma_erx)
				goto err_dma;

		} else {
			rx_q->dma_rx = dma_alloc_coherent(priv->device,
							  DMA_RX_SIZE * sizeof(struct dma_desc),
							  &rx_q->dma_rx_phy,
							  GFP_KERNEL);
			if (!rx_q->dma_rx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->queue_index = queue;
		tx_q->priv_data = priv;

		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
						    sizeof(*tx_q->tx_skbuff_dma),
						    GFP_KERNEL);
		if (!tx_q->tx_skbuff_dma)
			goto err_dma;

		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!tx_q->tx_skbuff)
			goto err_dma;

		if (priv->extend_desc) {
			tx_q->dma_etx = dma_alloc_coherent(priv->device,
							   DMA_TX_SIZE * sizeof(struct dma_extended_desc),
							   &tx_q->dma_tx_phy,
							   GFP_KERNEL);
			if (!tx_q->dma_etx)
				goto err_dma;
		} else {
			tx_q->dma_tx = dma_alloc_coherent(priv->device,
							  DMA_TX_SIZE * sizeof(struct dma_desc),
							  &tx_q->dma_tx_phy,
							  GFP_KERNEL);
			if (!tx_q->dma_tx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_tx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	/* RX Allocation */
	int ret = alloc_dma_rx_desc_resources(priv);

	if (ret)
		return ret;

	ret = alloc_dma_tx_desc_resources(priv);

	return ret;
}

/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA RX socket buffers */
	free_dma_rx_desc_resources(priv);

	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv);
}

/**
 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
 * @priv: driver private structure
 * Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
	}
}

/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}

/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it is used for configuring the DMA operation mode register in
 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				rxfifosz, qmode);
		stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
				chan);
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				txfifosz, qmode);
	}
}

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, count = 0;

	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

	entry = tx_q->dirty_tx;
	while ((entry != tx_q->cur_tx) && (count < budget)) {
		struct sk_buff *skb = tx_q->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
				&priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			tx_q->tx_skbuff[entry] = NULL;
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	return count;
}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
1926 */ 1927 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) 1928 { 1929 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 1930 int i; 1931 1932 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); 1933 1934 stmmac_stop_tx_dma(priv, chan); 1935 dma_free_tx_skbufs(priv, chan); 1936 for (i = 0; i < DMA_TX_SIZE; i++) 1937 if (priv->extend_desc) 1938 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic, 1939 priv->mode, (i == DMA_TX_SIZE - 1)); 1940 else 1941 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i], 1942 priv->mode, (i == DMA_TX_SIZE - 1)); 1943 tx_q->dirty_tx = 0; 1944 tx_q->cur_tx = 0; 1945 tx_q->mss = 0; 1946 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); 1947 stmmac_start_tx_dma(priv, chan); 1948 1949 priv->dev->stats.tx_errors++; 1950 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); 1951 } 1952 1953 /** 1954 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel 1955 * @priv: driver private structure 1956 * @txmode: TX operating mode 1957 * @rxmode: RX operating mode 1958 * @chan: channel index 1959 * Description: it is used for configuring of the DMA operation mode in 1960 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward 1961 * mode. 1962 */ 1963 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, 1964 u32 rxmode, u32 chan) 1965 { 1966 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; 1967 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; 1968 u32 rx_channels_count = priv->plat->rx_queues_to_use; 1969 u32 tx_channels_count = priv->plat->tx_queues_to_use; 1970 int rxfifosz = priv->plat->rx_fifo_size; 1971 int txfifosz = priv->plat->tx_fifo_size; 1972 1973 if (rxfifosz == 0) 1974 rxfifosz = priv->dma_cap.rx_fifo_size; 1975 if (txfifosz == 0) 1976 txfifosz = priv->dma_cap.tx_fifo_size; 1977 1978 /* Adjust for real per queue fifo size */ 1979 rxfifosz /= rx_channels_count; 1980 txfifosz /= tx_channels_count; 1981 1982 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); 1983 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); 1984 } 1985 1986 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv) 1987 { 1988 int ret; 1989 1990 ret = stmmac_safety_feat_irq_status(priv, priv->dev, 1991 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); 1992 if (ret && (ret != -EINVAL)) { 1993 stmmac_global_err(priv); 1994 return true; 1995 } 1996 1997 return false; 1998 } 1999 2000 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) 2001 { 2002 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, 2003 &priv->xstats, chan); 2004 struct stmmac_channel *ch = &priv->channel[chan]; 2005 2006 if (status) 2007 status |= handle_rx | handle_tx; 2008 2009 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { 2010 stmmac_disable_dma_irq(priv, priv->ioaddr, chan); 2011 napi_schedule_irqoff(&ch->rx_napi); 2012 } 2013 2014 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { 2015 stmmac_disable_dma_irq(priv, priv->ioaddr, chan); 2016 napi_schedule_irqoff(&ch->tx_napi); 2017 } 2018 2019 return status; 2020 } 2021 2022 /** 2023 * stmmac_dma_interrupt - DMA ISR 2024 * @priv: driver private structure 2025 * Description: this is the DMA ISR. It is called by the main ISR. 2026 * It calls the dwmac dma routine and schedule poll method in case of some 2027 * work can be done. 
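 * On a tx_hard_error_bump_tc condition the DMA threshold (tc) is bumped in steps of 64 while it is still at or below 256; a plain tx_hard_error triggers a full channel restart via stmmac_tx_err().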
2028 */ 2029 static void stmmac_dma_interrupt(struct stmmac_priv *priv) 2030 { 2031 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2032 u32 rx_channel_count = priv->plat->rx_queues_to_use; 2033 u32 channels_to_check = tx_channel_count > rx_channel_count ? 2034 tx_channel_count : rx_channel_count; 2035 u32 chan; 2036 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)]; 2037 2038 /* Make sure we never check beyond our status buffer. */ 2039 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) 2040 channels_to_check = ARRAY_SIZE(status); 2041 2042 for (chan = 0; chan < channels_to_check; chan++) 2043 status[chan] = stmmac_napi_check(priv, chan); 2044 2045 for (chan = 0; chan < tx_channel_count; chan++) { 2046 if (unlikely(status[chan] & tx_hard_error_bump_tc)) { 2047 /* Try to bump up the dma threshold on this failure */ 2048 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && 2049 (tc <= 256)) { 2050 tc += 64; 2051 if (priv->plat->force_thresh_dma_mode) 2052 stmmac_set_dma_operation_mode(priv, 2053 tc, 2054 tc, 2055 chan); 2056 else 2057 stmmac_set_dma_operation_mode(priv, 2058 tc, 2059 SF_DMA_MODE, 2060 chan); 2061 priv->xstats.threshold = tc; 2062 } 2063 } else if (unlikely(status[chan] == tx_hard_error)) { 2064 stmmac_tx_err(priv, chan); 2065 } 2066 } 2067 } 2068 2069 /** 2070 * stmmac_mmc_setup: setup the Mac Management Counters (MMC) 2071 * @priv: driver private structure 2072 * Description: this masks the MMC irq; in fact, the counters are managed in SW. 2073 */ 2074 static void stmmac_mmc_setup(struct stmmac_priv *priv) 2075 { 2076 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | 2077 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; 2078 2079 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); 2080 2081 if (priv->dma_cap.rmon) { 2082 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); 2083 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); 2084 } else 2085 netdev_info(priv->dev, "No MAC Management Counters available\n"); 2086 } 2087 2088 /** 2089 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. 2090 * @priv: driver private structure 2091 * Description: 2092 * new GMAC chip generations have a dedicated register to indicate the 2093 * presence of the optional features/functions. 2094 * It can also be used to override the value passed through the 2095 * platform, which is necessary for old MAC10/100 and GMAC chips. 2096 */ 2097 static int stmmac_get_hw_features(struct stmmac_priv *priv) 2098 { 2099 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; 2100 } 2101 2102 /** 2103 * stmmac_check_ether_addr - check if the MAC addr is valid 2104 * @priv: driver private structure 2105 * Description: 2106 * it verifies that the MAC address is valid; in case of failure it 2107 * generates a random MAC address 2108 */ 2109 static void stmmac_check_ether_addr(struct stmmac_priv *priv) 2110 { 2111 if (!is_valid_ether_addr(priv->dev->dev_addr)) { 2112 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); 2113 if (!is_valid_ether_addr(priv->dev->dev_addr)) 2114 eth_hw_addr_random(priv->dev); 2115 dev_info(priv->device, "device MAC address %pM\n", 2116 priv->dev->dev_addr); 2117 } 2118 } 2119 2120 /** 2121 * stmmac_init_dma_engine - DMA init. 2122 * @priv: driver private structure 2123 * Description: 2124 * It initializes the DMA by invoking the specific MAC/GMAC callback. 2125 * Some DMA parameters can be passed from the platform; 2126 * if they are not passed, a default is used for the MAC or GMAC.
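 * For example, with 4 RX and 2 TX channels the CSR loop below programs max(4, 2) = 4 common channel registers, then each RX/TX channel gets its descriptor base address and tail pointer written.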
2127 */ 2128 static int stmmac_init_dma_engine(struct stmmac_priv *priv) 2129 { 2130 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2131 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2132 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count); 2133 struct stmmac_rx_queue *rx_q; 2134 struct stmmac_tx_queue *tx_q; 2135 u32 chan = 0; 2136 int atds = 0; 2137 int ret = 0; 2138 2139 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { 2140 dev_err(priv->device, "Invalid DMA configuration\n"); 2141 return -EINVAL; 2142 } 2143 2144 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) 2145 atds = 1; 2146 2147 ret = stmmac_reset(priv, priv->ioaddr); 2148 if (ret) { 2149 dev_err(priv->device, "Failed to reset the dma\n"); 2150 return ret; 2151 } 2152 2153 /* DMA Configuration */ 2154 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); 2155 2156 if (priv->plat->axi) 2157 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); 2158 2159 /* DMA CSR Channel configuration */ 2160 for (chan = 0; chan < dma_csr_ch; chan++) 2161 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); 2162 2163 /* DMA RX Channel Configuration */ 2164 for (chan = 0; chan < rx_channels_count; chan++) { 2165 rx_q = &priv->rx_queue[chan]; 2166 2167 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2168 rx_q->dma_rx_phy, chan); 2169 2170 rx_q->rx_tail_addr = rx_q->dma_rx_phy + 2171 (DMA_RX_SIZE * sizeof(struct dma_desc)); 2172 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 2173 rx_q->rx_tail_addr, chan); 2174 } 2175 2176 /* DMA TX Channel Configuration */ 2177 for (chan = 0; chan < tx_channels_count; chan++) { 2178 tx_q = &priv->tx_queue[chan]; 2179 2180 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, 2181 tx_q->dma_tx_phy, chan); 2182 2183 tx_q->tx_tail_addr = tx_q->dma_tx_phy; 2184 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, 2185 tx_q->tx_tail_addr, chan); 2186 } 2187 2188 return ret; 2189 } 2190 2191 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) 2192 { 2193 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2194 2195 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); 2196 } 2197 2198 /** 2199 * stmmac_tx_timer - mitigation sw timer for tx. 2200 * @data: data pointer 2201 * Description: 2202 * This is the timer handler to directly invoke the stmmac_tx_clean. 2203 */ 2204 static void stmmac_tx_timer(struct timer_list *t) 2205 { 2206 struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer); 2207 struct stmmac_priv *priv = tx_q->priv_data; 2208 struct stmmac_channel *ch; 2209 2210 ch = &priv->channel[tx_q->queue_index]; 2211 2212 /* 2213 * If NAPI is already running we can miss some events. Let's rearm 2214 * the timer and try again. 2215 */ 2216 if (likely(napi_schedule_prep(&ch->tx_napi))) 2217 __napi_schedule(&ch->tx_napi); 2218 else 2219 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10)); 2220 } 2221 2222 /** 2223 * stmmac_init_tx_coalesce - init tx mitigation options. 2224 * @priv: driver private structure 2225 * Description: 2226 * This inits the transmit coalesce parameters: i.e. timer rate, 2227 * timer handler and default threshold used for enabling the 2228 * interrupt on completion bit. 
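 * An interrupt on completion is requested roughly once every tx_coal_frames transmitted frames; anything left in between is reclaimed when the per-queue timer armed by stmmac_tx_timer_arm() fires.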
2229 */ 2230 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) 2231 { 2232 u32 tx_channel_count = priv->plat->tx_queues_to_use; 2233 u32 chan; 2234 2235 priv->tx_coal_frames = STMMAC_TX_FRAMES; 2236 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; 2237 2238 for (chan = 0; chan < tx_channel_count; chan++) { 2239 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; 2240 2241 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0); 2242 } 2243 } 2244 2245 static void stmmac_set_rings_length(struct stmmac_priv *priv) 2246 { 2247 u32 rx_channels_count = priv->plat->rx_queues_to_use; 2248 u32 tx_channels_count = priv->plat->tx_queues_to_use; 2249 u32 chan; 2250 2251 /* set TX ring length */ 2252 for (chan = 0; chan < tx_channels_count; chan++) 2253 stmmac_set_tx_ring_len(priv, priv->ioaddr, 2254 (DMA_TX_SIZE - 1), chan); 2255 2256 /* set RX ring length */ 2257 for (chan = 0; chan < rx_channels_count; chan++) 2258 stmmac_set_rx_ring_len(priv, priv->ioaddr, 2259 (DMA_RX_SIZE - 1), chan); 2260 } 2261 2262 /** 2263 * stmmac_set_tx_queue_weight - Set TX queue weight 2264 * @priv: driver private structure 2265 * Description: It is used for setting TX queues weight 2266 */ 2267 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) 2268 { 2269 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2270 u32 weight; 2271 u32 queue; 2272 2273 for (queue = 0; queue < tx_queues_count; queue++) { 2274 weight = priv->plat->tx_queues_cfg[queue].weight; 2275 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); 2276 } 2277 } 2278 2279 /** 2280 * stmmac_configure_cbs - Configure CBS in TX queue 2281 * @priv: driver private structure 2282 * Description: It is used for configuring CBS in AVB TX queues 2283 */ 2284 static void stmmac_configure_cbs(struct stmmac_priv *priv) 2285 { 2286 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2287 u32 mode_to_use; 2288 u32 queue; 2289 2290 /* queue 0 is reserved for legacy traffic */ 2291 for (queue = 1; queue < tx_queues_count; queue++) { 2292 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; 2293 if (mode_to_use == MTL_QUEUE_DCB) 2294 continue; 2295 2296 stmmac_config_cbs(priv, priv->hw, 2297 priv->plat->tx_queues_cfg[queue].send_slope, 2298 priv->plat->tx_queues_cfg[queue].idle_slope, 2299 priv->plat->tx_queues_cfg[queue].high_credit, 2300 priv->plat->tx_queues_cfg[queue].low_credit, 2301 queue); 2302 } 2303 } 2304 2305 /** 2306 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel 2307 * @priv: driver private structure 2308 * Description: It is used for mapping RX queues to RX dma channels 2309 */ 2310 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) 2311 { 2312 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2313 u32 queue; 2314 u32 chan; 2315 2316 for (queue = 0; queue < rx_queues_count; queue++) { 2317 chan = priv->plat->rx_queues_cfg[queue].chan; 2318 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); 2319 } 2320 } 2321 2322 /** 2323 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority 2324 * @priv: driver private structure 2325 * Description: It is used for configuring the RX Queue Priority 2326 */ 2327 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) 2328 { 2329 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2330 u32 queue; 2331 u32 prio; 2332 2333 for (queue = 0; queue < rx_queues_count; queue++) { 2334 if (!priv->plat->rx_queues_cfg[queue].use_prio) 2335 continue; 2336 2337 prio = priv->plat->rx_queues_cfg[queue].prio; 2338 stmmac_rx_queue_prio(priv, priv->hw, 
prio, queue); 2339 } 2340 } 2341 2342 /** 2343 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority 2344 * @priv: driver private structure 2345 * Description: It is used for configuring the TX Queue Priority 2346 */ 2347 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) 2348 { 2349 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2350 u32 queue; 2351 u32 prio; 2352 2353 for (queue = 0; queue < tx_queues_count; queue++) { 2354 if (!priv->plat->tx_queues_cfg[queue].use_prio) 2355 continue; 2356 2357 prio = priv->plat->tx_queues_cfg[queue].prio; 2358 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); 2359 } 2360 } 2361 2362 /** 2363 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing 2364 * @priv: driver private structure 2365 * Description: It is used for configuring the RX queue routing 2366 */ 2367 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) 2368 { 2369 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2370 u32 queue; 2371 u8 packet; 2372 2373 for (queue = 0; queue < rx_queues_count; queue++) { 2374 /* no specific packet type routing specified for the queue */ 2375 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) 2376 continue; 2377 2378 packet = priv->plat->rx_queues_cfg[queue].pkt_route; 2379 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); 2380 } 2381 } 2382 2383 /** 2384 * stmmac_mtl_configuration - Configure MTL 2385 * @priv: driver private structure 2386 * Description: It is used for configuring MTL 2387 */ 2388 static void stmmac_mtl_configuration(struct stmmac_priv *priv) 2389 { 2390 u32 rx_queues_count = priv->plat->rx_queues_to_use; 2391 u32 tx_queues_count = priv->plat->tx_queues_to_use; 2392 2393 if (tx_queues_count > 1) 2394 stmmac_set_tx_queue_weight(priv); 2395 2396 /* Configure MTL RX algorithms */ 2397 if (rx_queues_count > 1) 2398 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, 2399 priv->plat->rx_sched_algorithm); 2400 2401 /* Configure MTL TX algorithms */ 2402 if (tx_queues_count > 1) 2403 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, 2404 priv->plat->tx_sched_algorithm); 2405 2406 /* Configure CBS in AVB TX queues */ 2407 if (tx_queues_count > 1) 2408 stmmac_configure_cbs(priv); 2409 2410 /* Map RX MTL to DMA channels */ 2411 stmmac_rx_queue_dma_chan_map(priv); 2412 2413 /* Enable MAC RX Queues */ 2414 stmmac_mac_enable_rx_queues(priv); 2415 2416 /* Set RX priorities */ 2417 if (rx_queues_count > 1) 2418 stmmac_mac_config_rx_queues_prio(priv); 2419 2420 /* Set TX priorities */ 2421 if (tx_queues_count > 1) 2422 stmmac_mac_config_tx_queues_prio(priv); 2423 2424 /* Set RX routing */ 2425 if (rx_queues_count > 1) 2426 stmmac_mac_config_rx_queues_routing(priv); 2427 } 2428 2429 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv) 2430 { 2431 if (priv->dma_cap.asp) { 2432 netdev_info(priv->dev, "Enabling Safety Features\n"); 2433 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp); 2434 } else { 2435 netdev_info(priv->dev, "No Safety Features support found\n"); 2436 } 2437 } 2438 2439 /** 2440 * stmmac_hw_setup - setup the MAC in a usable state. 2441 * @dev : pointer to the device structure. 2442 * Description: 2443 * this is the main function to setup the HW in a usable state: the 2444 * DMA engine is reset, the core registers are configured (e.g. AXI, 2445 * checksum features, timers) and the DMA is ready to start receiving and 2446 * transmitting.
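 * The sequence is: DMA engine init, MAC address programming, core/MTL/safety configuration, RX IPC check, MAC enable, DMA operation mode, MMC setup, optional PTP init, RIWT/PCS setup, ring lengths, TSO enable and finally the DMA start.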
2447 * Return value: 2448 * 0 on success and an appropriate (-)ve integer as defined in errno.h 2449 * file on failure. 2450 */ 2451 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) 2452 { 2453 struct stmmac_priv *priv = netdev_priv(dev); 2454 u32 rx_cnt = priv->plat->rx_queues_to_use; 2455 u32 tx_cnt = priv->plat->tx_queues_to_use; 2456 u32 chan; 2457 int ret; 2458 2459 /* DMA initialization and SW reset */ 2460 ret = stmmac_init_dma_engine(priv); 2461 if (ret < 0) { 2462 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", 2463 __func__); 2464 return ret; 2465 } 2466 2467 /* Copy the MAC addr into the HW */ 2468 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); 2469 2470 /* PS and related bits will be programmed according to the speed */ 2471 if (priv->hw->pcs) { 2472 int speed = priv->plat->mac_port_sel_speed; 2473 2474 if ((speed == SPEED_10) || (speed == SPEED_100) || 2475 (speed == SPEED_1000)) { 2476 priv->hw->ps = speed; 2477 } else { 2478 dev_warn(priv->device, "invalid port speed\n"); 2479 priv->hw->ps = 0; 2480 } 2481 } 2482 2483 /* Initialize the MAC Core */ 2484 stmmac_core_init(priv, priv->hw, dev); 2485 2486 /* Initialize MTL*/ 2487 stmmac_mtl_configuration(priv); 2488 2489 /* Initialize Safety Features */ 2490 stmmac_safety_feat_configuration(priv); 2491 2492 ret = stmmac_rx_ipc(priv, priv->hw); 2493 if (!ret) { 2494 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); 2495 priv->plat->rx_coe = STMMAC_RX_COE_NONE; 2496 priv->hw->rx_csum = 0; 2497 } 2498 2499 /* Enable the MAC Rx/Tx */ 2500 stmmac_mac_set(priv, priv->ioaddr, true); 2501 2502 /* Set the HW DMA mode and the COE */ 2503 stmmac_dma_operation_mode(priv); 2504 2505 stmmac_mmc_setup(priv); 2506 2507 if (init_ptp) { 2508 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); 2509 if (ret < 0) 2510 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); 2511 2512 ret = stmmac_init_ptp(priv); 2513 if (ret == -EOPNOTSUPP) 2514 netdev_warn(priv->dev, "PTP not supported by HW\n"); 2515 else if (ret) 2516 netdev_warn(priv->dev, "PTP init failed\n"); 2517 } 2518 2519 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; 2520 2521 if (priv->use_riwt) { 2522 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt); 2523 if (!ret) 2524 priv->rx_riwt = MAX_DMA_RIWT; 2525 } 2526 2527 if (priv->hw->pcs) 2528 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0); 2529 2530 /* set TX and RX rings length */ 2531 stmmac_set_rings_length(priv); 2532 2533 /* Enable TSO */ 2534 if (priv->tso) { 2535 for (chan = 0; chan < tx_cnt; chan++) 2536 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); 2537 } 2538 2539 /* Start the ball rolling... */ 2540 stmmac_start_all_dma(priv); 2541 2542 return 0; 2543 } 2544 2545 static void stmmac_hw_teardown(struct net_device *dev) 2546 { 2547 struct stmmac_priv *priv = netdev_priv(dev); 2548 2549 clk_disable_unprepare(priv->plat->clk_ptp_ref); 2550 } 2551 2552 /** 2553 * stmmac_open - open entry point of the driver 2554 * @dev : pointer to the device structure. 2555 * Description: 2556 * This function is the open entry point of the driver. 2557 * Return value: 2558 * 0 on success and an appropriate (-)ve integer as defined in errno.h 2559 * file on failure. 
2560 */ 2561 static int stmmac_open(struct net_device *dev) 2562 { 2563 struct stmmac_priv *priv = netdev_priv(dev); 2564 u32 chan; 2565 int ret; 2566 2567 if (priv->hw->pcs != STMMAC_PCS_RGMII && 2568 priv->hw->pcs != STMMAC_PCS_TBI && 2569 priv->hw->pcs != STMMAC_PCS_RTBI) { 2570 ret = stmmac_init_phy(dev); 2571 if (ret) { 2572 netdev_err(priv->dev, 2573 "%s: Cannot attach to PHY (error: %d)\n", 2574 __func__, ret); 2575 return ret; 2576 } 2577 } 2578 2579 /* Extra statistics */ 2580 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); 2581 priv->xstats.threshold = tc; 2582 2583 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); 2584 priv->rx_copybreak = STMMAC_RX_COPYBREAK; 2585 2586 ret = alloc_dma_desc_resources(priv); 2587 if (ret < 0) { 2588 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", 2589 __func__); 2590 goto dma_desc_error; 2591 } 2592 2593 ret = init_dma_desc_rings(dev, GFP_KERNEL); 2594 if (ret < 0) { 2595 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", 2596 __func__); 2597 goto init_error; 2598 } 2599 2600 ret = stmmac_hw_setup(dev, true); 2601 if (ret < 0) { 2602 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); 2603 goto init_error; 2604 } 2605 2606 stmmac_init_tx_coalesce(priv); 2607 2608 phylink_start(priv->phylink); 2609 2610 /* Request the IRQ lines */ 2611 ret = request_irq(dev->irq, stmmac_interrupt, 2612 IRQF_SHARED, dev->name, dev); 2613 if (unlikely(ret < 0)) { 2614 netdev_err(priv->dev, 2615 "%s: ERROR: allocating the IRQ %d (error: %d)\n", 2616 __func__, dev->irq, ret); 2617 goto irq_error; 2618 } 2619 2620 /* Request the Wake IRQ in case of another line is used for WoL */ 2621 if (priv->wol_irq != dev->irq) { 2622 ret = request_irq(priv->wol_irq, stmmac_interrupt, 2623 IRQF_SHARED, dev->name, dev); 2624 if (unlikely(ret < 0)) { 2625 netdev_err(priv->dev, 2626 "%s: ERROR: allocating the WoL IRQ %d (%d)\n", 2627 __func__, priv->wol_irq, ret); 2628 goto wolirq_error; 2629 } 2630 } 2631 2632 /* Request the IRQ lines */ 2633 if (priv->lpi_irq > 0) { 2634 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, 2635 dev->name, dev); 2636 if (unlikely(ret < 0)) { 2637 netdev_err(priv->dev, 2638 "%s: ERROR: allocating the LPI IRQ %d (%d)\n", 2639 __func__, priv->lpi_irq, ret); 2640 goto lpiirq_error; 2641 } 2642 } 2643 2644 stmmac_enable_all_queues(priv); 2645 stmmac_start_all_queues(priv); 2646 2647 return 0; 2648 2649 lpiirq_error: 2650 if (priv->wol_irq != dev->irq) 2651 free_irq(priv->wol_irq, dev); 2652 wolirq_error: 2653 free_irq(dev->irq, dev); 2654 irq_error: 2655 phylink_stop(priv->phylink); 2656 2657 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 2658 del_timer_sync(&priv->tx_queue[chan].txtimer); 2659 2660 stmmac_hw_teardown(dev); 2661 init_error: 2662 free_dma_desc_resources(priv); 2663 dma_desc_error: 2664 phylink_disconnect_phy(priv->phylink); 2665 return ret; 2666 } 2667 2668 /** 2669 * stmmac_release - close entry point of the driver 2670 * @dev : device pointer. 2671 * Description: 2672 * This is the stop entry point of the driver. 
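 * It stops the PHY and all the queues, releases the IRQ lines, stops the DMA, frees the descriptor resources and disables the MAC.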
2673 */ 2674 static int stmmac_release(struct net_device *dev) 2675 { 2676 struct stmmac_priv *priv = netdev_priv(dev); 2677 u32 chan; 2678 2679 if (priv->eee_enabled) 2680 del_timer_sync(&priv->eee_ctrl_timer); 2681 2682 /* Stop and disconnect the PHY */ 2683 phylink_stop(priv->phylink); 2684 phylink_disconnect_phy(priv->phylink); 2685 2686 stmmac_stop_all_queues(priv); 2687 2688 stmmac_disable_all_queues(priv); 2689 2690 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) 2691 del_timer_sync(&priv->tx_queue[chan].txtimer); 2692 2693 /* Free the IRQ lines */ 2694 free_irq(dev->irq, dev); 2695 if (priv->wol_irq != dev->irq) 2696 free_irq(priv->wol_irq, dev); 2697 if (priv->lpi_irq > 0) 2698 free_irq(priv->lpi_irq, dev); 2699 2700 /* Stop TX/RX DMA and clear the descriptors */ 2701 stmmac_stop_all_dma(priv); 2702 2703 /* Release and free the Rx/Tx resources */ 2704 free_dma_desc_resources(priv); 2705 2706 /* Disable the MAC Rx/Tx */ 2707 stmmac_mac_set(priv, priv->ioaddr, false); 2708 2709 netif_carrier_off(dev); 2710 2711 stmmac_release_ptp(priv); 2712 2713 return 0; 2714 } 2715 2716 /** 2717 * stmmac_tso_allocator - prepare TSO descriptors for a buffer 2718 * @priv: driver private structure 2719 * @des: buffer start address 2720 * @total_len: total length to fill in descriptors 2721 * @last_segment: condition for the last descriptor 2722 * @queue: TX queue index 2723 * Description: 2724 * This function fills the descriptors and requests new descriptors according 2725 * to the buffer length to fill 2726 */ 2727 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, 2728 int total_len, bool last_segment, u32 queue) 2729 { 2730 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 2731 struct dma_desc *desc; 2732 u32 buff_size; 2733 int tmp_len; 2734 2735 tmp_len = total_len; 2736 2737 while (tmp_len > 0) { 2738 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); 2739 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 2740 desc = tx_q->dma_tx + tx_q->cur_tx; 2741 2742 desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); 2743 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? 2744 TSO_MAX_BUFF_SIZE : tmp_len; 2745 2746 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, 2747 0, 1, 2748 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), 2749 0, 0); 2750 2751 tmp_len -= TSO_MAX_BUFF_SIZE; 2752 } 2753 } 2754 2755 /** 2756 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) 2757 * @skb : the socket buffer 2758 * @dev : device pointer 2759 * Description: this is the transmit function that is called on TSO frames 2760 * (support available on GMAC4 and newer chips). 2761 * The diagram below shows the ring programming in case of TSO frames: 2762 * 2763 * First Descriptor 2764 * -------- 2765 * | DES0 |---> buffer1 = L2/L3/L4 header 2766 * | DES1 |---> TCP Payload (can continue on next descr...) 2767 * | DES2 |---> buffer 1 and 2 len 2768 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] 2769 * -------- 2770 * | 2771 * ... 2772 * | 2773 * -------- 2774 * | DES0 | --| Split TCP Payload on Buffers 1 and 2 2775 * | DES1 | --| 2776 * | DES2 | --> buffer 1 and 2 len 2777 * | DES3 | 2778 * -------- 2779 * 2780 * The MSS is fixed while TSO is enabled, so the TDES3 ctx field only needs to be programmed (via a context descriptor) when the MSS changes.
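 * stmmac_tso_allocator() above chops each mapped buffer into chunks of at most TSO_MAX_BUFF_SIZE bytes, e.g. a 40000 byte fragment takes three descriptors (16383 + 16383 + 7234 bytes).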
2781 */ 2782 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) 2783 { 2784 struct dma_desc *desc, *first, *mss_desc = NULL; 2785 struct stmmac_priv *priv = netdev_priv(dev); 2786 int nfrags = skb_shinfo(skb)->nr_frags; 2787 u32 queue = skb_get_queue_mapping(skb); 2788 unsigned int first_entry, des; 2789 struct stmmac_tx_queue *tx_q; 2790 int tmp_pay_len = 0; 2791 u32 pay_len, mss; 2792 u8 proto_hdr_len; 2793 int i; 2794 2795 tx_q = &priv->tx_queue[queue]; 2796 2797 /* Compute header lengths */ 2798 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2799 2800 /* Desc availability based on threshold should be enough safe */ 2801 if (unlikely(stmmac_tx_avail(priv, queue) < 2802 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { 2803 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 2804 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 2805 queue)); 2806 /* This is a hard error, log it. */ 2807 netdev_err(priv->dev, 2808 "%s: Tx Ring full when queue awake\n", 2809 __func__); 2810 } 2811 return NETDEV_TX_BUSY; 2812 } 2813 2814 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ 2815 2816 mss = skb_shinfo(skb)->gso_size; 2817 2818 /* set new MSS value if needed */ 2819 if (mss != tx_q->mss) { 2820 mss_desc = tx_q->dma_tx + tx_q->cur_tx; 2821 stmmac_set_mss(priv, mss_desc, mss); 2822 tx_q->mss = mss; 2823 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); 2824 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); 2825 } 2826 2827 if (netif_msg_tx_queued(priv)) { 2828 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n", 2829 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss); 2830 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, 2831 skb->data_len); 2832 } 2833 2834 first_entry = tx_q->cur_tx; 2835 WARN_ON(tx_q->tx_skbuff[first_entry]); 2836 2837 desc = tx_q->dma_tx + first_entry; 2838 first = desc; 2839 2840 /* first descriptor: fill Headers on Buf1 */ 2841 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), 2842 DMA_TO_DEVICE); 2843 if (dma_mapping_error(priv->device, des)) 2844 goto dma_map_err; 2845 2846 tx_q->tx_skbuff_dma[first_entry].buf = des; 2847 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); 2848 2849 first->des0 = cpu_to_le32(des); 2850 2851 /* Fill start of payload in buff2 of first descriptor */ 2852 if (pay_len) 2853 first->des1 = cpu_to_le32(des + proto_hdr_len); 2854 2855 /* If needed take extra descriptors to fill the remaining payload */ 2856 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; 2857 2858 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 2859 2860 /* Prepare fragments */ 2861 for (i = 0; i < nfrags; i++) { 2862 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2863 2864 des = skb_frag_dma_map(priv->device, frag, 0, 2865 skb_frag_size(frag), 2866 DMA_TO_DEVICE); 2867 if (dma_mapping_error(priv->device, des)) 2868 goto dma_map_err; 2869 2870 stmmac_tso_allocator(priv, des, skb_frag_size(frag), 2871 (i == nfrags - 1), queue); 2872 2873 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; 2874 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); 2875 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; 2876 } 2877 2878 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; 2879 2880 /* Only the last descriptor gets to point to the skb. */ 2881 tx_q->tx_skbuff[tx_q->cur_tx] = skb; 2882 2883 /* We've used all descriptors we need for this skb, however, 2884 * advance cur_tx so that it references a fresh descriptor. 
2885 * ndo_start_xmit will fill this descriptor the next time it's 2886 * called and stmmac_tx_clean may clean up to this descriptor. 2887 */ 2888 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); 2889 2890 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 2891 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 2892 __func__); 2893 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 2894 } 2895 2896 dev->stats.tx_bytes += skb->len; 2897 priv->xstats.tx_tso_frames++; 2898 priv->xstats.tx_tso_nfrags += nfrags; 2899 2900 /* Manage tx mitigation */ 2901 tx_q->tx_count_frames += nfrags + 1; 2902 if (priv->tx_coal_frames <= tx_q->tx_count_frames) { 2903 stmmac_set_tx_ic(priv, desc); 2904 priv->xstats.tx_set_ic_bit++; 2905 tx_q->tx_count_frames = 0; 2906 } else { 2907 stmmac_tx_timer_arm(priv, queue); 2908 } 2909 2910 skb_tx_timestamp(skb); 2911 2912 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 2913 priv->hwts_tx_en)) { 2914 /* declare that device is doing timestamping */ 2915 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2916 stmmac_enable_tx_timestamp(priv, first); 2917 } 2918 2919 /* Complete the first descriptor before granting the DMA */ 2920 stmmac_prepare_tso_tx_desc(priv, first, 1, 2921 proto_hdr_len, 2922 pay_len, 2923 1, tx_q->tx_skbuff_dma[first_entry].last_segment, 2924 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); 2925 2926 /* If context desc is used to change MSS */ 2927 if (mss_desc) { 2928 /* Make sure that first descriptor has been completely 2929 * written, including its own bit. This is because MSS is 2930 * actually before first descriptor, so we need to make 2931 * sure that MSS's own bit is the last thing written. 2932 */ 2933 dma_wmb(); 2934 stmmac_set_tx_owner(priv, mss_desc); 2935 } 2936 2937 /* The own bit must be the latest setting done when prepare the 2938 * descriptor and then barrier is needed to make sure that 2939 * all is coherent before granting the DMA engine. 2940 */ 2941 wmb(); 2942 2943 if (netif_msg_pktdata(priv)) { 2944 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", 2945 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 2946 tx_q->cur_tx, first, nfrags); 2947 2948 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0); 2949 2950 pr_info(">>> frame to be transmitted: "); 2951 print_pkt(skb->data, skb_headlen(skb)); 2952 } 2953 2954 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 2955 2956 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); 2957 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 2958 2959 return NETDEV_TX_OK; 2960 2961 dma_map_err: 2962 dev_err(priv->device, "Tx dma map failed\n"); 2963 dev_kfree_skb(skb); 2964 priv->dev->stats.tx_dropped++; 2965 return NETDEV_TX_OK; 2966 } 2967 2968 /** 2969 * stmmac_xmit - Tx entry point of the driver 2970 * @skb : the socket buffer 2971 * @dev : device pointer 2972 * Description : this is the tx entry point of the driver. 2973 * It programs the chain or the ring and supports oversized frames 2974 * and SG feature. 
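 * TCP GSO frames are redirected to stmmac_tso_xmit() (always on queue 0) when TSO is enabled; everything else is programmed here descriptor by descriptor.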
2975 */ 2976 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) 2977 { 2978 struct stmmac_priv *priv = netdev_priv(dev); 2979 unsigned int nopaged_len = skb_headlen(skb); 2980 int i, csum_insertion = 0, is_jumbo = 0; 2981 u32 queue = skb_get_queue_mapping(skb); 2982 int nfrags = skb_shinfo(skb)->nr_frags; 2983 int entry; 2984 unsigned int first_entry; 2985 struct dma_desc *desc, *first; 2986 struct stmmac_tx_queue *tx_q; 2987 unsigned int enh_desc; 2988 unsigned int des; 2989 2990 tx_q = &priv->tx_queue[queue]; 2991 2992 if (priv->tx_path_in_lpi_mode) 2993 stmmac_disable_eee_mode(priv); 2994 2995 /* Manage oversized TCP frames for GMAC4 device */ 2996 if (skb_is_gso(skb) && priv->tso) { 2997 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { 2998 /* 2999 * There is no way to determine the number of TSO 3000 * capable Queues. Let's use always the Queue 0 3001 * because if TSO is supported then at least this 3002 * one will be capable. 3003 */ 3004 skb_set_queue_mapping(skb, 0); 3005 3006 return stmmac_tso_xmit(skb, dev); 3007 } 3008 } 3009 3010 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { 3011 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { 3012 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, 3013 queue)); 3014 /* This is a hard error, log it. */ 3015 netdev_err(priv->dev, 3016 "%s: Tx Ring full when queue awake\n", 3017 __func__); 3018 } 3019 return NETDEV_TX_BUSY; 3020 } 3021 3022 entry = tx_q->cur_tx; 3023 first_entry = entry; 3024 WARN_ON(tx_q->tx_skbuff[first_entry]); 3025 3026 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 3027 3028 if (likely(priv->extend_desc)) 3029 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 3030 else 3031 desc = tx_q->dma_tx + entry; 3032 3033 first = desc; 3034 3035 enh_desc = priv->plat->enh_desc; 3036 /* To program the descriptors according to the size of the frame */ 3037 if (enh_desc) 3038 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); 3039 3040 if (unlikely(is_jumbo)) { 3041 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); 3042 if (unlikely(entry < 0) && (entry != -EINVAL)) 3043 goto dma_map_err; 3044 } 3045 3046 for (i = 0; i < nfrags; i++) { 3047 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 3048 int len = skb_frag_size(frag); 3049 bool last_segment = (i == (nfrags - 1)); 3050 3051 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3052 WARN_ON(tx_q->tx_skbuff[entry]); 3053 3054 if (likely(priv->extend_desc)) 3055 desc = (struct dma_desc *)(tx_q->dma_etx + entry); 3056 else 3057 desc = tx_q->dma_tx + entry; 3058 3059 des = skb_frag_dma_map(priv->device, frag, 0, len, 3060 DMA_TO_DEVICE); 3061 if (dma_mapping_error(priv->device, des)) 3062 goto dma_map_err; /* should reuse desc w/o issues */ 3063 3064 tx_q->tx_skbuff_dma[entry].buf = des; 3065 3066 stmmac_set_desc_addr(priv, desc, des); 3067 3068 tx_q->tx_skbuff_dma[entry].map_as_page = true; 3069 tx_q->tx_skbuff_dma[entry].len = len; 3070 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; 3071 3072 /* Prepare the descriptor and set the own bit too */ 3073 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, 3074 priv->mode, 1, last_segment, skb->len); 3075 } 3076 3077 /* Only the last descriptor gets to point to the skb. */ 3078 tx_q->tx_skbuff[entry] = skb; 3079 3080 /* We've used all descriptors we need for this skb, however, 3081 * advance cur_tx so that it references a fresh descriptor. 
3082 * ndo_start_xmit will fill this descriptor the next time it's 3083 * called and stmmac_tx_clean may clean up to this descriptor. 3084 */ 3085 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 3086 tx_q->cur_tx = entry; 3087 3088 if (netif_msg_pktdata(priv)) { 3089 void *tx_head; 3090 3091 netdev_dbg(priv->dev, 3092 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", 3093 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, 3094 entry, first, nfrags); 3095 3096 if (priv->extend_desc) 3097 tx_head = (void *)tx_q->dma_etx; 3098 else 3099 tx_head = (void *)tx_q->dma_tx; 3100 3101 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false); 3102 3103 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); 3104 print_pkt(skb->data, skb->len); 3105 } 3106 3107 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { 3108 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", 3109 __func__); 3110 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); 3111 } 3112 3113 dev->stats.tx_bytes += skb->len; 3114 3115 /* According to the coalesce parameter the IC bit for the latest 3116 * segment is reset and the timer re-started to clean the tx status. 3117 * This approach takes care about the fragments: desc is the first 3118 * element in case of no SG. 3119 */ 3120 tx_q->tx_count_frames += nfrags + 1; 3121 if (priv->tx_coal_frames <= tx_q->tx_count_frames) { 3122 stmmac_set_tx_ic(priv, desc); 3123 priv->xstats.tx_set_ic_bit++; 3124 tx_q->tx_count_frames = 0; 3125 } else { 3126 stmmac_tx_timer_arm(priv, queue); 3127 } 3128 3129 skb_tx_timestamp(skb); 3130 3131 /* Ready to fill the first descriptor and set the OWN bit w/o any 3132 * problems because all the descriptors are actually ready to be 3133 * passed to the DMA engine. 3134 */ 3135 if (likely(!is_jumbo)) { 3136 bool last_segment = (nfrags == 0); 3137 3138 des = dma_map_single(priv->device, skb->data, 3139 nopaged_len, DMA_TO_DEVICE); 3140 if (dma_mapping_error(priv->device, des)) 3141 goto dma_map_err; 3142 3143 tx_q->tx_skbuff_dma[first_entry].buf = des; 3144 3145 stmmac_set_desc_addr(priv, first, des); 3146 3147 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; 3148 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; 3149 3150 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 3151 priv->hwts_tx_en)) { 3152 /* declare that device is doing timestamping */ 3153 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 3154 stmmac_enable_tx_timestamp(priv, first); 3155 } 3156 3157 /* Prepare the first descriptor setting the OWN bit too */ 3158 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 3159 csum_insertion, priv->mode, 1, last_segment, 3160 skb->len); 3161 } else { 3162 stmmac_set_tx_owner(priv, first); 3163 } 3164 3165 /* The own bit must be the latest setting done when prepare the 3166 * descriptor and then barrier is needed to make sure that 3167 * all is coherent before granting the DMA engine. 
3168 */ 3169 wmb(); 3170 3171 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3172 3173 stmmac_enable_dma_transmission(priv, priv->ioaddr); 3174 3175 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); 3176 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); 3177 3178 return NETDEV_TX_OK; 3179 3180 dma_map_err: 3181 netdev_err(priv->dev, "Tx DMA map failed\n"); 3182 dev_kfree_skb(skb); 3183 priv->dev->stats.tx_dropped++; 3184 return NETDEV_TX_OK; 3185 } 3186 3187 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) 3188 { 3189 struct vlan_ethhdr *veth; 3190 __be16 vlan_proto; 3191 u16 vlanid; 3192 3193 veth = (struct vlan_ethhdr *)skb->data; 3194 vlan_proto = veth->h_vlan_proto; 3195 3196 if ((vlan_proto == htons(ETH_P_8021Q) && 3197 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || 3198 (vlan_proto == htons(ETH_P_8021AD) && 3199 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { 3200 /* pop the vlan tag */ 3201 vlanid = ntohs(veth->h_vlan_TCI); 3202 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); 3203 skb_pull(skb, VLAN_HLEN); 3204 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid); 3205 } 3206 } 3207 3208 3209 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q) 3210 { 3211 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH) 3212 return 0; 3213 3214 return 1; 3215 } 3216 3217 /** 3218 * stmmac_rx_refill - refill used skb preallocated buffers 3219 * @priv: driver private structure 3220 * @queue: RX queue index 3221 * Description : this is to reallocate the skb for the reception process 3222 * that is based on zero-copy. 3223 */ 3224 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) 3225 { 3226 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3227 int dirty = stmmac_rx_dirty(priv, queue); 3228 unsigned int entry = rx_q->dirty_rx; 3229 3230 int bfsize = priv->dma_buf_sz; 3231 3232 while (dirty-- > 0) { 3233 struct dma_desc *p; 3234 3235 if (priv->extend_desc) 3236 p = (struct dma_desc *)(rx_q->dma_erx + entry); 3237 else 3238 p = rx_q->dma_rx + entry; 3239 3240 if (likely(!rx_q->rx_skbuff[entry])) { 3241 struct sk_buff *skb; 3242 3243 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); 3244 if (unlikely(!skb)) { 3245 /* so for a while no zero-copy! */ 3246 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH; 3247 if (unlikely(net_ratelimit())) 3248 dev_err(priv->device, 3249 "fail to alloc skb entry %d\n", 3250 entry); 3251 break; 3252 } 3253 3254 rx_q->rx_skbuff[entry] = skb; 3255 rx_q->rx_skbuff_dma[entry] = 3256 dma_map_single(priv->device, skb->data, bfsize, 3257 DMA_FROM_DEVICE); 3258 if (dma_mapping_error(priv->device, 3259 rx_q->rx_skbuff_dma[entry])) { 3260 netdev_err(priv->dev, "Rx DMA map failed\n"); 3261 dev_kfree_skb(skb); 3262 break; 3263 } 3264 3265 stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]); 3266 stmmac_refill_desc3(priv, rx_q, p); 3267 3268 if (rx_q->rx_zeroc_thresh > 0) 3269 rx_q->rx_zeroc_thresh--; 3270 3271 netif_dbg(priv, rx_status, priv->dev, 3272 "refill entry #%d\n", entry); 3273 } 3274 dma_wmb(); 3275 3276 stmmac_set_rx_owner(priv, p, priv->use_riwt); 3277 3278 dma_wmb(); 3279 3280 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); 3281 } 3282 rx_q->dirty_rx = entry; 3283 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); 3284 } 3285 3286 /** 3287 * stmmac_rx - manage the receive process 3288 * @priv: driver private structure 3289 * @limit: napi bugget 3290 * @queue: RX queue index. 
3291 * Description : this the function called by the napi poll method. 3292 * It gets all the frames inside the ring. 3293 */ 3294 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) 3295 { 3296 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3297 struct stmmac_channel *ch = &priv->channel[queue]; 3298 unsigned int next_entry = rx_q->cur_rx; 3299 int coe = priv->hw->rx_csum; 3300 unsigned int count = 0; 3301 bool xmac; 3302 3303 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 3304 3305 if (netif_msg_rx_status(priv)) { 3306 void *rx_head; 3307 3308 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); 3309 if (priv->extend_desc) 3310 rx_head = (void *)rx_q->dma_erx; 3311 else 3312 rx_head = (void *)rx_q->dma_rx; 3313 3314 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); 3315 } 3316 while (count < limit) { 3317 int entry, status; 3318 struct dma_desc *p; 3319 struct dma_desc *np; 3320 3321 entry = next_entry; 3322 3323 if (priv->extend_desc) 3324 p = (struct dma_desc *)(rx_q->dma_erx + entry); 3325 else 3326 p = rx_q->dma_rx + entry; 3327 3328 /* read the status of the incoming frame */ 3329 status = stmmac_rx_status(priv, &priv->dev->stats, 3330 &priv->xstats, p); 3331 /* check if managed by the DMA otherwise go ahead */ 3332 if (unlikely(status & dma_own)) 3333 break; 3334 3335 count++; 3336 3337 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); 3338 next_entry = rx_q->cur_rx; 3339 3340 if (priv->extend_desc) 3341 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); 3342 else 3343 np = rx_q->dma_rx + next_entry; 3344 3345 prefetch(np); 3346 3347 if (priv->extend_desc) 3348 stmmac_rx_extended_status(priv, &priv->dev->stats, 3349 &priv->xstats, rx_q->dma_erx + entry); 3350 if (unlikely(status == discard_frame)) { 3351 priv->dev->stats.rx_errors++; 3352 if (priv->hwts_rx_en && !priv->extend_desc) { 3353 /* DESC2 & DESC3 will be overwritten by device 3354 * with timestamp value, hence reinitialize 3355 * them in stmmac_rx_refill() function so that 3356 * device can reuse it. 3357 */ 3358 dev_kfree_skb_any(rx_q->rx_skbuff[entry]); 3359 rx_q->rx_skbuff[entry] = NULL; 3360 dma_unmap_single(priv->device, 3361 rx_q->rx_skbuff_dma[entry], 3362 priv->dma_buf_sz, 3363 DMA_FROM_DEVICE); 3364 } 3365 } else { 3366 struct sk_buff *skb; 3367 int frame_len; 3368 unsigned int des; 3369 3370 stmmac_get_desc_addr(priv, p, &des); 3371 frame_len = stmmac_get_rx_frame_len(priv, p, coe); 3372 3373 /* If frame length is greater than skb buffer size 3374 * (preallocated during init) then the packet is 3375 * ignored 3376 */ 3377 if (frame_len > priv->dma_buf_sz) { 3378 if (net_ratelimit()) 3379 netdev_err(priv->dev, 3380 "len %d larger than size (%d)\n", 3381 frame_len, priv->dma_buf_sz); 3382 priv->dev->stats.rx_length_errors++; 3383 continue; 3384 } 3385 3386 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 3387 * Type frames (LLC/LLC-SNAP) 3388 * 3389 * llc_snap is never checked in GMAC >= 4, so this ACS 3390 * feature is always disabled and packets need to be 3391 * stripped manually. 
3392 */ 3393 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || 3394 unlikely(status != llc_snap)) 3395 frame_len -= ETH_FCS_LEN; 3396 3397 if (netif_msg_rx_status(priv)) { 3398 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n", 3399 p, entry, des); 3400 netdev_dbg(priv->dev, "frame size %d, COE: %d\n", 3401 frame_len, status); 3402 } 3403 3404 /* The zero-copy is always used for all the sizes 3405 * in case of GMAC4 because it needs 3406 * to refill the used descriptors, always. 3407 */ 3408 if (unlikely(!xmac && 3409 ((frame_len < priv->rx_copybreak) || 3410 stmmac_rx_threshold_count(rx_q)))) { 3411 skb = netdev_alloc_skb_ip_align(priv->dev, 3412 frame_len); 3413 if (unlikely(!skb)) { 3414 if (net_ratelimit()) 3415 dev_warn(priv->device, 3416 "packet dropped\n"); 3417 priv->dev->stats.rx_dropped++; 3418 continue; 3419 } 3420 3421 dma_sync_single_for_cpu(priv->device, 3422 rx_q->rx_skbuff_dma 3423 [entry], frame_len, 3424 DMA_FROM_DEVICE); 3425 skb_copy_to_linear_data(skb, 3426 rx_q-> 3427 rx_skbuff[entry]->data, 3428 frame_len); 3429 3430 skb_put(skb, frame_len); 3431 dma_sync_single_for_device(priv->device, 3432 rx_q->rx_skbuff_dma 3433 [entry], frame_len, 3434 DMA_FROM_DEVICE); 3435 } else { 3436 skb = rx_q->rx_skbuff[entry]; 3437 if (unlikely(!skb)) { 3438 if (net_ratelimit()) 3439 netdev_err(priv->dev, 3440 "%s: Inconsistent Rx chain\n", 3441 priv->dev->name); 3442 priv->dev->stats.rx_dropped++; 3443 continue; 3444 } 3445 prefetch(skb->data - NET_IP_ALIGN); 3446 rx_q->rx_skbuff[entry] = NULL; 3447 rx_q->rx_zeroc_thresh++; 3448 3449 skb_put(skb, frame_len); 3450 dma_unmap_single(priv->device, 3451 rx_q->rx_skbuff_dma[entry], 3452 priv->dma_buf_sz, 3453 DMA_FROM_DEVICE); 3454 } 3455 3456 if (netif_msg_pktdata(priv)) { 3457 netdev_dbg(priv->dev, "frame received (%dbytes)", 3458 frame_len); 3459 print_pkt(skb->data, frame_len); 3460 } 3461 3462 stmmac_get_rx_hwtstamp(priv, p, np, skb); 3463 3464 stmmac_rx_vlan(priv->dev, skb); 3465 3466 skb->protocol = eth_type_trans(skb, priv->dev); 3467 3468 if (unlikely(!coe)) 3469 skb_checksum_none_assert(skb); 3470 else 3471 skb->ip_summed = CHECKSUM_UNNECESSARY; 3472 3473 napi_gro_receive(&ch->rx_napi, skb); 3474 3475 priv->dev->stats.rx_packets++; 3476 priv->dev->stats.rx_bytes += frame_len; 3477 } 3478 } 3479 3480 stmmac_rx_refill(priv, queue); 3481 3482 priv->xstats.rx_pkt_n += count; 3483 3484 return count; 3485 } 3486 3487 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) 3488 { 3489 struct stmmac_channel *ch = 3490 container_of(napi, struct stmmac_channel, rx_napi); 3491 struct stmmac_priv *priv = ch->priv_data; 3492 u32 chan = ch->index; 3493 int work_done; 3494 3495 priv->xstats.napi_poll++; 3496 3497 work_done = stmmac_rx(priv, budget, chan); 3498 if (work_done < budget && napi_complete_done(napi, work_done)) 3499 stmmac_enable_dma_irq(priv, priv->ioaddr, chan); 3500 return work_done; 3501 } 3502 3503 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) 3504 { 3505 struct stmmac_channel *ch = 3506 container_of(napi, struct stmmac_channel, tx_napi); 3507 struct stmmac_priv *priv = ch->priv_data; 3508 struct stmmac_tx_queue *tx_q; 3509 u32 chan = ch->index; 3510 int work_done; 3511 3512 priv->xstats.napi_poll++; 3513 3514 work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan); 3515 work_done = min(work_done, budget); 3516 3517 if (work_done < budget && napi_complete_done(napi, work_done)) 3518 stmmac_enable_dma_irq(priv, priv->ioaddr, chan); 3519 3520 /* Force transmission restart */ 3521 tx_q = 
&priv->tx_queue[chan]; 3522 if (tx_q->cur_tx != tx_q->dirty_tx) { 3523 stmmac_enable_dma_transmission(priv, priv->ioaddr); 3524 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, 3525 chan); 3526 } 3527 3528 return work_done; 3529 } 3530 3531 /** 3532 * stmmac_tx_timeout 3533 * @dev : Pointer to net device structure 3534 * Description: this function is called when a packet transmission fails to 3535 * complete within a reasonable time. The driver will mark the error in the 3536 * netdev structure and arrange for the device to be reset to a sane state 3537 * in order to transmit a new packet. 3538 */ 3539 static void stmmac_tx_timeout(struct net_device *dev) 3540 { 3541 struct stmmac_priv *priv = netdev_priv(dev); 3542 3543 stmmac_global_err(priv); 3544 } 3545 3546 /** 3547 * stmmac_set_rx_mode - entry point for multicast addressing 3548 * @dev : pointer to the device structure 3549 * Description: 3550 * This function is a driver entry point which gets called by the kernel 3551 * whenever multicast addresses must be enabled/disabled. 3552 * Return value: 3553 * void. 3554 */ 3555 static void stmmac_set_rx_mode(struct net_device *dev) 3556 { 3557 struct stmmac_priv *priv = netdev_priv(dev); 3558 3559 stmmac_set_filter(priv, priv->hw, dev); 3560 } 3561 3562 /** 3563 * stmmac_change_mtu - entry point to change MTU size for the device. 3564 * @dev : device pointer. 3565 * @new_mtu : the new MTU size for the device. 3566 * Description: the Maximum Transfer Unit (MTU) is used by the network layer 3567 * to drive packet transmission. Ethernet has an MTU of 1500 octets 3568 * (ETH_DATA_LEN). This value can be changed with ifconfig. 3569 * Return value: 3570 * 0 on success and an appropriate (-)ve integer as defined in errno.h 3571 * file on failure. 3572 */ 3573 static int stmmac_change_mtu(struct net_device *dev, int new_mtu) 3574 { 3575 struct stmmac_priv *priv = netdev_priv(dev); 3576 3577 if (netif_running(dev)) { 3578 netdev_err(priv->dev, "must be stopped to change its MTU\n"); 3579 return -EBUSY; 3580 } 3581 3582 dev->mtu = new_mtu; 3583 3584 netdev_update_features(dev); 3585 3586 return 0; 3587 } 3588 3589 static netdev_features_t stmmac_fix_features(struct net_device *dev, 3590 netdev_features_t features) 3591 { 3592 struct stmmac_priv *priv = netdev_priv(dev); 3593 3594 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 3595 features &= ~NETIF_F_RXCSUM; 3596 3597 if (!priv->plat->tx_coe) 3598 features &= ~NETIF_F_CSUM_MASK; 3599 3600 /* Some GMAC devices have a bugged Jumbo frame support that 3601 * needs to have the Tx COE disabled for oversized frames 3602 * (due to limited buffer sizes). In this case we disable 3603 * the TX csum insertion in the TDES and not use SF. 3604 */ 3605 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) 3606 features &= ~NETIF_F_CSUM_MASK; 3607 3608 /* Disable tso if asked by ethtool */ 3609 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { 3610 if (features & NETIF_F_TSO) 3611 priv->tso = true; 3612 else 3613 priv->tso = false; 3614 } 3615 3616 return features; 3617 } 3618 3619 static int stmmac_set_features(struct net_device *netdev, 3620 netdev_features_t features) 3621 { 3622 struct stmmac_priv *priv = netdev_priv(netdev); 3623 3624 /* Keep the COE Type in case of csum is supporting */ 3625 if (features & NETIF_F_RXCSUM) 3626 priv->hw->rx_csum = priv->plat->rx_coe; 3627 else 3628 priv->hw->rx_csum = 0; 3629 /* No check needed because rx_coe has been set before and it will be 3630 * fixed in case of issue. 
3631 */ 3632 stmmac_rx_ipc(priv, priv->hw); 3633 3634 return 0; 3635 } 3636 3637 /** 3638 * stmmac_interrupt - main ISR 3639 * @irq: interrupt number. 3640 * @dev_id: to pass the net device pointer. 3641 * Description: this is the main driver interrupt service routine. 3642 * It can call: 3643 * o DMA service routine (to manage incoming frame reception and transmission 3644 * status) 3645 * o Core interrupts to manage: remote wake-up, management counter, LPI 3646 * interrupts. 3647 */ 3648 static irqreturn_t stmmac_interrupt(int irq, void *dev_id) 3649 { 3650 struct net_device *dev = (struct net_device *)dev_id; 3651 struct stmmac_priv *priv = netdev_priv(dev); 3652 u32 rx_cnt = priv->plat->rx_queues_to_use; 3653 u32 tx_cnt = priv->plat->tx_queues_to_use; 3654 u32 queues_count; 3655 u32 queue; 3656 bool xmac; 3657 3658 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; 3659 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; 3660 3661 if (priv->irq_wake) 3662 pm_wakeup_event(priv->device, 0); 3663 3664 if (unlikely(!dev)) { 3665 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); 3666 return IRQ_NONE; 3667 } 3668 3669 /* Check if adapter is up */ 3670 if (test_bit(STMMAC_DOWN, &priv->state)) 3671 return IRQ_HANDLED; 3672 /* Check if a fatal error happened */ 3673 if (stmmac_safety_feat_interrupt(priv)) 3674 return IRQ_HANDLED; 3675 3676 /* To handle GMAC own interrupts */ 3677 if ((priv->plat->has_gmac) || xmac) { 3678 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); 3679 int mtl_status; 3680 3681 if (unlikely(status)) { 3682 /* For LPI we need to save the tx status */ 3683 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) 3684 priv->tx_path_in_lpi_mode = true; 3685 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) 3686 priv->tx_path_in_lpi_mode = false; 3687 } 3688 3689 for (queue = 0; queue < queues_count; queue++) { 3690 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3691 3692 mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw, 3693 queue); 3694 if (mtl_status != -EINVAL) 3695 status |= mtl_status; 3696 3697 if (status & CORE_IRQ_MTL_RX_OVERFLOW) 3698 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, 3699 rx_q->rx_tail_addr, 3700 queue); 3701 } 3702 3703 /* PCS link status */ 3704 if (priv->hw->pcs) { 3705 if (priv->xstats.pcs_link) 3706 netif_carrier_on(dev); 3707 else 3708 netif_carrier_off(dev); 3709 } 3710 } 3711 3712 /* To handle DMA interrupts */ 3713 stmmac_dma_interrupt(priv); 3714 3715 return IRQ_HANDLED; 3716 } 3717 3718 #ifdef CONFIG_NET_POLL_CONTROLLER 3719 /* Polling receive - used by NETCONSOLE and other diagnostic tools 3720 * to allow network I/O with interrupts disabled. 3721 */ 3722 static void stmmac_poll_controller(struct net_device *dev) 3723 { 3724 disable_irq(dev->irq); 3725 stmmac_interrupt(dev->irq, dev); 3726 enable_irq(dev->irq); 3727 } 3728 #endif 3729 3730 /** 3731 * stmmac_ioctl - Entry point for the Ioctl 3732 * @dev: Device pointer. 3733 * @rq: An IOCTL specefic structure, that can contain a pointer to 3734 * a proprietary structure used to pass information to the driver. 3735 * @cmd: IOCTL command 3736 * Description: 3737 * Currently it supports the phy_mii_ioctl(...) and HW time stamping. 
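 * Return value: 0 on success, -EINVAL if the interface is not running, -EOPNOTSUPP for unhandled commands, or the error code returned by the underlying handler.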
3738 */ 3739 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3740 { 3741 struct stmmac_priv *priv = netdev_priv (dev); 3742 int ret = -EOPNOTSUPP; 3743 3744 if (!netif_running(dev)) 3745 return -EINVAL; 3746 3747 switch (cmd) { 3748 case SIOCGMIIPHY: 3749 case SIOCGMIIREG: 3750 case SIOCSMIIREG: 3751 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); 3752 break; 3753 case SIOCSHWTSTAMP: 3754 ret = stmmac_hwtstamp_set(dev, rq); 3755 break; 3756 case SIOCGHWTSTAMP: 3757 ret = stmmac_hwtstamp_get(dev, rq); 3758 break; 3759 default: 3760 break; 3761 } 3762 3763 return ret; 3764 } 3765 3766 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data, 3767 void *cb_priv) 3768 { 3769 struct stmmac_priv *priv = cb_priv; 3770 int ret = -EOPNOTSUPP; 3771 3772 stmmac_disable_all_queues(priv); 3773 3774 switch (type) { 3775 case TC_SETUP_CLSU32: 3776 if (tc_cls_can_offload_and_chain0(priv->dev, type_data)) 3777 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data); 3778 break; 3779 default: 3780 break; 3781 } 3782 3783 stmmac_enable_all_queues(priv); 3784 return ret; 3785 } 3786 3787 static int stmmac_setup_tc_block(struct stmmac_priv *priv, 3788 struct tc_block_offload *f) 3789 { 3790 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) 3791 return -EOPNOTSUPP; 3792 3793 switch (f->command) { 3794 case TC_BLOCK_BIND: 3795 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb, 3796 priv, priv, f->extack); 3797 case TC_BLOCK_UNBIND: 3798 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv); 3799 return 0; 3800 default: 3801 return -EOPNOTSUPP; 3802 } 3803 } 3804 3805 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type, 3806 void *type_data) 3807 { 3808 struct stmmac_priv *priv = netdev_priv(ndev); 3809 3810 switch (type) { 3811 case TC_SETUP_BLOCK: 3812 return stmmac_setup_tc_block(priv, type_data); 3813 case TC_SETUP_QDISC_CBS: 3814 return stmmac_tc_setup_cbs(priv, priv, type_data); 3815 default: 3816 return -EOPNOTSUPP; 3817 } 3818 } 3819 3820 static int stmmac_set_mac_address(struct net_device *ndev, void *addr) 3821 { 3822 struct stmmac_priv *priv = netdev_priv(ndev); 3823 int ret = 0; 3824 3825 ret = eth_mac_addr(ndev, addr); 3826 if (ret) 3827 return ret; 3828 3829 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); 3830 3831 return ret; 3832 } 3833 3834 #ifdef CONFIG_DEBUG_FS 3835 static struct dentry *stmmac_fs_dir; 3836 3837 static void sysfs_display_ring(void *head, int size, int extend_desc, 3838 struct seq_file *seq) 3839 { 3840 int i; 3841 struct dma_extended_desc *ep = (struct dma_extended_desc *)head; 3842 struct dma_desc *p = (struct dma_desc *)head; 3843 3844 for (i = 0; i < size; i++) { 3845 if (extend_desc) { 3846 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 3847 i, (unsigned int)virt_to_phys(ep), 3848 le32_to_cpu(ep->basic.des0), 3849 le32_to_cpu(ep->basic.des1), 3850 le32_to_cpu(ep->basic.des2), 3851 le32_to_cpu(ep->basic.des3)); 3852 ep++; 3853 } else { 3854 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", 3855 i, (unsigned int)virt_to_phys(p), 3856 le32_to_cpu(p->des0), le32_to_cpu(p->des1), 3857 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); 3858 p++; 3859 } 3860 seq_printf(seq, "\n"); 3861 } 3862 } 3863 3864 static int stmmac_rings_status_show(struct seq_file *seq, void *v) 3865 { 3866 struct net_device *dev = seq->private; 3867 struct stmmac_priv *priv = netdev_priv(dev); 3868 u32 rx_count = priv->plat->rx_queues_to_use; 3869 u32 tx_count = priv->plat->tx_queues_to_use; 
3870 u32 queue; 3871 3872 if ((dev->flags & IFF_UP) == 0) 3873 return 0; 3874 3875 for (queue = 0; queue < rx_count; queue++) { 3876 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3877 3878 seq_printf(seq, "RX Queue %d:\n", queue); 3879 3880 if (priv->extend_desc) { 3881 seq_printf(seq, "Extended descriptor ring:\n"); 3882 sysfs_display_ring((void *)rx_q->dma_erx, 3883 DMA_RX_SIZE, 1, seq); 3884 } else { 3885 seq_printf(seq, "Descriptor ring:\n"); 3886 sysfs_display_ring((void *)rx_q->dma_rx, 3887 DMA_RX_SIZE, 0, seq); 3888 } 3889 } 3890 3891 for (queue = 0; queue < tx_count; queue++) { 3892 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; 3893 3894 seq_printf(seq, "TX Queue %d:\n", queue); 3895 3896 if (priv->extend_desc) { 3897 seq_printf(seq, "Extended descriptor ring:\n"); 3898 sysfs_display_ring((void *)tx_q->dma_etx, 3899 DMA_TX_SIZE, 1, seq); 3900 } else { 3901 seq_printf(seq, "Descriptor ring:\n"); 3902 sysfs_display_ring((void *)tx_q->dma_tx, 3903 DMA_TX_SIZE, 0, seq); 3904 } 3905 } 3906 3907 return 0; 3908 } 3909 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); 3910 3911 static int stmmac_dma_cap_show(struct seq_file *seq, void *v) 3912 { 3913 struct net_device *dev = seq->private; 3914 struct stmmac_priv *priv = netdev_priv(dev); 3915 3916 if (!priv->hw_cap_support) { 3917 seq_printf(seq, "DMA HW features not supported\n"); 3918 return 0; 3919 } 3920 3921 seq_printf(seq, "==============================\n"); 3922 seq_printf(seq, "\tDMA HW features\n"); 3923 seq_printf(seq, "==============================\n"); 3924 3925 seq_printf(seq, "\t10/100 Mbps: %s\n", 3926 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); 3927 seq_printf(seq, "\t1000 Mbps: %s\n", 3928 (priv->dma_cap.mbps_1000) ? "Y" : "N"); 3929 seq_printf(seq, "\tHalf duplex: %s\n", 3930 (priv->dma_cap.half_duplex) ? "Y" : "N"); 3931 seq_printf(seq, "\tHash Filter: %s\n", 3932 (priv->dma_cap.hash_filter) ? "Y" : "N"); 3933 seq_printf(seq, "\tMultiple MAC address registers: %s\n", 3934 (priv->dma_cap.multi_addr) ? "Y" : "N"); 3935 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", 3936 (priv->dma_cap.pcs) ? "Y" : "N"); 3937 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", 3938 (priv->dma_cap.sma_mdio) ? "Y" : "N"); 3939 seq_printf(seq, "\tPMT Remote wake up: %s\n", 3940 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); 3941 seq_printf(seq, "\tPMT Magic Frame: %s\n", 3942 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); 3943 seq_printf(seq, "\tRMON module: %s\n", 3944 (priv->dma_cap.rmon) ? "Y" : "N"); 3945 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", 3946 (priv->dma_cap.time_stamp) ? "Y" : "N"); 3947 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", 3948 (priv->dma_cap.atime_stamp) ? "Y" : "N"); 3949 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", 3950 (priv->dma_cap.eee) ? "Y" : "N"); 3951 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); 3952 seq_printf(seq, "\tChecksum Offload in TX: %s\n", 3953 (priv->dma_cap.tx_coe) ? "Y" : "N"); 3954 if (priv->synopsys_id >= DWMAC_CORE_4_00) { 3955 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", 3956 (priv->dma_cap.rx_coe) ? "Y" : "N"); 3957 } else { 3958 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", 3959 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); 3960 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", 3961 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); 3962 } 3963 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", 3964 (priv->dma_cap.rxfifo_over_2048) ? 
"Y" : "N"); 3965 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", 3966 priv->dma_cap.number_rx_channel); 3967 seq_printf(seq, "\tNumber of Additional TX channel: %d\n", 3968 priv->dma_cap.number_tx_channel); 3969 seq_printf(seq, "\tEnhanced descriptors: %s\n", 3970 (priv->dma_cap.enh_desc) ? "Y" : "N"); 3971 3972 return 0; 3973 } 3974 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); 3975 3976 static int stmmac_init_fs(struct net_device *dev) 3977 { 3978 struct stmmac_priv *priv = netdev_priv(dev); 3979 3980 /* Create per netdev entries */ 3981 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); 3982 3983 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { 3984 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n"); 3985 3986 return -ENOMEM; 3987 } 3988 3989 /* Entry to report DMA RX/TX rings */ 3990 priv->dbgfs_rings_status = 3991 debugfs_create_file("descriptors_status", 0444, 3992 priv->dbgfs_dir, dev, 3993 &stmmac_rings_status_fops); 3994 3995 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { 3996 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n"); 3997 debugfs_remove_recursive(priv->dbgfs_dir); 3998 3999 return -ENOMEM; 4000 } 4001 4002 /* Entry to report the DMA HW features */ 4003 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444, 4004 priv->dbgfs_dir, 4005 dev, &stmmac_dma_cap_fops); 4006 4007 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { 4008 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n"); 4009 debugfs_remove_recursive(priv->dbgfs_dir); 4010 4011 return -ENOMEM; 4012 } 4013 4014 return 0; 4015 } 4016 4017 static void stmmac_exit_fs(struct net_device *dev) 4018 { 4019 struct stmmac_priv *priv = netdev_priv(dev); 4020 4021 debugfs_remove_recursive(priv->dbgfs_dir); 4022 } 4023 #endif /* CONFIG_DEBUG_FS */ 4024 4025 static const struct net_device_ops stmmac_netdev_ops = { 4026 .ndo_open = stmmac_open, 4027 .ndo_start_xmit = stmmac_xmit, 4028 .ndo_stop = stmmac_release, 4029 .ndo_change_mtu = stmmac_change_mtu, 4030 .ndo_fix_features = stmmac_fix_features, 4031 .ndo_set_features = stmmac_set_features, 4032 .ndo_set_rx_mode = stmmac_set_rx_mode, 4033 .ndo_tx_timeout = stmmac_tx_timeout, 4034 .ndo_do_ioctl = stmmac_ioctl, 4035 .ndo_setup_tc = stmmac_setup_tc, 4036 #ifdef CONFIG_NET_POLL_CONTROLLER 4037 .ndo_poll_controller = stmmac_poll_controller, 4038 #endif 4039 .ndo_set_mac_address = stmmac_set_mac_address, 4040 }; 4041 4042 static void stmmac_reset_subtask(struct stmmac_priv *priv) 4043 { 4044 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) 4045 return; 4046 if (test_bit(STMMAC_DOWN, &priv->state)) 4047 return; 4048 4049 netdev_err(priv->dev, "Reset adapter.\n"); 4050 4051 rtnl_lock(); 4052 netif_trans_update(priv->dev); 4053 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) 4054 usleep_range(1000, 2000); 4055 4056 set_bit(STMMAC_DOWN, &priv->state); 4057 dev_close(priv->dev); 4058 dev_open(priv->dev, NULL); 4059 clear_bit(STMMAC_DOWN, &priv->state); 4060 clear_bit(STMMAC_RESETING, &priv->state); 4061 rtnl_unlock(); 4062 } 4063 4064 static void stmmac_service_task(struct work_struct *work) 4065 { 4066 struct stmmac_priv *priv = container_of(work, struct stmmac_priv, 4067 service_task); 4068 4069 stmmac_reset_subtask(priv); 4070 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); 4071 } 4072 4073 /** 4074 * stmmac_hw_init - Init the MAC device 4075 * @priv: driver private structure 4076 * Description: this function is to configure the MAC device according to 4077 * some 
platform parameters or the HW capability register. It prepares the
 * driver to use either ring or chain mode and to set up either enhanced or
 * normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only works in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC, newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields
		 * (e.g. enh_desc, tx_coe) that are passed through the
		 * platform with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On LAN supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in cores newer than 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled; this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	     (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to
 * call alloc_etherdev and allocate the priv structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 queue, maxq;
	int ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	if (!IS_ERR_OR_NULL(res->mac))
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		return -ENOMEM;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only a reset callback instead
		 * of the assert + deassert callback pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	stmmac_check_ether_addr(priv);

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	ret = stmmac_tc_init(priv, priv);
	if (!ret)
		ndev->hw_features |= NETIF_F_HW_TC;

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Setup channels NAPI */
	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;

		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
				       NAPI_POLL_WEIGHT);
		}
		if (queue < priv->plat->tx_queues_to_use) {
			netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
				       NAPI_POLL_WEIGHT);
		}
	}

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Vice versa, the driver will
	 * try to set the MDC clock dynamically according to the actual
	 * csr clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

#ifdef CONFIG_DEBUG_FS
	ret = stmmac_init_fs(ndev);
	if (ret < 0)
		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
			    __func__);
#endif

	return ret;

error_netdev_register:
	phylink_destroy(priv->phylink);
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
	}
error_hw_init:
	destroy_workqueue(priv->wq);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);

/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver\n", __func__);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	stmmac_stop_all_dma(priv);

	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	clk_disable_unprepare(priv->plat->pclk);
	clk_disable_unprepare(priv->plat->stmmac_clk);
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device. It is called
 * by the platform driver to stop the network queue, program the PMT
 * register (for WoL) and clean up and release the driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	if (!ndev || !netif_running(ndev))
		return 0;

	phylink_stop(priv->phylink);

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);
	stmmac_stop_all_queues(priv);

	stmmac_disable_all_queues(priv);

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device)) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable the clocks when wake-up is not used */
		clk_disable(priv->plat->pclk);
		clk_disable(priv->plat->stmmac_clk);
	}
	mutex_unlock(&priv->lock);

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: driver private structure
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;
	}
}

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: on resume, this function is invoked to set up the DMA and
 * CORE in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	if (!netif_running(ndev))
		return 0;

	/* The Power Down bit, in the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received.
Anyway, it's better to manually clear 4512 * this bit because it can generate problems while resuming 4513 * from another devices (e.g. serial console). 4514 */ 4515 if (device_may_wakeup(priv->device)) { 4516 mutex_lock(&priv->lock); 4517 stmmac_pmt(priv, priv->hw, 0); 4518 mutex_unlock(&priv->lock); 4519 priv->irq_wake = 0; 4520 } else { 4521 pinctrl_pm_select_default_state(priv->device); 4522 /* enable the clk previously disabled */ 4523 clk_enable(priv->plat->stmmac_clk); 4524 clk_enable(priv->plat->pclk); 4525 /* reset the phy so that it's ready */ 4526 if (priv->mii) 4527 stmmac_mdio_reset(priv->mii); 4528 } 4529 4530 netif_device_attach(ndev); 4531 4532 mutex_lock(&priv->lock); 4533 4534 stmmac_reset_queues_param(priv); 4535 4536 stmmac_clear_descriptors(priv); 4537 4538 stmmac_hw_setup(ndev, false); 4539 stmmac_init_tx_coalesce(priv); 4540 stmmac_set_rx_mode(ndev); 4541 4542 stmmac_enable_all_queues(priv); 4543 4544 stmmac_start_all_queues(priv); 4545 4546 mutex_unlock(&priv->lock); 4547 4548 phylink_start(priv->phylink); 4549 4550 return 0; 4551 } 4552 EXPORT_SYMBOL_GPL(stmmac_resume); 4553 4554 #ifndef MODULE 4555 static int __init stmmac_cmdline_opt(char *str) 4556 { 4557 char *opt; 4558 4559 if (!str || !*str) 4560 return -EINVAL; 4561 while ((opt = strsep(&str, ",")) != NULL) { 4562 if (!strncmp(opt, "debug:", 6)) { 4563 if (kstrtoint(opt + 6, 0, &debug)) 4564 goto err; 4565 } else if (!strncmp(opt, "phyaddr:", 8)) { 4566 if (kstrtoint(opt + 8, 0, &phyaddr)) 4567 goto err; 4568 } else if (!strncmp(opt, "buf_sz:", 7)) { 4569 if (kstrtoint(opt + 7, 0, &buf_sz)) 4570 goto err; 4571 } else if (!strncmp(opt, "tc:", 3)) { 4572 if (kstrtoint(opt + 3, 0, &tc)) 4573 goto err; 4574 } else if (!strncmp(opt, "watchdog:", 9)) { 4575 if (kstrtoint(opt + 9, 0, &watchdog)) 4576 goto err; 4577 } else if (!strncmp(opt, "flow_ctrl:", 10)) { 4578 if (kstrtoint(opt + 10, 0, &flow_ctrl)) 4579 goto err; 4580 } else if (!strncmp(opt, "pause:", 6)) { 4581 if (kstrtoint(opt + 6, 0, &pause)) 4582 goto err; 4583 } else if (!strncmp(opt, "eee_timer:", 10)) { 4584 if (kstrtoint(opt + 10, 0, &eee_timer)) 4585 goto err; 4586 } else if (!strncmp(opt, "chain_mode:", 11)) { 4587 if (kstrtoint(opt + 11, 0, &chain_mode)) 4588 goto err; 4589 } 4590 } 4591 return 0; 4592 4593 err: 4594 pr_err("%s: ERROR broken module parameter conversion", __func__); 4595 return -EINVAL; 4596 } 4597 4598 __setup("stmmaceth=", stmmac_cmdline_opt); 4599 #endif /* MODULE */ 4600 4601 static int __init stmmac_init(void) 4602 { 4603 #ifdef CONFIG_DEBUG_FS 4604 /* Create debugfs main directory if it doesn't exist yet */ 4605 if (!stmmac_fs_dir) { 4606 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); 4607 4608 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { 4609 pr_err("ERROR %s, debugfs create directory failed\n", 4610 STMMAC_RESOURCE_NAME); 4611 4612 return -ENOMEM; 4613 } 4614 } 4615 #endif 4616 4617 return 0; 4618 } 4619 4620 static void __exit stmmac_exit(void) 4621 { 4622 #ifdef CONFIG_DEBUG_FS 4623 debugfs_remove_recursive(stmmac_fs_dir); 4624 #endif 4625 } 4626 4627 module_init(stmmac_init) 4628 module_exit(stmmac_exit) 4629 4630 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); 4631 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 4632 MODULE_LICENSE("GPL"); 4633
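/*
 * Illustrative sketch (not part of the driver): how a minimal platform
 * "glue" layer could hook the entry points exported above
 * (stmmac_dvr_probe()/stmmac_dvr_remove() and stmmac_suspend()/stmmac_resume())
 * into the driver model.  The my_dwmac_* names and the "example,dwmac"
 * compatible string are hypothetical, and the plat_stmmacenet_data setup is
 * deliberately incomplete; real glue layers (dwmac-*.c, stmmac_platform.c)
 * derive the full configuration from DT/ACPI, normally live in their own
 * module, and typically reuse stmmac_pltfr_pm_ops.  The block is guarded by
 * #if 0 so it is never built.
 */
#if 0
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/io.h>

static int my_dwmac_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res = { };
	struct resource *r;

	/* MMIO region and main interrupt of the MAC */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res.addr = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(res.addr))
		return PTR_ERR(res.addr);

	res.irq = platform_get_irq(pdev, 0);
	if (res.irq < 0)
		return res.irq;
	res.wol_irq = res.irq;		/* reuse the main IRQ for WoL */
	res.lpi_irq = -ENODEV;		/* no LPI interrupt in this sketch */

	/* A real glue layer fills this from the device tree (see
	 * stmmac_probe_config_dt() in stmmac_platform.c): interface mode,
	 * clocks, dma_cfg details, FIFO sizes, number of queues, maxmtu, ...
	 */
	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;
	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
				     GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;
	plat->rx_queues_to_use = 1;
	plat->tx_queues_to_use = 1;
	plat->clk_csr = -1;	/* let stmmac_clk_csr_set() pick the MDC clock */

	return stmmac_dvr_probe(&pdev->dev, plat, &res);
}

static int my_dwmac_remove(struct platform_device *pdev)
{
	return stmmac_dvr_remove(&pdev->dev);
}

static SIMPLE_DEV_PM_OPS(my_dwmac_pm_ops, stmmac_suspend, stmmac_resume);

static const struct of_device_id my_dwmac_match[] = {
	{ .compatible = "example,dwmac" },	/* hypothetical binding */
	{ }
};
MODULE_DEVICE_TABLE(of, my_dwmac_match);

static struct platform_driver my_dwmac_driver = {
	.probe	= my_dwmac_probe,
	.remove	= my_dwmac_remove,
	.driver	= {
		.name		= "my-dwmac",
		.pm		= &my_dwmac_pm_ops,
		.of_match_table	= my_dwmac_match,
	},
};
module_platform_driver(my_dwmac_driver);
#endif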