// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
 * Copyright (c) 2014, Synopsys, Inc.
 * All rights reserved
 */

#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
#include <linux/pci.h>

#include "xgbe.h"
#include "xgbe-common.h"
#include "xgbe-smn.h"

static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
	return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
				      unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_usec_to_riwt\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_mhz / 10^6 ) ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	DBGPR("<--xgbe_usec_to_riwt\n");

	return ret;
}

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
				      unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_riwt_to_usec\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
	 */
	ret = (riwt * 256) / (rate / 1000000);

	DBGPR("<--xgbe_riwt_to_usec\n");

	return ret;
}

static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
	unsigned int pblx8, pbl;
	unsigned int i;

	pblx8 = DMA_PBL_X8_DISABLE;
	pbl = pdata->pbl;

	if (pdata->pbl > 32) {
		pblx8 = DMA_PBL_X8_ENABLE;
		pbl >>= 3;
	}

	for (i = 0; i < pdata->channel_count; i++) {
		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
				       pblx8);

		if (pdata->channel[i]->tx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
					       PBL, pbl);

		if (pdata->channel[i]->rx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
					       PBL, pbl);
	}

	return 0;
}

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
				       pdata->tx_osp_mode);
	}

	return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i <
pdata->tx_q_count; i++) 152 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); 153 154 return 0; 155 } 156 157 static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata) 158 { 159 unsigned int i; 160 161 for (i = 0; i < pdata->channel_count; i++) { 162 if (!pdata->channel[i]->rx_ring) 163 break; 164 165 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT, 166 pdata->rx_riwt); 167 } 168 169 return 0; 170 } 171 172 static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata) 173 { 174 return 0; 175 } 176 177 static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata) 178 { 179 unsigned int i; 180 181 for (i = 0; i < pdata->channel_count; i++) { 182 if (!pdata->channel[i]->rx_ring) 183 break; 184 185 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ, 186 pdata->rx_buf_size); 187 } 188 } 189 190 static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata) 191 { 192 unsigned int i; 193 194 for (i = 0; i < pdata->channel_count; i++) { 195 if (!pdata->channel[i]->tx_ring) 196 break; 197 198 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1); 199 } 200 } 201 202 static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) 203 { 204 unsigned int i; 205 206 for (i = 0; i < pdata->channel_count; i++) { 207 if (!pdata->channel[i]->rx_ring) 208 break; 209 210 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1); 211 } 212 213 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); 214 } 215 216 static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata) 217 { 218 unsigned int i; 219 220 for (i = 0; i < pdata->channel_count; i++) { 221 if (!pdata->channel[i]->rx_ring) 222 break; 223 224 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0); 225 } 226 } 227 228 static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, 229 unsigned int index, unsigned int val) 230 { 231 unsigned int wait; 232 int ret = 0; 233 234 mutex_lock(&pdata->rss_mutex); 235 236 if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) { 237 ret = -EBUSY; 238 goto unlock; 239 } 240 241 XGMAC_IOWRITE(pdata, MAC_RSSDR, val); 242 243 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); 244 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); 245 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); 246 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); 247 248 wait = 1000; 249 while (wait--) { 250 if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) 251 goto unlock; 252 253 usleep_range(1000, 1500); 254 } 255 256 ret = -EBUSY; 257 258 unlock: 259 mutex_unlock(&pdata->rss_mutex); 260 261 return ret; 262 } 263 264 static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata) 265 { 266 unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); 267 unsigned int *key = (unsigned int *)&pdata->rss_key; 268 int ret; 269 270 while (key_regs--) { 271 ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE, 272 key_regs, *key++); 273 if (ret) 274 return ret; 275 } 276 277 return 0; 278 } 279 280 static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata) 281 { 282 unsigned int i; 283 int ret; 284 285 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { 286 ret = xgbe_write_rss_reg(pdata, 287 XGBE_RSS_LOOKUP_TABLE_TYPE, i, 288 pdata->rss_table[i]); 289 if (ret) 290 return ret; 291 } 292 293 return 0; 294 } 295 296 static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key) 297 { 298 memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); 299 300 return xgbe_write_rss_hash_key(pdata); 301 } 302 303 static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, 
304 const u32 *table) 305 { 306 unsigned int i; 307 308 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) 309 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]); 310 311 return xgbe_write_rss_lookup_table(pdata); 312 } 313 314 static int xgbe_enable_rss(struct xgbe_prv_data *pdata) 315 { 316 int ret; 317 318 if (!pdata->hw_feat.rss) 319 return -EOPNOTSUPP; 320 321 /* Program the hash key */ 322 ret = xgbe_write_rss_hash_key(pdata); 323 if (ret) 324 return ret; 325 326 /* Program the lookup table */ 327 ret = xgbe_write_rss_lookup_table(pdata); 328 if (ret) 329 return ret; 330 331 /* Set the RSS options */ 332 XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); 333 334 /* Enable RSS */ 335 XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); 336 337 return 0; 338 } 339 340 static int xgbe_disable_rss(struct xgbe_prv_data *pdata) 341 { 342 if (!pdata->hw_feat.rss) 343 return -EOPNOTSUPP; 344 345 XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); 346 347 return 0; 348 } 349 350 static void xgbe_config_rss(struct xgbe_prv_data *pdata) 351 { 352 int ret; 353 354 if (!pdata->hw_feat.rss) 355 return; 356 357 if (pdata->netdev->features & NETIF_F_RXHASH) 358 ret = xgbe_enable_rss(pdata); 359 else 360 ret = xgbe_disable_rss(pdata); 361 362 if (ret) 363 netdev_err(pdata->netdev, 364 "error configuring RSS, RSS disabled\n"); 365 } 366 367 static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata, 368 unsigned int queue) 369 { 370 unsigned int prio, tc; 371 372 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { 373 /* Does this queue handle the priority? */ 374 if (pdata->prio2q_map[prio] != queue) 375 continue; 376 377 /* Get the Traffic Class for this priority */ 378 tc = pdata->ets->prio_tc[prio]; 379 380 /* Check if PFC is enabled for this traffic class */ 381 if (pdata->pfc->pfc_en & (1 << tc)) 382 return true; 383 } 384 385 return false; 386 } 387 388 static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata) 389 { 390 /* Program the VXLAN port */ 391 XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port); 392 393 netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n", 394 pdata->vxlan_port); 395 } 396 397 static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata) 398 { 399 if (!pdata->hw_feat.vxn) 400 return; 401 402 /* Program the VXLAN port */ 403 xgbe_set_vxlan_id(pdata); 404 405 /* Allow for IPv6/UDP zero-checksum VXLAN packets */ 406 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1); 407 408 /* Enable VXLAN tunneling mode */ 409 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0); 410 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1); 411 412 netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n"); 413 } 414 415 static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata) 416 { 417 if (!pdata->hw_feat.vxn) 418 return; 419 420 /* Disable tunneling mode */ 421 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0); 422 423 /* Clear IPv6/UDP zero-checksum VXLAN packets setting */ 424 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0); 425 426 /* Clear the VXLAN port */ 427 XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0); 428 429 netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n"); 430 } 431 432 static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata) 433 { 434 unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 435 436 /* From MAC ver 30H the TFCR is per priority, instead of per queue */ 437 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) 438 return max_q_count; 439 else 440 return min_t(unsigned int, pdata->tx_q_count, max_q_count); 441 } 
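/* A minimal standalone sketch (not part of this driver) illustrating the
 * RIWT conversion performed by xgbe_usec_to_riwt()/xgbe_riwt_to_usec()
 * earlier in this file: one RIWT unit corresponds to 256 system-clock
 * cycles. The 125 MHz clock rate and the helper name are assumptions
 * made only for the worked numbers.
 */
static unsigned int example_usec_to_riwt(unsigned long sysclk_hz,
					 unsigned int usec)
{
	/* e.g. sysclk_hz = 125000000, usec = 100:
	 *   100 * (125000000 / 1000000) / 256 = 12500 / 256 = 48
	 * Converting 48 back gives 48 * 256 / 125 = 98 usec, showing the
	 * truncation introduced by the integer math.
	 */
	return (usec * (sysclk_hz / 1000000)) / 256;
}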
442 443 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) 444 { 445 unsigned int reg, reg_val; 446 unsigned int i, q_count; 447 448 /* Clear MTL flow control */ 449 for (i = 0; i < pdata->rx_q_count; i++) 450 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); 451 452 /* Clear MAC flow control */ 453 q_count = xgbe_get_fc_queue_count(pdata); 454 reg = MAC_Q0TFCR; 455 for (i = 0; i < q_count; i++) { 456 reg_val = XGMAC_IOREAD(pdata, reg); 457 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); 458 XGMAC_IOWRITE(pdata, reg, reg_val); 459 460 reg += MAC_QTFCR_INC; 461 } 462 463 return 0; 464 } 465 466 static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) 467 { 468 struct ieee_pfc *pfc = pdata->pfc; 469 struct ieee_ets *ets = pdata->ets; 470 unsigned int reg, reg_val; 471 unsigned int i, q_count; 472 473 /* Set MTL flow control */ 474 for (i = 0; i < pdata->rx_q_count; i++) { 475 unsigned int ehfc = 0; 476 477 if (pdata->rx_rfd[i]) { 478 /* Flow control thresholds are established */ 479 if (pfc && ets) { 480 if (xgbe_is_pfc_queue(pdata, i)) 481 ehfc = 1; 482 } else { 483 ehfc = 1; 484 } 485 } 486 487 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); 488 489 netif_dbg(pdata, drv, pdata->netdev, 490 "flow control %s for RXq%u\n", 491 ehfc ? "enabled" : "disabled", i); 492 } 493 494 /* Set MAC flow control */ 495 q_count = xgbe_get_fc_queue_count(pdata); 496 reg = MAC_Q0TFCR; 497 for (i = 0; i < q_count; i++) { 498 reg_val = XGMAC_IOREAD(pdata, reg); 499 500 /* Enable transmit flow control */ 501 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); 502 /* Set pause time */ 503 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); 504 505 XGMAC_IOWRITE(pdata, reg, reg_val); 506 507 reg += MAC_QTFCR_INC; 508 } 509 510 return 0; 511 } 512 513 static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata) 514 { 515 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); 516 517 return 0; 518 } 519 520 static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) 521 { 522 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); 523 524 return 0; 525 } 526 527 static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) 528 { 529 struct ieee_pfc *pfc = pdata->pfc; 530 531 if (pdata->tx_pause || (pfc && pfc->pfc_en)) 532 xgbe_enable_tx_flow_control(pdata); 533 else 534 xgbe_disable_tx_flow_control(pdata); 535 536 return 0; 537 } 538 539 static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) 540 { 541 struct ieee_pfc *pfc = pdata->pfc; 542 543 if (pdata->rx_pause || (pfc && pfc->pfc_en)) 544 xgbe_enable_rx_flow_control(pdata); 545 else 546 xgbe_disable_rx_flow_control(pdata); 547 548 return 0; 549 } 550 551 static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) 552 { 553 struct ieee_pfc *pfc = pdata->pfc; 554 555 xgbe_config_tx_flow_control(pdata); 556 xgbe_config_rx_flow_control(pdata); 557 558 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 559 (pfc && pfc->pfc_en) ? 
1 : 0); 560 } 561 562 static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) 563 { 564 struct xgbe_channel *channel; 565 unsigned int i, ver; 566 567 /* Set the interrupt mode if supported */ 568 if (pdata->channel_irq_mode) 569 XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM, 570 pdata->channel_irq_mode); 571 572 ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); 573 574 for (i = 0; i < pdata->channel_count; i++) { 575 channel = pdata->channel[i]; 576 577 /* Clear all the interrupts which are set */ 578 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, 579 XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); 580 581 /* Clear all interrupt enable bits */ 582 channel->curr_ier = 0; 583 584 /* Enable following interrupts 585 * NIE - Normal Interrupt Summary Enable 586 * AIE - Abnormal Interrupt Summary Enable 587 * FBEE - Fatal Bus Error Enable 588 */ 589 if (ver < 0x21) { 590 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1); 591 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1); 592 } else { 593 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1); 594 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1); 595 } 596 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); 597 598 if (channel->tx_ring) { 599 /* Enable the following Tx interrupts 600 * TIE - Transmit Interrupt Enable (unless using 601 * per channel interrupts in edge triggered 602 * mode) 603 */ 604 if (!pdata->per_channel_irq || pdata->channel_irq_mode) 605 XGMAC_SET_BITS(channel->curr_ier, 606 DMA_CH_IER, TIE, 1); 607 } 608 if (channel->rx_ring) { 609 /* Enable following Rx interrupts 610 * RBUE - Receive Buffer Unavailable Enable 611 * RIE - Receive Interrupt Enable (unless using 612 * per channel interrupts in edge triggered 613 * mode) 614 */ 615 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); 616 if (!pdata->per_channel_irq || pdata->channel_irq_mode) 617 XGMAC_SET_BITS(channel->curr_ier, 618 DMA_CH_IER, RIE, 1); 619 } 620 621 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); 622 } 623 } 624 625 static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) 626 { 627 unsigned int mtl_q_isr; 628 unsigned int q_count, i; 629 630 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); 631 for (i = 0; i < q_count; i++) { 632 /* Clear all the interrupts which are set */ 633 mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR); 634 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr); 635 636 /* No MTL interrupts to be enabled */ 637 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0); 638 } 639 } 640 641 static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) 642 { 643 unsigned int mac_ier = 0; 644 645 /* Enable Timestamp interrupt */ 646 XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1); 647 648 XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); 649 650 /* Enable all counter interrupts */ 651 XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff); 652 XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff); 653 654 /* Enable MDIO single command completion interrupt */ 655 XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1); 656 } 657 658 static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata) 659 { 660 unsigned int ecc_isr, ecc_ier = 0; 661 662 if (!pdata->vdata->ecc_support) 663 return; 664 665 /* Clear all the interrupts which are set */ 666 ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR); 667 XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr); 668 669 /* Enable ECC interrupts */ 670 XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1); 671 XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1); 672 XP_SET_BITS(ecc_ier, 
XP_ECC_IER, RX_DED, 1); 673 XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1); 674 XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1); 675 XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1); 676 677 XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier); 678 } 679 680 static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata) 681 { 682 unsigned int ecc_ier; 683 684 ecc_ier = XP_IOREAD(pdata, XP_ECC_IER); 685 686 /* Disable ECC DED interrupts */ 687 XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0); 688 XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0); 689 XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0); 690 691 XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier); 692 } 693 694 static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata, 695 enum xgbe_ecc_sec sec) 696 { 697 unsigned int ecc_ier; 698 699 ecc_ier = XP_IOREAD(pdata, XP_ECC_IER); 700 701 /* Disable ECC SEC interrupt */ 702 switch (sec) { 703 case XGBE_ECC_SEC_TX: 704 XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0); 705 break; 706 case XGBE_ECC_SEC_RX: 707 XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0); 708 break; 709 case XGBE_ECC_SEC_DESC: 710 XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0); 711 break; 712 } 713 714 XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier); 715 } 716 717 static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed) 718 { 719 unsigned int ss; 720 721 switch (speed) { 722 case SPEED_10: 723 ss = 0x07; 724 break; 725 case SPEED_1000: 726 ss = 0x03; 727 break; 728 case SPEED_2500: 729 ss = 0x02; 730 break; 731 case SPEED_10000: 732 ss = 0x00; 733 break; 734 default: 735 return -EINVAL; 736 } 737 738 if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss) 739 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss); 740 741 return 0; 742 } 743 744 static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) 745 { 746 /* Put the VLAN tag in the Rx descriptor */ 747 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); 748 749 /* Don't check the VLAN type */ 750 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); 751 752 /* Check only C-TAG (0x8100) packets */ 753 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); 754 755 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ 756 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); 757 758 /* Enable VLAN tag stripping */ 759 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); 760 761 return 0; 762 } 763 764 static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) 765 { 766 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); 767 768 return 0; 769 } 770 771 static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) 772 { 773 /* Enable VLAN filtering */ 774 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); 775 776 /* Enable VLAN Hash Table filtering */ 777 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); 778 779 /* Disable VLAN tag inverse matching */ 780 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); 781 782 /* Only filter on the lower 12-bits of the VLAN tag */ 783 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); 784 785 /* In order for the VLAN Hash Table filtering to be effective, 786 * the VLAN tag identifier in the VLAN Tag Register must not 787 * be zero. Set the VLAN tag identifier to "1" to enable the 788 * VLAN Hash Table filtering. This implies that a VLAN tag of 789 * 1 will always pass filtering. 
790 */ 791 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); 792 793 return 0; 794 } 795 796 static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) 797 { 798 /* Disable VLAN filtering */ 799 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); 800 801 return 0; 802 } 803 804 static u32 xgbe_vid_crc32_le(__le16 vid_le) 805 { 806 u32 crc = ~0; 807 u32 temp = 0; 808 unsigned char *data = (unsigned char *)&vid_le; 809 unsigned char data_byte = 0; 810 int i, bits; 811 812 bits = get_bitmask_order(VLAN_VID_MASK); 813 for (i = 0; i < bits; i++) { 814 if ((i % 8) == 0) 815 data_byte = data[i / 8]; 816 817 temp = ((crc & 1) ^ data_byte) & 1; 818 crc >>= 1; 819 data_byte >>= 1; 820 821 if (temp) 822 crc ^= CRC32_POLY_LE; 823 } 824 825 return crc; 826 } 827 828 static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) 829 { 830 u32 crc; 831 u16 vid; 832 __le16 vid_le; 833 u16 vlan_hash_table = 0; 834 835 /* Generate the VLAN Hash Table value */ 836 for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { 837 /* Get the CRC32 value of the VLAN ID */ 838 vid_le = cpu_to_le16(vid); 839 crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; 840 841 vlan_hash_table |= (1 << crc); 842 } 843 844 /* Set the VLAN Hash Table filtering register */ 845 XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); 846 847 return 0; 848 } 849 850 static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, 851 unsigned int enable) 852 { 853 unsigned int val = enable ? 1 : 0; 854 855 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val) 856 return 0; 857 858 netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n", 859 enable ? "entering" : "leaving"); 860 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val); 861 862 /* Hardware will still perform VLAN filtering in promiscuous mode */ 863 if (enable) { 864 xgbe_disable_rx_vlan_filtering(pdata); 865 } else { 866 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) 867 xgbe_enable_rx_vlan_filtering(pdata); 868 } 869 870 return 0; 871 } 872 873 static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, 874 unsigned int enable) 875 { 876 unsigned int val = enable ? 1 : 0; 877 878 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val) 879 return 0; 880 881 netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n", 882 enable ? 
"entering" : "leaving"); 883 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val); 884 885 return 0; 886 } 887 888 static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata, 889 struct netdev_hw_addr *ha, unsigned int *mac_reg) 890 { 891 unsigned int mac_addr_hi, mac_addr_lo; 892 u8 *mac_addr; 893 894 mac_addr_lo = 0; 895 mac_addr_hi = 0; 896 897 if (ha) { 898 mac_addr = (u8 *)&mac_addr_lo; 899 mac_addr[0] = ha->addr[0]; 900 mac_addr[1] = ha->addr[1]; 901 mac_addr[2] = ha->addr[2]; 902 mac_addr[3] = ha->addr[3]; 903 mac_addr = (u8 *)&mac_addr_hi; 904 mac_addr[0] = ha->addr[4]; 905 mac_addr[1] = ha->addr[5]; 906 907 netif_dbg(pdata, drv, pdata->netdev, 908 "adding mac address %pM at %#x\n", 909 ha->addr, *mac_reg); 910 911 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); 912 } 913 914 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi); 915 *mac_reg += MAC_MACA_INC; 916 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo); 917 *mac_reg += MAC_MACA_INC; 918 } 919 920 static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata) 921 { 922 struct net_device *netdev = pdata->netdev; 923 struct netdev_hw_addr *ha; 924 unsigned int mac_reg; 925 unsigned int addn_macs; 926 927 mac_reg = MAC_MACA1HR; 928 addn_macs = pdata->hw_feat.addn_mac; 929 930 if (netdev_uc_count(netdev) > addn_macs) { 931 xgbe_set_promiscuous_mode(pdata, 1); 932 } else { 933 netdev_for_each_uc_addr(ha, netdev) { 934 xgbe_set_mac_reg(pdata, ha, &mac_reg); 935 addn_macs--; 936 } 937 938 if (netdev_mc_count(netdev) > addn_macs) { 939 xgbe_set_all_multicast_mode(pdata, 1); 940 } else { 941 netdev_for_each_mc_addr(ha, netdev) { 942 xgbe_set_mac_reg(pdata, ha, &mac_reg); 943 addn_macs--; 944 } 945 } 946 } 947 948 /* Clear remaining additional MAC address entries */ 949 while (addn_macs--) 950 xgbe_set_mac_reg(pdata, NULL, &mac_reg); 951 } 952 953 static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata) 954 { 955 struct net_device *netdev = pdata->netdev; 956 struct netdev_hw_addr *ha; 957 unsigned int hash_reg; 958 unsigned int hash_table_shift, hash_table_count; 959 u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE]; 960 u32 crc; 961 unsigned int i; 962 963 hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7); 964 hash_table_count = pdata->hw_feat.hash_table_size / 32; 965 memset(hash_table, 0, sizeof(hash_table)); 966 967 /* Build the MAC Hash Table register values */ 968 netdev_for_each_uc_addr(ha, netdev) { 969 crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); 970 crc >>= hash_table_shift; 971 hash_table[crc >> 5] |= (1 << (crc & 0x1f)); 972 } 973 974 netdev_for_each_mc_addr(ha, netdev) { 975 crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); 976 crc >>= hash_table_shift; 977 hash_table[crc >> 5] |= (1 << (crc & 0x1f)); 978 } 979 980 /* Set the MAC Hash Table registers */ 981 hash_reg = MAC_HTR0; 982 for (i = 0; i < hash_table_count; i++) { 983 XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]); 984 hash_reg += MAC_HTR_INC; 985 } 986 } 987 988 static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) 989 { 990 if (pdata->hw_feat.hash_table_size) 991 xgbe_set_mac_hash_table(pdata); 992 else 993 xgbe_set_mac_addn_addrs(pdata); 994 995 return 0; 996 } 997 998 static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr) 999 { 1000 unsigned int mac_addr_hi, mac_addr_lo; 1001 1002 mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); 1003 mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | 1004 (addr[1] << 8) | (addr[0] << 0); 1005 1006 XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi); 1007 XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo); 1008 
1009 return 0; 1010 } 1011 1012 static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata) 1013 { 1014 struct net_device *netdev = pdata->netdev; 1015 unsigned int pr_mode, am_mode; 1016 1017 pr_mode = ((netdev->flags & IFF_PROMISC) != 0); 1018 am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); 1019 1020 xgbe_set_promiscuous_mode(pdata, pr_mode); 1021 xgbe_set_all_multicast_mode(pdata, am_mode); 1022 1023 xgbe_add_mac_addresses(pdata); 1024 1025 return 0; 1026 } 1027 1028 static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) 1029 { 1030 unsigned int reg; 1031 1032 if (gpio > 15) 1033 return -EINVAL; 1034 1035 reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); 1036 1037 reg &= ~(1 << (gpio + 16)); 1038 XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); 1039 1040 return 0; 1041 } 1042 1043 static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) 1044 { 1045 unsigned int reg; 1046 1047 if (gpio > 15) 1048 return -EINVAL; 1049 1050 reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); 1051 1052 reg |= (1 << (gpio + 16)); 1053 XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); 1054 1055 return 0; 1056 } 1057 1058 static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata, 1059 int mmd_reg) 1060 { 1061 return (mmd_reg & XGBE_ADDR_C45) ? 1062 mmd_reg & ~XGBE_ADDR_C45 : 1063 (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 1064 } 1065 1066 static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata, 1067 unsigned int mmd_address, 1068 unsigned int *index, 1069 unsigned int *offset) 1070 { 1071 /* The PCS registers are accessed using mmio. The underlying 1072 * management interface uses indirect addressing to access the MMD 1073 * register sets. This requires accessing of the PCS register in two 1074 * phases, an address phase and a data phase. 1075 * 1076 * The mmio interface is based on 16-bit offsets and values. All 1077 * register offsets must therefore be adjusted by left shifting the 1078 * offset 1 bit and reading 16 bits of data. 1079 */ 1080 mmd_address <<= 1; 1081 *index = mmd_address & ~pdata->xpcs_window_mask; 1082 *offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); 1083 } 1084 1085 static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad, 1086 int mmd_reg) 1087 { 1088 unsigned int mmd_address, index, offset; 1089 u32 smn_address; 1090 int mmd_data; 1091 int ret; 1092 1093 mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); 1094 1095 xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset); 1096 1097 smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg; 1098 ret = amd_smn_write(0, smn_address, index); 1099 if (ret) 1100 return ret; 1101 1102 ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data); 1103 if (ret) 1104 return ret; 1105 1106 mmd_data = (offset % 4) ? 
FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) : 1107 FIELD_GET(XGBE_GEN_LO_MASK, mmd_data); 1108 1109 return mmd_data; 1110 } 1111 1112 static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad, 1113 int mmd_reg, int mmd_data) 1114 { 1115 unsigned int pci_mmd_data, hi_mask, lo_mask; 1116 unsigned int mmd_address, index, offset; 1117 struct pci_dev *dev; 1118 u32 smn_address; 1119 int ret; 1120 1121 dev = pdata->pcidev; 1122 mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); 1123 1124 xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset); 1125 1126 smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg; 1127 ret = amd_smn_write(0, smn_address, index); 1128 if (ret) { 1129 pci_err(dev, "Failed to write data 0x%x\n", index); 1130 return; 1131 } 1132 1133 ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data); 1134 if (ret) { 1135 pci_err(dev, "Failed to read data\n"); 1136 return; 1137 } 1138 1139 if (offset % 4) { 1140 hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data); 1141 lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data); 1142 } else { 1143 hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, 1144 FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data)); 1145 lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data); 1146 } 1147 1148 pci_mmd_data = hi_mask | lo_mask; 1149 1150 ret = amd_smn_write(0, smn_address, index); 1151 if (ret) { 1152 pci_err(dev, "Failed to write data 0x%x\n", index); 1153 return; 1154 } 1155 1156 ret = amd_smn_write(0, (pdata->smn_base + offset), pci_mmd_data); 1157 if (ret) { 1158 pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data); 1159 return; 1160 } 1161 } 1162 1163 static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, 1164 int mmd_reg) 1165 { 1166 unsigned int mmd_address, index, offset; 1167 unsigned long flags; 1168 int mmd_data; 1169 1170 mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); 1171 1172 xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset); 1173 1174 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1175 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); 1176 mmd_data = XPCS16_IOREAD(pdata, offset); 1177 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1178 1179 return mmd_data; 1180 } 1181 1182 static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, 1183 int mmd_reg, int mmd_data) 1184 { 1185 unsigned long flags; 1186 unsigned int mmd_address, index, offset; 1187 1188 mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); 1189 1190 xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset); 1191 1192 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1193 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); 1194 XPCS16_IOWRITE(pdata, offset, mmd_data); 1195 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1196 } 1197 1198 static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, 1199 int mmd_reg) 1200 { 1201 unsigned long flags; 1202 unsigned int mmd_address; 1203 int mmd_data; 1204 1205 mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); 1206 1207 /* The PCS registers are accessed using mmio. The underlying APB3 1208 * management interface uses indirect addressing to access the MMD 1209 * register sets. This requires accessing of the PCS register in two 1210 * phases, an address phase and a data phase. 1211 * 1212 * The mmio interface is based on 32-bit offsets and values. All 1213 * register offsets must therefore be adjusted by left shifting the 1214 * offset 2 bits and reading 32 bits of data. 
1215 */ 1216 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1217 XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); 1218 mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2); 1219 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1220 1221 return mmd_data; 1222 } 1223 1224 static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, 1225 int mmd_reg, int mmd_data) 1226 { 1227 unsigned int mmd_address; 1228 unsigned long flags; 1229 1230 mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); 1231 1232 /* The PCS registers are accessed using mmio. The underlying APB3 1233 * management interface uses indirect addressing to access the MMD 1234 * register sets. This requires accessing of the PCS register in two 1235 * phases, an address phase and a data phase. 1236 * 1237 * The mmio interface is based on 32-bit offsets and values. All 1238 * register offsets must therefore be adjusted by left shifting the 1239 * offset 2 bits and writing 32 bits of data. 1240 */ 1241 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1242 XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); 1243 XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); 1244 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1245 } 1246 1247 static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, 1248 int mmd_reg) 1249 { 1250 switch (pdata->vdata->xpcs_access) { 1251 case XGBE_XPCS_ACCESS_V1: 1252 return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg); 1253 1254 case XGBE_XPCS_ACCESS_V2: 1255 default: 1256 return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg); 1257 1258 case XGBE_XPCS_ACCESS_V3: 1259 return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg); 1260 } 1261 } 1262 1263 static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, 1264 int mmd_reg, int mmd_data) 1265 { 1266 switch (pdata->vdata->xpcs_access) { 1267 case XGBE_XPCS_ACCESS_V1: 1268 return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data); 1269 1270 case XGBE_XPCS_ACCESS_V3: 1271 return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data); 1272 1273 case XGBE_XPCS_ACCESS_V2: 1274 default: 1275 return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data); 1276 } 1277 } 1278 1279 static unsigned int xgbe_create_mdio_sca_c22(int port, int reg) 1280 { 1281 unsigned int mdio_sca; 1282 1283 mdio_sca = 0; 1284 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); 1285 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); 1286 1287 return mdio_sca; 1288 } 1289 1290 static unsigned int xgbe_create_mdio_sca_c45(int port, unsigned int da, int reg) 1291 { 1292 unsigned int mdio_sca; 1293 1294 mdio_sca = 0; 1295 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); 1296 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); 1297 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); 1298 1299 return mdio_sca; 1300 } 1301 1302 static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, 1303 unsigned int mdio_sca, u16 val) 1304 { 1305 unsigned int mdio_sccd; 1306 1307 reinit_completion(&pdata->mdio_complete); 1308 1309 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1310 1311 mdio_sccd = 0; 1312 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val); 1313 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1); 1314 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); 1315 XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); 1316 1317 if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) { 1318 netdev_err(pdata->netdev, "mdio write operation timed out\n"); 1319 return -ETIMEDOUT; 1320 } 1321 1322 return 0; 1323 } 1324 1325 static int 
xgbe_write_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr, 1326 int reg, u16 val) 1327 { 1328 unsigned int mdio_sca; 1329 1330 mdio_sca = xgbe_create_mdio_sca_c22(addr, reg); 1331 1332 return xgbe_write_ext_mii_regs(pdata, mdio_sca, val); 1333 } 1334 1335 static int xgbe_write_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr, 1336 int devad, int reg, u16 val) 1337 { 1338 unsigned int mdio_sca; 1339 1340 mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg); 1341 1342 return xgbe_write_ext_mii_regs(pdata, mdio_sca, val); 1343 } 1344 1345 static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, 1346 unsigned int mdio_sca) 1347 { 1348 unsigned int mdio_sccd; 1349 1350 reinit_completion(&pdata->mdio_complete); 1351 1352 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1353 1354 mdio_sccd = 0; 1355 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3); 1356 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); 1357 XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); 1358 1359 if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) { 1360 netdev_err(pdata->netdev, "mdio read operation timed out\n"); 1361 return -ETIMEDOUT; 1362 } 1363 1364 return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA); 1365 } 1366 1367 static int xgbe_read_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr, 1368 int reg) 1369 { 1370 unsigned int mdio_sca; 1371 1372 mdio_sca = xgbe_create_mdio_sca_c22(addr, reg); 1373 1374 return xgbe_read_ext_mii_regs(pdata, mdio_sca); 1375 } 1376 1377 static int xgbe_read_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr, 1378 int devad, int reg) 1379 { 1380 unsigned int mdio_sca; 1381 1382 mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg); 1383 1384 return xgbe_read_ext_mii_regs(pdata, mdio_sca); 1385 } 1386 1387 static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, 1388 enum xgbe_mdio_mode mode) 1389 { 1390 unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R); 1391 1392 switch (mode) { 1393 case XGBE_MDIO_MODE_CL22: 1394 if (port > XGMAC_MAX_C22_PORT) 1395 return -EINVAL; 1396 reg_val |= (1 << port); 1397 break; 1398 case XGBE_MDIO_MODE_CL45: 1399 break; 1400 default: 1401 return -EINVAL; 1402 } 1403 1404 XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val); 1405 1406 return 0; 1407 } 1408 1409 static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc) 1410 { 1411 return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN); 1412 } 1413 1414 static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata) 1415 { 1416 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); 1417 1418 return 0; 1419 } 1420 1421 static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) 1422 { 1423 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); 1424 1425 return 0; 1426 } 1427 1428 static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) 1429 { 1430 struct xgbe_ring_desc *rdesc = rdata->rdesc; 1431 1432 /* Reset the Tx descriptor 1433 * Set buffer 1 (lo) address to zero 1434 * Set buffer 1 (hi) address to zero 1435 * Reset all other control bits (IC, TTSE, B2L & B1L) 1436 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) 1437 */ 1438 rdesc->desc0 = 0; 1439 rdesc->desc1 = 0; 1440 rdesc->desc2 = 0; 1441 rdesc->desc3 = 0; 1442 1443 /* Make sure ownership is written to the descriptor */ 1444 dma_wmb(); 1445 } 1446 1447 static void xgbe_tx_desc_init(struct xgbe_channel *channel) 1448 { 1449 struct xgbe_ring *ring = channel->tx_ring; 1450 struct xgbe_ring_data *rdata; 1451 int i; 1452 int start_index = ring->cur; 1453 1454 DBGPR("-->tx_desc_init\n"); 1455 1456 /* 
Initialze all descriptors */ 1457 for (i = 0; i < ring->rdesc_count; i++) { 1458 rdata = XGBE_GET_DESC_DATA(ring, i); 1459 1460 /* Initialize Tx descriptor */ 1461 xgbe_tx_desc_reset(rdata); 1462 } 1463 1464 /* Update the total number of Tx descriptors */ 1465 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1); 1466 1467 /* Update the starting address of descriptor ring */ 1468 rdata = XGBE_GET_DESC_DATA(ring, start_index); 1469 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI, 1470 upper_32_bits(rdata->rdesc_dma)); 1471 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO, 1472 lower_32_bits(rdata->rdesc_dma)); 1473 1474 DBGPR("<--tx_desc_init\n"); 1475 } 1476 1477 static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata, 1478 struct xgbe_ring_data *rdata, unsigned int index) 1479 { 1480 struct xgbe_ring_desc *rdesc = rdata->rdesc; 1481 unsigned int rx_usecs = pdata->rx_usecs; 1482 unsigned int rx_frames = pdata->rx_frames; 1483 unsigned int inte; 1484 dma_addr_t hdr_dma, buf_dma; 1485 1486 if (!rx_usecs && !rx_frames) { 1487 /* No coalescing, interrupt for every descriptor */ 1488 inte = 1; 1489 } else { 1490 /* Set interrupt based on Rx frame coalescing setting */ 1491 if (rx_frames && !((index + 1) % rx_frames)) 1492 inte = 1; 1493 else 1494 inte = 0; 1495 } 1496 1497 /* Reset the Rx descriptor 1498 * Set buffer 1 (lo) address to header dma address (lo) 1499 * Set buffer 1 (hi) address to header dma address (hi) 1500 * Set buffer 2 (lo) address to buffer dma address (lo) 1501 * Set buffer 2 (hi) address to buffer dma address (hi) and 1502 * set control bits OWN and INTE 1503 */ 1504 hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off; 1505 buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off; 1506 rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma)); 1507 rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma)); 1508 rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma)); 1509 rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma)); 1510 1511 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte); 1512 1513 /* Since the Rx DMA engine is likely running, make sure everything 1514 * is written to the descriptor(s) before setting the OWN bit 1515 * for the descriptor 1516 */ 1517 dma_wmb(); 1518 1519 XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1); 1520 1521 /* Make sure ownership is written to the descriptor */ 1522 dma_wmb(); 1523 } 1524 1525 static void xgbe_rx_desc_init(struct xgbe_channel *channel) 1526 { 1527 struct xgbe_prv_data *pdata = channel->pdata; 1528 struct xgbe_ring *ring = channel->rx_ring; 1529 struct xgbe_ring_data *rdata; 1530 unsigned int start_index = ring->cur; 1531 unsigned int i; 1532 1533 DBGPR("-->rx_desc_init\n"); 1534 1535 /* Initialize all descriptors */ 1536 for (i = 0; i < ring->rdesc_count; i++) { 1537 rdata = XGBE_GET_DESC_DATA(ring, i); 1538 1539 /* Initialize Rx descriptor */ 1540 xgbe_rx_desc_reset(pdata, rdata, i); 1541 } 1542 1543 /* Update the total number of Rx descriptors */ 1544 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1); 1545 1546 /* Update the starting address of descriptor ring */ 1547 rdata = XGBE_GET_DESC_DATA(ring, start_index); 1548 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI, 1549 upper_32_bits(rdata->rdesc_dma)); 1550 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO, 1551 lower_32_bits(rdata->rdesc_dma)); 1552 1553 /* Update the Rx Descriptor Tail Pointer */ 1554 rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1); 1555 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO, 1556 lower_32_bits(rdata->rdesc_dma)); 1557 
1558 DBGPR("<--rx_desc_init\n"); 1559 } 1560 1561 static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata, 1562 unsigned int addend) 1563 { 1564 unsigned int count = 10000; 1565 1566 /* Set the addend register value and tell the device */ 1567 XGMAC_IOWRITE(pdata, MAC_TSAR, addend); 1568 XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1); 1569 1570 /* Wait for addend update to complete */ 1571 while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG)) 1572 udelay(5); 1573 1574 if (!count) 1575 netdev_err(pdata->netdev, 1576 "timed out updating timestamp addend register\n"); 1577 } 1578 1579 static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec, 1580 unsigned int nsec) 1581 { 1582 unsigned int count = 10000; 1583 1584 /* Set the time values and tell the device */ 1585 XGMAC_IOWRITE(pdata, MAC_STSUR, sec); 1586 XGMAC_IOWRITE(pdata, MAC_STNUR, nsec); 1587 XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1); 1588 1589 /* Wait for time update to complete */ 1590 while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT)) 1591 udelay(5); 1592 1593 if (!count) 1594 netdev_err(pdata->netdev, "timed out initializing timestamp\n"); 1595 } 1596 1597 static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata) 1598 { 1599 u64 nsec; 1600 1601 nsec = XGMAC_IOREAD(pdata, MAC_STSR); 1602 nsec *= NSEC_PER_SEC; 1603 nsec += XGMAC_IOREAD(pdata, MAC_STNR); 1604 1605 return nsec; 1606 } 1607 1608 static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata) 1609 { 1610 unsigned int tx_snr, tx_ssr; 1611 u64 nsec; 1612 1613 if (pdata->vdata->tx_tstamp_workaround) { 1614 tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR); 1615 tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR); 1616 } else { 1617 tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR); 1618 tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR); 1619 } 1620 1621 if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) 1622 return 0; 1623 1624 nsec = tx_ssr; 1625 nsec *= NSEC_PER_SEC; 1626 nsec += tx_snr; 1627 1628 return nsec; 1629 } 1630 1631 static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet, 1632 struct xgbe_ring_desc *rdesc) 1633 { 1634 u64 nsec; 1635 1636 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) && 1637 !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) { 1638 nsec = le32_to_cpu(rdesc->desc1); 1639 nsec <<= 32; 1640 nsec |= le32_to_cpu(rdesc->desc0); 1641 if (nsec != 0xffffffffffffffffULL) { 1642 packet->rx_tstamp = nsec; 1643 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1644 RX_TSTAMP, 1); 1645 } 1646 } 1647 } 1648 1649 static int xgbe_config_tstamp(struct xgbe_prv_data *pdata, 1650 unsigned int mac_tscr) 1651 { 1652 /* Set one nano-second accuracy */ 1653 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1); 1654 1655 /* Set fine timestamp update */ 1656 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1); 1657 1658 /* Overwrite earlier timestamps */ 1659 XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1); 1660 1661 XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr); 1662 1663 /* Exit if timestamping is not enabled */ 1664 if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) 1665 return 0; 1666 1667 /* Initialize time registers */ 1668 XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC); 1669 XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC); 1670 xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend); 1671 xgbe_set_tstamp_time(pdata, 0, 0); 1672 1673 /* Initialize the timecounter */ 1674 timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, 1675 ktime_to_ns(ktime_get_real())); 1676 1677 return 0; 1678 } 1679 1680 
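/* A minimal standalone sketch (not part of this driver) of how the
 * seconds/nanoseconds register pair read in xgbe_get_tstamp_time() and
 * xgbe_get_tx_tstamp() above is folded into a single 64-bit nanosecond
 * value. The register values in the comment are made-up example numbers.
 */
static u64 example_combine_tstamp(u32 sec_reg, u32 nsec_reg)
{
	u64 nsec = sec_reg;

	/* e.g. sec_reg = 5, nsec_reg = 250000000:
	 *   5 * NSEC_PER_SEC + 250000000 = 5250000000 ns
	 */
	nsec *= NSEC_PER_SEC;
	nsec += nsec_reg;

	return nsec;
}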
static void xgbe_tx_start_xmit(struct xgbe_channel *channel, 1681 struct xgbe_ring *ring) 1682 { 1683 struct xgbe_prv_data *pdata = channel->pdata; 1684 struct xgbe_ring_data *rdata; 1685 1686 /* Make sure everything is written before the register write */ 1687 wmb(); 1688 1689 /* Issue a poll command to Tx DMA by writing address 1690 * of next immediate free descriptor */ 1691 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1692 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, 1693 lower_32_bits(rdata->rdesc_dma)); 1694 1695 /* Start the Tx timer */ 1696 if (pdata->tx_usecs && !channel->tx_timer_active) { 1697 channel->tx_timer_active = 1; 1698 mod_timer(&channel->tx_timer, 1699 jiffies + usecs_to_jiffies(pdata->tx_usecs)); 1700 } 1701 1702 ring->tx.xmit_more = 0; 1703 } 1704 1705 static void xgbe_dev_xmit(struct xgbe_channel *channel) 1706 { 1707 struct xgbe_prv_data *pdata = channel->pdata; 1708 struct xgbe_ring *ring = channel->tx_ring; 1709 struct xgbe_ring_data *rdata; 1710 struct xgbe_ring_desc *rdesc; 1711 struct xgbe_packet_data *packet = &ring->packet_data; 1712 unsigned int tx_packets, tx_bytes; 1713 unsigned int csum, tso, vlan, vxlan; 1714 unsigned int tso_context, vlan_context; 1715 unsigned int tx_set_ic; 1716 int start_index = ring->cur; 1717 int cur_index = ring->cur; 1718 int i; 1719 1720 DBGPR("-->xgbe_dev_xmit\n"); 1721 1722 tx_packets = packet->tx_packets; 1723 tx_bytes = packet->tx_bytes; 1724 1725 csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1726 CSUM_ENABLE); 1727 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1728 TSO_ENABLE); 1729 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1730 VLAN_CTAG); 1731 vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1732 VXLAN); 1733 1734 if (tso && (packet->mss != ring->tx.cur_mss)) 1735 tso_context = 1; 1736 else 1737 tso_context = 0; 1738 1739 if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)) 1740 vlan_context = 1; 1741 else 1742 vlan_context = 0; 1743 1744 /* Determine if an interrupt should be generated for this Tx: 1745 * Interrupt: 1746 * - Tx frame count exceeds the frame count setting 1747 * - Addition of Tx frame count to the frame count since the 1748 * last interrupt was set exceeds the frame count setting 1749 * No interrupt: 1750 * - No frame count setting specified (ethtool -C ethX tx-frames 0) 1751 * - Addition of Tx frame count to the frame count since the 1752 * last interrupt was set does not exceed the frame count setting 1753 */ 1754 ring->coalesce_count += tx_packets; 1755 if (!pdata->tx_frames) 1756 tx_set_ic = 0; 1757 else if (tx_packets > pdata->tx_frames) 1758 tx_set_ic = 1; 1759 else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets) 1760 tx_set_ic = 1; 1761 else 1762 tx_set_ic = 0; 1763 1764 rdata = XGBE_GET_DESC_DATA(ring, cur_index); 1765 rdesc = rdata->rdesc; 1766 1767 /* Create a context descriptor if this is a TSO packet */ 1768 if (tso_context || vlan_context) { 1769 if (tso_context) { 1770 netif_dbg(pdata, tx_queued, pdata->netdev, 1771 "TSO context descriptor, mss=%u\n", 1772 packet->mss); 1773 1774 /* Set the MSS size */ 1775 XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2, 1776 MSS, packet->mss); 1777 1778 /* Mark it as a CONTEXT descriptor */ 1779 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 1780 CTXT, 1); 1781 1782 /* Indicate this descriptor contains the MSS */ 1783 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 1784 TCMSSV, 1); 1785 1786 ring->tx.cur_mss = packet->mss; 1787 } 1788 1789 if (vlan_context) { 
1790 netif_dbg(pdata, tx_queued, pdata->netdev, 1791 "VLAN context descriptor, ctag=%u\n", 1792 packet->vlan_ctag); 1793 1794 /* Mark it as a CONTEXT descriptor */ 1795 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 1796 CTXT, 1); 1797 1798 /* Set the VLAN tag */ 1799 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 1800 VT, packet->vlan_ctag); 1801 1802 /* Indicate this descriptor contains the VLAN tag */ 1803 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 1804 VLTV, 1); 1805 1806 ring->tx.cur_vlan_ctag = packet->vlan_ctag; 1807 } 1808 1809 cur_index++; 1810 rdata = XGBE_GET_DESC_DATA(ring, cur_index); 1811 rdesc = rdata->rdesc; 1812 } 1813 1814 /* Update buffer address (for TSO this is the header) */ 1815 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); 1816 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); 1817 1818 /* Update the buffer length */ 1819 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, 1820 rdata->skb_dma_len); 1821 1822 /* VLAN tag insertion check */ 1823 if (vlan) 1824 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR, 1825 TX_NORMAL_DESC2_VLAN_INSERT); 1826 1827 /* Timestamp enablement check */ 1828 if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) 1829 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1); 1830 1831 /* Mark it as First Descriptor */ 1832 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1); 1833 1834 /* Mark it as a NORMAL descriptor */ 1835 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); 1836 1837 /* Set OWN bit if not the first descriptor */ 1838 if (cur_index != start_index) 1839 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); 1840 1841 if (tso) { 1842 /* Enable TSO */ 1843 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1); 1844 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL, 1845 packet->tcp_payload_len); 1846 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN, 1847 packet->tcp_header_len / 4); 1848 1849 pdata->ext_stats.tx_tso_packets += tx_packets; 1850 } else { 1851 /* Enable CRC and Pad Insertion */ 1852 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0); 1853 1854 /* Enable HW CSUM */ 1855 if (csum) 1856 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, 1857 CIC, 0x3); 1858 1859 /* Set the total length to be transmitted */ 1860 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, 1861 packet->length); 1862 } 1863 1864 if (vxlan) { 1865 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP, 1866 TX_NORMAL_DESC3_VXLAN_PACKET); 1867 1868 pdata->ext_stats.tx_vxlan_packets += packet->tx_packets; 1869 } 1870 1871 for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) { 1872 cur_index++; 1873 rdata = XGBE_GET_DESC_DATA(ring, cur_index); 1874 rdesc = rdata->rdesc; 1875 1876 /* Update buffer address */ 1877 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); 1878 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); 1879 1880 /* Update the buffer length */ 1881 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, 1882 rdata->skb_dma_len); 1883 1884 /* Set OWN bit */ 1885 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); 1886 1887 /* Mark it as NORMAL descriptor */ 1888 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); 1889 1890 /* Enable HW CSUM */ 1891 if (csum) 1892 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, 1893 CIC, 0x3); 1894 } 1895 1896 /* Set LAST bit for the last descriptor */ 1897 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1); 1898 1899 /* Set IC bit based on Tx coalescing 
settings */ 1900 if (tx_set_ic) 1901 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); 1902 1903 /* Save the Tx info to report back during cleanup */ 1904 rdata->tx.packets = tx_packets; 1905 rdata->tx.bytes = tx_bytes; 1906 1907 pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets; 1908 pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes; 1909 1910 /* In case the Tx DMA engine is running, make sure everything 1911 * is written to the descriptor(s) before setting the OWN bit 1912 * for the first descriptor 1913 */ 1914 dma_wmb(); 1915 1916 /* Set OWN bit for the first descriptor */ 1917 rdata = XGBE_GET_DESC_DATA(ring, start_index); 1918 rdesc = rdata->rdesc; 1919 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); 1920 1921 if (netif_msg_tx_queued(pdata)) 1922 xgbe_dump_tx_desc(pdata, ring, start_index, 1923 packet->rdesc_count, 1); 1924 1925 /* Make sure ownership is written to the descriptor */ 1926 smp_wmb(); 1927 1928 ring->cur = cur_index + 1; 1929 if (!netdev_xmit_more() || 1930 netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, 1931 channel->queue_index))) 1932 xgbe_tx_start_xmit(channel, ring); 1933 else 1934 ring->tx.xmit_more = 1; 1935 1936 DBGPR(" %s: descriptors %u to %u written\n", 1937 channel->name, start_index & (ring->rdesc_count - 1), 1938 (ring->cur - 1) & (ring->rdesc_count - 1)); 1939 1940 DBGPR("<--xgbe_dev_xmit\n"); 1941 } 1942 1943 static int xgbe_dev_read(struct xgbe_channel *channel) 1944 { 1945 struct xgbe_prv_data *pdata = channel->pdata; 1946 struct xgbe_ring *ring = channel->rx_ring; 1947 struct xgbe_ring_data *rdata; 1948 struct xgbe_ring_desc *rdesc; 1949 struct xgbe_packet_data *packet = &ring->packet_data; 1950 struct net_device *netdev = pdata->netdev; 1951 unsigned int err, etlt, l34t; 1952 1953 DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur); 1954 1955 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1956 rdesc = rdata->rdesc; 1957 1958 /* Check for data availability */ 1959 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) 1960 return 1; 1961 1962 /* Make sure descriptor fields are read after reading the OWN bit */ 1963 dma_rmb(); 1964 1965 if (netif_msg_rx_status(pdata)) 1966 xgbe_dump_rx_desc(pdata, ring, ring->cur); 1967 1968 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) { 1969 /* Timestamp Context Descriptor */ 1970 xgbe_get_rx_tstamp(packet, rdesc); 1971 1972 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1973 CONTEXT, 1); 1974 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1975 CONTEXT_NEXT, 0); 1976 return 0; 1977 } 1978 1979 /* Normal Descriptor, be sure Context Descriptor bit is off */ 1980 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0); 1981 1982 /* Indicate if a Context Descriptor is next */ 1983 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA)) 1984 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1985 CONTEXT_NEXT, 1); 1986 1987 /* Get the header length */ 1988 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { 1989 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1990 FIRST, 1); 1991 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, 1992 RX_NORMAL_DESC2, HL); 1993 if (rdata->rx.hdr_len) 1994 pdata->ext_stats.rx_split_header_packets++; 1995 } else { 1996 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1997 FIRST, 0); 1998 } 1999 2000 /* Get the RSS hash */ 2001 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) { 2002 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 2003 RSS_HASH, 
1); 2004 2005 packet->rss_hash = le32_to_cpu(rdesc->desc1); 2006 2007 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); 2008 switch (l34t) { 2009 case RX_DESC3_L34T_IPV4_TCP: 2010 case RX_DESC3_L34T_IPV4_UDP: 2011 case RX_DESC3_L34T_IPV6_TCP: 2012 case RX_DESC3_L34T_IPV6_UDP: 2013 packet->rss_hash_type = PKT_HASH_TYPE_L4; 2014 break; 2015 default: 2016 packet->rss_hash_type = PKT_HASH_TYPE_L3; 2017 } 2018 } 2019 2020 /* Not all the data has been transferred for this packet */ 2021 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) 2022 return 0; 2023 2024 /* This is the last of the data for this packet */ 2025 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 2026 LAST, 1); 2027 2028 /* Get the packet length */ 2029 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); 2030 2031 /* Set checksum done indicator as appropriate */ 2032 if (netdev->features & NETIF_F_RXCSUM) { 2033 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 2034 CSUM_DONE, 1); 2035 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 2036 TNPCSUM_DONE, 1); 2037 } 2038 2039 /* Set the tunneled packet indicator */ 2040 if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) { 2041 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 2042 TNP, 1); 2043 pdata->ext_stats.rx_vxlan_packets++; 2044 2045 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); 2046 switch (l34t) { 2047 case RX_DESC3_L34T_IPV4_UNKNOWN: 2048 case RX_DESC3_L34T_IPV6_UNKNOWN: 2049 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 2050 TNPCSUM_DONE, 0); 2051 break; 2052 } 2053 } 2054 2055 /* Check for errors (only valid in last descriptor) */ 2056 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); 2057 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); 2058 netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt); 2059 2060 if (!err || !etlt) { 2061 /* No error if err is 0 or etlt is 0 */ 2062 if ((etlt == 0x09) && 2063 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 2064 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 2065 VLAN_CTAG, 1); 2066 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, 2067 RX_NORMAL_DESC0, 2068 OVT); 2069 netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", 2070 packet->vlan_ctag); 2071 } 2072 } else { 2073 unsigned int tnp = XGMAC_GET_BITS(packet->attributes, 2074 RX_PACKET_ATTRIBUTES, TNP); 2075 2076 if ((etlt == 0x05) || (etlt == 0x06)) { 2077 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 2078 CSUM_DONE, 0); 2079 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 2080 TNPCSUM_DONE, 0); 2081 pdata->ext_stats.rx_csum_errors++; 2082 } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) { 2083 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 2084 CSUM_DONE, 0); 2085 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 2086 TNPCSUM_DONE, 0); 2087 pdata->ext_stats.rx_vxlan_csum_errors++; 2088 } else { 2089 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, 2090 FRAME, 1); 2091 } 2092 } 2093 2094 pdata->ext_stats.rxq_packets[channel->queue_index]++; 2095 pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len; 2096 2097 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name, 2098 ring->cur & (ring->rdesc_count - 1), ring->cur); 2099 2100 return 0; 2101 } 2102 2103 static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc) 2104 { 2105 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ 2106 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT); 
2107 } 2108 2109 static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) 2110 { 2111 /* Rx and Tx share LD bit, so check TDES3.LD bit */ 2112 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD); 2113 } 2114 2115 static int xgbe_enable_int(struct xgbe_channel *channel, 2116 enum xgbe_int int_id) 2117 { 2118 switch (int_id) { 2119 case XGMAC_INT_DMA_CH_SR_TI: 2120 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); 2121 break; 2122 case XGMAC_INT_DMA_CH_SR_TPS: 2123 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1); 2124 break; 2125 case XGMAC_INT_DMA_CH_SR_TBU: 2126 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1); 2127 break; 2128 case XGMAC_INT_DMA_CH_SR_RI: 2129 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); 2130 break; 2131 case XGMAC_INT_DMA_CH_SR_RBU: 2132 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); 2133 break; 2134 case XGMAC_INT_DMA_CH_SR_RPS: 2135 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1); 2136 break; 2137 case XGMAC_INT_DMA_CH_SR_TI_RI: 2138 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); 2139 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); 2140 break; 2141 case XGMAC_INT_DMA_CH_SR_FBE: 2142 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); 2143 break; 2144 case XGMAC_INT_DMA_ALL: 2145 channel->curr_ier |= channel->saved_ier; 2146 break; 2147 default: 2148 return -1; 2149 } 2150 2151 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); 2152 2153 return 0; 2154 } 2155 2156 static int xgbe_disable_int(struct xgbe_channel *channel, 2157 enum xgbe_int int_id) 2158 { 2159 switch (int_id) { 2160 case XGMAC_INT_DMA_CH_SR_TI: 2161 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); 2162 break; 2163 case XGMAC_INT_DMA_CH_SR_TPS: 2164 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0); 2165 break; 2166 case XGMAC_INT_DMA_CH_SR_TBU: 2167 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0); 2168 break; 2169 case XGMAC_INT_DMA_CH_SR_RI: 2170 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); 2171 break; 2172 case XGMAC_INT_DMA_CH_SR_RBU: 2173 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0); 2174 break; 2175 case XGMAC_INT_DMA_CH_SR_RPS: 2176 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0); 2177 break; 2178 case XGMAC_INT_DMA_CH_SR_TI_RI: 2179 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); 2180 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); 2181 break; 2182 case XGMAC_INT_DMA_CH_SR_FBE: 2183 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0); 2184 break; 2185 case XGMAC_INT_DMA_ALL: 2186 channel->saved_ier = channel->curr_ier; 2187 channel->curr_ier = 0; 2188 break; 2189 default: 2190 return -1; 2191 } 2192 2193 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); 2194 2195 return 0; 2196 } 2197 2198 static int __xgbe_exit(struct xgbe_prv_data *pdata) 2199 { 2200 unsigned int count = 2000; 2201 2202 DBGPR("-->xgbe_exit\n"); 2203 2204 /* Issue a software reset */ 2205 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); 2206 usleep_range(10, 15); 2207 2208 /* Poll Until Poll Condition */ 2209 while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) 2210 usleep_range(500, 600); 2211 2212 if (!count) 2213 return -EBUSY; 2214 2215 DBGPR("<--xgbe_exit\n"); 2216 2217 return 0; 2218 } 2219 2220 static int xgbe_exit(struct xgbe_prv_data *pdata) 2221 { 2222 int ret; 2223 2224 /* To guard against possible incorrectly generated interrupts, 2225 * issue the software reset twice. 
2226 */ 2227 ret = __xgbe_exit(pdata); 2228 if (ret) 2229 return ret; 2230 2231 return __xgbe_exit(pdata); 2232 } 2233 2234 static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) 2235 { 2236 unsigned int i, count; 2237 2238 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) 2239 return 0; 2240 2241 for (i = 0; i < pdata->tx_q_count; i++) 2242 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); 2243 2244 /* Poll Until Poll Condition */ 2245 for (i = 0; i < pdata->tx_q_count; i++) { 2246 count = 2000; 2247 while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, 2248 MTL_Q_TQOMR, FTQ)) 2249 usleep_range(500, 600); 2250 2251 if (!count) 2252 return -EBUSY; 2253 } 2254 2255 return 0; 2256 } 2257 2258 static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata) 2259 { 2260 unsigned int sbmr; 2261 2262 sbmr = XGMAC_IOREAD(pdata, DMA_SBMR); 2263 2264 /* Set enhanced addressing mode */ 2265 XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1); 2266 2267 /* Set the System Bus mode */ 2268 XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1); 2269 XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2); 2270 XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal); 2271 XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1); 2272 XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1); 2273 2274 XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr); 2275 2276 /* Set descriptor fetching threshold */ 2277 if (pdata->vdata->tx_desc_prefetch) 2278 XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS, 2279 pdata->vdata->tx_desc_prefetch); 2280 2281 if (pdata->vdata->rx_desc_prefetch) 2282 XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS, 2283 pdata->vdata->rx_desc_prefetch); 2284 } 2285 2286 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata) 2287 { 2288 XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr); 2289 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr); 2290 if (pdata->awarcr) 2291 XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr); 2292 } 2293 2294 static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) 2295 { 2296 unsigned int i; 2297 2298 /* Set Tx to weighted round robin scheduling algorithm */ 2299 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); 2300 2301 /* Set Tx traffic classes to use WRR algorithm with equal weights */ 2302 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { 2303 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, 2304 MTL_TSA_ETS); 2305 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1); 2306 } 2307 2308 /* Set Rx to strict priority algorithm */ 2309 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); 2310 } 2311 2312 static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata, 2313 unsigned int queue, 2314 unsigned int q_fifo_size) 2315 { 2316 unsigned int frame_fifo_size; 2317 unsigned int rfa, rfd; 2318 2319 frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata)); 2320 2321 if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) { 2322 /* PFC is active for this queue */ 2323 rfa = pdata->pfc_rfa; 2324 rfd = rfa + frame_fifo_size; 2325 if (rfd > XGMAC_FLOW_CONTROL_MAX) 2326 rfd = XGMAC_FLOW_CONTROL_MAX; 2327 if (rfa >= XGMAC_FLOW_CONTROL_MAX) 2328 rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT; 2329 } else { 2330 /* This path deals with just maximum frame sizes which are 2331 * limited to a jumbo frame of 9,000 (plus headers, etc.) 2332 * so we can never exceed the maximum allowable RFA/RFD 2333 * values. 
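		 *
		 * The hard-coded RFA/RFD values below use the register
		 * encoding implied by the byte notes next to them: a value
		 * of 0 asserts flow control 1024 bytes below a full fifo and
		 * each increment adds another 512 bytes (so 1 = full - 1536,
		 * 2 = full - 2048 and 5 = full - 3584).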
2334 */ 2335 if (q_fifo_size <= 2048) { 2336 /* rx_rfd to zero to signal no flow control */ 2337 pdata->rx_rfa[queue] = 0; 2338 pdata->rx_rfd[queue] = 0; 2339 return; 2340 } 2341 2342 if (q_fifo_size <= 4096) { 2343 /* Between 2048 and 4096 */ 2344 pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */ 2345 pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */ 2346 return; 2347 } 2348 2349 if (q_fifo_size <= frame_fifo_size) { 2350 /* Between 4096 and max-frame */ 2351 pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */ 2352 pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */ 2353 return; 2354 } 2355 2356 if (q_fifo_size <= (frame_fifo_size * 3)) { 2357 /* Between max-frame and 3 max-frames, 2358 * trigger if we get just over a frame of data and 2359 * resume when we have just under half a frame left. 2360 */ 2361 rfa = q_fifo_size - frame_fifo_size; 2362 rfd = rfa + (frame_fifo_size / 2); 2363 } else { 2364 /* Above 3 max-frames - trigger when just over 2365 * 2 frames of space available 2366 */ 2367 rfa = frame_fifo_size * 2; 2368 rfa += XGMAC_FLOW_CONTROL_UNIT; 2369 rfd = rfa + frame_fifo_size; 2370 } 2371 } 2372 2373 pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa); 2374 pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd); 2375 } 2376 2377 static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata, 2378 unsigned int *fifo) 2379 { 2380 unsigned int q_fifo_size; 2381 unsigned int i; 2382 2383 for (i = 0; i < pdata->rx_q_count; i++) { 2384 q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT; 2385 2386 xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size); 2387 } 2388 } 2389 2390 static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) 2391 { 2392 unsigned int i; 2393 2394 for (i = 0; i < pdata->rx_q_count; i++) { 2395 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2396 pdata->rx_rfa[i]); 2397 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 2398 pdata->rx_rfd[i]); 2399 } 2400 } 2401 2402 static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata) 2403 { 2404 /* The configured value may not be the actual amount of fifo RAM */ 2405 return min_t(unsigned int, pdata->tx_max_fifo_size, 2406 pdata->hw_feat.tx_fifo_size); 2407 } 2408 2409 static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata) 2410 { 2411 /* The configured value may not be the actual amount of fifo RAM */ 2412 return min_t(unsigned int, pdata->rx_max_fifo_size, 2413 pdata->hw_feat.rx_fifo_size); 2414 } 2415 2416 static void xgbe_calculate_equal_fifo(unsigned int fifo_size, 2417 unsigned int queue_count, 2418 unsigned int *fifo) 2419 { 2420 unsigned int q_fifo_size; 2421 unsigned int p_fifo; 2422 unsigned int i; 2423 2424 q_fifo_size = fifo_size / queue_count; 2425 2426 /* Calculate the fifo setting by dividing the queue's fifo size 2427 * by the fifo allocation increment (with 0 representing the 2428 * base allocation increment so decrement the result by 1). 2429 */ 2430 p_fifo = q_fifo_size / XGMAC_FIFO_UNIT; 2431 if (p_fifo) 2432 p_fifo--; 2433 2434 /* Distribute the fifo equally amongst the queues */ 2435 for (i = 0; i < queue_count; i++) 2436 fifo[i] = p_fifo; 2437 } 2438 2439 static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size, 2440 unsigned int queue_count, 2441 unsigned int *fifo) 2442 { 2443 unsigned int i; 2444 2445 BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC); 2446 2447 if (queue_count <= IEEE_8021QAZ_MAX_TCS) 2448 return fifo_size; 2449 2450 /* Rx queues 9 and up are for specialized packets, 2451 * such as PTP or DCB control packets, etc. 
and 2452 * don't require a large fifo 2453 */ 2454 for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) { 2455 fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1; 2456 fifo_size -= XGMAC_FIFO_MIN_ALLOC; 2457 } 2458 2459 return fifo_size; 2460 } 2461 2462 static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata) 2463 { 2464 unsigned int delay; 2465 2466 /* If a delay has been provided, use that */ 2467 if (pdata->pfc->delay) 2468 return pdata->pfc->delay / 8; 2469 2470 /* Allow for two maximum size frames */ 2471 delay = xgbe_get_max_frame(pdata); 2472 delay += XGMAC_ETH_PREAMBLE; 2473 delay *= 2; 2474 2475 /* Allow for PFC frame */ 2476 delay += XGMAC_PFC_DATA_LEN; 2477 delay += ETH_HLEN + ETH_FCS_LEN; 2478 delay += XGMAC_ETH_PREAMBLE; 2479 2480 /* Allow for miscellaneous delays (LPI exit, cable, etc.) */ 2481 delay += XGMAC_PFC_DELAYS; 2482 2483 return delay; 2484 } 2485 2486 static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata) 2487 { 2488 unsigned int count, prio_queues; 2489 unsigned int i; 2490 2491 if (!pdata->pfc->pfc_en) 2492 return 0; 2493 2494 count = 0; 2495 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); 2496 for (i = 0; i < prio_queues; i++) { 2497 if (!xgbe_is_pfc_queue(pdata, i)) 2498 continue; 2499 2500 pdata->pfcq[i] = 1; 2501 count++; 2502 } 2503 2504 return count; 2505 } 2506 2507 static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata, 2508 unsigned int fifo_size, 2509 unsigned int *fifo) 2510 { 2511 unsigned int q_fifo_size, rem_fifo, addn_fifo; 2512 unsigned int prio_queues; 2513 unsigned int pfc_count; 2514 unsigned int i; 2515 2516 q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata)); 2517 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); 2518 pfc_count = xgbe_get_pfc_queues(pdata); 2519 2520 if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) { 2521 /* No traffic classes with PFC enabled or can't do lossless */ 2522 xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo); 2523 return; 2524 } 2525 2526 /* Calculate how much fifo we have to play with */ 2527 rem_fifo = fifo_size - (q_fifo_size * prio_queues); 2528 2529 /* Calculate how much more than base fifo PFC needs, which also 2530 * becomes the threshold activation point (RFA) 2531 */ 2532 pdata->pfc_rfa = xgbe_get_pfc_delay(pdata); 2533 pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa); 2534 2535 if (pdata->pfc_rfa > q_fifo_size) { 2536 addn_fifo = pdata->pfc_rfa - q_fifo_size; 2537 addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo); 2538 } else { 2539 addn_fifo = 0; 2540 } 2541 2542 /* Calculate DCB fifo settings: 2543 * - distribute remaining fifo between the VLAN priority 2544 * queues based on traffic class PFC enablement and overall 2545 * priority (0 is lowest priority, so start at highest) 2546 */ 2547 i = prio_queues; 2548 while (i > 0) { 2549 i--; 2550 2551 fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1; 2552 2553 if (!pdata->pfcq[i] || !addn_fifo) 2554 continue; 2555 2556 if (addn_fifo > rem_fifo) { 2557 netdev_warn(pdata->netdev, 2558 "RXq%u cannot set needed fifo size\n", i); 2559 if (!rem_fifo) 2560 continue; 2561 2562 addn_fifo = rem_fifo; 2563 } 2564 2565 fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT); 2566 rem_fifo -= addn_fifo; 2567 } 2568 2569 if (rem_fifo) { 2570 unsigned int inc_fifo = rem_fifo / prio_queues; 2571 2572 /* Distribute remaining fifo across queues */ 2573 for (i = 0; i < prio_queues; i++) 2574 fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT); 2575 } 2576 } 2577 2578 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) 2579 { 
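	/* Split the usable Tx fifo equally across the Tx hardware queues and
	 * program each queue's size (TQS) in fifo-unit increments.  As a
	 * worked example (assuming the 256-byte XGMAC_FIFO_UNIT from xgbe.h),
	 * a 65536-byte fifo shared by 4 queues gives 16384 bytes per queue,
	 * programmed as (16384 / 256) - 1 = 63.
	 */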
2580 unsigned int fifo_size; 2581 unsigned int fifo[XGBE_MAX_QUEUES]; 2582 unsigned int i; 2583 2584 fifo_size = xgbe_get_tx_fifo_size(pdata); 2585 2586 xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo); 2587 2588 for (i = 0; i < pdata->tx_q_count; i++) 2589 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]); 2590 2591 netif_info(pdata, drv, pdata->netdev, 2592 "%d Tx hardware queues, %d byte fifo per queue\n", 2593 pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); 2594 } 2595 2596 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) 2597 { 2598 unsigned int fifo_size; 2599 unsigned int fifo[XGBE_MAX_QUEUES]; 2600 unsigned int prio_queues; 2601 unsigned int i; 2602 2603 /* Clear any DCB related fifo/queue information */ 2604 memset(pdata->pfcq, 0, sizeof(pdata->pfcq)); 2605 pdata->pfc_rfa = 0; 2606 2607 fifo_size = xgbe_get_rx_fifo_size(pdata); 2608 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); 2609 2610 /* Assign a minimum fifo to the non-VLAN priority queues */ 2611 fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo); 2612 2613 if (pdata->pfc && pdata->ets) 2614 xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo); 2615 else 2616 xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo); 2617 2618 for (i = 0; i < pdata->rx_q_count; i++) 2619 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]); 2620 2621 xgbe_calculate_flow_control_threshold(pdata, fifo); 2622 xgbe_config_flow_control_threshold(pdata); 2623 2624 if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) { 2625 netif_info(pdata, drv, pdata->netdev, 2626 "%u Rx hardware queues\n", pdata->rx_q_count); 2627 for (i = 0; i < pdata->rx_q_count; i++) 2628 netif_info(pdata, drv, pdata->netdev, 2629 "RxQ%u, %u byte fifo queue\n", i, 2630 ((fifo[i] + 1) * XGMAC_FIFO_UNIT)); 2631 } else { 2632 netif_info(pdata, drv, pdata->netdev, 2633 "%u Rx hardware queues, %u byte fifo per queue\n", 2634 pdata->rx_q_count, 2635 ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); 2636 } 2637 } 2638 2639 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) 2640 { 2641 unsigned int qptc, qptc_extra, queue; 2642 unsigned int prio_queues; 2643 unsigned int ppq, ppq_extra, prio; 2644 unsigned int mask; 2645 unsigned int i, j, reg, reg_val; 2646 2647 /* Map the MTL Tx Queues to Traffic Classes 2648 * Note: Tx Queues >= Traffic Classes 2649 */ 2650 qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; 2651 qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; 2652 2653 for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { 2654 for (j = 0; j < qptc; j++) { 2655 netif_dbg(pdata, drv, pdata->netdev, 2656 "TXq%u mapped to TC%u\n", queue, i); 2657 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, 2658 Q2TCMAP, i); 2659 pdata->q2tc_map[queue++] = i; 2660 } 2661 2662 if (i < qptc_extra) { 2663 netif_dbg(pdata, drv, pdata->netdev, 2664 "TXq%u mapped to TC%u\n", queue, i); 2665 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, 2666 Q2TCMAP, i); 2667 pdata->q2tc_map[queue++] = i; 2668 } 2669 } 2670 2671 /* Map the 8 VLAN priority values to available MTL Rx queues */ 2672 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); 2673 ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; 2674 ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; 2675 2676 reg = MAC_RQC2R; 2677 reg_val = 0; 2678 for (i = 0, prio = 0; i < prio_queues;) { 2679 mask = 0; 2680 for (j = 0; j < ppq; j++) { 2681 netif_dbg(pdata, drv, pdata->netdev, 2682 "PRIO%u mapped to RXq%u\n", prio, i); 2683 mask |= (1 << prio); 2684 pdata->prio2q_map[prio++] = i; 2685 } 
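		/* If the 8 priorities don't divide evenly across the Rx
		 * queues, give the lowest-numbered queues one extra
		 * priority each.
		 */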
2686 2687 if (i < ppq_extra) { 2688 netif_dbg(pdata, drv, pdata->netdev, 2689 "PRIO%u mapped to RXq%u\n", prio, i); 2690 mask |= (1 << prio); 2691 pdata->prio2q_map[prio++] = i; 2692 } 2693 2694 reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); 2695 2696 if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) 2697 continue; 2698 2699 XGMAC_IOWRITE(pdata, reg, reg_val); 2700 reg += MAC_RQC2_INC; 2701 reg_val = 0; 2702 } 2703 2704 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ 2705 reg = MTL_RQDCM0R; 2706 reg_val = 0; 2707 for (i = 0; i < pdata->rx_q_count;) { 2708 reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); 2709 2710 if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count)) 2711 continue; 2712 2713 XGMAC_IOWRITE(pdata, reg, reg_val); 2714 2715 reg += MTL_RQDCM_INC; 2716 reg_val = 0; 2717 } 2718 } 2719 2720 static void xgbe_config_tc(struct xgbe_prv_data *pdata) 2721 { 2722 unsigned int offset, queue, prio; 2723 u8 i; 2724 2725 netdev_reset_tc(pdata->netdev); 2726 if (!pdata->num_tcs) 2727 return; 2728 2729 netdev_set_num_tc(pdata->netdev, pdata->num_tcs); 2730 2731 for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) { 2732 while ((queue < pdata->tx_q_count) && 2733 (pdata->q2tc_map[queue] == i)) 2734 queue++; 2735 2736 netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n", 2737 i, offset, queue - 1); 2738 netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset); 2739 offset = queue; 2740 } 2741 2742 if (!pdata->ets) 2743 return; 2744 2745 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) 2746 netdev_set_prio_tc_map(pdata->netdev, prio, 2747 pdata->ets->prio_tc[prio]); 2748 } 2749 2750 static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) 2751 { 2752 struct ieee_ets *ets = pdata->ets; 2753 unsigned int total_weight, min_weight, weight; 2754 unsigned int mask, reg, reg_val; 2755 unsigned int i, prio; 2756 2757 if (!ets) 2758 return; 2759 2760 /* Set Tx to deficit weighted round robin scheduling algorithm (when 2761 * traffic class is using ETS algorithm) 2762 */ 2763 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR); 2764 2765 /* Set Traffic Class algorithms */ 2766 total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt; 2767 min_weight = total_weight / 100; 2768 if (!min_weight) 2769 min_weight = 1; 2770 2771 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { 2772 /* Map the priorities to the traffic class */ 2773 mask = 0; 2774 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { 2775 if (ets->prio_tc[prio] == i) 2776 mask |= (1 << prio); 2777 } 2778 mask &= 0xff; 2779 2780 netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n", 2781 i, mask); 2782 reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG)); 2783 reg_val = XGMAC_IOREAD(pdata, reg); 2784 2785 reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3)); 2786 reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3)); 2787 2788 XGMAC_IOWRITE(pdata, reg, reg_val); 2789 2790 /* Set the traffic class algorithm */ 2791 switch (ets->tc_tsa[i]) { 2792 case IEEE_8021QAZ_TSA_STRICT: 2793 netif_dbg(pdata, drv, pdata->netdev, 2794 "TC%u using SP\n", i); 2795 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, 2796 MTL_TSA_SP); 2797 break; 2798 case IEEE_8021QAZ_TSA_ETS: 2799 weight = total_weight * ets->tc_tx_bw[i] / 100; 2800 weight = clamp(weight, min_weight, total_weight); 2801 2802 netif_dbg(pdata, drv, pdata->netdev, 2803 "TC%u using DWRR (weight %u)\n", i, weight); 2804 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, 2805 MTL_TSA_ETS); 2806 
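			/* Program the DWRR quantum for this traffic class;
			 * weight was derived from the requested bandwidth
			 * percentage and clamped to at least min_weight
			 * above.
			 */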
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 2807 weight); 2808 break; 2809 } 2810 } 2811 2812 xgbe_config_tc(pdata); 2813 } 2814 2815 static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) 2816 { 2817 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) { 2818 /* Just stop the Tx queues while Rx fifo is changed */ 2819 netif_tx_stop_all_queues(pdata->netdev); 2820 2821 /* Suspend Rx so that fifo's can be adjusted */ 2822 pdata->hw_if.disable_rx(pdata); 2823 } 2824 2825 xgbe_config_rx_fifo_size(pdata); 2826 xgbe_config_flow_control(pdata); 2827 2828 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) { 2829 /* Resume Rx */ 2830 pdata->hw_if.enable_rx(pdata); 2831 2832 /* Resume Tx queues */ 2833 netif_tx_start_all_queues(pdata->netdev); 2834 } 2835 } 2836 2837 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata) 2838 { 2839 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr); 2840 2841 /* Filtering is done using perfect filtering and hash filtering */ 2842 if (pdata->hw_feat.hash_table_size) { 2843 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); 2844 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); 2845 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1); 2846 } 2847 } 2848 2849 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata) 2850 { 2851 unsigned int val; 2852 2853 if (pdata->netdev->mtu > XGMAC_JUMBO_PACKET_MTU) { 2854 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSL, 2855 XGMAC_GIANT_PACKET_MTU); 2856 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 1); 2857 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 1); 2858 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 1); 2859 } else { 2860 val = pdata->netdev->mtu > XGMAC_STD_PACKET_MTU ? 1 : 0; 2861 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 0); 2862 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 0); 2863 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 0); 2864 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); 2865 } 2866 } 2867 2868 static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata) 2869 { 2870 xgbe_set_speed(pdata, pdata->phy_speed); 2871 } 2872 2873 static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) 2874 { 2875 if (pdata->netdev->features & NETIF_F_RXCSUM) 2876 xgbe_enable_rx_csum(pdata); 2877 else 2878 xgbe_disable_rx_csum(pdata); 2879 } 2880 2881 static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) 2882 { 2883 /* Indicate that VLAN Tx CTAGs come from context descriptors */ 2884 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0); 2885 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1); 2886 2887 /* Set the current VLAN Hash Table register value */ 2888 xgbe_update_vlan_hash_table(pdata); 2889 2890 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) 2891 xgbe_enable_rx_vlan_filtering(pdata); 2892 else 2893 xgbe_disable_rx_vlan_filtering(pdata); 2894 2895 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 2896 xgbe_enable_rx_vlan_stripping(pdata); 2897 else 2898 xgbe_disable_rx_vlan_stripping(pdata); 2899 } 2900 2901 static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) 2902 { 2903 bool read_hi; 2904 u64 val; 2905 2906 if (pdata->vdata->mmc_64bit) { 2907 switch (reg_lo) { 2908 /* These registers are always 32 bit */ 2909 case MMC_RXRUNTERROR: 2910 case MMC_RXJABBERERROR: 2911 case MMC_RXUNDERSIZE_G: 2912 case MMC_RXOVERSIZE_G: 2913 case MMC_RXWATCHDOGERROR: 2914 read_hi = false; 2915 break; 2916 2917 default: 2918 read_hi = true; 2919 } 2920 } else { 2921 switch (reg_lo) { 2922 /* These registers are always 64 bit */ 2923 case MMC_TXOCTETCOUNT_GB_LO: 2924 case MMC_TXOCTETCOUNT_G_LO: 2925 case MMC_RXOCTETCOUNT_GB_LO: 
2926 case MMC_RXOCTETCOUNT_G_LO: 2927 read_hi = true; 2928 break; 2929 2930 default: 2931 read_hi = false; 2932 } 2933 } 2934 2935 val = XGMAC_IOREAD(pdata, reg_lo); 2936 2937 if (read_hi) 2938 val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); 2939 2940 return val; 2941 } 2942 2943 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) 2944 { 2945 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 2946 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR); 2947 2948 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) 2949 stats->txoctetcount_gb += 2950 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); 2951 2952 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) 2953 stats->txframecount_gb += 2954 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); 2955 2956 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) 2957 stats->txbroadcastframes_g += 2958 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); 2959 2960 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) 2961 stats->txmulticastframes_g += 2962 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); 2963 2964 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) 2965 stats->tx64octets_gb += 2966 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); 2967 2968 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) 2969 stats->tx65to127octets_gb += 2970 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); 2971 2972 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) 2973 stats->tx128to255octets_gb += 2974 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); 2975 2976 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) 2977 stats->tx256to511octets_gb += 2978 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); 2979 2980 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) 2981 stats->tx512to1023octets_gb += 2982 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); 2983 2984 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) 2985 stats->tx1024tomaxoctets_gb += 2986 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 2987 2988 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) 2989 stats->txunicastframes_gb += 2990 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); 2991 2992 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) 2993 stats->txmulticastframes_gb += 2994 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 2995 2996 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) 2997 stats->txbroadcastframes_g += 2998 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 2999 3000 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) 3001 stats->txunderflowerror += 3002 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); 3003 3004 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) 3005 stats->txoctetcount_g += 3006 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); 3007 3008 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) 3009 stats->txframecount_g += 3010 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); 3011 3012 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) 3013 stats->txpauseframes += 3014 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); 3015 3016 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) 3017 stats->txvlanframes_g += 3018 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); 3019 } 3020 3021 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) 3022 { 3023 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 3024 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR); 3025 3026 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) 3027 stats->rxframecount_gb += 3028 xgbe_mmc_read(pdata, 
MMC_RXFRAMECOUNT_GB_LO); 3029 3030 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) 3031 stats->rxoctetcount_gb += 3032 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); 3033 3034 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) 3035 stats->rxoctetcount_g += 3036 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); 3037 3038 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) 3039 stats->rxbroadcastframes_g += 3040 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); 3041 3042 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) 3043 stats->rxmulticastframes_g += 3044 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); 3045 3046 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) 3047 stats->rxcrcerror += 3048 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); 3049 3050 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) 3051 stats->rxrunterror += 3052 xgbe_mmc_read(pdata, MMC_RXRUNTERROR); 3053 3054 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) 3055 stats->rxjabbererror += 3056 xgbe_mmc_read(pdata, MMC_RXJABBERERROR); 3057 3058 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) 3059 stats->rxundersize_g += 3060 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); 3061 3062 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) 3063 stats->rxoversize_g += 3064 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); 3065 3066 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) 3067 stats->rx64octets_gb += 3068 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); 3069 3070 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) 3071 stats->rx65to127octets_gb += 3072 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); 3073 3074 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) 3075 stats->rx128to255octets_gb += 3076 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); 3077 3078 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) 3079 stats->rx256to511octets_gb += 3080 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); 3081 3082 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) 3083 stats->rx512to1023octets_gb += 3084 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); 3085 3086 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) 3087 stats->rx1024tomaxoctets_gb += 3088 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 3089 3090 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) 3091 stats->rxunicastframes_g += 3092 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); 3093 3094 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) 3095 stats->rxlengtherror += 3096 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); 3097 3098 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) 3099 stats->rxoutofrangetype += 3100 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); 3101 3102 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) 3103 stats->rxpauseframes += 3104 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); 3105 3106 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) 3107 stats->rxfifooverflow += 3108 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); 3109 3110 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) 3111 stats->rxvlanframes_gb += 3112 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); 3113 3114 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) 3115 stats->rxwatchdogerror += 3116 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); 3117 } 3118 3119 static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) 3120 { 3121 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 3122 3123 /* Freeze counters */ 3124 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); 3125 3126 stats->txoctetcount_gb += 3127 
xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); 3128 3129 stats->txframecount_gb += 3130 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); 3131 3132 stats->txbroadcastframes_g += 3133 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); 3134 3135 stats->txmulticastframes_g += 3136 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); 3137 3138 stats->tx64octets_gb += 3139 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); 3140 3141 stats->tx65to127octets_gb += 3142 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); 3143 3144 stats->tx128to255octets_gb += 3145 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); 3146 3147 stats->tx256to511octets_gb += 3148 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); 3149 3150 stats->tx512to1023octets_gb += 3151 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); 3152 3153 stats->tx1024tomaxoctets_gb += 3154 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 3155 3156 stats->txunicastframes_gb += 3157 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); 3158 3159 stats->txmulticastframes_gb += 3160 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 3161 3162 stats->txbroadcastframes_g += 3163 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 3164 3165 stats->txunderflowerror += 3166 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); 3167 3168 stats->txoctetcount_g += 3169 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); 3170 3171 stats->txframecount_g += 3172 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); 3173 3174 stats->txpauseframes += 3175 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); 3176 3177 stats->txvlanframes_g += 3178 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); 3179 3180 stats->rxframecount_gb += 3181 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); 3182 3183 stats->rxoctetcount_gb += 3184 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); 3185 3186 stats->rxoctetcount_g += 3187 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); 3188 3189 stats->rxbroadcastframes_g += 3190 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); 3191 3192 stats->rxmulticastframes_g += 3193 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); 3194 3195 stats->rxcrcerror += 3196 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); 3197 3198 stats->rxrunterror += 3199 xgbe_mmc_read(pdata, MMC_RXRUNTERROR); 3200 3201 stats->rxjabbererror += 3202 xgbe_mmc_read(pdata, MMC_RXJABBERERROR); 3203 3204 stats->rxundersize_g += 3205 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); 3206 3207 stats->rxoversize_g += 3208 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); 3209 3210 stats->rx64octets_gb += 3211 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); 3212 3213 stats->rx65to127octets_gb += 3214 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); 3215 3216 stats->rx128to255octets_gb += 3217 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); 3218 3219 stats->rx256to511octets_gb += 3220 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); 3221 3222 stats->rx512to1023octets_gb += 3223 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); 3224 3225 stats->rx1024tomaxoctets_gb += 3226 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 3227 3228 stats->rxunicastframes_g += 3229 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); 3230 3231 stats->rxlengtherror += 3232 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); 3233 3234 stats->rxoutofrangetype += 3235 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); 3236 3237 stats->rxpauseframes += 3238 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); 3239 3240 stats->rxfifooverflow += 3241 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); 3242 3243 stats->rxvlanframes_gb += 3244 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); 3245 3246 stats->rxwatchdogerror += 3247 
xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); 3248 3249 /* Un-freeze counters */ 3250 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); 3251 } 3252 3253 static void xgbe_config_mmc(struct xgbe_prv_data *pdata) 3254 { 3255 /* Set counters to reset on read */ 3256 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1); 3257 3258 /* Reset the counters */ 3259 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); 3260 } 3261 3262 static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, 3263 unsigned int queue) 3264 { 3265 unsigned int tx_status; 3266 unsigned long tx_timeout; 3267 3268 /* The Tx engine cannot be stopped if it is actively processing 3269 * packets. Wait for the Tx queue to empty the Tx fifo. Don't 3270 * wait forever though... 3271 */ 3272 tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); 3273 while (time_before(jiffies, tx_timeout)) { 3274 tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR); 3275 if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) && 3276 (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0)) 3277 break; 3278 3279 usleep_range(500, 1000); 3280 } 3281 3282 if (!time_before(jiffies, tx_timeout)) 3283 netdev_info(pdata->netdev, 3284 "timed out waiting for Tx queue %u to empty\n", 3285 queue); 3286 } 3287 3288 static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, 3289 unsigned int queue) 3290 { 3291 unsigned int tx_dsr, tx_pos, tx_qidx; 3292 unsigned int tx_status; 3293 unsigned long tx_timeout; 3294 3295 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20) 3296 return xgbe_txq_prepare_tx_stop(pdata, queue); 3297 3298 /* Calculate the status register to read and the position within */ 3299 if (queue < DMA_DSRX_FIRST_QUEUE) { 3300 tx_dsr = DMA_DSR0; 3301 tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START; 3302 } else { 3303 tx_qidx = queue - DMA_DSRX_FIRST_QUEUE; 3304 3305 tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); 3306 tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + 3307 DMA_DSRX_TPS_START; 3308 } 3309 3310 /* The Tx engine cannot be stopped if it is actively processing 3311 * descriptors. Wait for the Tx engine to enter the stopped or 3312 * suspended state. Don't wait forever though... 
3313 */ 3314 tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); 3315 while (time_before(jiffies, tx_timeout)) { 3316 tx_status = XGMAC_IOREAD(pdata, tx_dsr); 3317 tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); 3318 if ((tx_status == DMA_TPS_STOPPED) || 3319 (tx_status == DMA_TPS_SUSPENDED)) 3320 break; 3321 3322 usleep_range(500, 1000); 3323 } 3324 3325 if (!time_before(jiffies, tx_timeout)) 3326 netdev_info(pdata->netdev, 3327 "timed out waiting for Tx DMA channel %u to stop\n", 3328 queue); 3329 } 3330 3331 static void xgbe_enable_tx(struct xgbe_prv_data *pdata) 3332 { 3333 unsigned int i; 3334 3335 /* Enable each Tx DMA channel */ 3336 for (i = 0; i < pdata->channel_count; i++) { 3337 if (!pdata->channel[i]->tx_ring) 3338 break; 3339 3340 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); 3341 } 3342 3343 /* Enable each Tx queue */ 3344 for (i = 0; i < pdata->tx_q_count; i++) 3345 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 3346 MTL_Q_ENABLED); 3347 3348 /* Enable MAC Tx */ 3349 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); 3350 } 3351 3352 static void xgbe_disable_tx(struct xgbe_prv_data *pdata) 3353 { 3354 unsigned int i; 3355 3356 /* Prepare for Tx DMA channel stop */ 3357 for (i = 0; i < pdata->tx_q_count; i++) 3358 xgbe_prepare_tx_stop(pdata, i); 3359 3360 /* Disable MAC Tx */ 3361 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); 3362 3363 /* Disable each Tx queue */ 3364 for (i = 0; i < pdata->tx_q_count; i++) 3365 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); 3366 3367 /* Disable each Tx DMA channel */ 3368 for (i = 0; i < pdata->channel_count; i++) { 3369 if (!pdata->channel[i]->tx_ring) 3370 break; 3371 3372 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); 3373 } 3374 } 3375 3376 static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, 3377 unsigned int queue) 3378 { 3379 unsigned int rx_status; 3380 unsigned long rx_timeout; 3381 3382 /* The Rx engine cannot be stopped if it is actively processing 3383 * packets. Wait for the Rx queue to empty the Rx fifo. Don't 3384 * wait forever though... 
3385 */ 3386 rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); 3387 while (time_before(jiffies, rx_timeout)) { 3388 rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); 3389 if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && 3390 (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) 3391 break; 3392 3393 usleep_range(500, 1000); 3394 } 3395 3396 if (!time_before(jiffies, rx_timeout)) 3397 netdev_info(pdata->netdev, 3398 "timed out waiting for Rx queue %u to empty\n", 3399 queue); 3400 } 3401 3402 static void xgbe_enable_rx(struct xgbe_prv_data *pdata) 3403 { 3404 unsigned int reg_val, i; 3405 3406 /* Enable each Rx DMA channel */ 3407 for (i = 0; i < pdata->channel_count; i++) { 3408 if (!pdata->channel[i]->rx_ring) 3409 break; 3410 3411 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); 3412 } 3413 3414 /* Enable each Rx queue */ 3415 reg_val = 0; 3416 for (i = 0; i < pdata->rx_q_count; i++) 3417 reg_val |= (0x02 << (i << 1)); 3418 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); 3419 3420 /* Enable MAC Rx */ 3421 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); 3422 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); 3423 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); 3424 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); 3425 } 3426 3427 static void xgbe_disable_rx(struct xgbe_prv_data *pdata) 3428 { 3429 unsigned int i; 3430 3431 /* Disable MAC Rx */ 3432 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); 3433 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); 3434 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); 3435 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); 3436 3437 /* Prepare for Rx DMA channel stop */ 3438 for (i = 0; i < pdata->rx_q_count; i++) 3439 xgbe_prepare_rx_stop(pdata, i); 3440 3441 /* Disable each Rx queue */ 3442 XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); 3443 3444 /* Disable each Rx DMA channel */ 3445 for (i = 0; i < pdata->channel_count; i++) { 3446 if (!pdata->channel[i]->rx_ring) 3447 break; 3448 3449 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); 3450 } 3451 } 3452 3453 static void xgbe_powerup_tx(struct xgbe_prv_data *pdata) 3454 { 3455 unsigned int i; 3456 3457 /* Enable each Tx DMA channel */ 3458 for (i = 0; i < pdata->channel_count; i++) { 3459 if (!pdata->channel[i]->tx_ring) 3460 break; 3461 3462 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); 3463 } 3464 3465 /* Enable MAC Tx */ 3466 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); 3467 } 3468 3469 static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata) 3470 { 3471 unsigned int i; 3472 3473 /* Prepare for Tx DMA channel stop */ 3474 for (i = 0; i < pdata->tx_q_count; i++) 3475 xgbe_prepare_tx_stop(pdata, i); 3476 3477 /* Disable MAC Tx */ 3478 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); 3479 3480 /* Disable each Tx DMA channel */ 3481 for (i = 0; i < pdata->channel_count; i++) { 3482 if (!pdata->channel[i]->tx_ring) 3483 break; 3484 3485 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); 3486 } 3487 } 3488 3489 static void xgbe_powerup_rx(struct xgbe_prv_data *pdata) 3490 { 3491 unsigned int i; 3492 3493 /* Enable each Rx DMA channel */ 3494 for (i = 0; i < pdata->channel_count; i++) { 3495 if (!pdata->channel[i]->rx_ring) 3496 break; 3497 3498 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); 3499 } 3500 } 3501 3502 static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata) 3503 { 3504 unsigned int i; 3505 3506 /* Disable each Rx DMA channel */ 3507 for (i = 0; i < pdata->channel_count; i++) { 3508 if (!pdata->channel[i]->rx_ring) 3509 break; 3510 3511 
XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); 3512 } 3513 } 3514 3515 static int xgbe_init(struct xgbe_prv_data *pdata) 3516 { 3517 struct xgbe_desc_if *desc_if = &pdata->desc_if; 3518 int ret; 3519 3520 DBGPR("-->xgbe_init\n"); 3521 3522 /* Flush Tx queues */ 3523 ret = xgbe_flush_tx_queues(pdata); 3524 if (ret) { 3525 netdev_err(pdata->netdev, "error flushing TX queues\n"); 3526 return ret; 3527 } 3528 3529 /* 3530 * Initialize DMA related features 3531 */ 3532 xgbe_config_dma_bus(pdata); 3533 xgbe_config_dma_cache(pdata); 3534 xgbe_config_osp_mode(pdata); 3535 xgbe_config_pbl_val(pdata); 3536 xgbe_config_rx_coalesce(pdata); 3537 xgbe_config_tx_coalesce(pdata); 3538 xgbe_config_rx_buffer_size(pdata); 3539 xgbe_config_tso_mode(pdata); 3540 3541 if (pdata->netdev->features & NETIF_F_RXCSUM) { 3542 xgbe_config_sph_mode(pdata); 3543 xgbe_config_rss(pdata); 3544 } 3545 3546 desc_if->wrapper_tx_desc_init(pdata); 3547 desc_if->wrapper_rx_desc_init(pdata); 3548 xgbe_enable_dma_interrupts(pdata); 3549 3550 /* 3551 * Initialize MTL related features 3552 */ 3553 xgbe_config_mtl_mode(pdata); 3554 xgbe_config_queue_mapping(pdata); 3555 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); 3556 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); 3557 xgbe_config_tx_threshold(pdata, pdata->tx_threshold); 3558 xgbe_config_rx_threshold(pdata, pdata->rx_threshold); 3559 xgbe_config_tx_fifo_size(pdata); 3560 xgbe_config_rx_fifo_size(pdata); 3561 /*TODO: Error Packet and undersized good Packet forwarding enable 3562 (FEP and FUP) 3563 */ 3564 xgbe_config_dcb_tc(pdata); 3565 xgbe_enable_mtl_interrupts(pdata); 3566 3567 /* 3568 * Initialize MAC related features 3569 */ 3570 xgbe_config_mac_address(pdata); 3571 xgbe_config_rx_mode(pdata); 3572 xgbe_config_jumbo_enable(pdata); 3573 xgbe_config_flow_control(pdata); 3574 xgbe_config_mac_speed(pdata); 3575 xgbe_config_checksum_offload(pdata); 3576 xgbe_config_vlan_support(pdata); 3577 xgbe_config_mmc(pdata); 3578 xgbe_enable_mac_interrupts(pdata); 3579 3580 /* 3581 * Initialize ECC related features 3582 */ 3583 xgbe_enable_ecc_interrupts(pdata); 3584 3585 DBGPR("<--xgbe_init\n"); 3586 3587 return 0; 3588 } 3589 3590 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) 3591 { 3592 DBGPR("-->xgbe_init_function_ptrs\n"); 3593 3594 hw_if->tx_complete = xgbe_tx_complete; 3595 3596 hw_if->set_mac_address = xgbe_set_mac_address; 3597 hw_if->config_rx_mode = xgbe_config_rx_mode; 3598 3599 hw_if->enable_rx_csum = xgbe_enable_rx_csum; 3600 hw_if->disable_rx_csum = xgbe_disable_rx_csum; 3601 3602 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; 3603 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; 3604 hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; 3605 hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; 3606 hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; 3607 3608 hw_if->read_mmd_regs = xgbe_read_mmd_regs; 3609 hw_if->write_mmd_regs = xgbe_write_mmd_regs; 3610 3611 hw_if->set_speed = xgbe_set_speed; 3612 3613 hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode; 3614 hw_if->read_ext_mii_regs_c22 = xgbe_read_ext_mii_regs_c22; 3615 hw_if->write_ext_mii_regs_c22 = xgbe_write_ext_mii_regs_c22; 3616 hw_if->read_ext_mii_regs_c45 = xgbe_read_ext_mii_regs_c45; 3617 hw_if->write_ext_mii_regs_c45 = xgbe_write_ext_mii_regs_c45; 3618 3619 hw_if->set_gpio = xgbe_set_gpio; 3620 hw_if->clr_gpio = xgbe_clr_gpio; 3621 3622 hw_if->enable_tx = xgbe_enable_tx; 3623 hw_if->disable_tx = xgbe_disable_tx; 3624 
hw_if->enable_rx = xgbe_enable_rx; 3625 hw_if->disable_rx = xgbe_disable_rx; 3626 3627 hw_if->powerup_tx = xgbe_powerup_tx; 3628 hw_if->powerdown_tx = xgbe_powerdown_tx; 3629 hw_if->powerup_rx = xgbe_powerup_rx; 3630 hw_if->powerdown_rx = xgbe_powerdown_rx; 3631 3632 hw_if->dev_xmit = xgbe_dev_xmit; 3633 hw_if->dev_read = xgbe_dev_read; 3634 hw_if->enable_int = xgbe_enable_int; 3635 hw_if->disable_int = xgbe_disable_int; 3636 hw_if->init = xgbe_init; 3637 hw_if->exit = xgbe_exit; 3638 3639 /* Descriptor related Sequences have to be initialized here */ 3640 hw_if->tx_desc_init = xgbe_tx_desc_init; 3641 hw_if->rx_desc_init = xgbe_rx_desc_init; 3642 hw_if->tx_desc_reset = xgbe_tx_desc_reset; 3643 hw_if->rx_desc_reset = xgbe_rx_desc_reset; 3644 hw_if->is_last_desc = xgbe_is_last_desc; 3645 hw_if->is_context_desc = xgbe_is_context_desc; 3646 hw_if->tx_start_xmit = xgbe_tx_start_xmit; 3647 3648 /* For FLOW ctrl */ 3649 hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; 3650 hw_if->config_rx_flow_control = xgbe_config_rx_flow_control; 3651 3652 /* For RX coalescing */ 3653 hw_if->config_rx_coalesce = xgbe_config_rx_coalesce; 3654 hw_if->config_tx_coalesce = xgbe_config_tx_coalesce; 3655 hw_if->usec_to_riwt = xgbe_usec_to_riwt; 3656 hw_if->riwt_to_usec = xgbe_riwt_to_usec; 3657 3658 /* For RX and TX threshold config */ 3659 hw_if->config_rx_threshold = xgbe_config_rx_threshold; 3660 hw_if->config_tx_threshold = xgbe_config_tx_threshold; 3661 3662 /* For RX and TX Store and Forward Mode config */ 3663 hw_if->config_rsf_mode = xgbe_config_rsf_mode; 3664 hw_if->config_tsf_mode = xgbe_config_tsf_mode; 3665 3666 /* For TX DMA Operating on Second Frame config */ 3667 hw_if->config_osp_mode = xgbe_config_osp_mode; 3668 3669 /* For MMC statistics support */ 3670 hw_if->tx_mmc_int = xgbe_tx_mmc_int; 3671 hw_if->rx_mmc_int = xgbe_rx_mmc_int; 3672 hw_if->read_mmc_stats = xgbe_read_mmc_stats; 3673 3674 /* For PTP config */ 3675 hw_if->config_tstamp = xgbe_config_tstamp; 3676 hw_if->update_tstamp_addend = xgbe_update_tstamp_addend; 3677 hw_if->set_tstamp_time = xgbe_set_tstamp_time; 3678 hw_if->get_tstamp_time = xgbe_get_tstamp_time; 3679 hw_if->get_tx_tstamp = xgbe_get_tx_tstamp; 3680 3681 /* For Data Center Bridging config */ 3682 hw_if->config_tc = xgbe_config_tc; 3683 hw_if->config_dcb_tc = xgbe_config_dcb_tc; 3684 hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; 3685 3686 /* For Receive Side Scaling */ 3687 hw_if->enable_rss = xgbe_enable_rss; 3688 hw_if->disable_rss = xgbe_disable_rss; 3689 hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; 3690 hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; 3691 3692 /* For ECC */ 3693 hw_if->disable_ecc_ded = xgbe_disable_ecc_ded; 3694 hw_if->disable_ecc_sec = xgbe_disable_ecc_sec; 3695 3696 /* For VXLAN */ 3697 hw_if->enable_vxlan = xgbe_enable_vxlan; 3698 hw_if->disable_vxlan = xgbe_disable_vxlan; 3699 hw_if->set_vxlan_id = xgbe_set_vxlan_id; 3700 3701 /* For Split Header*/ 3702 hw_if->enable_sph = xgbe_config_sph_mode; 3703 hw_if->disable_sph = xgbe_disable_sph_mode; 3704 3705 DBGPR("<--xgbe_init_function_ptrs\n"); 3706 } 3707
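
/*
 * Illustrative usage (a sketch, not part of the driver): the rest of the
 * xgbe code fills in struct xgbe_hw_if through xgbe_init_function_ptrs_dev()
 * and then drives the hardware through those ops, in the same way that
 * xgbe_config_dcb_pfc() above calls pdata->hw_if.disable_rx()/enable_rx().
 * For example:
 *
 *	xgbe_init_function_ptrs_dev(&pdata->hw_if);
 *	ret = pdata->hw_if.init(pdata);
 *	if (ret)
 *		return ret;
 *	pdata->hw_if.enable_tx(pdata);
 *	pdata->hw_if.enable_rx(pdata);
 */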