// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
 * Copyright (c) 2014, Synopsys, Inc.
 * All rights reserved
 */

#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
#include <linux/pci.h>

#include "xgbe.h"
#include "xgbe-common.h"
#include "xgbe-smn.h"

static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
	return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
				      unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_usec_to_riwt\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   (usec * (system_clock_hz / 10^6)) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	DBGPR("<--xgbe_usec_to_riwt\n");

	return ret;
}

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
				      unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	DBGPR("-->xgbe_riwt_to_usec\n");

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   (riwt * 256) / (system_clock_hz / 10^6)
	 */
	ret = (riwt * 256) / (rate / 1000000);

	DBGPR("<--xgbe_riwt_to_usec\n");

	return ret;
}

static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
	unsigned int pblx8, pbl;
	unsigned int i;

	pblx8 = DMA_PBL_X8_DISABLE;
	pbl = pdata->pbl;

	if (pdata->pbl > 32) {
		pblx8 = DMA_PBL_X8_ENABLE;
		pbl >>= 3;
	}

	for (i = 0; i < pdata->channel_count; i++) {
		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
				       pblx8);

		if (pdata->channel[i]->tx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
					       PBL, pbl);

		if (pdata->channel[i]->rx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
					       PBL, pbl);
	}

	return 0;
}

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
				       pdata->tx_osp_mode);
	}

	return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
				    unsigned int val)
{
	unsigned int i;

	for (i = 0; i <
pdata->tx_q_count; i++) 152 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); 153 154 return 0; 155 } 156 157 static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata) 158 { 159 unsigned int i; 160 161 for (i = 0; i < pdata->channel_count; i++) { 162 if (!pdata->channel[i]->rx_ring) 163 break; 164 165 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT, 166 pdata->rx_riwt); 167 } 168 169 return 0; 170 } 171 172 static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata) 173 { 174 return 0; 175 } 176 177 static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata) 178 { 179 unsigned int i; 180 181 for (i = 0; i < pdata->channel_count; i++) { 182 if (!pdata->channel[i]->rx_ring) 183 break; 184 185 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ, 186 pdata->rx_buf_size); 187 } 188 } 189 190 static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata) 191 { 192 unsigned int i; 193 194 for (i = 0; i < pdata->channel_count; i++) { 195 if (!pdata->channel[i]->tx_ring) 196 break; 197 198 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1); 199 } 200 } 201 202 static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) 203 { 204 unsigned int i; 205 206 for (i = 0; i < pdata->channel_count; i++) { 207 if (!pdata->channel[i]->rx_ring) 208 break; 209 210 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1); 211 } 212 213 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); 214 } 215 216 static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata) 217 { 218 unsigned int i; 219 220 for (i = 0; i < pdata->channel_count; i++) { 221 if (!pdata->channel[i]->rx_ring) 222 break; 223 224 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0); 225 } 226 } 227 228 static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, 229 unsigned int index, unsigned int val) 230 { 231 unsigned int wait; 232 int ret = 0; 233 234 mutex_lock(&pdata->rss_mutex); 235 236 if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) { 237 ret = -EBUSY; 238 goto unlock; 239 } 240 241 XGMAC_IOWRITE(pdata, MAC_RSSDR, val); 242 243 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); 244 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); 245 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); 246 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); 247 248 wait = 1000; 249 while (wait--) { 250 if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) 251 goto unlock; 252 253 usleep_range(1000, 1500); 254 } 255 256 ret = -EBUSY; 257 258 unlock: 259 mutex_unlock(&pdata->rss_mutex); 260 261 return ret; 262 } 263 264 static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata) 265 { 266 unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); 267 unsigned int *key = (unsigned int *)&pdata->rss_key; 268 int ret; 269 270 while (key_regs--) { 271 ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE, 272 key_regs, *key++); 273 if (ret) 274 return ret; 275 } 276 277 return 0; 278 } 279 280 static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata) 281 { 282 unsigned int i; 283 int ret; 284 285 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { 286 ret = xgbe_write_rss_reg(pdata, 287 XGBE_RSS_LOOKUP_TABLE_TYPE, i, 288 pdata->rss_table[i]); 289 if (ret) 290 return ret; 291 } 292 293 return 0; 294 } 295 296 static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key) 297 { 298 memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); 299 300 return xgbe_write_rss_hash_key(pdata); 301 } 302 303 static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, 
304 const u32 *table) 305 { 306 unsigned int i; 307 308 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) 309 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]); 310 311 return xgbe_write_rss_lookup_table(pdata); 312 } 313 314 static int xgbe_enable_rss(struct xgbe_prv_data *pdata) 315 { 316 int ret; 317 318 if (!pdata->hw_feat.rss) 319 return -EOPNOTSUPP; 320 321 /* Program the hash key */ 322 ret = xgbe_write_rss_hash_key(pdata); 323 if (ret) 324 return ret; 325 326 /* Program the lookup table */ 327 ret = xgbe_write_rss_lookup_table(pdata); 328 if (ret) 329 return ret; 330 331 /* Set the RSS options */ 332 XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); 333 334 /* Enable RSS */ 335 XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); 336 337 return 0; 338 } 339 340 static int xgbe_disable_rss(struct xgbe_prv_data *pdata) 341 { 342 if (!pdata->hw_feat.rss) 343 return -EOPNOTSUPP; 344 345 XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); 346 347 return 0; 348 } 349 350 static void xgbe_config_rss(struct xgbe_prv_data *pdata) 351 { 352 int ret; 353 354 if (!pdata->hw_feat.rss) 355 return; 356 357 if (pdata->netdev->features & NETIF_F_RXHASH) 358 ret = xgbe_enable_rss(pdata); 359 else 360 ret = xgbe_disable_rss(pdata); 361 362 if (ret) 363 netdev_err(pdata->netdev, 364 "error configuring RSS, RSS disabled\n"); 365 } 366 367 static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata, 368 unsigned int queue) 369 { 370 unsigned int prio, tc; 371 372 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { 373 /* Does this queue handle the priority? */ 374 if (pdata->prio2q_map[prio] != queue) 375 continue; 376 377 /* Get the Traffic Class for this priority */ 378 tc = pdata->ets->prio_tc[prio]; 379 380 /* Check if PFC is enabled for this traffic class */ 381 if (pdata->pfc->pfc_en & (1 << tc)) 382 return true; 383 } 384 385 return false; 386 } 387 388 static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata) 389 { 390 /* Program the VXLAN port */ 391 XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port); 392 393 netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n", 394 pdata->vxlan_port); 395 } 396 397 static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata) 398 { 399 if (!pdata->hw_feat.vxn) 400 return; 401 402 /* Program the VXLAN port */ 403 xgbe_set_vxlan_id(pdata); 404 405 /* Allow for IPv6/UDP zero-checksum VXLAN packets */ 406 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1); 407 408 /* Enable VXLAN tunneling mode */ 409 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0); 410 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1); 411 412 netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n"); 413 } 414 415 static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata) 416 { 417 if (!pdata->hw_feat.vxn) 418 return; 419 420 /* Disable tunneling mode */ 421 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0); 422 423 /* Clear IPv6/UDP zero-checksum VXLAN packets setting */ 424 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0); 425 426 /* Clear the VXLAN port */ 427 XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0); 428 429 netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n"); 430 } 431 432 static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata) 433 { 434 unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 435 436 /* From MAC ver 30H the TFCR is per priority, instead of per queue */ 437 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) 438 return max_q_count; 439 else 440 return min_t(unsigned int, pdata->tx_q_count, max_q_count); 441 } 
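
/* Illustrative sketch (not part of the driver control flow): the MAC Tx flow
 * control registers sit at a fixed stride, so the enable/disable helpers that
 * follow simply walk one register slot per queue (or per priority on MAC
 * versions 0x30 and later, per xgbe_get_fc_queue_count() above). With a
 * hypothetical "enable" flag this is roughly:
 *
 *	q_count = xgbe_get_fc_queue_count(pdata);
 *	for (i = 0, reg = MAC_Q0TFCR; i < q_count; i++, reg += MAC_QTFCR_INC) {
 *		reg_val = XGMAC_IOREAD(pdata, reg);
 *		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, enable);
 *		XGMAC_IOWRITE(pdata, reg, reg_val);
 *	}
 */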
442 443 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) 444 { 445 unsigned int reg, reg_val; 446 unsigned int i, q_count; 447 448 /* Clear MTL flow control */ 449 for (i = 0; i < pdata->rx_q_count; i++) 450 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); 451 452 /* Clear MAC flow control */ 453 q_count = xgbe_get_fc_queue_count(pdata); 454 reg = MAC_Q0TFCR; 455 for (i = 0; i < q_count; i++) { 456 reg_val = XGMAC_IOREAD(pdata, reg); 457 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); 458 XGMAC_IOWRITE(pdata, reg, reg_val); 459 460 reg += MAC_QTFCR_INC; 461 } 462 463 return 0; 464 } 465 466 static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) 467 { 468 struct ieee_pfc *pfc = pdata->pfc; 469 struct ieee_ets *ets = pdata->ets; 470 unsigned int reg, reg_val; 471 unsigned int i, q_count; 472 473 /* Set MTL flow control */ 474 for (i = 0; i < pdata->rx_q_count; i++) { 475 unsigned int ehfc = 0; 476 477 if (pdata->rx_rfd[i]) { 478 /* Flow control thresholds are established */ 479 if (pfc && ets) { 480 if (xgbe_is_pfc_queue(pdata, i)) 481 ehfc = 1; 482 } else { 483 ehfc = 1; 484 } 485 } 486 487 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); 488 489 netif_dbg(pdata, drv, pdata->netdev, 490 "flow control %s for RXq%u\n", 491 ehfc ? "enabled" : "disabled", i); 492 } 493 494 /* Set MAC flow control */ 495 q_count = xgbe_get_fc_queue_count(pdata); 496 reg = MAC_Q0TFCR; 497 for (i = 0; i < q_count; i++) { 498 reg_val = XGMAC_IOREAD(pdata, reg); 499 500 /* Enable transmit flow control */ 501 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); 502 /* Set pause time */ 503 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); 504 505 XGMAC_IOWRITE(pdata, reg, reg_val); 506 507 reg += MAC_QTFCR_INC; 508 } 509 510 return 0; 511 } 512 513 static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata) 514 { 515 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); 516 517 return 0; 518 } 519 520 static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) 521 { 522 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); 523 524 return 0; 525 } 526 527 static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) 528 { 529 struct ieee_pfc *pfc = pdata->pfc; 530 531 if (pdata->tx_pause || (pfc && pfc->pfc_en)) 532 xgbe_enable_tx_flow_control(pdata); 533 else 534 xgbe_disable_tx_flow_control(pdata); 535 536 return 0; 537 } 538 539 static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) 540 { 541 struct ieee_pfc *pfc = pdata->pfc; 542 543 if (pdata->rx_pause || (pfc && pfc->pfc_en)) 544 xgbe_enable_rx_flow_control(pdata); 545 else 546 xgbe_disable_rx_flow_control(pdata); 547 548 return 0; 549 } 550 551 static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) 552 { 553 struct ieee_pfc *pfc = pdata->pfc; 554 555 xgbe_config_tx_flow_control(pdata); 556 xgbe_config_rx_flow_control(pdata); 557 558 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 559 (pfc && pfc->pfc_en) ? 
1 : 0); 560 } 561 562 static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) 563 { 564 struct xgbe_channel *channel; 565 unsigned int i, ver; 566 567 /* Set the interrupt mode if supported */ 568 if (pdata->channel_irq_mode) 569 XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM, 570 pdata->channel_irq_mode); 571 572 ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); 573 574 for (i = 0; i < pdata->channel_count; i++) { 575 channel = pdata->channel[i]; 576 577 /* Clear all the interrupts which are set */ 578 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, 579 XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); 580 581 /* Clear all interrupt enable bits */ 582 channel->curr_ier = 0; 583 584 /* Enable following interrupts 585 * NIE - Normal Interrupt Summary Enable 586 * AIE - Abnormal Interrupt Summary Enable 587 * FBEE - Fatal Bus Error Enable 588 */ 589 if (ver < 0x21) { 590 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1); 591 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1); 592 } else { 593 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1); 594 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1); 595 } 596 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); 597 598 if (channel->tx_ring) { 599 /* Enable the following Tx interrupts 600 * TIE - Transmit Interrupt Enable (unless using 601 * per channel interrupts in edge triggered 602 * mode) 603 */ 604 if (!pdata->per_channel_irq || pdata->channel_irq_mode) 605 XGMAC_SET_BITS(channel->curr_ier, 606 DMA_CH_IER, TIE, 1); 607 } 608 if (channel->rx_ring) { 609 /* Enable following Rx interrupts 610 * RBUE - Receive Buffer Unavailable Enable 611 * RIE - Receive Interrupt Enable (unless using 612 * per channel interrupts in edge triggered 613 * mode) 614 */ 615 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); 616 if (!pdata->per_channel_irq || pdata->channel_irq_mode) 617 XGMAC_SET_BITS(channel->curr_ier, 618 DMA_CH_IER, RIE, 1); 619 } 620 621 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); 622 } 623 } 624 625 static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) 626 { 627 unsigned int mtl_q_isr; 628 unsigned int q_count, i; 629 630 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); 631 for (i = 0; i < q_count; i++) { 632 /* Clear all the interrupts which are set */ 633 mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR); 634 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr); 635 636 /* No MTL interrupts to be enabled */ 637 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0); 638 } 639 } 640 641 static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) 642 { 643 unsigned int mac_ier = 0; 644 645 /* Enable Timestamp interrupt */ 646 XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1); 647 648 XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); 649 650 /* Enable all counter interrupts */ 651 XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff); 652 XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff); 653 654 /* Enable MDIO single command completion interrupt */ 655 XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1); 656 } 657 658 static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata) 659 { 660 unsigned int ecc_isr, ecc_ier = 0; 661 662 if (!pdata->vdata->ecc_support) 663 return; 664 665 /* Clear all the interrupts which are set */ 666 ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR); 667 XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr); 668 669 /* Enable ECC interrupts */ 670 XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1); 671 XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1); 672 XP_SET_BITS(ecc_ier, 
XP_ECC_IER, RX_DED, 1); 673 XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1); 674 XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1); 675 XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1); 676 677 XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier); 678 } 679 680 static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata) 681 { 682 unsigned int ecc_ier; 683 684 ecc_ier = XP_IOREAD(pdata, XP_ECC_IER); 685 686 /* Disable ECC DED interrupts */ 687 XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0); 688 XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0); 689 XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0); 690 691 XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier); 692 } 693 694 static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata, 695 enum xgbe_ecc_sec sec) 696 { 697 unsigned int ecc_ier; 698 699 ecc_ier = XP_IOREAD(pdata, XP_ECC_IER); 700 701 /* Disable ECC SEC interrupt */ 702 switch (sec) { 703 case XGBE_ECC_SEC_TX: 704 XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0); 705 break; 706 case XGBE_ECC_SEC_RX: 707 XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0); 708 break; 709 case XGBE_ECC_SEC_DESC: 710 XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0); 711 break; 712 } 713 714 XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier); 715 } 716 717 static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed) 718 { 719 unsigned int ss; 720 721 switch (speed) { 722 case SPEED_10: 723 ss = 0x07; 724 break; 725 case SPEED_1000: 726 ss = 0x03; 727 break; 728 case SPEED_2500: 729 ss = 0x02; 730 break; 731 case SPEED_10000: 732 ss = 0x00; 733 break; 734 default: 735 return -EINVAL; 736 } 737 738 if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss) 739 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss); 740 741 return 0; 742 } 743 744 static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) 745 { 746 /* Put the VLAN tag in the Rx descriptor */ 747 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); 748 749 /* Don't check the VLAN type */ 750 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); 751 752 /* Check only C-TAG (0x8100) packets */ 753 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); 754 755 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ 756 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); 757 758 /* Enable VLAN tag stripping */ 759 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); 760 761 return 0; 762 } 763 764 static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) 765 { 766 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); 767 768 return 0; 769 } 770 771 static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) 772 { 773 /* Enable VLAN filtering */ 774 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); 775 776 /* Enable VLAN Hash Table filtering */ 777 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); 778 779 /* Disable VLAN tag inverse matching */ 780 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); 781 782 /* Only filter on the lower 12-bits of the VLAN tag */ 783 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); 784 785 /* In order for the VLAN Hash Table filtering to be effective, 786 * the VLAN tag identifier in the VLAN Tag Register must not 787 * be zero. Set the VLAN tag identifier to "1" to enable the 788 * VLAN Hash Table filtering. This implies that a VLAN tag of 789 * 1 will always pass filtering. 
790 */ 791 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); 792 793 return 0; 794 } 795 796 static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) 797 { 798 /* Disable VLAN filtering */ 799 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); 800 801 return 0; 802 } 803 804 static u32 xgbe_vid_crc32_le(__le16 vid_le) 805 { 806 u32 crc = ~0; 807 u32 temp = 0; 808 unsigned char *data = (unsigned char *)&vid_le; 809 unsigned char data_byte = 0; 810 int i, bits; 811 812 bits = get_bitmask_order(VLAN_VID_MASK); 813 for (i = 0; i < bits; i++) { 814 if ((i % 8) == 0) 815 data_byte = data[i / 8]; 816 817 temp = ((crc & 1) ^ data_byte) & 1; 818 crc >>= 1; 819 data_byte >>= 1; 820 821 if (temp) 822 crc ^= CRC32_POLY_LE; 823 } 824 825 return crc; 826 } 827 828 static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) 829 { 830 u32 crc; 831 u16 vid; 832 __le16 vid_le; 833 u16 vlan_hash_table = 0; 834 835 /* Generate the VLAN Hash Table value */ 836 for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { 837 /* Get the CRC32 value of the VLAN ID */ 838 vid_le = cpu_to_le16(vid); 839 crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; 840 841 vlan_hash_table |= (1 << crc); 842 } 843 844 /* Set the VLAN Hash Table filtering register */ 845 XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); 846 847 return 0; 848 } 849 850 static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, 851 unsigned int enable) 852 { 853 unsigned int val = enable ? 1 : 0; 854 855 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val) 856 return 0; 857 858 netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n", 859 enable ? "entering" : "leaving"); 860 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val); 861 862 /* Hardware will still perform VLAN filtering in promiscuous mode */ 863 if (enable) { 864 xgbe_disable_rx_vlan_filtering(pdata); 865 } else { 866 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) 867 xgbe_enable_rx_vlan_filtering(pdata); 868 } 869 870 return 0; 871 } 872 873 static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, 874 unsigned int enable) 875 { 876 unsigned int val = enable ? 1 : 0; 877 878 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val) 879 return 0; 880 881 netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n", 882 enable ? 
"entering" : "leaving"); 883 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val); 884 885 return 0; 886 } 887 888 static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata, 889 struct netdev_hw_addr *ha, unsigned int *mac_reg) 890 { 891 unsigned int mac_addr_hi, mac_addr_lo; 892 u8 *mac_addr; 893 894 mac_addr_lo = 0; 895 mac_addr_hi = 0; 896 897 if (ha) { 898 mac_addr = (u8 *)&mac_addr_lo; 899 mac_addr[0] = ha->addr[0]; 900 mac_addr[1] = ha->addr[1]; 901 mac_addr[2] = ha->addr[2]; 902 mac_addr[3] = ha->addr[3]; 903 mac_addr = (u8 *)&mac_addr_hi; 904 mac_addr[0] = ha->addr[4]; 905 mac_addr[1] = ha->addr[5]; 906 907 netif_dbg(pdata, drv, pdata->netdev, 908 "adding mac address %pM at %#x\n", 909 ha->addr, *mac_reg); 910 911 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); 912 } 913 914 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi); 915 *mac_reg += MAC_MACA_INC; 916 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo); 917 *mac_reg += MAC_MACA_INC; 918 } 919 920 static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata) 921 { 922 struct net_device *netdev = pdata->netdev; 923 struct netdev_hw_addr *ha; 924 unsigned int mac_reg; 925 unsigned int addn_macs; 926 927 mac_reg = MAC_MACA1HR; 928 addn_macs = pdata->hw_feat.addn_mac; 929 930 if (netdev_uc_count(netdev) > addn_macs) { 931 xgbe_set_promiscuous_mode(pdata, 1); 932 } else { 933 netdev_for_each_uc_addr(ha, netdev) { 934 xgbe_set_mac_reg(pdata, ha, &mac_reg); 935 addn_macs--; 936 } 937 938 if (netdev_mc_count(netdev) > addn_macs) { 939 xgbe_set_all_multicast_mode(pdata, 1); 940 } else { 941 netdev_for_each_mc_addr(ha, netdev) { 942 xgbe_set_mac_reg(pdata, ha, &mac_reg); 943 addn_macs--; 944 } 945 } 946 } 947 948 /* Clear remaining additional MAC address entries */ 949 while (addn_macs--) 950 xgbe_set_mac_reg(pdata, NULL, &mac_reg); 951 } 952 953 static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata) 954 { 955 struct net_device *netdev = pdata->netdev; 956 struct netdev_hw_addr *ha; 957 unsigned int hash_reg; 958 unsigned int hash_table_shift, hash_table_count; 959 u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE]; 960 u32 crc; 961 unsigned int i; 962 963 hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7); 964 hash_table_count = pdata->hw_feat.hash_table_size / 32; 965 memset(hash_table, 0, sizeof(hash_table)); 966 967 /* Build the MAC Hash Table register values */ 968 netdev_for_each_uc_addr(ha, netdev) { 969 crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); 970 crc >>= hash_table_shift; 971 hash_table[crc >> 5] |= (1 << (crc & 0x1f)); 972 } 973 974 netdev_for_each_mc_addr(ha, netdev) { 975 crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)); 976 crc >>= hash_table_shift; 977 hash_table[crc >> 5] |= (1 << (crc & 0x1f)); 978 } 979 980 /* Set the MAC Hash Table registers */ 981 hash_reg = MAC_HTR0; 982 for (i = 0; i < hash_table_count; i++) { 983 XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]); 984 hash_reg += MAC_HTR_INC; 985 } 986 } 987 988 static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) 989 { 990 if (pdata->hw_feat.hash_table_size) 991 xgbe_set_mac_hash_table(pdata); 992 else 993 xgbe_set_mac_addn_addrs(pdata); 994 995 return 0; 996 } 997 998 static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr) 999 { 1000 unsigned int mac_addr_hi, mac_addr_lo; 1001 1002 mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); 1003 mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | 1004 (addr[1] << 8) | (addr[0] << 0); 1005 1006 XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi); 1007 XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo); 1008 
1009 return 0; 1010 } 1011 1012 static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata) 1013 { 1014 struct net_device *netdev = pdata->netdev; 1015 unsigned int pr_mode, am_mode; 1016 1017 pr_mode = ((netdev->flags & IFF_PROMISC) != 0); 1018 am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); 1019 1020 xgbe_set_promiscuous_mode(pdata, pr_mode); 1021 xgbe_set_all_multicast_mode(pdata, am_mode); 1022 1023 xgbe_add_mac_addresses(pdata); 1024 1025 return 0; 1026 } 1027 1028 static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) 1029 { 1030 unsigned int reg; 1031 1032 if (gpio > 15) 1033 return -EINVAL; 1034 1035 reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); 1036 1037 reg &= ~(1 << (gpio + 16)); 1038 XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); 1039 1040 return 0; 1041 } 1042 1043 static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) 1044 { 1045 unsigned int reg; 1046 1047 if (gpio > 15) 1048 return -EINVAL; 1049 1050 reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); 1051 1052 reg |= (1 << (gpio + 16)); 1053 XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); 1054 1055 return 0; 1056 } 1057 1058 static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata, 1059 int mmd_reg) 1060 { 1061 return (mmd_reg & XGBE_ADDR_C45) ? 1062 mmd_reg & ~XGBE_ADDR_C45 : 1063 (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 1064 } 1065 1066 static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata, 1067 unsigned int mmd_address, 1068 unsigned int *index, 1069 unsigned int *offset) 1070 { 1071 /* The PCS registers are accessed using mmio. The underlying 1072 * management interface uses indirect addressing to access the MMD 1073 * register sets. This requires accessing of the PCS register in two 1074 * phases, an address phase and a data phase. 1075 * 1076 * The mmio interface is based on 16-bit offsets and values. All 1077 * register offsets must therefore be adjusted by left shifting the 1078 * offset 1 bit and reading 16 bits of data. 1079 */ 1080 mmd_address <<= 1; 1081 *index = mmd_address & ~pdata->xpcs_window_mask; 1082 *offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); 1083 } 1084 1085 static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad, 1086 int mmd_reg) 1087 { 1088 unsigned int mmd_address, index, offset; 1089 u32 smn_address; 1090 int mmd_data; 1091 int ret; 1092 1093 mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); 1094 1095 xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset); 1096 1097 smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg; 1098 ret = amd_smn_write(0, smn_address, index); 1099 if (ret) 1100 return ret; 1101 1102 ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data); 1103 if (ret) 1104 return ret; 1105 1106 mmd_data = (offset % 4) ? 
FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) : 1107 FIELD_GET(XGBE_GEN_LO_MASK, mmd_data); 1108 1109 return mmd_data; 1110 } 1111 1112 static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad, 1113 int mmd_reg, int mmd_data) 1114 { 1115 unsigned int pci_mmd_data, hi_mask, lo_mask; 1116 unsigned int mmd_address, index, offset; 1117 struct pci_dev *dev; 1118 u32 smn_address; 1119 int ret; 1120 1121 dev = pdata->pcidev; 1122 mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); 1123 1124 xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset); 1125 1126 smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg; 1127 ret = amd_smn_write(0, smn_address, index); 1128 if (ret) { 1129 pci_err(dev, "Failed to write data 0x%x\n", index); 1130 return; 1131 } 1132 1133 ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data); 1134 if (ret) { 1135 pci_err(dev, "Failed to read data\n"); 1136 return; 1137 } 1138 1139 if (offset % 4) { 1140 hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data); 1141 lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data); 1142 } else { 1143 hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, 1144 FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data)); 1145 lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data); 1146 } 1147 1148 pci_mmd_data = hi_mask | lo_mask; 1149 1150 ret = amd_smn_write(0, smn_address, index); 1151 if (ret) { 1152 pci_err(dev, "Failed to write data 0x%x\n", index); 1153 return; 1154 } 1155 1156 ret = amd_smn_write(0, (pdata->smn_base + offset), pci_mmd_data); 1157 if (ret) { 1158 pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data); 1159 return; 1160 } 1161 } 1162 1163 static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, 1164 int mmd_reg) 1165 { 1166 unsigned int mmd_address, index, offset; 1167 unsigned long flags; 1168 int mmd_data; 1169 1170 mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); 1171 1172 xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset); 1173 1174 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1175 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); 1176 mmd_data = XPCS16_IOREAD(pdata, offset); 1177 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1178 1179 return mmd_data; 1180 } 1181 1182 static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, 1183 int mmd_reg, int mmd_data) 1184 { 1185 unsigned long flags; 1186 unsigned int mmd_address, index, offset; 1187 1188 mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); 1189 1190 xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset); 1191 1192 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1193 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); 1194 XPCS16_IOWRITE(pdata, offset, mmd_data); 1195 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1196 } 1197 1198 static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, 1199 int mmd_reg) 1200 { 1201 unsigned long flags; 1202 unsigned int mmd_address; 1203 int mmd_data; 1204 1205 mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); 1206 1207 /* The PCS registers are accessed using mmio. The underlying APB3 1208 * management interface uses indirect addressing to access the MMD 1209 * register sets. This requires accessing of the PCS register in two 1210 * phases, an address phase and a data phase. 1211 * 1212 * The mmio interface is based on 32-bit offsets and values. All 1213 * register offsets must therefore be adjusted by left shifting the 1214 * offset 2 bits and reading 32 bits of data. 
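	 *
	 * Worked example (illustrative only): for an mmd_address of 0x10012,
	 * the code below writes 0x100 (mmd_address >> 8) to the
	 * PCS_V1_WINDOW_SELECT register and then reads the 32-bit value at
	 * mmio offset 0x48, i.e. (mmd_address & 0xff) << 2.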
1215 */ 1216 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1217 XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); 1218 mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2); 1219 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1220 1221 return mmd_data; 1222 } 1223 1224 static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, 1225 int mmd_reg, int mmd_data) 1226 { 1227 unsigned int mmd_address; 1228 unsigned long flags; 1229 1230 mmd_address = xgbe_get_mmd_address(pdata, mmd_reg); 1231 1232 /* The PCS registers are accessed using mmio. The underlying APB3 1233 * management interface uses indirect addressing to access the MMD 1234 * register sets. This requires accessing of the PCS register in two 1235 * phases, an address phase and a data phase. 1236 * 1237 * The mmio interface is based on 32-bit offsets and values. All 1238 * register offsets must therefore be adjusted by left shifting the 1239 * offset 2 bits and writing 32 bits of data. 1240 */ 1241 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1242 XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); 1243 XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); 1244 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1245 } 1246 1247 static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, 1248 int mmd_reg) 1249 { 1250 switch (pdata->vdata->xpcs_access) { 1251 case XGBE_XPCS_ACCESS_V1: 1252 return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg); 1253 1254 case XGBE_XPCS_ACCESS_V2: 1255 default: 1256 return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg); 1257 1258 case XGBE_XPCS_ACCESS_V3: 1259 return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg); 1260 } 1261 } 1262 1263 static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, 1264 int mmd_reg, int mmd_data) 1265 { 1266 switch (pdata->vdata->xpcs_access) { 1267 case XGBE_XPCS_ACCESS_V1: 1268 return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data); 1269 1270 case XGBE_XPCS_ACCESS_V3: 1271 return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data); 1272 1273 case XGBE_XPCS_ACCESS_V2: 1274 default: 1275 return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data); 1276 } 1277 } 1278 1279 static unsigned int xgbe_create_mdio_sca_c22(int port, int reg) 1280 { 1281 unsigned int mdio_sca; 1282 1283 mdio_sca = 0; 1284 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); 1285 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); 1286 1287 return mdio_sca; 1288 } 1289 1290 static unsigned int xgbe_create_mdio_sca_c45(int port, unsigned int da, int reg) 1291 { 1292 unsigned int mdio_sca; 1293 1294 mdio_sca = 0; 1295 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); 1296 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); 1297 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); 1298 1299 return mdio_sca; 1300 } 1301 1302 static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, 1303 unsigned int mdio_sca, u16 val) 1304 { 1305 unsigned int mdio_sccd; 1306 1307 reinit_completion(&pdata->mdio_complete); 1308 1309 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1310 1311 mdio_sccd = 0; 1312 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val); 1313 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1); 1314 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); 1315 XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); 1316 1317 if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) { 1318 netdev_err(pdata->netdev, "mdio write operation timed out\n"); 1319 return -ETIMEDOUT; 1320 } 1321 1322 return 0; 1323 } 1324 1325 static int 
xgbe_write_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr, 1326 int reg, u16 val) 1327 { 1328 unsigned int mdio_sca; 1329 1330 mdio_sca = xgbe_create_mdio_sca_c22(addr, reg); 1331 1332 return xgbe_write_ext_mii_regs(pdata, mdio_sca, val); 1333 } 1334 1335 static int xgbe_write_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr, 1336 int devad, int reg, u16 val) 1337 { 1338 unsigned int mdio_sca; 1339 1340 mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg); 1341 1342 return xgbe_write_ext_mii_regs(pdata, mdio_sca, val); 1343 } 1344 1345 static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, 1346 unsigned int mdio_sca) 1347 { 1348 unsigned int mdio_sccd; 1349 1350 reinit_completion(&pdata->mdio_complete); 1351 1352 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1353 1354 mdio_sccd = 0; 1355 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3); 1356 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); 1357 XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); 1358 1359 if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) { 1360 netdev_err(pdata->netdev, "mdio read operation timed out\n"); 1361 return -ETIMEDOUT; 1362 } 1363 1364 return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA); 1365 } 1366 1367 static int xgbe_read_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr, 1368 int reg) 1369 { 1370 unsigned int mdio_sca; 1371 1372 mdio_sca = xgbe_create_mdio_sca_c22(addr, reg); 1373 1374 return xgbe_read_ext_mii_regs(pdata, mdio_sca); 1375 } 1376 1377 static int xgbe_read_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr, 1378 int devad, int reg) 1379 { 1380 unsigned int mdio_sca; 1381 1382 mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg); 1383 1384 return xgbe_read_ext_mii_regs(pdata, mdio_sca); 1385 } 1386 1387 static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, 1388 enum xgbe_mdio_mode mode) 1389 { 1390 unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R); 1391 1392 switch (mode) { 1393 case XGBE_MDIO_MODE_CL22: 1394 if (port > XGMAC_MAX_C22_PORT) 1395 return -EINVAL; 1396 reg_val |= (1 << port); 1397 break; 1398 case XGBE_MDIO_MODE_CL45: 1399 break; 1400 default: 1401 return -EINVAL; 1402 } 1403 1404 XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val); 1405 1406 return 0; 1407 } 1408 1409 static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc) 1410 { 1411 return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN); 1412 } 1413 1414 static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata) 1415 { 1416 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); 1417 1418 return 0; 1419 } 1420 1421 static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) 1422 { 1423 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); 1424 1425 return 0; 1426 } 1427 1428 static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) 1429 { 1430 struct xgbe_ring_desc *rdesc = rdata->rdesc; 1431 1432 /* Reset the Tx descriptor 1433 * Set buffer 1 (lo) address to zero 1434 * Set buffer 1 (hi) address to zero 1435 * Reset all other control bits (IC, TTSE, B2L & B1L) 1436 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) 1437 */ 1438 rdesc->desc0 = 0; 1439 rdesc->desc1 = 0; 1440 rdesc->desc2 = 0; 1441 rdesc->desc3 = 0; 1442 1443 /* Make sure ownership is written to the descriptor */ 1444 dma_wmb(); 1445 } 1446 1447 static void xgbe_tx_desc_init(struct xgbe_channel *channel) 1448 { 1449 struct xgbe_ring *ring = channel->tx_ring; 1450 struct xgbe_ring_data *rdata; 1451 int i; 1452 int start_index = ring->cur; 1453 1454 DBGPR("-->tx_desc_init\n"); 1455 1456 /* 
Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xgbe_tx_desc_reset(rdata);
	}

	/* Update the total number of Tx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	DBGPR("<--tx_desc_init\n");
}

static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
			       struct xgbe_ring_data *rdata, unsigned int index)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;
	unsigned int rx_usecs = pdata->rx_usecs;
	unsigned int rx_frames = pdata->rx_frames;
	unsigned int inte;
	dma_addr_t hdr_dma, buf_dma;

	if (!rx_usecs && !rx_frames) {
		/* No coalescing, interrupt for every descriptor */
		inte = 1;
	} else {
		/* Set interrupt based on Rx frame coalescing setting */
		if (rx_frames && !((index + 1) % rx_frames))
			inte = 1;
		else
			inte = 0;
	}

	/* Reset the Rx descriptor
	 *   Set buffer 1 (lo) address to header dma address (lo)
	 *   Set buffer 1 (hi) address to header dma address (hi)
	 *   Set buffer 2 (lo) address to buffer dma address (lo)
	 *   Set buffer 2 (hi) address to buffer dma address (hi) and
	 *     set control bits OWN and INTE
	 */
	hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
	buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
	rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
	rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
	rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

	/* Since the Rx DMA engine is likely running, make sure everything
	 * is written to the descriptor(s) before setting the OWN bit
	 * for the descriptor
	 */
	dma_wmb();

	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

	/* Make sure ownership is written to the descriptor */
	dma_wmb();
}

static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	unsigned int start_index = ring->cur;
	unsigned int i;

	DBGPR("-->rx_desc_init\n");

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Rx descriptor */
		xgbe_rx_desc_reset(pdata, rdata, i);
	}

	/* Update the total number of Rx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
			  upper_32_bits(rdata->rdesc_dma));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
			  lower_32_bits(rdata->rdesc_dma));

	/* Update the Rx Descriptor Tail Pointer */
	rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
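
	/* Illustrative example (not from the original source): with a ring of
	 * 512 descriptors and start_index 0, DMA_CH_RDRLR is programmed with
	 * 511, DMA_CH_RDLR_HI/LO point at descriptor 0, and DMA_CH_RDTR_LO
	 * points at descriptor 511, handing every Rx descriptor to hardware.
	 */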
1558 DBGPR("<--rx_desc_init\n"); 1559 } 1560 1561 static void xgbe_tx_start_xmit(struct xgbe_channel *channel, 1562 struct xgbe_ring *ring) 1563 { 1564 struct xgbe_prv_data *pdata = channel->pdata; 1565 struct xgbe_ring_data *rdata; 1566 1567 /* Make sure everything is written before the register write */ 1568 wmb(); 1569 1570 /* Issue a poll command to Tx DMA by writing address 1571 * of next immediate free descriptor */ 1572 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1573 XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO, 1574 lower_32_bits(rdata->rdesc_dma)); 1575 1576 /* Start the Tx timer */ 1577 if (pdata->tx_usecs && !channel->tx_timer_active) { 1578 channel->tx_timer_active = 1; 1579 mod_timer(&channel->tx_timer, 1580 jiffies + usecs_to_jiffies(pdata->tx_usecs)); 1581 } 1582 1583 ring->tx.xmit_more = 0; 1584 } 1585 1586 static void xgbe_dev_xmit(struct xgbe_channel *channel) 1587 { 1588 struct xgbe_prv_data *pdata = channel->pdata; 1589 struct xgbe_ring *ring = channel->tx_ring; 1590 struct xgbe_ring_data *rdata; 1591 struct xgbe_ring_desc *rdesc; 1592 struct xgbe_packet_data *packet = &ring->packet_data; 1593 unsigned int tx_packets, tx_bytes; 1594 unsigned int csum, tso, vlan, vxlan; 1595 unsigned int tso_context, vlan_context; 1596 unsigned int tx_set_ic; 1597 int start_index = ring->cur; 1598 int cur_index = ring->cur; 1599 int i; 1600 1601 DBGPR("-->xgbe_dev_xmit\n"); 1602 1603 tx_packets = packet->tx_packets; 1604 tx_bytes = packet->tx_bytes; 1605 1606 csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1607 CSUM_ENABLE); 1608 tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1609 TSO_ENABLE); 1610 vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1611 VLAN_CTAG); 1612 vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, 1613 VXLAN); 1614 1615 if (tso && (packet->mss != ring->tx.cur_mss)) 1616 tso_context = 1; 1617 else 1618 tso_context = 0; 1619 1620 if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)) 1621 vlan_context = 1; 1622 else 1623 vlan_context = 0; 1624 1625 /* Determine if an interrupt should be generated for this Tx: 1626 * Interrupt: 1627 * - Tx frame count exceeds the frame count setting 1628 * - Addition of Tx frame count to the frame count since the 1629 * last interrupt was set exceeds the frame count setting 1630 * No interrupt: 1631 * - No frame count setting specified (ethtool -C ethX tx-frames 0) 1632 * - Addition of Tx frame count to the frame count since the 1633 * last interrupt was set does not exceed the frame count setting 1634 */ 1635 ring->coalesce_count += tx_packets; 1636 if (!pdata->tx_frames) 1637 tx_set_ic = 0; 1638 else if (tx_packets > pdata->tx_frames) 1639 tx_set_ic = 1; 1640 else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets) 1641 tx_set_ic = 1; 1642 else 1643 tx_set_ic = 0; 1644 1645 rdata = XGBE_GET_DESC_DATA(ring, cur_index); 1646 rdesc = rdata->rdesc; 1647 1648 /* Create a context descriptor if this is a TSO packet */ 1649 if (tso_context || vlan_context) { 1650 if (tso_context) { 1651 netif_dbg(pdata, tx_queued, pdata->netdev, 1652 "TSO context descriptor, mss=%u\n", 1653 packet->mss); 1654 1655 /* Set the MSS size */ 1656 XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2, 1657 MSS, packet->mss); 1658 1659 /* Mark it as a CONTEXT descriptor */ 1660 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 1661 CTXT, 1); 1662 1663 /* Indicate this descriptor contains the MSS */ 1664 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 1665 TCMSSV, 1); 1666 1667 ring->tx.cur_mss = 
packet->mss; 1668 } 1669 1670 if (vlan_context) { 1671 netif_dbg(pdata, tx_queued, pdata->netdev, 1672 "VLAN context descriptor, ctag=%u\n", 1673 packet->vlan_ctag); 1674 1675 /* Mark it as a CONTEXT descriptor */ 1676 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 1677 CTXT, 1); 1678 1679 /* Set the VLAN tag */ 1680 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 1681 VT, packet->vlan_ctag); 1682 1683 /* Indicate this descriptor contains the VLAN tag */ 1684 XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3, 1685 VLTV, 1); 1686 1687 ring->tx.cur_vlan_ctag = packet->vlan_ctag; 1688 } 1689 1690 cur_index++; 1691 rdata = XGBE_GET_DESC_DATA(ring, cur_index); 1692 rdesc = rdata->rdesc; 1693 } 1694 1695 /* Update buffer address (for TSO this is the header) */ 1696 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); 1697 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); 1698 1699 /* Update the buffer length */ 1700 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, 1701 rdata->skb_dma_len); 1702 1703 /* VLAN tag insertion check */ 1704 if (vlan) 1705 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR, 1706 TX_NORMAL_DESC2_VLAN_INSERT); 1707 1708 /* Timestamp enablement check */ 1709 if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) 1710 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1); 1711 1712 /* Mark it as First Descriptor */ 1713 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1); 1714 1715 /* Mark it as a NORMAL descriptor */ 1716 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); 1717 1718 /* Set OWN bit if not the first descriptor */ 1719 if (cur_index != start_index) 1720 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); 1721 1722 if (tso) { 1723 /* Enable TSO */ 1724 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1); 1725 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL, 1726 packet->tcp_payload_len); 1727 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN, 1728 packet->tcp_header_len / 4); 1729 1730 pdata->ext_stats.tx_tso_packets += tx_packets; 1731 } else { 1732 /* Enable CRC and Pad Insertion */ 1733 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0); 1734 1735 /* Enable HW CSUM */ 1736 if (csum) 1737 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, 1738 CIC, 0x3); 1739 1740 /* Set the total length to be transmitted */ 1741 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL, 1742 packet->length); 1743 } 1744 1745 if (vxlan) { 1746 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP, 1747 TX_NORMAL_DESC3_VXLAN_PACKET); 1748 1749 pdata->ext_stats.tx_vxlan_packets += packet->tx_packets; 1750 } 1751 1752 for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) { 1753 cur_index++; 1754 rdata = XGBE_GET_DESC_DATA(ring, cur_index); 1755 rdesc = rdata->rdesc; 1756 1757 /* Update buffer address */ 1758 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma)); 1759 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma)); 1760 1761 /* Update the buffer length */ 1762 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L, 1763 rdata->skb_dma_len); 1764 1765 /* Set OWN bit */ 1766 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); 1767 1768 /* Mark it as NORMAL descriptor */ 1769 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0); 1770 1771 /* Enable HW CSUM */ 1772 if (csum) 1773 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, 1774 CIC, 0x3); 1775 } 1776 1777 /* Set LAST bit for the last descriptor */ 1778 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 
1); 1779 1780 /* Set IC bit based on Tx coalescing settings */ 1781 if (tx_set_ic) 1782 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); 1783 1784 /* Save the Tx info to report back during cleanup */ 1785 rdata->tx.packets = tx_packets; 1786 rdata->tx.bytes = tx_bytes; 1787 1788 pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets; 1789 pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes; 1790 1791 /* In case the Tx DMA engine is running, make sure everything 1792 * is written to the descriptor(s) before setting the OWN bit 1793 * for the first descriptor 1794 */ 1795 dma_wmb(); 1796 1797 /* Set OWN bit for the first descriptor */ 1798 rdata = XGBE_GET_DESC_DATA(ring, start_index); 1799 rdesc = rdata->rdesc; 1800 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1); 1801 1802 if (netif_msg_tx_queued(pdata)) 1803 xgbe_dump_tx_desc(pdata, ring, start_index, 1804 packet->rdesc_count, 1); 1805 1806 /* Make sure ownership is written to the descriptor */ 1807 smp_wmb(); 1808 1809 ring->cur = cur_index + 1; 1810 if (!netdev_xmit_more() || 1811 netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev, 1812 channel->queue_index))) 1813 xgbe_tx_start_xmit(channel, ring); 1814 else 1815 ring->tx.xmit_more = 1; 1816 1817 DBGPR(" %s: descriptors %u to %u written\n", 1818 channel->name, start_index & (ring->rdesc_count - 1), 1819 (ring->cur - 1) & (ring->rdesc_count - 1)); 1820 1821 DBGPR("<--xgbe_dev_xmit\n"); 1822 } 1823 1824 static int xgbe_dev_read(struct xgbe_channel *channel) 1825 { 1826 struct xgbe_prv_data *pdata = channel->pdata; 1827 struct xgbe_ring *ring = channel->rx_ring; 1828 struct xgbe_ring_data *rdata; 1829 struct xgbe_ring_desc *rdesc; 1830 struct xgbe_packet_data *packet = &ring->packet_data; 1831 struct net_device *netdev = pdata->netdev; 1832 unsigned int err, etlt, l34t; 1833 1834 DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur); 1835 1836 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1837 rdesc = rdata->rdesc; 1838 1839 /* Check for data availability */ 1840 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) 1841 return 1; 1842 1843 /* Make sure descriptor fields are read after reading the OWN bit */ 1844 dma_rmb(); 1845 1846 if (netif_msg_rx_status(pdata)) 1847 xgbe_dump_rx_desc(pdata, ring, ring->cur); 1848 1849 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) { 1850 /* Timestamp Context Descriptor */ 1851 xgbe_get_rx_tstamp(packet, rdesc); 1852 1853 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1854 CONTEXT, 1); 1855 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1856 CONTEXT_NEXT, 0); 1857 return 0; 1858 } 1859 1860 /* Normal Descriptor, be sure Context Descriptor bit is off */ 1861 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0); 1862 1863 /* Indicate if a Context Descriptor is next */ 1864 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA)) 1865 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1866 CONTEXT_NEXT, 1); 1867 1868 /* Get the header length */ 1869 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { 1870 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1871 FIRST, 1); 1872 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, 1873 RX_NORMAL_DESC2, HL); 1874 if (rdata->rx.hdr_len) 1875 pdata->ext_stats.rx_split_header_packets++; 1876 } else { 1877 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1878 FIRST, 0); 1879 } 1880 1881 /* Get the RSS hash */ 1882 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) { 1883 
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1884 RSS_HASH, 1); 1885 1886 packet->rss_hash = le32_to_cpu(rdesc->desc1); 1887 1888 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); 1889 switch (l34t) { 1890 case RX_DESC3_L34T_IPV4_TCP: 1891 case RX_DESC3_L34T_IPV4_UDP: 1892 case RX_DESC3_L34T_IPV6_TCP: 1893 case RX_DESC3_L34T_IPV6_UDP: 1894 packet->rss_hash_type = PKT_HASH_TYPE_L4; 1895 break; 1896 default: 1897 packet->rss_hash_type = PKT_HASH_TYPE_L3; 1898 } 1899 } 1900 1901 /* Not all the data has been transferred for this packet */ 1902 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) 1903 return 0; 1904 1905 /* This is the last of the data for this packet */ 1906 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1907 LAST, 1); 1908 1909 /* Get the packet length */ 1910 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); 1911 1912 /* Set checksum done indicator as appropriate */ 1913 if (netdev->features & NETIF_F_RXCSUM) { 1914 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1915 CSUM_DONE, 1); 1916 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1917 TNPCSUM_DONE, 1); 1918 } 1919 1920 /* Set the tunneled packet indicator */ 1921 if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) { 1922 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1923 TNP, 1); 1924 pdata->ext_stats.rx_vxlan_packets++; 1925 1926 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); 1927 switch (l34t) { 1928 case RX_DESC3_L34T_IPV4_UNKNOWN: 1929 case RX_DESC3_L34T_IPV6_UNKNOWN: 1930 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1931 TNPCSUM_DONE, 0); 1932 break; 1933 } 1934 } 1935 1936 /* Check for errors (only valid in last descriptor) */ 1937 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); 1938 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); 1939 netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt); 1940 1941 if (!err || !etlt) { 1942 /* No error if err is 0 or etlt is 0 */ 1943 if ((etlt == 0x09) && 1944 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1945 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1946 VLAN_CTAG, 1); 1947 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, 1948 RX_NORMAL_DESC0, 1949 OVT); 1950 netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", 1951 packet->vlan_ctag); 1952 } 1953 } else { 1954 unsigned int tnp = XGMAC_GET_BITS(packet->attributes, 1955 RX_PACKET_ATTRIBUTES, TNP); 1956 1957 if ((etlt == 0x05) || (etlt == 0x06)) { 1958 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1959 CSUM_DONE, 0); 1960 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1961 TNPCSUM_DONE, 0); 1962 pdata->ext_stats.rx_csum_errors++; 1963 } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) { 1964 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1965 CSUM_DONE, 0); 1966 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1967 TNPCSUM_DONE, 0); 1968 pdata->ext_stats.rx_vxlan_csum_errors++; 1969 } else { 1970 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, 1971 FRAME, 1); 1972 } 1973 } 1974 1975 pdata->ext_stats.rxq_packets[channel->queue_index]++; 1976 pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len; 1977 1978 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name, 1979 ring->cur & (ring->rdesc_count - 1), ring->cur); 1980 1981 return 0; 1982 } 1983 1984 static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc) 1985 { 1986 /* Rx and Tx share CTXT bit, so check TDES3.CTXT 
bit */ 1987 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT); 1988 } 1989 1990 static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) 1991 { 1992 /* Rx and Tx share LD bit, so check TDES3.LD bit */ 1993 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD); 1994 } 1995 1996 static int xgbe_enable_int(struct xgbe_channel *channel, 1997 enum xgbe_int int_id) 1998 { 1999 switch (int_id) { 2000 case XGMAC_INT_DMA_CH_SR_TI: 2001 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); 2002 break; 2003 case XGMAC_INT_DMA_CH_SR_TPS: 2004 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1); 2005 break; 2006 case XGMAC_INT_DMA_CH_SR_TBU: 2007 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1); 2008 break; 2009 case XGMAC_INT_DMA_CH_SR_RI: 2010 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); 2011 break; 2012 case XGMAC_INT_DMA_CH_SR_RBU: 2013 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); 2014 break; 2015 case XGMAC_INT_DMA_CH_SR_RPS: 2016 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1); 2017 break; 2018 case XGMAC_INT_DMA_CH_SR_TI_RI: 2019 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); 2020 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); 2021 break; 2022 case XGMAC_INT_DMA_CH_SR_FBE: 2023 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); 2024 break; 2025 case XGMAC_INT_DMA_ALL: 2026 channel->curr_ier |= channel->saved_ier; 2027 break; 2028 default: 2029 return -1; 2030 } 2031 2032 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); 2033 2034 return 0; 2035 } 2036 2037 static int xgbe_disable_int(struct xgbe_channel *channel, 2038 enum xgbe_int int_id) 2039 { 2040 switch (int_id) { 2041 case XGMAC_INT_DMA_CH_SR_TI: 2042 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); 2043 break; 2044 case XGMAC_INT_DMA_CH_SR_TPS: 2045 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0); 2046 break; 2047 case XGMAC_INT_DMA_CH_SR_TBU: 2048 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0); 2049 break; 2050 case XGMAC_INT_DMA_CH_SR_RI: 2051 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); 2052 break; 2053 case XGMAC_INT_DMA_CH_SR_RBU: 2054 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0); 2055 break; 2056 case XGMAC_INT_DMA_CH_SR_RPS: 2057 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0); 2058 break; 2059 case XGMAC_INT_DMA_CH_SR_TI_RI: 2060 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); 2061 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); 2062 break; 2063 case XGMAC_INT_DMA_CH_SR_FBE: 2064 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0); 2065 break; 2066 case XGMAC_INT_DMA_ALL: 2067 channel->saved_ier = channel->curr_ier; 2068 channel->curr_ier = 0; 2069 break; 2070 default: 2071 return -1; 2072 } 2073 2074 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); 2075 2076 return 0; 2077 } 2078 2079 static int __xgbe_exit(struct xgbe_prv_data *pdata) 2080 { 2081 unsigned int count = 2000; 2082 2083 DBGPR("-->xgbe_exit\n"); 2084 2085 /* Issue a software reset */ 2086 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); 2087 usleep_range(10, 15); 2088 2089 /* Poll Until Poll Condition */ 2090 while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) 2091 usleep_range(500, 600); 2092 2093 if (!count) 2094 return -EBUSY; 2095 2096 DBGPR("<--xgbe_exit\n"); 2097 2098 return 0; 2099 } 2100 2101 static int xgbe_exit(struct xgbe_prv_data *pdata) 2102 { 2103 int ret; 2104 2105 /* To guard against possible incorrectly generated interrupts, 2106 * issue the software reset twice. 
2107 */ 2108 ret = __xgbe_exit(pdata); 2109 if (ret) 2110 return ret; 2111 2112 return __xgbe_exit(pdata); 2113 } 2114 2115 static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) 2116 { 2117 unsigned int i, count; 2118 2119 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) 2120 return 0; 2121 2122 for (i = 0; i < pdata->tx_q_count; i++) 2123 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); 2124 2125 /* Poll Until Poll Condition */ 2126 for (i = 0; i < pdata->tx_q_count; i++) { 2127 count = 2000; 2128 while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, 2129 MTL_Q_TQOMR, FTQ)) 2130 usleep_range(500, 600); 2131 2132 if (!count) 2133 return -EBUSY; 2134 } 2135 2136 return 0; 2137 } 2138 2139 static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata) 2140 { 2141 unsigned int sbmr; 2142 2143 sbmr = XGMAC_IOREAD(pdata, DMA_SBMR); 2144 2145 /* Set enhanced addressing mode */ 2146 XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1); 2147 2148 /* Set the System Bus mode */ 2149 XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1); 2150 XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2); 2151 XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal); 2152 XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1); 2153 XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1); 2154 2155 XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr); 2156 2157 /* Set descriptor fetching threshold */ 2158 if (pdata->vdata->tx_desc_prefetch) 2159 XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS, 2160 pdata->vdata->tx_desc_prefetch); 2161 2162 if (pdata->vdata->rx_desc_prefetch) 2163 XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS, 2164 pdata->vdata->rx_desc_prefetch); 2165 } 2166 2167 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata) 2168 { 2169 XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr); 2170 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr); 2171 if (pdata->awarcr) 2172 XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr); 2173 } 2174 2175 static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) 2176 { 2177 unsigned int i; 2178 2179 /* Set Tx to weighted round robin scheduling algorithm */ 2180 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); 2181 2182 /* Set Tx traffic classes to use WRR algorithm with equal weights */ 2183 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { 2184 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, 2185 MTL_TSA_ETS); 2186 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1); 2187 } 2188 2189 /* Set Rx to strict priority algorithm */ 2190 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); 2191 } 2192 2193 static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata, 2194 unsigned int queue, 2195 unsigned int q_fifo_size) 2196 { 2197 unsigned int frame_fifo_size; 2198 unsigned int rfa, rfd; 2199 2200 frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata)); 2201 2202 if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) { 2203 /* PFC is active for this queue */ 2204 rfa = pdata->pfc_rfa; 2205 rfd = rfa + frame_fifo_size; 2206 if (rfd > XGMAC_FLOW_CONTROL_MAX) 2207 rfd = XGMAC_FLOW_CONTROL_MAX; 2208 if (rfa >= XGMAC_FLOW_CONTROL_MAX) 2209 rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT; 2210 } else { 2211 /* This path deals with just maximum frame sizes which are 2212 * limited to a jumbo frame of 9,000 (plus headers, etc.) 2213 * so we can never exceed the maximum allowable RFA/RFD 2214 * values. 
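* Note on the hard-coded values below: going by the in-line "Full - N bytes"
* comments, the RFA/RFD field encoding appears to be 0 = "fifo full minus
* 1024 bytes", with each increment lowering the threshold by a further
* 512 bytes (so, for example, an RFD of 5 means "full minus 3584 bytes").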
2215 */ 2216 if (q_fifo_size <= 2048) { 2217 /* rx_rfd to zero to signal no flow control */ 2218 pdata->rx_rfa[queue] = 0; 2219 pdata->rx_rfd[queue] = 0; 2220 return; 2221 } 2222 2223 if (q_fifo_size <= 4096) { 2224 /* Between 2048 and 4096 */ 2225 pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */ 2226 pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */ 2227 return; 2228 } 2229 2230 if (q_fifo_size <= frame_fifo_size) { 2231 /* Between 4096 and max-frame */ 2232 pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */ 2233 pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */ 2234 return; 2235 } 2236 2237 if (q_fifo_size <= (frame_fifo_size * 3)) { 2238 /* Between max-frame and 3 max-frames, 2239 * trigger if we get just over a frame of data and 2240 * resume when we have just under half a frame left. 2241 */ 2242 rfa = q_fifo_size - frame_fifo_size; 2243 rfd = rfa + (frame_fifo_size / 2); 2244 } else { 2245 /* Above 3 max-frames - trigger when just over 2246 * 2 frames of space available 2247 */ 2248 rfa = frame_fifo_size * 2; 2249 rfa += XGMAC_FLOW_CONTROL_UNIT; 2250 rfd = rfa + frame_fifo_size; 2251 } 2252 } 2253 2254 pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa); 2255 pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd); 2256 } 2257 2258 static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata, 2259 unsigned int *fifo) 2260 { 2261 unsigned int q_fifo_size; 2262 unsigned int i; 2263 2264 for (i = 0; i < pdata->rx_q_count; i++) { 2265 q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT; 2266 2267 xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size); 2268 } 2269 } 2270 2271 static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) 2272 { 2273 unsigned int i; 2274 2275 for (i = 0; i < pdata->rx_q_count; i++) { 2276 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2277 pdata->rx_rfa[i]); 2278 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 2279 pdata->rx_rfd[i]); 2280 } 2281 } 2282 2283 static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata) 2284 { 2285 /* The configured value may not be the actual amount of fifo RAM */ 2286 return min_t(unsigned int, pdata->tx_max_fifo_size, 2287 pdata->hw_feat.tx_fifo_size); 2288 } 2289 2290 static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata) 2291 { 2292 /* The configured value may not be the actual amount of fifo RAM */ 2293 return min_t(unsigned int, pdata->rx_max_fifo_size, 2294 pdata->hw_feat.rx_fifo_size); 2295 } 2296 2297 static void xgbe_calculate_equal_fifo(unsigned int fifo_size, 2298 unsigned int queue_count, 2299 unsigned int *fifo) 2300 { 2301 unsigned int q_fifo_size; 2302 unsigned int p_fifo; 2303 unsigned int i; 2304 2305 q_fifo_size = fifo_size / queue_count; 2306 2307 /* Calculate the fifo setting by dividing the queue's fifo size 2308 * by the fifo allocation increment (with 0 representing the 2309 * base allocation increment so decrement the result by 1). 2310 */ 2311 p_fifo = q_fifo_size / XGMAC_FIFO_UNIT; 2312 if (p_fifo) 2313 p_fifo--; 2314 2315 /* Distribute the fifo equally amongst the queues */ 2316 for (i = 0; i < queue_count; i++) 2317 fifo[i] = p_fifo; 2318 } 2319 2320 static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size, 2321 unsigned int queue_count, 2322 unsigned int *fifo) 2323 { 2324 unsigned int i; 2325 2326 BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC); 2327 2328 if (queue_count <= IEEE_8021QAZ_MAX_TCS) 2329 return fifo_size; 2330 2331 /* Rx queues 9 and up are for specialized packets, 2332 * such as PTP or DCB control packets, etc. 
and 2333 * don't require a large fifo 2334 */ 2335 for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) { 2336 fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1; 2337 fifo_size -= XGMAC_FIFO_MIN_ALLOC; 2338 } 2339 2340 return fifo_size; 2341 } 2342 2343 static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata) 2344 { 2345 unsigned int delay; 2346 2347 /* If a delay has been provided, use that */ 2348 if (pdata->pfc->delay) 2349 return pdata->pfc->delay / 8; 2350 2351 /* Allow for two maximum size frames */ 2352 delay = xgbe_get_max_frame(pdata); 2353 delay += XGMAC_ETH_PREAMBLE; 2354 delay *= 2; 2355 2356 /* Allow for PFC frame */ 2357 delay += XGMAC_PFC_DATA_LEN; 2358 delay += ETH_HLEN + ETH_FCS_LEN; 2359 delay += XGMAC_ETH_PREAMBLE; 2360 2361 /* Allow for miscellaneous delays (LPI exit, cable, etc.) */ 2362 delay += XGMAC_PFC_DELAYS; 2363 2364 return delay; 2365 } 2366 2367 static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata) 2368 { 2369 unsigned int count, prio_queues; 2370 unsigned int i; 2371 2372 if (!pdata->pfc->pfc_en) 2373 return 0; 2374 2375 count = 0; 2376 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); 2377 for (i = 0; i < prio_queues; i++) { 2378 if (!xgbe_is_pfc_queue(pdata, i)) 2379 continue; 2380 2381 pdata->pfcq[i] = 1; 2382 count++; 2383 } 2384 2385 return count; 2386 } 2387 2388 static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata, 2389 unsigned int fifo_size, 2390 unsigned int *fifo) 2391 { 2392 unsigned int q_fifo_size, rem_fifo, addn_fifo; 2393 unsigned int prio_queues; 2394 unsigned int pfc_count; 2395 unsigned int i; 2396 2397 q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata)); 2398 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); 2399 pfc_count = xgbe_get_pfc_queues(pdata); 2400 2401 if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) { 2402 /* No traffic classes with PFC enabled or can't do lossless */ 2403 xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo); 2404 return; 2405 } 2406 2407 /* Calculate how much fifo we have to play with */ 2408 rem_fifo = fifo_size - (q_fifo_size * prio_queues); 2409 2410 /* Calculate how much more than base fifo PFC needs, which also 2411 * becomes the threshold activation point (RFA) 2412 */ 2413 pdata->pfc_rfa = xgbe_get_pfc_delay(pdata); 2414 pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa); 2415 2416 if (pdata->pfc_rfa > q_fifo_size) { 2417 addn_fifo = pdata->pfc_rfa - q_fifo_size; 2418 addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo); 2419 } else { 2420 addn_fifo = 0; 2421 } 2422 2423 /* Calculate DCB fifo settings: 2424 * - distribute remaining fifo between the VLAN priority 2425 * queues based on traffic class PFC enablement and overall 2426 * priority (0 is lowest priority, so start at highest) 2427 */ 2428 i = prio_queues; 2429 while (i > 0) { 2430 i--; 2431 2432 fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1; 2433 2434 if (!pdata->pfcq[i] || !addn_fifo) 2435 continue; 2436 2437 if (addn_fifo > rem_fifo) { 2438 netdev_warn(pdata->netdev, 2439 "RXq%u cannot set needed fifo size\n", i); 2440 if (!rem_fifo) 2441 continue; 2442 2443 addn_fifo = rem_fifo; 2444 } 2445 2446 fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT); 2447 rem_fifo -= addn_fifo; 2448 } 2449 2450 if (rem_fifo) { 2451 unsigned int inc_fifo = rem_fifo / prio_queues; 2452 2453 /* Distribute remaining fifo across queues */ 2454 for (i = 0; i < prio_queues; i++) 2455 fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT); 2456 } 2457 } 2458 2459 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) 2460 { 
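/* Worked example (illustrative, assuming the usual 256-byte fifo
 * allocation unit): a 64 KB Tx fifo shared equally by four queues gives
 * 16 KB per queue, which xgbe_calculate_equal_fifo() encodes as
 * 16384 / 256 - 1 = 63 for the TQS field programmed below.
 */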
2461 unsigned int fifo_size; 2462 unsigned int fifo[XGBE_MAX_QUEUES]; 2463 unsigned int i; 2464 2465 fifo_size = xgbe_get_tx_fifo_size(pdata); 2466 2467 xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo); 2468 2469 for (i = 0; i < pdata->tx_q_count; i++) 2470 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]); 2471 2472 netif_info(pdata, drv, pdata->netdev, 2473 "%d Tx hardware queues, %d byte fifo per queue\n", 2474 pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); 2475 } 2476 2477 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) 2478 { 2479 unsigned int fifo_size; 2480 unsigned int fifo[XGBE_MAX_QUEUES]; 2481 unsigned int prio_queues; 2482 unsigned int i; 2483 2484 /* Clear any DCB related fifo/queue information */ 2485 memset(pdata->pfcq, 0, sizeof(pdata->pfcq)); 2486 pdata->pfc_rfa = 0; 2487 2488 fifo_size = xgbe_get_rx_fifo_size(pdata); 2489 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); 2490 2491 /* Assign a minimum fifo to the non-VLAN priority queues */ 2492 fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo); 2493 2494 if (pdata->pfc && pdata->ets) 2495 xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo); 2496 else 2497 xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo); 2498 2499 for (i = 0; i < pdata->rx_q_count; i++) 2500 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]); 2501 2502 xgbe_calculate_flow_control_threshold(pdata, fifo); 2503 xgbe_config_flow_control_threshold(pdata); 2504 2505 if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) { 2506 netif_info(pdata, drv, pdata->netdev, 2507 "%u Rx hardware queues\n", pdata->rx_q_count); 2508 for (i = 0; i < pdata->rx_q_count; i++) 2509 netif_info(pdata, drv, pdata->netdev, 2510 "RxQ%u, %u byte fifo queue\n", i, 2511 ((fifo[i] + 1) * XGMAC_FIFO_UNIT)); 2512 } else { 2513 netif_info(pdata, drv, pdata->netdev, 2514 "%u Rx hardware queues, %u byte fifo per queue\n", 2515 pdata->rx_q_count, 2516 ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); 2517 } 2518 } 2519 2520 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) 2521 { 2522 unsigned int qptc, qptc_extra, queue; 2523 unsigned int prio_queues; 2524 unsigned int ppq, ppq_extra, prio; 2525 unsigned int mask; 2526 unsigned int i, j, reg, reg_val; 2527 2528 /* Map the MTL Tx Queues to Traffic Classes 2529 * Note: Tx Queues >= Traffic Classes 2530 */ 2531 qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; 2532 qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; 2533 2534 for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { 2535 for (j = 0; j < qptc; j++) { 2536 netif_dbg(pdata, drv, pdata->netdev, 2537 "TXq%u mapped to TC%u\n", queue, i); 2538 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, 2539 Q2TCMAP, i); 2540 pdata->q2tc_map[queue++] = i; 2541 } 2542 2543 if (i < qptc_extra) { 2544 netif_dbg(pdata, drv, pdata->netdev, 2545 "TXq%u mapped to TC%u\n", queue, i); 2546 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, 2547 Q2TCMAP, i); 2548 pdata->q2tc_map[queue++] = i; 2549 } 2550 } 2551 2552 /* Map the 8 VLAN priority values to available MTL Rx queues */ 2553 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); 2554 ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; 2555 ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; 2556 2557 reg = MAC_RQC2R; 2558 reg_val = 0; 2559 for (i = 0, prio = 0; i < prio_queues;) { 2560 mask = 0; 2561 for (j = 0; j < ppq; j++) { 2562 netif_dbg(pdata, drv, pdata->netdev, 2563 "PRIO%u mapped to RXq%u\n", prio, i); 2564 mask |= (1 << prio); 2565 pdata->prio2q_map[prio++] = i; 2566 } 
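/* If the 8 priorities do not divide evenly across the priority queues,
 * the first ppq_extra queues absorb one extra priority. Illustration:
 * with three priority queues, ppq = 2 and ppq_extra = 2, so queue 0 gets
 * priorities 0-2, queue 1 gets 3-5 and queue 2 gets 6-7.
 */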
2567 2568 if (i < ppq_extra) { 2569 netif_dbg(pdata, drv, pdata->netdev, 2570 "PRIO%u mapped to RXq%u\n", prio, i); 2571 mask |= (1 << prio); 2572 pdata->prio2q_map[prio++] = i; 2573 } 2574 2575 reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); 2576 2577 if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) 2578 continue; 2579 2580 XGMAC_IOWRITE(pdata, reg, reg_val); 2581 reg += MAC_RQC2_INC; 2582 reg_val = 0; 2583 } 2584 2585 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ 2586 reg = MTL_RQDCM0R; 2587 reg_val = 0; 2588 for (i = 0; i < pdata->rx_q_count;) { 2589 reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); 2590 2591 if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count)) 2592 continue; 2593 2594 XGMAC_IOWRITE(pdata, reg, reg_val); 2595 2596 reg += MTL_RQDCM_INC; 2597 reg_val = 0; 2598 } 2599 } 2600 2601 static void xgbe_config_tc(struct xgbe_prv_data *pdata) 2602 { 2603 unsigned int offset, queue, prio; 2604 u8 i; 2605 2606 netdev_reset_tc(pdata->netdev); 2607 if (!pdata->num_tcs) 2608 return; 2609 2610 netdev_set_num_tc(pdata->netdev, pdata->num_tcs); 2611 2612 for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) { 2613 while ((queue < pdata->tx_q_count) && 2614 (pdata->q2tc_map[queue] == i)) 2615 queue++; 2616 2617 netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n", 2618 i, offset, queue - 1); 2619 netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset); 2620 offset = queue; 2621 } 2622 2623 if (!pdata->ets) 2624 return; 2625 2626 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) 2627 netdev_set_prio_tc_map(pdata->netdev, prio, 2628 pdata->ets->prio_tc[prio]); 2629 } 2630 2631 static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) 2632 { 2633 struct ieee_ets *ets = pdata->ets; 2634 unsigned int total_weight, min_weight, weight; 2635 unsigned int mask, reg, reg_val; 2636 unsigned int i, prio; 2637 2638 if (!ets) 2639 return; 2640 2641 /* Set Tx to deficit weighted round robin scheduling algorithm (when 2642 * traffic class is using ETS algorithm) 2643 */ 2644 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR); 2645 2646 /* Set Traffic Class algorithms */ 2647 total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt; 2648 min_weight = total_weight / 100; 2649 if (!min_weight) 2650 min_weight = 1; 2651 2652 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { 2653 /* Map the priorities to the traffic class */ 2654 mask = 0; 2655 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { 2656 if (ets->prio_tc[prio] == i) 2657 mask |= (1 << prio); 2658 } 2659 mask &= 0xff; 2660 2661 netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n", 2662 i, mask); 2663 reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG)); 2664 reg_val = XGMAC_IOREAD(pdata, reg); 2665 2666 reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3)); 2667 reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3)); 2668 2669 XGMAC_IOWRITE(pdata, reg, reg_val); 2670 2671 /* Set the traffic class algorithm */ 2672 switch (ets->tc_tsa[i]) { 2673 case IEEE_8021QAZ_TSA_STRICT: 2674 netif_dbg(pdata, drv, pdata->netdev, 2675 "TC%u using SP\n", i); 2676 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, 2677 MTL_TSA_SP); 2678 break; 2679 case IEEE_8021QAZ_TSA_ETS: 2680 weight = total_weight * ets->tc_tx_bw[i] / 100; 2681 weight = clamp(weight, min_weight, total_weight); 2682 2683 netif_dbg(pdata, drv, pdata->netdev, 2684 "TC%u using DWRR (weight %u)\n", i, weight); 2685 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, 2686 MTL_TSA_ETS); 2687 
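/* The DWRR weight written below scales with the configured MTU. As a
 * rough illustration, a 1500-byte MTU with four traffic classes gives a
 * total_weight of 6000, so a class granted 25% of the bandwidth is
 * programmed with a weight of 1500 (clamped to no less than
 * min_weight = 60).
 */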
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 2688 weight); 2689 break; 2690 } 2691 } 2692 2693 xgbe_config_tc(pdata); 2694 } 2695 2696 static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) 2697 { 2698 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) { 2699 /* Just stop the Tx queues while Rx fifo is changed */ 2700 netif_tx_stop_all_queues(pdata->netdev); 2701 2702 /* Suspend Rx so that fifo's can be adjusted */ 2703 pdata->hw_if.disable_rx(pdata); 2704 } 2705 2706 xgbe_config_rx_fifo_size(pdata); 2707 xgbe_config_flow_control(pdata); 2708 2709 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) { 2710 /* Resume Rx */ 2711 pdata->hw_if.enable_rx(pdata); 2712 2713 /* Resume Tx queues */ 2714 netif_tx_start_all_queues(pdata->netdev); 2715 } 2716 } 2717 2718 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata) 2719 { 2720 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr); 2721 2722 /* Filtering is done using perfect filtering and hash filtering */ 2723 if (pdata->hw_feat.hash_table_size) { 2724 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); 2725 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); 2726 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1); 2727 } 2728 } 2729 2730 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata) 2731 { 2732 unsigned int val; 2733 2734 if (pdata->netdev->mtu > XGMAC_JUMBO_PACKET_MTU) { 2735 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSL, 2736 XGMAC_GIANT_PACKET_MTU); 2737 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 1); 2738 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 1); 2739 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 1); 2740 } else { 2741 val = pdata->netdev->mtu > XGMAC_STD_PACKET_MTU ? 1 : 0; 2742 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 0); 2743 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 0); 2744 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 0); 2745 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); 2746 } 2747 } 2748 2749 static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata) 2750 { 2751 xgbe_set_speed(pdata, pdata->phy_speed); 2752 } 2753 2754 static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) 2755 { 2756 if (pdata->netdev->features & NETIF_F_RXCSUM) 2757 xgbe_enable_rx_csum(pdata); 2758 else 2759 xgbe_disable_rx_csum(pdata); 2760 } 2761 2762 static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) 2763 { 2764 /* Indicate that VLAN Tx CTAGs come from context descriptors */ 2765 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0); 2766 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1); 2767 2768 /* Set the current VLAN Hash Table register value */ 2769 xgbe_update_vlan_hash_table(pdata); 2770 2771 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) 2772 xgbe_enable_rx_vlan_filtering(pdata); 2773 else 2774 xgbe_disable_rx_vlan_filtering(pdata); 2775 2776 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 2777 xgbe_enable_rx_vlan_stripping(pdata); 2778 else 2779 xgbe_disable_rx_vlan_stripping(pdata); 2780 } 2781 2782 static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) 2783 { 2784 bool read_hi; 2785 u64 val; 2786 2787 if (pdata->vdata->mmc_64bit) { 2788 switch (reg_lo) { 2789 /* These registers are always 32 bit */ 2790 case MMC_RXRUNTERROR: 2791 case MMC_RXJABBERERROR: 2792 case MMC_RXUNDERSIZE_G: 2793 case MMC_RXOVERSIZE_G: 2794 case MMC_RXWATCHDOGERROR: 2795 read_hi = false; 2796 break; 2797 2798 default: 2799 read_hi = true; 2800 } 2801 } else { 2802 switch (reg_lo) { 2803 /* These registers are always 64 bit */ 2804 case MMC_TXOCTETCOUNT_GB_LO: 2805 case MMC_TXOCTETCOUNT_G_LO: 2806 case MMC_RXOCTETCOUNT_GB_LO: 
2807 case MMC_RXOCTETCOUNT_G_LO: 2808 read_hi = true; 2809 break; 2810 2811 default: 2812 read_hi = false; 2813 } 2814 } 2815 2816 val = XGMAC_IOREAD(pdata, reg_lo); 2817 2818 if (read_hi) 2819 val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); 2820 2821 return val; 2822 } 2823 2824 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) 2825 { 2826 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 2827 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR); 2828 2829 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) 2830 stats->txoctetcount_gb += 2831 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); 2832 2833 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) 2834 stats->txframecount_gb += 2835 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); 2836 2837 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) 2838 stats->txbroadcastframes_g += 2839 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); 2840 2841 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) 2842 stats->txmulticastframes_g += 2843 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); 2844 2845 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) 2846 stats->tx64octets_gb += 2847 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); 2848 2849 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) 2850 stats->tx65to127octets_gb += 2851 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); 2852 2853 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) 2854 stats->tx128to255octets_gb += 2855 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); 2856 2857 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) 2858 stats->tx256to511octets_gb += 2859 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); 2860 2861 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) 2862 stats->tx512to1023octets_gb += 2863 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); 2864 2865 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) 2866 stats->tx1024tomaxoctets_gb += 2867 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 2868 2869 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) 2870 stats->txunicastframes_gb += 2871 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); 2872 2873 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) 2874 stats->txmulticastframes_gb += 2875 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 2876 2877 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) 2878 stats->txbroadcastframes_g += 2879 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 2880 2881 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) 2882 stats->txunderflowerror += 2883 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); 2884 2885 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) 2886 stats->txoctetcount_g += 2887 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); 2888 2889 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) 2890 stats->txframecount_g += 2891 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); 2892 2893 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) 2894 stats->txpauseframes += 2895 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); 2896 2897 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) 2898 stats->txvlanframes_g += 2899 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); 2900 } 2901 2902 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) 2903 { 2904 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 2905 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR); 2906 2907 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) 2908 stats->rxframecount_gb += 2909 xgbe_mmc_read(pdata, 
MMC_RXFRAMECOUNT_GB_LO); 2910 2911 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) 2912 stats->rxoctetcount_gb += 2913 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); 2914 2915 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) 2916 stats->rxoctetcount_g += 2917 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); 2918 2919 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) 2920 stats->rxbroadcastframes_g += 2921 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); 2922 2923 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) 2924 stats->rxmulticastframes_g += 2925 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); 2926 2927 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) 2928 stats->rxcrcerror += 2929 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); 2930 2931 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) 2932 stats->rxrunterror += 2933 xgbe_mmc_read(pdata, MMC_RXRUNTERROR); 2934 2935 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) 2936 stats->rxjabbererror += 2937 xgbe_mmc_read(pdata, MMC_RXJABBERERROR); 2938 2939 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) 2940 stats->rxundersize_g += 2941 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); 2942 2943 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) 2944 stats->rxoversize_g += 2945 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); 2946 2947 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) 2948 stats->rx64octets_gb += 2949 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); 2950 2951 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) 2952 stats->rx65to127octets_gb += 2953 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); 2954 2955 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) 2956 stats->rx128to255octets_gb += 2957 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); 2958 2959 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) 2960 stats->rx256to511octets_gb += 2961 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); 2962 2963 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) 2964 stats->rx512to1023octets_gb += 2965 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); 2966 2967 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) 2968 stats->rx1024tomaxoctets_gb += 2969 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 2970 2971 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) 2972 stats->rxunicastframes_g += 2973 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); 2974 2975 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) 2976 stats->rxlengtherror += 2977 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); 2978 2979 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) 2980 stats->rxoutofrangetype += 2981 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); 2982 2983 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) 2984 stats->rxpauseframes += 2985 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); 2986 2987 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) 2988 stats->rxfifooverflow += 2989 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); 2990 2991 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) 2992 stats->rxvlanframes_gb += 2993 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); 2994 2995 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) 2996 stats->rxwatchdogerror += 2997 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); 2998 } 2999 3000 static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) 3001 { 3002 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 3003 3004 /* Freeze counters */ 3005 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); 3006 3007 stats->txoctetcount_gb += 3008 
xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); 3009 3010 stats->txframecount_gb += 3011 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); 3012 3013 stats->txbroadcastframes_g += 3014 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); 3015 3016 stats->txmulticastframes_g += 3017 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); 3018 3019 stats->tx64octets_gb += 3020 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); 3021 3022 stats->tx65to127octets_gb += 3023 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); 3024 3025 stats->tx128to255octets_gb += 3026 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); 3027 3028 stats->tx256to511octets_gb += 3029 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); 3030 3031 stats->tx512to1023octets_gb += 3032 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); 3033 3034 stats->tx1024tomaxoctets_gb += 3035 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 3036 3037 stats->txunicastframes_gb += 3038 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); 3039 3040 stats->txmulticastframes_gb += 3041 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 3042 3043 stats->txbroadcastframes_g += 3044 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 3045 3046 stats->txunderflowerror += 3047 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); 3048 3049 stats->txoctetcount_g += 3050 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); 3051 3052 stats->txframecount_g += 3053 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); 3054 3055 stats->txpauseframes += 3056 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); 3057 3058 stats->txvlanframes_g += 3059 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); 3060 3061 stats->rxframecount_gb += 3062 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); 3063 3064 stats->rxoctetcount_gb += 3065 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); 3066 3067 stats->rxoctetcount_g += 3068 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); 3069 3070 stats->rxbroadcastframes_g += 3071 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); 3072 3073 stats->rxmulticastframes_g += 3074 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); 3075 3076 stats->rxcrcerror += 3077 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); 3078 3079 stats->rxrunterror += 3080 xgbe_mmc_read(pdata, MMC_RXRUNTERROR); 3081 3082 stats->rxjabbererror += 3083 xgbe_mmc_read(pdata, MMC_RXJABBERERROR); 3084 3085 stats->rxundersize_g += 3086 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); 3087 3088 stats->rxoversize_g += 3089 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); 3090 3091 stats->rx64octets_gb += 3092 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); 3093 3094 stats->rx65to127octets_gb += 3095 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); 3096 3097 stats->rx128to255octets_gb += 3098 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); 3099 3100 stats->rx256to511octets_gb += 3101 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); 3102 3103 stats->rx512to1023octets_gb += 3104 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); 3105 3106 stats->rx1024tomaxoctets_gb += 3107 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 3108 3109 stats->rxunicastframes_g += 3110 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); 3111 3112 stats->rxlengtherror += 3113 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); 3114 3115 stats->rxoutofrangetype += 3116 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); 3117 3118 stats->rxpauseframes += 3119 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); 3120 3121 stats->rxfifooverflow += 3122 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); 3123 3124 stats->rxvlanframes_gb += 3125 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); 3126 3127 stats->rxwatchdogerror += 3128 
xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); 3129 3130 /* Un-freeze counters */ 3131 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); 3132 } 3133 3134 static void xgbe_config_mmc(struct xgbe_prv_data *pdata) 3135 { 3136 /* Set counters to reset on read */ 3137 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1); 3138 3139 /* Reset the counters */ 3140 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); 3141 } 3142 3143 static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, 3144 unsigned int queue) 3145 { 3146 unsigned int tx_status; 3147 unsigned long tx_timeout; 3148 3149 /* The Tx engine cannot be stopped if it is actively processing 3150 * packets. Wait for the Tx queue to empty the Tx fifo. Don't 3151 * wait forever though... 3152 */ 3153 tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); 3154 while (time_before(jiffies, tx_timeout)) { 3155 tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR); 3156 if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) && 3157 (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0)) 3158 break; 3159 3160 usleep_range(500, 1000); 3161 } 3162 3163 if (!time_before(jiffies, tx_timeout)) 3164 netdev_info(pdata->netdev, 3165 "timed out waiting for Tx queue %u to empty\n", 3166 queue); 3167 } 3168 3169 static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, 3170 unsigned int queue) 3171 { 3172 unsigned int tx_dsr, tx_pos, tx_qidx; 3173 unsigned int tx_status; 3174 unsigned long tx_timeout; 3175 3176 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20) 3177 return xgbe_txq_prepare_tx_stop(pdata, queue); 3178 3179 /* Calculate the status register to read and the position within */ 3180 if (queue < DMA_DSRX_FIRST_QUEUE) { 3181 tx_dsr = DMA_DSR0; 3182 tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START; 3183 } else { 3184 tx_qidx = queue - DMA_DSRX_FIRST_QUEUE; 3185 3186 tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); 3187 tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + 3188 DMA_DSRX_TPS_START; 3189 } 3190 3191 /* The Tx engine cannot be stopped if it is actively processing 3192 * descriptors. Wait for the Tx engine to enter the stopped or 3193 * suspended state. Don't wait forever though... 
3194 */ 3195 tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); 3196 while (time_before(jiffies, tx_timeout)) { 3197 tx_status = XGMAC_IOREAD(pdata, tx_dsr); 3198 tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); 3199 if ((tx_status == DMA_TPS_STOPPED) || 3200 (tx_status == DMA_TPS_SUSPENDED)) 3201 break; 3202 3203 usleep_range(500, 1000); 3204 } 3205 3206 if (!time_before(jiffies, tx_timeout)) 3207 netdev_info(pdata->netdev, 3208 "timed out waiting for Tx DMA channel %u to stop\n", 3209 queue); 3210 } 3211 3212 static void xgbe_enable_tx(struct xgbe_prv_data *pdata) 3213 { 3214 unsigned int i; 3215 3216 /* Enable each Tx DMA channel */ 3217 for (i = 0; i < pdata->channel_count; i++) { 3218 if (!pdata->channel[i]->tx_ring) 3219 break; 3220 3221 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); 3222 } 3223 3224 /* Enable each Tx queue */ 3225 for (i = 0; i < pdata->tx_q_count; i++) 3226 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 3227 MTL_Q_ENABLED); 3228 3229 /* Enable MAC Tx */ 3230 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); 3231 } 3232 3233 static void xgbe_disable_tx(struct xgbe_prv_data *pdata) 3234 { 3235 unsigned int i; 3236 3237 /* Prepare for Tx DMA channel stop */ 3238 for (i = 0; i < pdata->tx_q_count; i++) 3239 xgbe_prepare_tx_stop(pdata, i); 3240 3241 /* Disable MAC Tx */ 3242 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); 3243 3244 /* Disable each Tx queue */ 3245 for (i = 0; i < pdata->tx_q_count; i++) 3246 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); 3247 3248 /* Disable each Tx DMA channel */ 3249 for (i = 0; i < pdata->channel_count; i++) { 3250 if (!pdata->channel[i]->tx_ring) 3251 break; 3252 3253 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); 3254 } 3255 } 3256 3257 static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, 3258 unsigned int queue) 3259 { 3260 unsigned int rx_status; 3261 unsigned long rx_timeout; 3262 3263 /* The Rx engine cannot be stopped if it is actively processing 3264 * packets. Wait for the Rx queue to empty the Rx fifo. Don't 3265 * wait forever though... 
3266 */ 3267 rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); 3268 while (time_before(jiffies, rx_timeout)) { 3269 rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); 3270 if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && 3271 (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) 3272 break; 3273 3274 usleep_range(500, 1000); 3275 } 3276 3277 if (!time_before(jiffies, rx_timeout)) 3278 netdev_info(pdata->netdev, 3279 "timed out waiting for Rx queue %u to empty\n", 3280 queue); 3281 } 3282 3283 static void xgbe_enable_rx(struct xgbe_prv_data *pdata) 3284 { 3285 unsigned int reg_val, i; 3286 3287 /* Enable each Rx DMA channel */ 3288 for (i = 0; i < pdata->channel_count; i++) { 3289 if (!pdata->channel[i]->rx_ring) 3290 break; 3291 3292 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); 3293 } 3294 3295 /* Enable each Rx queue */ 3296 reg_val = 0; 3297 for (i = 0; i < pdata->rx_q_count; i++) 3298 reg_val |= (0x02 << (i << 1)); 3299 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); 3300 3301 /* Enable MAC Rx */ 3302 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); 3303 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); 3304 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); 3305 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); 3306 } 3307 3308 static void xgbe_disable_rx(struct xgbe_prv_data *pdata) 3309 { 3310 unsigned int i; 3311 3312 /* Disable MAC Rx */ 3313 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); 3314 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); 3315 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); 3316 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); 3317 3318 /* Prepare for Rx DMA channel stop */ 3319 for (i = 0; i < pdata->rx_q_count; i++) 3320 xgbe_prepare_rx_stop(pdata, i); 3321 3322 /* Disable each Rx queue */ 3323 XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); 3324 3325 /* Disable each Rx DMA channel */ 3326 for (i = 0; i < pdata->channel_count; i++) { 3327 if (!pdata->channel[i]->rx_ring) 3328 break; 3329 3330 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); 3331 } 3332 } 3333 3334 static void xgbe_powerup_tx(struct xgbe_prv_data *pdata) 3335 { 3336 unsigned int i; 3337 3338 /* Enable each Tx DMA channel */ 3339 for (i = 0; i < pdata->channel_count; i++) { 3340 if (!pdata->channel[i]->tx_ring) 3341 break; 3342 3343 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); 3344 } 3345 3346 /* Enable MAC Tx */ 3347 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); 3348 } 3349 3350 static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata) 3351 { 3352 unsigned int i; 3353 3354 /* Prepare for Tx DMA channel stop */ 3355 for (i = 0; i < pdata->tx_q_count; i++) 3356 xgbe_prepare_tx_stop(pdata, i); 3357 3358 /* Disable MAC Tx */ 3359 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); 3360 3361 /* Disable each Tx DMA channel */ 3362 for (i = 0; i < pdata->channel_count; i++) { 3363 if (!pdata->channel[i]->tx_ring) 3364 break; 3365 3366 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); 3367 } 3368 } 3369 3370 static void xgbe_powerup_rx(struct xgbe_prv_data *pdata) 3371 { 3372 unsigned int i; 3373 3374 /* Enable each Rx DMA channel */ 3375 for (i = 0; i < pdata->channel_count; i++) { 3376 if (!pdata->channel[i]->rx_ring) 3377 break; 3378 3379 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); 3380 } 3381 } 3382 3383 static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata) 3384 { 3385 unsigned int i; 3386 3387 /* Disable each Rx DMA channel */ 3388 for (i = 0; i < pdata->channel_count; i++) { 3389 if (!pdata->channel[i]->rx_ring) 3390 break; 3391 3392 
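/* Unlike xgbe_disable_rx() above, the powerdown path only stops the Rx
 * DMA channels; the MAC receiver and Rx queue enables are left untouched
 * so that xgbe_powerup_rx() can restart traffic without reprogramming
 * them.
 */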
XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); 3393 } 3394 } 3395 3396 static int xgbe_init(struct xgbe_prv_data *pdata) 3397 { 3398 struct xgbe_desc_if *desc_if = &pdata->desc_if; 3399 int ret; 3400 3401 DBGPR("-->xgbe_init\n"); 3402 3403 /* Flush Tx queues */ 3404 ret = xgbe_flush_tx_queues(pdata); 3405 if (ret) { 3406 netdev_err(pdata->netdev, "error flushing TX queues\n"); 3407 return ret; 3408 } 3409 3410 /* 3411 * Initialize DMA related features 3412 */ 3413 xgbe_config_dma_bus(pdata); 3414 xgbe_config_dma_cache(pdata); 3415 xgbe_config_osp_mode(pdata); 3416 xgbe_config_pbl_val(pdata); 3417 xgbe_config_rx_coalesce(pdata); 3418 xgbe_config_tx_coalesce(pdata); 3419 xgbe_config_rx_buffer_size(pdata); 3420 xgbe_config_tso_mode(pdata); 3421 3422 if (pdata->netdev->features & NETIF_F_RXCSUM) { 3423 xgbe_config_sph_mode(pdata); 3424 xgbe_config_rss(pdata); 3425 } 3426 3427 desc_if->wrapper_tx_desc_init(pdata); 3428 desc_if->wrapper_rx_desc_init(pdata); 3429 xgbe_enable_dma_interrupts(pdata); 3430 3431 /* 3432 * Initialize MTL related features 3433 */ 3434 xgbe_config_mtl_mode(pdata); 3435 xgbe_config_queue_mapping(pdata); 3436 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); 3437 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); 3438 xgbe_config_tx_threshold(pdata, pdata->tx_threshold); 3439 xgbe_config_rx_threshold(pdata, pdata->rx_threshold); 3440 xgbe_config_tx_fifo_size(pdata); 3441 xgbe_config_rx_fifo_size(pdata); 3442 /*TODO: Error Packet and undersized good Packet forwarding enable 3443 (FEP and FUP) 3444 */ 3445 xgbe_config_dcb_tc(pdata); 3446 xgbe_enable_mtl_interrupts(pdata); 3447 3448 /* 3449 * Initialize MAC related features 3450 */ 3451 xgbe_config_mac_address(pdata); 3452 xgbe_config_rx_mode(pdata); 3453 xgbe_config_jumbo_enable(pdata); 3454 xgbe_config_flow_control(pdata); 3455 xgbe_config_mac_speed(pdata); 3456 xgbe_config_checksum_offload(pdata); 3457 xgbe_config_vlan_support(pdata); 3458 xgbe_config_mmc(pdata); 3459 xgbe_enable_mac_interrupts(pdata); 3460 3461 /* 3462 * Initialize ECC related features 3463 */ 3464 xgbe_enable_ecc_interrupts(pdata); 3465 3466 DBGPR("<--xgbe_init\n"); 3467 3468 return 0; 3469 } 3470 3471 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) 3472 { 3473 DBGPR("-->xgbe_init_function_ptrs\n"); 3474 3475 hw_if->tx_complete = xgbe_tx_complete; 3476 3477 hw_if->set_mac_address = xgbe_set_mac_address; 3478 hw_if->config_rx_mode = xgbe_config_rx_mode; 3479 3480 hw_if->enable_rx_csum = xgbe_enable_rx_csum; 3481 hw_if->disable_rx_csum = xgbe_disable_rx_csum; 3482 3483 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; 3484 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; 3485 hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; 3486 hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; 3487 hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; 3488 3489 hw_if->read_mmd_regs = xgbe_read_mmd_regs; 3490 hw_if->write_mmd_regs = xgbe_write_mmd_regs; 3491 3492 hw_if->set_speed = xgbe_set_speed; 3493 3494 hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode; 3495 hw_if->read_ext_mii_regs_c22 = xgbe_read_ext_mii_regs_c22; 3496 hw_if->write_ext_mii_regs_c22 = xgbe_write_ext_mii_regs_c22; 3497 hw_if->read_ext_mii_regs_c45 = xgbe_read_ext_mii_regs_c45; 3498 hw_if->write_ext_mii_regs_c45 = xgbe_write_ext_mii_regs_c45; 3499 3500 hw_if->set_gpio = xgbe_set_gpio; 3501 hw_if->clr_gpio = xgbe_clr_gpio; 3502 3503 hw_if->enable_tx = xgbe_enable_tx; 3504 hw_if->disable_tx = xgbe_disable_tx; 3505 
hw_if->enable_rx = xgbe_enable_rx; 3506 hw_if->disable_rx = xgbe_disable_rx; 3507 3508 hw_if->powerup_tx = xgbe_powerup_tx; 3509 hw_if->powerdown_tx = xgbe_powerdown_tx; 3510 hw_if->powerup_rx = xgbe_powerup_rx; 3511 hw_if->powerdown_rx = xgbe_powerdown_rx; 3512 3513 hw_if->dev_xmit = xgbe_dev_xmit; 3514 hw_if->dev_read = xgbe_dev_read; 3515 hw_if->enable_int = xgbe_enable_int; 3516 hw_if->disable_int = xgbe_disable_int; 3517 hw_if->init = xgbe_init; 3518 hw_if->exit = xgbe_exit; 3519 3520 /* Descriptor related Sequences have to be initialized here */ 3521 hw_if->tx_desc_init = xgbe_tx_desc_init; 3522 hw_if->rx_desc_init = xgbe_rx_desc_init; 3523 hw_if->tx_desc_reset = xgbe_tx_desc_reset; 3524 hw_if->rx_desc_reset = xgbe_rx_desc_reset; 3525 hw_if->is_last_desc = xgbe_is_last_desc; 3526 hw_if->is_context_desc = xgbe_is_context_desc; 3527 hw_if->tx_start_xmit = xgbe_tx_start_xmit; 3528 3529 /* For FLOW ctrl */ 3530 hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; 3531 hw_if->config_rx_flow_control = xgbe_config_rx_flow_control; 3532 3533 /* For RX coalescing */ 3534 hw_if->config_rx_coalesce = xgbe_config_rx_coalesce; 3535 hw_if->config_tx_coalesce = xgbe_config_tx_coalesce; 3536 hw_if->usec_to_riwt = xgbe_usec_to_riwt; 3537 hw_if->riwt_to_usec = xgbe_riwt_to_usec; 3538 3539 /* For RX and TX threshold config */ 3540 hw_if->config_rx_threshold = xgbe_config_rx_threshold; 3541 hw_if->config_tx_threshold = xgbe_config_tx_threshold; 3542 3543 /* For RX and TX Store and Forward Mode config */ 3544 hw_if->config_rsf_mode = xgbe_config_rsf_mode; 3545 hw_if->config_tsf_mode = xgbe_config_tsf_mode; 3546 3547 /* For TX DMA Operating on Second Frame config */ 3548 hw_if->config_osp_mode = xgbe_config_osp_mode; 3549 3550 /* For MMC statistics support */ 3551 hw_if->tx_mmc_int = xgbe_tx_mmc_int; 3552 hw_if->rx_mmc_int = xgbe_rx_mmc_int; 3553 hw_if->read_mmc_stats = xgbe_read_mmc_stats; 3554 3555 /* For Data Center Bridging config */ 3556 hw_if->config_tc = xgbe_config_tc; 3557 hw_if->config_dcb_tc = xgbe_config_dcb_tc; 3558 hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; 3559 3560 /* For Receive Side Scaling */ 3561 hw_if->enable_rss = xgbe_enable_rss; 3562 hw_if->disable_rss = xgbe_disable_rss; 3563 hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; 3564 hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; 3565 3566 /* For ECC */ 3567 hw_if->disable_ecc_ded = xgbe_disable_ecc_ded; 3568 hw_if->disable_ecc_sec = xgbe_disable_ecc_sec; 3569 3570 /* For VXLAN */ 3571 hw_if->enable_vxlan = xgbe_enable_vxlan; 3572 hw_if->disable_vxlan = xgbe_disable_vxlan; 3573 hw_if->set_vxlan_id = xgbe_set_vxlan_id; 3574 3575 /* For Split Header*/ 3576 hw_if->enable_sph = xgbe_config_sph_mode; 3577 hw_if->disable_sph = xgbe_disable_sph_mode; 3578 3579 DBGPR("<--xgbe_init_function_ptrs\n"); 3580 } 3581
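/*
 * Illustrative usage sketch (not part of the driver itself): a caller that
 * owns a struct xgbe_prv_data is expected to populate the ops table above
 * and then drive the hardware through it, along the lines of:
 *
 *	xgbe_init_function_ptrs_dev(&pdata->hw_if);
 *	if (!pdata->hw_if.init(pdata))
 *		pdata->hw_if.enable_tx(pdata);
 *
 * The indirection keeps the rest of the driver independent of the
 * register-level helpers defined in this file.
 */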