1 /* 2 * AMD 10Gb Ethernet driver 3 * 4 * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc. 5 * 6 * This file is available to you under your choice of the following two 7 * licenses: 8 * 9 * License 1: GPLv2 10 * 11 * This file is free software; you may copy, redistribute and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation, either version 2 of the License, or (at 14 * your option) any later version. 15 * 16 * This file is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program. If not, see <http://www.gnu.org/licenses/>. 23 * 24 * This file incorporates work covered by the following copyright and 25 * permission notice: 26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation 27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys, 28 * Inc. unless otherwise expressly agreed to in writing between Synopsys 29 * and you. 30 * 31 * The Software IS NOT an item of Licensed Software or Licensed Product 32 * under any End User Software License Agreement or Agreement for Licensed 33 * Product with Synopsys or any supplement thereto. Permission is hereby 34 * granted, free of charge, to any person obtaining a copy of this software 35 * annotated with this license and the Software, to deal in the Software 36 * without restriction, including without limitation the rights to use, 37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies 38 * of the Software, and to permit persons to whom the Software is furnished 39 * to do so, subject to the following conditions: 40 * 41 * The above copyright notice and this permission notice shall be included 42 * in all copies or substantial portions of the Software. 43 * 44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" 45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS 48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 54 * THE POSSIBILITY OF SUCH DAMAGE. 55 * 56 * 57 * License 2: Modified BSD 58 * 59 * Redistribution and use in source and binary forms, with or without 60 * modification, are permitted provided that the following conditions are met: 61 * * Redistributions of source code must retain the above copyright 62 * notice, this list of conditions and the following disclaimer. 63 * * Redistributions in binary form must reproduce the above copyright 64 * notice, this list of conditions and the following disclaimer in the 65 * documentation and/or other materials provided with the distribution. 66 * * Neither the name of Advanced Micro Devices, Inc. 
nor the 67 * names of its contributors may be used to endorse or promote products 68 * derived from this software without specific prior written permission. 69 * 70 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 71 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 72 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 73 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY 74 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 75 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 76 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 77 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 78 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 79 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 80 * 81 * This file incorporates work covered by the following copyright and 82 * permission notice: 83 * The Synopsys DWC ETHER XGMAC Software Driver and documentation 84 * (hereinafter "Software") is an unsupported proprietary work of Synopsys, 85 * Inc. unless otherwise expressly agreed to in writing between Synopsys 86 * and you. 87 * 88 * The Software IS NOT an item of Licensed Software or Licensed Product 89 * under any End User Software License Agreement or Agreement for Licensed 90 * Product with Synopsys or any supplement thereto. Permission is hereby 91 * granted, free of charge, to any person obtaining a copy of this software 92 * annotated with this license and the Software, to deal in the Software 93 * without restriction, including without limitation the rights to use, 94 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies 95 * of the Software, and to permit persons to whom the Software is furnished 96 * to do so, subject to the following conditions: 97 * 98 * The above copyright notice and this permission notice shall be included 99 * in all copies or substantial portions of the Software. 100 * 101 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" 102 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 103 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 104 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS 105 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 106 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 107 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 108 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 109 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 110 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 111 * THE POSSIBILITY OF SUCH DAMAGE. 112 */ 113 114 #include <sys/cdefs.h> 115 __FBSDID("$FreeBSD$"); 116 117 #include "xgbe.h" 118 #include "xgbe-common.h" 119 120 #include <net/if_dl.h> 121 122 static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata) 123 { 124 return (if_getmtu(pdata->netdev) + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); 125 } 126 127 static unsigned int 128 xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, unsigned int usec) 129 { 130 unsigned long rate; 131 unsigned int ret; 132 133 rate = pdata->sysclk_rate; 134 135 /* 136 * Convert the input usec value to the watchdog timer value. Each 137 * watchdog timer value is equivalent to 256 clock cycles. 
         * Calculate the required value as:
         *   (usec * (system_clock_hz / 10^6)) / 256
         *
         * For example, with an assumed 125 MHz system clock, 30 usec
         * works out to (30 * 125) / 256, i.e. a watchdog value of 14.
         */
        ret = (usec * (rate / 1000000)) / 256;

        return (ret);
}

static unsigned int
xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, unsigned int riwt)
{
        unsigned long rate;
        unsigned int ret;

        rate = pdata->sysclk_rate;

        /*
         * Convert the input watchdog timer value to the usec value. Each
         * watchdog timer value is equivalent to 256 clock cycles.
         * Calculate the required value as:
         *   (riwt * 256) / (system_clock_hz / 10^6)
         */
        ret = (riwt * 256) / (rate / 1000000);

        return (ret);
}

static int
xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
        unsigned int pblx8, pbl;
        unsigned int i;

        pblx8 = DMA_PBL_X8_DISABLE;
        pbl = pdata->pbl;

        if (pdata->pbl > 32) {
                pblx8 = DMA_PBL_X8_ENABLE;
                pbl >>= 3;
        }

        for (i = 0; i < pdata->channel_count; i++) {
                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
                    pblx8);

                if (pdata->channel[i]->tx_ring)
                        XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
                            PBL, pbl);

                if (pdata->channel[i]->rx_ring)
                        XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
                            PBL, pbl);
        }

        return (0);
}

static int
xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
                    pdata->tx_osp_mode);
        }

        return (0);
}

static int
xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

        return (0);
}

static int
xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

        return (0);
}

static int
xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

        return (0);
}

static int
xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

        return (0);
}

static int
xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
                    pdata->rx_riwt);
        }

        return (0);
}

static int
xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
        return (0);
}

static void
xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
                    pdata->rx_buf_size);
        }
}

static void
xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
if (!pdata->channel[i]->tx_ring) 298 break; 299 300 axgbe_printf(0, "Enabling TSO in channel %d\n", i); 301 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1); 302 } 303 } 304 305 static void 306 xgbe_config_sph_mode(struct xgbe_prv_data *pdata) 307 { 308 unsigned int i; 309 310 for (i = 0; i < pdata->channel_count; i++) { 311 if (!pdata->channel[i]->rx_ring) 312 break; 313 314 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1); 315 } 316 317 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); 318 } 319 320 static int 321 xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, 322 unsigned int index, unsigned int val) 323 { 324 unsigned int wait; 325 int ret = 0; 326 327 mtx_lock(&pdata->rss_mutex); 328 329 if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) { 330 ret = -EBUSY; 331 goto unlock; 332 } 333 334 XGMAC_IOWRITE(pdata, MAC_RSSDR, val); 335 336 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); 337 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); 338 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); 339 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); 340 341 wait = 1000; 342 while (wait--) { 343 if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) 344 goto unlock; 345 346 DELAY(1000); 347 } 348 349 ret = -EBUSY; 350 351 unlock: 352 mtx_unlock(&pdata->rss_mutex); 353 354 return (ret); 355 } 356 357 static int 358 xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata) 359 { 360 unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(uint32_t); 361 unsigned int *key = (unsigned int *)&pdata->rss_key; 362 int ret; 363 364 while (key_regs--) { 365 ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE, 366 key_regs, *key++); 367 if (ret) 368 return (ret); 369 } 370 371 return (0); 372 } 373 374 static int 375 xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata) 376 { 377 unsigned int i; 378 int ret; 379 380 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { 381 ret = xgbe_write_rss_reg(pdata, XGBE_RSS_LOOKUP_TABLE_TYPE, i, 382 pdata->rss_table[i]); 383 if (ret) 384 return (ret); 385 } 386 387 return (0); 388 } 389 390 static int 391 xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const uint8_t *key) 392 { 393 memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); 394 395 return (xgbe_write_rss_hash_key(pdata)); 396 } 397 398 static int 399 xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, const uint32_t *table) 400 { 401 unsigned int i; 402 403 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) 404 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]); 405 406 return (xgbe_write_rss_lookup_table(pdata)); 407 } 408 409 static int 410 xgbe_enable_rss(struct xgbe_prv_data *pdata) 411 { 412 int ret; 413 414 if (!pdata->hw_feat.rss) 415 return (-EOPNOTSUPP); 416 417 /* Program the hash key */ 418 ret = xgbe_write_rss_hash_key(pdata); 419 if (ret) 420 return (ret); 421 422 /* Program the lookup table */ 423 ret = xgbe_write_rss_lookup_table(pdata); 424 if (ret) 425 return (ret); 426 427 /* Set the RSS options */ 428 XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); 429 430 /* Enable RSS */ 431 XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); 432 433 axgbe_printf(0, "RSS Enabled\n"); 434 435 return (0); 436 } 437 438 static int 439 xgbe_disable_rss(struct xgbe_prv_data *pdata) 440 { 441 if (!pdata->hw_feat.rss) 442 return (-EOPNOTSUPP); 443 444 XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); 445 446 axgbe_printf(0, "RSS Disabled\n"); 447 448 return (0); 449 } 450 451 static void 452 xgbe_config_rss(struct xgbe_prv_data *pdata) 453 { 454 int ret; 455 
456 if (!pdata->hw_feat.rss) 457 return; 458 459 /* Check if the interface has RSS capability */ 460 if (pdata->enable_rss) 461 ret = xgbe_enable_rss(pdata); 462 else 463 ret = xgbe_disable_rss(pdata); 464 465 if (ret) 466 axgbe_error("error configuring RSS, RSS disabled\n"); 467 } 468 469 static int 470 xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) 471 { 472 unsigned int max_q_count, q_count; 473 unsigned int reg, reg_val; 474 unsigned int i; 475 476 /* Clear MTL flow control */ 477 for (i = 0; i < pdata->rx_q_count; i++) 478 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); 479 480 /* Clear MAC flow control */ 481 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 482 q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); 483 reg = MAC_Q0TFCR; 484 for (i = 0; i < q_count; i++) { 485 reg_val = XGMAC_IOREAD(pdata, reg); 486 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); 487 XGMAC_IOWRITE(pdata, reg, reg_val); 488 489 reg += MAC_QTFCR_INC; 490 } 491 492 return (0); 493 } 494 495 static int 496 xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) 497 { 498 unsigned int max_q_count, q_count; 499 unsigned int reg, reg_val; 500 unsigned int i; 501 502 /* Set MTL flow control */ 503 for (i = 0; i < pdata->rx_q_count; i++) { 504 unsigned int ehfc = 0; 505 506 if (pdata->rx_rfd[i]) { 507 /* Flow control thresholds are established */ 508 /* TODO - enable pfc/ets support */ 509 ehfc = 1; 510 } 511 512 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); 513 514 axgbe_printf(1, "flow control %s for RXq%u\n", 515 ehfc ? "enabled" : "disabled", i); 516 } 517 518 /* Set MAC flow control */ 519 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 520 q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); 521 reg = MAC_Q0TFCR; 522 for (i = 0; i < q_count; i++) { 523 reg_val = XGMAC_IOREAD(pdata, reg); 524 525 /* Enable transmit flow control */ 526 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); 527 528 /* Set pause time */ 529 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); 530 531 XGMAC_IOWRITE(pdata, reg, reg_val); 532 533 reg += MAC_QTFCR_INC; 534 } 535 536 return (0); 537 } 538 539 static int 540 xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata) 541 { 542 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); 543 544 return (0); 545 } 546 547 static int 548 xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) 549 { 550 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); 551 552 return (0); 553 } 554 555 static int 556 xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) 557 { 558 if (pdata->tx_pause) 559 xgbe_enable_tx_flow_control(pdata); 560 else 561 xgbe_disable_tx_flow_control(pdata); 562 563 return (0); 564 } 565 566 static int 567 xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) 568 { 569 if (pdata->rx_pause) 570 xgbe_enable_rx_flow_control(pdata); 571 else 572 xgbe_disable_rx_flow_control(pdata); 573 574 return (0); 575 } 576 577 static void 578 xgbe_config_flow_control(struct xgbe_prv_data *pdata) 579 { 580 xgbe_config_tx_flow_control(pdata); 581 xgbe_config_rx_flow_control(pdata); 582 583 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); 584 } 585 586 static void 587 xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) 588 { 589 struct xgbe_channel *channel; 590 unsigned int i, ver; 591 592 /* Set the interrupt mode if supported */ 593 if (pdata->channel_irq_mode) 594 XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM, 595 pdata->channel_irq_mode); 596 597 ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); 598 599 for (i = 0; i < pdata->channel_count; i++) { 
600 channel = pdata->channel[i]; 601 602 /* Clear all the interrupts which are set */ 603 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, 604 XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); 605 606 /* Clear all interrupt enable bits */ 607 channel->curr_ier = 0; 608 609 /* Enable following interrupts 610 * NIE - Normal Interrupt Summary Enable 611 * AIE - Abnormal Interrupt Summary Enable 612 * FBEE - Fatal Bus Error Enable 613 */ 614 if (ver < 0x21) { 615 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1); 616 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1); 617 } else { 618 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1); 619 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1); 620 } 621 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); 622 623 if (channel->tx_ring) { 624 /* Enable the following Tx interrupts 625 * TIE - Transmit Interrupt Enable (unless using 626 * per channel interrupts in edge triggered 627 * mode) 628 */ 629 if (!pdata->per_channel_irq || pdata->channel_irq_mode) 630 XGMAC_SET_BITS(channel->curr_ier, 631 DMA_CH_IER, TIE, 1); 632 } 633 if (channel->rx_ring) { 634 /* Enable following Rx interrupts 635 * RBUE - Receive Buffer Unavailable Enable 636 * RIE - Receive Interrupt Enable (unless using 637 * per channel interrupts in edge triggered 638 * mode) 639 */ 640 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); 641 if (!pdata->per_channel_irq || pdata->channel_irq_mode) 642 XGMAC_SET_BITS(channel->curr_ier, 643 DMA_CH_IER, RIE, 1); 644 } 645 646 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); 647 } 648 } 649 650 static void 651 xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) 652 { 653 unsigned int mtl_q_isr; 654 unsigned int q_count, i; 655 656 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); 657 for (i = 0; i < q_count; i++) { 658 /* Clear all the interrupts which are set */ 659 mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR); 660 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr); 661 662 /* No MTL interrupts to be enabled */ 663 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0); 664 } 665 } 666 667 static void 668 xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) 669 { 670 unsigned int mac_ier = 0; 671 672 /* Enable Timestamp interrupt */ 673 XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1); 674 675 XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); 676 677 /* Enable all counter interrupts */ 678 XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff); 679 XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff); 680 681 /* Enable MDIO single command completion interrupt */ 682 XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1); 683 } 684 685 static int 686 xgbe_set_speed(struct xgbe_prv_data *pdata, int speed) 687 { 688 unsigned int ss; 689 690 switch (speed) { 691 case SPEED_1000: 692 ss = 0x03; 693 break; 694 case SPEED_2500: 695 ss = 0x02; 696 break; 697 case SPEED_10000: 698 ss = 0x00; 699 break; 700 default: 701 return (-EINVAL); 702 } 703 704 if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss) 705 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss); 706 707 return (0); 708 } 709 710 static int 711 xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) 712 { 713 /* Put the VLAN tag in the Rx descriptor */ 714 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); 715 716 /* Don't check the VLAN type */ 717 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); 718 719 /* Check only C-TAG (0x8100) packets */ 720 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); 721 722 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ 
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

        /* Enable VLAN tag stripping */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

        axgbe_printf(0, "VLAN Stripping Enabled\n");

        return (0);
}

static int
xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

        axgbe_printf(0, "VLAN Stripping Disabled\n");

        return (0);
}

static int
xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
        /* Enable VLAN filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

        /* Enable VLAN Hash Table filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

        /* Disable VLAN tag inverse matching */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

        /* Only filter on the lower 12-bits of the VLAN tag */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

        /* In order for the VLAN Hash Table filtering to be effective,
         * the VLAN tag identifier in the VLAN Tag Register must not
         * be zero. Set the VLAN tag identifier to "1" to enable the
         * VLAN Hash Table filtering. This implies that a VLAN tag of
         * 1 will always pass filtering.
         */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

        axgbe_printf(0, "VLAN filtering Enabled\n");

        return (0);
}

static int
xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
        /* Disable VLAN filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

        axgbe_printf(0, "VLAN filtering Disabled\n");

        return (0);
}

static uint32_t
xgbe_vid_crc32_le(__le16 vid_le)
{
        uint32_t crc = ~0;
        uint32_t temp = 0;
        unsigned char *data = (unsigned char *)&vid_le;
        unsigned char data_byte = 0;
        int i, bits;

        bits = get_bitmask_order(VLAN_VID_MASK);
        for (i = 0; i < bits; i++) {
                if ((i % 8) == 0)
                        data_byte = data[i / 8];

                temp = ((crc & 1) ^ data_byte) & 1;
                crc >>= 1;
                data_byte >>= 1;

                if (temp)
                        crc ^= CRC32_POLY_LE;
        }

        return (crc);
}

static int
xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
        uint32_t crc;
        uint16_t vid;
        uint16_t vlan_hash_table = 0;
        __le16 vid_le = 0;

        axgbe_printf(1, "%s: Before updating VLANHTR 0x%x\n", __func__,
            XGMAC_IOREAD(pdata, MAC_VLANHTR));

        /* Generate the VLAN Hash Table value */
        for_each_set_bit(vid, pdata->active_vlans, VLAN_NVID) {

                /* Get the CRC32 value of the VLAN ID */
                vid_le = cpu_to_le16(vid);
                crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

                vlan_hash_table |= (1 << crc);
                axgbe_printf(1, "%s: vid 0x%x vid_le 0x%x crc 0x%x "
                    "vlan_hash_table 0x%x\n", __func__, vid, vid_le, crc,
                    vlan_hash_table);
        }

        /* Set the VLAN Hash Table filtering register */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

        axgbe_printf(1, "%s: After updating VLANHTR 0x%x\n", __func__,
            XGMAC_IOREAD(pdata, MAC_VLANHTR));

        return (0);
}

static int
xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, unsigned int enable)
{
        unsigned int val = enable ? 1 : 0;

        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
                return (0);

        axgbe_printf(1, "%s promiscuous mode\n", enable ?
"entering" : "leaving"); 849 850 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val); 851 852 /* Hardware will still perform VLAN filtering in promiscuous mode */ 853 if (enable) { 854 axgbe_printf(1, "Disabling rx vlan filtering\n"); 855 xgbe_disable_rx_vlan_filtering(pdata); 856 } else { 857 if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) { 858 axgbe_printf(1, "Enabling rx vlan filtering\n"); 859 xgbe_enable_rx_vlan_filtering(pdata); 860 } 861 } 862 863 return (0); 864 } 865 866 static int 867 xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, unsigned int enable) 868 { 869 unsigned int val = enable ? 1 : 0; 870 871 if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val) 872 return (0); 873 874 axgbe_printf(1,"%s allmulti mode\n", enable ? "entering" : "leaving"); 875 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val); 876 877 return (0); 878 } 879 880 static void 881 xgbe_set_mac_reg(struct xgbe_prv_data *pdata, char *addr, unsigned int *mac_reg) 882 { 883 unsigned int mac_addr_hi, mac_addr_lo; 884 uint8_t *mac_addr; 885 886 mac_addr_lo = 0; 887 mac_addr_hi = 0; 888 889 if (addr) { 890 mac_addr = (uint8_t *)&mac_addr_lo; 891 mac_addr[0] = addr[0]; 892 mac_addr[1] = addr[1]; 893 mac_addr[2] = addr[2]; 894 mac_addr[3] = addr[3]; 895 mac_addr = (uint8_t *)&mac_addr_hi; 896 mac_addr[0] = addr[4]; 897 mac_addr[1] = addr[5]; 898 899 axgbe_printf(1, "adding mac address %pM at %#x\n", addr, *mac_reg); 900 901 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); 902 } 903 904 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi); 905 *mac_reg += MAC_MACA_INC; 906 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo); 907 *mac_reg += MAC_MACA_INC; 908 } 909 910 static void 911 xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata) 912 { 913 unsigned int mac_reg; 914 unsigned int addn_macs; 915 916 mac_reg = MAC_MACA1HR; 917 addn_macs = pdata->hw_feat.addn_mac; 918 919 xgbe_set_mac_reg(pdata, pdata->mac_addr, &mac_reg); 920 addn_macs--; 921 922 /* Clear remaining additional MAC address entries */ 923 while (addn_macs--) 924 xgbe_set_mac_reg(pdata, NULL, &mac_reg); 925 } 926 927 static int 928 xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) 929 { 930 /* TODO - add support to set mac hash table */ 931 xgbe_set_mac_addn_addrs(pdata); 932 933 return (0); 934 } 935 936 static int 937 xgbe_set_mac_address(struct xgbe_prv_data *pdata, uint8_t *addr) 938 { 939 unsigned int mac_addr_hi, mac_addr_lo; 940 941 mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); 942 mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | 943 (addr[1] << 8) | (addr[0] << 0); 944 945 XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi); 946 XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo); 947 948 return (0); 949 } 950 951 static int 952 xgbe_config_rx_mode(struct xgbe_prv_data *pdata) 953 { 954 unsigned int pr_mode, am_mode; 955 956 pr_mode = ((pdata->netdev->if_drv_flags & IFF_PPROMISC) != 0); 957 am_mode = ((pdata->netdev->if_drv_flags & IFF_ALLMULTI) != 0); 958 959 xgbe_set_promiscuous_mode(pdata, pr_mode); 960 xgbe_set_all_multicast_mode(pdata, am_mode); 961 962 xgbe_add_mac_addresses(pdata); 963 964 return (0); 965 } 966 967 static int 968 xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) 969 { 970 unsigned int reg; 971 972 if (gpio > 15) 973 return (-EINVAL); 974 975 reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); 976 977 reg &= ~(1 << (gpio + 16)); 978 XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); 979 980 return (0); 981 } 982 983 static int 984 xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) 985 { 986 unsigned int reg; 987 988 if (gpio > 15) 989 return 
(-EINVAL); 990 991 reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); 992 993 reg |= (1 << (gpio + 16)); 994 XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); 995 996 return (0); 997 } 998 999 static int 1000 xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) 1001 { 1002 unsigned long flags; 1003 unsigned int mmd_address, index, offset; 1004 int mmd_data; 1005 1006 if (mmd_reg & MII_ADDR_C45) 1007 mmd_address = mmd_reg & ~MII_ADDR_C45; 1008 else 1009 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 1010 1011 /* The PCS registers are accessed using mmio. The underlying 1012 * management interface uses indirect addressing to access the MMD 1013 * register sets. This requires accessing of the PCS register in two 1014 * phases, an address phase and a data phase. 1015 * 1016 * The mmio interface is based on 16-bit offsets and values. All 1017 * register offsets must therefore be adjusted by left shifting the 1018 * offset 1 bit and reading 16 bits of data. 1019 */ 1020 mmd_address <<= 1; 1021 index = mmd_address & ~pdata->xpcs_window_mask; 1022 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); 1023 1024 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1025 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); 1026 mmd_data = XPCS16_IOREAD(pdata, offset); 1027 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1028 1029 return (mmd_data); 1030 } 1031 1032 static void 1033 xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, 1034 int mmd_data) 1035 { 1036 unsigned long flags; 1037 unsigned int mmd_address, index, offset; 1038 1039 if (mmd_reg & MII_ADDR_C45) 1040 mmd_address = mmd_reg & ~MII_ADDR_C45; 1041 else 1042 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 1043 1044 /* The PCS registers are accessed using mmio. The underlying 1045 * management interface uses indirect addressing to access the MMD 1046 * register sets. This requires accessing of the PCS register in two 1047 * phases, an address phase and a data phase. 1048 * 1049 * The mmio interface is based on 16-bit offsets and values. All 1050 * register offsets must therefore be adjusted by left shifting the 1051 * offset 1 bit and writing 16 bits of data. 1052 */ 1053 mmd_address <<= 1; 1054 index = mmd_address & ~pdata->xpcs_window_mask; 1055 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); 1056 1057 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1058 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); 1059 XPCS16_IOWRITE(pdata, offset, mmd_data); 1060 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1061 } 1062 1063 static int 1064 xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) 1065 { 1066 unsigned long flags; 1067 unsigned int mmd_address; 1068 int mmd_data; 1069 1070 if (mmd_reg & MII_ADDR_C45) 1071 mmd_address = mmd_reg & ~MII_ADDR_C45; 1072 else 1073 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 1074 1075 /* The PCS registers are accessed using mmio. The underlying APB3 1076 * management interface uses indirect addressing to access the MMD 1077 * register sets. This requires accessing of the PCS register in two 1078 * phases, an address phase and a data phase. 1079 * 1080 * The mmio interface is based on 32-bit offsets and values. All 1081 * register offsets must therefore be adjusted by left shifting the 1082 * offset 2 bits and reading 32 bits of data. 
1083 */ 1084 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1085 XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); 1086 mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2); 1087 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1088 1089 return (mmd_data); 1090 } 1091 1092 static void 1093 xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, 1094 int mmd_data) 1095 { 1096 unsigned int mmd_address; 1097 unsigned long flags; 1098 1099 if (mmd_reg & MII_ADDR_C45) 1100 mmd_address = mmd_reg & ~MII_ADDR_C45; 1101 else 1102 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 1103 1104 /* The PCS registers are accessed using mmio. The underlying APB3 1105 * management interface uses indirect addressing to access the MMD 1106 * register sets. This requires accessing of the PCS register in two 1107 * phases, an address phase and a data phase. 1108 * 1109 * The mmio interface is based on 32-bit offsets and values. All 1110 * register offsets must therefore be adjusted by left shifting the 1111 * offset 2 bits and writing 32 bits of data. 1112 */ 1113 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1114 XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); 1115 XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); 1116 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1117 } 1118 1119 static int 1120 xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) 1121 { 1122 switch (pdata->vdata->xpcs_access) { 1123 case XGBE_XPCS_ACCESS_V1: 1124 return (xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg)); 1125 1126 case XGBE_XPCS_ACCESS_V2: 1127 default: 1128 return (xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg)); 1129 } 1130 } 1131 1132 static void 1133 xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, 1134 int mmd_data) 1135 { 1136 switch (pdata->vdata->xpcs_access) { 1137 case XGBE_XPCS_ACCESS_V1: 1138 return (xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data)); 1139 1140 case XGBE_XPCS_ACCESS_V2: 1141 default: 1142 return (xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data)); 1143 } 1144 } 1145 1146 static unsigned int 1147 xgbe_create_mdio_sca(int port, int reg) 1148 { 1149 unsigned int mdio_sca, da; 1150 1151 da = (reg & MII_ADDR_C45) ? 
reg >> 16 : 0; 1152 1153 mdio_sca = 0; 1154 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); 1155 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); 1156 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); 1157 1158 return (mdio_sca); 1159 } 1160 1161 static int 1162 xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg, 1163 uint16_t val) 1164 { 1165 unsigned int mdio_sca, mdio_sccd; 1166 1167 mtx_lock_spin(&pdata->mdio_mutex); 1168 1169 mdio_sca = xgbe_create_mdio_sca(addr, reg); 1170 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1171 1172 mdio_sccd = 0; 1173 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val); 1174 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1); 1175 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); 1176 XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); 1177 1178 if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) == 1179 EWOULDBLOCK) { 1180 axgbe_error("%s: MDIO write error\n", __func__); 1181 mtx_unlock_spin(&pdata->mdio_mutex); 1182 return (-ETIMEDOUT); 1183 } 1184 1185 mtx_unlock_spin(&pdata->mdio_mutex); 1186 return (0); 1187 } 1188 1189 static int 1190 xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg) 1191 { 1192 unsigned int mdio_sca, mdio_sccd; 1193 1194 mtx_lock_spin(&pdata->mdio_mutex); 1195 1196 mdio_sca = xgbe_create_mdio_sca(addr, reg); 1197 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1198 1199 mdio_sccd = 0; 1200 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3); 1201 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); 1202 XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); 1203 1204 if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) == 1205 EWOULDBLOCK) { 1206 axgbe_error("%s: MDIO read error\n", __func__); 1207 mtx_unlock_spin(&pdata->mdio_mutex); 1208 return (-ETIMEDOUT); 1209 } 1210 1211 mtx_unlock_spin(&pdata->mdio_mutex); 1212 1213 return (XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA)); 1214 } 1215 1216 static int 1217 xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, 1218 enum xgbe_mdio_mode mode) 1219 { 1220 unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R); 1221 1222 switch (mode) { 1223 case XGBE_MDIO_MODE_CL22: 1224 if (port > XGMAC_MAX_C22_PORT) 1225 return (-EINVAL); 1226 reg_val |= (1 << port); 1227 break; 1228 case XGBE_MDIO_MODE_CL45: 1229 break; 1230 default: 1231 return (-EINVAL); 1232 } 1233 1234 XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val); 1235 1236 return (0); 1237 } 1238 1239 static int 1240 xgbe_tx_complete(struct xgbe_ring_desc *rdesc) 1241 { 1242 return (!XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN)); 1243 } 1244 1245 static int 1246 xgbe_disable_rx_csum(struct xgbe_prv_data *pdata) 1247 { 1248 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); 1249 1250 axgbe_printf(0, "Receive checksum offload Disabled\n"); 1251 return (0); 1252 } 1253 1254 static int 1255 xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) 1256 { 1257 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); 1258 1259 axgbe_printf(0, "Receive checksum offload Enabled\n"); 1260 return (0); 1261 } 1262 1263 static void 1264 xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) 1265 { 1266 struct xgbe_ring_desc *rdesc = rdata->rdesc; 1267 1268 /* Reset the Tx descriptor 1269 * Set buffer 1 (lo) address to zero 1270 * Set buffer 1 (hi) address to zero 1271 * Reset all other control bits (IC, TTSE, B2L & B1L) 1272 * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) 1273 */ 1274 rdesc->desc0 = 0; 1275 rdesc->desc1 = 0; 1276 rdesc->desc2 = 0; 1277 rdesc->desc3 = 0; 1278 1279 wmb(); 1280 } 
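/*
 * The descriptor init helpers below program the per-channel ring state:
 * every Tx descriptor is cleared via xgbe_tx_desc_reset() (which also
 * clears the OWN bit, leaving ownership with software), the ring length
 * register is written with the descriptor count minus one, and the ring
 * base address is split into the high and low 32-bit address registers.
 */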

static void
xgbe_tx_desc_init(struct xgbe_channel *channel)
{
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        int i;
        int start_index = ring->cur;

        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, i);

                /* Initialize Tx descriptor */
                xgbe_tx_desc_reset(rdata);
        }

        /* Update the total number of Tx descriptors */
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

        /* Update the starting address of descriptor ring */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
            upper_32_bits(rdata->rdata_paddr));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
            lower_32_bits(rdata->rdata_paddr));
}

static void
xgbe_rx_desc_init(struct xgbe_channel *channel)
{
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        unsigned int start_index = ring->cur;

        /*
         * Just set desc_count and the starting address of the desc list
         * here. Rest will be done as part of the txrx path.
         */

        /* Update the total number of Rx descriptors */
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

        /* Update the starting address of descriptor ring */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
            upper_32_bits(rdata->rdata_paddr));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
            lower_32_bits(rdata->rdata_paddr));
}

static int
xgbe_dev_read(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
        unsigned int err, etlt, l34t = 0;

        axgbe_printf(1, "-->xgbe_dev_read: cur = %d\n", ring->cur);

        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        rdesc = rdata->rdesc;

        /* Check for data availability */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
                return (1);

        rmb();

        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
                /* TODO - Timestamp Context Descriptor */

                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                    CONTEXT, 1);
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                    CONTEXT_NEXT, 0);
                return (0);
        }

        /* Normal Descriptor, be sure Context Descriptor bit is off */
        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

        /* Indicate if a Context Descriptor is next */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                    CONTEXT_NEXT, 1);

        /* Get the header length */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                    FIRST, 1);
                rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
                    RX_NORMAL_DESC2, HL);
                if (rdata->rx.hdr_len)
                        pdata->ext_stats.rx_split_header_packets++;
        } else
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                    FIRST, 0);

        /* Get the RSS hash */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                    RSS_HASH, 1);

1388 packet->rss_hash = le32_to_cpu(rdesc->desc1); 1389 1390 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); 1391 switch (l34t) { 1392 case RX_DESC3_L34T_IPV4_TCP: 1393 packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV4; 1394 break; 1395 case RX_DESC3_L34T_IPV4_UDP: 1396 packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV4; 1397 break; 1398 case RX_DESC3_L34T_IPV6_TCP: 1399 packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV6; 1400 break; 1401 case RX_DESC3_L34T_IPV6_UDP: 1402 packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV6; 1403 break; 1404 default: 1405 packet->rss_hash_type = M_HASHTYPE_OPAQUE; 1406 break; 1407 } 1408 } 1409 1410 /* Not all the data has been transferred for this packet */ 1411 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { 1412 /* This is not the last of the data for this packet */ 1413 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1414 LAST, 0); 1415 return (0); 1416 } 1417 1418 /* This is the last of the data for this packet */ 1419 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1420 LAST, 1); 1421 1422 /* Get the packet length */ 1423 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); 1424 1425 /* Set checksum done indicator as appropriate */ 1426 /* TODO - add tunneling support */ 1427 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1428 CSUM_DONE, 1); 1429 1430 /* Check for errors (only valid in last descriptor) */ 1431 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); 1432 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); 1433 axgbe_printf(1, "%s: err=%u, etlt=%#x\n", __func__, err, etlt); 1434 1435 if (!err || !etlt) { 1436 /* No error if err is 0 or etlt is 0 */ 1437 if (etlt == 0x09) { 1438 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1439 VLAN_CTAG, 1); 1440 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, 1441 RX_NORMAL_DESC0, OVT); 1442 axgbe_printf(1, "vlan-ctag=%#06x\n", packet->vlan_ctag); 1443 } 1444 } else { 1445 unsigned int tnp = XGMAC_GET_BITS(packet->attributes, 1446 RX_PACKET_ATTRIBUTES, TNP); 1447 1448 if ((etlt == 0x05) || (etlt == 0x06)) { 1449 axgbe_printf(1, "%s: err1 l34t %d err 0x%x etlt 0x%x\n", 1450 __func__, l34t, err, etlt); 1451 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1452 CSUM_DONE, 0); 1453 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1454 TNPCSUM_DONE, 0); 1455 pdata->ext_stats.rx_csum_errors++; 1456 } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) { 1457 axgbe_printf(1, "%s: err2 l34t %d err 0x%x etlt 0x%x\n", 1458 __func__, l34t, err, etlt); 1459 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1460 CSUM_DONE, 0); 1461 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1462 TNPCSUM_DONE, 0); 1463 pdata->ext_stats.rx_vxlan_csum_errors++; 1464 } else { 1465 axgbe_printf(1, "%s: tnp %d l34t %d err 0x%x etlt 0x%x\n", 1466 __func__, tnp, l34t, err, etlt); 1467 axgbe_printf(1, "%s: Channel: %d SR 0x%x DSR 0x%x \n", 1468 __func__, channel->queue_index, 1469 XGMAC_DMA_IOREAD(channel, DMA_CH_SR), 1470 XGMAC_DMA_IOREAD(channel, DMA_CH_DSR)); 1471 axgbe_printf(1, "%s: ring cur %d dirty %d\n", 1472 __func__, ring->cur, ring->dirty); 1473 axgbe_printf(1, "%s: Desc 0x%08x-0x%08x-0x%08x-0x%08x\n", 1474 __func__, rdesc->desc0, rdesc->desc1, rdesc->desc2, 1475 rdesc->desc3); 1476 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, 1477 FRAME, 1); 1478 } 1479 } 1480 1481 axgbe_printf(1, "<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", 1482 channel->name, ring->cur & (ring->rdesc_count - 1), ring->cur); 
1483 1484 return (0); 1485 } 1486 1487 static int 1488 xgbe_is_context_desc(struct xgbe_ring_desc *rdesc) 1489 { 1490 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ 1491 return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT)); 1492 } 1493 1494 static int 1495 xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) 1496 { 1497 /* Rx and Tx share LD bit, so check TDES3.LD bit */ 1498 return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD)); 1499 } 1500 1501 static int 1502 xgbe_enable_int(struct xgbe_channel *channel, enum xgbe_int int_id) 1503 { 1504 struct xgbe_prv_data *pdata = channel->pdata; 1505 1506 axgbe_printf(1, "enable_int: DMA_CH_IER read - 0x%x\n", 1507 channel->curr_ier); 1508 1509 switch (int_id) { 1510 case XGMAC_INT_DMA_CH_SR_TI: 1511 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); 1512 break; 1513 case XGMAC_INT_DMA_CH_SR_TPS: 1514 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1); 1515 break; 1516 case XGMAC_INT_DMA_CH_SR_TBU: 1517 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1); 1518 break; 1519 case XGMAC_INT_DMA_CH_SR_RI: 1520 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); 1521 break; 1522 case XGMAC_INT_DMA_CH_SR_RBU: 1523 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); 1524 break; 1525 case XGMAC_INT_DMA_CH_SR_RPS: 1526 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1); 1527 break; 1528 case XGMAC_INT_DMA_CH_SR_TI_RI: 1529 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); 1530 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); 1531 break; 1532 case XGMAC_INT_DMA_CH_SR_FBE: 1533 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); 1534 break; 1535 case XGMAC_INT_DMA_ALL: 1536 channel->curr_ier |= channel->saved_ier; 1537 break; 1538 default: 1539 return (-1); 1540 } 1541 1542 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); 1543 1544 axgbe_printf(1, "enable_int: DMA_CH_IER write - 0x%x\n", 1545 channel->curr_ier); 1546 1547 return (0); 1548 } 1549 1550 static int 1551 xgbe_disable_int(struct xgbe_channel *channel, enum xgbe_int int_id) 1552 { 1553 struct xgbe_prv_data *pdata = channel->pdata; 1554 1555 axgbe_printf(1, "disable_int: DMA_CH_IER read - 0x%x\n", 1556 channel->curr_ier); 1557 1558 switch (int_id) { 1559 case XGMAC_INT_DMA_CH_SR_TI: 1560 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); 1561 break; 1562 case XGMAC_INT_DMA_CH_SR_TPS: 1563 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0); 1564 break; 1565 case XGMAC_INT_DMA_CH_SR_TBU: 1566 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0); 1567 break; 1568 case XGMAC_INT_DMA_CH_SR_RI: 1569 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); 1570 break; 1571 case XGMAC_INT_DMA_CH_SR_RBU: 1572 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0); 1573 break; 1574 case XGMAC_INT_DMA_CH_SR_RPS: 1575 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0); 1576 break; 1577 case XGMAC_INT_DMA_CH_SR_TI_RI: 1578 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); 1579 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); 1580 break; 1581 case XGMAC_INT_DMA_CH_SR_FBE: 1582 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0); 1583 break; 1584 case XGMAC_INT_DMA_ALL: 1585 channel->saved_ier = channel->curr_ier; 1586 channel->curr_ier = 0; 1587 break; 1588 default: 1589 return (-1); 1590 } 1591 1592 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); 1593 1594 axgbe_printf(1, "disable_int: DMA_CH_IER write - 0x%x\n", 1595 channel->curr_ier); 1596 1597 return (0); 1598 } 1599 1600 static int 1601 __xgbe_exit(struct 
xgbe_prv_data *pdata) 1602 { 1603 unsigned int count = 2000; 1604 1605 /* Issue a software reset */ 1606 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); 1607 DELAY(10); 1608 1609 /* Poll Until Poll Condition */ 1610 while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) 1611 DELAY(500); 1612 1613 if (!count) 1614 return (-EBUSY); 1615 1616 return (0); 1617 } 1618 1619 static int 1620 xgbe_exit(struct xgbe_prv_data *pdata) 1621 { 1622 int ret; 1623 1624 /* To guard against possible incorrectly generated interrupts, 1625 * issue the software reset twice. 1626 */ 1627 ret = __xgbe_exit(pdata); 1628 if (ret) { 1629 axgbe_error("%s: exit error %d\n", __func__, ret); 1630 return (ret); 1631 } 1632 1633 return (__xgbe_exit(pdata)); 1634 } 1635 1636 static int 1637 xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) 1638 { 1639 unsigned int i, count; 1640 1641 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) 1642 return (0); 1643 1644 for (i = 0; i < pdata->tx_q_count; i++) 1645 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); 1646 1647 /* Poll Until Poll Condition */ 1648 for (i = 0; i < pdata->tx_q_count; i++) { 1649 count = 2000; 1650 while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, 1651 MTL_Q_TQOMR, FTQ)) 1652 DELAY(500); 1653 1654 if (!count) 1655 return (-EBUSY); 1656 } 1657 1658 return (0); 1659 } 1660 1661 static void 1662 xgbe_config_dma_bus(struct xgbe_prv_data *pdata) 1663 { 1664 unsigned int sbmr; 1665 1666 sbmr = XGMAC_IOREAD(pdata, DMA_SBMR); 1667 1668 /* Set enhanced addressing mode */ 1669 XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1); 1670 1671 /* Set the System Bus mode */ 1672 XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1); 1673 XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2); 1674 XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal); 1675 XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1); 1676 XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1); 1677 1678 XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr); 1679 1680 /* Set descriptor fetching threshold */ 1681 if (pdata->vdata->tx_desc_prefetch) 1682 XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS, 1683 pdata->vdata->tx_desc_prefetch); 1684 1685 if (pdata->vdata->rx_desc_prefetch) 1686 XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS, 1687 pdata->vdata->rx_desc_prefetch); 1688 } 1689 1690 static void 1691 xgbe_config_dma_cache(struct xgbe_prv_data *pdata) 1692 { 1693 XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr); 1694 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr); 1695 if (pdata->awarcr) 1696 XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr); 1697 } 1698 1699 static void 1700 xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) 1701 { 1702 unsigned int i; 1703 1704 /* Set Tx to weighted round robin scheduling algorithm */ 1705 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); 1706 1707 /* Set Tx traffic classes to use WRR algorithm with equal weights */ 1708 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { 1709 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, 1710 MTL_TSA_ETS); 1711 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1); 1712 } 1713 1714 /* Set Rx to strict priority algorithm */ 1715 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); 1716 } 1717 1718 static void 1719 xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata, 1720 unsigned int queue, unsigned int q_fifo_size) 1721 { 1722 unsigned int frame_fifo_size; 1723 unsigned int rfa, rfd; 1724 1725 frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata)); 1726 axgbe_printf(1, "%s: queue %d q_fifo_size %d 
frame_fifo_size 0x%x\n", 1727 __func__, queue, q_fifo_size, frame_fifo_size); 1728 1729 /* TODO - add pfc/ets related support */ 1730 1731 /* This path deals with just maximum frame sizes which are 1732 * limited to a jumbo frame of 9,000 (plus headers, etc.) 1733 * so we can never exceed the maximum allowable RFA/RFD 1734 * values. 1735 */ 1736 if (q_fifo_size <= 2048) { 1737 /* rx_rfd to zero to signal no flow control */ 1738 pdata->rx_rfa[queue] = 0; 1739 pdata->rx_rfd[queue] = 0; 1740 return; 1741 } 1742 1743 if (q_fifo_size <= 4096) { 1744 /* Between 2048 and 4096 */ 1745 pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */ 1746 pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */ 1747 return; 1748 } 1749 1750 if (q_fifo_size <= frame_fifo_size) { 1751 /* Between 4096 and max-frame */ 1752 pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */ 1753 pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */ 1754 return; 1755 } 1756 1757 if (q_fifo_size <= (frame_fifo_size * 3)) { 1758 /* Between max-frame and 3 max-frames, 1759 * trigger if we get just over a frame of data and 1760 * resume when we have just under half a frame left. 1761 */ 1762 rfa = q_fifo_size - frame_fifo_size; 1763 rfd = rfa + (frame_fifo_size / 2); 1764 } else { 1765 /* Above 3 max-frames - trigger when just over 1766 * 2 frames of space available 1767 */ 1768 rfa = frame_fifo_size * 2; 1769 rfa += XGMAC_FLOW_CONTROL_UNIT; 1770 rfd = rfa + frame_fifo_size; 1771 } 1772 1773 pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa); 1774 pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd); 1775 axgbe_printf(1, "%s: forced queue %d rfa 0x%x rfd 0x%x\n", __func__, 1776 queue, pdata->rx_rfa[queue], pdata->rx_rfd[queue]); 1777 } 1778 1779 static void 1780 xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata, 1781 unsigned int *fifo) 1782 { 1783 unsigned int q_fifo_size; 1784 unsigned int i; 1785 1786 for (i = 0; i < pdata->rx_q_count; i++) { 1787 q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT; 1788 1789 axgbe_printf(1, "%s: fifo[%d] - 0x%x q_fifo_size 0x%x\n", 1790 __func__, i, fifo[i], q_fifo_size); 1791 xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size); 1792 } 1793 } 1794 1795 static void 1796 xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) 1797 { 1798 unsigned int i; 1799 1800 for (i = 0; i < pdata->rx_q_count; i++) { 1801 axgbe_printf(1, "%s: queue %d rfa %d rfd %d\n", __func__, i, 1802 pdata->rx_rfa[i], pdata->rx_rfd[i]); 1803 1804 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 1805 pdata->rx_rfa[i]); 1806 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 1807 pdata->rx_rfd[i]); 1808 1809 axgbe_printf(1, "%s: MTL_Q_RQFCR 0x%x\n", __func__, 1810 XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR)); 1811 } 1812 } 1813 1814 static unsigned int 1815 xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata) 1816 { 1817 /* The configured value may not be the actual amount of fifo RAM */ 1818 return (min_t(unsigned int, pdata->tx_max_fifo_size, 1819 pdata->hw_feat.tx_fifo_size)); 1820 } 1821 1822 static unsigned int 1823 xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata) 1824 { 1825 /* The configured value may not be the actual amount of fifo RAM */ 1826 return (min_t(unsigned int, pdata->rx_max_fifo_size, 1827 pdata->hw_feat.rx_fifo_size)); 1828 } 1829 1830 static void 1831 xgbe_calculate_equal_fifo(unsigned int fifo_size, unsigned int queue_count, 1832 unsigned int *fifo) 1833 { 1834 unsigned int q_fifo_size; 1835 unsigned int p_fifo; 1836 unsigned int i; 1837 1838 q_fifo_size = fifo_size / queue_count; 1839 1840 /* 
Calculate the fifo setting by dividing the queue's fifo size 1841 * by the fifo allocation increment (with 0 representing the 1842 * base allocation increment so decrement the result by 1). 1843 */ 1844 p_fifo = q_fifo_size / XGMAC_FIFO_UNIT; 1845 if (p_fifo) 1846 p_fifo--; 1847 1848 /* Distribute the fifo equally amongst the queues */ 1849 for (i = 0; i < queue_count; i++) 1850 fifo[i] = p_fifo; 1851 } 1852 1853 static unsigned int 1854 xgbe_set_nonprio_fifos(unsigned int fifo_size, unsigned int queue_count, 1855 unsigned int *fifo) 1856 { 1857 unsigned int i; 1858 1859 MPASS(powerof2(XGMAC_FIFO_MIN_ALLOC)); 1860 1861 if (queue_count <= IEEE_8021QAZ_MAX_TCS) 1862 return (fifo_size); 1863 1864 /* Rx queues 9 and up are for specialized packets, 1865 * such as PTP or DCB control packets, etc. and 1866 * don't require a large fifo 1867 */ 1868 for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) { 1869 fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1; 1870 fifo_size -= XGMAC_FIFO_MIN_ALLOC; 1871 } 1872 1873 return (fifo_size); 1874 } 1875 1876 static void 1877 xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) 1878 { 1879 unsigned int fifo_size; 1880 unsigned int fifo[XGBE_MAX_QUEUES]; 1881 unsigned int i; 1882 1883 fifo_size = xgbe_get_tx_fifo_size(pdata); 1884 axgbe_printf(1, "%s: fifo_size 0x%x\n", __func__, fifo_size); 1885 1886 xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo); 1887 1888 for (i = 0; i < pdata->tx_q_count; i++) { 1889 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]); 1890 axgbe_printf(1, "Tx q %d FIFO Size 0x%x\n", i, 1891 XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR)); 1892 } 1893 1894 axgbe_printf(1, "%d Tx hardware queues, %d byte fifo per queue\n", 1895 pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); 1896 } 1897 1898 static void 1899 xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) 1900 { 1901 unsigned int fifo_size; 1902 unsigned int fifo[XGBE_MAX_QUEUES]; 1903 unsigned int prio_queues; 1904 unsigned int i; 1905 1906 /* TODO - add pfc/ets related support */ 1907 1908 /* Clear any DCB related fifo/queue information */ 1909 fifo_size = xgbe_get_rx_fifo_size(pdata); 1910 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); 1911 axgbe_printf(1, "%s: fifo_size 0x%x rx_q_cnt %d prio %d\n", __func__, 1912 fifo_size, pdata->rx_q_count, prio_queues); 1913 1914 /* Assign a minimum fifo to the non-VLAN priority queues */ 1915 fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo); 1916 1917 xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo); 1918 1919 for (i = 0; i < pdata->rx_q_count; i++) { 1920 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]); 1921 axgbe_printf(1, "Rx q %d FIFO Size 0x%x\n", i, 1922 XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR)); 1923 } 1924 1925 xgbe_calculate_flow_control_threshold(pdata, fifo); 1926 xgbe_config_flow_control_threshold(pdata); 1927 1928 axgbe_printf(1, "%u Rx hardware queues, %u byte fifo/queue\n", 1929 pdata->rx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); 1930 } 1931 1932 static void 1933 xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) 1934 { 1935 unsigned int qptc, qptc_extra, queue; 1936 unsigned int prio_queues; 1937 unsigned int ppq, ppq_extra, prio; 1938 unsigned int mask; 1939 unsigned int i, j, reg, reg_val; 1940 1941 /* Map the MTL Tx Queues to Traffic Classes 1942 * Note: Tx Queues >= Traffic Classes 1943 */ 1944 qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; 1945 qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; 1946 1947 for (i = 0, queue = 0; i < 
pdata->hw_feat.tc_cnt; i++) { 1948 for (j = 0; j < qptc; j++) { 1949 axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i); 1950 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, 1951 Q2TCMAP, i); 1952 pdata->q2tc_map[queue++] = i; 1953 } 1954 1955 if (i < qptc_extra) { 1956 axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i); 1957 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, 1958 Q2TCMAP, i); 1959 pdata->q2tc_map[queue++] = i; 1960 } 1961 } 1962 1963 /* Map the 8 VLAN priority values to available MTL Rx queues */ 1964 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); 1965 ppq = IEEE_8021QAZ_MAX_TCS / prio_queues; 1966 ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues; 1967 1968 reg = MAC_RQC2R; 1969 reg_val = 0; 1970 for (i = 0, prio = 0; i < prio_queues;) { 1971 mask = 0; 1972 for (j = 0; j < ppq; j++) { 1973 axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i); 1974 mask |= (1 << prio); 1975 pdata->prio2q_map[prio++] = i; 1976 } 1977 1978 if (i < ppq_extra) { 1979 axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i); 1980 mask |= (1 << prio); 1981 pdata->prio2q_map[prio++] = i; 1982 } 1983 1984 reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3)); 1985 1986 if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues)) 1987 continue; 1988 1989 XGMAC_IOWRITE(pdata, reg, reg_val); 1990 reg += MAC_RQC2_INC; 1991 reg_val = 0; 1992 } 1993 1994 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ 1995 reg = MTL_RQDCM0R; 1996 reg_val = 0; 1997 for (i = 0; i < pdata->rx_q_count;) { 1998 reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); 1999 2000 if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count)) 2001 continue; 2002 2003 XGMAC_IOWRITE(pdata, reg, reg_val); 2004 2005 reg += MTL_RQDCM_INC; 2006 reg_val = 0; 2007 } 2008 } 2009 2010 static void 2011 xgbe_config_mac_address(struct xgbe_prv_data *pdata) 2012 { 2013 xgbe_set_mac_address(pdata, IF_LLADDR(pdata->netdev)); 2014 2015 /* Filtering is done using perfect filtering and hash filtering */ 2016 if (pdata->hw_feat.hash_table_size) { 2017 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1); 2018 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1); 2019 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1); 2020 } 2021 } 2022 2023 static void 2024 xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata) 2025 { 2026 unsigned int val; 2027 2028 val = (if_getmtu(pdata->netdev) > XGMAC_STD_PACKET_MTU) ? 
1 : 0; 2029 2030 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); 2031 } 2032 2033 static void 2034 xgbe_config_mac_speed(struct xgbe_prv_data *pdata) 2035 { 2036 xgbe_set_speed(pdata, pdata->phy_speed); 2037 } 2038 2039 static void 2040 xgbe_config_checksum_offload(struct xgbe_prv_data *pdata) 2041 { 2042 if ((if_getcapenable(pdata->netdev) & IFCAP_RXCSUM)) 2043 xgbe_enable_rx_csum(pdata); 2044 else 2045 xgbe_disable_rx_csum(pdata); 2046 } 2047 2048 static void 2049 xgbe_config_vlan_support(struct xgbe_prv_data *pdata) 2050 { 2051 /* Indicate that VLAN Tx CTAGs come from context descriptors */ 2052 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0); 2053 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1); 2054 2055 /* Set the current VLAN Hash Table register value */ 2056 xgbe_update_vlan_hash_table(pdata); 2057 2058 if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) { 2059 axgbe_printf(1, "Enabling rx vlan filtering\n"); 2060 xgbe_enable_rx_vlan_filtering(pdata); 2061 } else { 2062 axgbe_printf(1, "Disabling rx vlan filtering\n"); 2063 xgbe_disable_rx_vlan_filtering(pdata); 2064 } 2065 2066 if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWTAGGING)) { 2067 axgbe_printf(1, "Enabling rx vlan stripping\n"); 2068 xgbe_enable_rx_vlan_stripping(pdata); 2069 } else { 2070 axgbe_printf(1, "Disabling rx vlan stripping\n"); 2071 xgbe_disable_rx_vlan_stripping(pdata); 2072 } 2073 } 2074 2075 static uint64_t 2076 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) 2077 { 2078 bool read_hi; 2079 uint64_t val; 2080 2081 if (pdata->vdata->mmc_64bit) { 2082 switch (reg_lo) { 2083 /* These registers are always 32 bit */ 2084 case MMC_RXRUNTERROR: 2085 case MMC_RXJABBERERROR: 2086 case MMC_RXUNDERSIZE_G: 2087 case MMC_RXOVERSIZE_G: 2088 case MMC_RXWATCHDOGERROR: 2089 read_hi = false; 2090 break; 2091 2092 default: 2093 read_hi = true; 2094 } 2095 } else { 2096 switch (reg_lo) { 2097 /* These registers are always 64 bit */ 2098 case MMC_TXOCTETCOUNT_GB_LO: 2099 case MMC_TXOCTETCOUNT_G_LO: 2100 case MMC_RXOCTETCOUNT_GB_LO: 2101 case MMC_RXOCTETCOUNT_G_LO: 2102 read_hi = true; 2103 break; 2104 2105 default: 2106 read_hi = false; 2107 } 2108 } 2109 2110 val = XGMAC_IOREAD(pdata, reg_lo); 2111 2112 if (read_hi) 2113 val |= ((uint64_t)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); 2114 2115 return (val); 2116 } 2117 2118 static void 2119 xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) 2120 { 2121 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 2122 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR); 2123 2124 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) 2125 stats->txoctetcount_gb += 2126 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); 2127 2128 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) 2129 stats->txframecount_gb += 2130 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); 2131 2132 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) 2133 stats->txbroadcastframes_g += 2134 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); 2135 2136 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) 2137 stats->txmulticastframes_g += 2138 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); 2139 2140 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) 2141 stats->tx64octets_gb += 2142 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); 2143 2144 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) 2145 stats->tx65to127octets_gb += 2146 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); 2147 2148 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) 2149 stats->tx128to255octets_gb += 2150 
xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); 2151 2152 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) 2153 stats->tx256to511octets_gb += 2154 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); 2155 2156 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) 2157 stats->tx512to1023octets_gb += 2158 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); 2159 2160 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) 2161 stats->tx1024tomaxoctets_gb += 2162 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 2163 2164 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) 2165 stats->txunicastframes_gb += 2166 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); 2167 2168 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) 2169 stats->txmulticastframes_gb += 2170 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 2171 2172 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) 2173 stats->txbroadcastframes_g += 2174 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 2175 2176 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) 2177 stats->txunderflowerror += 2178 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); 2179 2180 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) 2181 stats->txoctetcount_g += 2182 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); 2183 2184 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) 2185 stats->txframecount_g += 2186 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); 2187 2188 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) 2189 stats->txpauseframes += 2190 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); 2191 2192 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) 2193 stats->txvlanframes_g += 2194 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); 2195 } 2196 2197 static void 2198 xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) 2199 { 2200 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 2201 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR); 2202 2203 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) 2204 stats->rxframecount_gb += 2205 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); 2206 2207 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) 2208 stats->rxoctetcount_gb += 2209 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); 2210 2211 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) 2212 stats->rxoctetcount_g += 2213 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); 2214 2215 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) 2216 stats->rxbroadcastframes_g += 2217 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); 2218 2219 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) 2220 stats->rxmulticastframes_g += 2221 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); 2222 2223 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) 2224 stats->rxcrcerror += 2225 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); 2226 2227 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) 2228 stats->rxrunterror += 2229 xgbe_mmc_read(pdata, MMC_RXRUNTERROR); 2230 2231 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) 2232 stats->rxjabbererror += 2233 xgbe_mmc_read(pdata, MMC_RXJABBERERROR); 2234 2235 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) 2236 stats->rxundersize_g += 2237 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); 2238 2239 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) 2240 stats->rxoversize_g += 2241 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); 2242 2243 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) 2244 stats->rx64octets_gb += 2245 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); 2246 2247 if 
(XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) 2248 stats->rx65to127octets_gb += 2249 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); 2250 2251 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) 2252 stats->rx128to255octets_gb += 2253 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); 2254 2255 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) 2256 stats->rx256to511octets_gb += 2257 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); 2258 2259 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) 2260 stats->rx512to1023octets_gb += 2261 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); 2262 2263 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) 2264 stats->rx1024tomaxoctets_gb += 2265 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 2266 2267 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) 2268 stats->rxunicastframes_g += 2269 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); 2270 2271 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) 2272 stats->rxlengtherror += 2273 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); 2274 2275 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) 2276 stats->rxoutofrangetype += 2277 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); 2278 2279 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) 2280 stats->rxpauseframes += 2281 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); 2282 2283 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) 2284 stats->rxfifooverflow += 2285 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); 2286 2287 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) 2288 stats->rxvlanframes_gb += 2289 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); 2290 2291 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) 2292 stats->rxwatchdogerror += 2293 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); 2294 } 2295 2296 static void 2297 xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) 2298 { 2299 struct xgbe_mmc_stats *stats = &pdata->mmc_stats; 2300 2301 /* Freeze counters */ 2302 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); 2303 2304 stats->txoctetcount_gb += 2305 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); 2306 2307 stats->txframecount_gb += 2308 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); 2309 2310 stats->txbroadcastframes_g += 2311 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); 2312 2313 stats->txmulticastframes_g += 2314 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); 2315 2316 stats->tx64octets_gb += 2317 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); 2318 2319 stats->tx65to127octets_gb += 2320 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); 2321 2322 stats->tx128to255octets_gb += 2323 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); 2324 2325 stats->tx256to511octets_gb += 2326 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); 2327 2328 stats->tx512to1023octets_gb += 2329 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); 2330 2331 stats->tx1024tomaxoctets_gb += 2332 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 2333 2334 stats->txunicastframes_gb += 2335 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); 2336 2337 stats->txmulticastframes_gb += 2338 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 2339 2340 stats->txbroadcastframes_gb += 2341 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 2342 2343 stats->txunderflowerror += 2344 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); 2345 2346 stats->txoctetcount_g += 2347 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); 2348 2349 stats->txframecount_g += 2350 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); 2351 2352 stats->txpauseframes += 2353 
xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); 2354 2355 stats->txvlanframes_g += 2356 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); 2357 2358 stats->rxframecount_gb += 2359 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); 2360 2361 stats->rxoctetcount_gb += 2362 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); 2363 2364 stats->rxoctetcount_g += 2365 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); 2366 2367 stats->rxbroadcastframes_g += 2368 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); 2369 2370 stats->rxmulticastframes_g += 2371 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); 2372 2373 stats->rxcrcerror += 2374 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); 2375 2376 stats->rxrunterror += 2377 xgbe_mmc_read(pdata, MMC_RXRUNTERROR); 2378 2379 stats->rxjabbererror += 2380 xgbe_mmc_read(pdata, MMC_RXJABBERERROR); 2381 2382 stats->rxundersize_g += 2383 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); 2384 2385 stats->rxoversize_g += 2386 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); 2387 2388 stats->rx64octets_gb += 2389 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); 2390 2391 stats->rx65to127octets_gb += 2392 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); 2393 2394 stats->rx128to255octets_gb += 2395 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); 2396 2397 stats->rx256to511octets_gb += 2398 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); 2399 2400 stats->rx512to1023octets_gb += 2401 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); 2402 2403 stats->rx1024tomaxoctets_gb += 2404 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 2405 2406 stats->rxunicastframes_g += 2407 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); 2408 2409 stats->rxlengtherror += 2410 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); 2411 2412 stats->rxoutofrangetype += 2413 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); 2414 2415 stats->rxpauseframes += 2416 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); 2417 2418 stats->rxfifooverflow += 2419 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); 2420 2421 stats->rxvlanframes_gb += 2422 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); 2423 2424 stats->rxwatchdogerror += 2425 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); 2426 2427 /* Un-freeze counters */ 2428 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); 2429 } 2430 2431 static void 2432 xgbe_config_mmc(struct xgbe_prv_data *pdata) 2433 { 2434 /* Set counters to reset on read */ 2435 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1); 2436 2437 /* Reset the counters */ 2438 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); 2439 } 2440 2441 static void 2442 xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue) 2443 { 2444 unsigned int tx_status; 2445 unsigned long tx_timeout; 2446 2447 /* The Tx engine cannot be stopped if it is actively processing 2448 * packets. Wait for the Tx queue to empty the Tx fifo. Don't 2449 * wait forever though... 
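 * The loop below polls the MTL_Q_TQDR register every 500us for roughly
 * XGBE_DMA_STOP_TIMEOUT seconds, and treats the queue as drained once
 * the read/transfer controller status (TRCSTS) reads something other
 * than 1 and the queue-not-empty status (TXQSTS) reads zero.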
2450 */ 2451 tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz); 2452 while (ticks < tx_timeout) { 2453 tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR); 2454 if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) && 2455 (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0)) 2456 break; 2457 2458 DELAY(500); 2459 } 2460 2461 if (ticks >= tx_timeout) 2462 axgbe_printf(1, "timed out waiting for Tx queue %u to empty\n", 2463 queue); 2464 } 2465 2466 static void 2467 xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue) 2468 { 2469 unsigned int tx_dsr, tx_pos, tx_qidx; 2470 unsigned int tx_status; 2471 unsigned long tx_timeout; 2472 2473 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20) 2474 return (xgbe_txq_prepare_tx_stop(pdata, queue)); 2475 2476 /* Calculate the status register to read and the position within */ 2477 if (queue < DMA_DSRX_FIRST_QUEUE) { 2478 tx_dsr = DMA_DSR0; 2479 tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START; 2480 } else { 2481 tx_qidx = queue - DMA_DSRX_FIRST_QUEUE; 2482 2483 tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); 2484 tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) + 2485 DMA_DSRX_TPS_START; 2486 } 2487 2488 /* The Tx engine cannot be stopped if it is actively processing 2489 * descriptors. Wait for the Tx engine to enter the stopped or 2490 * suspended state. Don't wait forever though... 2491 */ 2492 tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz); 2493 while (ticks < tx_timeout) { 2494 tx_status = XGMAC_IOREAD(pdata, tx_dsr); 2495 tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH); 2496 if ((tx_status == DMA_TPS_STOPPED) || 2497 (tx_status == DMA_TPS_SUSPENDED)) 2498 break; 2499 2500 DELAY(500); 2501 } 2502 2503 if (ticks >= tx_timeout) 2504 axgbe_printf(1, "timed out waiting for Tx DMA channel %u to stop\n", 2505 queue); 2506 } 2507 2508 static void 2509 xgbe_enable_tx(struct xgbe_prv_data *pdata) 2510 { 2511 unsigned int i; 2512 2513 /* Enable each Tx DMA channel */ 2514 for (i = 0; i < pdata->channel_count; i++) { 2515 if (!pdata->channel[i]->tx_ring) 2516 break; 2517 2518 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); 2519 } 2520 2521 /* Enable each Tx queue */ 2522 for (i = 0; i < pdata->tx_q_count; i++) 2523 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 2524 MTL_Q_ENABLED); 2525 2526 /* Enable MAC Tx */ 2527 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); 2528 } 2529 2530 static void 2531 xgbe_disable_tx(struct xgbe_prv_data *pdata) 2532 { 2533 unsigned int i; 2534 2535 /* Prepare for Tx DMA channel stop */ 2536 for (i = 0; i < pdata->tx_q_count; i++) 2537 xgbe_prepare_tx_stop(pdata, i); 2538 2539 /* Disable MAC Tx */ 2540 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); 2541 2542 /* Disable each Tx queue */ 2543 for (i = 0; i < pdata->tx_q_count; i++) 2544 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); 2545 2546 /* Disable each Tx DMA channel */ 2547 for (i = 0; i < pdata->channel_count; i++) { 2548 if (!pdata->channel[i]->tx_ring) 2549 break; 2550 2551 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); 2552 } 2553 } 2554 2555 static void 2556 xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, unsigned int queue) 2557 { 2558 unsigned int rx_status; 2559 unsigned long rx_timeout; 2560 2561 /* The Rx engine cannot be stopped if it is actively processing 2562 * packets. Wait for the Rx queue to empty the Rx fifo. Don't 2563 * wait forever though... 
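 * The loop below likewise polls MTL_Q_RQDR every 500us for roughly
 * XGBE_DMA_STOP_TIMEOUT seconds, waiting for both the queued-packet
 * count (PRXQ) and the Rx queue status (RXQSTS) to read zero.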
2564 */ 2565 rx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz); 2566 while (ticks < rx_timeout) { 2567 rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); 2568 if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && 2569 (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) 2570 break; 2571 2572 DELAY(500); 2573 } 2574 2575 if (ticks >= rx_timeout) 2576 axgbe_printf(1, "timed out waiting for Rx queue %d to empty\n", 2577 queue); 2578 } 2579 2580 static void 2581 xgbe_enable_rx(struct xgbe_prv_data *pdata) 2582 { 2583 unsigned int reg_val, i; 2584 2585 /* Enable each Rx DMA channel */ 2586 for (i = 0; i < pdata->channel_count; i++) { 2587 if (!pdata->channel[i]->rx_ring) 2588 break; 2589 2590 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); 2591 } 2592 2593 /* Enable each Rx queue */ 2594 reg_val = 0; 2595 for (i = 0; i < pdata->rx_q_count; i++) 2596 reg_val |= (0x02 << (i << 1)); 2597 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val); 2598 2599 /* Enable MAC Rx */ 2600 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1); 2601 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1); 2602 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1); 2603 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1); 2604 } 2605 2606 static void 2607 xgbe_disable_rx(struct xgbe_prv_data *pdata) 2608 { 2609 unsigned int i; 2610 2611 /* Disable MAC Rx */ 2612 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0); 2613 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0); 2614 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); 2615 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); 2616 2617 /* Prepare for Rx DMA channel stop */ 2618 for (i = 0; i < pdata->rx_q_count; i++) 2619 xgbe_prepare_rx_stop(pdata, i); 2620 2621 /* Disable each Rx queue */ 2622 XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); 2623 2624 /* Disable each Rx DMA channel */ 2625 for (i = 0; i < pdata->channel_count; i++) { 2626 if (!pdata->channel[i]->rx_ring) 2627 break; 2628 2629 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); 2630 } 2631 } 2632 2633 static void 2634 xgbe_powerup_tx(struct xgbe_prv_data *pdata) 2635 { 2636 unsigned int i; 2637 2638 /* Enable each Tx DMA channel */ 2639 for (i = 0; i < pdata->channel_count; i++) { 2640 if (!pdata->channel[i]->tx_ring) 2641 break; 2642 2643 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1); 2644 } 2645 2646 /* Enable MAC Tx */ 2647 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1); 2648 } 2649 2650 static void 2651 xgbe_powerdown_tx(struct xgbe_prv_data *pdata) 2652 { 2653 unsigned int i; 2654 2655 /* Prepare for Tx DMA channel stop */ 2656 for (i = 0; i < pdata->tx_q_count; i++) 2657 xgbe_prepare_tx_stop(pdata, i); 2658 2659 /* Disable MAC Tx */ 2660 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); 2661 2662 /* Disable each Tx DMA channel */ 2663 for (i = 0; i < pdata->channel_count; i++) { 2664 if (!pdata->channel[i]->tx_ring) 2665 break; 2666 2667 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0); 2668 } 2669 } 2670 2671 static void 2672 xgbe_powerup_rx(struct xgbe_prv_data *pdata) 2673 { 2674 unsigned int i; 2675 2676 /* Enable each Rx DMA channel */ 2677 for (i = 0; i < pdata->channel_count; i++) { 2678 if (!pdata->channel[i]->rx_ring) 2679 break; 2680 2681 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1); 2682 } 2683 } 2684 2685 static void 2686 xgbe_powerdown_rx(struct xgbe_prv_data *pdata) 2687 { 2688 unsigned int i; 2689 2690 /* Disable each Rx DMA channel */ 2691 for (i = 0; i < pdata->channel_count; i++) { 2692 if (!pdata->channel[i]->rx_ring) 2693 break; 2694 2695 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0); 
2696 } 2697 } 2698 2699 static int 2700 xgbe_init(struct xgbe_prv_data *pdata) 2701 { 2702 struct xgbe_desc_if *desc_if = &pdata->desc_if; 2703 int ret; 2704 2705 /* Flush Tx queues */ 2706 ret = xgbe_flush_tx_queues(pdata); 2707 if (ret) { 2708 axgbe_error("error flushing TX queues\n"); 2709 return (ret); 2710 } 2711 2712 /* 2713 * Initialize DMA related features 2714 */ 2715 xgbe_config_dma_bus(pdata); 2716 xgbe_config_dma_cache(pdata); 2717 xgbe_config_osp_mode(pdata); 2718 xgbe_config_pbl_val(pdata); 2719 xgbe_config_rx_coalesce(pdata); 2720 xgbe_config_tx_coalesce(pdata); 2721 xgbe_config_rx_buffer_size(pdata); 2722 xgbe_config_tso_mode(pdata); 2723 xgbe_config_sph_mode(pdata); 2724 xgbe_config_rss(pdata); 2725 desc_if->wrapper_tx_desc_init(pdata); 2726 desc_if->wrapper_rx_desc_init(pdata); 2727 xgbe_enable_dma_interrupts(pdata); 2728 2729 /* 2730 * Initialize MTL related features 2731 */ 2732 xgbe_config_mtl_mode(pdata); 2733 xgbe_config_queue_mapping(pdata); 2734 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); 2735 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); 2736 xgbe_config_tx_threshold(pdata, pdata->tx_threshold); 2737 xgbe_config_rx_threshold(pdata, pdata->rx_threshold); 2738 xgbe_config_tx_fifo_size(pdata); 2739 xgbe_config_rx_fifo_size(pdata); 2740 /*TODO: Error Packet and undersized good Packet forwarding enable 2741 (FEP and FUP) 2742 */ 2743 xgbe_enable_mtl_interrupts(pdata); 2744 2745 /* 2746 * Initialize MAC related features 2747 */ 2748 xgbe_config_mac_address(pdata); 2749 xgbe_config_rx_mode(pdata); 2750 xgbe_config_jumbo_enable(pdata); 2751 xgbe_config_flow_control(pdata); 2752 xgbe_config_mac_speed(pdata); 2753 xgbe_config_checksum_offload(pdata); 2754 xgbe_config_vlan_support(pdata); 2755 xgbe_config_mmc(pdata); 2756 xgbe_enable_mac_interrupts(pdata); 2757 2758 return (0); 2759 } 2760 2761 void 2762 xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) 2763 { 2764 2765 hw_if->tx_complete = xgbe_tx_complete; 2766 2767 hw_if->set_mac_address = xgbe_set_mac_address; 2768 hw_if->config_rx_mode = xgbe_config_rx_mode; 2769 2770 hw_if->enable_rx_csum = xgbe_enable_rx_csum; 2771 hw_if->disable_rx_csum = xgbe_disable_rx_csum; 2772 2773 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping; 2774 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping; 2775 hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering; 2776 hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering; 2777 hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table; 2778 2779 hw_if->read_mmd_regs = xgbe_read_mmd_regs; 2780 hw_if->write_mmd_regs = xgbe_write_mmd_regs; 2781 2782 hw_if->set_speed = xgbe_set_speed; 2783 2784 hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode; 2785 hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs; 2786 hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs; 2787 2788 hw_if->set_gpio = xgbe_set_gpio; 2789 hw_if->clr_gpio = xgbe_clr_gpio; 2790 2791 hw_if->enable_tx = xgbe_enable_tx; 2792 hw_if->disable_tx = xgbe_disable_tx; 2793 hw_if->enable_rx = xgbe_enable_rx; 2794 hw_if->disable_rx = xgbe_disable_rx; 2795 2796 hw_if->powerup_tx = xgbe_powerup_tx; 2797 hw_if->powerdown_tx = xgbe_powerdown_tx; 2798 hw_if->powerup_rx = xgbe_powerup_rx; 2799 hw_if->powerdown_rx = xgbe_powerdown_rx; 2800 2801 hw_if->dev_read = xgbe_dev_read; 2802 hw_if->enable_int = xgbe_enable_int; 2803 hw_if->disable_int = xgbe_disable_int; 2804 hw_if->init = xgbe_init; 2805 hw_if->exit = xgbe_exit; 2806 2807 /* Descriptor related Sequences have to be initialized here */ 2808 
hw_if->tx_desc_init = xgbe_tx_desc_init; 2809 hw_if->rx_desc_init = xgbe_rx_desc_init; 2810 hw_if->tx_desc_reset = xgbe_tx_desc_reset; 2811 hw_if->is_last_desc = xgbe_is_last_desc; 2812 hw_if->is_context_desc = xgbe_is_context_desc; 2813 2814 /* For flow control */ 2815 hw_if->config_tx_flow_control = xgbe_config_tx_flow_control; 2816 hw_if->config_rx_flow_control = xgbe_config_rx_flow_control; 2817 2818 /* For RX and TX coalescing */ 2819 hw_if->config_rx_coalesce = xgbe_config_rx_coalesce; 2820 hw_if->config_tx_coalesce = xgbe_config_tx_coalesce; 2821 hw_if->usec_to_riwt = xgbe_usec_to_riwt; 2822 hw_if->riwt_to_usec = xgbe_riwt_to_usec; 2823 2824 /* For RX and TX threshold config */ 2825 hw_if->config_rx_threshold = xgbe_config_rx_threshold; 2826 hw_if->config_tx_threshold = xgbe_config_tx_threshold; 2827 2828 /* For RX and TX Store and Forward Mode config */ 2829 hw_if->config_rsf_mode = xgbe_config_rsf_mode; 2830 hw_if->config_tsf_mode = xgbe_config_tsf_mode; 2831 2832 /* For TX DMA Operating on Second Frame config */ 2833 hw_if->config_osp_mode = xgbe_config_osp_mode; 2834 2835 /* For MMC statistics support */ 2836 hw_if->tx_mmc_int = xgbe_tx_mmc_int; 2837 hw_if->rx_mmc_int = xgbe_rx_mmc_int; 2838 hw_if->read_mmc_stats = xgbe_read_mmc_stats; 2839 2840 /* For Receive Side Scaling */ 2841 hw_if->enable_rss = xgbe_enable_rss; 2842 hw_if->disable_rss = xgbe_disable_rss; 2843 hw_if->set_rss_hash_key = xgbe_set_rss_hash_key; 2844 hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table; 2845 } 2846
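/*
 * Illustrative sketch only (not part of the driver): a caller owning a
 * struct xgbe_prv_data would typically wire up and use this hardware
 * interface roughly as follows, with error handling omitted:
 *
 *	xgbe_init_function_ptrs_dev(&pdata->hw_if);
 *	...
 *	ret = pdata->hw_if.init(pdata);
 *
 * The actual call sites live elsewhere in the driver.
 */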