/*
 * AMD 10Gb Ethernet driver
 *
 * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc.
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto. Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "xgbe.h"
#include "xgbe-common.h"

#include <net/if_dl.h>

static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
	return (if_getmtu(pdata->netdev) + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
}

static unsigned int
xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	return (ret);
}

static unsigned int
xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
	 */
	ret = (riwt * 256) / (rate / 1000000);

	return (ret);
}

static int
xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
	unsigned int pblx8, pbl;
	unsigned int i;

	pblx8 = DMA_PBL_X8_DISABLE;
	pbl = pdata->pbl;

	if (pdata->pbl > 32) {
		pblx8 = DMA_PBL_X8_ENABLE;
		pbl >>= 3;
	}

	for (i = 0; i < pdata->channel_count; i++) {
		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
		    pblx8);

		if (pdata->channel[i]->tx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
			    PBL, pbl);

		if (pdata->channel[i]->rx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
			    PBL, pbl);
	}

	return (0);
}

static int
xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
		    pdata->tx_osp_mode);
	}

	return (0);
}

static int
xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return (0);
}

static int
xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return (0);
}

static int
xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return (0);
}

static int
xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return (0);
}

static int
xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
		    pdata->rx_riwt);
	}

	return (0);
}

static int
xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return (0);
}

static void
xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
		    pdata->rx_buf_size);
	}
}

static void
xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	int tso_enabled = (if_getcapenable(pdata->netdev)
& IFCAP_TSO); 297 298 for (i = 0; i < pdata->channel_count; i++) { 299 if (!pdata->channel[i]->tx_ring) 300 break; 301 302 axgbe_printf(1, "TSO in channel %d %s\n", i, tso_enabled ? "enabled" : "disabled"); 303 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, tso_enabled ? 1 : 0); 304 } 305 } 306 307 static void 308 xgbe_config_sph_mode(struct xgbe_prv_data *pdata) 309 { 310 unsigned int i; 311 int sph_enable_flag = XGMAC_IOREAD_BITS(pdata, MAC_HWF1R, SPHEN); 312 313 axgbe_printf(1, "sph_enable %d sph feature enabled?: %d\n", 314 pdata->sph_enable, sph_enable_flag); 315 316 if (pdata->sph_enable && sph_enable_flag) 317 axgbe_printf(0, "SPH Enabled\n"); 318 319 for (i = 0; i < pdata->channel_count; i++) { 320 if (!pdata->channel[i]->rx_ring) 321 break; 322 if (pdata->sph_enable && sph_enable_flag) { 323 /* Enable split header feature */ 324 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1); 325 } else { 326 /* Disable split header feature */ 327 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0); 328 } 329 330 /* per-channel confirmation of SPH being disabled/enabled */ 331 int val = XGMAC_DMA_IOREAD_BITS(pdata->channel[i], DMA_CH_CR, SPH); 332 axgbe_printf(0, "%s: SPH %s in channel %d\n", __func__, 333 (val ? "enabled" : "disabled"), i); 334 } 335 336 if (pdata->sph_enable && sph_enable_flag) 337 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); 338 } 339 340 static int 341 xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, 342 unsigned int index, unsigned int val) 343 { 344 unsigned int wait; 345 int ret = 0; 346 347 mtx_lock(&pdata->rss_mutex); 348 349 if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) { 350 ret = -EBUSY; 351 goto unlock; 352 } 353 354 XGMAC_IOWRITE(pdata, MAC_RSSDR, val); 355 356 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); 357 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); 358 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); 359 XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); 360 361 wait = 1000; 362 while (wait--) { 363 if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) 364 goto unlock; 365 366 DELAY(1000); 367 } 368 369 ret = -EBUSY; 370 371 unlock: 372 mtx_unlock(&pdata->rss_mutex); 373 374 return (ret); 375 } 376 377 static int 378 xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata) 379 { 380 unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(uint32_t); 381 unsigned int *key = (unsigned int *)&pdata->rss_key; 382 int ret; 383 384 while (key_regs--) { 385 ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE, 386 key_regs, *key++); 387 if (ret) 388 return (ret); 389 } 390 391 return (0); 392 } 393 394 static int 395 xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata) 396 { 397 unsigned int i; 398 int ret; 399 400 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { 401 ret = xgbe_write_rss_reg(pdata, XGBE_RSS_LOOKUP_TABLE_TYPE, i, 402 pdata->rss_table[i]); 403 if (ret) 404 return (ret); 405 } 406 407 return (0); 408 } 409 410 static int 411 xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const uint8_t *key) 412 { 413 memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); 414 415 return (xgbe_write_rss_hash_key(pdata)); 416 } 417 418 static int 419 xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, const uint32_t *table) 420 { 421 unsigned int i; 422 423 for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) 424 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]); 425 426 return (xgbe_write_rss_lookup_table(pdata)); 427 } 428 429 static int 430 xgbe_enable_rss(struct xgbe_prv_data *pdata) 
431 { 432 int ret; 433 434 if (!pdata->hw_feat.rss) 435 return (-EOPNOTSUPP); 436 437 /* Program the hash key */ 438 ret = xgbe_write_rss_hash_key(pdata); 439 if (ret) 440 return (ret); 441 442 /* Program the lookup table */ 443 ret = xgbe_write_rss_lookup_table(pdata); 444 if (ret) 445 return (ret); 446 447 /* Set the RSS options */ 448 XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); 449 450 /* Enable RSS */ 451 XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); 452 453 axgbe_printf(0, "RSS Enabled\n"); 454 455 return (0); 456 } 457 458 static int 459 xgbe_disable_rss(struct xgbe_prv_data *pdata) 460 { 461 if (!pdata->hw_feat.rss) 462 return (-EOPNOTSUPP); 463 464 XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); 465 466 axgbe_printf(0, "RSS Disabled\n"); 467 468 return (0); 469 } 470 471 static void 472 xgbe_config_rss(struct xgbe_prv_data *pdata) 473 { 474 int ret; 475 476 if (!pdata->hw_feat.rss) 477 return; 478 479 /* Check if the interface has RSS capability */ 480 if (pdata->enable_rss) 481 ret = xgbe_enable_rss(pdata); 482 else 483 ret = xgbe_disable_rss(pdata); 484 485 if (ret) 486 axgbe_error("error configuring RSS, RSS disabled\n"); 487 } 488 489 static int 490 xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) 491 { 492 unsigned int max_q_count, q_count; 493 unsigned int reg, reg_val; 494 unsigned int i; 495 496 /* Clear MTL flow control */ 497 for (i = 0; i < pdata->rx_q_count; i++) 498 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); 499 500 /* Clear MAC flow control */ 501 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 502 q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); 503 reg = MAC_Q0TFCR; 504 for (i = 0; i < q_count; i++) { 505 reg_val = XGMAC_IOREAD(pdata, reg); 506 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0); 507 XGMAC_IOWRITE(pdata, reg, reg_val); 508 509 reg += MAC_QTFCR_INC; 510 } 511 512 return (0); 513 } 514 515 static int 516 xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) 517 { 518 unsigned int max_q_count, q_count; 519 unsigned int reg, reg_val; 520 unsigned int i; 521 522 /* Set MTL flow control */ 523 for (i = 0; i < pdata->rx_q_count; i++) { 524 unsigned int ehfc = 0; 525 526 if (pdata->rx_rfd[i]) { 527 /* Flow control thresholds are established */ 528 /* TODO - enable pfc/ets support */ 529 ehfc = 1; 530 } 531 532 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); 533 534 axgbe_printf(1, "flow control %s for RXq%u\n", 535 ehfc ? 
"enabled" : "disabled", i); 536 } 537 538 /* Set MAC flow control */ 539 max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; 540 q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); 541 reg = MAC_Q0TFCR; 542 for (i = 0; i < q_count; i++) { 543 reg_val = XGMAC_IOREAD(pdata, reg); 544 545 /* Enable transmit flow control */ 546 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1); 547 548 /* Set pause time */ 549 XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff); 550 551 XGMAC_IOWRITE(pdata, reg, reg_val); 552 553 reg += MAC_QTFCR_INC; 554 } 555 556 return (0); 557 } 558 559 static int 560 xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata) 561 { 562 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0); 563 564 return (0); 565 } 566 567 static int 568 xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata) 569 { 570 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1); 571 572 return (0); 573 } 574 575 static int 576 xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata) 577 { 578 if (pdata->tx_pause) 579 xgbe_enable_tx_flow_control(pdata); 580 else 581 xgbe_disable_tx_flow_control(pdata); 582 583 return (0); 584 } 585 586 static int 587 xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata) 588 { 589 if (pdata->rx_pause) 590 xgbe_enable_rx_flow_control(pdata); 591 else 592 xgbe_disable_rx_flow_control(pdata); 593 594 return (0); 595 } 596 597 static void 598 xgbe_config_flow_control(struct xgbe_prv_data *pdata) 599 { 600 xgbe_config_tx_flow_control(pdata); 601 xgbe_config_rx_flow_control(pdata); 602 603 XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0); 604 } 605 606 static void 607 xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) 608 { 609 struct xgbe_channel *channel; 610 unsigned int i, ver; 611 612 /* Set the interrupt mode if supported */ 613 if (pdata->channel_irq_mode) 614 XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM, 615 pdata->channel_irq_mode); 616 617 ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER); 618 619 for (i = 0; i < pdata->channel_count; i++) { 620 channel = pdata->channel[i]; 621 622 /* Clear all the interrupts which are set */ 623 XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, 624 XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); 625 626 /* Clear all interrupt enable bits */ 627 channel->curr_ier = 0; 628 629 /* Enable following interrupts 630 * NIE - Normal Interrupt Summary Enable 631 * AIE - Abnormal Interrupt Summary Enable 632 * FBEE - Fatal Bus Error Enable 633 */ 634 if (ver < 0x21) { 635 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1); 636 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1); 637 } else { 638 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1); 639 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1); 640 } 641 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); 642 643 if (channel->tx_ring) { 644 /* Enable the following Tx interrupts 645 * TIE - Transmit Interrupt Enable (unless using 646 * per channel interrupts in edge triggered 647 * mode) 648 */ 649 if (!pdata->per_channel_irq || pdata->channel_irq_mode) 650 XGMAC_SET_BITS(channel->curr_ier, 651 DMA_CH_IER, TIE, 1); 652 } 653 if (channel->rx_ring) { 654 /* Enable following Rx interrupts 655 * RBUE - Receive Buffer Unavailable Enable 656 * RIE - Receive Interrupt Enable (unless using 657 * per channel interrupts in edge triggered 658 * mode) 659 */ 660 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); 661 if (!pdata->per_channel_irq || pdata->channel_irq_mode) 662 XGMAC_SET_BITS(channel->curr_ier, 663 DMA_CH_IER, RIE, 1); 664 } 665 666 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); 
667 } 668 } 669 670 static void 671 xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata) 672 { 673 unsigned int mtl_q_isr; 674 unsigned int q_count, i; 675 676 q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); 677 for (i = 0; i < q_count; i++) { 678 /* Clear all the interrupts which are set */ 679 mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR); 680 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr); 681 682 /* No MTL interrupts to be enabled */ 683 XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0); 684 } 685 } 686 687 static void 688 xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) 689 { 690 unsigned int mac_ier = 0; 691 692 /* Enable Timestamp interrupt */ 693 XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1); 694 695 XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); 696 697 /* Enable all counter interrupts */ 698 XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff); 699 XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff); 700 701 /* Enable MDIO single command completion interrupt */ 702 XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1); 703 } 704 705 static int 706 xgbe_set_speed(struct xgbe_prv_data *pdata, int speed) 707 { 708 unsigned int ss; 709 710 switch (speed) { 711 case SPEED_1000: 712 ss = 0x03; 713 break; 714 case SPEED_2500: 715 ss = 0x02; 716 break; 717 case SPEED_10000: 718 ss = 0x00; 719 break; 720 default: 721 return (-EINVAL); 722 } 723 724 if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss) 725 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss); 726 727 return (0); 728 } 729 730 static int 731 xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) 732 { 733 /* Put the VLAN tag in the Rx descriptor */ 734 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); 735 736 /* Don't check the VLAN type */ 737 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); 738 739 /* Check only C-TAG (0x8100) packets */ 740 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); 741 742 /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ 743 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); 744 745 /* Enable VLAN tag stripping */ 746 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); 747 748 axgbe_printf(0, "VLAN Stripping Enabled\n"); 749 750 return (0); 751 } 752 753 static int 754 xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) 755 { 756 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); 757 758 axgbe_printf(0, "VLAN Stripping Disabled\n"); 759 760 return (0); 761 } 762 763 static int 764 xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) 765 { 766 /* Enable VLAN filtering */ 767 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); 768 769 /* Enable VLAN Hash Table filtering */ 770 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); 771 772 /* Disable VLAN tag inverse matching */ 773 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); 774 775 /* Only filter on the lower 12-bits of the VLAN tag */ 776 XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); 777 778 /* In order for the VLAN Hash Table filtering to be effective, 779 * the VLAN tag identifier in the VLAN Tag Register must not 780 * be zero. Set the VLAN tag identifier to "1" to enable the 781 * VLAN Hash Table filtering. This implies that a VLAN tag of 782 * 1 will always pass filtering. 
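	 * The per-VID hash bits themselves are programmed by
	 * xgbe_update_vlan_hash_table() below: each active VID is run
	 * through a little-endian CRC32, the result is inverted and
	 * bit-reversed, and its top four bits select which of the 16
	 * MAC_VLANHTR hash bits to set.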
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	axgbe_printf(0, "VLAN filtering Enabled\n");

	return (0);
}

static int
xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	axgbe_printf(0, "VLAN filtering Disabled\n");

	return (0);
}

static uint32_t
xgbe_vid_crc32_le(__le16 vid_le)
{
	uint32_t crc = ~0;
	uint32_t temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= CRC32_POLY_LE;
	}

	return (crc);
}

static int
xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	uint32_t crc;
	uint16_t vid;
	uint16_t vlan_hash_table = 0;
	__le16 vid_le = 0;

	axgbe_printf(1, "%s: Before updating VLANHTR 0x%x\n", __func__,
	    XGMAC_IOREAD(pdata, MAC_VLANHTR));

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_NVID) {

		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
		axgbe_printf(1, "%s: vid 0x%x vid_le 0x%x crc 0x%x "
		    "vlan_hash_table 0x%x\n", __func__, vid, vid_le, crc,
		    vlan_hash_table);
	}

	/* Set the VLAN Hash Table filtering register */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	axgbe_printf(1, "%s: After updating VLANHTR 0x%x\n", __func__,
	    XGMAC_IOREAD(pdata, MAC_VLANHTR));

	return (0);
}

static int
xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return (0);

	axgbe_printf(1, "%s promiscuous mode\n", enable ? "entering" : "leaving");

	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	/* Hardware will still perform VLAN filtering in promiscuous mode */
	if (enable) {
		axgbe_printf(1, "Disabling rx vlan filtering\n");
		xgbe_disable_rx_vlan_filtering(pdata);
	} else {
		if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) {
			axgbe_printf(1, "Enabling rx vlan filtering\n");
			xgbe_enable_rx_vlan_filtering(pdata);
		}
	}

	return (0);
}

static int
xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return (0);

	axgbe_printf(1, "%s allmulti mode\n", enable ?
"entering" : "leaving"); 895 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val); 896 897 return (0); 898 } 899 900 static void 901 xgbe_set_mac_reg(struct xgbe_prv_data *pdata, char *addr, unsigned int *mac_reg) 902 { 903 unsigned int mac_addr_hi, mac_addr_lo; 904 uint8_t *mac_addr; 905 906 mac_addr_lo = 0; 907 mac_addr_hi = 0; 908 909 if (addr) { 910 mac_addr = (uint8_t *)&mac_addr_lo; 911 mac_addr[0] = addr[0]; 912 mac_addr[1] = addr[1]; 913 mac_addr[2] = addr[2]; 914 mac_addr[3] = addr[3]; 915 mac_addr = (uint8_t *)&mac_addr_hi; 916 mac_addr[0] = addr[4]; 917 mac_addr[1] = addr[5]; 918 919 axgbe_printf(1, "adding mac address %pM at %#x\n", addr, *mac_reg); 920 921 XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); 922 } 923 924 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi); 925 *mac_reg += MAC_MACA_INC; 926 XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo); 927 *mac_reg += MAC_MACA_INC; 928 } 929 930 static void 931 xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata) 932 { 933 unsigned int mac_reg; 934 unsigned int addn_macs; 935 936 mac_reg = MAC_MACA1HR; 937 addn_macs = pdata->hw_feat.addn_mac; 938 939 xgbe_set_mac_reg(pdata, pdata->mac_addr, &mac_reg); 940 addn_macs--; 941 942 /* Clear remaining additional MAC address entries */ 943 while (addn_macs--) 944 xgbe_set_mac_reg(pdata, NULL, &mac_reg); 945 } 946 947 static int 948 xgbe_add_mac_addresses(struct xgbe_prv_data *pdata) 949 { 950 /* TODO - add support to set mac hash table */ 951 xgbe_set_mac_addn_addrs(pdata); 952 953 return (0); 954 } 955 956 static int 957 xgbe_set_mac_address(struct xgbe_prv_data *pdata, uint8_t *addr) 958 { 959 unsigned int mac_addr_hi, mac_addr_lo; 960 961 mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); 962 mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | 963 (addr[1] << 8) | (addr[0] << 0); 964 965 XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi); 966 XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo); 967 968 return (0); 969 } 970 971 static int 972 xgbe_config_rx_mode(struct xgbe_prv_data *pdata) 973 { 974 unsigned int pr_mode, am_mode; 975 976 pr_mode = ((if_getflags(pdata->netdev) & IFF_PPROMISC) != 0); 977 am_mode = ((if_getflags(pdata->netdev) & IFF_ALLMULTI) != 0); 978 979 xgbe_set_promiscuous_mode(pdata, pr_mode); 980 xgbe_set_all_multicast_mode(pdata, am_mode); 981 982 xgbe_add_mac_addresses(pdata); 983 984 return (0); 985 } 986 987 static int 988 xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) 989 { 990 unsigned int reg; 991 992 if (gpio > 15) 993 return (-EINVAL); 994 995 reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); 996 997 reg &= ~(1 << (gpio + 16)); 998 XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); 999 1000 return (0); 1001 } 1002 1003 static int 1004 xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio) 1005 { 1006 unsigned int reg; 1007 1008 if (gpio > 15) 1009 return (-EINVAL); 1010 1011 reg = XGMAC_IOREAD(pdata, MAC_GPIOSR); 1012 1013 reg |= (1 << (gpio + 16)); 1014 XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg); 1015 1016 return (0); 1017 } 1018 1019 static int 1020 xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) 1021 { 1022 unsigned long flags; 1023 unsigned int mmd_address, index, offset; 1024 int mmd_data; 1025 1026 if (mmd_reg & MII_ADDR_C45) 1027 mmd_address = mmd_reg & ~MII_ADDR_C45; 1028 else 1029 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 1030 1031 /* The PCS registers are accessed using mmio. The underlying 1032 * management interface uses indirect addressing to access the MMD 1033 * register sets. 
This requires accessing of the PCS register in two 1034 * phases, an address phase and a data phase. 1035 * 1036 * The mmio interface is based on 16-bit offsets and values. All 1037 * register offsets must therefore be adjusted by left shifting the 1038 * offset 1 bit and reading 16 bits of data. 1039 */ 1040 mmd_address <<= 1; 1041 index = mmd_address & ~pdata->xpcs_window_mask; 1042 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); 1043 1044 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1045 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); 1046 mmd_data = XPCS16_IOREAD(pdata, offset); 1047 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1048 1049 return (mmd_data); 1050 } 1051 1052 static void 1053 xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, 1054 int mmd_data) 1055 { 1056 unsigned long flags; 1057 unsigned int mmd_address, index, offset; 1058 1059 if (mmd_reg & MII_ADDR_C45) 1060 mmd_address = mmd_reg & ~MII_ADDR_C45; 1061 else 1062 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 1063 1064 /* The PCS registers are accessed using mmio. The underlying 1065 * management interface uses indirect addressing to access the MMD 1066 * register sets. This requires accessing of the PCS register in two 1067 * phases, an address phase and a data phase. 1068 * 1069 * The mmio interface is based on 16-bit offsets and values. All 1070 * register offsets must therefore be adjusted by left shifting the 1071 * offset 1 bit and writing 16 bits of data. 1072 */ 1073 mmd_address <<= 1; 1074 index = mmd_address & ~pdata->xpcs_window_mask; 1075 offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask); 1076 1077 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1078 XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index); 1079 XPCS16_IOWRITE(pdata, offset, mmd_data); 1080 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1081 } 1082 1083 static int 1084 xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) 1085 { 1086 unsigned long flags; 1087 unsigned int mmd_address; 1088 int mmd_data; 1089 1090 if (mmd_reg & MII_ADDR_C45) 1091 mmd_address = mmd_reg & ~MII_ADDR_C45; 1092 else 1093 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 1094 1095 /* The PCS registers are accessed using mmio. The underlying APB3 1096 * management interface uses indirect addressing to access the MMD 1097 * register sets. This requires accessing of the PCS register in two 1098 * phases, an address phase and a data phase. 1099 * 1100 * The mmio interface is based on 32-bit offsets and values. All 1101 * register offsets must therefore be adjusted by left shifting the 1102 * offset 2 bits and reading 32 bits of data. 1103 */ 1104 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1105 XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); 1106 mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2); 1107 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1108 1109 return (mmd_data); 1110 } 1111 1112 static void 1113 xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, 1114 int mmd_data) 1115 { 1116 unsigned int mmd_address; 1117 unsigned long flags; 1118 1119 if (mmd_reg & MII_ADDR_C45) 1120 mmd_address = mmd_reg & ~MII_ADDR_C45; 1121 else 1122 mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff); 1123 1124 /* The PCS registers are accessed using mmio. The underlying APB3 1125 * management interface uses indirect addressing to access the MMD 1126 * register sets. 
This requires accessing of the PCS register in two 1127 * phases, an address phase and a data phase. 1128 * 1129 * The mmio interface is based on 32-bit offsets and values. All 1130 * register offsets must therefore be adjusted by left shifting the 1131 * offset 2 bits and writing 32 bits of data. 1132 */ 1133 spin_lock_irqsave(&pdata->xpcs_lock, flags); 1134 XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8); 1135 XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); 1136 spin_unlock_irqrestore(&pdata->xpcs_lock, flags); 1137 } 1138 1139 static int 1140 xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) 1141 { 1142 switch (pdata->vdata->xpcs_access) { 1143 case XGBE_XPCS_ACCESS_V1: 1144 return (xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg)); 1145 1146 case XGBE_XPCS_ACCESS_V2: 1147 default: 1148 return (xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg)); 1149 } 1150 } 1151 1152 static void 1153 xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, 1154 int mmd_data) 1155 { 1156 switch (pdata->vdata->xpcs_access) { 1157 case XGBE_XPCS_ACCESS_V1: 1158 return (xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data)); 1159 1160 case XGBE_XPCS_ACCESS_V2: 1161 default: 1162 return (xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data)); 1163 } 1164 } 1165 1166 static unsigned int 1167 xgbe_create_mdio_sca(int port, int reg) 1168 { 1169 unsigned int mdio_sca, da; 1170 1171 da = (reg & MII_ADDR_C45) ? reg >> 16 : 0; 1172 1173 mdio_sca = 0; 1174 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); 1175 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); 1176 XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); 1177 1178 return (mdio_sca); 1179 } 1180 1181 static int 1182 xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg, 1183 uint16_t val) 1184 { 1185 unsigned int mdio_sca, mdio_sccd; 1186 1187 mtx_lock_spin(&pdata->mdio_mutex); 1188 1189 mdio_sca = xgbe_create_mdio_sca(addr, reg); 1190 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1191 1192 mdio_sccd = 0; 1193 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val); 1194 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1); 1195 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); 1196 XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); 1197 1198 if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) == 1199 EWOULDBLOCK) { 1200 axgbe_error("%s: MDIO write error\n", __func__); 1201 mtx_unlock_spin(&pdata->mdio_mutex); 1202 return (-ETIMEDOUT); 1203 } 1204 1205 mtx_unlock_spin(&pdata->mdio_mutex); 1206 return (0); 1207 } 1208 1209 static int 1210 xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg) 1211 { 1212 unsigned int mdio_sca, mdio_sccd; 1213 1214 mtx_lock_spin(&pdata->mdio_mutex); 1215 1216 mdio_sca = xgbe_create_mdio_sca(addr, reg); 1217 XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1218 1219 mdio_sccd = 0; 1220 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3); 1221 XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); 1222 XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); 1223 1224 if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) == 1225 EWOULDBLOCK) { 1226 axgbe_error("%s: MDIO read error\n", __func__); 1227 mtx_unlock_spin(&pdata->mdio_mutex); 1228 return (-ETIMEDOUT); 1229 } 1230 1231 mtx_unlock_spin(&pdata->mdio_mutex); 1232 1233 return (XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA)); 1234 } 1235 1236 static int 1237 xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port, 1238 enum xgbe_mdio_mode mode) 1239 { 1240 unsigned int reg_val = 
	    XGMAC_IOREAD(pdata, MAC_MDIOCL22R);

	switch (mode) {
	case XGBE_MDIO_MODE_CL22:
		if (port > XGMAC_MAX_C22_PORT)
			return (-EINVAL);
		reg_val |= (1 << port);
		break;
	case XGBE_MDIO_MODE_CL45:
		break;
	default:
		return (-EINVAL);
	}

	XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

	return (0);
}

static int
xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
	return (!XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN));
}

static int
xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

	axgbe_printf(0, "Receive checksum offload Disabled\n");
	return (0);
}

static int
xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

	axgbe_printf(0, "Receive checksum offload Enabled\n");
	return (0);
}

static void
xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Tx descriptor
	 *   Set buffer 1 (lo) address to zero
	 *   Set buffer 1 (hi) address to zero
	 *   Reset all other control bits (IC, TTSE, B2L & B1L)
	 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 */
	rdesc->desc0 = 0;
	rdesc->desc1 = 0;
	rdesc->desc2 = 0;
	rdesc->desc3 = 0;

	wmb();
}

static void
xgbe_tx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	int i;
	int start_index = ring->cur;

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xgbe_tx_desc_reset(rdata);
	}

	/* Update the total number of Tx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
	    upper_32_bits(rdata->rdata_paddr));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
	    lower_32_bits(rdata->rdata_paddr));
}

static void
xgbe_rx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	unsigned int start_index = ring->cur;

	/*
	 * Just set desc_count and the starting address of the desc list
	 * here. Rest will be done as part of the txrx path.
1339 */ 1340 1341 /* Update the total number of Rx descriptors */ 1342 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1); 1343 1344 /* Update the starting address of descriptor ring */ 1345 rdata = XGBE_GET_DESC_DATA(ring, start_index); 1346 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI, 1347 upper_32_bits(rdata->rdata_paddr)); 1348 XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO, 1349 lower_32_bits(rdata->rdata_paddr)); 1350 } 1351 1352 static int 1353 xgbe_dev_read(struct xgbe_channel *channel) 1354 { 1355 struct xgbe_prv_data *pdata = channel->pdata; 1356 struct xgbe_ring *ring = channel->rx_ring; 1357 struct xgbe_ring_data *rdata; 1358 struct xgbe_ring_desc *rdesc; 1359 struct xgbe_packet_data *packet = &ring->packet_data; 1360 unsigned int err, etlt, l34t = 0; 1361 1362 axgbe_printf(1, "-->xgbe_dev_read: cur = %d\n", ring->cur); 1363 1364 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1365 rdesc = rdata->rdesc; 1366 1367 /* Check for data availability */ 1368 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN)) 1369 return (1); 1370 1371 rmb(); 1372 1373 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) { 1374 /* TODO - Timestamp Context Descriptor */ 1375 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1376 CONTEXT, 1); 1377 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1378 CONTEXT_NEXT, 0); 1379 return (0); 1380 } 1381 1382 /* Normal Descriptor, be sure Context Descriptor bit is off */ 1383 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0); 1384 1385 /* Indicate if a Context Descriptor is next */ 1386 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA)) 1387 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1388 CONTEXT_NEXT, 1); 1389 1390 /* Get the header length */ 1391 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { 1392 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1393 FIRST, 1); 1394 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, 1395 RX_NORMAL_DESC2, HL); 1396 if (rdata->rx.hdr_len) 1397 pdata->ext_stats.rx_split_header_packets++; 1398 } else 1399 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1400 FIRST, 0); 1401 1402 /* Get the RSS hash */ 1403 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) { 1404 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1405 RSS_HASH, 1); 1406 1407 packet->rss_hash = le32_to_cpu(rdesc->desc1); 1408 1409 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); 1410 switch (l34t) { 1411 case RX_DESC3_L34T_IPV4_TCP: 1412 packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV4; 1413 break; 1414 case RX_DESC3_L34T_IPV4_UDP: 1415 packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV4; 1416 break; 1417 case RX_DESC3_L34T_IPV6_TCP: 1418 packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV6; 1419 break; 1420 case RX_DESC3_L34T_IPV6_UDP: 1421 packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV6; 1422 break; 1423 default: 1424 packet->rss_hash_type = M_HASHTYPE_OPAQUE; 1425 break; 1426 } 1427 } 1428 1429 /* Not all the data has been transferred for this packet */ 1430 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { 1431 /* This is not the last of the data for this packet */ 1432 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1433 LAST, 0); 1434 return (0); 1435 } 1436 1437 /* This is the last of the data for this packet */ 1438 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1439 LAST, 1); 1440 1441 /* Get the packet length */ 1442 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); 1443 1444 /* Set 
checksum done indicator as appropriate */ 1445 /* TODO - add tunneling support */ 1446 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1447 CSUM_DONE, 1); 1448 1449 /* Check for errors (only valid in last descriptor) */ 1450 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); 1451 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT); 1452 axgbe_printf(1, "%s: err=%u, etlt=%#x\n", __func__, err, etlt); 1453 1454 if (!err || !etlt) { 1455 /* No error if err is 0 or etlt is 0 */ 1456 if (etlt == 0x09) { 1457 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1458 VLAN_CTAG, 1); 1459 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0, 1460 RX_NORMAL_DESC0, OVT); 1461 axgbe_printf(1, "vlan-ctag=%#06x\n", packet->vlan_ctag); 1462 } 1463 } else { 1464 unsigned int tnp = XGMAC_GET_BITS(packet->attributes, 1465 RX_PACKET_ATTRIBUTES, TNP); 1466 1467 if ((etlt == 0x05) || (etlt == 0x06)) { 1468 axgbe_printf(1, "%s: err1 l34t %d err 0x%x etlt 0x%x\n", 1469 __func__, l34t, err, etlt); 1470 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1471 CSUM_DONE, 0); 1472 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1473 TNPCSUM_DONE, 0); 1474 pdata->ext_stats.rx_csum_errors++; 1475 } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) { 1476 axgbe_printf(1, "%s: err2 l34t %d err 0x%x etlt 0x%x\n", 1477 __func__, l34t, err, etlt); 1478 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1479 CSUM_DONE, 0); 1480 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, 1481 TNPCSUM_DONE, 0); 1482 pdata->ext_stats.rx_vxlan_csum_errors++; 1483 } else { 1484 axgbe_printf(1, "%s: tnp %d l34t %d err 0x%x etlt 0x%x\n", 1485 __func__, tnp, l34t, err, etlt); 1486 axgbe_printf(1, "%s: Channel: %d SR 0x%x DSR 0x%x \n", 1487 __func__, channel->queue_index, 1488 XGMAC_DMA_IOREAD(channel, DMA_CH_SR), 1489 XGMAC_DMA_IOREAD(channel, DMA_CH_DSR)); 1490 axgbe_printf(1, "%s: ring cur %d dirty %d\n", 1491 __func__, ring->cur, ring->dirty); 1492 axgbe_printf(1, "%s: Desc 0x%08x-0x%08x-0x%08x-0x%08x\n", 1493 __func__, rdesc->desc0, rdesc->desc1, rdesc->desc2, 1494 rdesc->desc3); 1495 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, 1496 FRAME, 1); 1497 } 1498 } 1499 1500 axgbe_printf(1, "<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", 1501 channel->name, ring->cur & (ring->rdesc_count - 1), ring->cur); 1502 1503 return (0); 1504 } 1505 1506 static int 1507 xgbe_is_context_desc(struct xgbe_ring_desc *rdesc) 1508 { 1509 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ 1510 return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT)); 1511 } 1512 1513 static int 1514 xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) 1515 { 1516 /* Rx and Tx share LD bit, so check TDES3.LD bit */ 1517 return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD)); 1518 } 1519 1520 static int 1521 xgbe_enable_int(struct xgbe_channel *channel, enum xgbe_int int_id) 1522 { 1523 struct xgbe_prv_data *pdata = channel->pdata; 1524 1525 axgbe_printf(1, "enable_int: DMA_CH_IER read - 0x%x\n", 1526 channel->curr_ier); 1527 1528 switch (int_id) { 1529 case XGMAC_INT_DMA_CH_SR_TI: 1530 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); 1531 break; 1532 case XGMAC_INT_DMA_CH_SR_TPS: 1533 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1); 1534 break; 1535 case XGMAC_INT_DMA_CH_SR_TBU: 1536 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1); 1537 break; 1538 case XGMAC_INT_DMA_CH_SR_RI: 1539 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); 1540 break; 1541 case XGMAC_INT_DMA_CH_SR_RBU: 
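		/* Receive buffer unavailable maps to the RBUE enable bit */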
1542 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); 1543 break; 1544 case XGMAC_INT_DMA_CH_SR_RPS: 1545 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1); 1546 break; 1547 case XGMAC_INT_DMA_CH_SR_TI_RI: 1548 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); 1549 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); 1550 break; 1551 case XGMAC_INT_DMA_CH_SR_FBE: 1552 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); 1553 break; 1554 case XGMAC_INT_DMA_ALL: 1555 channel->curr_ier |= channel->saved_ier; 1556 break; 1557 default: 1558 return (-1); 1559 } 1560 1561 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); 1562 1563 axgbe_printf(1, "enable_int: DMA_CH_IER write - 0x%x\n", 1564 channel->curr_ier); 1565 1566 return (0); 1567 } 1568 1569 static int 1570 xgbe_disable_int(struct xgbe_channel *channel, enum xgbe_int int_id) 1571 { 1572 struct xgbe_prv_data *pdata = channel->pdata; 1573 1574 axgbe_printf(1, "disable_int: DMA_CH_IER read - 0x%x\n", 1575 channel->curr_ier); 1576 1577 switch (int_id) { 1578 case XGMAC_INT_DMA_CH_SR_TI: 1579 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); 1580 break; 1581 case XGMAC_INT_DMA_CH_SR_TPS: 1582 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0); 1583 break; 1584 case XGMAC_INT_DMA_CH_SR_TBU: 1585 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0); 1586 break; 1587 case XGMAC_INT_DMA_CH_SR_RI: 1588 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); 1589 break; 1590 case XGMAC_INT_DMA_CH_SR_RBU: 1591 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0); 1592 break; 1593 case XGMAC_INT_DMA_CH_SR_RPS: 1594 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0); 1595 break; 1596 case XGMAC_INT_DMA_CH_SR_TI_RI: 1597 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); 1598 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); 1599 break; 1600 case XGMAC_INT_DMA_CH_SR_FBE: 1601 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0); 1602 break; 1603 case XGMAC_INT_DMA_ALL: 1604 channel->saved_ier = channel->curr_ier; 1605 channel->curr_ier = 0; 1606 break; 1607 default: 1608 return (-1); 1609 } 1610 1611 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); 1612 1613 axgbe_printf(1, "disable_int: DMA_CH_IER write - 0x%x\n", 1614 channel->curr_ier); 1615 1616 return (0); 1617 } 1618 1619 static int 1620 __xgbe_exit(struct xgbe_prv_data *pdata) 1621 { 1622 unsigned int count = 2000; 1623 1624 /* Issue a software reset */ 1625 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1); 1626 DELAY(10); 1627 1628 /* Poll Until Poll Condition */ 1629 while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) 1630 DELAY(500); 1631 1632 if (!count) 1633 return (-EBUSY); 1634 1635 return (0); 1636 } 1637 1638 static int 1639 xgbe_exit(struct xgbe_prv_data *pdata) 1640 { 1641 int ret; 1642 1643 /* To guard against possible incorrectly generated interrupts, 1644 * issue the software reset twice. 
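	 * (The second reset is only attempted if the first one completed
	 * successfully.)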
1645 */ 1646 ret = __xgbe_exit(pdata); 1647 if (ret) { 1648 axgbe_error("%s: exit error %d\n", __func__, ret); 1649 return (ret); 1650 } 1651 1652 return (__xgbe_exit(pdata)); 1653 } 1654 1655 static int 1656 xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) 1657 { 1658 unsigned int i, count; 1659 1660 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) 1661 return (0); 1662 1663 for (i = 0; i < pdata->tx_q_count; i++) 1664 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); 1665 1666 /* Poll Until Poll Condition */ 1667 for (i = 0; i < pdata->tx_q_count; i++) { 1668 count = 2000; 1669 while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, 1670 MTL_Q_TQOMR, FTQ)) 1671 DELAY(500); 1672 1673 if (!count) 1674 return (-EBUSY); 1675 } 1676 1677 return (0); 1678 } 1679 1680 static void 1681 xgbe_config_dma_bus(struct xgbe_prv_data *pdata) 1682 { 1683 unsigned int sbmr; 1684 1685 sbmr = XGMAC_IOREAD(pdata, DMA_SBMR); 1686 1687 /* Set enhanced addressing mode */ 1688 XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1); 1689 1690 /* Set the System Bus mode */ 1691 XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1); 1692 XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2); 1693 XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal); 1694 XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1); 1695 XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1); 1696 1697 XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr); 1698 1699 /* Set descriptor fetching threshold */ 1700 if (pdata->vdata->tx_desc_prefetch) 1701 XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS, 1702 pdata->vdata->tx_desc_prefetch); 1703 1704 if (pdata->vdata->rx_desc_prefetch) 1705 XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS, 1706 pdata->vdata->rx_desc_prefetch); 1707 } 1708 1709 static void 1710 xgbe_config_dma_cache(struct xgbe_prv_data *pdata) 1711 { 1712 XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr); 1713 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr); 1714 if (pdata->awarcr) 1715 XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr); 1716 } 1717 1718 static void 1719 xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) 1720 { 1721 unsigned int i; 1722 1723 /* Set Tx to weighted round robin scheduling algorithm */ 1724 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); 1725 1726 /* Set Tx traffic classes to use WRR algorithm with equal weights */ 1727 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { 1728 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, 1729 MTL_TSA_ETS); 1730 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1); 1731 } 1732 1733 /* Set Rx to strict priority algorithm */ 1734 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); 1735 } 1736 1737 static void 1738 xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata, 1739 unsigned int queue, unsigned int q_fifo_size) 1740 { 1741 unsigned int frame_fifo_size; 1742 unsigned int rfa, rfd; 1743 1744 frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata)); 1745 axgbe_printf(1, "%s: queue %d q_fifo_size %d frame_fifo_size 0x%x\n", 1746 __func__, queue, q_fifo_size, frame_fifo_size); 1747 1748 /* TODO - add pfc/ets related support */ 1749 1750 /* This path deals with just maximum frame sizes which are 1751 * limited to a jumbo frame of 9,000 (plus headers, etc.) 1752 * so we can never exceed the maximum allowable RFA/RFD 1753 * values. 
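	 * Queues with 2 KB of fifo or less get no flow control at all,
	 * fifos of up to one aligned maximum frame use small fixed RFA/RFD
	 * codes, and larger fifos have their activate/deactivate points
	 * computed from the frame size and encoded with
	 * XGMAC_FLOW_CONTROL_VALUE().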
1754 */ 1755 if (q_fifo_size <= 2048) { 1756 /* rx_rfd to zero to signal no flow control */ 1757 pdata->rx_rfa[queue] = 0; 1758 pdata->rx_rfd[queue] = 0; 1759 return; 1760 } 1761 1762 if (q_fifo_size <= 4096) { 1763 /* Between 2048 and 4096 */ 1764 pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */ 1765 pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */ 1766 return; 1767 } 1768 1769 if (q_fifo_size <= frame_fifo_size) { 1770 /* Between 4096 and max-frame */ 1771 pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */ 1772 pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */ 1773 return; 1774 } 1775 1776 if (q_fifo_size <= (frame_fifo_size * 3)) { 1777 /* Between max-frame and 3 max-frames, 1778 * trigger if we get just over a frame of data and 1779 * resume when we have just under half a frame left. 1780 */ 1781 rfa = q_fifo_size - frame_fifo_size; 1782 rfd = rfa + (frame_fifo_size / 2); 1783 } else { 1784 /* Above 3 max-frames - trigger when just over 1785 * 2 frames of space available 1786 */ 1787 rfa = frame_fifo_size * 2; 1788 rfa += XGMAC_FLOW_CONTROL_UNIT; 1789 rfd = rfa + frame_fifo_size; 1790 } 1791 1792 pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa); 1793 pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd); 1794 axgbe_printf(1, "%s: forced queue %d rfa 0x%x rfd 0x%x\n", __func__, 1795 queue, pdata->rx_rfa[queue], pdata->rx_rfd[queue]); 1796 } 1797 1798 static void 1799 xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata, 1800 unsigned int *fifo) 1801 { 1802 unsigned int q_fifo_size; 1803 unsigned int i; 1804 1805 for (i = 0; i < pdata->rx_q_count; i++) { 1806 q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT; 1807 1808 axgbe_printf(1, "%s: fifo[%d] - 0x%x q_fifo_size 0x%x\n", 1809 __func__, i, fifo[i], q_fifo_size); 1810 xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size); 1811 } 1812 } 1813 1814 static void 1815 xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata) 1816 { 1817 unsigned int i; 1818 1819 for (i = 0; i < pdata->rx_q_count; i++) { 1820 axgbe_printf(1, "%s: queue %d rfa %d rfd %d\n", __func__, i, 1821 pdata->rx_rfa[i], pdata->rx_rfd[i]); 1822 1823 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 1824 pdata->rx_rfa[i]); 1825 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 1826 pdata->rx_rfd[i]); 1827 1828 axgbe_printf(1, "%s: MTL_Q_RQFCR 0x%x\n", __func__, 1829 XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR)); 1830 } 1831 } 1832 1833 static unsigned int 1834 xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata) 1835 { 1836 /* The configured value may not be the actual amount of fifo RAM */ 1837 return (min_t(unsigned int, pdata->tx_max_fifo_size, 1838 pdata->hw_feat.tx_fifo_size)); 1839 } 1840 1841 static unsigned int 1842 xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata) 1843 { 1844 /* The configured value may not be the actual amount of fifo RAM */ 1845 return (min_t(unsigned int, pdata->rx_max_fifo_size, 1846 pdata->hw_feat.rx_fifo_size)); 1847 } 1848 1849 static void 1850 xgbe_calculate_equal_fifo(unsigned int fifo_size, unsigned int queue_count, 1851 unsigned int *fifo) 1852 { 1853 unsigned int q_fifo_size; 1854 unsigned int p_fifo; 1855 unsigned int i; 1856 1857 q_fifo_size = fifo_size / queue_count; 1858 1859 /* Calculate the fifo setting by dividing the queue's fifo size 1860 * by the fifo allocation increment (with 0 representing the 1861 * base allocation increment so decrement the result by 1). 
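	 * For example, a 64 KB fifo shared equally by 8 queues gives a
	 * q_fifo_size of 8 KB, so the value handed back for each queue is
	 * (8192 / XGMAC_FIFO_UNIT) - 1.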
1862 */ 1863 p_fifo = q_fifo_size / XGMAC_FIFO_UNIT; 1864 if (p_fifo) 1865 p_fifo--; 1866 1867 /* Distribute the fifo equally amongst the queues */ 1868 for (i = 0; i < queue_count; i++) 1869 fifo[i] = p_fifo; 1870 } 1871 1872 static unsigned int 1873 xgbe_set_nonprio_fifos(unsigned int fifo_size, unsigned int queue_count, 1874 unsigned int *fifo) 1875 { 1876 unsigned int i; 1877 1878 MPASS(powerof2(XGMAC_FIFO_MIN_ALLOC)); 1879 1880 if (queue_count <= IEEE_8021QAZ_MAX_TCS) 1881 return (fifo_size); 1882 1883 /* Rx queues 9 and up are for specialized packets, 1884 * such as PTP or DCB control packets, etc. and 1885 * don't require a large fifo 1886 */ 1887 for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) { 1888 fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1; 1889 fifo_size -= XGMAC_FIFO_MIN_ALLOC; 1890 } 1891 1892 return (fifo_size); 1893 } 1894 1895 static void 1896 xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata) 1897 { 1898 unsigned int fifo_size; 1899 unsigned int fifo[XGBE_MAX_QUEUES]; 1900 unsigned int i; 1901 1902 fifo_size = xgbe_get_tx_fifo_size(pdata); 1903 axgbe_printf(1, "%s: fifo_size 0x%x\n", __func__, fifo_size); 1904 1905 xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo); 1906 1907 for (i = 0; i < pdata->tx_q_count; i++) { 1908 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]); 1909 axgbe_printf(1, "Tx q %d FIFO Size 0x%x\n", i, 1910 XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR)); 1911 } 1912 1913 axgbe_printf(1, "%d Tx hardware queues, %d byte fifo per queue\n", 1914 pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); 1915 } 1916 1917 static void 1918 xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata) 1919 { 1920 unsigned int fifo_size; 1921 unsigned int fifo[XGBE_MAX_QUEUES]; 1922 unsigned int prio_queues; 1923 unsigned int i; 1924 1925 /* TODO - add pfc/ets related support */ 1926 1927 /* Clear any DCB related fifo/queue information */ 1928 fifo_size = xgbe_get_rx_fifo_size(pdata); 1929 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count); 1930 axgbe_printf(1, "%s: fifo_size 0x%x rx_q_cnt %d prio %d\n", __func__, 1931 fifo_size, pdata->rx_q_count, prio_queues); 1932 1933 /* Assign a minimum fifo to the non-VLAN priority queues */ 1934 fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo); 1935 1936 xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo); 1937 1938 for (i = 0; i < pdata->rx_q_count; i++) { 1939 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]); 1940 axgbe_printf(1, "Rx q %d FIFO Size 0x%x\n", i, 1941 XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR)); 1942 } 1943 1944 xgbe_calculate_flow_control_threshold(pdata, fifo); 1945 xgbe_config_flow_control_threshold(pdata); 1946 1947 axgbe_printf(1, "%u Rx hardware queues, %u byte fifo/queue\n", 1948 pdata->rx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT)); 1949 } 1950 1951 static void 1952 xgbe_config_queue_mapping(struct xgbe_prv_data *pdata) 1953 { 1954 unsigned int qptc, qptc_extra, queue; 1955 unsigned int prio_queues; 1956 unsigned int ppq, ppq_extra, prio; 1957 unsigned int mask; 1958 unsigned int i, j, reg, reg_val; 1959 1960 /* Map the MTL Tx Queues to Traffic Classes 1961 * Note: Tx Queues >= Traffic Classes 1962 */ 1963 qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; 1964 qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; 1965 1966 for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { 1967 for (j = 0; j < qptc; j++) { 1968 axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i); 1969 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, 1970 Q2TCMAP, 
	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	reg_val = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		for (j = 0; j < ppq; j++) {
			axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		if (i < ppq_extra) {
			axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);
		reg += MAC_RQC2_INC;
		reg_val = 0;
	}

	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
	reg = MTL_RQDCM0R;
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count;) {
		reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

		if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MTL_RQDCM_INC;
		reg_val = 0;
	}
}

static void
xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
	xgbe_set_mac_address(pdata, if_getlladdr(pdata->netdev));

	/* Filtering is done using perfect filtering and hash filtering */
	if (pdata->hw_feat.hash_table_size) {
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
	}
}
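/*
 * Jumbo frame enable: JE is set whenever the configured MTU exceeds the
 * standard packet MTU (XGMAC_STD_PACKET_MTU, assumed here to be the usual
 * 1500-byte Ethernet MTU), allowing the MAC to accept frames larger than
 * the default maximum frame size.
 */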
static void
xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
	unsigned int val;

	val = (if_getmtu(pdata->netdev) > XGMAC_STD_PACKET_MTU) ? 1 : 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void
xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
	xgbe_set_speed(pdata, pdata->phy_speed);
}

static void
xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
	if ((if_getcapenable(pdata->netdev) & IFCAP_RXCSUM))
		xgbe_enable_rx_csum(pdata);
	else
		xgbe_disable_rx_csum(pdata);
}

static void
xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	/* Set the current VLAN Hash Table register value */
	xgbe_update_vlan_hash_table(pdata);

	if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) {
		axgbe_printf(1, "Enabling rx vlan filtering\n");
		xgbe_enable_rx_vlan_filtering(pdata);
	} else {
		axgbe_printf(1, "Disabling rx vlan filtering\n");
		xgbe_disable_rx_vlan_filtering(pdata);
	}

	if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWTAGGING)) {
		axgbe_printf(1, "Enabling rx vlan stripping\n");
		xgbe_enable_rx_vlan_stripping(pdata);
	} else {
		axgbe_printf(1, "Disabling rx vlan stripping\n");
		xgbe_disable_rx_vlan_stripping(pdata);
	}
}

static uint64_t
xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	bool read_hi;
	uint64_t val;

	if (pdata->vdata->mmc_64bit) {
		switch (reg_lo) {
		/* These registers are always 32 bit */
		case MMC_RXRUNTERROR:
		case MMC_RXJABBERERROR:
		case MMC_RXUNDERSIZE_G:
		case MMC_RXOVERSIZE_G:
		case MMC_RXWATCHDOGERROR:
			read_hi = false;
			break;

		default:
			read_hi = true;
		}
	} else {
		switch (reg_lo) {
		/* These registers are always 64 bit */
		case MMC_TXOCTETCOUNT_GB_LO:
		case MMC_TXOCTETCOUNT_G_LO:
		case MMC_RXOCTETCOUNT_GB_LO:
		case MMC_RXOCTETCOUNT_G_LO:
			read_hi = true;
			break;

		default:
			read_hi = false;
		}
	}

	val = XGMAC_IOREAD(pdata, reg_lo);

	if (read_hi)
		val |= ((uint64_t)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

	return (val);
}
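/*
 * The MMC interrupt status registers (MMC_TISR/MMC_RISR) flag counters that
 * have crossed their interrupt threshold.  Since the counters are configured
 * for reset-on-read in xgbe_config_mmc(), each flagged counter is read here
 * and folded into the 64-bit software statistics before it can wrap.  (The
 * exact trigger points are the usual Synopsys MMC semantics; consult the
 * hardware databook for specifics.)
 */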
static void
xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
		stats->txoctetcount_gb +=
		    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
		stats->txframecount_gb +=
		    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
		stats->txbroadcastframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
		stats->txmulticastframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
		stats->tx64octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
		stats->tx65to127octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
		stats->tx128to255octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
		stats->tx256to511octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
		stats->tx512to1023octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
		stats->tx1024tomaxoctets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
		stats->txunicastframes_gb +=
		    xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
		stats->txmulticastframes_gb +=
		    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
		stats->txbroadcastframes_gb +=
		    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
		stats->txunderflowerror +=
		    xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
		stats->txoctetcount_g +=
		    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
		stats->txframecount_g +=
		    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
		stats->txpauseframes +=
		    xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
		stats->txvlanframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}

static void
xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
		stats->rxframecount_gb +=
		    xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
		stats->rxoctetcount_gb +=
		    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
		stats->rxoctetcount_g +=
		    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
		stats->rxbroadcastframes_g +=
		    xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
		stats->rxmulticastframes_g +=
		    xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
		stats->rxcrcerror +=
		    xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
		stats->rxrunterror +=
		    xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
		stats->rxjabbererror +=
		    xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
		stats->rxundersize_g +=
		    xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
		stats->rxoversize_g +=
		    xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
		stats->rx64octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
		stats->rx65to127octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
		stats->rx128to255octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
		stats->rx256to511octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
		stats->rx512to1023octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
		stats->rx1024tomaxoctets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
		stats->rxunicastframes_g +=
		    xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
		stats->rxlengtherror +=
		    xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
		stats->rxoutofrangetype +=
		    xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
		stats->rxpauseframes +=
		    xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
		stats->rxfifooverflow +=
		    xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
		stats->rxvlanframes_gb +=
		    xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
		stats->rxwatchdogerror +=
		    xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}
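/*
 * Snapshot every MMC counter into the software statistics.  MCF freezes the
 * hardware counters while they are read so the snapshot is consistent; with
 * reset-on-read enabled in xgbe_config_mmc(), each read also clears the
 * hardware counter, so the software copies accumulate monotonically.
 */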
static void
xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	stats->txoctetcount_gb +=
	    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
	    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
	    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
	    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
	    xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
	    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	stats->txbroadcastframes_gb +=
	    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
	    xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
	    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
	    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
	    xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
	    xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
	    xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
	    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
	    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
	    xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
	    xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
	    xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
	    xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
	    xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
	    xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
	    xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
	    xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
	    xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
	    xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
	    xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
	    xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
	    xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
	    xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static void
xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
	/* Set counters to reset on read */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}

static void
xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx fifo. Don't
	 * wait forever though...
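	 * (the loop below re-reads MTL_Q_TQDR every 500 microseconds and
	 * gives up after XGBE_DMA_STOP_TIMEOUT seconds).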
	 */
	tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
	while (ticks < tx_timeout) {
		tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		DELAY(500);
	}

	if (ticks >= tx_timeout)
		axgbe_printf(1, "timed out waiting for Tx queue %u to empty\n",
		    queue);
}

static void
xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return (xgbe_txq_prepare_tx_stop(pdata, queue));

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
		    DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
	while (ticks < tx_timeout) {
		tx_status = XGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		DELAY(500);
	}

	if (ticks >= tx_timeout)
		axgbe_printf(1, "timed out waiting for Tx DMA channel %u to stop\n",
		    queue);
}

static void
xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
		    MTL_Q_ENABLED);

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void
xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		xgbe_prepare_tx_stop(pdata, i);

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

	/* Disable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
	}
}

static void
xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo. Don't
	 * wait forever though...
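	 * (PRXQ below reports the number of packets remaining in the Rx
	 * queue and RXQSTS the queue fill status; both must read zero
	 * before the queue is treated as drained).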
	 */
	rx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
	while (ticks < rx_timeout) {
		rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		DELAY(500);
	}

	if (ticks >= rx_timeout)
		axgbe_printf(1, "timed out waiting for Rx queue %d to empty\n",
		    queue);
}

static void
xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
	unsigned int reg_val, i;

	/* Enable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
	}

	/* Enable each Rx queue */
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

static void
xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Disable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < pdata->rx_q_count; i++)
		xgbe_prepare_rx_stop(pdata, i);

	/* Disable each Rx queue */
	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

	/* Disable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
	}
}

static void
xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
	}

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

static void
xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		xgbe_prepare_tx_stop(pdata, i);

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
	}
}

static void
xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
	}
}

static void
xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Disable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
	}
}
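/*
 * Note: the powerup/powerdown helpers above only toggle the per-channel DMA
 * enables (plus MAC Tx enable on the Tx side) and leave the MTL queue and
 * MAC Rx configuration in place, whereas the enable/disable paths also
 * program the Rx queues and MAC receive controls.  They are presumably
 * intended for power-management transitions where the queue setup should
 * survive.
 */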
static int
xgbe_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	/* Flush Tx queues */
	ret = xgbe_flush_tx_queues(pdata);
	if (ret) {
		axgbe_error("error flushing TX queues\n");
		return (ret);
	}

	/*
	 * Initialize DMA related features
	 */
	xgbe_config_dma_bus(pdata);
	xgbe_config_dma_cache(pdata);
	xgbe_config_osp_mode(pdata);
	xgbe_config_pbl_val(pdata);
	xgbe_config_rx_coalesce(pdata);
	xgbe_config_tx_coalesce(pdata);
	xgbe_config_rx_buffer_size(pdata);
	xgbe_config_tso_mode(pdata);
	xgbe_config_sph_mode(pdata);
	xgbe_config_rss(pdata);
	desc_if->wrapper_tx_desc_init(pdata);
	desc_if->wrapper_rx_desc_init(pdata);
	xgbe_enable_dma_interrupts(pdata);

	/*
	 * Initialize MTL related features
	 */
	xgbe_config_mtl_mode(pdata);
	xgbe_config_queue_mapping(pdata);
	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	xgbe_config_tx_fifo_size(pdata);
	xgbe_config_rx_fifo_size(pdata);
	/* TODO: Error Packet and undersized good Packet forwarding enable
	 * (FEP and FUP)
	 */
	xgbe_enable_mtl_interrupts(pdata);

	/*
	 * Initialize MAC related features
	 */
	xgbe_config_mac_address(pdata);
	xgbe_config_rx_mode(pdata);
	xgbe_config_jumbo_enable(pdata);
	xgbe_config_flow_control(pdata);
	xgbe_config_mac_speed(pdata);
	xgbe_config_checksum_offload(pdata);
	xgbe_config_vlan_support(pdata);
	xgbe_config_mmc(pdata);
	xgbe_enable_mac_interrupts(pdata);

	return (0);
}

void
xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{

	hw_if->tx_complete = xgbe_tx_complete;

	hw_if->set_mac_address = xgbe_set_mac_address;
	hw_if->config_rx_mode = xgbe_config_rx_mode;

	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
	hw_if->disable_rx_csum = xgbe_disable_rx_csum;

	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
	hw_if->write_mmd_regs = xgbe_write_mmd_regs;

	hw_if->set_speed = xgbe_set_speed;

	hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
	hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
	hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;

	hw_if->set_gpio = xgbe_set_gpio;
	hw_if->clr_gpio = xgbe_clr_gpio;

	hw_if->enable_tx = xgbe_enable_tx;
	hw_if->disable_tx = xgbe_disable_tx;
	hw_if->enable_rx = xgbe_enable_rx;
	hw_if->disable_rx = xgbe_disable_rx;

	hw_if->powerup_tx = xgbe_powerup_tx;
	hw_if->powerdown_tx = xgbe_powerdown_tx;
	hw_if->powerup_rx = xgbe_powerup_rx;
	hw_if->powerdown_rx = xgbe_powerdown_rx;

	hw_if->dev_read = xgbe_dev_read;
	hw_if->enable_int = xgbe_enable_int;
	hw_if->disable_int = xgbe_disable_int;
	hw_if->init = xgbe_init;
	hw_if->exit = xgbe_exit;

	/* Descriptor related Sequences have to be initialized here */
	hw_if->tx_desc_init = xgbe_tx_desc_init;
	hw_if->rx_desc_init = xgbe_rx_desc_init;
	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
	hw_if->is_last_desc = xgbe_is_last_desc;
	hw_if->is_context_desc = xgbe_is_context_desc;

	/* For FLOW ctrl */
	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

	/* For RX coalescing */
	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
	hw_if->riwt_to_usec = xgbe_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
	hw_if->config_tx_threshold = xgbe_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
	hw_if->config_tsf_mode = xgbe_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_if->config_osp_mode = xgbe_config_osp_mode;

	/* For MMC statistics support */
	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
	hw_if->read_mmc_stats = xgbe_read_mmc_stats;

	/* For Receive Side Scaling */
	hw_if->enable_rss = xgbe_enable_rss;
	hw_if->disable_rss = xgbe_disable_rss;
	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
}