/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "igb_sw.h"

int
igb_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	igb_stat_t *igb_ks;
	uint32_t low_val, high_val;

	igb_ks = (igb_stat_t *)igb->igb_ks->ks_data;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = igb->link_speed * 1000000ull;
		break;

	case MAC_STAT_MULTIRCV:
		igb_ks->mprc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MPRC);
		*val = igb_ks->mprc.value.ui64;
		break;

	case MAC_STAT_BRDCSTRCV:
		igb_ks->bprc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_BPRC);
		*val = igb_ks->bprc.value.ui64;
		break;

	case MAC_STAT_MULTIXMT:
		igb_ks->mptc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MPTC);
		*val = igb_ks->mptc.value.ui64;
		break;

	case MAC_STAT_BRDCSTXMT:
		igb_ks->bptc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_BPTC);
		*val = igb_ks->bptc.value.ui64;
		break;

	case MAC_STAT_NORCVBUF:
		igb_ks->rnbc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RNBC);
		*val = igb_ks->rnbc.value.ui64;
		break;

	case MAC_STAT_IERRORS:
		igb_ks->rxerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RXERRC);
		igb_ks->algnerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ALGNERRC);
		igb_ks->rlec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RLEC);
		igb_ks->crcerrs.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CRCERRS);
		igb_ks->cexterr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb_ks->rxerrc.value.ui64 +
		    igb_ks->algnerrc.value.ui64 +
		    igb_ks->rlec.value.ui64 +
		    igb_ks->crcerrs.value.ui64 +
		    igb_ks->cexterr.value.ui64;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = 0;
		break;

	case MAC_STAT_OERRORS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case MAC_STAT_COLLISIONS:
		igb_ks->colc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_COLC);
		*val = igb_ks->colc.value.ui64;
		break;

	case MAC_STAT_RBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TORL);
		high_val = E1000_READ_REG(hw, E1000_TORH);
		igb_ks->tor.value.ui64 +=
		    (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb_ks->tor.value.ui64;
		break;

	case MAC_STAT_IPACKETS:
		igb_ks->tpr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_TPR);
		*val = igb_ks->tpr.value.ui64;
		break;

	case MAC_STAT_OBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TOTL);
		high_val = E1000_READ_REG(hw, E1000_TOTH);
		igb_ks->tot.value.ui64 +=
		    (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb_ks->tot.value.ui64;
		break;

	case MAC_STAT_OPACKETS:
		igb_ks->tpt.value.ui64 +=
		    E1000_READ_REG(hw, E1000_TPT);
		*val = igb_ks->tpt.value.ui64;
		break;

	/* RFC 1643 stats */
	case ETHER_STAT_ALIGN_ERRORS:
		igb_ks->algnerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ALGNERRC);
		*val = igb_ks->algnerrc.value.ui64;
		break;

	case ETHER_STAT_FCS_ERRORS:
		igb_ks->crcerrs.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CRCERRS);
		*val = igb_ks->crcerrs.value.ui64;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		igb_ks->scc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_SCC);
		*val = igb_ks->scc.value.ui64;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		igb_ks->mcc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MCC);
		*val = igb_ks->mcc.value.ui64;
		break;

	case ETHER_STAT_SQE_ERRORS:
		igb_ks->sec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_SEC);
		*val = igb_ks->sec.value.ui64;
		break;

	case ETHER_STAT_DEFER_XMTS:
		igb_ks->dc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_DC);
		*val = igb_ks->dc.value.ui64;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		igb_ks->latecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_LATECOL);
		*val = igb_ks->latecol.value.ui64;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		igb_ks->cexterr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb_ks->cexterr.value.ui64;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		igb_ks->roc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ROC);
		*val = igb_ks->roc.value.ui64;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		igb_ks->rxerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RXERRC);
		*val = igb_ks->rxerrc.value.ui64;
		break;

	/* MII/GMII stats */
	case ETHER_STAT_XCVR_ADDR:
		/* The Internal PHY's MDI address for each MAC is 1 */
		*val = 1;
		break;

	case ETHER_STAT_XCVR_ID:
		*val = hw->phy.id | hw->phy.revision;
		break;

	case ETHER_STAT_XCVR_INUSE:
		switch (igb->link_speed) {
		case SPEED_1000:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    XCVR_1000T : XCVR_1000X;
			break;
		case SPEED_100:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    (igb->param_100t4_cap == 1) ?
			    XCVR_100T4 : XCVR_100T2 : XCVR_100X;
			break;
		case SPEED_10:
			*val = XCVR_10;
			break;
		default:
			*val = XCVR_NONE;
			break;
		}
		break;

	case ETHER_STAT_CAP_1000FDX:
		*val = igb->param_1000fdx_cap;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = igb->param_1000hdx_cap;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = igb->param_100fdx_cap;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = igb->param_100hdx_cap;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = igb->param_10fdx_cap;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = igb->param_10hdx_cap;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = igb->param_autoneg_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = igb->param_adv_1000fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = igb->param_adv_1000hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = igb->param_adv_100fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = igb->param_adv_100hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = igb->param_adv_10fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = igb->param_adv_10hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = igb->param_adv_asym_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = igb->param_adv_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LP_CAP_1000FDX:
		*val = igb->param_lp_1000fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		*val = igb->param_lp_1000hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = igb->param_lp_100fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = igb->param_lp_100hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = igb->param_lp_10fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = igb->param_lp_10hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		*val = igb->param_lp_asym_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		*val = igb->param_lp_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = igb->param_lp_autoneg_cap;
		break;

	case ETHER_STAT_LINK_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_LINK_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = (igb->link_duplex == FULL_DUPLEX) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		igb_ks->ruc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RUC);
		*val = igb_ks->ruc.value.ui64;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		*val = igb->param_rem_fault;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		*val = igb->param_adv_rem_fault;
		break;

	case ETHER_STAT_LP_REMFAULT:
		*val = igb->param_lp_rem_fault;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		igb_ks->rjc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RJC);
		*val = igb_ks->rjc.value.ui64;
		break;

	case ETHER_STAT_CAP_100T4:
		*val = igb->param_100t4_cap;
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		*val = igb->param_adv_100t4_cap;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		*val = igb->param_lp_100t4_cap;
		break;

	default:
		mutex_exit(&igb->gen_lock);
		return (ENOTSUP);
	}

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}

/*
 * Bring the device out of the reset/quiesced state that it
 * was in when the interface was registered.
 */
int
igb_m_start(void *arg)
{
	igb_t *igb = (igb_t *)arg;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb_start(igb) != IGB_SUCCESS) {
		mutex_exit(&igb->gen_lock);
		return (EIO);
	}

	atomic_or_32(&igb->igb_state, IGB_STARTED);

	mutex_exit(&igb->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	igb_enable_watchdog_timer(igb);

	return (0);
}

/*
 * Stop the device and put it in a reset/quiesced state such
 * that the interface can be unregistered.
 */
void
igb_m_stop(void *arg)
{
	igb_t *igb = (igb_t *)arg;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return;
	}

	atomic_and_32(&igb->igb_state, ~IGB_STARTED);

	igb_stop(igb);

	mutex_exit(&igb->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	igb_disable_watchdog_timer(igb);
}

/*
 * Set the promiscuity of the device.
 */
int
igb_m_promisc(void *arg, boolean_t on)
{
	igb_t *igb = (igb_t *)arg;
	uint32_t reg_val;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	reg_val = E1000_READ_REG(&igb->hw, E1000_RCTL);

	if (on)
		reg_val |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	else
		reg_val &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));

	E1000_WRITE_REG(&igb->hw, E1000_RCTL, reg_val);

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}

/*
 * Add/remove the addresses to/from the set of multicast
 * addresses for which the device will receive packets.
 */
int
igb_m_multicst(void *arg, boolean_t add, const uint8_t *mcst_addr)
{
	igb_t *igb = (igb_t *)arg;
	int result;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	result = (add) ? igb_multicst_add(igb, mcst_addr)
	    : igb_multicst_remove(igb, mcst_addr);

	mutex_exit(&igb->gen_lock);

	return (result);
}

/*
 * Pass on M_IOCTL messages passed to the DLD, and support
 * private IOCTLs for debugging and ndd.
 */
void
igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	igb_t *igb = (igb_t *)arg;
	struct iocblk *iocp;
	enum ioc_reply status;

	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
	iocp->ioc_error = 0;

	switch (iocp->ioc_cmd) {
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = igb_loopback_ioctl(igb, iocp, mp);
		break;

	case ND_GET:
	case ND_SET:
		status = igb_nd_ioctl(igb, q, mp, iocp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}

/*
 * Add a MAC address to the target RX group.
 */
static int
igb_addmac(void *arg, const uint8_t *mac_addr)
{
	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
	igb_t *igb = rx_group->igb;
	struct e1000_hw *hw = &igb->hw;
	int i, slot;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb->unicst_avail == 0) {
		/* no slots available */
		mutex_exit(&igb->gen_lock);
		return (ENOSPC);
	}

	/*
	 * The first igb->num_rx_groups slots are reserved: slot i maps
	 * 1:1 to RX group i. The remaining slots are shared among all of
	 * the groups. When adding a MAC address, the group's reserved
	 * slot is tried first, then the shared slots.
	 */
	slot = -1;
	if (igb->unicst_addr[rx_group->index].mac.set == 1) {
		/*
		 * The reserved slot for the current group is already in
		 * use; look for a free slot among the shared slots.
		 */
		for (i = igb->num_rx_groups; i < igb->unicst_total; i++) {
			if (igb->unicst_addr[i].mac.set == 0) {
				slot = i;
				break;
			}
		}
	} else
		slot = rx_group->index;

	if (slot == -1) {
		/* no slots available in the shared slots */
		mutex_exit(&igb->gen_lock);
		return (ENOSPC);
	}

	/* Set VMDq according to the mode supported by hardware. */
	e1000_rar_set_vmdq(hw, mac_addr, slot, igb->vmdq_mode, rx_group->index);

	bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL);
	igb->unicst_addr[slot].mac.group_index = rx_group->index;
	igb->unicst_addr[slot].mac.set = 1;
	igb->unicst_avail--;

	mutex_exit(&igb->gen_lock);

	return (0);
}

/*
 * Remove a MAC address from the specified RX group.
 */
static int
igb_remmac(void *arg, const uint8_t *mac_addr)
{
	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
	igb_t *igb = rx_group->igb;
	struct e1000_hw *hw = &igb->hw;
	int slot;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	slot = igb_unicst_find(igb, mac_addr);
	if (slot == -1) {
		mutex_exit(&igb->gen_lock);
		return (EINVAL);
	}

	if (igb->unicst_addr[slot].mac.set == 0) {
		mutex_exit(&igb->gen_lock);
		return (EINVAL);
	}

	/* Clear the MAC address in the slot */
	e1000_rar_clear(hw, slot);
	igb->unicst_addr[slot].mac.set = 0;
	igb->unicst_avail++;

	mutex_exit(&igb->gen_lock);

	return (0);
}

/*
 * Enable interrupt on the specified rx ring.
 */
int
igb_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
	igb_t *igb = rx_ring->igb;
	struct e1000_hw *hw = &igb->hw;
	uint32_t index = rx_ring->index;

	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
		/* Interrupt enabling for MSI-X */
		igb->eims_mask |= (E1000_EICR_RX_QUEUE0 << index);
		E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
	} else {
		ASSERT(index == 0);
		/* Interrupt enabling for MSI and legacy */
		igb->ims_mask |= E1000_IMS_RXT0;
		E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
	}

	E1000_WRITE_FLUSH(hw);

	return (0);
}

/*
 * Disable interrupt on the specified rx ring.
 */
int
igb_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
	igb_t *igb = rx_ring->igb;
	struct e1000_hw *hw = &igb->hw;
	uint32_t index = rx_ring->index;

	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
		/* Interrupt disabling for MSI-X */
		igb->eims_mask &= ~(E1000_EICR_RX_QUEUE0 << index);
		E1000_WRITE_REG(hw, E1000_EIMC,
		    (E1000_EICR_RX_QUEUE0 << index));
		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
	} else {
		ASSERT(index == 0);
		/* Interrupt disabling for MSI and legacy */
		igb->ims_mask &= ~E1000_IMS_RXT0;
		E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
	}

	E1000_WRITE_FLUSH(hw);

	return (0);
}

/*
 * Get the global ring index by a ring index within a group.
 */
int
igb_get_rx_ring_index(igb_t *igb, int gindex, int rindex)
{
	igb_rx_ring_t *rx_ring;
	int i;

	for (i = 0; i < igb->num_rx_rings; i++) {
		rx_ring = &igb->rx_rings[i];
		if (rx_ring->group_index == gindex)
			rindex--;
		if (rindex < 0)
			return (i);
	}

	return (-1);
}

static int
igb_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)rh;

	mutex_enter(&rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(&rx_ring->rx_lock);
	return (0);
}
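
/*
 * Worked example (illustrative figures only, not a fixed configuration):
 * with four RX rings spread over two groups such that rings 0 and 2 have
 * group_index 0 and rings 1 and 3 have group_index 1, a call to
 * igb_get_rx_ring_index(igb, 1, 1) skips ring 0, counts ring 1 as the
 * group's ring 0, skips ring 2, and returns 3 as the global index of the
 * group's ring 1. igb_fill_ring() below relies on this translation to map
 * the MAC layer's (group, ring-within-group) pair onto igb->rx_rings[].
 */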

/*
 * Callback function for MAC layer to register all rings.
 */
/* ARGSUSED */
void
igb_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	igb_t *igb = (igb_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		igb_rx_ring_t *rx_ring;
		int global_index;

		/*
		 * 'index' is the ring index within the group. Map it to
		 * the global ring index by searching within the group.
		 */
		global_index = igb_get_rx_ring_index(igb, rg_index, index);

		ASSERT(global_index >= 0);

		rx_ring = &igb->rx_rings[global_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = igb_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = (mac_ring_poll_t)igb_rx_ring_poll;

		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = igb_rx_ring_intr_enable;
		mintr->mi_disable = igb_rx_ring_intr_disable;

		break;
	}
	case MAC_RING_TYPE_TX: {
		ASSERT(index < igb->num_tx_rings);

		igb_tx_ring_t *tx_ring = &igb->tx_rings[index];
		tx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = igb_tx_ring_send;

		break;
	}
	default:
		break;
	}
}

void
igb_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	igb_t *igb = (igb_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		igb_rx_group_t *rx_group;

		ASSERT((index >= 0) && (index < igb->num_rx_groups));

		rx_group = &igb->rx_groups[index];
		rx_group->group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = igb_addmac;
		infop->mgi_remmac = igb_remmac;
		infop->mgi_count = (igb->num_rx_rings / igb->num_rx_groups);

		break;
	}
	case MAC_RING_TYPE_TX:
		break;
	default:
		break;
	}
}

/*
 * Obtain the MAC's capabilities and associated data from
 * the driver.
 */
boolean_t
igb_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	igb_t *igb = (igb_t *)arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *tx_hcksum_flags = cap_data;

		/*
		 * We advertise our capabilities only if tx hcksum offload is
		 * enabled. On receive, the stack will accept checksummed
		 * packets anyway, even if we haven't said we can deliver
		 * them.
		 */
		if (!igb->tx_hcksum_enable)
			return (B_FALSE);

		*tx_hcksum_flags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		if (igb->lso_enable) {
			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
			cap_lso->lso_basic_tcp_ipv4.lso_max = IGB_LSO_MAXLEN;
			break;
		} else {
			return (B_FALSE);
		}
	}
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;

		switch (cap_rings->mr_type) {
		case MAC_RING_TYPE_RX:
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = igb->num_rx_rings;
			cap_rings->mr_gnum = igb->num_rx_groups;
			cap_rings->mr_rget = igb_fill_ring;
			cap_rings->mr_gget = igb_fill_group;
			cap_rings->mr_gaddring = NULL;
			cap_rings->mr_gremring = NULL;

			break;
		case MAC_RING_TYPE_TX:
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = igb->num_tx_rings;
			cap_rings->mr_gnum = 0;
			cap_rings->mr_rget = igb_fill_ring;
			cap_rings->mr_gget = NULL;

			break;
		default:
			break;
		}
		break;
	}

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}
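
/*
 * Note on how these entry points are consumed (an illustrative sketch,
 * not part of this file): the igb_m_*() routines above are the driver's
 * GLDv3 callbacks, handed to the MAC layer when the driver registers
 * itself during attach. Assuming the usual mac_callbacks_t wiring, the
 * registration looks roughly like:
 *
 *	static mac_callbacks_t igb_m_callbacks = {
 *		.mc_callbacks	= MC_IOCTL | MC_GETCAPAB,
 *		.mc_getstat	= igb_m_stat,
 *		.mc_start	= igb_m_start,
 *		.mc_stop	= igb_m_stop,
 *		.mc_setpromisc	= igb_m_promisc,
 *		.mc_multicst	= igb_m_multicst,
 *		.mc_ioctl	= igb_m_ioctl,
 *		.mc_getcapab	= igb_m_getcapab
 *	};
 *
 * Because MAC_CAPAB_RINGS is advertised, per-ring transmit
 * (igb_tx_ring_send) and per-group unicast management
 * (igb_addmac/igb_remmac) are exposed through igb_m_getcapab()
 * rather than through mc_unicst/mc_tx.
 */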