/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include "igb_sw.h"
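
/*
 * Retrieve a value for one of the device statistics on behalf of the
 * MAC layer. The hardware statistics registers clear on read, so each
 * read is accumulated into the corresponding kstat before the total is
 * returned.
 */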
int
igb_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	igb_stat_t *igb_ks;
	uint32_t low_val, high_val;

	igb_ks = (igb_stat_t *)igb->igb_ks->ks_data;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
		*val = igb->link_speed * 1000000ull;
		break;

	case MAC_STAT_MULTIRCV:
		igb_ks->mprc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MPRC);
		*val = igb_ks->mprc.value.ui64;
		break;

	case MAC_STAT_BRDCSTRCV:
		igb_ks->bprc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_BPRC);
		*val = igb_ks->bprc.value.ui64;
		break;

	case MAC_STAT_MULTIXMT:
		igb_ks->mptc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MPTC);
		*val = igb_ks->mptc.value.ui64;
		break;

	case MAC_STAT_BRDCSTXMT:
		igb_ks->bptc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_BPTC);
		*val = igb_ks->bptc.value.ui64;
		break;

	case MAC_STAT_NORCVBUF:
		igb_ks->rnbc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RNBC);
		*val = igb_ks->rnbc.value.ui64;
		break;

	case MAC_STAT_IERRORS:
		igb_ks->rxerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RXERRC);
		igb_ks->algnerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ALGNERRC);
		igb_ks->rlec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RLEC);
		igb_ks->crcerrs.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CRCERRS);
		igb_ks->cexterr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb_ks->rxerrc.value.ui64 +
		    igb_ks->algnerrc.value.ui64 +
		    igb_ks->rlec.value.ui64 +
		    igb_ks->crcerrs.value.ui64 +
		    igb_ks->cexterr.value.ui64;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = 0;
		break;

	case MAC_STAT_OERRORS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case MAC_STAT_COLLISIONS:
		igb_ks->colc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_COLC);
		*val = igb_ks->colc.value.ui64;
		break;

	case MAC_STAT_RBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TORL);
		high_val = E1000_READ_REG(hw, E1000_TORH);
		igb_ks->tor.value.ui64 +=
		    (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb_ks->tor.value.ui64;
		break;

	case MAC_STAT_IPACKETS:
		igb_ks->tpr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_TPR);
		*val = igb_ks->tpr.value.ui64;
		break;

	case MAC_STAT_OBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TOTL);
		high_val = E1000_READ_REG(hw, E1000_TOTH);
		igb_ks->tot.value.ui64 +=
		    (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb_ks->tot.value.ui64;
		break;

	case MAC_STAT_OPACKETS:
		igb_ks->tpt.value.ui64 +=
		    E1000_READ_REG(hw, E1000_TPT);
		*val = igb_ks->tpt.value.ui64;
		break;

	/* RFC 1643 stats */
	case ETHER_STAT_ALIGN_ERRORS:
		igb_ks->algnerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ALGNERRC);
		*val = igb_ks->algnerrc.value.ui64;
		break;

	case ETHER_STAT_FCS_ERRORS:
		igb_ks->crcerrs.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CRCERRS);
		*val = igb_ks->crcerrs.value.ui64;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		igb_ks->scc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_SCC);
		*val = igb_ks->scc.value.ui64;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		igb_ks->mcc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MCC);
		*val = igb_ks->mcc.value.ui64;
		break;

	case ETHER_STAT_SQE_ERRORS:
		igb_ks->sec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_SEC);
		*val = igb_ks->sec.value.ui64;
		break;

	case ETHER_STAT_DEFER_XMTS:
		igb_ks->dc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_DC);
		*val = igb_ks->dc.value.ui64;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		igb_ks->latecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_LATECOL);
		*val = igb_ks->latecol.value.ui64;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		igb_ks->cexterr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb_ks->cexterr.value.ui64;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		igb_ks->roc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ROC);
		*val = igb_ks->roc.value.ui64;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		igb_ks->rxerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RXERRC);
		*val = igb_ks->rxerrc.value.ui64;
		break;

	/* MII/GMII stats */
	case ETHER_STAT_XCVR_ADDR:
		/* The Internal PHY's MDI address for each MAC is 1 */
		*val = 1;
		break;

	case ETHER_STAT_XCVR_ID:
		*val = hw->phy.id | hw->phy.revision;
		break;
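
	/*
	 * Report the transceiver type currently in use, derived from the
	 * negotiated link speed and the PHY media type (copper vs.
	 * fiber/serdes).
	 */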
	case ETHER_STAT_XCVR_INUSE:
		switch (igb->link_speed) {
		case SPEED_1000:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    XCVR_1000T : XCVR_1000X;
			break;
		case SPEED_100:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    (igb->param_100t4_cap == 1) ?
			    XCVR_100T4 : XCVR_100T2 : XCVR_100X;
			break;
		case SPEED_10:
			*val = XCVR_10;
			break;
		default:
			*val = XCVR_NONE;
			break;
		}
		break;

	case ETHER_STAT_CAP_1000FDX:
		*val = igb->param_1000fdx_cap;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = igb->param_1000hdx_cap;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = igb->param_100fdx_cap;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = igb->param_100hdx_cap;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = igb->param_10fdx_cap;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = igb->param_10hdx_cap;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = igb->param_autoneg_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = igb->param_adv_1000fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = igb->param_adv_1000hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = igb->param_adv_100fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = igb->param_adv_100hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = igb->param_adv_10fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = igb->param_adv_10hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = igb->param_adv_asym_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = igb->param_adv_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LP_CAP_1000FDX:
		*val = igb->param_lp_1000fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		*val = igb->param_lp_1000hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = igb->param_lp_100fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = igb->param_lp_100hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = igb->param_lp_10fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = igb->param_lp_10hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		*val = igb->param_lp_asym_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		*val = igb->param_lp_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = igb->param_lp_autoneg_cap;
		break;

	case ETHER_STAT_LINK_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_LINK_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = (igb->link_duplex == FULL_DUPLEX) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		igb_ks->ruc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RUC);
		*val = igb_ks->ruc.value.ui64;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		*val = igb->param_rem_fault;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		*val = igb->param_adv_rem_fault;
		break;

	case ETHER_STAT_LP_REMFAULT:
		*val = igb->param_lp_rem_fault;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		igb_ks->rjc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RJC);
		*val = igb_ks->rjc.value.ui64;
		break;

	case ETHER_STAT_CAP_100T4:
		*val = igb->param_100t4_cap;
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		*val = igb->param_adv_100t4_cap;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		*val = igb->param_lp_100t4_cap;
		break;

	default:
		mutex_exit(&igb->gen_lock);
		return (ENOTSUP);
	}

	mutex_exit(&igb->gen_lock);
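
	/*
	 * If the register access handle has seen a fault, report the
	 * degraded service state to the I/O fault management framework.
	 */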
	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}

/*
 * Bring the device out of the reset/quiesced state that it
 * was in when the interface was registered.
 */
int
igb_m_start(void *arg)
{
	igb_t *igb = (igb_t *)arg;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb_start(igb, B_TRUE) != IGB_SUCCESS) {
		mutex_exit(&igb->gen_lock);
		return (EIO);
	}

	atomic_or_32(&igb->igb_state, IGB_STARTED);

	mutex_exit(&igb->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	igb_enable_watchdog_timer(igb);

	return (0);
}

/*
 * Stop the device and put it in a reset/quiesced state such
 * that the interface can be unregistered.
 */
void
igb_m_stop(void *arg)
{
	igb_t *igb = (igb_t *)arg;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return;
	}

	atomic_and_32(&igb->igb_state, ~IGB_STARTED);

	igb_stop(igb, B_TRUE);

	mutex_exit(&igb->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	igb_disable_watchdog_timer(igb);
}

/*
 * Set the promiscuity of the device.
 */
int
igb_m_promisc(void *arg, boolean_t on)
{
	igb_t *igb = (igb_t *)arg;
	uint32_t reg_val;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	reg_val = E1000_READ_REG(&igb->hw, E1000_RCTL);
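
	/* UPE/MPE: unicast and multicast promiscuous enable bits in RCTL */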
	if (on)
		reg_val |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
	else
		reg_val &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));

	E1000_WRITE_REG(&igb->hw, E1000_RCTL, reg_val);

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}

/*
 * Add/remove the addresses to/from the set of multicast
 * addresses for which the device will receive packets.
 */
int
igb_m_multicst(void *arg, boolean_t add, const uint8_t *mcst_addr)
{
	igb_t *igb = (igb_t *)arg;
	int result;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	result = (add) ? igb_multicst_add(igb, mcst_addr)
	    : igb_multicst_remove(igb, mcst_addr);

	mutex_exit(&igb->gen_lock);

	return (result);
}

/*
 * Pass on M_IOCTL messages passed to the DLD, and support
 * private IOCTLs for debugging and ndd.
 */
void
igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
{
	igb_t *igb = (igb_t *)arg;
	struct iocblk *iocp;
	enum ioc_reply status;

	iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
	iocp->ioc_error = 0;

	mutex_enter(&igb->gen_lock);
	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		miocnak(q, mp, 0, EINVAL);
		return;
	}
	mutex_exit(&igb->gen_lock);

	switch (iocp->ioc_cmd) {
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = igb_loopback_ioctl(igb, iocp, mp);
		break;

	default:
		status = IOC_INVAL;
		break;
	}

	/*
	 * Decide how to reply
	 */
	switch (status) {
	default:
	case IOC_INVAL:
		/*
		 * Error, reply with a NAK and EINVAL or the specified error
		 */
		miocnak(q, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		break;

	case IOC_DONE:
		/*
		 * OK, reply already sent
		 */
		break;

	case IOC_ACK:
		/*
		 * OK, reply with an ACK
		 */
		miocack(q, mp, 0, 0);
		break;

	case IOC_REPLY:
		/*
		 * OK, send prepared reply as ACK or NAK
		 */
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(q, mp);
		break;
	}
}

/*
 * Add a MAC address to the target RX group.
 */
static int
igb_addmac(void *arg, const uint8_t *mac_addr)
{
	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
	igb_t *igb = rx_group->igb;
	struct e1000_hw *hw = &igb->hw;
	int i, slot;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb->unicst_avail == 0) {
		/* no slots available */
		mutex_exit(&igb->gen_lock);
		return (ENOSPC);
	}

	/*
	 * Slots 0 through (igb->num_rx_groups - 1) are reserved slots that
	 * are mapped 1:1 to the group indices. The remaining slots are
	 * shared among all of the groups. When adding a MAC address, the
	 * reserved slot for the group is tried first, then the shared slots.
	 */
	slot = -1;
	if (igb->unicst_addr[rx_group->index].mac.set == 1) {
		/*
		 * The reserved slot for the current group is already in use;
		 * look for a free slot among the shared slots.
		 */
		for (i = igb->num_rx_groups; i < igb->unicst_total; i++) {
			if (igb->unicst_addr[i].mac.set == 0) {
				slot = i;
				break;
			}
		}
	} else
		slot = rx_group->index;

	if (slot == -1) {
		/* no slots available in the shared slots */
		mutex_exit(&igb->gen_lock);
		return (ENOSPC);
	}

	/* Set VMDq according to the mode supported by hardware. */
	e1000_rar_set_vmdq(hw, mac_addr, slot, igb->vmdq_mode, rx_group->index);

	bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL);
	igb->unicst_addr[slot].mac.group_index = rx_group->index;
	igb->unicst_addr[slot].mac.set = 1;
	igb->unicst_avail--;

	mutex_exit(&igb->gen_lock);

	return (0);
}

/*
 * Remove a MAC address from the specified RX group.
 */
static int
igb_remmac(void *arg, const uint8_t *mac_addr)
{
	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
	igb_t *igb = rx_group->igb;
	struct e1000_hw *hw = &igb->hw;
	int slot;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	slot = igb_unicst_find(igb, mac_addr);
	if (slot == -1) {
		mutex_exit(&igb->gen_lock);
		return (EINVAL);
	}

	if (igb->unicst_addr[slot].mac.set == 0) {
		mutex_exit(&igb->gen_lock);
		return (EINVAL);
	}

	/* Clear the MAC address in the slot */
	e1000_rar_clear(hw, slot);
	igb->unicst_addr[slot].mac.set = 0;
	igb->unicst_avail++;

	mutex_exit(&igb->gen_lock);

	return (0);
}

/*
 * Enable interrupt on the specified rx ring.
 */
int
igb_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
	igb_t *igb = rx_ring->igb;
	struct e1000_hw *hw = &igb->hw;
	uint32_t index = rx_ring->index;

	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
		/* Interrupt enabling for MSI-X */
		igb->eims_mask |= (E1000_EICR_RX_QUEUE0 << index);
		E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
	} else {
		ASSERT(index == 0);
		/* Interrupt enabling for MSI and legacy */
		igb->ims_mask |= E1000_IMS_RXT0;
		E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
	}

	E1000_WRITE_FLUSH(hw);

	return (0);
}

/*
 * Disable interrupt on the specified rx ring.
 */
int
igb_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
	igb_t *igb = rx_ring->igb;
	struct e1000_hw *hw = &igb->hw;
	uint32_t index = rx_ring->index;

	if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
		/* Interrupt disabling for MSI-X */
		igb->eims_mask &= ~(E1000_EICR_RX_QUEUE0 << index);
		E1000_WRITE_REG(hw, E1000_EIMC,
		    (E1000_EICR_RX_QUEUE0 << index));
		E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
	} else {
		ASSERT(index == 0);
		/* Interrupt disabling for MSI and legacy */
		igb->ims_mask &= ~E1000_IMS_RXT0;
		E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
	}

	E1000_WRITE_FLUSH(hw);

	return (0);
}

/*
 * Get the global ring index by a ring index within a group.
 */
int
igb_get_rx_ring_index(igb_t *igb, int gindex, int rindex)
{
	igb_rx_ring_t *rx_ring;
	int i;

	for (i = 0; i < igb->num_rx_rings; i++) {
		rx_ring = &igb->rx_rings[i];
		if (rx_ring->group_index == gindex)
			rindex--;
		if (rindex < 0)
			return (i);
	}

	return (-1);
}
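
/*
 * Callback function for the MAC layer to start an rx ring: record the
 * ring generation number handed down by the MAC layer for this ring.
 */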
static int
igb_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)rh;

	mutex_enter(&rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(&rx_ring->rx_lock);
	return (0);
}

/*
 * Callback function for MAC layer to register all rings.
 */
/* ARGSUSED */
void
igb_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	igb_t *igb = (igb_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		igb_rx_ring_t *rx_ring;
		int global_index;

		/*
		 * 'index' is the ring index within the group.
		 * We need to find the global ring index by searching
		 * within the group.
		 */
		global_index = igb_get_rx_ring_index(igb, rg_index, index);

		ASSERT(global_index >= 0);

		rx_ring = &igb->rx_rings[global_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = igb_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = (mac_ring_poll_t)igb_rx_ring_poll;
		infop->mri_stat = igb_rx_ring_stat;

		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = igb_rx_ring_intr_enable;
		mintr->mi_disable = igb_rx_ring_intr_disable;
		if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    igb->htable[rx_ring->intr_vector];
		}
		break;
	}
	case MAC_RING_TYPE_TX: {
		ASSERT(index < igb->num_tx_rings);

		igb_tx_ring_t *tx_ring = &igb->tx_rings[index];
		tx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = igb_tx_ring_send;
		infop->mri_stat = igb_tx_ring_stat;
		if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    igb->htable[tx_ring->intr_vector];
		}
		break;
	}
	default:
		break;
	}
}
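
/*
 * Callback function for the MAC layer to register all groups of rx rings.
 */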
void
igb_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	igb_t *igb = (igb_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		igb_rx_group_t *rx_group;

		ASSERT((index >= 0) && (index < igb->num_rx_groups));

		rx_group = &igb->rx_groups[index];
		rx_group->group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = igb_addmac;
		infop->mgi_remmac = igb_remmac;
		infop->mgi_count = (igb->num_rx_rings / igb->num_rx_groups);

		break;
	}
	case MAC_RING_TYPE_TX:
		break;
	default:
		break;
	}
}

/*
 * Obtain the MAC's capabilities and associated data from
 * the driver.
 */
boolean_t
igb_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	igb_t *igb = (igb_t *)arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *tx_hcksum_flags = cap_data;

		/*
		 * We advertise our capabilities only if tx hcksum offload is
		 * enabled. On receive, the stack will accept checksummed
		 * packets anyway, even if we haven't said we can deliver
		 * them.
		 */
		if (!igb->tx_hcksum_enable)
			return (B_FALSE);

		*tx_hcksum_flags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		if (igb->lso_enable) {
			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
			cap_lso->lso_basic_tcp_ipv4.lso_max = IGB_LSO_MAXLEN;
			break;
		} else {
			return (B_FALSE);
		}
	}
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;

		switch (cap_rings->mr_type) {
		case MAC_RING_TYPE_RX:
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = igb->num_rx_rings;
			cap_rings->mr_gnum = igb->num_rx_groups;
			cap_rings->mr_rget = igb_fill_ring;
			cap_rings->mr_gget = igb_fill_group;
			cap_rings->mr_gaddring = NULL;
			cap_rings->mr_gremring = NULL;

			break;
		case MAC_RING_TYPE_TX:
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = igb->num_tx_rings;
			cap_rings->mr_gnum = 0;
			cap_rings->mr_rget = igb_fill_ring;
			cap_rings->mr_gget = NULL;

			break;
		default:
			break;
		}
		break;
	}

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}
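
/*
 * Set a device property on behalf of the MAC layer. Link capability,
 * auto-negotiation and flow control changes are applied by reprogramming
 * the link through igb_setup_link(); the MTU can only be changed while
 * the adapter is stopped.
 */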
int
igb_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	int err = 0;
	uint32_t flow_control;
	uint32_t cur_mtu, new_mtu;
	uint32_t rx_size;
	uint32_t tx_size;

	mutex_enter(&igb->gen_lock);
	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb->loopback_mode != IGB_LB_NONE && igb_param_locked(pr_num)) {
		/*
		 * All en_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(&igb->gen_lock);
		return (EBUSY);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		/* read/write on copper, read-only on serdes */
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_1000fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_1000fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_100FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_100fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_100fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_100HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_100hdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_100hdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_10FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_10fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_10fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_10HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_10hdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_10hdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_AUTONEG:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_adv_autoneg_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_FLOWCTRL:
		bcopy(pr_val, &flow_control, sizeof (flow_control));

		switch (flow_control) {
		default:
			err = EINVAL;
			break;
		case LINK_FLOWCTRL_NONE:
			hw->fc.requested_mode = e1000_fc_none;
			break;
		case LINK_FLOWCTRL_RX:
			hw->fc.requested_mode = e1000_fc_rx_pause;
			break;
		case LINK_FLOWCTRL_TX:
			hw->fc.requested_mode = e1000_fc_tx_pause;
			break;
		case LINK_FLOWCTRL_BI:
			hw->fc.requested_mode = e1000_fc_full;
			break;
		}
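	/*
	 * The capability, auto-negotiation and flow control cases above
	 * all reach this point to apply the updated settings to the
	 * hardware.
	 */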
setup_link:
		if (err == 0) {
			if (igb_setup_link(igb, B_TRUE) != IGB_SUCCESS)
				err = EINVAL;
		}
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100T4_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP; /* read-only prop. Can't set this. */
		break;
	case MAC_PROP_MTU:
		/* adapter must be stopped for an MTU change */
		if (igb->igb_state & IGB_STARTED) {
			err = EBUSY;
			break;
		}

		cur_mtu = igb->default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}

		if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) {
			err = EINVAL;
			break;
		}

		err = mac_maxsdu_update(igb->mac_hdl, new_mtu);
		if (err == 0) {
			igb->default_mtu = new_mtu;
			igb->max_frame_size = igb->default_mtu +
			    sizeof (struct ether_vlan_header) + ETHERFCSL;

			/*
			 * Set rx buffer size
			 */
			rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM;
			igb->rx_buf_size = ((rx_size >> 10) + ((rx_size &
			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

			/*
			 * Set tx buffer size
			 */
			tx_size = igb->max_frame_size;
			igb->tx_buf_size = ((tx_size >> 10) + ((tx_size &
			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
		}
		break;
	case MAC_PROP_PRIVATE:
		err = igb_set_priv_prop(igb, pr_name, pr_valsize, pr_val);
		break;
	default:
		err = EINVAL;
		break;
	}

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (err);
}

int
igb_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	int err = 0;
	uint32_t flow_control;
	uint64_t tmp = 0;

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
		ASSERT(pr_valsize >= sizeof (link_duplex_t));
		bcopy(&igb->link_duplex, pr_val, sizeof (link_duplex_t));
		break;
	case MAC_PROP_SPEED:
		ASSERT(pr_valsize >= sizeof (uint64_t));
		tmp = igb->link_speed * 1000000ull;
		bcopy(&tmp, pr_val, sizeof (tmp));
		break;
	case MAC_PROP_AUTONEG:
		ASSERT(pr_valsize >= sizeof (uint8_t));
		*(uint8_t *)pr_val = igb->param_adv_autoneg_cap;
		break;
	case MAC_PROP_FLOWCTRL:
		ASSERT(pr_valsize >= sizeof (uint32_t));
		switch (hw->fc.requested_mode) {
		case e1000_fc_none:
			flow_control = LINK_FLOWCTRL_NONE;
			break;
		case e1000_fc_rx_pause:
			flow_control = LINK_FLOWCTRL_RX;
			break;
		case e1000_fc_tx_pause:
			flow_control = LINK_FLOWCTRL_TX;
			break;
		case e1000_fc_full:
			flow_control = LINK_FLOWCTRL_BI;
			break;
		}
		bcopy(&flow_control, pr_val, sizeof (flow_control));
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
		*(uint8_t *)pr_val = igb->param_adv_1000fdx_cap;
		break;
	case MAC_PROP_EN_1000FDX_CAP:
		*(uint8_t *)pr_val = igb->param_en_1000fdx_cap;
		break;
	case MAC_PROP_ADV_1000HDX_CAP:
		*(uint8_t *)pr_val = igb->param_adv_1000hdx_cap;
		break;
	case MAC_PROP_EN_1000HDX_CAP:
		*(uint8_t *)pr_val = igb->param_en_1000hdx_cap;
		break;
	case MAC_PROP_ADV_100T4_CAP:
		*(uint8_t *)pr_val = igb->param_adv_100t4_cap;
		break;
	case MAC_PROP_EN_100T4_CAP:
		*(uint8_t *)pr_val = igb->param_en_100t4_cap;
		break;
	case MAC_PROP_ADV_100FDX_CAP:
		*(uint8_t *)pr_val = igb->param_adv_100fdx_cap;
		break;
	case MAC_PROP_EN_100FDX_CAP:
		*(uint8_t *)pr_val = igb->param_en_100fdx_cap;
		break;
	case MAC_PROP_ADV_100HDX_CAP:
		*(uint8_t *)pr_val = igb->param_adv_100hdx_cap;
		break;
	case MAC_PROP_EN_100HDX_CAP:
		*(uint8_t *)pr_val = igb->param_en_100hdx_cap;
		break;
	case MAC_PROP_ADV_10FDX_CAP:
		*(uint8_t *)pr_val = igb->param_adv_10fdx_cap;
		break;
	case MAC_PROP_EN_10FDX_CAP:
		*(uint8_t *)pr_val = igb->param_en_10fdx_cap;
		break;
	case MAC_PROP_ADV_10HDX_CAP:
		*(uint8_t *)pr_val = igb->param_adv_10hdx_cap;
		break;
	case MAC_PROP_EN_10HDX_CAP:
		*(uint8_t *)pr_val = igb->param_en_10hdx_cap;
		break;
	case MAC_PROP_PRIVATE:
		err = igb_get_priv_prop(igb, pr_name, pr_valsize, pr_val);
		break;
	default:
		err = EINVAL;
		break;
	}
	return (err);
}
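
/*
 * Report property permissions and default values to the MAC layer.
 * Defaults for the speed/duplex capabilities are derived from what the
 * PHY reports itself capable of.
 */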
void
igb_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	uint16_t phy_status, phy_ext_status;

	switch (pr_num) {
	case MAC_PROP_DUPLEX:
	case MAC_PROP_SPEED:
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_EN_100T4_CAP:
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		break;

	case MAC_PROP_EN_1000FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		} else {
			(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
			    &phy_ext_status);
			mac_prop_info_set_default_uint8(prh,
			    ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
			    (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
		}
		break;

	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_EN_100FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		} else {
			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
			mac_prop_info_set_default_uint8(prh,
			    ((phy_status & MII_SR_100X_FD_CAPS) ||
			    (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0);
		}
		break;

	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		} else {
			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
			mac_prop_info_set_default_uint8(prh,
			    ((phy_status & MII_SR_100X_HD_CAPS) ||
			    (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0);
		}
		break;

	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		} else {
			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
			mac_prop_info_set_default_uint8(prh,
			    (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
		}
		break;

	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		} else {
			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
			mac_prop_info_set_default_uint8(prh,
			    (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
		}
		break;

	case MAC_PROP_AUTONEG:
		if (hw->phy.media_type != e1000_media_type_copper) {
			mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		} else {
			(void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
			mac_prop_info_set_default_uint8(prh,
			    (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0);
		}
		break;

	case MAC_PROP_FLOWCTRL:
		mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
		break;

	case MAC_PROP_MTU:
		mac_prop_info_set_range_uint32(prh, MIN_MTU, MAX_MTU);
		break;

	case MAC_PROP_PRIVATE:
		igb_priv_prop_info(igb, pr_name, prh);
		break;
	}
}

boolean_t
igb_param_locked(mac_prop_id_t pr_num)
{
	/*
	 * All en_* parameters are locked (read-only) while
	 * the device is in any sort of loopback mode ...
	 */
	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100T4_CAP:
	case MAC_PROP_EN_100FDX_CAP:
	case MAC_PROP_EN_100HDX_CAP:
	case MAC_PROP_EN_10FDX_CAP:
	case MAC_PROP_EN_10HDX_CAP:
	case MAC_PROP_AUTONEG:
	case MAC_PROP_FLOWCTRL:
		return (B_TRUE);
	}
	return (B_FALSE);
}
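
/*
 * Set one of the driver-private properties. The value arrives as a
 * string and is converted and range-checked before being applied.
 */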
1363 */ 1364 switch (pr_num) { 1365 case MAC_PROP_EN_1000FDX_CAP: 1366 case MAC_PROP_EN_1000HDX_CAP: 1367 case MAC_PROP_EN_100T4_CAP: 1368 case MAC_PROP_EN_100FDX_CAP: 1369 case MAC_PROP_EN_100HDX_CAP: 1370 case MAC_PROP_EN_10FDX_CAP: 1371 case MAC_PROP_EN_10HDX_CAP: 1372 case MAC_PROP_AUTONEG: 1373 case MAC_PROP_FLOWCTRL: 1374 return (B_TRUE); 1375 } 1376 return (B_FALSE); 1377 } 1378 1379 /* ARGSUSED */ 1380 int 1381 igb_set_priv_prop(igb_t *igb, const char *pr_name, 1382 uint_t pr_valsize, const void *pr_val) 1383 { 1384 int err = 0; 1385 long result; 1386 struct e1000_hw *hw = &igb->hw; 1387 int i; 1388 1389 if (strcmp(pr_name, "_tx_copy_thresh") == 0) { 1390 if (pr_val == NULL) { 1391 err = EINVAL; 1392 return (err); 1393 } 1394 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1395 if (result < MIN_TX_COPY_THRESHOLD || 1396 result > MAX_TX_COPY_THRESHOLD) 1397 err = EINVAL; 1398 else { 1399 igb->tx_copy_thresh = (uint32_t)result; 1400 } 1401 return (err); 1402 } 1403 if (strcmp(pr_name, "_tx_recycle_thresh") == 0) { 1404 if (pr_val == NULL) { 1405 err = EINVAL; 1406 return (err); 1407 } 1408 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1409 if (result < MIN_TX_RECYCLE_THRESHOLD || 1410 result > MAX_TX_RECYCLE_THRESHOLD) 1411 err = EINVAL; 1412 else { 1413 igb->tx_recycle_thresh = (uint32_t)result; 1414 } 1415 return (err); 1416 } 1417 if (strcmp(pr_name, "_tx_overload_thresh") == 0) { 1418 if (pr_val == NULL) { 1419 err = EINVAL; 1420 return (err); 1421 } 1422 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1423 if (result < MIN_TX_OVERLOAD_THRESHOLD || 1424 result > MAX_TX_OVERLOAD_THRESHOLD) 1425 err = EINVAL; 1426 else { 1427 igb->tx_overload_thresh = (uint32_t)result; 1428 } 1429 return (err); 1430 } 1431 if (strcmp(pr_name, "_tx_resched_thresh") == 0) { 1432 if (pr_val == NULL) { 1433 err = EINVAL; 1434 return (err); 1435 } 1436 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1437 if (result < MIN_TX_RESCHED_THRESHOLD || 1438 result > MAX_TX_RESCHED_THRESHOLD) 1439 err = EINVAL; 1440 else { 1441 igb->tx_resched_thresh = (uint32_t)result; 1442 } 1443 return (err); 1444 } 1445 if (strcmp(pr_name, "_rx_copy_thresh") == 0) { 1446 if (pr_val == NULL) { 1447 err = EINVAL; 1448 return (err); 1449 } 1450 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1451 if (result < MIN_RX_COPY_THRESHOLD || 1452 result > MAX_RX_COPY_THRESHOLD) 1453 err = EINVAL; 1454 else { 1455 igb->rx_copy_thresh = (uint32_t)result; 1456 } 1457 return (err); 1458 } 1459 if (strcmp(pr_name, "_rx_limit_per_intr") == 0) { 1460 if (pr_val == NULL) { 1461 err = EINVAL; 1462 return (err); 1463 } 1464 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1465 if (result < MIN_RX_LIMIT_PER_INTR || 1466 result > MAX_RX_LIMIT_PER_INTR) 1467 err = EINVAL; 1468 else { 1469 igb->rx_limit_per_intr = (uint32_t)result; 1470 } 1471 return (err); 1472 } 1473 if (strcmp(pr_name, "_intr_throttling") == 0) { 1474 if (pr_val == NULL) { 1475 err = EINVAL; 1476 return (err); 1477 } 1478 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 1479 1480 if (result < igb->capab->min_intr_throttle || 1481 result > igb->capab->max_intr_throttle) 1482 err = EINVAL; 1483 else { 1484 igb->intr_throttling[0] = (uint32_t)result; 1485 1486 for (i = 0; i < MAX_NUM_EITR; i++) 1487 igb->intr_throttling[i] = 1488 igb->intr_throttling[0]; 1489 1490 /* Set interrupt throttling rate */ 1491 for (i = 0; i < igb->intr_cnt; i++) 1492 E1000_WRITE_REG(hw, E1000_EITR(i), 1493 igb->intr_throttling[i]); 1494 } 1495 return (err); 1496 } 
int
igb_get_priv_prop(igb_t *igb, const char *pr_name, uint_t pr_valsize,
    void *pr_val)
{
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0) {
		value = igb->param_adv_pause_cap;
	} else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		value = igb->param_adv_asym_pause_cap;
	} else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
		value = igb->tx_copy_thresh;
	} else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
		value = igb->tx_recycle_thresh;
	} else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
		value = igb->tx_overload_thresh;
	} else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
		value = igb->tx_resched_thresh;
	} else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
		value = igb->rx_copy_thresh;
	} else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
		value = igb->rx_limit_per_intr;
	} else if (strcmp(pr_name, "_intr_throttling") == 0) {
		value = igb->intr_throttling[0];
	} else {
		return (ENOTSUP);
	}

	(void) snprintf(pr_val, pr_valsize, "%d", value);
	return (0);
}

void
igb_priv_prop_info(igb_t *igb, const char *pr_name, mac_prop_info_handle_t prh)
{
	char valstr[64];
	int value;

	if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
	    strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
		mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
		return;
	} else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
		value = DEFAULT_TX_COPY_THRESHOLD;
	} else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
		value = DEFAULT_TX_RECYCLE_THRESHOLD;
	} else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
		value = DEFAULT_TX_OVERLOAD_THRESHOLD;
	} else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
		value = DEFAULT_TX_RESCHED_THRESHOLD;
	} else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
		value = DEFAULT_RX_COPY_THRESHOLD;
	} else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
		value = DEFAULT_RX_LIMIT_PER_INTR;
	} else if (strcmp(pr_name, "_intr_throttling") == 0) {
		value = igb->capab->def_intr_throttle;
	} else {
		return;
	}

	(void) snprintf(valstr, sizeof (valstr), "%d", value);
	mac_prop_info_set_default_str(prh, valstr);
}