/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2007-2012 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, Nexenta Systems, Inc. All rights reserved.
 */

#include "igb_sw.h"

static char ident[] = "Intel 1Gb Ethernet";
static char igb_version[] = "igb 2.3.8-ish";

/*
 * Local function prototypes
 */
static int igb_register_mac(igb_t *);
static int igb_identify_hardware(igb_t *);
static int igb_regs_map(igb_t *);
static void igb_init_properties(igb_t *);
static int igb_init_driver_settings(igb_t *);
static void igb_init_locks(igb_t *);
static void igb_destroy_locks(igb_t *);
static int igb_init_mac_address(igb_t *);
static int igb_init(igb_t *);
static int igb_init_adapter(igb_t *);
static void igb_stop_adapter(igb_t *);
static int igb_reset(igb_t *);
static void igb_tx_clean(igb_t *);
static boolean_t igb_tx_drain(igb_t *);
static boolean_t igb_rx_drain(igb_t *);
static int igb_alloc_rings(igb_t *);
static int igb_alloc_rx_data(igb_t *);
static void igb_free_rx_data(igb_t *);
static void igb_free_rings(igb_t *);
static void igb_setup_rings(igb_t *);
static void igb_setup_rx(igb_t *);
static void igb_setup_tx(igb_t *);
static void igb_setup_rx_ring(igb_rx_ring_t *);
static void igb_setup_tx_ring(igb_tx_ring_t *);
static void igb_setup_rss(igb_t *);
static void igb_setup_mac_rss_classify(igb_t *);
static void igb_setup_mac_classify(igb_t *);
static void igb_init_unicst(igb_t *);
static void igb_setup_multicst(igb_t *);
static void igb_get_phy_state(igb_t *);
static void igb_param_sync(igb_t *);
static void igb_get_conf(igb_t *);
static int igb_get_prop(igb_t *, char *, int, int, int);
static boolean_t igb_is_link_up(igb_t *);
static boolean_t igb_link_check(igb_t *);
static void igb_local_timer(void *);
static void igb_link_timer(void *);
static void igb_arm_watchdog_timer(igb_t *);
static void igb_start_watchdog_timer(igb_t *);
static void igb_restart_watchdog_timer(igb_t *);
static void igb_stop_watchdog_timer(igb_t *);
static void igb_start_link_timer(igb_t *);
static void igb_stop_link_timer(igb_t *);
static void igb_disable_adapter_interrupts(igb_t *);
static void igb_enable_adapter_interrupts_82575(igb_t *);
static void igb_enable_adapter_interrupts_82576(igb_t *);
static void igb_enable_adapter_interrupts_82580(igb_t *);
static boolean_t is_valid_mac_addr(uint8_t *);
static boolean_t igb_stall_check(igb_t *);
static boolean_t igb_set_loopback_mode(igb_t *, uint32_t);
static void igb_set_external_loopback(igb_t *);
static void igb_set_internal_phy_loopback(igb_t *);
static void igb_set_internal_serdes_loopback(igb_t *);
static boolean_t igb_find_mac_address(igb_t *);
static int igb_alloc_intrs(igb_t *);
static int igb_alloc_intr_handles(igb_t *, int);
static int igb_add_intr_handlers(igb_t *);
static void igb_rem_intr_handlers(igb_t *);
static void igb_rem_intrs(igb_t *);
static int igb_enable_intrs(igb_t *);
static int igb_disable_intrs(igb_t *);
static void igb_setup_msix_82575(igb_t *);
static void igb_setup_msix_82576(igb_t *);
static void igb_setup_msix_82580(igb_t *);
static uint_t igb_intr_legacy(void *, void *);
static uint_t igb_intr_msi(void *, void *);
static uint_t igb_intr_rx(void *, void *);
static uint_t igb_intr_tx(void *, void *);
static uint_t igb_intr_tx_other(void *, void *);
static void igb_intr_rx_work(igb_rx_ring_t *);
static void igb_intr_tx_work(igb_tx_ring_t *);
static void igb_intr_link_work(igb_t *);
static void igb_get_driver_control(struct e1000_hw *);
static void igb_release_driver_control(struct e1000_hw *);

static int igb_attach(dev_info_t *, ddi_attach_cmd_t);
static int igb_detach(dev_info_t *, ddi_detach_cmd_t);
static int igb_resume(dev_info_t *);
static int igb_suspend(dev_info_t *);
static int igb_quiesce(dev_info_t *);
static void igb_unconfigure(dev_info_t *, igb_t *);
static int igb_fm_error_cb(dev_info_t *, ddi_fm_error_t *,
    const void *);
static void igb_fm_init(igb_t *);
static void igb_fm_fini(igb_t *);
static void igb_release_multicast(igb_t *);

char *igb_priv_props[] = {
    "_eee_support",
    "_tx_copy_thresh",
    "_tx_recycle_thresh",
    "_tx_overload_thresh",
    "_tx_resched_thresh",
    "_rx_copy_thresh",
    "_rx_limit_per_intr",
    "_intr_throttling",
    "_adv_pause_cap",
    "_adv_asym_pause_cap",
    NULL
};

static struct cb_ops igb_cb_ops = {
    nulldev,            /* cb_open */
    nulldev,            /* cb_close */
    nodev,              /* cb_strategy */
    nodev,              /* cb_print */
    nodev,              /* cb_dump */
    nodev,              /* cb_read */
    nodev,              /* cb_write */
    nodev,              /* cb_ioctl */
    nodev,              /* cb_devmap */
    nodev,              /* cb_mmap */
    nodev,              /* cb_segmap */
    nochpoll,           /* cb_chpoll */
    ddi_prop_op,        /* cb_prop_op */
    NULL,               /* cb_stream */
    D_MP | D_HOTPLUG,   /* cb_flag */
    CB_REV,             /* cb_rev */
    nodev,              /* cb_aread */
    nodev               /* cb_awrite */
};

static struct dev_ops igb_dev_ops = {
    DEVO_REV,           /* devo_rev */
    0,                  /* devo_refcnt */
    NULL,               /* devo_getinfo */
    nulldev,            /* devo_identify */
    nulldev,            /* devo_probe */
    igb_attach,         /* devo_attach */
    igb_detach,         /* devo_detach */
    nodev,              /* devo_reset */
    &igb_cb_ops,        /* devo_cb_ops */
    NULL,               /* devo_bus_ops */
    ddi_power,          /* devo_power */
    igb_quiesce,        /* devo_quiesce */
};

static struct modldrv igb_modldrv = {
    &mod_driverops,     /* Type of module.  This one is a driver */
    ident,              /* Description string */
    &igb_dev_ops,       /* driver ops */
};

static struct modlinkage igb_modlinkage = {
    MODREV_1, &igb_modldrv, NULL
};

/* Access attributes for register mapping */
ddi_device_acc_attr_t igb_regs_acc_attr = {
    DDI_DEVICE_ATTR_V1,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

#define IGB_M_CALLBACK_FLAGS \
    (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t igb_m_callbacks = {
    IGB_M_CALLBACK_FLAGS,
    igb_m_stat,
    igb_m_start,
    igb_m_stop,
    igb_m_promisc,
    igb_m_multicst,
    NULL,
    NULL,
    NULL,
    igb_m_ioctl,
    igb_m_getcapab,
    NULL,
    NULL,
    igb_m_setprop,
    igb_m_getprop,
    igb_m_propinfo
};

/*
 * Initialize capabilities of each supported adapter type
 */
static adapter_info_t igb_82575_cap = {
    /* limits */
    4,      /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    4,      /* default number of rx queues */
    4,      /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    4,      /* default number of tx queues */
    65535,  /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    200,    /* default interrupt throttle rate */

    /* function pointers */
    igb_enable_adapter_interrupts_82575,
    igb_setup_msix_82575,

    /* capabilities */
    (IGB_FLAG_HAS_DCA |     /* capability flags */
    IGB_FLAG_VMDQ_POOL),

    0xffc00000              /* mask for RXDCTL register */
};

static adapter_info_t igb_82576_cap = {
    /* limits */
    16,     /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    4,      /* default number of rx queues */
    16,     /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    4,      /* default number of tx queues */
    65535,  /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    200,    /* default interrupt throttle rate */

    /* function pointers */
    igb_enable_adapter_interrupts_82576,
    igb_setup_msix_82576,

    /* capabilities */
    (IGB_FLAG_HAS_DCA |     /* capability flags */
    IGB_FLAG_VMDQ_POOL |
    IGB_FLAG_NEED_CTX_IDX),

    0xffe00000              /* mask for RXDCTL register */
};

static adapter_info_t igb_82580_cap = {
    /* limits */
    8,      /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    4,      /* default number of rx queues */
    8,      /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    4,      /* default number of tx queues */
    65535,  /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    200,    /* default interrupt throttle rate */

    /* function pointers */
    igb_enable_adapter_interrupts_82580,
    igb_setup_msix_82580,

    /* capabilities */
    (IGB_FLAG_HAS_DCA |     /* capability flags */
    IGB_FLAG_VMDQ_POOL |
    IGB_FLAG_NEED_CTX_IDX),

    0xffe00000              /* mask for RXDCTL register */
};

static adapter_info_t igb_i350_cap = {
    /* limits */
    8,      /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    4,      /* default number of rx queues */
    8,      /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    4,      /* default number of tx queues */
    65535,  /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    200,    /* default interrupt throttle rate */

    /* function pointers */
    igb_enable_adapter_interrupts_82580,
    igb_setup_msix_82580,

    /* capabilities */
    (IGB_FLAG_HAS_DCA |     /* capability flags */
    IGB_FLAG_VMDQ_POOL |
    IGB_FLAG_NEED_CTX_IDX),

    0xffe00000              /* mask for RXDCTL register */
};

static adapter_info_t igb_i210_cap = {
    /* limits */
    4,      /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    4,      /* default number of rx queues */
    4,      /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    4,      /* default number of tx queues */
    65535,  /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    200,    /* default interrupt throttle rate */

    /* function pointers */
    igb_enable_adapter_interrupts_82580,
    igb_setup_msix_82580,

    /* capabilities */
    (IGB_FLAG_HAS_DCA |     /* capability flags */
    IGB_FLAG_VMDQ_POOL |
    IGB_FLAG_NEED_CTX_IDX),

    0xfff00000              /* mask for RXDCTL register */
};

/*
 * Module Initialization Functions
 */

int
_init(void)
{
    int status;

    mac_init_ops(&igb_dev_ops, MODULE_NAME);

    status = mod_install(&igb_modlinkage);

    if (status != DDI_SUCCESS) {
        mac_fini_ops(&igb_dev_ops);
    }

    return (status);
}

int
_fini(void)
{
    int status;

    status = mod_remove(&igb_modlinkage);

    if (status == DDI_SUCCESS) {
        mac_fini_ops(&igb_dev_ops);
    }

    return (status);
}

int
_info(struct modinfo *modinfop)
{
    int status;

    status = mod_info(&igb_modlinkage, modinfop);

    return (status);
}

/*
 * igb_attach - driver attach
 *
 * This function is the device specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
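/*
 * A note on error handling in igb_attach(): each successful step below
 * sets a bit in igb->attach_progress, and igb_unconfigure() consults
 * those same bits to release only the resources that were actually
 * acquired. This lets every failure path funnel through a single
 * "goto attach_fail" regardless of how far attach got.
 */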
static int
igb_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
    igb_t *igb;
    struct igb_osdep *osdep;
    struct e1000_hw *hw;
    int instance;

    /*
     * Check the command and perform corresponding operations
     */
    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_RESUME:
        return (igb_resume(devinfo));

    case DDI_ATTACH:
        break;
    }

    /* Get the device instance */
    instance = ddi_get_instance(devinfo);

    /* Allocate memory for the instance data structure */
    igb = kmem_zalloc(sizeof (igb_t), KM_SLEEP);

    igb->dip = devinfo;
    igb->instance = instance;

    hw = &igb->hw;
    osdep = &igb->osdep;
    hw->back = osdep;
    osdep->igb = igb;

    /* Attach the instance pointer to the dev_info data structure */
    ddi_set_driver_private(devinfo, igb);

    /* Initialize for fma support */
    igb->fm_capabilities = igb_get_prop(igb, "fm-capable",
        0, 0x0f,
        DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
        DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
    igb_fm_init(igb);
    igb->attach_progress |= ATTACH_PROGRESS_FMINIT;

    /*
     * Map PCI config space registers
     */
    if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
        igb_error(igb, "Failed to map PCI configurations");
        goto attach_fail;
    }
    igb->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

    /*
     * Identify the chipset family
     */
    if (igb_identify_hardware(igb) != IGB_SUCCESS) {
        igb_error(igb, "Failed to identify hardware");
        goto attach_fail;
    }

    /*
     * Map device registers
     */
    if (igb_regs_map(igb) != IGB_SUCCESS) {
        igb_error(igb, "Failed to map device registers");
        goto attach_fail;
    }
    igb->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

    /*
     * Initialize driver parameters
     */
    igb_init_properties(igb);
    igb->attach_progress |= ATTACH_PROGRESS_PROPS;

    /*
     * Allocate interrupts
     */
    if (igb_alloc_intrs(igb) != IGB_SUCCESS) {
        igb_error(igb, "Failed to allocate interrupts");
        goto attach_fail;
    }
    igb->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

    /*
     * Allocate rx/tx rings based on the ring numbers.
     * The actual numbers of rx/tx rings are decided by the number of
     * allocated interrupt vectors, so we should allocate the rings after
     * interrupts are allocated.
     */
    if (igb_alloc_rings(igb) != IGB_SUCCESS) {
        igb_error(igb, "Failed to allocate rx/tx rings or groups");
        goto attach_fail;
    }
    igb->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

    /*
     * Add interrupt handlers
     */
    if (igb_add_intr_handlers(igb) != IGB_SUCCESS) {
        igb_error(igb, "Failed to add interrupt handlers");
        goto attach_fail;
    }
    igb->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

    /*
     * Initialize driver settings
     */
    if (igb_init_driver_settings(igb) != IGB_SUCCESS) {
        igb_error(igb, "Failed to initialize driver settings");
        goto attach_fail;
    }

    if (igb_check_acc_handle(igb->osdep.cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
        goto attach_fail;
    }

    /*
     * Initialize mutexes for this device.
     * Do this before enabling the interrupt handler and
     * registering the softint, to avoid the condition where the
     * interrupt handler can try to use an uninitialized mutex.
     */
    igb_init_locks(igb);
    igb->attach_progress |= ATTACH_PROGRESS_LOCKS;

    /*
     * Initialize the adapter
     */
    if (igb_init(igb) != IGB_SUCCESS) {
        igb_error(igb, "Failed to initialize adapter");
        goto attach_fail;
    }
    igb->attach_progress |= ATTACH_PROGRESS_INIT_ADAPTER;

    /*
     * Initialize statistics
     */
    if (igb_init_stats(igb) != IGB_SUCCESS) {
        igb_error(igb, "Failed to initialize statistics");
        goto attach_fail;
    }
    igb->attach_progress |= ATTACH_PROGRESS_STATS;

    /*
     * Register the driver to the MAC
     */
    if (igb_register_mac(igb) != IGB_SUCCESS) {
        igb_error(igb, "Failed to register MAC");
        goto attach_fail;
    }
    igb->attach_progress |= ATTACH_PROGRESS_MAC;

    /*
     * Now that mutex locks are initialized, and the chip is also
     * initialized, enable interrupts.
     */
    if (igb_enable_intrs(igb) != IGB_SUCCESS) {
        igb_error(igb, "Failed to enable DDI interrupts");
        goto attach_fail;
    }
    igb->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

    igb_log(igb, "%s", igb_version);
    atomic_or_32(&igb->igb_state, IGB_INITIALIZED);

    /*
     * Newer models have Energy Efficient Ethernet; disable it by
     * default.
     */
    if (igb->hw.mac.type == e1000_i350)
        (void) e1000_set_eee_i350(&igb->hw);

    return (DDI_SUCCESS);

attach_fail:
    igb_unconfigure(devinfo, igb);
    return (DDI_FAILURE);
}

/*
 * igb_detach - driver detach
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by
 * this driver is freed.
 */
static int
igb_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
    igb_t *igb;

    /*
     * Check detach command
     */
    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_SUSPEND:
        return (igb_suspend(devinfo));

    case DDI_DETACH:
        break;
    }

    /*
     * Get the pointer to the driver private data structure
     */
    igb = (igb_t *)ddi_get_driver_private(devinfo);
    if (igb == NULL)
        return (DDI_FAILURE);

    /*
     * Unregister MAC. If failed, we have to fail the detach
     */
    if (mac_unregister(igb->mac_hdl) != 0) {
        igb_error(igb, "Failed to unregister MAC");
        return (DDI_FAILURE);
    }
    igb->attach_progress &= ~ATTACH_PROGRESS_MAC;

    /*
     * If the device is still running, it needs to be stopped first.
     * This check is necessary because under some specific circumstances,
     * the detach routine can be called without stopping the interface
     * first.
     */
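    /*
     * The IGB_STARTED bit is cleared (atomically, under gen_lock) before
     * stopping the chip so other code paths see a consistent state. The
     * watchdog timer is then torn down only after gen_lock has been
     * dropped, presumably because the timer handler itself may need to
     * take gen_lock, and cancelling it while holding that lock could
     * deadlock.
     */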
    mutex_enter(&igb->gen_lock);
    if (igb->igb_state & IGB_STARTED) {
        atomic_and_32(&igb->igb_state, ~IGB_STARTED);
        igb_stop(igb, B_TRUE);
        mutex_exit(&igb->gen_lock);
        /* Disable and stop the watchdog timer */
        igb_disable_watchdog_timer(igb);
    } else
        mutex_exit(&igb->gen_lock);

    /*
     * Check if there are still rx buffers held by the upper layer.
     * If so, fail the detach.
     */
    if (!igb_rx_drain(igb))
        return (DDI_FAILURE);

    /*
     * Do the remaining unconfigure routines
     */
    igb_unconfigure(devinfo, igb);

    return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
igb_quiesce(dev_info_t *devinfo)
{
    igb_t *igb;
    struct e1000_hw *hw;

    igb = (igb_t *)ddi_get_driver_private(devinfo);

    if (igb == NULL)
        return (DDI_FAILURE);

    hw = &igb->hw;

    /*
     * Disable the adapter interrupts
     */
    igb_disable_adapter_interrupts(igb);

    /* Tell firmware driver is no longer in control */
    igb_release_driver_control(hw);

    /*
     * Reset the chipset
     */
    (void) e1000_reset_hw(hw);

    /*
     * Reset PHY if possible
     */
    if (e1000_check_reset_block(hw) == E1000_SUCCESS)
        (void) e1000_phy_hw_reset(hw);

    return (DDI_SUCCESS);
}

/*
 * igb_unconfigure - release all resources held by this instance
 */
static void
igb_unconfigure(dev_info_t *devinfo, igb_t *igb)
{
    /*
     * Disable interrupt
     */
    if (igb->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
        (void) igb_disable_intrs(igb);
    }

    /*
     * Unregister MAC
     */
    if (igb->attach_progress & ATTACH_PROGRESS_MAC) {
        (void) mac_unregister(igb->mac_hdl);
    }

    /*
     * Free statistics
     */
    if (igb->attach_progress & ATTACH_PROGRESS_STATS) {
        kstat_delete((kstat_t *)igb->igb_ks);
    }

    /*
     * Remove interrupt handlers
     */
    if (igb->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
        igb_rem_intr_handlers(igb);
    }

    /*
     * Remove interrupts
     */
    if (igb->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
        igb_rem_intrs(igb);
    }

    /*
     * Remove driver properties
     */
    if (igb->attach_progress & ATTACH_PROGRESS_PROPS) {
        (void) ddi_prop_remove_all(devinfo);
    }

    /*
     * Stop the adapter
     */
    if (igb->attach_progress & ATTACH_PROGRESS_INIT_ADAPTER) {
        mutex_enter(&igb->gen_lock);
        igb_stop_adapter(igb);
        mutex_exit(&igb->gen_lock);
        if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
            ddi_fm_service_impact(igb->dip, DDI_SERVICE_UNAFFECTED);
    }

    /*
     * Free multicast table
     */
    igb_release_multicast(igb);

    /*
     * Free register handle
     */
    if (igb->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
        if (igb->osdep.reg_handle != NULL)
            ddi_regs_map_free(&igb->osdep.reg_handle);
    }

    /*
     * Free PCI config handle
     */
    if (igb->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
        if (igb->osdep.cfg_handle != NULL)
            pci_config_teardown(&igb->osdep.cfg_handle);
    }

    /*
     * Free locks
     */
    if (igb->attach_progress & ATTACH_PROGRESS_LOCKS) {
        igb_destroy_locks(igb);
    }
    /*
     * Free the rx/tx rings
     */
    if (igb->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
        igb_free_rings(igb);
    }

    /*
     * Remove FMA
     */
    if (igb->attach_progress & ATTACH_PROGRESS_FMINIT) {
        igb_fm_fini(igb);
    }

    /*
     * Free the driver data structure
     */
    kmem_free(igb, sizeof (igb_t));

    ddi_set_driver_private(devinfo, NULL);
}

/*
 * igb_register_mac - Register the driver and its function pointers with
 * the GLD interface
 */
static int
igb_register_mac(igb_t *igb)
{
    struct e1000_hw *hw = &igb->hw;
    mac_register_t *mac;
    int status;

    if ((mac = mac_alloc(MAC_VERSION)) == NULL)
        return (IGB_FAILURE);

    mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    mac->m_driver = igb;
    mac->m_dip = igb->dip;
    mac->m_src_addr = hw->mac.addr;
    mac->m_callbacks = &igb_m_callbacks;
    mac->m_min_sdu = 0;
    mac->m_max_sdu = igb->max_frame_size -
        sizeof (struct ether_vlan_header) - ETHERFCSL;
    mac->m_margin = VLAN_TAGSZ;
    mac->m_priv_props = igb_priv_props;
    mac->m_v12n = MAC_VIRT_LEVEL1;

    status = mac_register(mac, &igb->mac_hdl);

    mac_free(mac);

    return ((status == 0) ? IGB_SUCCESS : IGB_FAILURE);
}

/*
 * igb_identify_hardware - Identify the type of the chipset
 */
static int
igb_identify_hardware(igb_t *igb)
{
    struct e1000_hw *hw = &igb->hw;
    struct igb_osdep *osdep = &igb->osdep;

    /*
     * Get the device id
     */
    hw->vendor_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
    hw->device_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
    hw->revision_id =
        pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
    hw->subsystem_device_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
    hw->subsystem_vendor_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

    /*
     * Set the mac type of the adapter based on the device id
     */
    if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
        return (IGB_FAILURE);
    }

    /*
     * Install adapter capabilities based on mac type
     */
    switch (hw->mac.type) {
    case e1000_82575:
        igb->capab = &igb_82575_cap;
        break;
    case e1000_82576:
        igb->capab = &igb_82576_cap;
        break;
    case e1000_82580:
        igb->capab = &igb_82580_cap;
        break;
    case e1000_i350:
        igb->capab = &igb_i350_cap;
        break;
    case e1000_i210:
    case e1000_i211:
        igb->capab = &igb_i210_cap;
        break;
    default:
        return (IGB_FAILURE);
    }

    return (IGB_SUCCESS);
}

/*
 * igb_regs_map - Map the device registers
 */
static int
igb_regs_map(igb_t *igb)
{
    dev_info_t *devinfo = igb->dip;
    struct e1000_hw *hw = &igb->hw;
    struct igb_osdep *osdep = &igb->osdep;
    off_t mem_size;

    /*
     * First get the size of device registers to be mapped.
     */
    if (ddi_dev_regsize(devinfo, IGB_ADAPTER_REGSET, &mem_size) !=
        DDI_SUCCESS) {
        return (IGB_FAILURE);
    }

    /*
     * Call ddi_regs_map_setup() to map registers
     */
    if ((ddi_regs_map_setup(devinfo, IGB_ADAPTER_REGSET,
        (caddr_t *)&hw->hw_addr, 0,
        mem_size, &igb_regs_acc_attr,
        &osdep->reg_handle)) != DDI_SUCCESS) {
        return (IGB_FAILURE);
    }

    return (IGB_SUCCESS);
}

/*
 * igb_init_properties - Initialize driver properties
 */
static void
igb_init_properties(igb_t *igb)
{
    /*
     * Get conf file properties, including link settings,
     * jumbo frames, ring number, descriptor number, etc.
     */
    igb_get_conf(igb);
}

/*
 * igb_init_driver_settings - Initialize driver settings
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be set up during driver initialization.
 */
static int
igb_init_driver_settings(igb_t *igb)
{
    struct e1000_hw *hw = &igb->hw;
    igb_rx_ring_t *rx_ring;
    igb_tx_ring_t *tx_ring;
    uint32_t rx_size;
    uint32_t tx_size;
    int i;

    /*
     * Initialize chipset specific hardware function pointers
     */
    if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
        return (IGB_FAILURE);
    }

    /*
     * Get bus information
     */
    if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
        return (IGB_FAILURE);
    }

    /*
     * Get the system page size
     */
    igb->page_size = ddi_ptob(igb->dip, (ulong_t)1);

    /*
     * Set rx buffer size.
     * The IP header alignment room is counted in the calculation.
     * The rx buffer size is in units of 1 KB, as required by the
     * chipset hardware.
     */
    rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM;
    igb->rx_buf_size = ((rx_size >> 10) +
        ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

    /*
     * Set tx buffer size
     */
    tx_size = igb->max_frame_size;
    igb->tx_buf_size = ((tx_size >> 10) +
        ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
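    /*
     * A worked example of the round-up-to-1KB arithmetic above,
     * assuming a standard 1522-byte max_frame_size and an
     * IPHDR_ALIGN_ROOM of 2 bytes: rx_size = 1524; 1524 >> 10 = 1,
     * and the low 10 bits (500) are non-zero, so one more 1 KB unit
     * is added, giving rx_buf_size = 2 << 10 = 2048 bytes.
     */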
    /*
     * Initialize rx/tx rings parameters
     */
    for (i = 0; i < igb->num_rx_rings; i++) {
        rx_ring = &igb->rx_rings[i];
        rx_ring->index = i;
        rx_ring->igb = igb;
    }

    for (i = 0; i < igb->num_tx_rings; i++) {
        tx_ring = &igb->tx_rings[i];
        tx_ring->index = i;
        tx_ring->igb = igb;
        if (igb->tx_head_wb_enable)
            tx_ring->tx_recycle = igb_tx_recycle_head_wb;
        else
            tx_ring->tx_recycle = igb_tx_recycle_legacy;

        tx_ring->ring_size = igb->tx_ring_size;
        tx_ring->free_list_size = igb->tx_ring_size +
            (igb->tx_ring_size >> 1);
    }

    /*
     * Initialize values of interrupt throttling rates
     */
    for (i = 1; i < MAX_NUM_EITR; i++)
        igb->intr_throttling[i] = igb->intr_throttling[0];

    /*
     * The initial link state should be "unknown"
     */
    igb->link_state = LINK_STATE_UNKNOWN;

    return (IGB_SUCCESS);
}

/*
 * igb_init_locks - Initialize locks
 */
static void
igb_init_locks(igb_t *igb)
{
    igb_rx_ring_t *rx_ring;
    igb_tx_ring_t *tx_ring;
    int i;

    for (i = 0; i < igb->num_rx_rings; i++) {
        rx_ring = &igb->rx_rings[i];
        mutex_init(&rx_ring->rx_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
    }

    for (i = 0; i < igb->num_tx_rings; i++) {
        tx_ring = &igb->tx_rings[i];
        mutex_init(&tx_ring->tx_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
        mutex_init(&tx_ring->recycle_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
        mutex_init(&tx_ring->tcb_head_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
        mutex_init(&tx_ring->tcb_tail_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
    }

    mutex_init(&igb->gen_lock, NULL,
        MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));

    mutex_init(&igb->watchdog_lock, NULL,
        MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));

    mutex_init(&igb->link_lock, NULL,
        MUTEX_DRIVER, DDI_INTR_PRI(igb->intr_pri));
}

/*
 * igb_destroy_locks - Destroy locks
 */
static void
igb_destroy_locks(igb_t *igb)
{
    igb_rx_ring_t *rx_ring;
    igb_tx_ring_t *tx_ring;
    int i;

    for (i = 0; i < igb->num_rx_rings; i++) {
        rx_ring = &igb->rx_rings[i];
        mutex_destroy(&rx_ring->rx_lock);
    }

    for (i = 0; i < igb->num_tx_rings; i++) {
        tx_ring = &igb->tx_rings[i];
        mutex_destroy(&tx_ring->tx_lock);
        mutex_destroy(&tx_ring->recycle_lock);
        mutex_destroy(&tx_ring->tcb_head_lock);
        mutex_destroy(&tx_ring->tcb_tail_lock);
    }

    mutex_destroy(&igb->gen_lock);
    mutex_destroy(&igb->watchdog_lock);
    mutex_destroy(&igb->link_lock);
}

static int
igb_resume(dev_info_t *devinfo)
{
    igb_t *igb;

    igb = (igb_t *)ddi_get_driver_private(devinfo);
    if (igb == NULL)
        return (DDI_FAILURE);

    mutex_enter(&igb->gen_lock);

    /*
     * Enable interrupts
     */
    if (igb->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
        if (igb_enable_intrs(igb) != IGB_SUCCESS) {
            igb_error(igb, "Failed to enable DDI interrupts");
            mutex_exit(&igb->gen_lock);
            return (DDI_FAILURE);
        }
    }

    if (igb->igb_state & IGB_STARTED) {
        if (igb_start(igb, B_FALSE) != IGB_SUCCESS) {
            mutex_exit(&igb->gen_lock);
            return (DDI_FAILURE);
        }

        /*
         * Enable and start the watchdog timer
         */
        igb_enable_watchdog_timer(igb);
    }

    atomic_and_32(&igb->igb_state, ~IGB_SUSPENDED);

    mutex_exit(&igb->gen_lock);

    return (DDI_SUCCESS);
}

static int
igb_suspend(dev_info_t *devinfo)
{
    igb_t *igb;

    igb = (igb_t *)ddi_get_driver_private(devinfo);
    if (igb == NULL)
        return (DDI_FAILURE);

    mutex_enter(&igb->gen_lock);

    atomic_or_32(&igb->igb_state, IGB_SUSPENDED);

    /*
     * Disable interrupts
     */
    if (igb->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
        (void) igb_disable_intrs(igb);
    }

    if (!(igb->igb_state & IGB_STARTED)) {
        mutex_exit(&igb->gen_lock);
        return (DDI_SUCCESS);
    }

    igb_stop(igb, B_FALSE);

    mutex_exit(&igb->gen_lock);

    /*
     * Disable and stop the watchdog timer
     */
    igb_disable_watchdog_timer(igb);

    return (DDI_SUCCESS);
}

static int
igb_init(igb_t *igb)
{
    mutex_enter(&igb->gen_lock);

    /*
     * Initialize the adapter
     */
    if (igb_init_adapter(igb) != IGB_SUCCESS) {
        mutex_exit(&igb->gen_lock);
        igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
        ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
        return (IGB_FAILURE);
    }

    mutex_exit(&igb->gen_lock);

    return (IGB_SUCCESS);
}

/*
 * igb_init_mac_address - Initialize the default MAC address
 *
 * On success, the MAC address is entered in the igb->hw.mac.addr
 * and hw->mac.perm_addr fields and the adapter's RAR(0) receive
 * address register.
 *
 * Important side effects:
 * 1. adapter is reset - this is required to put it in a known state.
 * 2. all of non-volatile memory (NVM) is read & checksummed - NVM is where
 *    the MAC address and all default settings are stored, so a valid
 *    checksum is required.
 */
static int
igb_init_mac_address(igb_t *igb)
{
    struct e1000_hw *hw = &igb->hw;

    ASSERT(mutex_owned(&igb->gen_lock));

    /*
     * Reset chipset to put the hardware in a known state
     * before we try to get the MAC address from NVM.
     */
    if (e1000_reset_hw(hw) != E1000_SUCCESS) {
        igb_error(igb, "Adapter reset failed.");
        goto init_mac_fail;
    }

    /*
     * NVM validation
     */
    if (((igb->hw.mac.type != e1000_i210) &&
        (igb->hw.mac.type != e1000_i211)) &&
        (e1000_validate_nvm_checksum(hw) < 0)) {
        /*
         * Some PCI-E parts fail the first check due to
         * the link being in sleep state. Call it again;
         * if it fails a second time, it's a real issue.
         */
        if (e1000_validate_nvm_checksum(hw) < 0) {
            igb_error(igb,
                "Invalid NVM checksum. Please contact "
                "the vendor to update the NVM.");
            goto init_mac_fail;
        }
    }

    /*
     * Get the mac address.
     * This function should handle the SPARC case correctly.
     */
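    /*
     * (On SPARC the address presumably comes from the PROM's
     * "local-mac-address" property rather than the NVM; see
     * igb_find_mac_address() for the exact lookup order.)
     */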
    if (!igb_find_mac_address(igb)) {
        igb_error(igb, "Failed to get the mac address");
        goto init_mac_fail;
    }

    /* Validate mac address */
    if (!is_valid_mac_addr(hw->mac.addr)) {
        igb_error(igb, "Invalid mac address");
        goto init_mac_fail;
    }

    return (IGB_SUCCESS);

init_mac_fail:
    return (IGB_FAILURE);
}

/*
 * igb_init_adapter - Initialize the adapter
 */
static int
igb_init_adapter(igb_t *igb)
{
    struct e1000_hw *hw = &igb->hw;
    uint32_t pba;
    int oemid[2];
    uint16_t nvmword;
    uint32_t hwm;
    uint32_t default_mtu;
    u8 pbanum[E1000_PBANUM_LENGTH];
    char eepromver[5];  /* f.ff */
    int i;

    ASSERT(mutex_owned(&igb->gen_lock));

    /*
     * In order to obtain the default MAC address, this will reset the
     * adapter and validate the NVM that the address and many other
     * default settings come from.
     */
    if (igb_init_mac_address(igb) != IGB_SUCCESS) {
        igb_error(igb, "Failed to initialize MAC address");
        goto init_adapter_fail;
    }

    /*
     * Packet Buffer Allocation (PBA)
     * Writing PBA sets the receive portion of the buffer;
     * the remainder is used for the transmit buffer.
     */
    switch (hw->mac.type) {
    case e1000_82575:
        pba = E1000_PBA_32K;
        break;
    case e1000_82576:
        pba = E1000_READ_REG(hw, E1000_RXPBS);
        pba &= E1000_RXPBS_SIZE_MASK_82576;
        break;
    case e1000_82580:
    case e1000_i350:
        pba = E1000_READ_REG(hw, E1000_RXPBS);
        pba = e1000_rxpbs_adjust_82580(pba);
        break;
    case e1000_i210:
    case e1000_i211:
        pba = E1000_PBA_34K;
        break;
    default:
        break;
    }

    /* Special needs in case of Jumbo frames */
    default_mtu = igb_get_prop(igb, PROP_DEFAULT_MTU,
        MIN_MTU, MAX_MTU, DEFAULT_MTU);
    if ((hw->mac.type == e1000_82575) && (default_mtu > ETHERMTU)) {
        u32 tx_space, min_tx, min_rx;
        pba = E1000_READ_REG(hw, E1000_PBA);
        tx_space = pba >> 16;
        pba &= 0xffff;
        min_tx = (igb->max_frame_size +
            sizeof (struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
        min_tx = roundup(min_tx, 1024);
        min_tx >>= 10;
        min_rx = igb->max_frame_size;
        min_rx = roundup(min_rx, 1024);
        min_rx >>= 10;
        if (tx_space < min_tx &&
            ((min_tx - tx_space) < pba)) {
            pba = pba - (min_tx - tx_space);
            /*
             * if short on rx space, rx wins
             * and must trump tx adjustment
             */
            if (pba < min_rx)
                pba = min_rx;
        }
        E1000_WRITE_REG(hw, E1000_PBA, pba);
    }

    DEBUGOUT1("igb_init: pba=%dK", pba);

    /*
     * These parameters control the automatic generation (Tx) and
     * response (Rx) to Ethernet PAUSE frames.
     * - The high water mark should allow for at least two frames to be
     *   received after sending an XOFF.
     * - The low water mark works best when it is very near the high
     *   water mark. This allows the receiver to restart by sending XON
     *   when it has drained a bit.
     */
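    /*
     * Worked example of the watermark math below, assuming the 82575's
     * 32 KB rx PBA (pba = 32) and a 1522-byte max frame:
     * (32 << 10) * 9 / 10 = 29491 and (32 << 10) - 2 * 1522 = 29724,
     * so hwm = 29491. With the 82575's 8-byte granularity this yields
     * high_water = 29488 (0x7330) and low_water = 29480.
     */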
    hwm = min(((pba << 10) * 9 / 10),
        ((pba << 10) - 2 * igb->max_frame_size));

    if (hw->mac.type < e1000_82576) {
        hw->fc.high_water = hwm & 0xFFF8;  /* 8-byte granularity */
        hw->fc.low_water = hw->fc.high_water - 8;
    } else {
        hw->fc.high_water = hwm & 0xFFF0;  /* 16-byte granularity */
        hw->fc.low_water = hw->fc.high_water - 16;
    }

    hw->fc.pause_time = E1000_FC_PAUSE_TIME;
    hw->fc.send_xon = B_TRUE;

    (void) e1000_validate_mdi_setting(hw);

    /*
     * Reset the chipset hardware the second time to put PBA settings
     * into effect.
     */
    if (e1000_reset_hw(hw) != E1000_SUCCESS) {
        igb_error(igb, "Second reset failed");
        goto init_adapter_fail;
    }

    /*
     * Don't wait for auto-negotiation to complete
     */
    hw->phy.autoneg_wait_to_complete = B_FALSE;

    /*
     * Copper options
     */
    if (hw->phy.media_type == e1000_media_type_copper) {
        hw->phy.mdix = 0;  /* AUTO_ALL_MODES */
        hw->phy.disable_polarity_correction = B_FALSE;
        hw->phy.ms_type = e1000_ms_hw_default;  /* E1000_MASTER_SLAVE */
    }

    /*
     * Initialize link settings
     */
    (void) igb_setup_link(igb, B_FALSE);

    /*
     * Configure/Initialize hardware
     */
    if (e1000_init_hw(hw) != E1000_SUCCESS) {
        igb_error(igb, "Failed to initialize hardware");
        goto init_adapter_fail;
    }

    /*
     * Start the link setup timer
     */
    igb_start_link_timer(igb);

    /*
     * Disable wakeup control by default
     */
    E1000_WRITE_REG(hw, E1000_WUC, 0);

    /*
     * Record phy info in hw struct
     */
    (void) e1000_get_phy_info(hw);

    /*
     * Make sure driver has control
     */
    igb_get_driver_control(hw);

    /*
     * Restore LED settings to the default from EEPROM
     * to meet the standard for Sun platforms.
     */
    (void) e1000_cleanup_led(hw);

    /*
     * Setup MSI-X interrupts
     */
    if (igb->intr_type == DDI_INTR_TYPE_MSIX)
        igb->capab->setup_msix(igb);

    /*
     * Initialize unicast addresses.
     */
    igb_init_unicst(igb);

    /*
     * Setup and initialize the mctable structures.
     */
    igb_setup_multicst(igb);

    /*
     * Set interrupt throttling rate
     */
    for (i = 0; i < igb->intr_cnt; i++)
        E1000_WRITE_REG(hw, E1000_EITR(i), igb->intr_throttling[i]);

    /*
     * Read identifying information and place in devinfo.
     */
    nvmword = 0xffff;
    (void) e1000_read_nvm(&igb->hw, NVM_OEM_OFFSET_0, 1, &nvmword);
    oemid[0] = (int)nvmword;
    (void) e1000_read_nvm(&igb->hw, NVM_OEM_OFFSET_1, 1, &nvmword);
    oemid[1] = (int)nvmword;
    (void) ddi_prop_update_int_array(DDI_DEV_T_NONE, igb->dip,
        "oem-identifier", oemid, 2);

    pbanum[0] = '\0';
    (void) e1000_read_pba_string(&igb->hw, pbanum, sizeof (pbanum));
    if (*pbanum != '\0') {
        (void) ddi_prop_update_string(DDI_DEV_T_NONE, igb->dip,
            "printed-board-assembly", (char *)pbanum);
    }

    nvmword = 0xffff;
    (void) e1000_read_nvm(&igb->hw, NVM_VERSION, 1, &nvmword);
    if ((nvmword & 0xf00) == 0) {
        (void) snprintf(eepromver, sizeof (eepromver), "%x.%x",
            (nvmword & 0xf000) >> 12, (nvmword & 0xff));
        (void) ddi_prop_update_string(DDI_DEV_T_NONE, igb->dip,
            "nvm-version", eepromver);
    }

    /*
     * Save the state of the phy
     */
    igb_get_phy_state(igb);

    igb_param_sync(igb);

    return (IGB_SUCCESS);

init_adapter_fail:
    /*
     * Reset PHY if possible
     */
    if (e1000_check_reset_block(hw) == E1000_SUCCESS)
        (void) e1000_phy_hw_reset(hw);

    return (IGB_FAILURE);
}

/*
 * igb_stop_adapter - Stop the adapter
 */
static void
igb_stop_adapter(igb_t *igb)
{
    struct e1000_hw *hw = &igb->hw;

    ASSERT(mutex_owned(&igb->gen_lock));

    /* Stop the link setup timer */
    igb_stop_link_timer(igb);

    /* Tell firmware driver is no longer in control */
    igb_release_driver_control(hw);

    /*
     * Reset the chipset
     */
    if (e1000_reset_hw(hw) != E1000_SUCCESS) {
        igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
        ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
    }

    /*
     * e1000_phy_hw_reset is not needed here, MAC reset above is sufficient
     */
}

/*
 * igb_reset - Reset the chipset and restart the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 */
static int
igb_reset(igb_t *igb)
{
    int i;

    mutex_enter(&igb->gen_lock);

    ASSERT(igb->igb_state & IGB_STARTED);
    atomic_and_32(&igb->igb_state, ~IGB_STARTED);

    /*
     * Disable the adapter interrupts to stop any rx/tx activities
     * before draining pending data and resetting hardware.
     */
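    /*
     * Locks here are taken in the order gen_lock, then every rx ring
     * lock, then every tx ring lock, and dropped in the reverse order
     * below - the same order igb_start()/igb_stop() use, presumably to
     * keep the lock hierarchy consistent across the driver.
     */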
    igb_disable_adapter_interrupts(igb);

    /*
     * Drain the pending transmit packets
     */
    (void) igb_tx_drain(igb);

    for (i = 0; i < igb->num_rx_rings; i++)
        mutex_enter(&igb->rx_rings[i].rx_lock);
    for (i = 0; i < igb->num_tx_rings; i++)
        mutex_enter(&igb->tx_rings[i].tx_lock);

    /*
     * Stop the adapter
     */
    igb_stop_adapter(igb);

    /*
     * Clean the pending tx data/resources
     */
    igb_tx_clean(igb);

    /*
     * Start the adapter
     */
    if (igb_init_adapter(igb) != IGB_SUCCESS) {
        igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
        goto reset_failure;
    }

    /*
     * Setup the rx/tx rings
     */
    igb->tx_ring_init = B_FALSE;
    igb_setup_rings(igb);

    atomic_and_32(&igb->igb_state, ~(IGB_ERROR | IGB_STALL));

    /*
     * Enable adapter interrupts
     * The interrupts must be enabled after the driver state is START
     */
    igb->capab->enable_intr(igb);

    if (igb_check_acc_handle(igb->osdep.cfg_handle) != DDI_FM_OK)
        goto reset_failure;

    if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
        goto reset_failure;

    for (i = igb->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&igb->tx_rings[i].tx_lock);
    for (i = igb->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&igb->rx_rings[i].rx_lock);

    atomic_or_32(&igb->igb_state, IGB_STARTED);

    mutex_exit(&igb->gen_lock);

    return (IGB_SUCCESS);

reset_failure:
    for (i = igb->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&igb->tx_rings[i].tx_lock);
    for (i = igb->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&igb->rx_rings[i].rx_lock);

    mutex_exit(&igb->gen_lock);

    ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);

    return (IGB_FAILURE);
}

/*
 * igb_tx_clean - Clean the pending transmit packets and DMA resources
 */
static void
igb_tx_clean(igb_t *igb)
{
    igb_tx_ring_t *tx_ring;
    tx_control_block_t *tcb;
    link_list_t pending_list;
    uint32_t desc_num;
    int i, j;

    LINK_LIST_INIT(&pending_list);

    for (i = 0; i < igb->num_tx_rings; i++) {
        tx_ring = &igb->tx_rings[i];

        mutex_enter(&tx_ring->recycle_lock);

        /*
         * Clean the pending tx data - the pending packets in the
         * work_list that have no chance of being transmitted again.
         *
         * We must ensure the chipset is stopped or the link is down
         * before cleaning the transmit packets.
         */
        desc_num = 0;
        for (j = 0; j < tx_ring->ring_size; j++) {
            tcb = tx_ring->work_list[j];
            if (tcb != NULL) {
                desc_num += tcb->desc_num;

                tx_ring->work_list[j] = NULL;

                igb_free_tcb(tcb);

                LIST_PUSH_TAIL(&pending_list, &tcb->link);
            }
        }

        if (desc_num > 0) {
            atomic_add_32(&tx_ring->tbd_free, desc_num);
            ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

            /*
             * Reset the head and tail pointers of the tbd ring;
             * reset the head write-back if it is enabled.
             */
            tx_ring->tbd_head = 0;
            tx_ring->tbd_tail = 0;
            if (igb->tx_head_wb_enable)
                *tx_ring->tbd_head_wb = 0;

            E1000_WRITE_REG(&igb->hw, E1000_TDH(tx_ring->index), 0);
            E1000_WRITE_REG(&igb->hw, E1000_TDT(tx_ring->index), 0);
        }

        mutex_exit(&tx_ring->recycle_lock);

        /*
         * Add the tx control blocks in the pending list to
         * the free list.
         */
        igb_put_free_list(tx_ring, &pending_list);
    }
}

/*
 * igb_tx_drain - Drain the tx rings to allow pending packets to be transmitted
 */
static boolean_t
igb_tx_drain(igb_t *igb)
{
    igb_tx_ring_t *tx_ring;
    boolean_t done;
    int i, j;

    /*
     * Wait for a specific time to allow pending tx packets
     * to be transmitted.
     *
     * Check the counter tbd_free to see if transmission is done.
     * No lock protection is needed here.
     *
     * Return B_TRUE if all pending packets have been transmitted;
     * otherwise return B_FALSE.
     */
    for (i = 0; i < TX_DRAIN_TIME; i++) {

        done = B_TRUE;
        for (j = 0; j < igb->num_tx_rings; j++) {
            tx_ring = &igb->tx_rings[j];
            done = done &&
                (tx_ring->tbd_free == tx_ring->ring_size);
        }

        if (done)
            break;

        msec_delay(1);
    }

    return (done);
}

/*
 * igb_rx_drain - Wait for all rx buffers to be released by upper layer
 */
static boolean_t
igb_rx_drain(igb_t *igb)
{
    boolean_t done;
    int i;

    /*
     * Poll the rx free list to check whether the rx buffers held by
     * the upper layer have been released.
     *
     * Check the counter rcb_free to see if all pending buffers are
     * released. No lock protection is needed here.
     *
     * Return B_TRUE if all pending buffers have been released;
     * otherwise return B_FALSE.
     */
    for (i = 0; i < RX_DRAIN_TIME; i++) {
        done = (igb->rcb_pending == 0);

        if (done)
            break;

        msec_delay(1);
    }

    return (done);
}

/*
 * igb_start - Start the driver/chipset
 */
int
igb_start(igb_t *igb, boolean_t alloc_buffer)
{
    int i;

    ASSERT(mutex_owned(&igb->gen_lock));

    if (alloc_buffer) {
        if (igb_alloc_rx_data(igb) != IGB_SUCCESS) {
            igb_error(igb,
                "Failed to allocate software receive rings");
            return (IGB_FAILURE);
        }

        /* Allocate buffers for all the rx/tx rings */
        if (igb_alloc_dma(igb) != IGB_SUCCESS) {
            igb_error(igb, "Failed to allocate DMA resource");
            return (IGB_FAILURE);
        }

        igb->tx_ring_init = B_TRUE;
    } else {
        igb->tx_ring_init = B_FALSE;
    }

    for (i = 0; i < igb->num_rx_rings; i++)
        mutex_enter(&igb->rx_rings[i].rx_lock);
    for (i = 0; i < igb->num_tx_rings; i++)
        mutex_enter(&igb->tx_rings[i].tx_lock);

    /*
     * Start the adapter
     */
    if ((igb->attach_progress & ATTACH_PROGRESS_INIT_ADAPTER) == 0) {
        if (igb_init_adapter(igb) != IGB_SUCCESS) {
            igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
            goto start_failure;
        }
        igb->attach_progress |= ATTACH_PROGRESS_INIT_ADAPTER;
    }

    /*
     * Setup the rx/tx rings
     */
    igb_setup_rings(igb);

    /*
     * Enable adapter interrupts
     * The interrupts must be enabled after the driver state is START
     */
    igb->capab->enable_intr(igb);

    if (igb_check_acc_handle(igb->osdep.cfg_handle) != DDI_FM_OK)
        goto start_failure;

    if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
        goto start_failure;

    if (igb->hw.mac.type == e1000_i350)
        (void) e1000_set_eee_i350(&igb->hw);

    for (i = igb->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&igb->tx_rings[i].tx_lock);
    for (i = igb->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&igb->rx_rings[i].rx_lock);
    return (IGB_SUCCESS);

start_failure:
    for (i = igb->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&igb->tx_rings[i].tx_lock);
    for (i = igb->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&igb->rx_rings[i].rx_lock);

    ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);

    return (IGB_FAILURE);
}

/*
 * igb_stop - Stop the driver/chipset
 */
void
igb_stop(igb_t *igb, boolean_t free_buffer)
{
    int i;

    ASSERT(mutex_owned(&igb->gen_lock));

    igb->attach_progress &= ~ATTACH_PROGRESS_INIT_ADAPTER;

    /*
     * Disable the adapter interrupts
     */
    igb_disable_adapter_interrupts(igb);

    /*
     * Drain the pending tx packets
     */
    (void) igb_tx_drain(igb);

    for (i = 0; i < igb->num_rx_rings; i++)
        mutex_enter(&igb->rx_rings[i].rx_lock);
    for (i = 0; i < igb->num_tx_rings; i++)
        mutex_enter(&igb->tx_rings[i].tx_lock);

    /*
     * Stop the adapter
     */
    igb_stop_adapter(igb);

    /*
     * Clean the pending tx data/resources
     */
    igb_tx_clean(igb);

    for (i = igb->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&igb->tx_rings[i].tx_lock);
    for (i = igb->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&igb->rx_rings[i].rx_lock);

    if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK)
        ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);

    if (igb->link_state == LINK_STATE_UP) {
        igb->link_state = LINK_STATE_UNKNOWN;
        mac_link_update(igb->mac_hdl, igb->link_state);
    }

    if (free_buffer) {
        /*
         * Release the DMA/memory resources of rx/tx rings
         */
        igb_free_dma(igb);
        igb_free_rx_data(igb);
    }
}

/*
 * igb_alloc_rings - Allocate memory space for rx/tx rings
 */
static int
igb_alloc_rings(igb_t *igb)
{
    /*
     * Allocate memory space for rx rings
     */
    igb->rx_rings = kmem_zalloc(
        sizeof (igb_rx_ring_t) * igb->num_rx_rings,
        KM_NOSLEEP);

    if (igb->rx_rings == NULL) {
        return (IGB_FAILURE);
    }

    /*
     * Allocate memory space for tx rings
     */
    igb->tx_rings = kmem_zalloc(
        sizeof (igb_tx_ring_t) * igb->num_tx_rings,
        KM_NOSLEEP);

    if (igb->tx_rings == NULL) {
        kmem_free(igb->rx_rings,
            sizeof (igb_rx_ring_t) * igb->num_rx_rings);
        igb->rx_rings = NULL;
        return (IGB_FAILURE);
    }

    /*
     * Allocate memory space for rx ring groups
     */
    igb->rx_groups = kmem_zalloc(
        sizeof (igb_rx_group_t) * igb->num_rx_groups,
        KM_NOSLEEP);

    if (igb->rx_groups == NULL) {
        kmem_free(igb->rx_rings,
            sizeof (igb_rx_ring_t) * igb->num_rx_rings);
        kmem_free(igb->tx_rings,
            sizeof (igb_tx_ring_t) * igb->num_tx_rings);
        igb->rx_rings = NULL;
        igb->tx_rings = NULL;
        return (IGB_FAILURE);
    }

    return (IGB_SUCCESS);
}

/*
 * igb_free_rings - Free the memory space of rx/tx rings.
 */
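/*
 * Note that kmem_free() must be passed the same size that was given to
 * kmem_zalloc(), which is why the sizeof-by-count expressions below
 * mirror those in igb_alloc_rings() exactly.
 */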
static void
igb_free_rings(igb_t *igb)
{
    if (igb->rx_rings != NULL) {
        kmem_free(igb->rx_rings,
            sizeof (igb_rx_ring_t) * igb->num_rx_rings);
        igb->rx_rings = NULL;
    }

    if (igb->tx_rings != NULL) {
        kmem_free(igb->tx_rings,
            sizeof (igb_tx_ring_t) * igb->num_tx_rings);
        igb->tx_rings = NULL;
    }

    if (igb->rx_groups != NULL) {
        kmem_free(igb->rx_groups,
            sizeof (igb_rx_group_t) * igb->num_rx_groups);
        igb->rx_groups = NULL;
    }
}

static int
igb_alloc_rx_data(igb_t *igb)
{
    igb_rx_ring_t *rx_ring;
    int i;

    for (i = 0; i < igb->num_rx_rings; i++) {
        rx_ring = &igb->rx_rings[i];
        if (igb_alloc_rx_ring_data(rx_ring) != IGB_SUCCESS)
            goto alloc_rx_rings_failure;
    }
    return (IGB_SUCCESS);

alloc_rx_rings_failure:
    igb_free_rx_data(igb);
    return (IGB_FAILURE);
}

static void
igb_free_rx_data(igb_t *igb)
{
    igb_rx_ring_t *rx_ring;
    igb_rx_data_t *rx_data;
    int i;

    for (i = 0; i < igb->num_rx_rings; i++) {
        rx_ring = &igb->rx_rings[i];

        mutex_enter(&igb->rx_pending_lock);
        rx_data = rx_ring->rx_data;

        if (rx_data != NULL) {
            rx_data->flag |= IGB_RX_STOPPED;

            if (rx_data->rcb_pending == 0) {
                igb_free_rx_ring_data(rx_data);
                rx_ring->rx_data = NULL;
            }
        }

        mutex_exit(&igb->rx_pending_lock);
    }
}

/*
 * igb_setup_rings - Setup rx/tx rings
 */
static void
igb_setup_rings(igb_t *igb)
{
    /*
     * Setup the rx/tx rings, including the following:
     *
     * 1. Setup the descriptor ring and the control block buffers;
     * 2. Initialize necessary registers for receive/transmit;
     * 3. Initialize software pointers/parameters for receive/transmit;
     */
    igb_setup_rx(igb);

    igb_setup_tx(igb);
}

static void
igb_setup_rx_ring(igb_rx_ring_t *rx_ring)
{
    igb_t *igb = rx_ring->igb;
    igb_rx_data_t *rx_data = rx_ring->rx_data;
    struct e1000_hw *hw = &igb->hw;
    rx_control_block_t *rcb;
    union e1000_adv_rx_desc *rbd;
    uint32_t size;
    uint32_t buf_low;
    uint32_t buf_high;
    uint32_t rxdctl;
    int i;

    ASSERT(mutex_owned(&rx_ring->rx_lock));
    ASSERT(mutex_owned(&igb->gen_lock));

    /*
     * Initialize descriptor ring with buffer addresses
     */
    for (i = 0; i < igb->rx_ring_size; i++) {
        rcb = rx_data->work_list[i];
        rbd = &rx_data->rbd_ring[i];

        rbd->read.pkt_addr = rcb->rx_buf.dma_address;
        rbd->read.hdr_addr = NULL;
    }

    /*
     * Initialize the base address registers
     */
    buf_low = (uint32_t)rx_data->rbd_area.dma_address;
    buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
    E1000_WRITE_REG(hw, E1000_RDBAH(rx_ring->index), buf_high);
    E1000_WRITE_REG(hw, E1000_RDBAL(rx_ring->index), buf_low);

    /*
     * Initialize the length register
     */
    size = rx_data->ring_size * sizeof (union e1000_adv_rx_desc);
    E1000_WRITE_REG(hw, E1000_RDLEN(rx_ring->index), size);

    /*
     * Initialize buffer size & descriptor type
     */
    E1000_WRITE_REG(hw, E1000_SRRCTL(rx_ring->index),
        ((igb->rx_buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) |
        E1000_SRRCTL_DESCTYPE_ADV_ONEBUF));

    /*
     * Setup the Receive Descriptor Control Register (RXDCTL)
     */
    rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rx_ring->index));
    rxdctl &= igb->capab->rxdctl_mask;
    rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
    rxdctl |= 16;       /* pthresh */
    rxdctl |= 8 << 8;   /* hthresh */
    rxdctl |= 1 << 16;  /* wthresh */
    E1000_WRITE_REG(hw, E1000_RXDCTL(rx_ring->index), rxdctl);

    rx_data->rbd_next = 0;
}

static void
igb_setup_rx(igb_t *igb)
{
    igb_rx_ring_t *rx_ring;
    igb_rx_data_t *rx_data;
    igb_rx_group_t *rx_group;
    struct e1000_hw *hw = &igb->hw;
    uint32_t rctl, rxcsum;
    uint32_t ring_per_group;
    int i;

    /*
     * Setup the Receive Control Register (RCTL), and enable the
     * receiver. The initial configuration is to: enable the receiver,
     * accept broadcasts, discard bad packets, accept long packets,
     * disable VLAN filter checking, and set the receive buffer size to
     * 2k. For 82575, also set the receive descriptor minimum
     * threshold size to 1/2 the ring.
     */
    rctl = E1000_READ_REG(hw, E1000_RCTL);

    /*
     * Clear the field used for wakeup control. This driver doesn't do
     * wakeup, but clear it here for completeness.
     */
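    /*
     * (The expression below clears the two-bit multicast-offset (MO)
     * field; the adapter's mc_filter_type is then OR-ed back into the
     * same field a few lines further down.)
     */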
2141 */ 2142 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2143 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 2144 2145 rctl |= (E1000_RCTL_EN | /* Enable Receive Unit */ 2146 E1000_RCTL_BAM | /* Accept Broadcast Packets */ 2147 E1000_RCTL_LPE | /* Large Packet Enable */ 2148 /* Multicast filter offset */ 2149 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT) | 2150 E1000_RCTL_RDMTS_HALF | /* rx descriptor threshold */ 2151 E1000_RCTL_SECRC); /* Strip Ethernet CRC */ 2152 2153 for (i = 0; i < igb->num_rx_groups; i++) { 2154 rx_group = &igb->rx_groups[i]; 2155 rx_group->index = i; 2156 rx_group->igb = igb; 2157 } 2158 2159 /* 2160 * Set up all rx descriptor rings - must be called before receive unit 2161 * enabled. 2162 */ 2163 ring_per_group = igb->num_rx_rings / igb->num_rx_groups; 2164 for (i = 0; i < igb->num_rx_rings; i++) { 2165 rx_ring = &igb->rx_rings[i]; 2166 igb_setup_rx_ring(rx_ring); 2167 2168 /* 2169 * Map a ring to a group by assigning a group index 2170 */ 2171 rx_ring->group_index = i / ring_per_group; 2172 } 2173 2174 /* 2175 * Setup the Rx Long Packet Max Length register 2176 */ 2177 E1000_WRITE_REG(hw, E1000_RLPML, igb->max_frame_size); 2178 2179 /* 2180 * Hardware checksum settings 2181 */ 2182 if (igb->rx_hcksum_enable) { 2183 rxcsum = 2184 E1000_RXCSUM_TUOFL | /* TCP/UDP checksum */ 2185 E1000_RXCSUM_IPOFL; /* IP checksum */ 2186 2187 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); 2188 } 2189 2190 /* 2191 * Setup classify and RSS for multiple receive queues 2192 */ 2193 switch (igb->vmdq_mode) { 2194 case E1000_VMDQ_OFF: 2195 /* 2196 * One ring group, only RSS is needed when more than 2197 * one ring enabled. 2198 */ 2199 if (igb->num_rx_rings > 1) 2200 igb_setup_rss(igb); 2201 break; 2202 case E1000_VMDQ_MAC: 2203 /* 2204 * Multiple groups, each group has one ring, 2205 * only the MAC classification is needed. 2206 */ 2207 igb_setup_mac_classify(igb); 2208 break; 2209 case E1000_VMDQ_MAC_RSS: 2210 /* 2211 * Multiple groups and multiple rings, both 2212 * MAC classification and RSS are needed. 2213 */ 2214 igb_setup_mac_rss_classify(igb); 2215 break; 2216 } 2217 2218 /* 2219 * Enable the receive unit - must be done after all 2220 * the rx setup above. 2221 */ 2222 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2223 2224 /* 2225 * Initialize all adapter ring head & tail pointers - must 2226 * be done after receive unit is enabled 2227 */ 2228 for (i = 0; i < igb->num_rx_rings; i++) { 2229 rx_ring = &igb->rx_rings[i]; 2230 rx_data = rx_ring->rx_data; 2231 E1000_WRITE_REG(hw, E1000_RDH(i), 0); 2232 E1000_WRITE_REG(hw, E1000_RDT(i), rx_data->ring_size - 1); 2233 } 2234 2235 /* 2236 * 82575 with manageability enabled needs a special flush to make 2237 * sure the fifos start clean. 
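 * Manageability (TCO/BMC) traffic can leave stale data in the rx fifos
 * across a reset; the flush drains them so reception starts from a
 * known-clean state.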
2238 */ 2239 if ((hw->mac.type == e1000_82575) && 2240 (E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) { 2241 e1000_rx_fifo_flush_82575(hw); 2242 } 2243 } 2244 2245 static void 2246 igb_setup_tx_ring(igb_tx_ring_t *tx_ring) 2247 { 2248 igb_t *igb = tx_ring->igb; 2249 struct e1000_hw *hw = &igb->hw; 2250 uint32_t size; 2251 uint32_t buf_low; 2252 uint32_t buf_high; 2253 uint32_t reg_val; 2254 2255 ASSERT(mutex_owned(&tx_ring->tx_lock)); 2256 ASSERT(mutex_owned(&igb->gen_lock)); 2257 2258 2259 /* 2260 * Initialize the length register 2261 */ 2262 size = tx_ring->ring_size * sizeof (union e1000_adv_tx_desc); 2263 E1000_WRITE_REG(hw, E1000_TDLEN(tx_ring->index), size); 2264 2265 /* 2266 * Initialize the base address registers 2267 */ 2268 buf_low = (uint32_t)tx_ring->tbd_area.dma_address; 2269 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32); 2270 E1000_WRITE_REG(hw, E1000_TDBAL(tx_ring->index), buf_low); 2271 E1000_WRITE_REG(hw, E1000_TDBAH(tx_ring->index), buf_high); 2272 2273 /* 2274 * Setup head & tail pointers 2275 */ 2276 E1000_WRITE_REG(hw, E1000_TDH(tx_ring->index), 0); 2277 E1000_WRITE_REG(hw, E1000_TDT(tx_ring->index), 0); 2278 2279 /* 2280 * Setup head write-back 2281 */ 2282 if (igb->tx_head_wb_enable) { 2283 /* 2284 * The memory of the head write-back is allocated using 2285 * the extra tbd beyond the tail of the tbd ring. 2286 */ 2287 tx_ring->tbd_head_wb = (uint32_t *) 2288 ((uintptr_t)tx_ring->tbd_area.address + size); 2289 *tx_ring->tbd_head_wb = 0; 2290 2291 buf_low = (uint32_t) 2292 (tx_ring->tbd_area.dma_address + size); 2293 buf_high = (uint32_t) 2294 ((tx_ring->tbd_area.dma_address + size) >> 32); 2295 2296 /* Set the head write-back enable bit */ 2297 buf_low |= E1000_TX_HEAD_WB_ENABLE; 2298 2299 E1000_WRITE_REG(hw, E1000_TDWBAL(tx_ring->index), buf_low); 2300 E1000_WRITE_REG(hw, E1000_TDWBAH(tx_ring->index), buf_high); 2301 2302 /* 2303 * Turn off relaxed ordering for head write back or it will 2304 * cause problems with the tx recycling 2305 */ 2306 reg_val = E1000_READ_REG(hw, 2307 E1000_DCA_TXCTRL(tx_ring->index)); 2308 reg_val &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; 2309 E1000_WRITE_REG(hw, 2310 E1000_DCA_TXCTRL(tx_ring->index), reg_val); 2311 } else { 2312 tx_ring->tbd_head_wb = NULL; 2313 } 2314 2315 tx_ring->tbd_head = 0; 2316 tx_ring->tbd_tail = 0; 2317 tx_ring->tbd_free = tx_ring->ring_size; 2318 2319 if (igb->tx_ring_init == B_TRUE) { 2320 tx_ring->tcb_head = 0; 2321 tx_ring->tcb_tail = 0; 2322 tx_ring->tcb_free = tx_ring->free_list_size; 2323 } 2324 2325 /* 2326 * Enable TXDCTL per queue 2327 */ 2328 reg_val = E1000_READ_REG(hw, E1000_TXDCTL(tx_ring->index)); 2329 reg_val |= E1000_TXDCTL_QUEUE_ENABLE; 2330 E1000_WRITE_REG(hw, E1000_TXDCTL(tx_ring->index), reg_val); 2331 2332 /* 2333 * Initialize hardware checksum offload settings 2334 */ 2335 bzero(&tx_ring->tx_context, sizeof (tx_context_t)); 2336 } 2337 2338 static void 2339 igb_setup_tx(igb_t *igb) 2340 { 2341 igb_tx_ring_t *tx_ring; 2342 struct e1000_hw *hw = &igb->hw; 2343 uint32_t reg_val; 2344 int i; 2345 2346 for (i = 0; i < igb->num_tx_rings; i++) { 2347 tx_ring = &igb->tx_rings[i]; 2348 igb_setup_tx_ring(tx_ring); 2349 } 2350 2351 /* 2352 * Setup the Transmit Control Register (TCTL) 2353 */ 2354 reg_val = E1000_READ_REG(hw, E1000_TCTL); 2355 reg_val &= ~E1000_TCTL_CT; 2356 reg_val |= E1000_TCTL_PSP | E1000_TCTL_RTLC | 2357 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2358 2359 /* Enable transmits */ 2360 reg_val |= E1000_TCTL_EN; 2361 2362 E1000_WRITE_REG(hw, E1000_TCTL, reg_val); 
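	/* A note on the TCTL bits set above: E1000_TCTL_PSP pads short packets to the minimum frame size, and E1000_TCTL_RTLC enables retransmission on late collisions. */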
2363 } 2364 2365 /* 2366 * igb_setup_rss - Setup receive-side scaling feature 2367 */ 2368 static void 2369 igb_setup_rss(igb_t *igb) 2370 { 2371 struct e1000_hw *hw = &igb->hw; 2372 uint32_t i, mrqc, rxcsum; 2373 int shift = 0; 2374 uint32_t random; 2375 union e1000_reta { 2376 uint32_t dword; 2377 uint8_t bytes[4]; 2378 } reta; 2379 2380 /* Setup the Redirection Table */ 2381 if (hw->mac.type == e1000_82576) { 2382 shift = 3; 2383 } else if (hw->mac.type == e1000_82575) { 2384 shift = 6; 2385 } 2386 for (i = 0; i < (32 * 4); i++) { 2387 reta.bytes[i & 3] = (i % igb->num_rx_rings) << shift; 2388 if ((i & 3) == 3) { 2389 E1000_WRITE_REG(hw, 2390 (E1000_RETA(0) + (i & ~3)), reta.dword); 2391 } 2392 } 2393 2394 /* Fill out hash function seeds */ 2395 for (i = 0; i < 10; i++) { 2396 (void) random_get_pseudo_bytes((uint8_t *)&random, 2397 sizeof (uint32_t)); 2398 E1000_WRITE_REG(hw, E1000_RSSRK(i), random); 2399 } 2400 2401 /* Setup the Multiple Receive Queue Control register */ 2402 mrqc = E1000_MRQC_ENABLE_RSS_4Q; 2403 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | 2404 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2405 E1000_MRQC_RSS_FIELD_IPV6 | 2406 E1000_MRQC_RSS_FIELD_IPV6_TCP | 2407 E1000_MRQC_RSS_FIELD_IPV4_UDP | 2408 E1000_MRQC_RSS_FIELD_IPV6_UDP | 2409 E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | 2410 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); 2411 2412 E1000_WRITE_REG(hw, E1000_MRQC, mrqc); 2413 2414 /* 2415 * Disable Packet Checksum to enable RSS for multiple receive queues. 2416 * 2417 * The Packet Checksum is not ethernet CRC. It is another kind of 2418 * checksum offloading provided by the 82575 chipset besides the IP 2419 * header checksum offloading and the TCP/UDP checksum offloading. 2420 * The Packet Checksum is by default computed over the entire packet 2421 * from the first byte of the DA through the last byte of the CRC, 2422 * including the Ethernet and IP headers. 2423 * 2424 * It is a hardware limitation that Packet Checksum is mutually 2425 * exclusive with RSS. 2426 */ 2427 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); 2428 rxcsum |= E1000_RXCSUM_PCSD; 2429 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); 2430 } 2431 2432 /* 2433 * igb_setup_mac_rss_classify - Setup MAC classification and rss 2434 */ 2435 static void 2436 igb_setup_mac_rss_classify(igb_t *igb) 2437 { 2438 struct e1000_hw *hw = &igb->hw; 2439 uint32_t i, mrqc, vmdctl, rxcsum; 2440 uint32_t ring_per_group; 2441 int shift_group0, shift_group1; 2442 uint32_t random; 2443 union e1000_reta { 2444 uint32_t dword; 2445 uint8_t bytes[4]; 2446 } reta; 2447 2448 ring_per_group = igb->num_rx_rings / igb->num_rx_groups; 2449 2450 /* Setup the Redirection Table, it is shared between two groups */ 2451 shift_group0 = 2; 2452 shift_group1 = 6; 2453 for (i = 0; i < (32 * 4); i++) { 2454 reta.bytes[i & 3] = ((i % ring_per_group) << shift_group0) | 2455 ((ring_per_group + (i % ring_per_group)) << shift_group1); 2456 if ((i & 3) == 3) { 2457 E1000_WRITE_REG(hw, 2458 (E1000_RETA(0) + (i & ~3)), reta.dword); 2459 } 2460 } 2461 2462 /* Fill out hash function seeds */ 2463 for (i = 0; i < 10; i++) { 2464 (void) random_get_pseudo_bytes((uint8_t *)&random, 2465 sizeof (uint32_t)); 2466 E1000_WRITE_REG(hw, E1000_RSSRK(i), random); 2467 } 2468 2469 /* 2470 * Setup the Multiple Receive Queue Control register, 2471 * enable VMDq based on packet destination MAC address and RSS. 
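 * The shared redirection table programmed above interleaves the two
 * groups: with shift_group0 = 2 and shift_group1 = 6, entry i encodes
 * ((i % ring_per_group) << 2) for group 0 and
 * ((ring_per_group + (i % ring_per_group)) << 6) for group 1, so RSS
 * spreads traffic within whichever group MAC classification selects.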
2472 */ 2473 mrqc = E1000_MRQC_ENABLE_VMDQ_MAC_RSS_GROUP; 2474 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | 2475 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2476 E1000_MRQC_RSS_FIELD_IPV6 | 2477 E1000_MRQC_RSS_FIELD_IPV6_TCP | 2478 E1000_MRQC_RSS_FIELD_IPV4_UDP | 2479 E1000_MRQC_RSS_FIELD_IPV6_UDP | 2480 E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | 2481 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); 2482 2483 E1000_WRITE_REG(hw, E1000_MRQC, mrqc); 2484 2485 2486 /* Define the default group and default queues */ 2487 vmdctl = E1000_VMDQ_MAC_GROUP_DEFAULT_QUEUE; 2488 E1000_WRITE_REG(hw, E1000_VT_CTL, vmdctl); 2489 2490 /* 2491 * Disable Packet Checksum to enable RSS for multiple receive queues. 2492 * 2493 * The Packet Checksum is not ethernet CRC. It is another kind of 2494 * checksum offloading provided by the 82575 chipset besides the IP 2495 * header checksum offloading and the TCP/UDP checksum offloading. 2496 * The Packet Checksum is by default computed over the entire packet 2497 * from the first byte of the DA through the last byte of the CRC, 2498 * including the Ethernet and IP headers. 2499 * 2500 * It is a hardware limitation that Packet Checksum is mutually 2501 * exclusive with RSS. 2502 */ 2503 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); 2504 rxcsum |= E1000_RXCSUM_PCSD; 2505 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); 2506 } 2507 2508 /* 2509 * igb_setup_mac_classify - Setup MAC classification feature 2510 */ 2511 static void 2512 igb_setup_mac_classify(igb_t *igb) 2513 { 2514 struct e1000_hw *hw = &igb->hw; 2515 uint32_t mrqc, rxcsum; 2516 2517 /* 2518 * Setup the Multiple Receive Queue Control register, 2519 * enable VMDq based on packet destination MAC address. 2520 */ 2521 mrqc = E1000_MRQC_ENABLE_VMDQ_MAC_GROUP; 2522 E1000_WRITE_REG(hw, E1000_MRQC, mrqc); 2523 2524 /* 2525 * Disable Packet Checksum to allow multiple receive queues. 2526 * 2527 * The Packet Checksum is not ethernet CRC. It is another kind of 2528 * checksum offloading provided by the 82575 chipset besides the IP 2529 * header checksum offloading and the TCP/UDP checksum offloading. 2530 * The Packet Checksum is by default computed over the entire packet 2531 * from the first byte of the DA through the last byte of the CRC, 2532 * including the Ethernet and IP headers. 2533 * 2534 * It is a hardware limitation that Packet Checksum is mutually 2535 * exclusive with RSS. 2536 */ 2537 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); 2538 rxcsum |= E1000_RXCSUM_PCSD; 2539 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); 2540 2541 } 2542 2543 /* 2544 * igb_init_unicst - Initialize the unicast addresses 2545 */ 2546 static void 2547 igb_init_unicst(igb_t *igb) 2548 { 2549 struct e1000_hw *hw = &igb->hw; 2550 int slot; 2551 2552 /* 2553 * Here we should consider two situations: 2554 * 2555 * 1. Chipset is initialized the first time 2556 * Initialize the multiple unicast addresses, and 2557 * save the default MAC address. 2558 * 2559 * 2. Chipset is reset 2560 * Recover the multiple unicast addresses from the 2561 * software data structure to the RAR registers. 2562 */ 2563 2564 /* 2565 * Clear the default MAC address in the RAR0 register, 2566 * which is loaded from the EEPROM at system boot or chip reset; 2567 * otherwise it conflicts with the add_mac/rem_mac entry 2568 * points when VMDq is enabled. For this reason, the RAR0 2569 * must be cleared for both cases mentioned above.
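 * The addresses are re-installed afterwards: across a reset they are
 * recovered from unicst_addr[] via e1000_rar_set_vmdq() in the else
 * branch below, and on first use they arrive through the mac layer's
 * add_mac entry point.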
2570 */ 2571 e1000_rar_clear(hw, 0); 2572 2573 if (!igb->unicst_init) { 2574 2575 /* Initialize the multiple unicast addresses */ 2576 igb->unicst_total = MAX_NUM_UNICAST_ADDRESSES; 2577 igb->unicst_avail = igb->unicst_total; 2578 2579 for (slot = 0; slot < igb->unicst_total; slot++) 2580 igb->unicst_addr[slot].mac.set = 0; 2581 2582 igb->unicst_init = B_TRUE; 2583 } else { 2584 /* Re-configure the RAR registers */ 2585 for (slot = 0; slot < igb->unicst_total; slot++) { 2586 e1000_rar_set_vmdq(hw, igb->unicst_addr[slot].mac.addr, 2587 slot, igb->vmdq_mode, 2588 igb->unicst_addr[slot].mac.group_index); 2589 } 2590 } 2591 } 2592 2593 /* 2594 * igb_unicst_find - Find the slot for the specified unicast address 2595 */ 2596 int 2597 igb_unicst_find(igb_t *igb, const uint8_t *mac_addr) 2598 { 2599 int slot; 2600 2601 ASSERT(mutex_owned(&igb->gen_lock)); 2602 2603 for (slot = 0; slot < igb->unicst_total; slot++) { 2604 if (bcmp(igb->unicst_addr[slot].mac.addr, 2605 mac_addr, ETHERADDRL) == 0) 2606 return (slot); 2607 } 2608 2609 return (-1); 2610 } 2611 2612 /* 2613 * igb_unicst_set - Set the unicast address to the specified slot 2614 */ 2615 int 2616 igb_unicst_set(igb_t *igb, const uint8_t *mac_addr, 2617 int slot) 2618 { 2619 struct e1000_hw *hw = &igb->hw; 2620 2621 ASSERT(mutex_owned(&igb->gen_lock)); 2622 2623 /* 2624 * Save the unicast address in the software data structure 2625 */ 2626 bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL); 2627 2628 /* 2629 * Set the unicast address to the RAR register 2630 */ 2631 e1000_rar_set(hw, (uint8_t *)mac_addr, slot); 2632 2633 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) { 2634 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED); 2635 return (EIO); 2636 } 2637 2638 return (0); 2639 } 2640 2641 /* 2642 * igb_multicst_add - Add a multicst address 2643 */ 2644 int 2645 igb_multicst_add(igb_t *igb, const uint8_t *multiaddr) 2646 { 2647 struct ether_addr *new_table; 2648 size_t new_len; 2649 size_t old_len; 2650 2651 ASSERT(mutex_owned(&igb->gen_lock)); 2652 2653 if ((multiaddr[0] & 01) == 0) { 2654 igb_error(igb, "Illegal multicast address"); 2655 return (EINVAL); 2656 } 2657 2658 if (igb->mcast_count >= igb->mcast_max_num) { 2659 igb_error(igb, "Adapter requested more than %d mcast addresses", 2660 igb->mcast_max_num); 2661 return (ENOENT); 2662 } 2663 2664 if (igb->mcast_count == igb->mcast_alloc_count) { 2665 old_len = igb->mcast_alloc_count * 2666 sizeof (struct ether_addr); 2667 new_len = (igb->mcast_alloc_count + MCAST_ALLOC_COUNT) * 2668 sizeof (struct ether_addr); 2669 2670 new_table = kmem_alloc(new_len, KM_NOSLEEP); 2671 if (new_table == NULL) { 2672 igb_error(igb, 2673 "Not enough memory to alloc mcast table"); 2674 return (ENOMEM); 2675 } 2676 2677 if (igb->mcast_table != NULL) { 2678 bcopy(igb->mcast_table, new_table, old_len); 2679 kmem_free(igb->mcast_table, old_len); 2680 } 2681 igb->mcast_alloc_count += MCAST_ALLOC_COUNT; 2682 igb->mcast_table = new_table; 2683 } 2684 2685 bcopy(multiaddr, 2686 &igb->mcast_table[igb->mcast_count], ETHERADDRL); 2687 igb->mcast_count++; 2688 2689 /* 2690 * Update the multicast table in the hardware 2691 */ 2692 igb_setup_multicst(igb); 2693 2694 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) { 2695 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED); 2696 return (EIO); 2697 } 2698 2699 return (0); 2700 } 2701 2702 /* 2703 * igb_multicst_remove - Remove a multicst address 2704 */ 2705 int 2706 igb_multicst_remove(igb_t *igb, const uint8_t *multiaddr) 2707 { 
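	/* Removal shifts the remaining entries down one slot; the table is then shrunk by MCAST_ALLOC_COUNT slots once enough slack accumulates, on a best-effort (KM_NOSLEEP) basis. */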
2708 struct ether_addr *new_table; 2709 size_t new_len; 2710 size_t old_len; 2711 int i; 2712 2713 ASSERT(mutex_owned(&igb->gen_lock)); 2714 2715 for (i = 0; i < igb->mcast_count; i++) { 2716 if (bcmp(multiaddr, &igb->mcast_table[i], 2717 ETHERADDRL) == 0) { 2718 for (i++; i < igb->mcast_count; i++) { 2719 igb->mcast_table[i - 1] = 2720 igb->mcast_table[i]; 2721 } 2722 igb->mcast_count--; 2723 break; 2724 } 2725 } 2726 2727 if ((igb->mcast_alloc_count - igb->mcast_count) > 2728 MCAST_ALLOC_COUNT) { 2729 old_len = igb->mcast_alloc_count * 2730 sizeof (struct ether_addr); 2731 new_len = (igb->mcast_alloc_count - MCAST_ALLOC_COUNT) * 2732 sizeof (struct ether_addr); 2733 2734 new_table = kmem_alloc(new_len, KM_NOSLEEP); 2735 if (new_table != NULL) { 2736 bcopy(igb->mcast_table, new_table, new_len); 2737 kmem_free(igb->mcast_table, old_len); 2738 igb->mcast_alloc_count -= MCAST_ALLOC_COUNT; 2739 igb->mcast_table = new_table; 2740 } 2741 } 2742 2743 /* 2744 * Update the multicast table in the hardware 2745 */ 2746 igb_setup_multicst(igb); 2747 2748 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) { 2749 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED); 2750 return (EIO); 2751 } 2752 2753 return (0); 2754 } 2755 2756 static void 2757 igb_release_multicast(igb_t *igb) 2758 { 2759 if (igb->mcast_table != NULL) { 2760 kmem_free(igb->mcast_table, 2761 igb->mcast_alloc_count * sizeof (struct ether_addr)); 2762 igb->mcast_table = NULL; 2763 } 2764 } 2765 2766 /* 2767 * igb_setup_multicst - setup multicast data structures 2768 * 2769 * This routine initializes all of the multicast related structures 2770 * and saves them in the hardware registers. 2771 */ 2772 static void 2773 igb_setup_multicst(igb_t *igb) 2774 { 2775 uint8_t *mc_addr_list; 2776 uint32_t mc_addr_count; 2777 struct e1000_hw *hw = &igb->hw; 2778 2779 ASSERT(mutex_owned(&igb->gen_lock)); 2780 ASSERT(igb->mcast_count <= igb->mcast_max_num); 2781 2782 mc_addr_list = (uint8_t *)igb->mcast_table; 2783 mc_addr_count = igb->mcast_count; 2784 2785 /* 2786 * Update the multicast addresses in the MTA registers 2787 */ 2788 e1000_update_mc_addr_list(hw, mc_addr_list, mc_addr_count); 2789 } 2790 2791 /* 2792 * igb_get_conf - Get driver configurations set in driver.conf 2793 * 2794 * This routine gets user-configured values out of the configuration 2795 * file igb.conf. 2796 * 2797 * For each configurable value, there is a minimum, a maximum, and a 2798 * default. 2799 * If the user does not configure a value, use the default. 2800 * If the user configures below the minimum, use the minimum. 2801 * If the user configures above the maximum, use the maximum. 2802 */ 2803 static void 2804 igb_get_conf(igb_t *igb) 2805 { 2806 struct e1000_hw *hw = &igb->hw; 2807 uint32_t default_mtu; 2808 uint32_t flow_control; 2809 uint32_t ring_per_group; 2810 int i; 2811 2812 /* 2813 * The igb driver supports the following user configurations: 2814 * 2815 * Link configurations: 2816 * adv_autoneg_cap 2817 * adv_1000fdx_cap 2818 * adv_100fdx_cap 2819 * adv_100hdx_cap 2820 * adv_10fdx_cap 2821 * adv_10hdx_cap 2822 * Note: 1000hdx is not supported. 2823 * 2824 * Jumbo frame configuration: 2825 * default_mtu 2826 * 2827 * Ethernet flow control configuration: 2828 * flow_control 2829 * 2830 * Multiple ring configurations: 2831 * tx_queue_number 2832 * tx_ring_size 2833 * rx_queue_number 2834 * rx_ring_size 2835 * 2836 * Call igb_get_prop() to get the value for a specific 2837 * configuration parameter.
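 *
 * A hypothetical igb.conf fragment using the property names listed
 * above (values purely illustrative):
 *
 *	default_mtu = 1500;
 *	flow_control = 3;
 *	rx_queue_number = 4;
 *	rx_ring_size = 1024;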
2838 */ 2839 2840 /* 2841 * Link configurations 2842 */ 2843 igb->param_adv_autoneg_cap = igb_get_prop(igb, 2844 PROP_ADV_AUTONEG_CAP, 0, 1, 1); 2845 igb->param_adv_1000fdx_cap = igb_get_prop(igb, 2846 PROP_ADV_1000FDX_CAP, 0, 1, 1); 2847 igb->param_adv_100fdx_cap = igb_get_prop(igb, 2848 PROP_ADV_100FDX_CAP, 0, 1, 1); 2849 igb->param_adv_100hdx_cap = igb_get_prop(igb, 2850 PROP_ADV_100HDX_CAP, 0, 1, 1); 2851 igb->param_adv_10fdx_cap = igb_get_prop(igb, 2852 PROP_ADV_10FDX_CAP, 0, 1, 1); 2853 igb->param_adv_10hdx_cap = igb_get_prop(igb, 2854 PROP_ADV_10HDX_CAP, 0, 1, 1); 2855 2856 /* 2857 * Jumbo frame configurations 2858 */ 2859 default_mtu = igb_get_prop(igb, PROP_DEFAULT_MTU, 2860 MIN_MTU, MAX_MTU, DEFAULT_MTU); 2861 2862 igb->max_frame_size = default_mtu + 2863 sizeof (struct ether_vlan_header) + ETHERFCSL; 2864 2865 /* 2866 * Ethernet flow control configuration 2867 */ 2868 flow_control = igb_get_prop(igb, PROP_FLOW_CONTROL, 2869 e1000_fc_none, 4, e1000_fc_full); 2870 if (flow_control == 4) 2871 flow_control = e1000_fc_default; 2872 2873 hw->fc.requested_mode = flow_control; 2874 2875 /* 2876 * Multiple ring configurations 2877 */ 2878 igb->tx_ring_size = igb_get_prop(igb, PROP_TX_RING_SIZE, 2879 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE); 2880 igb->rx_ring_size = igb_get_prop(igb, PROP_RX_RING_SIZE, 2881 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE); 2882 2883 igb->mr_enable = igb_get_prop(igb, PROP_MR_ENABLE, 0, 1, 0); 2884 igb->num_rx_groups = igb_get_prop(igb, PROP_RX_GROUP_NUM, 2885 MIN_RX_GROUP_NUM, MAX_RX_GROUP_NUM, DEFAULT_RX_GROUP_NUM); 2886 /* 2887 * Currently we do not support VMDq for 82576 and 82580; 2888 * for e1000_82576 and newer chips, force num_rx_groups to 1. 2889 */ 2890 if (hw->mac.type >= e1000_82576) 2891 igb->num_rx_groups = 1; 2892 2893 if (igb->mr_enable) { 2894 igb->num_tx_rings = igb->capab->def_tx_que_num; 2895 igb->num_rx_rings = igb->capab->def_rx_que_num; 2896 } else { 2897 igb->num_tx_rings = 1; 2898 igb->num_rx_rings = 1; 2899 2900 if (igb->num_rx_groups > 1) { 2901 igb_error(igb, 2902 "Invalid rx groups number. Please enable multiple " 2903 "rings first"); 2904 igb->num_rx_groups = 1; 2905 } 2906 } 2907 2908 /* 2909 * Check the divisibility between rx rings and rx groups. 2910 */ 2911 for (i = igb->num_rx_groups; i > 0; i--) { 2912 if ((igb->num_rx_rings % i) == 0) 2913 break; 2914 } 2915 if (i != igb->num_rx_groups) { 2916 igb_error(igb, 2917 "Invalid rx groups number. Downgrade the rx group " 2918 "number to %d.", i); 2919 igb->num_rx_groups = i; 2920 } 2921 2922 /* 2923 * Get the ring number per group. 2924 */ 2925 ring_per_group = igb->num_rx_rings / igb->num_rx_groups; 2926 2927 if (igb->num_rx_groups == 1) { 2928 /* 2929 * One rx ring group, the rx ring number is num_rx_rings. 2930 */ 2931 igb->vmdq_mode = E1000_VMDQ_OFF; 2932 } else if (ring_per_group == 1) { 2933 /* 2934 * Multiple rx groups, each group has one rx ring. 2935 */ 2936 igb->vmdq_mode = E1000_VMDQ_MAC; 2937 } else { 2938 /* 2939 * Multiple groups and multiple rings. 2940 */ 2941 igb->vmdq_mode = E1000_VMDQ_MAC_RSS; 2942 } 2943 2944 /* 2945 * Tunable used to force an interrupt type. The only use is 2946 * for testing of the lesser interrupt types.
2947 * 0 = don't force interrupt type 2948 * 1 = force interrupt type MSIX 2949 * 2 = force interrupt type MSI 2950 * 3 = force interrupt type Legacy 2951 */ 2952 igb->intr_force = igb_get_prop(igb, PROP_INTR_FORCE, 2953 IGB_INTR_NONE, IGB_INTR_LEGACY, IGB_INTR_NONE); 2954 2955 igb->tx_hcksum_enable = igb_get_prop(igb, PROP_TX_HCKSUM_ENABLE, 2956 0, 1, 1); 2957 igb->rx_hcksum_enable = igb_get_prop(igb, PROP_RX_HCKSUM_ENABLE, 2958 0, 1, 1); 2959 igb->lso_enable = igb_get_prop(igb, PROP_LSO_ENABLE, 2960 0, 1, 1); 2961 igb->tx_head_wb_enable = igb_get_prop(igb, PROP_TX_HEAD_WB_ENABLE, 2962 0, 1, 1); 2963 2964 /* 2965 * igb LSO needs the tx h/w checksum support. 2966 * Here LSO will be disabled if tx h/w checksum has been disabled. 2967 */ 2968 if (igb->tx_hcksum_enable == B_FALSE) 2969 igb->lso_enable = B_FALSE; 2970 2971 igb->tx_copy_thresh = igb_get_prop(igb, PROP_TX_COPY_THRESHOLD, 2972 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD, 2973 DEFAULT_TX_COPY_THRESHOLD); 2974 igb->tx_recycle_thresh = igb_get_prop(igb, PROP_TX_RECYCLE_THRESHOLD, 2975 MIN_TX_RECYCLE_THRESHOLD, MAX_TX_RECYCLE_THRESHOLD, 2976 DEFAULT_TX_RECYCLE_THRESHOLD); 2977 igb->tx_overload_thresh = igb_get_prop(igb, PROP_TX_OVERLOAD_THRESHOLD, 2978 MIN_TX_OVERLOAD_THRESHOLD, MAX_TX_OVERLOAD_THRESHOLD, 2979 DEFAULT_TX_OVERLOAD_THRESHOLD); 2980 igb->tx_resched_thresh = igb_get_prop(igb, PROP_TX_RESCHED_THRESHOLD, 2981 MIN_TX_RESCHED_THRESHOLD, 2982 MIN(igb->tx_ring_size, MAX_TX_RESCHED_THRESHOLD), 2983 igb->tx_ring_size > DEFAULT_TX_RESCHED_THRESHOLD ? 2984 DEFAULT_TX_RESCHED_THRESHOLD : DEFAULT_TX_RESCHED_THRESHOLD_LOW); 2985 2986 igb->rx_copy_thresh = igb_get_prop(igb, PROP_RX_COPY_THRESHOLD, 2987 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD, 2988 DEFAULT_RX_COPY_THRESHOLD); 2989 igb->rx_limit_per_intr = igb_get_prop(igb, PROP_RX_LIMIT_PER_INTR, 2990 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR, 2991 DEFAULT_RX_LIMIT_PER_INTR); 2992 2993 igb->intr_throttling[0] = igb_get_prop(igb, PROP_INTR_THROTTLING, 2994 igb->capab->min_intr_throttle, 2995 igb->capab->max_intr_throttle, 2996 igb->capab->def_intr_throttle); 2997 2998 /* 2999 * Max number of multicast addresses 3000 */ 3001 igb->mcast_max_num = 3002 igb_get_prop(igb, PROP_MCAST_MAX_NUM, 3003 MIN_MCAST_NUM, MAX_MCAST_NUM, DEFAULT_MCAST_NUM); 3004 } 3005 3006 /* 3007 * igb_get_prop - Get a property value out of the configuration file igb.conf 3008 * 3009 * Caller provides the name of the property, a default value, a minimum 3010 * value, and a maximum value. 3011 * 3012 * Return configured value of the property, with default, minimum and 3013 * maximum properly applied. 
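 * For example, with minval 1, maxval 4096 and defval 512: an unset
 * property yields 512, a configured 100000 is clamped to 4096, and a
 * configured 0 is raised to 1.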
3014 */ 3015 static int 3016 igb_get_prop(igb_t *igb, 3017 char *propname, /* name of the property */ 3018 int minval, /* minimum acceptable value */ 3019 int maxval, /* maximum acceptable value */ 3020 int defval) /* default value */ 3021 { 3022 int value; 3023 3024 /* 3025 * Call ddi_prop_get_int() to read the conf settings 3026 */ 3027 value = ddi_prop_get_int(DDI_DEV_T_ANY, igb->dip, 3028 DDI_PROP_DONTPASS, propname, defval); 3029 3030 if (value > maxval) 3031 value = maxval; 3032 3033 if (value < minval) 3034 value = minval; 3035 3036 return (value); 3037 } 3038 3039 /* 3040 * igb_setup_link - Use the link properties to set up the link 3041 */ 3042 int 3043 igb_setup_link(igb_t *igb, boolean_t setup_hw) 3044 { 3045 struct e1000_mac_info *mac; 3046 struct e1000_phy_info *phy; 3047 boolean_t invalid; 3048 3049 mac = &igb->hw.mac; 3050 phy = &igb->hw.phy; 3051 invalid = B_FALSE; 3052 3053 if (igb->param_adv_autoneg_cap == 1) { 3054 mac->autoneg = B_TRUE; 3055 phy->autoneg_advertised = 0; 3056 3057 /* 3058 * 1000hdx is not supported for autonegotiation 3059 */ 3060 if (igb->param_adv_1000fdx_cap == 1) 3061 phy->autoneg_advertised |= ADVERTISE_1000_FULL; 3062 3063 if (igb->param_adv_100fdx_cap == 1) 3064 phy->autoneg_advertised |= ADVERTISE_100_FULL; 3065 3066 if (igb->param_adv_100hdx_cap == 1) 3067 phy->autoneg_advertised |= ADVERTISE_100_HALF; 3068 3069 if (igb->param_adv_10fdx_cap == 1) 3070 phy->autoneg_advertised |= ADVERTISE_10_FULL; 3071 3072 if (igb->param_adv_10hdx_cap == 1) 3073 phy->autoneg_advertised |= ADVERTISE_10_HALF; 3074 3075 if (phy->autoneg_advertised == 0) 3076 invalid = B_TRUE; 3077 } else { 3078 mac->autoneg = B_FALSE; 3079 3080 /* 3081 * 1000fdx and 1000hdx are not supported for forced link 3082 */ 3083 if (igb->param_adv_100fdx_cap == 1) 3084 mac->forced_speed_duplex = ADVERTISE_100_FULL; 3085 else if (igb->param_adv_100hdx_cap == 1) 3086 mac->forced_speed_duplex = ADVERTISE_100_HALF; 3087 else if (igb->param_adv_10fdx_cap == 1) 3088 mac->forced_speed_duplex = ADVERTISE_10_FULL; 3089 else if (igb->param_adv_10hdx_cap == 1) 3090 mac->forced_speed_duplex = ADVERTISE_10_HALF; 3091 else 3092 invalid = B_TRUE; 3093 } 3094 3095 if (invalid) { 3096 igb_notice(igb, "Invalid link settings. Setup link to " 3097 "autonegotiation with full link capabilities."); 3098 mac->autoneg = B_TRUE; 3099 phy->autoneg_advertised = ADVERTISE_1000_FULL | 3100 ADVERTISE_100_FULL | ADVERTISE_100_HALF | 3101 ADVERTISE_10_FULL | ADVERTISE_10_HALF; 3102 } 3103 3104 if (setup_hw) { 3105 if (e1000_setup_link(&igb->hw) != E1000_SUCCESS) 3106 return (IGB_FAILURE); 3107 } 3108 3109 return (IGB_SUCCESS); 3110 } 3111 3112 3113 /* 3114 * igb_is_link_up - Check if the link is up 3115 */ 3116 static boolean_t 3117 igb_is_link_up(igb_t *igb) 3118 { 3119 struct e1000_hw *hw = &igb->hw; 3120 boolean_t link_up = B_FALSE; 3121 3122 ASSERT(mutex_owned(&igb->gen_lock)); 3123 3124 /* 3125 * get_link_status is set in the interrupt handler on a link-status-change 3126 * or rx sequence error interrupt. It stays B_TRUE until 3127 * e1000_check_for_link() re-establishes the link; this check applies 3128 * to copper adapters only.
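 * In the copper case below, link_up is therefore the inverse of
 * get_link_status once the check completes; fiber and serdes media
 * report link directly from the STATUS register or mac.serdes_has_link.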
3129 */ 3130 switch (hw->phy.media_type) { 3131 case e1000_media_type_copper: 3132 if (hw->mac.get_link_status) { 3133 (void) e1000_check_for_link(hw); 3134 link_up = !hw->mac.get_link_status; 3135 } else { 3136 link_up = B_TRUE; 3137 } 3138 break; 3139 case e1000_media_type_fiber: 3140 (void) e1000_check_for_link(hw); 3141 link_up = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU); 3142 break; 3143 case e1000_media_type_internal_serdes: 3144 (void) e1000_check_for_link(hw); 3145 link_up = hw->mac.serdes_has_link; 3146 break; 3147 } 3148 3149 return (link_up); 3150 } 3151 3152 /* 3153 * igb_link_check - Link status processing 3154 */ 3155 static boolean_t 3156 igb_link_check(igb_t *igb) 3157 { 3158 struct e1000_hw *hw = &igb->hw; 3159 uint16_t speed = 0, duplex = 0; 3160 boolean_t link_changed = B_FALSE; 3161 3162 ASSERT(mutex_owned(&igb->gen_lock)); 3163 3164 if (igb_is_link_up(igb)) { 3165 /* 3166 * The link is up; check whether it was marked as down earlier 3167 */ 3168 if (igb->link_state != LINK_STATE_UP) { 3169 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex); 3170 igb->link_speed = speed; 3171 igb->link_duplex = duplex; 3172 igb->link_state = LINK_STATE_UP; 3173 link_changed = B_TRUE; 3174 if (!igb->link_complete) 3175 igb_stop_link_timer(igb); 3176 } 3177 } else if (igb->link_complete) { 3178 if (igb->link_state != LINK_STATE_DOWN) { 3179 igb->link_speed = 0; 3180 igb->link_duplex = 0; 3181 igb->link_state = LINK_STATE_DOWN; 3182 link_changed = B_TRUE; 3183 } 3184 } 3185 3186 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) { 3187 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED); 3188 return (B_FALSE); 3189 } 3190 3191 return (link_changed); 3192 } 3193 3194 /* 3195 * igb_local_timer - driver watchdog function 3196 * 3197 * This function handles the hardware stall check, the link status 3198 * check and other periodic routines. 3199 */ 3200 static void 3201 igb_local_timer(void *arg) 3202 { 3203 igb_t *igb = (igb_t *)arg; 3204 boolean_t link_changed = B_FALSE; 3205 3206 if (igb->igb_state & IGB_ERROR) { 3207 igb->reset_count++; 3208 if (igb_reset(igb) == IGB_SUCCESS) 3209 ddi_fm_service_impact(igb->dip, DDI_SERVICE_RESTORED); 3210 3211 igb_restart_watchdog_timer(igb); 3212 return; 3213 } 3214 3215 if (igb_stall_check(igb) || (igb->igb_state & IGB_STALL)) { 3216 igb_fm_ereport(igb, DDI_FM_DEVICE_STALL); 3217 ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST); 3218 igb->reset_count++; 3219 if (igb_reset(igb) == IGB_SUCCESS) 3220 ddi_fm_service_impact(igb->dip, DDI_SERVICE_RESTORED); 3221 3222 igb_restart_watchdog_timer(igb); 3223 return; 3224 } 3225 3226 mutex_enter(&igb->gen_lock); 3227 if (!(igb->igb_state & IGB_SUSPENDED) && (igb->igb_state & IGB_STARTED)) 3228 link_changed = igb_link_check(igb); 3229 mutex_exit(&igb->gen_lock); 3230 3231 if (link_changed) 3232 mac_link_update(igb->mac_hdl, igb->link_state); 3233 3234 igb_restart_watchdog_timer(igb); 3235 } 3236 3237 /* 3238 * igb_link_timer - link setup timer function 3239 * 3240 * It is called when the link setup timer expires, which indicates that 3241 * link setup has completed. The link state is not updated 3242 * until link setup completes, and it is not sent 3243 * to the upper layer through mac_link_update() in this function; it is 3244 * updated in the local timer routine or the interrupt service routine 3245 * after the interface is started (plumbed).
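 * Once link_complete is set B_TRUE here, igb_link_check() is allowed to
 * report LINK_STATE_DOWN rather than waiting for link setup to finish.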
3246 */ 3247 static void 3248 igb_link_timer(void *arg) 3249 { 3250 igb_t *igb = (igb_t *)arg; 3251 3252 mutex_enter(&igb->link_lock); 3253 igb->link_complete = B_TRUE; 3254 igb->link_tid = 0; 3255 mutex_exit(&igb->link_lock); 3256 } 3257 /* 3258 * igb_stall_check - check for transmit stall 3259 * 3260 * This function checks if the adapter is stalled (in transmit). 3261 * 3262 * It is called each time the watchdog timeout is invoked. 3263 * If the transmit descriptor reclaim continuously fails, 3264 * the watchdog value increments by 1. If the watchdog 3265 * value exceeds the threshold, the adapter is assumed to 3266 * have stalled and needs to be reset. 3267 */ 3268 static boolean_t 3269 igb_stall_check(igb_t *igb) 3270 { 3271 igb_tx_ring_t *tx_ring; 3272 struct e1000_hw *hw = &igb->hw; 3273 boolean_t result; 3274 int i; 3275 3276 if (igb->link_state != LINK_STATE_UP) 3277 return (B_FALSE); 3278 3279 /* 3280 * If any tx ring is stalled, we'll reset the chipset 3281 */ 3282 result = B_FALSE; 3283 for (i = 0; i < igb->num_tx_rings; i++) { 3284 tx_ring = &igb->tx_rings[i]; 3285 3286 if (tx_ring->recycle_fail > 0) 3287 tx_ring->stall_watchdog++; 3288 else 3289 tx_ring->stall_watchdog = 0; 3290 3291 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { 3292 result = B_TRUE; 3293 if (hw->mac.type == e1000_82580) { 3294 hw->dev_spec._82575.global_device_reset 3295 = B_TRUE; 3296 } 3297 break; 3298 } 3299 } 3300 3301 if (result) { 3302 tx_ring->stall_watchdog = 0; 3303 tx_ring->recycle_fail = 0; 3304 } 3305 3306 return (result); 3307 } 3308 3309 3310 /* 3311 * is_valid_mac_addr - Check if the mac address is valid 3312 */ 3313 static boolean_t 3314 is_valid_mac_addr(uint8_t *mac_addr) 3315 { 3316 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 3317 const uint8_t addr_test2[6] = 3318 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3319 3320 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 3321 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 3322 return (B_FALSE); 3323 3324 return (B_TRUE); 3325 } 3326 3327 static boolean_t 3328 igb_find_mac_address(igb_t *igb) 3329 { 3330 struct e1000_hw *hw = &igb->hw; 3331 #ifdef __sparc 3332 uchar_t *bytes; 3333 struct ether_addr sysaddr; 3334 uint_t nelts; 3335 int err; 3336 boolean_t found = B_FALSE; 3337 3338 /* 3339 * The "vendor's factory-set address" may already have 3340 * been extracted from the chip, but if the property 3341 * "local-mac-address" is set we use that instead. 3342 * 3343 * We check whether it looks like an array of 6 3344 * bytes (which it should, if OBP set it). If we can't 3345 * make sense of it this way, we'll ignore it. 3346 */ 3347 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, igb->dip, 3348 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 3349 if (err == DDI_PROP_SUCCESS) { 3350 if (nelts == ETHERADDRL) { 3351 while (nelts--) 3352 hw->mac.addr[nelts] = bytes[nelts]; 3353 found = B_TRUE; 3354 } 3355 ddi_prop_free(bytes); 3356 } 3357 3358 /* 3359 * Look up the OBP property "local-mac-address?". If the user has set 3360 * 'local-mac-address? = false', use "the system address" instead.
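 * (On sparc, localetheraddr() returns the system-wide factory MAC
 * address maintained by the PROM.)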
3361 */ 3362 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, igb->dip, 0, 3363 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 3364 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 3365 if (localetheraddr(NULL, &sysaddr) != 0) { 3366 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 3367 found = B_TRUE; 3368 } 3369 } 3370 ddi_prop_free(bytes); 3371 } 3372 3373 /* 3374 * Finally(!), if there's a valid "mac-address" property (created 3375 * if we netbooted from this interface), we must use this instead 3376 * of any of the above to ensure that the NFS/install server doesn't 3377 * get confused by the address changing as Solaris takes over! 3378 */ 3379 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, igb->dip, 3380 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 3381 if (err == DDI_PROP_SUCCESS) { 3382 if (nelts == ETHERADDRL) { 3383 while (nelts--) 3384 hw->mac.addr[nelts] = bytes[nelts]; 3385 found = B_TRUE; 3386 } 3387 ddi_prop_free(bytes); 3388 } 3389 3390 if (found) { 3391 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 3392 return (B_TRUE); 3393 } 3394 #endif 3395 3396 /* 3397 * Read the device MAC address from the EEPROM 3398 */ 3399 if (e1000_read_mac_addr(hw) != E1000_SUCCESS) 3400 return (B_FALSE); 3401 3402 return (B_TRUE); 3403 } 3404 3405 #pragma inline(igb_arm_watchdog_timer) 3406 3407 static void 3408 igb_arm_watchdog_timer(igb_t *igb) 3409 { 3410 /* 3411 * Fire a watchdog timer 3412 */ 3413 igb->watchdog_tid = 3414 timeout(igb_local_timer, 3415 (void *)igb, 1 * drv_usectohz(1000000)); 3416 3417 } 3418 3419 /* 3420 * igb_enable_watchdog_timer - Enable and start the driver watchdog timer 3421 */ 3422 void 3423 igb_enable_watchdog_timer(igb_t *igb) 3424 { 3425 mutex_enter(&igb->watchdog_lock); 3426 3427 if (!igb->watchdog_enable) { 3428 igb->watchdog_enable = B_TRUE; 3429 igb->watchdog_start = B_TRUE; 3430 igb_arm_watchdog_timer(igb); 3431 } 3432 3433 mutex_exit(&igb->watchdog_lock); 3434 3435 } 3436 3437 /* 3438 * igb_disable_watchdog_timer - Disable and stop the driver watchdog timer 3439 */ 3440 void 3441 igb_disable_watchdog_timer(igb_t *igb) 3442 { 3443 timeout_id_t tid; 3444 3445 mutex_enter(&igb->watchdog_lock); 3446 3447 igb->watchdog_enable = B_FALSE; 3448 igb->watchdog_start = B_FALSE; 3449 tid = igb->watchdog_tid; 3450 igb->watchdog_tid = 0; 3451 3452 mutex_exit(&igb->watchdog_lock); 3453 3454 if (tid != 0) 3455 (void) untimeout(tid); 3456 3457 } 3458 3459 /* 3460 * igb_start_watchdog_timer - Start the driver watchdog timer 3461 */ 3462 static void 3463 igb_start_watchdog_timer(igb_t *igb) 3464 { 3465 mutex_enter(&igb->watchdog_lock); 3466 3467 if (igb->watchdog_enable) { 3468 if (!igb->watchdog_start) { 3469 igb->watchdog_start = B_TRUE; 3470 igb_arm_watchdog_timer(igb); 3471 } 3472 } 3473 3474 mutex_exit(&igb->watchdog_lock); 3475 } 3476 3477 /* 3478 * igb_restart_watchdog_timer - Restart the driver watchdog timer 3479 */ 3480 static void 3481 igb_restart_watchdog_timer(igb_t *igb) 3482 { 3483 mutex_enter(&igb->watchdog_lock); 3484 3485 if (igb->watchdog_start) 3486 igb_arm_watchdog_timer(igb); 3487 3488 mutex_exit(&igb->watchdog_lock); 3489 } 3490 3491 /* 3492 * igb_stop_watchdog_timer - Stop the driver watchdog timer 3493 */ 3494 static void 3495 igb_stop_watchdog_timer(igb_t *igb) 3496 { 3497 timeout_id_t tid; 3498 3499 mutex_enter(&igb->watchdog_lock); 3500 3501 igb->watchdog_start = B_FALSE; 3502 tid = igb->watchdog_tid; 3503 igb->watchdog_tid = 0; 3504 3505 mutex_exit(&igb->watchdog_lock); 3506 3507 if (tid != 0) 3508 (void) 
untimeout(tid); 3509 } 3510 3511 /* 3512 * igb_start_link_timer - Start the link setup timer 3513 */ 3514 static void 3515 igb_start_link_timer(struct igb *igb) 3516 { 3517 struct e1000_hw *hw = &igb->hw; 3518 clock_t link_timeout; 3519 3520 if (hw->mac.autoneg) 3521 link_timeout = PHY_AUTO_NEG_LIMIT * 3522 drv_usectohz(100000); 3523 else 3524 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000); 3525 3526 mutex_enter(&igb->link_lock); 3527 if (hw->phy.autoneg_wait_to_complete) { 3528 igb->link_complete = B_TRUE; 3529 } else { 3530 igb->link_complete = B_FALSE; 3531 igb->link_tid = timeout(igb_link_timer, (void *)igb, 3532 link_timeout); 3533 } 3534 mutex_exit(&igb->link_lock); 3535 } 3536 3537 /* 3538 * igb_stop_link_timer - Stop the link setup timer 3539 */ 3540 static void 3541 igb_stop_link_timer(struct igb *igb) 3542 { 3543 timeout_id_t tid; 3544 3545 mutex_enter(&igb->link_lock); 3546 igb->link_complete = B_TRUE; 3547 tid = igb->link_tid; 3548 igb->link_tid = 0; 3549 mutex_exit(&igb->link_lock); 3550 3551 if (tid != 0) 3552 (void) untimeout(tid); 3553 } 3554 3555 /* 3556 * igb_disable_adapter_interrupts - Clear/disable all hardware interrupts 3557 */ 3558 static void 3559 igb_disable_adapter_interrupts(igb_t *igb) 3560 { 3561 struct e1000_hw *hw = &igb->hw; 3562 3563 /* 3564 * Set the IMC register to mask all the interrupts, 3565 * including the tx interrupts. 3566 */ 3567 E1000_WRITE_REG(hw, E1000_IMC, ~0); 3568 E1000_WRITE_REG(hw, E1000_IAM, 0); 3569 3570 /* 3571 * Additional disabling for MSI-X 3572 */ 3573 if (igb->intr_type == DDI_INTR_TYPE_MSIX) { 3574 E1000_WRITE_REG(hw, E1000_EIMC, ~0); 3575 E1000_WRITE_REG(hw, E1000_EIAC, 0); 3576 E1000_WRITE_REG(hw, E1000_EIAM, 0); 3577 } 3578 3579 E1000_WRITE_FLUSH(hw); 3580 } 3581 3582 /* 3583 * igb_enable_adapter_interrupts_82580 - Enable NIC interrupts for 82580 3584 */ 3585 static void 3586 igb_enable_adapter_interrupts_82580(igb_t *igb) 3587 { 3588 struct e1000_hw *hw = &igb->hw; 3589 3590 /* Clear any pending interrupts */ 3591 (void) E1000_READ_REG(hw, E1000_ICR); 3592 igb->ims_mask |= E1000_IMS_DRSTA; 3593 3594 if (igb->intr_type == DDI_INTR_TYPE_MSIX) { 3595 3596 /* Interrupt enabling for MSI-X */ 3597 E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask); 3598 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask); 3599 igb->ims_mask = (E1000_IMS_LSC | E1000_IMS_DRSTA); 3600 E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask); 3601 } else { /* Interrupt enabling for MSI and legacy */ 3602 E1000_WRITE_REG(hw, E1000_IVAR0, E1000_IVAR_VALID); 3603 igb->ims_mask = IMS_ENABLE_MASK | E1000_IMS_TXQE; 3604 igb->ims_mask |= E1000_IMS_DRSTA; 3605 E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask); 3606 } 3607 3608 /* Disable auto-mask for ICR interrupt bits */ 3609 E1000_WRITE_REG(hw, E1000_IAM, 0); 3610 3611 E1000_WRITE_FLUSH(hw); 3612 } 3613 3614 /* 3615 * igb_enable_adapter_interrupts_82576 - Enable NIC interrupts for 82576 3616 */ 3617 static void 3618 igb_enable_adapter_interrupts_82576(igb_t *igb) 3619 { 3620 struct e1000_hw *hw = &igb->hw; 3621 3622 /* Clear any pending interrupts */ 3623 (void) E1000_READ_REG(hw, E1000_ICR); 3624 3625 if (igb->intr_type == DDI_INTR_TYPE_MSIX) { 3626 3627 /* Interrupt enabling for MSI-X */ 3628 E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask); 3629 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask); 3630 igb->ims_mask = E1000_IMS_LSC; 3631 E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC); 3632 } else { 3633 /* Interrupt enabling for MSI and legacy */ 3634 E1000_WRITE_REG(hw, E1000_IVAR0, E1000_IVAR_VALID); 3635 igb->ims_mask = 
IMS_ENABLE_MASK | E1000_IMS_TXQE; 3636 E1000_WRITE_REG(hw, E1000_IMS, 3637 (IMS_ENABLE_MASK | E1000_IMS_TXQE)); 3638 } 3639 3640 /* Disable auto-mask for ICR interrupt bits */ 3641 E1000_WRITE_REG(hw, E1000_IAM, 0); 3642 3643 E1000_WRITE_FLUSH(hw); 3644 } 3645 3646 /* 3647 * igb_enable_adapter_interrupts_82575 - Enable NIC interrupts for 82575 3648 */ 3649 static void 3650 igb_enable_adapter_interrupts_82575(igb_t *igb) 3651 { 3652 struct e1000_hw *hw = &igb->hw; 3653 uint32_t reg; 3654 3655 /* Clear any pending interrupts */ 3656 (void) E1000_READ_REG(hw, E1000_ICR); 3657 3658 if (igb->intr_type == DDI_INTR_TYPE_MSIX) { 3659 /* Interrupt enabling for MSI-X */ 3660 E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask); 3661 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask); 3662 igb->ims_mask = E1000_IMS_LSC; 3663 E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC); 3664 3665 /* Enable MSI-X PBA support */ 3666 reg = E1000_READ_REG(hw, E1000_CTRL_EXT); 3667 reg |= E1000_CTRL_EXT_PBA_CLR; 3668 3669 /* Non-selective interrupt clear-on-read */ 3670 reg |= E1000_CTRL_EXT_IRCA; /* Called NSICR in the EAS */ 3671 3672 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); 3673 } else { 3674 /* Interrupt enabling for MSI and legacy */ 3675 igb->ims_mask = IMS_ENABLE_MASK; 3676 E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK); 3677 } 3678 3679 E1000_WRITE_FLUSH(hw); 3680 } 3681 3682 /* 3683 * Loopback Support 3684 */ 3685 static lb_property_t lb_normal = 3686 { normal, "normal", IGB_LB_NONE }; 3687 static lb_property_t lb_external = 3688 { external, "External", IGB_LB_EXTERNAL }; 3689 static lb_property_t lb_phy = 3690 { internal, "PHY", IGB_LB_INTERNAL_PHY }; 3691 static lb_property_t lb_serdes = 3692 { internal, "SerDes", IGB_LB_INTERNAL_SERDES }; 3693 3694 enum ioc_reply 3695 igb_loopback_ioctl(igb_t *igb, struct iocblk *iocp, mblk_t *mp) 3696 { 3697 lb_info_sz_t *lbsp; 3698 lb_property_t *lbpp; 3699 struct e1000_hw *hw; 3700 uint32_t *lbmp; 3701 uint32_t size; 3702 uint32_t value; 3703 3704 hw = &igb->hw; 3705 3706 if (mp->b_cont == NULL) 3707 return (IOC_INVAL); 3708 3709 switch (iocp->ioc_cmd) { 3710 default: 3711 return (IOC_INVAL); 3712 3713 case LB_GET_INFO_SIZE: 3714 size = sizeof (lb_info_sz_t); 3715 if (iocp->ioc_count != size) 3716 return (IOC_INVAL); 3717 3718 value = sizeof (lb_normal); 3719 if (hw->phy.media_type == e1000_media_type_copper) 3720 value += sizeof (lb_phy); 3721 else 3722 value += sizeof (lb_serdes); 3723 value += sizeof (lb_external); 3724 3725 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 3726 *lbsp = value; 3727 break; 3728 3729 case LB_GET_INFO: 3730 value = sizeof (lb_normal); 3731 if (hw->phy.media_type == e1000_media_type_copper) 3732 value += sizeof (lb_phy); 3733 else 3734 value += sizeof (lb_serdes); 3735 value += sizeof (lb_external); 3736 3737 size = value; 3738 if (iocp->ioc_count != size) 3739 return (IOC_INVAL); 3740 3741 value = 0; 3742 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 3743 3744 lbpp[value++] = lb_normal; 3745 if (hw->phy.media_type == e1000_media_type_copper) 3746 lbpp[value++] = lb_phy; 3747 else 3748 lbpp[value++] = lb_serdes; 3749 lbpp[value++] = lb_external; 3750 break; 3751 3752 case LB_GET_MODE: 3753 size = sizeof (uint32_t); 3754 if (iocp->ioc_count != size) 3755 return (IOC_INVAL); 3756 3757 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 3758 *lbmp = igb->loopback_mode; 3759 break; 3760 3761 case LB_SET_MODE: 3762 size = 0; 3763 if (iocp->ioc_count != sizeof (uint32_t)) 3764 return (IOC_INVAL); 3765 3766 lbmp = (uint32_t 
*)(uintptr_t)mp->b_cont->b_rptr; 3767 if (!igb_set_loopback_mode(igb, *lbmp)) 3768 return (IOC_INVAL); 3769 break; 3770 } 3771 3772 iocp->ioc_count = size; 3773 iocp->ioc_error = 0; 3774 3775 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) { 3776 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED); 3777 return (IOC_INVAL); 3778 } 3779 3780 return (IOC_REPLY); 3781 } 3782 3783 /* 3784 * igb_set_loopback_mode - Set up loopback based on the loopback mode 3785 */ 3786 static boolean_t 3787 igb_set_loopback_mode(igb_t *igb, uint32_t mode) 3788 { 3789 struct e1000_hw *hw; 3790 int i; 3791 3792 if (mode == igb->loopback_mode) 3793 return (B_TRUE); 3794 3795 hw = &igb->hw; 3796 3797 igb->loopback_mode = mode; 3798 3799 if (mode == IGB_LB_NONE) { 3800 /* Reset the chip */ 3801 hw->phy.autoneg_wait_to_complete = B_TRUE; 3802 (void) igb_reset(igb); 3803 hw->phy.autoneg_wait_to_complete = B_FALSE; 3804 return (B_TRUE); 3805 } 3806 3807 mutex_enter(&igb->gen_lock); 3808 3809 switch (mode) { 3810 default: 3811 mutex_exit(&igb->gen_lock); 3812 return (B_FALSE); 3813 3814 case IGB_LB_EXTERNAL: 3815 igb_set_external_loopback(igb); 3816 break; 3817 3818 case IGB_LB_INTERNAL_PHY: 3819 igb_set_internal_phy_loopback(igb); 3820 break; 3821 3822 case IGB_LB_INTERNAL_SERDES: 3823 igb_set_internal_serdes_loopback(igb); 3824 break; 3825 } 3826 3827 mutex_exit(&igb->gen_lock); 3828 3829 /* 3830 * When external loopback is set, wait up to 1000 ms for the link to 3831 * come up; testing shows 1000 ms to be sufficient (an experimentally determined value). 3832 */ 3833 if (mode == IGB_LB_EXTERNAL) { 3834 for (i = 0; i <= 10; i++) { 3835 mutex_enter(&igb->gen_lock); 3836 (void) igb_link_check(igb); 3837 mutex_exit(&igb->gen_lock); 3838 3839 if (igb->link_state == LINK_STATE_UP) 3840 break; 3841 3842 msec_delay(100); 3843 } 3844 3845 if (igb->link_state != LINK_STATE_UP) { 3846 /* 3847 * External loopback is not supported; 3848 * reset the driver to loopback none.
3849 */ 3850 igb->loopback_mode = IGB_LB_NONE; 3851 3852 /* Reset the chip */ 3853 hw->phy.autoneg_wait_to_complete = B_TRUE; 3854 (void) igb_reset(igb); 3855 hw->phy.autoneg_wait_to_complete = B_FALSE; 3856 3857 IGB_DEBUGLOG_0(igb, "Set external loopback failed, " 3858 "reset to loopback none."); 3859 3860 return (B_FALSE); 3861 } 3862 } 3863 3864 return (B_TRUE); 3865 } 3866 3867 /* 3868 * igb_set_external_loopback - Set the external loopback mode 3869 */ 3870 static void 3871 igb_set_external_loopback(igb_t *igb) 3872 { 3873 struct e1000_hw *hw; 3874 uint32_t ctrl_ext; 3875 3876 hw = &igb->hw; 3877 3878 /* Set link mode to PHY (00b) in the Extended Control register */ 3879 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 3880 ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; 3881 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 3882 3883 (void) e1000_write_phy_reg(hw, 0x0, 0x0140); 3884 (void) e1000_write_phy_reg(hw, 0x9, 0x1a00); 3885 (void) e1000_write_phy_reg(hw, 0x12, 0x1610); 3886 (void) e1000_write_phy_reg(hw, 0x1f37, 0x3f1c); 3887 } 3888 3889 /* 3890 * igb_set_internal_phy_loopback - Set the internal PHY loopback mode 3891 */ 3892 static void 3893 igb_set_internal_phy_loopback(igb_t *igb) 3894 { 3895 struct e1000_hw *hw; 3896 uint32_t ctrl_ext; 3897 uint16_t phy_ctrl; 3898 uint16_t phy_pconf; 3899 3900 hw = &igb->hw; 3901 3902 /* Set link mode to PHY (00b) in the Extended Control register */ 3903 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 3904 ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK; 3905 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 3906 3907 /* 3908 * Set PHY control register (0x4140): 3909 * Set full duplex mode 3910 * Set loopback bit 3911 * Clear auto-neg enable bit 3912 * Set PHY speed 3913 */ 3914 phy_ctrl = MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000 | MII_CR_LOOPBACK; 3915 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl); 3916 3917 /* Set the link disable bit in the Port Configuration register */ 3918 (void) e1000_read_phy_reg(hw, 0x10, &phy_pconf); 3919 phy_pconf |= (uint16_t)1 << 14; 3920 (void) e1000_write_phy_reg(hw, 0x10, phy_pconf); 3921 } 3922 3923 /* 3924 * igb_set_internal_serdes_loopback - Set the internal SerDes loopback mode 3925 */ 3926 static void 3927 igb_set_internal_serdes_loopback(igb_t *igb) 3928 { 3929 struct e1000_hw *hw; 3930 uint32_t ctrl_ext; 3931 uint32_t ctrl; 3932 uint32_t pcs_lctl; 3933 uint32_t connsw; 3934 3935 hw = &igb->hw; 3936 3937 /* Set link mode to SerDes (11b) in the Extended Control register */ 3938 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 3939 ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; 3940 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 3941 3942 /* Configure the SerDes to loopback */ 3943 E1000_WRITE_REG(hw, E1000_SCTL, 0x410); 3944 3945 /* Set Device Control register */ 3946 ctrl = E1000_READ_REG(hw, E1000_CTRL); 3947 ctrl |= (E1000_CTRL_FD | /* Force full duplex */ 3948 E1000_CTRL_SLU); /* Force link up */ 3949 ctrl &= ~(E1000_CTRL_RFCE | /* Disable receive flow control */ 3950 E1000_CTRL_TFCE | /* Disable transmit flow control */ 3951 E1000_CTRL_LRST); /* Clear link reset */ 3952 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 3953 3954 /* Set PCS Link Control register */ 3955 pcs_lctl = E1000_READ_REG(hw, E1000_PCS_LCTL); 3956 pcs_lctl |= (E1000_PCS_LCTL_FORCE_LINK | 3957 E1000_PCS_LCTL_FSD | 3958 E1000_PCS_LCTL_FDV_FULL | 3959 E1000_PCS_LCTL_FLV_LINK_UP); 3960 pcs_lctl &= ~E1000_PCS_LCTL_AN_ENABLE; 3961 E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_lctl); 3962 3963 /* Set the Copper/Fiber Switch Control - CONNSW register */ 3964 
connsw = E1000_READ_REG(hw, E1000_CONNSW); 3965 connsw &= ~E1000_CONNSW_ENRGSRC; 3966 E1000_WRITE_REG(hw, E1000_CONNSW, connsw); 3967 } 3968 3969 #pragma inline(igb_intr_rx_work) 3970 /* 3971 * igb_intr_rx_work - rx processing of ISR 3972 */ 3973 static void 3974 igb_intr_rx_work(igb_rx_ring_t *rx_ring) 3975 { 3976 mblk_t *mp; 3977 3978 mutex_enter(&rx_ring->rx_lock); 3979 mp = igb_rx(rx_ring, IGB_NO_POLL); 3980 mutex_exit(&rx_ring->rx_lock); 3981 3982 if (mp != NULL) 3983 mac_rx_ring(rx_ring->igb->mac_hdl, rx_ring->ring_handle, mp, 3984 rx_ring->ring_gen_num); 3985 } 3986 3987 #pragma inline(igb_intr_tx_work) 3988 /* 3989 * igb_intr_tx_work - tx processing of ISR 3990 */ 3991 static void 3992 igb_intr_tx_work(igb_tx_ring_t *tx_ring) 3993 { 3994 igb_t *igb = tx_ring->igb; 3995 3996 /* Recycle the tx descriptors */ 3997 tx_ring->tx_recycle(tx_ring); 3998 3999 /* Schedule the re-transmit */ 4000 if (tx_ring->reschedule && 4001 (tx_ring->tbd_free >= igb->tx_resched_thresh)) { 4002 tx_ring->reschedule = B_FALSE; 4003 mac_tx_ring_update(tx_ring->igb->mac_hdl, tx_ring->ring_handle); 4004 IGB_DEBUG_STAT(tx_ring->stat_reschedule); 4005 } 4006 } 4007 4008 #pragma inline(igb_intr_link_work) 4009 /* 4010 * igb_intr_link_work - link-status-change processing of ISR 4011 */ 4012 static void 4013 igb_intr_link_work(igb_t *igb) 4014 { 4015 boolean_t link_changed; 4016 4017 igb_stop_watchdog_timer(igb); 4018 4019 mutex_enter(&igb->gen_lock); 4020 4021 /* 4022 * Because we got a link-status-change interrupt, force 4023 * e1000_check_for_link() to look at phy 4024 */ 4025 igb->hw.mac.get_link_status = B_TRUE; 4026 4027 /* igb_link_check takes care of link status change */ 4028 link_changed = igb_link_check(igb); 4029 4030 /* Get new phy state */ 4031 igb_get_phy_state(igb); 4032 4033 mutex_exit(&igb->gen_lock); 4034 4035 if (link_changed) 4036 mac_link_update(igb->mac_hdl, igb->link_state); 4037 4038 igb_start_watchdog_timer(igb); 4039 } 4040 4041 /* 4042 * igb_intr_legacy - Interrupt handler for legacy interrupts 4043 */ 4044 static uint_t 4045 igb_intr_legacy(void *arg1, void *arg2) 4046 { 4047 igb_t *igb = (igb_t *)arg1; 4048 igb_tx_ring_t *tx_ring; 4049 uint32_t icr; 4050 mblk_t *mp; 4051 boolean_t tx_reschedule; 4052 boolean_t link_changed; 4053 uint_t result; 4054 4055 _NOTE(ARGUNUSED(arg2)); 4056 4057 mutex_enter(&igb->gen_lock); 4058 4059 if (igb->igb_state & IGB_SUSPENDED) { 4060 mutex_exit(&igb->gen_lock); 4061 return (DDI_INTR_UNCLAIMED); 4062 } 4063 4064 mp = NULL; 4065 tx_reschedule = B_FALSE; 4066 link_changed = B_FALSE; 4067 icr = E1000_READ_REG(&igb->hw, E1000_ICR); 4068 4069 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) { 4070 mutex_exit(&igb->gen_lock); 4071 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED); 4072 atomic_or_32(&igb->igb_state, IGB_ERROR); 4073 return (DDI_INTR_UNCLAIMED); 4074 } 4075 4076 if (icr & E1000_ICR_INT_ASSERTED) { 4077 /* 4078 * E1000_ICR_INT_ASSERTED bit was set: 4079 * Read(Clear) the ICR, claim this interrupt, 4080 * look for work to do. 
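 * (ICR is read-to-clear: the read above already acknowledged the
 * asserted causes, so every cause bit must be handled from this one
 * snapshot.)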
4081 */ 4082 ASSERT(igb->num_rx_rings == 1); 4083 ASSERT(igb->num_tx_rings == 1); 4084 4085 /* Make sure all interrupt causes cleared */ 4086 (void) E1000_READ_REG(&igb->hw, E1000_EICR); 4087 4088 if (icr & E1000_ICR_RXT0) { 4089 mp = igb_rx(&igb->rx_rings[0], IGB_NO_POLL); 4090 } 4091 4092 if (icr & E1000_ICR_TXDW) { 4093 tx_ring = &igb->tx_rings[0]; 4094 4095 /* Recycle the tx descriptors */ 4096 tx_ring->tx_recycle(tx_ring); 4097 4098 /* Schedule the re-transmit */ 4099 tx_reschedule = (tx_ring->reschedule && 4100 (tx_ring->tbd_free >= igb->tx_resched_thresh)); 4101 } 4102 4103 if (icr & E1000_ICR_LSC) { 4104 /* 4105 * Because we got a link-status-change interrupt, force 4106 * e1000_check_for_link() to look at phy 4107 */ 4108 igb->hw.mac.get_link_status = B_TRUE; 4109 4110 /* igb_link_check takes care of link status change */ 4111 link_changed = igb_link_check(igb); 4112 4113 /* Get new phy state */ 4114 igb_get_phy_state(igb); 4115 } 4116 4117 if (icr & E1000_ICR_DRSTA) { 4118 /* 82580 Full Device Reset needed */ 4119 atomic_or_32(&igb->igb_state, IGB_STALL); 4120 } 4121 4122 result = DDI_INTR_CLAIMED; 4123 } else { 4124 /* 4125 * E1000_ICR_INT_ASSERTED bit was not set: 4126 * Don't claim this interrupt. 4127 */ 4128 result = DDI_INTR_UNCLAIMED; 4129 } 4130 4131 mutex_exit(&igb->gen_lock); 4132 4133 /* 4134 * Do the following work outside of the gen_lock 4135 */ 4136 if (mp != NULL) 4137 mac_rx(igb->mac_hdl, NULL, mp); 4138 4139 if (tx_reschedule) { 4140 tx_ring->reschedule = B_FALSE; 4141 mac_tx_ring_update(igb->mac_hdl, tx_ring->ring_handle); 4142 IGB_DEBUG_STAT(tx_ring->stat_reschedule); 4143 } 4144 4145 if (link_changed) 4146 mac_link_update(igb->mac_hdl, igb->link_state); 4147 4148 return (result); 4149 } 4150 4151 /* 4152 * igb_intr_msi - Interrupt handler for MSI 4153 */ 4154 static uint_t 4155 igb_intr_msi(void *arg1, void *arg2) 4156 { 4157 igb_t *igb = (igb_t *)arg1; 4158 uint32_t icr; 4159 4160 _NOTE(ARGUNUSED(arg2)); 4161 4162 icr = E1000_READ_REG(&igb->hw, E1000_ICR); 4163 4164 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) { 4165 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED); 4166 atomic_or_32(&igb->igb_state, IGB_ERROR); 4167 return (DDI_INTR_CLAIMED); 4168 } 4169 4170 /* Make sure all interrupt causes cleared */ 4171 (void) E1000_READ_REG(&igb->hw, E1000_EICR); 4172 4173 /* 4174 * For MSI interrupt, we have only one vector, 4175 * so we have only one rx ring and one tx ring enabled. 4176 */ 4177 ASSERT(igb->num_rx_rings == 1); 4178 ASSERT(igb->num_tx_rings == 1); 4179 4180 if (icr & E1000_ICR_RXT0) { 4181 igb_intr_rx_work(&igb->rx_rings[0]); 4182 } 4183 4184 if (icr & E1000_ICR_TXDW) { 4185 igb_intr_tx_work(&igb->tx_rings[0]); 4186 } 4187 4188 if (icr & E1000_ICR_LSC) { 4189 igb_intr_link_work(igb); 4190 } 4191 4192 if (icr & E1000_ICR_DRSTA) { 4193 /* 82580 Full Device Reset needed */ 4194 atomic_or_32(&igb->igb_state, IGB_STALL); 4195 } 4196 4197 return (DDI_INTR_CLAIMED); 4198 } 4199 4200 /* 4201 * igb_intr_rx - Interrupt handler for rx 4202 */ 4203 static uint_t 4204 igb_intr_rx(void *arg1, void *arg2) 4205 { 4206 igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)arg1; 4207 4208 _NOTE(ARGUNUSED(arg2)); 4209 4210 /* 4211 * Only used via MSI-X vector so don't check cause bits 4212 * and only clean the given ring. 
/*
 * igb_intr_msi - Interrupt handler for MSI
 */
static uint_t
igb_intr_msi(void *arg1, void *arg2)
{
    igb_t *igb = (igb_t *)arg1;
    uint32_t icr;

    _NOTE(ARGUNUSED(arg2));

    icr = E1000_READ_REG(&igb->hw, E1000_ICR);

    if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
        atomic_or_32(&igb->igb_state, IGB_ERROR);
        return (DDI_INTR_CLAIMED);
    }

    /* Make sure all interrupt causes cleared */
    (void) E1000_READ_REG(&igb->hw, E1000_EICR);

    /*
     * For MSI interrupt, we have only one vector,
     * so we have only one rx ring and one tx ring enabled.
     */
    ASSERT(igb->num_rx_rings == 1);
    ASSERT(igb->num_tx_rings == 1);

    if (icr & E1000_ICR_RXT0) {
        igb_intr_rx_work(&igb->rx_rings[0]);
    }

    if (icr & E1000_ICR_TXDW) {
        igb_intr_tx_work(&igb->tx_rings[0]);
    }

    if (icr & E1000_ICR_LSC) {
        igb_intr_link_work(igb);
    }

    if (icr & E1000_ICR_DRSTA) {
        /* 82580 Full Device Reset needed */
        atomic_or_32(&igb->igb_state, IGB_STALL);
    }

    return (DDI_INTR_CLAIMED);
}

/*
 * igb_intr_rx - Interrupt handler for rx
 */
static uint_t
igb_intr_rx(void *arg1, void *arg2)
{
    igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)arg1;

    _NOTE(ARGUNUSED(arg2));

    /*
     * Only used via MSI-X vector so don't check cause bits
     * and only clean the given ring.
     */
    igb_intr_rx_work(rx_ring);

    return (DDI_INTR_CLAIMED);
}

/*
 * igb_intr_tx - Interrupt handler for tx
 */
static uint_t
igb_intr_tx(void *arg1, void *arg2)
{
    igb_tx_ring_t *tx_ring = (igb_tx_ring_t *)arg1;

    _NOTE(ARGUNUSED(arg2));

    /*
     * Only used via MSI-X vector so don't check cause bits
     * and only clean the given ring.
     */
    igb_intr_tx_work(tx_ring);

    return (DDI_INTR_CLAIMED);
}

/*
 * igb_intr_tx_other - Interrupt handler for both tx and other
 */
static uint_t
igb_intr_tx_other(void *arg1, void *arg2)
{
    igb_t *igb = (igb_t *)arg1;
    uint32_t icr;

    _NOTE(ARGUNUSED(arg2));

    icr = E1000_READ_REG(&igb->hw, E1000_ICR);

    if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
        atomic_or_32(&igb->igb_state, IGB_ERROR);
        return (DDI_INTR_CLAIMED);
    }

    /*
     * Look for tx reclaiming work first. Remember, in this
     * tx/other interrupt sharing case, only one tx ring
     * (ring 0) is used.
     */
    igb_intr_tx_work(&igb->tx_rings[0]);

    /*
     * Check for "other" causes.
     */
    if (icr & E1000_ICR_LSC) {
        igb_intr_link_work(igb);
    }

    /*
     * The DOUTSYNC bit indicates a tx packet dropped because
     * the DMA engine gets "out of sync". There isn't a real fix
     * for this. The Intel recommendation is to count the number
     * of occurrences so the user can detect when it is happening.
     * The issue is non-fatal and there's no recovery action
     * available.
     */
    if (icr & E1000_ICR_DOUTSYNC) {
        IGB_STAT(igb->dout_sync);
    }

    if (icr & E1000_ICR_DRSTA) {
        /* 82580 Full Device Reset needed */
        atomic_or_32(&igb->igb_state, IGB_STALL);
    }

    return (DDI_INTR_CLAIMED);
}
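/*
 * Editorial summary of the MSI-X vector topology served by the handlers
 * above (with R rx rings and T tx rings):
 *
 *	vector 0		igb_intr_tx_other	tx ring 0 + link/other
 *	vectors 1 .. R		igb_intr_rx		one per rx ring
 *	vectors R+1 .. R+T-1	igb_intr_tx		tx rings 1 .. T-1
 *
 * e.g. 4 rx rings and 2 tx rings consume 4 + 2 = 6 vectors in total.
 */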
/*
 * igb_alloc_intrs - Allocate interrupts for the driver
 *
 * Normal sequence is to try MSI-X; if not successful, try MSI;
 * if not successful, try Legacy.
 * igb->intr_force can be used to force the sequence to start with
 * any of the 3 types.
 * If MSI-X is not used, the number of tx/rx rings is forced to 1.
 */
static int
igb_alloc_intrs(igb_t *igb)
{
    dev_info_t *devinfo;
    int intr_types;
    int rc;

    devinfo = igb->dip;

    /* Get supported interrupt types */
    rc = ddi_intr_get_supported_types(devinfo, &intr_types);

    if (rc != DDI_SUCCESS) {
        igb_log(igb,
            "Get supported interrupt types failed: %d", rc);
        return (IGB_FAILURE);
    }
    IGB_DEBUGLOG_1(igb, "Supported interrupt types: %x", intr_types);

    igb->intr_type = 0;

    /* Install MSI-X interrupts */
    if ((intr_types & DDI_INTR_TYPE_MSIX) &&
        (igb->intr_force <= IGB_INTR_MSIX)) {
        rc = igb_alloc_intr_handles(igb, DDI_INTR_TYPE_MSIX);

        if (rc == IGB_SUCCESS)
            return (IGB_SUCCESS);

        igb_log(igb,
            "Allocate MSI-X failed, trying MSI interrupts...");
    }

    /* MSI-X not used, force rings to 1 */
    igb->num_rx_rings = 1;
    igb->num_tx_rings = 1;
    igb_log(igb,
        "MSI-X not used, force rx and tx queue number to 1");

    /* Install MSI interrupts */
    if ((intr_types & DDI_INTR_TYPE_MSI) &&
        (igb->intr_force <= IGB_INTR_MSI)) {
        rc = igb_alloc_intr_handles(igb, DDI_INTR_TYPE_MSI);

        if (rc == IGB_SUCCESS)
            return (IGB_SUCCESS);

        igb_log(igb,
            "Allocate MSI failed, trying Legacy interrupts...");
    }

    /* Install legacy interrupts */
    if (intr_types & DDI_INTR_TYPE_FIXED) {
        rc = igb_alloc_intr_handles(igb, DDI_INTR_TYPE_FIXED);

        if (rc == IGB_SUCCESS)
            return (IGB_SUCCESS);

        igb_log(igb,
            "Allocate Legacy interrupts failed");
    }

    /* If none of the 3 types succeeded, return failure */
    return (IGB_FAILURE);
}
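/*
 * Editorial note on the fallback above: igb->intr_force is compared
 * against the type being tried, so a forced value above IGB_INTR_MSIX
 * skips the MSI-X branch and a value above IGB_INTR_MSI leaves only the
 * fixed (legacy) branch; in every non-MSI-X outcome num_rx_rings and
 * num_tx_rings have already been reset to 1.
 */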
" 4426 "Return: %d, count: %d", rc, count); 4427 return (IGB_FAILURE); 4428 } 4429 IGB_DEBUGLOG_1(igb, "interrupts supported: %d", count); 4430 4431 /* 4432 * Get number of available interrupts 4433 */ 4434 rc = ddi_intr_get_navail(devinfo, intr_type, &avail); 4435 if ((rc != DDI_SUCCESS) || (avail < minimum)) { 4436 igb_log(igb, 4437 "Get available interrupt number failed. " 4438 "Return: %d, available: %d", rc, avail); 4439 return (IGB_FAILURE); 4440 } 4441 IGB_DEBUGLOG_1(igb, "interrupts available: %d", avail); 4442 4443 if (avail < request) { 4444 igb_log(igb, "Request %d handles, %d available", 4445 request, avail); 4446 request = avail; 4447 } 4448 4449 actual = 0; 4450 igb->intr_cnt = 0; 4451 4452 /* 4453 * Allocate an array of interrupt handles 4454 */ 4455 igb->intr_size = request * sizeof (ddi_intr_handle_t); 4456 igb->htable = kmem_alloc(igb->intr_size, KM_SLEEP); 4457 4458 rc = ddi_intr_alloc(devinfo, igb->htable, intr_type, 0, 4459 request, &actual, DDI_INTR_ALLOC_NORMAL); 4460 if (rc != DDI_SUCCESS) { 4461 igb_log(igb, "Allocate interrupts failed. " 4462 "return: %d, request: %d, actual: %d", 4463 rc, request, actual); 4464 goto alloc_handle_fail; 4465 } 4466 IGB_DEBUGLOG_1(igb, "interrupts actually allocated: %d", actual); 4467 4468 igb->intr_cnt = actual; 4469 4470 if (actual < minimum) { 4471 igb_log(igb, "Insufficient interrupt handles allocated: %d", 4472 actual); 4473 goto alloc_handle_fail; 4474 } 4475 4476 /* 4477 * For MSI-X, actual might force us to reduce number of tx & rx rings 4478 */ 4479 if ((intr_type == DDI_INTR_TYPE_MSIX) && (orig > actual)) { 4480 diff = orig - actual; 4481 if (diff < igb->num_tx_rings) { 4482 igb_log(igb, 4483 "MSI-X vectors force Tx queue number to %d", 4484 igb->num_tx_rings - diff); 4485 igb->num_tx_rings -= diff; 4486 } else { 4487 igb_log(igb, 4488 "MSI-X vectors force Tx queue number to 1"); 4489 igb->num_tx_rings = 1; 4490 4491 igb_log(igb, 4492 "MSI-X vectors force Rx queue number to %d", 4493 actual - 1); 4494 igb->num_rx_rings = actual - 1; 4495 } 4496 } 4497 4498 /* 4499 * Get priority for first vector, assume remaining are all the same 4500 */ 4501 rc = ddi_intr_get_pri(igb->htable[0], &igb->intr_pri); 4502 if (rc != DDI_SUCCESS) { 4503 igb_log(igb, 4504 "Get interrupt priority failed: %d", rc); 4505 goto alloc_handle_fail; 4506 } 4507 4508 rc = ddi_intr_get_cap(igb->htable[0], &igb->intr_cap); 4509 if (rc != DDI_SUCCESS) { 4510 igb_log(igb, 4511 "Get interrupt cap failed: %d", rc); 4512 goto alloc_handle_fail; 4513 } 4514 4515 igb->intr_type = intr_type; 4516 4517 return (IGB_SUCCESS); 4518 4519 alloc_handle_fail: 4520 igb_rem_intrs(igb); 4521 4522 return (IGB_FAILURE); 4523 } 4524 4525 /* 4526 * igb_add_intr_handlers - Add interrupt handlers based on the interrupt type 4527 * 4528 * Before adding the interrupt handlers, the interrupt vectors have 4529 * been allocated, and the rx/tx rings have also been allocated. 
/*
 * igb_add_intr_handlers - Add interrupt handlers based on the interrupt type
 *
 * Before adding the interrupt handlers, the interrupt vectors have
 * been allocated, and the rx/tx rings have also been allocated.
 */
static int
igb_add_intr_handlers(igb_t *igb)
{
    igb_rx_ring_t *rx_ring;
    igb_tx_ring_t *tx_ring;
    int vector;
    int rc;
    int i;

    vector = 0;

    switch (igb->intr_type) {
    case DDI_INTR_TYPE_MSIX:
        /* Add interrupt handler for tx + other */
        tx_ring = &igb->tx_rings[0];
        rc = ddi_intr_add_handler(igb->htable[vector],
            (ddi_intr_handler_t *)igb_intr_tx_other,
            (void *)igb, NULL);

        if (rc != DDI_SUCCESS) {
            igb_log(igb,
                "Add tx/other interrupt handler failed: %d", rc);
            return (IGB_FAILURE);
        }
        tx_ring->intr_vector = vector;
        vector++;

        /* Add interrupt handler for each rx ring */
        for (i = 0; i < igb->num_rx_rings; i++) {
            rx_ring = &igb->rx_rings[i];

            rc = ddi_intr_add_handler(igb->htable[vector],
                (ddi_intr_handler_t *)igb_intr_rx,
                (void *)rx_ring, NULL);

            if (rc != DDI_SUCCESS) {
                igb_log(igb,
                    "Add rx interrupt handler failed. "
                    "return: %d, rx ring: %d", rc, i);
                for (vector--; vector >= 0; vector--) {
                    (void) ddi_intr_remove_handler(
                        igb->htable[vector]);
                }
                return (IGB_FAILURE);
            }

            rx_ring->intr_vector = vector;

            vector++;
        }

        /* Add interrupt handler for each tx ring from the 2nd ring */
        for (i = 1; i < igb->num_tx_rings; i++) {
            tx_ring = &igb->tx_rings[i];

            rc = ddi_intr_add_handler(igb->htable[vector],
                (ddi_intr_handler_t *)igb_intr_tx,
                (void *)tx_ring, NULL);

            if (rc != DDI_SUCCESS) {
                igb_log(igb,
                    "Add tx interrupt handler failed. "
                    "return: %d, tx ring: %d", rc, i);
                for (vector--; vector >= 0; vector--) {
                    (void) ddi_intr_remove_handler(
                        igb->htable[vector]);
                }
                return (IGB_FAILURE);
            }

            tx_ring->intr_vector = vector;

            vector++;
        }

        break;

    case DDI_INTR_TYPE_MSI:
        /* Add the interrupt handler for the only vector */
        rc = ddi_intr_add_handler(igb->htable[vector],
            (ddi_intr_handler_t *)igb_intr_msi,
            (void *)igb, NULL);

        if (rc != DDI_SUCCESS) {
            igb_log(igb,
                "Add MSI interrupt handler failed: %d", rc);
            return (IGB_FAILURE);
        }

        rx_ring = &igb->rx_rings[0];
        rx_ring->intr_vector = vector;

        vector++;
        break;

    case DDI_INTR_TYPE_FIXED:
        /* Add the interrupt handler for the only vector */
        rc = ddi_intr_add_handler(igb->htable[vector],
            (ddi_intr_handler_t *)igb_intr_legacy,
            (void *)igb, NULL);

        if (rc != DDI_SUCCESS) {
            igb_log(igb,
                "Add legacy interrupt handler failed: %d", rc);
            return (IGB_FAILURE);
        }

        rx_ring = &igb->rx_rings[0];
        rx_ring->intr_vector = vector;

        vector++;
        break;

    default:
        return (IGB_FAILURE);
    }

    ASSERT(vector == igb->intr_cnt);

    return (IGB_SUCCESS);
}
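/*
 * Editorial note: the (ddi_intr_handler_t *) casts above are needed
 * because igb_intr_rx()/igb_intr_tx() take ring pointers while
 * ddi_intr_add_handler() expects the generic
 * uint_t (*)(caddr_t arg1, caddr_t arg2) handler signature.
 */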
/*
 * igb_setup_msix_82575 - setup 82575 adapter to use MSI-X interrupts
 *
 * For each vector enabled on the adapter, set the MSIXBM register accordingly
 */
static void
igb_setup_msix_82575(igb_t *igb)
{
    uint32_t eims = 0;
    int i, vector;
    struct e1000_hw *hw = &igb->hw;

    /*
     * Set vector for tx ring 0 and other causes.
     * NOTE assumption that it is vector 0.
     */
    vector = 0;

    igb->eims_mask = E1000_EICR_TX_QUEUE0 | E1000_EICR_OTHER;
    E1000_WRITE_REG(hw, E1000_MSIXBM(vector), igb->eims_mask);
    vector++;

    for (i = 0; i < igb->num_rx_rings; i++) {
        /*
         * Set vector for each rx ring
         */
        eims = (E1000_EICR_RX_QUEUE0 << i);
        E1000_WRITE_REG(hw, E1000_MSIXBM(vector), eims);

        /*
         * Accumulate bits to enable in
         * igb_enable_adapter_interrupts_82575()
         */
        igb->eims_mask |= eims;

        vector++;
    }

    for (i = 1; i < igb->num_tx_rings; i++) {
        /*
         * Set vector for each tx ring from the 2nd tx ring
         */
        eims = (E1000_EICR_TX_QUEUE0 << i);
        E1000_WRITE_REG(hw, E1000_MSIXBM(vector), eims);

        /*
         * Accumulate bits to enable in
         * igb_enable_adapter_interrupts_82575()
         */
        igb->eims_mask |= eims;

        vector++;
    }

    ASSERT(vector == igb->intr_cnt);

    /*
     * Disable IAM for ICR interrupt bits
     */
    E1000_WRITE_REG(hw, E1000_IAM, 0);
    E1000_WRITE_FLUSH(hw);
}
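/*
 * MSIXBM worked example (editorial): with 2 rx rings and 1 tx ring the
 * routine above programs
 *
 *	MSIXBM(0) = E1000_EICR_TX_QUEUE0 | E1000_EICR_OTHER
 *	MSIXBM(1) = E1000_EICR_RX_QUEUE0
 *	MSIXBM(2) = E1000_EICR_RX_QUEUE0 << 1
 *
 * and igb->eims_mask ends up as the OR of all three values.
 */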
/*
 * igb_setup_msix_82576 - setup 82576 adapter to use MSI-X interrupts
 *
 * 82576 uses a table based method for assigning vectors. Each queue has a
 * single entry in the table to which we write a vector number along with a
 * "valid" bit. The entry is a single byte in a 4-byte register. Vectors
 * take a different position in the 4-byte register depending on whether
 * they are numbered above or below 8.
 */
static void
igb_setup_msix_82576(igb_t *igb)
{
    struct e1000_hw *hw = &igb->hw;
    uint32_t ivar, index, vector;
    int i;

    /* must enable msi-x capability before IVAR settings */
    E1000_WRITE_REG(hw, E1000_GPIE,
        (E1000_GPIE_MSIX_MODE | E1000_GPIE_PBA | E1000_GPIE_NSICR));

    /*
     * Set vector for tx ring 0 and other causes.
     * NOTE assumption that it is vector 0.
     * This is also interdependent with installation of interrupt service
     * routines in igb_add_intr_handlers().
     */

    /* assign "other" causes to vector 0 */
    vector = 0;
    ivar = ((vector | E1000_IVAR_VALID) << 8);
    E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);

    /* assign tx ring 0 to vector 0 */
    ivar = ((vector | E1000_IVAR_VALID) << 8);
    E1000_WRITE_REG(hw, E1000_IVAR0, ivar);

    /* prepare to enable tx & other interrupt causes */
    igb->eims_mask = (1 << vector);

    vector++;
    for (i = 0; i < igb->num_rx_rings; i++) {
        /*
         * Set vector for each rx ring
         */
        index = (i & 0x7);
        ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

        if (i < 8) {
            /* vector goes into low byte of register */
            ivar = ivar & 0xFFFFFF00;
            ivar |= (vector | E1000_IVAR_VALID);
        } else {
            /* vector goes into third byte of register */
            ivar = ivar & 0xFF00FFFF;
            ivar |= ((vector | E1000_IVAR_VALID) << 16);
        }
        E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);

        /* Accumulate interrupt-cause bits to enable */
        igb->eims_mask |= (1 << vector);

        vector++;
    }

    for (i = 1; i < igb->num_tx_rings; i++) {
        /*
         * Set vector for each tx ring from the 2nd tx ring.
         * Note the assumption that tx vectors numerically follow
         * rx vectors.
         */
        index = (i & 0x7);
        ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

        if (i < 8) {
            /* vector goes into second byte of register */
            ivar = ivar & 0xFFFF00FF;
            ivar |= ((vector | E1000_IVAR_VALID) << 8);
        } else {
            /* vector goes into fourth byte of register */
            ivar = ivar & 0x00FFFFFF;
            ivar |= (vector | E1000_IVAR_VALID) << 24;
        }
        E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);

        /* Accumulate interrupt-cause bits to enable */
        igb->eims_mask |= (1 << vector);

        vector++;
    }

    ASSERT(vector == igb->intr_cnt);
}
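/*
 * IVAR worked example for the 82576 scheme above (editorial): rx queue 9
 * maps to IVAR0 index (9 & 0x7) = 1 and, being numbered 8 or above, its
 * vector lands in the third byte of that register; tx queue 1 maps to
 * the same index but occupies the second byte.  Queues 0-7 thus use the
 * low/second bytes and queues 8-15 the third/fourth bytes.
 */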
/*
 * igb_setup_msix_82580 - setup 82580 adapter to use MSI-X interrupts
 *
 * 82580 uses the same table approach as the 82576 but has fewer entries.
 * Each queue has a single entry in the table to which we write a vector
 * number along with a "valid" bit. Vectors take a different position in
 * the register depending on whether they are numbered above or below 4.
 */
static void
igb_setup_msix_82580(igb_t *igb)
{
    struct e1000_hw *hw = &igb->hw;
    uint32_t ivar, index, vector;
    int i;

    /* must enable msi-x capability before IVAR settings */
    E1000_WRITE_REG(hw, E1000_GPIE, (E1000_GPIE_MSIX_MODE |
        E1000_GPIE_PBA | E1000_GPIE_NSICR | E1000_GPIE_EIAME));
    /*
     * Set vector for tx ring 0 and other causes.
     * NOTE assumption that it is vector 0.
     * This is also interdependent with installation of interrupt service
     * routines in igb_add_intr_handlers().
     */

    /* assign "other" causes to vector 0 */
    vector = 0;
    ivar = ((vector | E1000_IVAR_VALID) << 8);
    E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);

    /* assign tx ring 0 to vector 0 */
    ivar = ((vector | E1000_IVAR_VALID) << 8);
    E1000_WRITE_REG(hw, E1000_IVAR0, ivar);

    /* prepare to enable tx & other interrupt causes */
    igb->eims_mask = (1 << vector);

    vector++;

    for (i = 0; i < igb->num_rx_rings; i++) {
        /*
         * Set vector for each rx ring
         */
        index = (i >> 1);
        ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

        if (i & 1) {
            /* vector goes into third byte of register */
            ivar = ivar & 0xFF00FFFF;
            ivar |= ((vector | E1000_IVAR_VALID) << 16);
        } else {
            /* vector goes into low byte of register */
            ivar = ivar & 0xFFFFFF00;
            ivar |= (vector | E1000_IVAR_VALID);
        }
        E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);

        /* Accumulate interrupt-cause bits to enable */
        igb->eims_mask |= (1 << vector);

        vector++;
    }

    for (i = 1; i < igb->num_tx_rings; i++) {
        /*
         * Set vector for each tx ring from the 2nd tx ring.
         * Note the assumption that tx vectors numerically follow
         * rx vectors.
         */
        index = (i >> 1);
        ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);

        if (i & 1) {
            /* vector goes into high byte of register */
            ivar = ivar & 0x00FFFFFF;
            ivar |= ((vector | E1000_IVAR_VALID) << 24);
        } else {
            /* vector goes into second byte of register */
            ivar = ivar & 0xFFFF00FF;
            ivar |= (vector | E1000_IVAR_VALID) << 8;
        }
        E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);

        /* Accumulate interrupt-cause bits to enable */
        igb->eims_mask |= (1 << vector);

        vector++;
    }
    ASSERT(vector == igb->intr_cnt);
}

/*
 * igb_rem_intr_handlers - remove the interrupt handlers
 */
static void
igb_rem_intr_handlers(igb_t *igb)
{
    int i;
    int rc;

    for (i = 0; i < igb->intr_cnt; i++) {
        rc = ddi_intr_remove_handler(igb->htable[i]);
        if (rc != DDI_SUCCESS) {
            IGB_DEBUGLOG_1(igb,
                "Remove intr handler failed: %d", rc);
        }
    }
}

/*
 * igb_rem_intrs - remove the allocated interrupts
 */
static void
igb_rem_intrs(igb_t *igb)
{
    int i;
    int rc;

    for (i = 0; i < igb->intr_cnt; i++) {
        rc = ddi_intr_free(igb->htable[i]);
        if (rc != DDI_SUCCESS) {
            IGB_DEBUGLOG_1(igb,
                "Free intr failed: %d", rc);
        }
    }

    kmem_free(igb->htable, igb->intr_size);
    igb->htable = NULL;
}

/*
 * igb_enable_intrs - enable all the ddi interrupts
 */
static int
igb_enable_intrs(igb_t *igb)
{
    int i;
    int rc;

    /* Enable interrupts */
    if (igb->intr_cap & DDI_INTR_FLAG_BLOCK) {
        /* Call ddi_intr_block_enable() for MSI */
        rc = ddi_intr_block_enable(igb->htable, igb->intr_cnt);
        if (rc != DDI_SUCCESS) {
            igb_log(igb,
                "Enable block intr failed: %d", rc);
            return (IGB_FAILURE);
        }
    } else {
        /* Call ddi_intr_enable() for Legacy/MSI non block enable */
        for (i = 0; i < igb->intr_cnt; i++) {
            rc = ddi_intr_enable(igb->htable[i]);
            if (rc != DDI_SUCCESS) {
                igb_log(igb,
                    "Enable intr failed: %d", rc);
                return (IGB_FAILURE);
            }
        }
    }

    return (IGB_SUCCESS);
}

/*
 * igb_disable_intrs - disable all the ddi interrupts
 */
static int
igb_disable_intrs(igb_t *igb)
{
    int i;
    int rc;

    /* Disable all interrupts */
    if (igb->intr_cap & DDI_INTR_FLAG_BLOCK) {
        rc = ddi_intr_block_disable(igb->htable, igb->intr_cnt);
        if (rc != DDI_SUCCESS) {
            igb_log(igb,
                "Disable block intr failed: %d", rc);
            return (IGB_FAILURE);
        }
    } else {
        for (i = 0; i < igb->intr_cnt; i++) {
            rc = ddi_intr_disable(igb->htable[i]);
            if (rc != DDI_SUCCESS) {
                igb_log(igb,
                    "Disable intr failed: %d", rc);
                return (IGB_FAILURE);
            }
        }
    }

    return (IGB_SUCCESS);
}
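/*
 * Editorial note: ddi_intr_block_enable()/ddi_intr_block_disable() may
 * only be used when the DDI_INTR_FLAG_BLOCK capability was reported for
 * the handles; otherwise each handle is enabled or disabled one at a
 * time, which is why both code paths appear above.
 */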
/*
 * igb_get_phy_state - Get and save the parameters read from PHY registers
 */
static void
igb_get_phy_state(igb_t *igb)
{
    struct e1000_hw *hw = &igb->hw;
    uint16_t phy_ctrl;
    uint16_t phy_status;
    uint16_t phy_an_adv;
    uint16_t phy_an_exp;
    uint16_t phy_ext_status;
    uint16_t phy_1000t_ctrl;
    uint16_t phy_1000t_status;
    uint16_t phy_lp_able;

    ASSERT(mutex_owned(&igb->gen_lock));

    if (hw->phy.media_type == e1000_media_type_copper) {
        (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
        (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
        (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &phy_an_adv);
        (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_an_exp);
        (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, &phy_ext_status);
        (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_1000t_ctrl);
        (void) e1000_read_phy_reg(hw,
            PHY_1000T_STATUS, &phy_1000t_status);
        (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_lp_able);

        igb->param_autoneg_cap =
            (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
        igb->param_pause_cap =
            (phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
        igb->param_asym_pause_cap =
            (phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
        igb->param_1000fdx_cap =
            ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
            (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
        igb->param_1000hdx_cap =
            ((phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
            (phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
        igb->param_100t4_cap =
            (phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
        igb->param_100fdx_cap = ((phy_status & MII_SR_100X_FD_CAPS) ||
            (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
        igb->param_100hdx_cap = ((phy_status & MII_SR_100X_HD_CAPS) ||
            (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
        igb->param_10fdx_cap =
            (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
        igb->param_10hdx_cap =
            (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
        igb->param_rem_fault =
            (phy_status & MII_SR_REMOTE_FAULT) ? 1 : 0;

        igb->param_adv_autoneg_cap = hw->mac.autoneg;
        igb->param_adv_pause_cap =
            (phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
        igb->param_adv_asym_pause_cap =
            (phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
        igb->param_adv_1000hdx_cap =
            (phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
        igb->param_adv_100t4_cap =
            (phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
        igb->param_adv_rem_fault =
            (phy_an_adv & NWAY_AR_REMOTE_FAULT) ? 1 : 0;
        if (igb->param_adv_autoneg_cap == 1) {
            igb->param_adv_1000fdx_cap =
                (phy_1000t_ctrl & CR_1000T_FD_CAPS) ? 1 : 0;
            igb->param_adv_100fdx_cap =
                (phy_an_adv & NWAY_AR_100TX_FD_CAPS) ? 1 : 0;
            igb->param_adv_100hdx_cap =
                (phy_an_adv & NWAY_AR_100TX_HD_CAPS) ? 1 : 0;
            igb->param_adv_10fdx_cap =
                (phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
            igb->param_adv_10hdx_cap =
                (phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
        }

        igb->param_lp_autoneg_cap =
            (phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
        igb->param_lp_pause_cap =
            (phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
        igb->param_lp_asym_pause_cap =
            (phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
        igb->param_lp_1000fdx_cap =
            (phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
        igb->param_lp_1000hdx_cap =
            (phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
        igb->param_lp_100t4_cap =
            (phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
        igb->param_lp_100fdx_cap =
            (phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
        igb->param_lp_100hdx_cap =
            (phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
        igb->param_lp_10fdx_cap =
            (phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
        igb->param_lp_10hdx_cap =
            (phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
        igb->param_lp_rem_fault =
            (phy_lp_able & NWAY_LPAR_REMOTE_FAULT) ? 1 : 0;
    } else {
        /*
         * 1Gig Fiber adapter only offers 1Gig Full Duplex.
         */
        igb->param_autoneg_cap = 0;
        igb->param_pause_cap = 1;
        igb->param_asym_pause_cap = 1;
        igb->param_1000fdx_cap = 1;
        igb->param_1000hdx_cap = 0;
        igb->param_100t4_cap = 0;
        igb->param_100fdx_cap = 0;
        igb->param_100hdx_cap = 0;
        igb->param_10fdx_cap = 0;
        igb->param_10hdx_cap = 0;

        igb->param_adv_autoneg_cap = 0;
        igb->param_adv_pause_cap = 1;
        igb->param_adv_asym_pause_cap = 1;
        igb->param_adv_1000fdx_cap = 1;
        igb->param_adv_1000hdx_cap = 0;
        igb->param_adv_100t4_cap = 0;
        igb->param_adv_100fdx_cap = 0;
        igb->param_adv_100hdx_cap = 0;
        igb->param_adv_10fdx_cap = 0;
        igb->param_adv_10hdx_cap = 0;

        igb->param_lp_autoneg_cap = 0;
        igb->param_lp_pause_cap = 0;
        igb->param_lp_asym_pause_cap = 0;
        igb->param_lp_1000fdx_cap = 0;
        igb->param_lp_1000hdx_cap = 0;
        igb->param_lp_100t4_cap = 0;
        igb->param_lp_100fdx_cap = 0;
        igb->param_lp_100hdx_cap = 0;
        igb->param_lp_10fdx_cap = 0;
        igb->param_lp_10hdx_cap = 0;
        igb->param_lp_rem_fault = 0;
    }
}
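/*
 * Editorial note on the copper branch above: each param_* field is a 0/1
 * snapshot of one capability bit, e.g.
 *
 *	igb->param_10fdx_cap = (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
 *
 * taken while gen_lock is held (see the ASSERT), so callers get a
 * consistent view of the MII/PHY registers at a single point in time.
 */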
/*
 * Synchronize the adv* and en* parameters.
 *
 * See comments in <sys/dld.h> for details of the *_en_*
 * parameters. The usage of ndd for setting adv parameters will
 * synchronize all the en parameters with the adv parameters,
 * implicitly disabling any settings made via dladm.
 */
static void
igb_param_sync(igb_t *igb)
{
    igb->param_en_1000fdx_cap = igb->param_adv_1000fdx_cap;
    igb->param_en_1000hdx_cap = igb->param_adv_1000hdx_cap;
    igb->param_en_100t4_cap = igb->param_adv_100t4_cap;
    igb->param_en_100fdx_cap = igb->param_adv_100fdx_cap;
    igb->param_en_100hdx_cap = igb->param_adv_100hdx_cap;
    igb->param_en_10fdx_cap = igb->param_adv_10fdx_cap;
    igb->param_en_10hdx_cap = igb->param_adv_10hdx_cap;
}

/*
 * igb_get_driver_control
 */
static void
igb_get_driver_control(struct e1000_hw *hw)
{
    uint32_t ctrl_ext;

    /* Notify firmware that the driver is in control of the device */
    ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
    ctrl_ext |= E1000_CTRL_EXT_DRV_LOAD;
    E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
}

/*
 * igb_release_driver_control
 */
static void
igb_release_driver_control(struct e1000_hw *hw)
{
    uint32_t ctrl_ext;

    /* Notify firmware that the driver is no longer in control */
    ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
    ctrl_ext &= ~E1000_CTRL_EXT_DRV_LOAD;
    E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
}

/*
 * igb_atomic_reserve - Atomic decrease operation
 */
int
igb_atomic_reserve(uint32_t *count_p, uint32_t n)
{
    uint32_t oldval;
    uint32_t newval;

    /* ATOMICALLY */
    do {
        oldval = *count_p;
        if (oldval < n)
            return (-1);
        newval = oldval - n;
    } while (atomic_cas_32(count_p, oldval, newval) != oldval);

    return (newval);
}
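/*
 * Usage sketch for igb_atomic_reserve() (editorial; desc_num is a
 * hypothetical count):
 *
 *	if (igb_atomic_reserve(&tx_ring->tbd_free, desc_num) < 0)
 *		return (B_FALSE);	-- not enough free descriptors
 *
 * The compare-and-swap loop above retries on contention and never lets
 * the counter underflow, so concurrent callers cannot over-commit
 * tx descriptors.
 */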
/*
 * FMA support
 */

int
igb_check_acc_handle(ddi_acc_handle_t handle)
{
    ddi_fm_error_t de;

    ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
    ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
    return (de.fme_status);
}

int
igb_check_dma_handle(ddi_dma_handle_t handle)
{
    ddi_fm_error_t de;

    ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
    return (de.fme_status);
}

/*
 * The IO fault service error handling callback function
 */
/*ARGSUSED*/
static int
igb_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
    /*
     * As the driver can always deal with an error in any dma or
     * access handle, we can just return the fme_status value.
     */
    pci_ereport_post(dip, err, NULL);
    return (err->fme_status);
}

static void
igb_fm_init(igb_t *igb)
{
    ddi_iblock_cookie_t iblk;
    int fma_dma_flag;

    /* Only register with IO Fault Services if we have some capability */
    if (igb->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
        igb_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
    } else {
        igb_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
    }

    if (igb->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
        fma_dma_flag = 1;
    } else {
        fma_dma_flag = 0;
    }

    (void) igb_set_fma_flags(fma_dma_flag);

    if (igb->fm_capabilities) {

        /* Register capabilities with IO Fault Services */
        ddi_fm_init(igb->dip, &igb->fm_capabilities, &iblk);

        /*
         * Initialize pci ereport capabilities if ereport capable
         */
        if (DDI_FM_EREPORT_CAP(igb->fm_capabilities) ||
            DDI_FM_ERRCB_CAP(igb->fm_capabilities))
            pci_ereport_setup(igb->dip);

        /*
         * Register error callback if error callback capable
         */
        if (DDI_FM_ERRCB_CAP(igb->fm_capabilities))
            ddi_fm_handler_register(igb->dip,
                igb_fm_error_cb, (void *)igb);
    }
}

static void
igb_fm_fini(igb_t *igb)
{
    /* Only unregister FMA capabilities if we registered some */
    if (igb->fm_capabilities) {

        /*
         * Release any resources allocated by pci_ereport_setup()
         */
        if (DDI_FM_EREPORT_CAP(igb->fm_capabilities) ||
            DDI_FM_ERRCB_CAP(igb->fm_capabilities))
            pci_ereport_teardown(igb->dip);

        /*
         * Un-register error callback if error callback capable
         */
        if (DDI_FM_ERRCB_CAP(igb->fm_capabilities))
            ddi_fm_handler_unregister(igb->dip);

        /* Unregister from IO Fault Services */
        ddi_fm_fini(igb->dip);
    }
}

void
igb_fm_ereport(igb_t *igb, char *detail)
{
    uint64_t ena;
    char buf[FM_MAX_CLASS];

    (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
    ena = fm_ena_generate(0, FM_ENA_FMT1);
    if (DDI_FM_EREPORT_CAP(igb->fm_capabilities)) {
        ddi_fm_ereport_post(igb->dip, buf, ena, DDI_NOSLEEP,
            FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
    }
}
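/*
 * Usage sketch for igb_fm_ereport() (editorial): callers typically post
 * a device ereport and then report the service impact, e.g.
 *
 *	igb_fm_ereport(igb, DDI_FM_DEVICE_INVAL_STATE);
 *	ddi_fm_service_impact(igb->dip, DDI_SERVICE_LOST);
 *
 * The snprintf above expands the class name to "dev.inval_state".
 */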