/*
 * This file is provided under a CDDLv1 license. When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 * http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 */

/*
 * **********************************************************************
 *									*
 * Module Name:								*
 *	e1000g_main.c							*
 *									*
 * Abstract:								*
 *	This file contains the interface routines for the Solaris OS.	*
 *	It has all DDI entry point routines and GLD entry point		*
 *	routines.							*
 *									*
 *	This file also contains the routines that take care of		*
 *	initialization, uninitialization and the interrupt routine.	*
 *									*
 * **********************************************************************
 */

#include <sys/dlpi.h>
#include <sys/mac.h>
#include "e1000g_sw.h"
#include "e1000g_debug.h"

static char ident[] = "Intel PRO/1000 Ethernet";
/* LINTED E_STATIC_UNUSED */
static char e1000g_version[] = "Driver Ver. 
5.4.00"; 56 57 /* 58 * Proto types for DDI entry points 59 */ 60 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t); 61 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t); 62 static int e1000g_quiesce(dev_info_t *); 63 64 /* 65 * init and intr routines prototype 66 */ 67 static int e1000g_resume(dev_info_t *); 68 static int e1000g_suspend(dev_info_t *); 69 static uint_t e1000g_intr_pciexpress(caddr_t, caddr_t); 70 static uint_t e1000g_intr(caddr_t, caddr_t); 71 static void e1000g_intr_work(struct e1000g *, uint32_t); 72 #pragma inline(e1000g_intr_work) 73 static int e1000g_init(struct e1000g *); 74 static int e1000g_start(struct e1000g *, boolean_t); 75 static void e1000g_stop(struct e1000g *, boolean_t); 76 static int e1000g_m_start(void *); 77 static void e1000g_m_stop(void *); 78 static int e1000g_m_promisc(void *, boolean_t); 79 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *); 80 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *); 81 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *); 82 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t, 83 uint_t, const void *); 84 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t, 85 uint_t, void *); 86 static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t, 87 mac_prop_info_handle_t); 88 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t, 89 const void *); 90 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *); 91 static void e1000g_init_locks(struct e1000g *); 92 static void e1000g_destroy_locks(struct e1000g *); 93 static int e1000g_identify_hardware(struct e1000g *); 94 static int e1000g_regs_map(struct e1000g *); 95 static int e1000g_set_driver_params(struct e1000g *); 96 static void e1000g_set_bufsize(struct e1000g *); 97 static int e1000g_register_mac(struct e1000g *); 98 static boolean_t e1000g_rx_drain(struct e1000g *); 99 static boolean_t e1000g_tx_drain(struct e1000g *); 100 static void e1000g_init_unicst(struct e1000g *); 101 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int); 102 static int e1000g_alloc_rx_data(struct e1000g *); 103 static void e1000g_release_multicast(struct e1000g *); 104 static void e1000g_pch_limits(struct e1000g *); 105 static uint32_t e1000g_mtu2maxframe(uint32_t); 106 107 /* 108 * Local routines 109 */ 110 static boolean_t e1000g_reset_adapter(struct e1000g *); 111 static void e1000g_tx_clean(struct e1000g *); 112 static void e1000g_rx_clean(struct e1000g *); 113 static void e1000g_link_timer(void *); 114 static void e1000g_local_timer(void *); 115 static boolean_t e1000g_link_check(struct e1000g *); 116 static boolean_t e1000g_stall_check(struct e1000g *); 117 static void e1000g_smartspeed(struct e1000g *); 118 static void e1000g_get_conf(struct e1000g *); 119 static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int, 120 int *); 121 static void enable_watchdog_timer(struct e1000g *); 122 static void disable_watchdog_timer(struct e1000g *); 123 static void start_watchdog_timer(struct e1000g *); 124 static void restart_watchdog_timer(struct e1000g *); 125 static void stop_watchdog_timer(struct e1000g *); 126 static void stop_link_timer(struct e1000g *); 127 static void stop_82547_timer(e1000g_tx_ring_t *); 128 static void e1000g_force_speed_duplex(struct e1000g *); 129 static void e1000g_setup_max_mtu(struct e1000g *); 130 static void e1000g_get_max_frame_size(struct e1000g *); 131 static boolean_t is_valid_mac_addr(uint8_t *); 132 static void 
e1000g_unattach(dev_info_t *, struct e1000g *); 133 static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *); 134 #ifdef E1000G_DEBUG 135 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *); 136 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *); 137 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *); 138 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *); 139 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *, 140 struct iocblk *, mblk_t *); 141 #endif 142 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *, 143 struct iocblk *, mblk_t *); 144 static boolean_t e1000g_check_loopback_support(struct e1000_hw *); 145 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t); 146 static void e1000g_set_internal_loopback(struct e1000g *); 147 static void e1000g_set_external_loopback_1000(struct e1000g *); 148 static void e1000g_set_external_loopback_100(struct e1000g *); 149 static void e1000g_set_external_loopback_10(struct e1000g *); 150 static int e1000g_add_intrs(struct e1000g *); 151 static int e1000g_intr_add(struct e1000g *, int); 152 static int e1000g_rem_intrs(struct e1000g *); 153 static int e1000g_enable_intrs(struct e1000g *); 154 static int e1000g_disable_intrs(struct e1000g *); 155 static boolean_t e1000g_link_up(struct e1000g *); 156 #ifdef __sparc 157 static boolean_t e1000g_find_mac_address(struct e1000g *); 158 #endif 159 static void e1000g_get_phy_state(struct e1000g *); 160 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 161 const void *impl_data); 162 static void e1000g_fm_init(struct e1000g *Adapter); 163 static void e1000g_fm_fini(struct e1000g *Adapter); 164 static void e1000g_param_sync(struct e1000g *); 165 static void e1000g_get_driver_control(struct e1000_hw *); 166 static void e1000g_release_driver_control(struct e1000_hw *); 167 static void e1000g_restore_promisc(struct e1000g *Adapter); 168 169 char *e1000g_priv_props[] = { 170 "_tx_bcopy_threshold", 171 "_tx_interrupt_enable", 172 "_tx_intr_delay", 173 "_tx_intr_abs_delay", 174 "_rx_bcopy_threshold", 175 "_max_num_rcv_packets", 176 "_rx_intr_delay", 177 "_rx_intr_abs_delay", 178 "_intr_throttling_rate", 179 "_intr_adaptive", 180 "_adv_pause_cap", 181 "_adv_asym_pause_cap", 182 NULL 183 }; 184 185 static struct cb_ops cb_ws_ops = { 186 nulldev, /* cb_open */ 187 nulldev, /* cb_close */ 188 nodev, /* cb_strategy */ 189 nodev, /* cb_print */ 190 nodev, /* cb_dump */ 191 nodev, /* cb_read */ 192 nodev, /* cb_write */ 193 nodev, /* cb_ioctl */ 194 nodev, /* cb_devmap */ 195 nodev, /* cb_mmap */ 196 nodev, /* cb_segmap */ 197 nochpoll, /* cb_chpoll */ 198 ddi_prop_op, /* cb_prop_op */ 199 NULL, /* cb_stream */ 200 D_MP | D_HOTPLUG, /* cb_flag */ 201 CB_REV, /* cb_rev */ 202 nodev, /* cb_aread */ 203 nodev /* cb_awrite */ 204 }; 205 206 static struct dev_ops ws_ops = { 207 DEVO_REV, /* devo_rev */ 208 0, /* devo_refcnt */ 209 NULL, /* devo_getinfo */ 210 nulldev, /* devo_identify */ 211 nulldev, /* devo_probe */ 212 e1000g_attach, /* devo_attach */ 213 e1000g_detach, /* devo_detach */ 214 nodev, /* devo_reset */ 215 &cb_ws_ops, /* devo_cb_ops */ 216 NULL, /* devo_bus_ops */ 217 ddi_power, /* devo_power */ 218 e1000g_quiesce /* devo_quiesce */ 219 }; 220 221 static struct modldrv modldrv = { 222 &mod_driverops, /* Type of module. 
This one is a driver */
	ident,				/* Description string */
	&ws_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

/* Access attributes for register mapping */
static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

#define	E1000G_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t e1000g_m_callbacks = {
	E1000G_M_CALLBACK_FLAGS,
	e1000g_m_stat,
	e1000g_m_start,
	e1000g_m_stop,
	e1000g_m_promisc,
	e1000g_m_multicst,
	NULL,
	e1000g_m_tx,
	NULL,
	e1000g_m_ioctl,
	e1000g_m_getcapab,
	NULL,
	NULL,
	e1000g_m_setprop,
	e1000g_m_getprop,
	e1000g_m_propinfo
};

/*
 * Global variables
 */
uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K;
uint32_t e1000g_mblks_pending = 0;

/*
 * Workaround for Dynamic Reconfiguration support, for the x86 platform only.
 * Here we maintain a private dev_info list if e1000g_force_detach is
 * enabled. If we force the driver to detach while there are still some
 * rx buffers retained in the upper layer, we have to keep a copy of the
 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
 * structure will be freed after the driver is detached. However, when we
 * finally free those rx buffers released by the upper layer, we need to
 * refer to the dev_info to free the dma buffers. So we save a copy of
 * the dev_info for this purpose. On the x86 platform, we assume this copy
 * of dev_info is always valid, but on the SPARC platform it can become
 * invalid after a system-board-level DR operation. For this reason, the
 * global variable e1000g_force_detach must be B_FALSE on SPARC.
 */
#ifdef __sparc
boolean_t e1000g_force_detach = B_FALSE;
#else
boolean_t e1000g_force_detach = B_TRUE;
#endif
private_devi_list_t *e1000g_private_devi_list = NULL;

/*
 * The mutex e1000g_rx_detach_lock is defined to protect the processing of
 * the private dev_info list, and to serialize the processing of rx buffer
 * freeing and rx buffer recycling.
 */
kmutex_t e1000g_rx_detach_lock;
/*
 * The rwlock e1000g_dma_type_lock is defined to protect the global flag
 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
 * If there are many e1000g instances, the system may run out of DVMA
 * resources during the initialization of the instances, then the flag will
 * be changed to "USE_DMA". Because different e1000g instances are
 * initialized in parallel, we need to use this lock to protect the flag.
 */
krwlock_t e1000g_dma_type_lock;

/*
 * The 82546 chipset is a dual-port device and both ports share one EEPROM.
 * Based on the information from Intel, the 82546 chipset has a hardware
 * problem: when one port is being reset and the other port is trying to
 * access the EEPROM, it can cause a system hang or panic. To work around
 * this hardware problem, we use a global mutex to prevent such operations
 * from happening simultaneously on different instances. This workaround
 * is applied to all the devices supported by this driver.
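 * The same lock also serializes the other NVM accesses performed by the
 * driver (chip resets, NVM checksum validation and MAC address reads).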
311 */ 312 kmutex_t e1000g_nvm_lock; 313 314 /* 315 * Loadable module configuration entry points for the driver 316 */ 317 318 /* 319 * _init - module initialization 320 */ 321 int 322 _init(void) 323 { 324 int status; 325 326 mac_init_ops(&ws_ops, WSNAME); 327 status = mod_install(&modlinkage); 328 if (status != DDI_SUCCESS) 329 mac_fini_ops(&ws_ops); 330 else { 331 mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL); 332 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL); 333 mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL); 334 } 335 336 return (status); 337 } 338 339 /* 340 * _fini - module finalization 341 */ 342 int 343 _fini(void) 344 { 345 int status; 346 347 if (e1000g_mblks_pending != 0) 348 return (EBUSY); 349 350 status = mod_remove(&modlinkage); 351 if (status == DDI_SUCCESS) { 352 mac_fini_ops(&ws_ops); 353 354 if (e1000g_force_detach) { 355 private_devi_list_t *devi_node; 356 357 mutex_enter(&e1000g_rx_detach_lock); 358 while (e1000g_private_devi_list != NULL) { 359 devi_node = e1000g_private_devi_list; 360 e1000g_private_devi_list = 361 e1000g_private_devi_list->next; 362 363 kmem_free(devi_node->priv_dip, 364 sizeof (struct dev_info)); 365 kmem_free(devi_node, 366 sizeof (private_devi_list_t)); 367 } 368 mutex_exit(&e1000g_rx_detach_lock); 369 } 370 371 mutex_destroy(&e1000g_rx_detach_lock); 372 rw_destroy(&e1000g_dma_type_lock); 373 mutex_destroy(&e1000g_nvm_lock); 374 } 375 376 return (status); 377 } 378 379 /* 380 * _info - module information 381 */ 382 int 383 _info(struct modinfo *modinfop) 384 { 385 return (mod_info(&modlinkage, modinfop)); 386 } 387 388 /* 389 * e1000g_attach - driver attach 390 * 391 * This function is the device-specific initialization entry 392 * point. This entry point is required and must be written. 393 * The DDI_ATTACH command must be provided in the attach entry 394 * point. When attach() is called with cmd set to DDI_ATTACH, 395 * all normal kernel services (such as kmem_alloc(9F)) are 396 * available for use by the driver. 397 * 398 * The attach() function will be called once for each instance 399 * of the device on the system with cmd set to DDI_ATTACH. 400 * Until attach() succeeds, the only driver entry points which 401 * may be called are open(9E) and getinfo(9E). 402 */ 403 static int 404 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 405 { 406 struct e1000g *Adapter; 407 struct e1000_hw *hw; 408 struct e1000g_osdep *osdep; 409 int instance; 410 411 switch (cmd) { 412 default: 413 e1000g_log(NULL, CE_WARN, 414 "Unsupported command send to e1000g_attach... 
"); 415 return (DDI_FAILURE); 416 417 case DDI_RESUME: 418 return (e1000g_resume(devinfo)); 419 420 case DDI_ATTACH: 421 break; 422 } 423 424 /* 425 * get device instance number 426 */ 427 instance = ddi_get_instance(devinfo); 428 429 /* 430 * Allocate soft data structure 431 */ 432 Adapter = 433 (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP); 434 435 Adapter->dip = devinfo; 436 Adapter->instance = instance; 437 Adapter->tx_ring->adapter = Adapter; 438 Adapter->rx_ring->adapter = Adapter; 439 440 hw = &Adapter->shared; 441 osdep = &Adapter->osdep; 442 hw->back = osdep; 443 osdep->adapter = Adapter; 444 445 ddi_set_driver_private(devinfo, (caddr_t)Adapter); 446 447 /* 448 * Initialize for fma support 449 */ 450 (void) e1000g_get_prop(Adapter, "fm-capable", 451 0, 0x0f, 452 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 453 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE, 454 &Adapter->fm_capabilities); 455 e1000g_fm_init(Adapter); 456 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT; 457 458 /* 459 * PCI Configure 460 */ 461 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { 462 e1000g_log(Adapter, CE_WARN, "PCI configuration failed"); 463 goto attach_fail; 464 } 465 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; 466 467 /* 468 * Setup hardware 469 */ 470 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) { 471 e1000g_log(Adapter, CE_WARN, "Identify hardware failed"); 472 goto attach_fail; 473 } 474 475 /* 476 * Map in the device registers. 477 */ 478 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) { 479 e1000g_log(Adapter, CE_WARN, "Mapping registers failed"); 480 goto attach_fail; 481 } 482 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP; 483 484 /* 485 * Initialize driver parameters 486 */ 487 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) { 488 goto attach_fail; 489 } 490 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP; 491 492 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 493 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 494 goto attach_fail; 495 } 496 497 /* 498 * Disable ULP support 499 */ 500 (void) e1000_disable_ulp_lpt_lp(hw, TRUE); 501 502 /* 503 * Initialize interrupts 504 */ 505 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) { 506 e1000g_log(Adapter, CE_WARN, "Add interrupts failed"); 507 goto attach_fail; 508 } 509 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 510 511 /* 512 * Initialize mutex's for this device. 
513 * Do this before enabling the interrupt handler and 514 * register the softint to avoid the condition where 515 * interrupt handler can try using uninitialized mutex 516 */ 517 e1000g_init_locks(Adapter); 518 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS; 519 520 /* 521 * Initialize Driver Counters 522 */ 523 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) { 524 e1000g_log(Adapter, CE_WARN, "Init stats failed"); 525 goto attach_fail; 526 } 527 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS; 528 529 /* 530 * Initialize chip hardware and software structures 531 */ 532 rw_enter(&Adapter->chip_lock, RW_WRITER); 533 if (e1000g_init(Adapter) != DDI_SUCCESS) { 534 rw_exit(&Adapter->chip_lock); 535 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed"); 536 goto attach_fail; 537 } 538 rw_exit(&Adapter->chip_lock); 539 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 540 541 /* 542 * Register the driver to the MAC 543 */ 544 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) { 545 e1000g_log(Adapter, CE_WARN, "Register MAC failed"); 546 goto attach_fail; 547 } 548 Adapter->attach_progress |= ATTACH_PROGRESS_MAC; 549 550 /* 551 * Now that mutex locks are initialized, and the chip is also 552 * initialized, enable interrupts. 553 */ 554 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) { 555 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed"); 556 goto attach_fail; 557 } 558 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 559 560 /* 561 * If e1000g_force_detach is enabled, in global private dip list, 562 * we will create a new entry, which maintains the priv_dip for DR 563 * supports after driver detached. 564 */ 565 if (e1000g_force_detach) { 566 private_devi_list_t *devi_node; 567 568 Adapter->priv_dip = 569 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP); 570 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip), 571 sizeof (struct dev_info)); 572 573 devi_node = 574 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP); 575 576 mutex_enter(&e1000g_rx_detach_lock); 577 devi_node->priv_dip = Adapter->priv_dip; 578 devi_node->flag = E1000G_PRIV_DEVI_ATTACH; 579 devi_node->pending_rx_count = 0; 580 581 Adapter->priv_devi_node = devi_node; 582 583 if (e1000g_private_devi_list == NULL) { 584 devi_node->prev = NULL; 585 devi_node->next = NULL; 586 e1000g_private_devi_list = devi_node; 587 } else { 588 devi_node->prev = NULL; 589 devi_node->next = e1000g_private_devi_list; 590 e1000g_private_devi_list->prev = devi_node; 591 e1000g_private_devi_list = devi_node; 592 } 593 mutex_exit(&e1000g_rx_detach_lock); 594 } 595 596 Adapter->e1000g_state = E1000G_INITIALIZED; 597 return (DDI_SUCCESS); 598 599 attach_fail: 600 e1000g_unattach(devinfo, Adapter); 601 return (DDI_FAILURE); 602 } 603 604 static int 605 e1000g_register_mac(struct e1000g *Adapter) 606 { 607 struct e1000_hw *hw = &Adapter->shared; 608 mac_register_t *mac; 609 int err; 610 611 if ((mac = mac_alloc(MAC_VERSION)) == NULL) 612 return (DDI_FAILURE); 613 614 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 615 mac->m_driver = Adapter; 616 mac->m_dip = Adapter->dip; 617 mac->m_src_addr = hw->mac.addr; 618 mac->m_callbacks = &e1000g_m_callbacks; 619 mac->m_min_sdu = 0; 620 mac->m_max_sdu = Adapter->default_mtu; 621 mac->m_margin = VLAN_TAGSZ; 622 mac->m_priv_props = e1000g_priv_props; 623 mac->m_v12n = MAC_VIRT_LEVEL1; 624 625 err = mac_register(mac, &Adapter->mh); 626 mac_free(mac); 627 628 return (err == 0 ? 
DDI_SUCCESS : DDI_FAILURE); 629 } 630 631 static int 632 e1000g_identify_hardware(struct e1000g *Adapter) 633 { 634 struct e1000_hw *hw = &Adapter->shared; 635 struct e1000g_osdep *osdep = &Adapter->osdep; 636 637 /* Get the device id */ 638 hw->vendor_id = 639 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID); 640 hw->device_id = 641 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID); 642 hw->revision_id = 643 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID); 644 hw->subsystem_device_id = 645 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID); 646 hw->subsystem_vendor_id = 647 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID); 648 649 if (e1000_set_mac_type(hw) != E1000_SUCCESS) { 650 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 651 "MAC type could not be set properly."); 652 return (DDI_FAILURE); 653 } 654 655 return (DDI_SUCCESS); 656 } 657 658 static int 659 e1000g_regs_map(struct e1000g *Adapter) 660 { 661 dev_info_t *devinfo = Adapter->dip; 662 struct e1000_hw *hw = &Adapter->shared; 663 struct e1000g_osdep *osdep = &Adapter->osdep; 664 off_t mem_size; 665 bar_info_t bar_info; 666 int offset, rnumber; 667 668 rnumber = ADAPTER_REG_SET; 669 /* Get size of adapter register memory */ 670 if (ddi_dev_regsize(devinfo, rnumber, &mem_size) != 671 DDI_SUCCESS) { 672 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 673 "ddi_dev_regsize for registers failed"); 674 return (DDI_FAILURE); 675 } 676 677 /* Map adapter register memory */ 678 if ((ddi_regs_map_setup(devinfo, rnumber, 679 (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr, 680 &osdep->reg_handle)) != DDI_SUCCESS) { 681 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 682 "ddi_regs_map_setup for registers failed"); 683 goto regs_map_fail; 684 } 685 686 /* ICH needs to map flash memory */ 687 switch (hw->mac.type) { 688 case e1000_ich8lan: 689 case e1000_ich9lan: 690 case e1000_ich10lan: 691 case e1000_pchlan: 692 case e1000_pch2lan: 693 case e1000_pch_lpt: 694 rnumber = ICH_FLASH_REG_SET; 695 696 /* get flash size */ 697 if (ddi_dev_regsize(devinfo, rnumber, 698 &mem_size) != DDI_SUCCESS) { 699 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 700 "ddi_dev_regsize for ICH flash failed"); 701 goto regs_map_fail; 702 } 703 704 /* map flash in */ 705 if (ddi_regs_map_setup(devinfo, rnumber, 706 (caddr_t *)&hw->flash_address, 0, 707 mem_size, &e1000g_regs_acc_attr, 708 &osdep->ich_flash_handle) != DDI_SUCCESS) { 709 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 710 "ddi_regs_map_setup for ICH flash failed"); 711 goto regs_map_fail; 712 } 713 break; 714 case e1000_pch_spt: 715 case e1000_pch_cnp: 716 case e1000_pch_tgp: 717 case e1000_pch_adp: 718 case e1000_pch_mtp: 719 case e1000_pch_lnp: 720 case e1000_pch_rpl: 721 /* 722 * On the SPT, the device flash is actually in BAR0, not a 723 * separate BAR. Therefore we end up setting the 724 * ich_flash_handle to be the same as the register handle. 725 * We mark the same to reduce the confusion in the other 726 * functions and macros. Though this does make the set up and 727 * tear-down path slightly more complicated. 
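 * Both the error path below and e1000g_unattach() therefore check for
 * mac.type < e1000_pch_spt before freeing ich_flash_handle, so that
 * BAR0 is not unmapped twice.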
728 */ 729 osdep->ich_flash_handle = osdep->reg_handle; 730 hw->flash_address = hw->hw_addr; 731 default: 732 break; 733 } 734 735 /* map io space */ 736 switch (hw->mac.type) { 737 case e1000_82544: 738 case e1000_82540: 739 case e1000_82545: 740 case e1000_82546: 741 case e1000_82541: 742 case e1000_82541_rev_2: 743 /* find the IO bar */ 744 rnumber = -1; 745 for (offset = PCI_CONF_BASE1; 746 offset <= PCI_CONF_BASE5; offset += 4) { 747 if (e1000g_get_bar_info(devinfo, offset, &bar_info) 748 != DDI_SUCCESS) 749 continue; 750 if (bar_info.type == E1000G_BAR_IO) { 751 rnumber = bar_info.rnumber; 752 break; 753 } 754 } 755 756 if (rnumber < 0) { 757 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 758 "No io space is found"); 759 goto regs_map_fail; 760 } 761 762 /* get io space size */ 763 if (ddi_dev_regsize(devinfo, rnumber, 764 &mem_size) != DDI_SUCCESS) { 765 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 766 "ddi_dev_regsize for io space failed"); 767 goto regs_map_fail; 768 } 769 770 /* map io space */ 771 if ((ddi_regs_map_setup(devinfo, rnumber, 772 (caddr_t *)&hw->io_base, 0, mem_size, 773 &e1000g_regs_acc_attr, 774 &osdep->io_reg_handle)) != DDI_SUCCESS) { 775 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 776 "ddi_regs_map_setup for io space failed"); 777 goto regs_map_fail; 778 } 779 break; 780 default: 781 hw->io_base = 0; 782 break; 783 } 784 785 return (DDI_SUCCESS); 786 787 regs_map_fail: 788 if (osdep->reg_handle != NULL) 789 ddi_regs_map_free(&osdep->reg_handle); 790 if (osdep->ich_flash_handle != NULL && hw->mac.type < e1000_pch_spt) 791 ddi_regs_map_free(&osdep->ich_flash_handle); 792 return (DDI_FAILURE); 793 } 794 795 static int 796 e1000g_set_driver_params(struct e1000g *Adapter) 797 { 798 struct e1000_hw *hw; 799 800 hw = &Adapter->shared; 801 802 /* Set MAC type and initialize hardware functions */ 803 if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) { 804 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 805 "Could not setup hardware functions"); 806 return (DDI_FAILURE); 807 } 808 809 /* Get bus information */ 810 if (e1000_get_bus_info(hw) != E1000_SUCCESS) { 811 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 812 "Could not get bus information"); 813 return (DDI_FAILURE); 814 } 815 816 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word); 817 818 hw->mac.autoneg_failed = B_TRUE; 819 820 /* Set the autoneg_wait_to_complete flag to B_FALSE */ 821 hw->phy.autoneg_wait_to_complete = B_FALSE; 822 823 /* Adaptive IFS related changes */ 824 hw->mac.adaptive_ifs = B_TRUE; 825 826 /* Enable phy init script for IGP phy of 82541/82547 */ 827 if ((hw->mac.type == e1000_82547) || 828 (hw->mac.type == e1000_82541) || 829 (hw->mac.type == e1000_82547_rev_2) || 830 (hw->mac.type == e1000_82541_rev_2)) 831 e1000_init_script_state_82541(hw, B_TRUE); 832 833 /* Enable the TTL workaround for 82541/82547 */ 834 e1000_set_ttl_workaround_state_82541(hw, B_TRUE); 835 836 #ifdef __sparc 837 Adapter->strip_crc = B_TRUE; 838 #else 839 Adapter->strip_crc = B_FALSE; 840 #endif 841 842 /* setup the maximum MTU size of the chip */ 843 e1000g_setup_max_mtu(Adapter); 844 845 /* Get speed/duplex settings in conf file */ 846 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL; 847 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 848 e1000g_force_speed_duplex(Adapter); 849 850 /* Get Jumbo Frames settings in conf file */ 851 e1000g_get_max_frame_size(Adapter); 852 853 /* Get conf file properties */ 854 e1000g_get_conf(Adapter); 855 856 /* enforce PCH limits */ 857 e1000g_pch_limits(Adapter); 858 859 /* Set Rx/Tx buffer size 
*/ 860 e1000g_set_bufsize(Adapter); 861 862 /* Master Latency Timer */ 863 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER; 864 865 /* copper options */ 866 if (hw->phy.media_type == e1000_media_type_copper) { 867 hw->phy.mdix = 0; /* AUTO_ALL_MODES */ 868 hw->phy.disable_polarity_correction = B_FALSE; 869 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */ 870 } 871 872 /* The initial link state should be "unknown" */ 873 Adapter->link_state = LINK_STATE_UNKNOWN; 874 875 /* Initialize rx parameters */ 876 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY; 877 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY; 878 879 /* Initialize tx parameters */ 880 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE; 881 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD; 882 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY; 883 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY; 884 885 /* Initialize rx parameters */ 886 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD; 887 888 return (DDI_SUCCESS); 889 } 890 891 static void 892 e1000g_setup_max_mtu(struct e1000g *Adapter) 893 { 894 struct e1000_mac_info *mac = &Adapter->shared.mac; 895 struct e1000_phy_info *phy = &Adapter->shared.phy; 896 897 switch (mac->type) { 898 /* types that do not support jumbo frames */ 899 case e1000_ich8lan: 900 case e1000_82573: 901 case e1000_82583: 902 Adapter->max_mtu = ETHERMTU; 903 break; 904 /* ich9 supports jumbo frames except on one phy type */ 905 case e1000_ich9lan: 906 if (phy->type == e1000_phy_ife) 907 Adapter->max_mtu = ETHERMTU; 908 else 909 Adapter->max_mtu = MAXIMUM_MTU_9K; 910 break; 911 /* pch can do jumbo frames up to 4K */ 912 case e1000_pchlan: 913 Adapter->max_mtu = MAXIMUM_MTU_4K; 914 break; 915 /* pch2 can do jumbo frames up to 9K */ 916 case e1000_pch2lan: 917 case e1000_pch_lpt: 918 case e1000_pch_spt: 919 case e1000_pch_cnp: 920 case e1000_pch_tgp: 921 case e1000_pch_adp: 922 case e1000_pch_mtp: 923 case e1000_pch_lnp: 924 case e1000_pch_rpl: 925 Adapter->max_mtu = MAXIMUM_MTU_9K; 926 break; 927 /* types with a special limit */ 928 case e1000_82571: 929 case e1000_82572: 930 case e1000_82574: 931 case e1000_80003es2lan: 932 case e1000_ich10lan: 933 if (e1000g_jumbo_mtu >= ETHERMTU && 934 e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) { 935 Adapter->max_mtu = e1000g_jumbo_mtu; 936 } else { 937 Adapter->max_mtu = MAXIMUM_MTU_9K; 938 } 939 break; 940 /* default limit is 16K */ 941 default: 942 Adapter->max_mtu = FRAME_SIZE_UPTO_16K - 943 sizeof (struct ether_vlan_header) - ETHERFCSL; 944 break; 945 } 946 } 947 948 static void 949 e1000g_set_bufsize(struct e1000g *Adapter) 950 { 951 struct e1000_mac_info *mac = &Adapter->shared.mac; 952 uint64_t rx_size; 953 uint64_t tx_size; 954 955 dev_info_t *devinfo = Adapter->dip; 956 #ifdef __sparc 957 ulong_t iommu_pagesize; 958 #endif 959 /* Get the system page size */ 960 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1); 961 962 #ifdef __sparc 963 iommu_pagesize = dvma_pagesize(devinfo); 964 if (iommu_pagesize != 0) { 965 if (Adapter->sys_page_sz == iommu_pagesize) { 966 if (iommu_pagesize > 0x4000) 967 Adapter->sys_page_sz = 0x4000; 968 } else { 969 if (Adapter->sys_page_sz > iommu_pagesize) 970 Adapter->sys_page_sz = iommu_pagesize; 971 } 972 } 973 if (Adapter->lso_enable) { 974 Adapter->dvma_page_num = E1000_LSO_MAXLEN / 975 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; 976 } else { 977 Adapter->dvma_page_num = Adapter->max_frame_size / 978 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; 979 } 980 
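	/*
	 * The DVMA page estimate computed above must never fall below the
	 * default minimum page count.
	 */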
	ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
#endif

	Adapter->min_frame_size = ETHERMIN + ETHERFCSL;

	if (Adapter->mem_workaround_82546 &&
	    ((mac->type == e1000_82545) ||
	    (mac->type == e1000_82546) ||
	    (mac->type == e1000_82546_rev_3))) {
		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
	} else {
		rx_size = Adapter->max_frame_size;
		if ((rx_size > FRAME_SIZE_UPTO_2K) &&
		    (rx_size <= FRAME_SIZE_UPTO_4K))
			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
		else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
		    (rx_size <= FRAME_SIZE_UPTO_8K))
			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
		else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
		    (rx_size <= FRAME_SIZE_UPTO_16K))
			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
		else
			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
	}
	Adapter->rx_buffer_size += E1000G_IPALIGNROOM;

	tx_size = Adapter->max_frame_size;
	if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
	else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
	    (tx_size <= FRAME_SIZE_UPTO_8K))
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
	else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
	    (tx_size <= FRAME_SIZE_UPTO_16K))
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
	else
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;

	/*
	 * For Wiseman adapters we have a requirement that receive buffers
	 * be aligned on a 256-byte boundary. Since Livengood does not
	 * require this, and forcing it for all hardware would have
	 * performance implications, the alignment is made applicable only
	 * to Wiseman and to jumbo-frames-enabled mode, as the rest of the
	 * time it is okay to have normal frames. An unaligned buffer does
	 * carry a potential risk of data loss, so all Wiseman boards get
	 * 256-byte-aligned buffers.
	 */
	if (mac->type < e1000_82543)
		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
	else
		Adapter->rx_buf_align = 1;
}

/*
 * e1000g_detach - driver detach
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by this
 * driver is freed.
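 *
 * If rx buffers are still held by the upper layer, detach can only
 * proceed when e1000g_force_detach is enabled; otherwise DDI_FAILURE
 * is returned and the instance remains attached.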
1049 */ 1050 static int 1051 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 1052 { 1053 struct e1000g *Adapter; 1054 boolean_t rx_drain; 1055 1056 switch (cmd) { 1057 default: 1058 return (DDI_FAILURE); 1059 1060 case DDI_SUSPEND: 1061 return (e1000g_suspend(devinfo)); 1062 1063 case DDI_DETACH: 1064 break; 1065 } 1066 1067 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1068 if (Adapter == NULL) 1069 return (DDI_FAILURE); 1070 1071 rx_drain = e1000g_rx_drain(Adapter); 1072 if (!rx_drain && !e1000g_force_detach) 1073 return (DDI_FAILURE); 1074 1075 if (mac_unregister(Adapter->mh) != 0) { 1076 e1000g_log(Adapter, CE_WARN, "Unregister MAC failed"); 1077 return (DDI_FAILURE); 1078 } 1079 Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC; 1080 1081 ASSERT(!(Adapter->e1000g_state & E1000G_STARTED)); 1082 1083 if (!e1000g_force_detach && !rx_drain) 1084 return (DDI_FAILURE); 1085 1086 e1000g_unattach(devinfo, Adapter); 1087 1088 return (DDI_SUCCESS); 1089 } 1090 1091 /* 1092 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance 1093 */ 1094 void 1095 e1000g_free_priv_devi_node(private_devi_list_t *devi_node) 1096 { 1097 ASSERT(e1000g_private_devi_list != NULL); 1098 ASSERT(devi_node != NULL); 1099 1100 if (devi_node->prev != NULL) 1101 devi_node->prev->next = devi_node->next; 1102 if (devi_node->next != NULL) 1103 devi_node->next->prev = devi_node->prev; 1104 if (devi_node == e1000g_private_devi_list) 1105 e1000g_private_devi_list = devi_node->next; 1106 1107 kmem_free(devi_node->priv_dip, 1108 sizeof (struct dev_info)); 1109 kmem_free(devi_node, 1110 sizeof (private_devi_list_t)); 1111 } 1112 1113 static void 1114 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter) 1115 { 1116 private_devi_list_t *devi_node; 1117 int result; 1118 1119 if (Adapter->e1000g_blink != NULL) { 1120 ddi_periodic_delete(Adapter->e1000g_blink); 1121 Adapter->e1000g_blink = NULL; 1122 } 1123 1124 if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 1125 (void) e1000g_disable_intrs(Adapter); 1126 } 1127 1128 if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) { 1129 (void) mac_unregister(Adapter->mh); 1130 } 1131 1132 if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 1133 (void) e1000g_rem_intrs(Adapter); 1134 } 1135 1136 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) { 1137 (void) ddi_prop_remove_all(devinfo); 1138 } 1139 1140 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) { 1141 kstat_delete((kstat_t *)Adapter->e1000g_ksp); 1142 } 1143 1144 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) { 1145 stop_link_timer(Adapter); 1146 1147 mutex_enter(&e1000g_nvm_lock); 1148 result = e1000_reset_hw(&Adapter->shared); 1149 mutex_exit(&e1000g_nvm_lock); 1150 1151 if (result != E1000_SUCCESS) { 1152 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1153 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1154 } 1155 } 1156 1157 e1000g_release_multicast(Adapter); 1158 1159 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) { 1160 if (Adapter->osdep.reg_handle != NULL) 1161 ddi_regs_map_free(&Adapter->osdep.reg_handle); 1162 if (Adapter->osdep.ich_flash_handle != NULL && 1163 Adapter->shared.mac.type < e1000_pch_spt) 1164 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle); 1165 if (Adapter->osdep.io_reg_handle != NULL) 1166 ddi_regs_map_free(&Adapter->osdep.io_reg_handle); 1167 } 1168 1169 if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) { 1170 if (Adapter->osdep.cfg_handle != NULL) 1171 
pci_config_teardown(&Adapter->osdep.cfg_handle); 1172 } 1173 1174 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) { 1175 e1000g_destroy_locks(Adapter); 1176 } 1177 1178 if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) { 1179 e1000g_fm_fini(Adapter); 1180 } 1181 1182 mutex_enter(&e1000g_rx_detach_lock); 1183 if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) { 1184 devi_node = Adapter->priv_devi_node; 1185 devi_node->flag |= E1000G_PRIV_DEVI_DETACH; 1186 1187 if (devi_node->pending_rx_count == 0) { 1188 e1000g_free_priv_devi_node(devi_node); 1189 } 1190 } 1191 mutex_exit(&e1000g_rx_detach_lock); 1192 1193 kmem_free((caddr_t)Adapter, sizeof (struct e1000g)); 1194 1195 /* 1196 * Another hotplug spec requirement, 1197 * run ddi_set_driver_private(devinfo, null); 1198 */ 1199 ddi_set_driver_private(devinfo, NULL); 1200 } 1201 1202 /* 1203 * Get the BAR type and rnumber for a given PCI BAR offset 1204 */ 1205 static int 1206 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info) 1207 { 1208 pci_regspec_t *regs; 1209 uint_t regs_length; 1210 int type, rnumber, rcount; 1211 1212 ASSERT((bar_offset >= PCI_CONF_BASE0) && 1213 (bar_offset <= PCI_CONF_BASE5)); 1214 1215 /* 1216 * Get the DDI "reg" property 1217 */ 1218 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 1219 DDI_PROP_DONTPASS, "reg", (int **)®s, 1220 ®s_length) != DDI_PROP_SUCCESS) { 1221 return (DDI_FAILURE); 1222 } 1223 1224 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t); 1225 /* 1226 * Check the BAR offset 1227 */ 1228 for (rnumber = 0; rnumber < rcount; ++rnumber) { 1229 if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) { 1230 type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK; 1231 break; 1232 } 1233 } 1234 1235 ddi_prop_free(regs); 1236 1237 if (rnumber >= rcount) 1238 return (DDI_FAILURE); 1239 1240 switch (type) { 1241 case PCI_ADDR_CONFIG: 1242 bar_info->type = E1000G_BAR_CONFIG; 1243 break; 1244 case PCI_ADDR_IO: 1245 bar_info->type = E1000G_BAR_IO; 1246 break; 1247 case PCI_ADDR_MEM32: 1248 bar_info->type = E1000G_BAR_MEM32; 1249 break; 1250 case PCI_ADDR_MEM64: 1251 bar_info->type = E1000G_BAR_MEM64; 1252 break; 1253 default: 1254 return (DDI_FAILURE); 1255 } 1256 bar_info->rnumber = rnumber; 1257 return (DDI_SUCCESS); 1258 } 1259 1260 static void 1261 e1000g_init_locks(struct e1000g *Adapter) 1262 { 1263 e1000g_tx_ring_t *tx_ring; 1264 e1000g_rx_ring_t *rx_ring; 1265 1266 rw_init(&Adapter->chip_lock, NULL, 1267 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1268 mutex_init(&Adapter->link_lock, NULL, 1269 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1270 mutex_init(&Adapter->watchdog_lock, NULL, 1271 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1272 1273 tx_ring = Adapter->tx_ring; 1274 1275 mutex_init(&tx_ring->tx_lock, NULL, 1276 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1277 mutex_init(&tx_ring->usedlist_lock, NULL, 1278 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1279 mutex_init(&tx_ring->freelist_lock, NULL, 1280 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1281 1282 rx_ring = Adapter->rx_ring; 1283 1284 mutex_init(&rx_ring->rx_lock, NULL, 1285 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1286 1287 mutex_init(&Adapter->e1000g_led_lock, NULL, 1288 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1289 } 1290 1291 static void 1292 e1000g_destroy_locks(struct e1000g *Adapter) 1293 { 1294 e1000g_tx_ring_t *tx_ring; 1295 e1000g_rx_ring_t *rx_ring; 1296 1297 mutex_destroy(&Adapter->e1000g_led_lock); 1298 1299 tx_ring = Adapter->tx_ring; 1300 
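	/* Per-ring tx locks first, then the rx, link, watchdog and chip locks. */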
mutex_destroy(&tx_ring->tx_lock); 1301 mutex_destroy(&tx_ring->usedlist_lock); 1302 mutex_destroy(&tx_ring->freelist_lock); 1303 1304 rx_ring = Adapter->rx_ring; 1305 mutex_destroy(&rx_ring->rx_lock); 1306 1307 mutex_destroy(&Adapter->link_lock); 1308 mutex_destroy(&Adapter->watchdog_lock); 1309 rw_destroy(&Adapter->chip_lock); 1310 1311 /* destory mutex initialized in shared code */ 1312 e1000_destroy_hw_mutex(&Adapter->shared); 1313 } 1314 1315 static int 1316 e1000g_resume(dev_info_t *devinfo) 1317 { 1318 struct e1000g *Adapter; 1319 1320 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1321 if (Adapter == NULL) 1322 e1000g_log(Adapter, CE_PANIC, 1323 "Instance pointer is null\n"); 1324 1325 if (Adapter->dip != devinfo) 1326 e1000g_log(Adapter, CE_PANIC, 1327 "Devinfo is not the same as saved devinfo\n"); 1328 1329 rw_enter(&Adapter->chip_lock, RW_WRITER); 1330 1331 if (Adapter->e1000g_state & E1000G_STARTED) { 1332 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { 1333 rw_exit(&Adapter->chip_lock); 1334 /* 1335 * We note the failure, but return success, as the 1336 * system is still usable without this controller. 1337 */ 1338 e1000g_log(Adapter, CE_WARN, 1339 "e1000g_resume: failed to restart controller\n"); 1340 return (DDI_SUCCESS); 1341 } 1342 /* Enable and start the watchdog timer */ 1343 enable_watchdog_timer(Adapter); 1344 } 1345 1346 Adapter->e1000g_state &= ~E1000G_SUSPENDED; 1347 1348 rw_exit(&Adapter->chip_lock); 1349 1350 return (DDI_SUCCESS); 1351 } 1352 1353 static int 1354 e1000g_suspend(dev_info_t *devinfo) 1355 { 1356 struct e1000g *Adapter; 1357 1358 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1359 if (Adapter == NULL) 1360 return (DDI_FAILURE); 1361 1362 rw_enter(&Adapter->chip_lock, RW_WRITER); 1363 1364 Adapter->e1000g_state |= E1000G_SUSPENDED; 1365 1366 /* if the port isn't plumbed, we can simply return */ 1367 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 1368 rw_exit(&Adapter->chip_lock); 1369 return (DDI_SUCCESS); 1370 } 1371 1372 e1000g_stop(Adapter, B_FALSE); 1373 1374 rw_exit(&Adapter->chip_lock); 1375 1376 /* Disable and stop all the timers */ 1377 disable_watchdog_timer(Adapter); 1378 stop_link_timer(Adapter); 1379 stop_82547_timer(Adapter->tx_ring); 1380 1381 return (DDI_SUCCESS); 1382 } 1383 1384 static int 1385 e1000g_init(struct e1000g *Adapter) 1386 { 1387 uint32_t pba; 1388 uint32_t high_water; 1389 struct e1000_hw *hw; 1390 clock_t link_timeout; 1391 int result; 1392 1393 hw = &Adapter->shared; 1394 1395 /* 1396 * reset to put the hardware in a known state 1397 * before we try to do anything with the eeprom 1398 */ 1399 mutex_enter(&e1000g_nvm_lock); 1400 result = e1000_reset_hw(hw); 1401 mutex_exit(&e1000g_nvm_lock); 1402 1403 if (result != E1000_SUCCESS) { 1404 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1405 goto init_fail; 1406 } 1407 1408 mutex_enter(&e1000g_nvm_lock); 1409 result = e1000_validate_nvm_checksum(hw); 1410 if (result < E1000_SUCCESS) { 1411 /* 1412 * Some PCI-E parts fail the first check due to 1413 * the link being in sleep state. Call it again, 1414 * if it fails a second time its a real issue. 1415 */ 1416 result = e1000_validate_nvm_checksum(hw); 1417 } 1418 mutex_exit(&e1000g_nvm_lock); 1419 1420 if (result < E1000_SUCCESS) { 1421 e1000g_log(Adapter, CE_WARN, 1422 "Invalid NVM checksum. 
Please contact " 1423 "the vendor to update the NVM."); 1424 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1425 goto init_fail; 1426 } 1427 1428 result = 0; 1429 #ifdef __sparc 1430 /* 1431 * First, we try to get the local ethernet address from OBP. If 1432 * failed, then we get it from the EEPROM of NIC card. 1433 */ 1434 result = e1000g_find_mac_address(Adapter); 1435 #endif 1436 /* Get the local ethernet address. */ 1437 if (!result) { 1438 mutex_enter(&e1000g_nvm_lock); 1439 result = e1000_read_mac_addr(hw); 1440 mutex_exit(&e1000g_nvm_lock); 1441 } 1442 1443 if (result < E1000_SUCCESS) { 1444 e1000g_log(Adapter, CE_WARN, "Read mac addr failed"); 1445 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1446 goto init_fail; 1447 } 1448 1449 /* check for valid mac address */ 1450 if (!is_valid_mac_addr(hw->mac.addr)) { 1451 e1000g_log(Adapter, CE_WARN, "Invalid mac addr"); 1452 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1453 goto init_fail; 1454 } 1455 1456 /* Set LAA state for 82571 chipset */ 1457 e1000_set_laa_state_82571(hw, B_TRUE); 1458 1459 /* Master Latency Timer implementation */ 1460 if (Adapter->master_latency_timer) { 1461 pci_config_put8(Adapter->osdep.cfg_handle, 1462 PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer); 1463 } 1464 1465 if (hw->mac.type < e1000_82547) { 1466 /* 1467 * Total FIFO is 64K 1468 */ 1469 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1470 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 1471 else 1472 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 1473 } else if ((hw->mac.type == e1000_82571) || 1474 (hw->mac.type == e1000_82572) || 1475 (hw->mac.type == e1000_80003es2lan)) { 1476 /* 1477 * Total FIFO is 48K 1478 */ 1479 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1480 pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */ 1481 else 1482 pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */ 1483 } else if (hw->mac.type == e1000_82573) { 1484 pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */ 1485 } else if (hw->mac.type == e1000_82574) { 1486 /* Keep adapter default: 20K for Rx, 20K for Tx */ 1487 pba = E1000_READ_REG(hw, E1000_PBA); 1488 } else if (hw->mac.type == e1000_ich8lan) { 1489 pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */ 1490 } else if (hw->mac.type == e1000_ich9lan) { 1491 pba = E1000_PBA_10K; 1492 } else if (hw->mac.type == e1000_ich10lan) { 1493 pba = E1000_PBA_10K; 1494 } else if (hw->mac.type == e1000_pchlan) { 1495 pba = E1000_PBA_26K; 1496 } else if (hw->mac.type == e1000_pch2lan) { 1497 pba = E1000_PBA_26K; 1498 } else if (hw->mac.type == e1000_pch_lpt) { 1499 pba = E1000_PBA_26K; 1500 } else if (hw->mac.type == e1000_pch_spt) { 1501 pba = E1000_PBA_26K; 1502 } else if (hw->mac.type == e1000_pch_cnp) { 1503 pba = E1000_PBA_26K; 1504 } else if (hw->mac.type == e1000_pch_tgp) { 1505 pba = E1000_PBA_26K; 1506 } else if (hw->mac.type == e1000_pch_adp) { 1507 pba = E1000_PBA_26K; 1508 } else if (hw->mac.type == e1000_pch_mtp) { 1509 pba = E1000_PBA_26K; 1510 } else if (hw->mac.type == e1000_pch_lnp) { 1511 pba = E1000_PBA_26K; 1512 } else if (hw->mac.type == e1000_pch_rpl) { 1513 pba = E1000_PBA_26K; 1514 } else { 1515 /* 1516 * Total FIFO is 40K 1517 */ 1518 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1519 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ 1520 else 1521 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ 1522 } 1523 E1000_WRITE_REG(hw, E1000_PBA, pba); 1524 1525 /* 1526 * These parameters set thresholds for the adapter's generation(Tx) 1527 * and response(Rx) to Ethernet PAUSE 
frames. These are just threshold 1528 * settings. Flow control is enabled or disabled in the configuration 1529 * file. 1530 * High-water mark is set down from the top of the rx fifo (not 1531 * sensitive to max_frame_size) and low-water is set just below 1532 * high-water mark. 1533 * The high water mark must be low enough to fit one full frame above 1534 * it in the rx FIFO. Should be the lower of: 1535 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early 1536 * receive size (assuming ERT set to E1000_ERT_2048), or the full 1537 * Rx FIFO size minus one full frame. 1538 */ 1539 high_water = min(((pba << 10) * 9 / 10), 1540 ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 || 1541 hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ? 1542 ((pba << 10) - (E1000_ERT_2048 << 3)) : 1543 ((pba << 10) - Adapter->max_frame_size))); 1544 1545 hw->fc.high_water = high_water & 0xFFF8; 1546 hw->fc.low_water = hw->fc.high_water - 8; 1547 1548 if (hw->mac.type == e1000_80003es2lan) 1549 hw->fc.pause_time = 0xFFFF; 1550 else 1551 hw->fc.pause_time = E1000_FC_PAUSE_TIME; 1552 hw->fc.send_xon = B_TRUE; 1553 1554 /* 1555 * Reset the adapter hardware the second time. 1556 */ 1557 mutex_enter(&e1000g_nvm_lock); 1558 result = e1000_reset_hw(hw); 1559 mutex_exit(&e1000g_nvm_lock); 1560 1561 if (result != E1000_SUCCESS) { 1562 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1563 goto init_fail; 1564 } 1565 1566 /* disable wakeup control by default */ 1567 if (hw->mac.type >= e1000_82544) 1568 E1000_WRITE_REG(hw, E1000_WUC, 0); 1569 1570 /* 1571 * MWI should be disabled on 82546. 1572 */ 1573 if (hw->mac.type == e1000_82546) 1574 e1000_pci_clear_mwi(hw); 1575 else 1576 e1000_pci_set_mwi(hw); 1577 1578 /* 1579 * Configure/Initialize hardware 1580 */ 1581 mutex_enter(&e1000g_nvm_lock); 1582 result = e1000_init_hw(hw); 1583 mutex_exit(&e1000g_nvm_lock); 1584 1585 if (result < E1000_SUCCESS) { 1586 e1000g_log(Adapter, CE_WARN, "Initialize hw failed"); 1587 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1588 goto init_fail; 1589 } 1590 1591 /* 1592 * Restore LED settings to the default from EEPROM 1593 * to meet the standard for Sun platforms. 1594 */ 1595 (void) e1000_cleanup_led(hw); 1596 1597 /* Disable Smart Power Down */ 1598 phy_spd_state(hw, B_FALSE); 1599 1600 /* Make sure driver has control */ 1601 e1000g_get_driver_control(hw); 1602 1603 /* 1604 * Initialize unicast addresses. 1605 */ 1606 e1000g_init_unicst(Adapter); 1607 1608 /* 1609 * Setup and initialize the mctable structures. 
After this routine 1610 * completes Multicast table will be set 1611 */ 1612 e1000_update_mc_addr_list(hw, 1613 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 1614 msec_delay(5); 1615 1616 /* 1617 * Implement Adaptive IFS 1618 */ 1619 e1000_reset_adaptive(hw); 1620 1621 /* Setup Interrupt Throttling Register */ 1622 if (hw->mac.type >= e1000_82540) { 1623 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate); 1624 } else 1625 Adapter->intr_adaptive = B_FALSE; 1626 1627 /* Start the timer for link setup */ 1628 if (hw->mac.autoneg) 1629 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000); 1630 else 1631 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000); 1632 1633 mutex_enter(&Adapter->link_lock); 1634 if (hw->phy.autoneg_wait_to_complete) { 1635 Adapter->link_complete = B_TRUE; 1636 } else { 1637 Adapter->link_complete = B_FALSE; 1638 Adapter->link_tid = timeout(e1000g_link_timer, 1639 (void *)Adapter, link_timeout); 1640 } 1641 mutex_exit(&Adapter->link_lock); 1642 1643 /* Save the state of the phy */ 1644 e1000g_get_phy_state(Adapter); 1645 1646 e1000g_param_sync(Adapter); 1647 1648 Adapter->init_count++; 1649 1650 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 1651 goto init_fail; 1652 } 1653 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1654 goto init_fail; 1655 } 1656 1657 Adapter->poll_mode = e1000g_poll_mode; 1658 1659 return (DDI_SUCCESS); 1660 1661 init_fail: 1662 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1663 return (DDI_FAILURE); 1664 } 1665 1666 static int 1667 e1000g_alloc_rx_data(struct e1000g *Adapter) 1668 { 1669 e1000g_rx_ring_t *rx_ring; 1670 e1000g_rx_data_t *rx_data; 1671 1672 rx_ring = Adapter->rx_ring; 1673 1674 rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP); 1675 1676 if (rx_data == NULL) 1677 return (DDI_FAILURE); 1678 1679 rx_data->priv_devi_node = Adapter->priv_devi_node; 1680 rx_data->rx_ring = rx_ring; 1681 1682 mutex_init(&rx_data->freelist_lock, NULL, 1683 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1684 mutex_init(&rx_data->recycle_lock, NULL, 1685 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1686 1687 rx_ring->rx_data = rx_data; 1688 1689 return (DDI_SUCCESS); 1690 } 1691 1692 void 1693 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data) 1694 { 1695 rx_sw_packet_t *packet, *next_packet; 1696 1697 if (rx_data == NULL) 1698 return; 1699 1700 packet = rx_data->packet_area; 1701 while (packet != NULL) { 1702 next_packet = packet->next; 1703 e1000g_free_rx_sw_packet(packet, B_TRUE); 1704 packet = next_packet; 1705 } 1706 rx_data->packet_area = NULL; 1707 } 1708 1709 void 1710 e1000g_free_rx_data(e1000g_rx_data_t *rx_data) 1711 { 1712 if (rx_data == NULL) 1713 return; 1714 1715 mutex_destroy(&rx_data->freelist_lock); 1716 mutex_destroy(&rx_data->recycle_lock); 1717 1718 kmem_free(rx_data, sizeof (e1000g_rx_data_t)); 1719 } 1720 1721 /* 1722 * Check if the link is up 1723 */ 1724 static boolean_t 1725 e1000g_link_up(struct e1000g *Adapter) 1726 { 1727 struct e1000_hw *hw = &Adapter->shared; 1728 boolean_t link_up = B_FALSE; 1729 1730 /* 1731 * get_link_status is set in the interrupt handler on link-status-change 1732 * or rx sequence error interrupt. get_link_status will stay 1733 * false until the e1000_check_for_link establishes link only 1734 * for copper adapters. 
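 * For fiber and serdes media the link state is taken directly from the
 * STATUS register or from mac.serdes_has_link below.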
1735 */ 1736 switch (hw->phy.media_type) { 1737 case e1000_media_type_copper: 1738 if (hw->mac.get_link_status) { 1739 /* 1740 * SPT and newer devices need a bit of extra time before 1741 * we ask them. 1742 */ 1743 if (hw->mac.type >= e1000_pch_spt) 1744 msec_delay(50); 1745 (void) e1000_check_for_link(hw); 1746 if ((E1000_READ_REG(hw, E1000_STATUS) & 1747 E1000_STATUS_LU)) { 1748 link_up = B_TRUE; 1749 } else { 1750 link_up = !hw->mac.get_link_status; 1751 } 1752 } else { 1753 link_up = B_TRUE; 1754 } 1755 break; 1756 case e1000_media_type_fiber: 1757 (void) e1000_check_for_link(hw); 1758 link_up = (E1000_READ_REG(hw, E1000_STATUS) & 1759 E1000_STATUS_LU); 1760 break; 1761 case e1000_media_type_internal_serdes: 1762 (void) e1000_check_for_link(hw); 1763 link_up = hw->mac.serdes_has_link; 1764 break; 1765 } 1766 1767 return (link_up); 1768 } 1769 1770 static void 1771 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp) 1772 { 1773 struct iocblk *iocp; 1774 struct e1000g *e1000gp; 1775 enum ioc_reply status; 1776 1777 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr; 1778 iocp->ioc_error = 0; 1779 e1000gp = (struct e1000g *)arg; 1780 1781 ASSERT(e1000gp); 1782 if (e1000gp == NULL) { 1783 miocnak(q, mp, 0, EINVAL); 1784 return; 1785 } 1786 1787 rw_enter(&e1000gp->chip_lock, RW_READER); 1788 if (e1000gp->e1000g_state & E1000G_SUSPENDED) { 1789 rw_exit(&e1000gp->chip_lock); 1790 miocnak(q, mp, 0, EINVAL); 1791 return; 1792 } 1793 rw_exit(&e1000gp->chip_lock); 1794 1795 switch (iocp->ioc_cmd) { 1796 1797 case LB_GET_INFO_SIZE: 1798 case LB_GET_INFO: 1799 case LB_GET_MODE: 1800 case LB_SET_MODE: 1801 status = e1000g_loopback_ioctl(e1000gp, iocp, mp); 1802 break; 1803 1804 1805 #ifdef E1000G_DEBUG 1806 case E1000G_IOC_REG_PEEK: 1807 case E1000G_IOC_REG_POKE: 1808 status = e1000g_pp_ioctl(e1000gp, iocp, mp); 1809 break; 1810 case E1000G_IOC_CHIP_RESET: 1811 e1000gp->reset_count++; 1812 if (e1000g_reset_adapter(e1000gp)) 1813 status = IOC_ACK; 1814 else 1815 status = IOC_INVAL; 1816 break; 1817 #endif 1818 default: 1819 status = IOC_INVAL; 1820 break; 1821 } 1822 1823 /* 1824 * Decide how to reply 1825 */ 1826 switch (status) { 1827 default: 1828 case IOC_INVAL: 1829 /* 1830 * Error, reply with a NAK and EINVAL or the specified error 1831 */ 1832 miocnak(q, mp, 0, iocp->ioc_error == 0 ? 1833 EINVAL : iocp->ioc_error); 1834 break; 1835 1836 case IOC_DONE: 1837 /* 1838 * OK, reply already sent 1839 */ 1840 break; 1841 1842 case IOC_ACK: 1843 /* 1844 * OK, reply with an ACK 1845 */ 1846 miocack(q, mp, 0, 0); 1847 break; 1848 1849 case IOC_REPLY: 1850 /* 1851 * OK, send prepared reply as ACK or NAK 1852 */ 1853 mp->b_datap->db_type = iocp->ioc_error == 0 ? 1854 M_IOCACK : M_IOCNAK; 1855 qreply(q, mp); 1856 break; 1857 } 1858 } 1859 1860 /* 1861 * The default value of e1000g_poll_mode == 0 assumes that the NIC is 1862 * capable of supporting only one interrupt and we shouldn't disable 1863 * the physical interrupt. In this case we let the interrupt come and 1864 * we queue the packets in the rx ring itself in case we are in polling 1865 * mode (better latency but slightly lower performance and a very 1866 * high intrrupt count in mpstat which is harmless). 1867 * 1868 * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt 1869 * which can be disabled in poll mode. This gives better overall 1870 * throughput (compared to the mode above), shows very low interrupt 1871 * count but has slightly higher latency since we pick the packets when 1872 * the poll thread does polling. 
1873 * 1874 * Currently, this flag should be enabled only while doing performance 1875 * measurement or when it can be guaranteed that entire NIC going 1876 * in poll mode will not harm any traffic like cluster heartbeat etc. 1877 */ 1878 int e1000g_poll_mode = 0; 1879 1880 /* 1881 * Called from the upper layers when driver is in polling mode to 1882 * pick up any queued packets. Care should be taken to not block 1883 * this thread. 1884 */ 1885 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup) 1886 { 1887 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg; 1888 mblk_t *mp = NULL; 1889 mblk_t *tail; 1890 struct e1000g *adapter; 1891 1892 adapter = rx_ring->adapter; 1893 1894 rw_enter(&adapter->chip_lock, RW_READER); 1895 1896 if (adapter->e1000g_state & E1000G_SUSPENDED) { 1897 rw_exit(&adapter->chip_lock); 1898 return (NULL); 1899 } 1900 1901 mutex_enter(&rx_ring->rx_lock); 1902 mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup); 1903 mutex_exit(&rx_ring->rx_lock); 1904 rw_exit(&adapter->chip_lock); 1905 return (mp); 1906 } 1907 1908 static int 1909 e1000g_m_start(void *arg) 1910 { 1911 struct e1000g *Adapter = (struct e1000g *)arg; 1912 1913 rw_enter(&Adapter->chip_lock, RW_WRITER); 1914 1915 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 1916 rw_exit(&Adapter->chip_lock); 1917 return (ECANCELED); 1918 } 1919 1920 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { 1921 rw_exit(&Adapter->chip_lock); 1922 return (ENOTACTIVE); 1923 } 1924 1925 Adapter->e1000g_state |= E1000G_STARTED; 1926 1927 rw_exit(&Adapter->chip_lock); 1928 1929 /* Enable and start the watchdog timer */ 1930 enable_watchdog_timer(Adapter); 1931 1932 return (0); 1933 } 1934 1935 static int 1936 e1000g_start(struct e1000g *Adapter, boolean_t global) 1937 { 1938 e1000g_rx_data_t *rx_data; 1939 1940 if (global) { 1941 if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) { 1942 e1000g_log(Adapter, CE_WARN, "Allocate rx data failed"); 1943 goto start_fail; 1944 } 1945 1946 /* Allocate dma resources for descriptors and buffers */ 1947 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) { 1948 e1000g_log(Adapter, CE_WARN, 1949 "Alloc DMA resources failed"); 1950 goto start_fail; 1951 } 1952 Adapter->rx_buffer_setup = B_FALSE; 1953 } 1954 1955 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) { 1956 if (e1000g_init(Adapter) != DDI_SUCCESS) { 1957 e1000g_log(Adapter, CE_WARN, 1958 "Adapter initialization failed"); 1959 goto start_fail; 1960 } 1961 } 1962 1963 /* Setup and initialize the transmit structures */ 1964 e1000g_tx_setup(Adapter); 1965 msec_delay(5); 1966 1967 /* Setup and initialize the receive structures */ 1968 e1000g_rx_setup(Adapter); 1969 msec_delay(5); 1970 1971 /* Restore the e1000g promiscuous mode */ 1972 e1000g_restore_promisc(Adapter); 1973 1974 e1000g_mask_interrupt(Adapter); 1975 1976 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 1977 1978 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1979 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1980 goto start_fail; 1981 } 1982 1983 return (DDI_SUCCESS); 1984 1985 start_fail: 1986 rx_data = Adapter->rx_ring->rx_data; 1987 1988 if (global) { 1989 e1000g_release_dma_resources(Adapter); 1990 e1000g_free_rx_pending_buffers(rx_data); 1991 e1000g_free_rx_data(rx_data); 1992 } 1993 1994 mutex_enter(&e1000g_nvm_lock); 1995 (void) e1000_reset_hw(&Adapter->shared); 1996 mutex_exit(&e1000g_nvm_lock); 1997 1998 return (DDI_FAILURE); 1999 } 2000 2001 /* 2002 * The I219 has the curious property that if the descriptor 
rings are not 2003 * emptied before resetting the hardware or before changing the device state 2004 * based on runtime power management, it'll cause the card to hang. This can 2005 * then only be fixed by a PCI reset. As such, for the I219 and it alone, we 2006 * have to flush the rings if we're in this state. 2007 */ 2008 static void 2009 e1000g_flush_desc_rings(struct e1000g *Adapter) 2010 { 2011 struct e1000_hw *hw = &Adapter->shared; 2012 u16 hang_state; 2013 u32 fext_nvm11, tdlen; 2014 2015 /* First, disable MULR fix in FEXTNVM11 */ 2016 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11); 2017 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; 2018 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11); 2019 2020 /* do nothing if we're not in faulty state, or if the queue is empty */ 2021 tdlen = E1000_READ_REG(hw, E1000_TDLEN(0)); 2022 hang_state = pci_config_get16(Adapter->osdep.cfg_handle, 2023 PCICFG_DESC_RING_STATUS); 2024 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) 2025 return; 2026 e1000g_flush_tx_ring(Adapter); 2027 2028 /* recheck, maybe the fault is caused by the rx ring */ 2029 hang_state = pci_config_get16(Adapter->osdep.cfg_handle, 2030 PCICFG_DESC_RING_STATUS); 2031 if (hang_state & FLUSH_DESC_REQUIRED) 2032 e1000g_flush_rx_ring(Adapter); 2033 2034 } 2035 2036 static void 2037 e1000g_m_stop(void *arg) 2038 { 2039 struct e1000g *Adapter = (struct e1000g *)arg; 2040 2041 /* Drain tx sessions */ 2042 (void) e1000g_tx_drain(Adapter); 2043 2044 rw_enter(&Adapter->chip_lock, RW_WRITER); 2045 2046 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2047 rw_exit(&Adapter->chip_lock); 2048 return; 2049 } 2050 Adapter->e1000g_state &= ~E1000G_STARTED; 2051 e1000g_stop(Adapter, B_TRUE); 2052 2053 rw_exit(&Adapter->chip_lock); 2054 2055 /* Disable and stop all the timers */ 2056 disable_watchdog_timer(Adapter); 2057 stop_link_timer(Adapter); 2058 stop_82547_timer(Adapter->tx_ring); 2059 } 2060 2061 static void 2062 e1000g_stop(struct e1000g *Adapter, boolean_t global) 2063 { 2064 private_devi_list_t *devi_node; 2065 e1000g_rx_data_t *rx_data; 2066 int result; 2067 2068 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT; 2069 2070 /* Stop the chip and release pending resources */ 2071 2072 /* Tell firmware driver is no longer in control */ 2073 e1000g_release_driver_control(&Adapter->shared); 2074 2075 e1000g_clear_all_interrupts(Adapter); 2076 2077 mutex_enter(&e1000g_nvm_lock); 2078 result = e1000_reset_hw(&Adapter->shared); 2079 mutex_exit(&e1000g_nvm_lock); 2080 2081 if (result != E1000_SUCCESS) { 2082 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 2083 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 2084 } 2085 2086 mutex_enter(&Adapter->link_lock); 2087 Adapter->link_complete = B_FALSE; 2088 mutex_exit(&Adapter->link_lock); 2089 2090 /* Release resources still held by the TX descriptors */ 2091 e1000g_tx_clean(Adapter); 2092 2093 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 2094 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 2095 2096 /* Clean the pending rx jumbo packet fragment */ 2097 e1000g_rx_clean(Adapter); 2098 2099 /* 2100 * The I219, eg. the pch_spt, has bugs such that we must ensure that 2101 * rings are flushed before we do anything else. This must be done 2102 * before we release DMA resources. 
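 * The actual work is done by e1000g_flush_desc_rings() above, which
 * flushes the TX ring first and then, if the FLUSH_DESC_REQUIRED bit is
 * still set in the descriptor ring status word, flushes the RX ring as
 * well.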
2103 */ 2104 if (Adapter->shared.mac.type >= e1000_pch_spt) 2105 e1000g_flush_desc_rings(Adapter); 2106 2107 if (global) { 2108 e1000g_release_dma_resources(Adapter); 2109 2110 mutex_enter(&e1000g_rx_detach_lock); 2111 rx_data = Adapter->rx_ring->rx_data; 2112 rx_data->flag |= E1000G_RX_STOPPED; 2113 2114 if (rx_data->pending_count == 0) { 2115 e1000g_free_rx_pending_buffers(rx_data); 2116 e1000g_free_rx_data(rx_data); 2117 } else { 2118 devi_node = rx_data->priv_devi_node; 2119 if (devi_node != NULL) 2120 atomic_inc_32(&devi_node->pending_rx_count); 2121 else 2122 atomic_inc_32(&Adapter->pending_rx_count); 2123 } 2124 mutex_exit(&e1000g_rx_detach_lock); 2125 } 2126 2127 if (Adapter->link_state != LINK_STATE_UNKNOWN) { 2128 Adapter->link_state = LINK_STATE_UNKNOWN; 2129 if (!Adapter->reset_flag) 2130 mac_link_update(Adapter->mh, Adapter->link_state); 2131 } 2132 } 2133 2134 static void 2135 e1000g_rx_clean(struct e1000g *Adapter) 2136 { 2137 e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data; 2138 2139 if (rx_data == NULL) 2140 return; 2141 2142 if (rx_data->rx_mblk != NULL) { 2143 freemsg(rx_data->rx_mblk); 2144 rx_data->rx_mblk = NULL; 2145 rx_data->rx_mblk_tail = NULL; 2146 rx_data->rx_mblk_len = 0; 2147 } 2148 } 2149 2150 static void 2151 e1000g_tx_clean(struct e1000g *Adapter) 2152 { 2153 e1000g_tx_ring_t *tx_ring; 2154 p_tx_sw_packet_t packet; 2155 mblk_t *mp; 2156 mblk_t *nmp; 2157 uint32_t packet_count; 2158 2159 tx_ring = Adapter->tx_ring; 2160 2161 /* 2162 * Here we don't need to protect the lists using 2163 * the usedlist_lock and freelist_lock, for they 2164 * have been protected by the chip_lock. 2165 */ 2166 mp = NULL; 2167 nmp = NULL; 2168 packet_count = 0; 2169 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list); 2170 while (packet != NULL) { 2171 if (packet->mp != NULL) { 2172 /* Assemble the message chain */ 2173 if (mp == NULL) { 2174 mp = packet->mp; 2175 nmp = packet->mp; 2176 } else { 2177 nmp->b_next = packet->mp; 2178 nmp = packet->mp; 2179 } 2180 /* Disconnect the message from the sw packet */ 2181 packet->mp = NULL; 2182 } 2183 2184 e1000g_free_tx_swpkt(packet); 2185 packet_count++; 2186 2187 packet = (p_tx_sw_packet_t) 2188 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link); 2189 } 2190 2191 if (mp != NULL) 2192 freemsgchain(mp); 2193 2194 if (packet_count > 0) { 2195 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list); 2196 QUEUE_INIT_LIST(&tx_ring->used_list); 2197 2198 /* Setup TX descriptor pointers */ 2199 tx_ring->tbd_next = tx_ring->tbd_first; 2200 tx_ring->tbd_oldest = tx_ring->tbd_first; 2201 2202 /* Setup our HW Tx Head & Tail descriptor pointers */ 2203 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0); 2204 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0); 2205 } 2206 } 2207 2208 static boolean_t 2209 e1000g_tx_drain(struct e1000g *Adapter) 2210 { 2211 int i; 2212 boolean_t done; 2213 e1000g_tx_ring_t *tx_ring; 2214 2215 tx_ring = Adapter->tx_ring; 2216 2217 /* Allow up to 'wsdraintime' for pending xmit's to complete. */ 2218 for (i = 0; i < TX_DRAIN_TIME; i++) { 2219 mutex_enter(&tx_ring->usedlist_lock); 2220 done = IS_QUEUE_EMPTY(&tx_ring->used_list); 2221 mutex_exit(&tx_ring->usedlist_lock); 2222 2223 if (done) 2224 break; 2225 2226 msec_delay(1); 2227 } 2228 2229 return (done); 2230 } 2231 2232 static boolean_t 2233 e1000g_rx_drain(struct e1000g *Adapter) 2234 { 2235 int i; 2236 boolean_t done; 2237 2238 /* 2239 * Allow up to RX_DRAIN_TIME for pending received packets to complete. 
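 * The loop below polls pending_rx_count once per millisecond, so the
 * total wait is bounded by roughly RX_DRAIN_TIME milliseconds.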
2240 */ 2241 for (i = 0; i < RX_DRAIN_TIME; i++) { 2242 done = (Adapter->pending_rx_count == 0); 2243 2244 if (done) 2245 break; 2246 2247 msec_delay(1); 2248 } 2249 2250 return (done); 2251 } 2252 2253 static boolean_t 2254 e1000g_reset_adapter(struct e1000g *Adapter) 2255 { 2256 /* Disable and stop all the timers */ 2257 disable_watchdog_timer(Adapter); 2258 stop_link_timer(Adapter); 2259 stop_82547_timer(Adapter->tx_ring); 2260 2261 rw_enter(&Adapter->chip_lock, RW_WRITER); 2262 2263 if (Adapter->stall_flag) { 2264 Adapter->stall_flag = B_FALSE; 2265 Adapter->reset_flag = B_TRUE; 2266 } 2267 2268 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 2269 rw_exit(&Adapter->chip_lock); 2270 return (B_TRUE); 2271 } 2272 2273 e1000g_stop(Adapter, B_FALSE); 2274 2275 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { 2276 rw_exit(&Adapter->chip_lock); 2277 e1000g_log(Adapter, CE_WARN, "Reset failed"); 2278 return (B_FALSE); 2279 } 2280 2281 rw_exit(&Adapter->chip_lock); 2282 2283 /* Enable and start the watchdog timer */ 2284 enable_watchdog_timer(Adapter); 2285 2286 return (B_TRUE); 2287 } 2288 2289 boolean_t 2290 e1000g_global_reset(struct e1000g *Adapter) 2291 { 2292 /* Disable and stop all the timers */ 2293 disable_watchdog_timer(Adapter); 2294 stop_link_timer(Adapter); 2295 stop_82547_timer(Adapter->tx_ring); 2296 2297 rw_enter(&Adapter->chip_lock, RW_WRITER); 2298 2299 e1000g_stop(Adapter, B_TRUE); 2300 2301 Adapter->init_count = 0; 2302 2303 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { 2304 rw_exit(&Adapter->chip_lock); 2305 e1000g_log(Adapter, CE_WARN, "Reset failed"); 2306 return (B_FALSE); 2307 } 2308 2309 rw_exit(&Adapter->chip_lock); 2310 2311 /* Enable and start the watchdog timer */ 2312 enable_watchdog_timer(Adapter); 2313 2314 return (B_TRUE); 2315 } 2316 2317 /* 2318 * e1000g_intr_pciexpress - ISR for PCI Express chipsets 2319 * 2320 * This interrupt service routine is for PCI-Express adapters. 2321 * The ICR contents is valid only when the E1000_ICR_INT_ASSERTED 2322 * bit is set. 2323 */ 2324 static uint_t 2325 e1000g_intr_pciexpress(caddr_t arg, caddr_t arg1 __unused) 2326 { 2327 struct e1000g *Adapter; 2328 uint32_t icr; 2329 2330 Adapter = (struct e1000g *)(uintptr_t)arg; 2331 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 2332 2333 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2334 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2335 return (DDI_INTR_CLAIMED); 2336 } 2337 2338 if (icr & E1000_ICR_INT_ASSERTED) { 2339 /* 2340 * E1000_ICR_INT_ASSERTED bit was set: 2341 * Read(Clear) the ICR, claim this interrupt, 2342 * look for work to do. 2343 */ 2344 e1000g_intr_work(Adapter, icr); 2345 return (DDI_INTR_CLAIMED); 2346 } else { 2347 /* 2348 * E1000_ICR_INT_ASSERTED bit was not set: 2349 * Don't claim this interrupt, return immediately. 2350 */ 2351 return (DDI_INTR_UNCLAIMED); 2352 } 2353 } 2354 2355 /* 2356 * e1000g_intr - ISR for PCI/PCI-X chipsets 2357 * 2358 * This interrupt service routine is for PCI/PCI-X adapters. 2359 * We check the ICR contents no matter the E1000_ICR_INT_ASSERTED 2360 * bit is set or not. 
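 * For those parts there is no INT_ASSERTED indication, so any non-zero
 * ICR value is treated as our interrupt; the read itself clears the
 * register.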
2361 */ 2362 static uint_t 2363 e1000g_intr(caddr_t arg, caddr_t arg1 __unused) 2364 { 2365 struct e1000g *Adapter; 2366 uint32_t icr; 2367 2368 Adapter = (struct e1000g *)(uintptr_t)arg; 2369 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 2370 2371 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2372 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2373 return (DDI_INTR_CLAIMED); 2374 } 2375 2376 if (icr) { 2377 /* 2378 * Any bit was set in ICR: 2379 * Read(Clear) the ICR, claim this interrupt, 2380 * look for work to do. 2381 */ 2382 e1000g_intr_work(Adapter, icr); 2383 return (DDI_INTR_CLAIMED); 2384 } else { 2385 /* 2386 * No bit was set in ICR: 2387 * Don't claim this interrupt, return immediately. 2388 */ 2389 return (DDI_INTR_UNCLAIMED); 2390 } 2391 } 2392
2393 /* 2394 * e1000g_intr_work - actual processing of ISR 2395 * 2396 * Read(clear) the ICR contents and call appropriate interrupt 2397 * processing routines. 2398 */ 2399 static void 2400 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr) 2401 { 2402 struct e1000_hw *hw; 2403 hw = &Adapter->shared; 2404 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 2405 2406 Adapter->rx_pkt_cnt = 0; 2407 Adapter->tx_pkt_cnt = 0; 2408 2409 rw_enter(&Adapter->chip_lock, RW_READER); 2410 2411 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2412 rw_exit(&Adapter->chip_lock); 2413 return; 2414 } 2415 /* 2416 * Here we need to check the "e1000g_state" flag within the chip_lock to 2417 * ensure the receive routine will not execute when the adapter is 2418 * being reset. 2419 */ 2420 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 2421 rw_exit(&Adapter->chip_lock); 2422 return; 2423 } 2424
2425 if (icr & E1000_ICR_RXT0) { 2426 mblk_t *mp = NULL; 2427 mblk_t *tail = NULL; 2428 e1000g_rx_ring_t *rx_ring; 2429 2430 rx_ring = Adapter->rx_ring; 2431 mutex_enter(&rx_ring->rx_lock); 2432 /* 2433 * Sometimes with legacy interrupts, it is possible that 2434 * there is a single interrupt for Rx/Tx. In that 2435 * case, if the poll flag is set, we shouldn't really 2436 * be doing Rx processing. 2437 */ 2438 if (!rx_ring->poll_flag) 2439 mp = e1000g_receive(rx_ring, &tail, 2440 E1000G_CHAIN_NO_LIMIT); 2441 mutex_exit(&rx_ring->rx_lock); 2442 rw_exit(&Adapter->chip_lock); 2443 if (mp != NULL) 2444 mac_rx_ring(Adapter->mh, rx_ring->mrh, 2445 mp, rx_ring->ring_gen_num); 2446 } else 2447 rw_exit(&Adapter->chip_lock); 2448
2449 if (icr & E1000_ICR_TXDW) { 2450 if (!Adapter->tx_intr_enable) 2451 e1000g_clear_tx_interrupt(Adapter); 2452 2453 /* Recycle the tx descriptors */ 2454 rw_enter(&Adapter->chip_lock, RW_READER); 2455 (void) e1000g_recycle(tx_ring); 2456 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr); 2457 rw_exit(&Adapter->chip_lock); 2458 2459 if (tx_ring->resched_needed && 2460 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) { 2461 tx_ring->resched_needed = B_FALSE; 2462 mac_tx_update(Adapter->mh); 2463 E1000G_STAT(tx_ring->stat_reschedule); 2464 } 2465 } 2466
2467 /* 2468 * The Receive Sequence errors RXSEQ and the link status change LSC 2469 * are checked to detect that the cable has been pulled out. For 2470 * the Wiseman 2.0 silicon, the receive sequence error interrupts 2471 * are an indication that the cable is not connected.
2472 */ 2473 if ((icr & E1000_ICR_RXSEQ) || 2474 (icr & E1000_ICR_LSC) || 2475 (icr & E1000_ICR_GPI_EN1)) { 2476 boolean_t link_changed; 2477 timeout_id_t tid = 0; 2478 2479 stop_watchdog_timer(Adapter); 2480 2481 rw_enter(&Adapter->chip_lock, RW_WRITER); 2482 2483 /* 2484 * Because we got a link-status-change interrupt, force 2485 * e1000_check_for_link() to look at phy 2486 */ 2487 Adapter->shared.mac.get_link_status = B_TRUE; 2488 2489 /* e1000g_link_check takes care of link status change */ 2490 link_changed = e1000g_link_check(Adapter); 2491 2492 /* Get new phy state */ 2493 e1000g_get_phy_state(Adapter); 2494 2495 /* 2496 * If the link timer has not timed out, we'll not notify 2497 * the upper layer with any link state until the link is up. 2498 */ 2499 if (link_changed && !Adapter->link_complete) { 2500 if (Adapter->link_state == LINK_STATE_UP) { 2501 mutex_enter(&Adapter->link_lock); 2502 Adapter->link_complete = B_TRUE; 2503 tid = Adapter->link_tid; 2504 Adapter->link_tid = 0; 2505 mutex_exit(&Adapter->link_lock); 2506 } else { 2507 link_changed = B_FALSE; 2508 } 2509 } 2510 rw_exit(&Adapter->chip_lock); 2511 2512 if (link_changed) { 2513 if (tid != 0) 2514 (void) untimeout(tid); 2515 2516 /* 2517 * Workaround for esb2. Data stuck in fifo on a link 2518 * down event. Stop receiver here and reset in watchdog. 2519 */ 2520 if ((Adapter->link_state == LINK_STATE_DOWN) && 2521 (Adapter->shared.mac.type == e1000_80003es2lan)) { 2522 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL); 2523 E1000_WRITE_REG(hw, E1000_RCTL, 2524 rctl & ~E1000_RCTL_EN); 2525 e1000g_log(Adapter, CE_WARN, 2526 "ESB2 receiver disabled"); 2527 Adapter->esb2_workaround = B_TRUE; 2528 } 2529 if (!Adapter->reset_flag) 2530 mac_link_update(Adapter->mh, 2531 Adapter->link_state); 2532 if (Adapter->link_state == LINK_STATE_UP) 2533 Adapter->reset_flag = B_FALSE; 2534 } 2535 2536 start_watchdog_timer(Adapter); 2537 } 2538 } 2539 2540 static void 2541 e1000g_init_unicst(struct e1000g *Adapter) 2542 { 2543 struct e1000_hw *hw; 2544 int slot; 2545 2546 hw = &Adapter->shared; 2547 2548 if (Adapter->init_count == 0) { 2549 /* Initialize the multiple unicast addresses */ 2550 Adapter->unicst_total = min(hw->mac.rar_entry_count, 2551 MAX_NUM_UNICAST_ADDRESSES); 2552 2553 /* 2554 * The common code does not correctly calculate the number of 2555 * rar's that could be reserved by firmware for the pch_lpt and 2556 * pch_spt macs. The interface has one primary rar, and 11 2557 * additional ones. Those 11 additional ones are not always 2558 * available. According to the datasheet, we need to check a 2559 * few of the bits set in the FWSM register. If the value is 2560 * zero, everything is available. If the value is 1, none of the 2561 * additional registers are available. If the value is 2-7, only 2562 * that number are available. 
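 * For example, a WLOCK_MAC value of 3 leaves only 3 of the additional
 * RARs usable, so together with the primary RAR the driver may use
 * 1 + 3 = 4 slots.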
2563 */ 2564 if (hw->mac.type >= e1000_pch_lpt) { 2565 uint32_t locked, rar; 2566 2567 locked = E1000_READ_REG(hw, E1000_FWSM) & 2568 E1000_FWSM_WLOCK_MAC_MASK; 2569 locked >>= E1000_FWSM_WLOCK_MAC_SHIFT; 2570 rar = 1; 2571 if (locked == 0) 2572 rar += 11; 2573 else if (locked == 1) 2574 rar += 0; 2575 else 2576 rar += locked; 2577 Adapter->unicst_total = min(rar, 2578 MAX_NUM_UNICAST_ADDRESSES); 2579 } 2580
2581 /* Workaround for an erratum of 82571 chipset */ 2582 if ((hw->mac.type == e1000_82571) && 2583 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2584 Adapter->unicst_total--; 2585 2586 /* VMware doesn't support multiple mac addresses properly */ 2587 if (hw->subsystem_vendor_id == 0x15ad) 2588 Adapter->unicst_total = 1; 2589 2590 Adapter->unicst_avail = Adapter->unicst_total; 2591 2592 for (slot = 0; slot < Adapter->unicst_total; slot++) { 2593 /* Clear both the flag and MAC address */ 2594 Adapter->unicst_addr[slot].reg.high = 0; 2595 Adapter->unicst_addr[slot].reg.low = 0; 2596 } 2597 } else { 2598 /* Workaround for an erratum of 82571 chipset */ 2599 if ((hw->mac.type == e1000_82571) && 2600 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2601 (void) e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY); 2602 2603 /* Re-configure the RAR registers */ 2604 for (slot = 0; slot < Adapter->unicst_total; slot++) 2605 if (Adapter->unicst_addr[slot].mac.set == 1) 2606 (void) e1000_rar_set(hw, 2607 Adapter->unicst_addr[slot].mac.addr, slot); 2608 } 2609 2610 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 2611 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2612 } 2613
2614 static int 2615 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr, 2616 int slot) 2617 { 2618 struct e1000_hw *hw; 2619 2620 hw = &Adapter->shared; 2621 2622 /* 2623 * The first revision of Wiseman silicon (rev 2.0) has an erratum 2624 * that requires the receiver to be in reset when any of the 2625 * receive address registers (RAR regs) are accessed. The first 2626 * rev of Wiseman silicon also requires MWI to be disabled when 2627 * a global reset or a receive reset is issued. So before we 2628 * initialize the RARs, we check the rev of the Wiseman controller 2629 * and work around any necessary HW errata.
2630 */ 2631 if ((hw->mac.type == e1000_82542) && 2632 (hw->revision_id == E1000_REVISION_2)) { 2633 e1000_pci_clear_mwi(hw); 2634 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); 2635 msec_delay(5); 2636 } 2637 if (mac_addr == NULL) { 2638 E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0); 2639 E1000_WRITE_FLUSH(hw); 2640 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0); 2641 E1000_WRITE_FLUSH(hw); 2642 /* Clear both the flag and MAC address */ 2643 Adapter->unicst_addr[slot].reg.high = 0; 2644 Adapter->unicst_addr[slot].reg.low = 0; 2645 } else { 2646 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr, 2647 ETHERADDRL); 2648 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot); 2649 Adapter->unicst_addr[slot].mac.set = 1; 2650 } 2651
2652 /* Workaround for an erratum of 82571 chipset */ 2653 if (slot == 0) { 2654 if ((hw->mac.type == e1000_82571) && 2655 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2656 if (mac_addr == NULL) { 2657 E1000_WRITE_REG_ARRAY(hw, E1000_RA, 2658 slot << 1, 0); 2659 E1000_WRITE_FLUSH(hw); 2660 E1000_WRITE_REG_ARRAY(hw, E1000_RA, 2661 (slot << 1) + 1, 0); 2662 E1000_WRITE_FLUSH(hw); 2663 } else { 2664 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, 2665 LAST_RAR_ENTRY); 2666 } 2667 } 2668
2669 /* 2670 * If we are using Wiseman rev 2.0 silicon, we will have previously 2671 * put the receiver in reset, and disabled MWI, to work around some 2672 * HW errata. Now we should take the receiver out of reset, and 2673 * re-enable MWI if it was previously enabled by the PCI BIOS. 2674 */ 2675 if ((hw->mac.type == e1000_82542) && 2676 (hw->revision_id == E1000_REVISION_2)) { 2677 E1000_WRITE_REG(hw, E1000_RCTL, 0); 2678 msec_delay(1); 2679 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 2680 e1000_pci_set_mwi(hw); 2681 e1000g_rx_setup(Adapter); 2682 } 2683 2684 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2685 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2686 return (EIO); 2687 } 2688 2689 return (0); 2690 } 2691
2692 static int 2693 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr) 2694 { 2695 struct e1000_hw *hw = &Adapter->shared; 2696 struct ether_addr *newtable; 2697 size_t new_len; 2698 size_t old_len; 2699 int res = 0; 2700 2701 if ((multiaddr[0] & 01) == 0) { 2702 res = EINVAL; 2703 e1000g_log(Adapter, CE_WARN, "Illegal multicast address"); 2704 goto done; 2705 } 2706 2707 if (Adapter->mcast_count >= Adapter->mcast_max_num) { 2708 res = ENOENT; 2709 e1000g_log(Adapter, CE_WARN, 2710 "Adapter requested more than %d mcast addresses", 2711 Adapter->mcast_max_num); 2712 goto done; 2713 } 2714 2715 2716 if (Adapter->mcast_count == Adapter->mcast_alloc_count) { 2717 old_len = Adapter->mcast_alloc_count * 2718 sizeof (struct ether_addr); 2719 new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) * 2720 sizeof (struct ether_addr); 2721 2722 newtable = kmem_alloc(new_len, KM_NOSLEEP); 2723 if (newtable == NULL) { 2724 res = ENOMEM; 2725 e1000g_log(Adapter, CE_WARN, 2726 "Not enough memory to alloc mcast table"); 2727 goto done; 2728 } 2729 2730 if (Adapter->mcast_table != NULL) { 2731 bcopy(Adapter->mcast_table, newtable, old_len); 2732 kmem_free(Adapter->mcast_table, old_len); 2733 } 2734 Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE; 2735 Adapter->mcast_table = newtable; 2736 } 2737 2738 bcopy(multiaddr, 2739 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL); 2740 Adapter->mcast_count++; 2741 2742 /* 2743 * Update the MC table in the hardware 2744 */ 2745 e1000g_clear_interrupt(Adapter); 2746 2747
e1000_update_mc_addr_list(hw, 2748 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 2749 2750 e1000g_mask_interrupt(Adapter); 2751 2752 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2753 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2754 res = EIO; 2755 } 2756 2757 done: 2758 return (res); 2759 } 2760 2761 static int 2762 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr) 2763 { 2764 struct e1000_hw *hw = &Adapter->shared; 2765 struct ether_addr *newtable; 2766 size_t new_len; 2767 size_t old_len; 2768 unsigned i; 2769 2770 for (i = 0; i < Adapter->mcast_count; i++) { 2771 if (bcmp(multiaddr, &Adapter->mcast_table[i], 2772 ETHERADDRL) == 0) { 2773 for (i++; i < Adapter->mcast_count; i++) { 2774 Adapter->mcast_table[i - 1] = 2775 Adapter->mcast_table[i]; 2776 } 2777 Adapter->mcast_count--; 2778 break; 2779 } 2780 } 2781 2782 if ((Adapter->mcast_alloc_count - Adapter->mcast_count) > 2783 MCAST_ALLOC_SIZE) { 2784 old_len = Adapter->mcast_alloc_count * 2785 sizeof (struct ether_addr); 2786 new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) * 2787 sizeof (struct ether_addr); 2788 2789 newtable = kmem_alloc(new_len, KM_NOSLEEP); 2790 if (newtable != NULL) { 2791 bcopy(Adapter->mcast_table, newtable, new_len); 2792 kmem_free(Adapter->mcast_table, old_len); 2793 2794 Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE; 2795 Adapter->mcast_table = newtable; 2796 } 2797 } 2798 2799 /* 2800 * Update the MC table in the hardware 2801 */ 2802 e1000g_clear_interrupt(Adapter); 2803 2804 e1000_update_mc_addr_list(hw, 2805 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 2806 2807 e1000g_mask_interrupt(Adapter); 2808 2809 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2810 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2811 return (EIO); 2812 } 2813 2814 return (0); 2815 } 2816 2817 static void 2818 e1000g_release_multicast(struct e1000g *Adapter) 2819 { 2820 if (Adapter->mcast_table != NULL) { 2821 kmem_free(Adapter->mcast_table, 2822 Adapter->mcast_alloc_count * sizeof (struct ether_addr)); 2823 Adapter->mcast_table = NULL; 2824 } 2825 } 2826 2827 int 2828 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr) 2829 { 2830 struct e1000g *Adapter = (struct e1000g *)arg; 2831 int result; 2832 2833 rw_enter(&Adapter->chip_lock, RW_WRITER); 2834 2835 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2836 result = ECANCELED; 2837 goto done; 2838 } 2839 2840 result = (add) ? 
multicst_add(Adapter, addr) 2841 : multicst_remove(Adapter, addr); 2842 2843 done: 2844 rw_exit(&Adapter->chip_lock); 2845 return (result); 2846 2847 } 2848
2849 int 2850 e1000g_m_promisc(void *arg, boolean_t on) 2851 { 2852 struct e1000g *Adapter = (struct e1000g *)arg; 2853 uint32_t rctl; 2854 2855 rw_enter(&Adapter->chip_lock, RW_WRITER); 2856 2857 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2858 rw_exit(&Adapter->chip_lock); 2859 return (ECANCELED); 2860 } 2861 2862 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL); 2863 2864 if (on) 2865 rctl |= 2866 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM); 2867 else 2868 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE)); 2869 2870 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl); 2871 2872 Adapter->e1000g_promisc = on; 2873 2874 rw_exit(&Adapter->chip_lock); 2875 2876 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2877 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2878 return (EIO); 2879 } 2880 2881 return (0); 2882 } 2883
2884 /* 2885 * Entry points to enable and disable interrupts at the granularity of 2886 * a group. 2887 * Turns the poll_mode for the whole adapter on and off to enable or 2888 * override the ring level polling control over the hardware interrupts. 2889 */ 2890 static int 2891 e1000g_rx_group_intr_enable(mac_intr_handle_t arg) 2892 { 2893 struct e1000g *adapter = (struct e1000g *)arg; 2894 e1000g_rx_ring_t *rx_ring = adapter->rx_ring; 2895 2896 /* 2897 * Later interrupts at the granularity of this ring will 2898 * invoke mac_rx() with NULL, indicating the need for another 2899 * software classification. 2900 * We have a single ring usable per adapter now, so we only need to 2901 * reset the rx handle for that one. 2902 * When more RX rings can be used, we should update each one of them. 2903 */ 2904 mutex_enter(&rx_ring->rx_lock); 2905 rx_ring->mrh = NULL; 2906 adapter->poll_mode = B_FALSE; 2907 mutex_exit(&rx_ring->rx_lock); 2908 return (0); 2909 } 2910
2911 static int 2912 e1000g_rx_group_intr_disable(mac_intr_handle_t arg) 2913 { 2914 struct e1000g *adapter = (struct e1000g *)arg; 2915 e1000g_rx_ring_t *rx_ring = adapter->rx_ring; 2916 2917 mutex_enter(&rx_ring->rx_lock); 2918 2919 /* 2920 * Later interrupts at the granularity of this ring will 2921 * invoke mac_rx() with the handle for this ring. 2922 */ 2923 adapter->poll_mode = B_TRUE; 2924 rx_ring->mrh = rx_ring->mrh_init; 2925 mutex_exit(&rx_ring->rx_lock); 2926 return (0); 2927 } 2928
2929 /* 2930 * Entry points to enable and disable interrupts at the granularity of 2931 * a ring. 2932 * adapter poll_mode controls whether we actually proceed with hardware 2933 * interrupt toggling.
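 *
 * When poll_mode is set, the MAC layer is expected to call mi_disable
 * (e1000g_rx_ring_intr_disable) to set poll_flag and mask RXT0, drain the
 * ring through the mri_poll entry point (e1000g_poll_ring), and then call
 * mi_enable (e1000g_rx_ring_intr_enable), which clears poll_flag, unmasks
 * RXT0 and triggers a software-initiated Rx interrupt so nothing left in
 * the ring is missed.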
2934 */ 2935 static int 2936 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh) 2937 { 2938 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh; 2939 struct e1000g *adapter = rx_ring->adapter; 2940 struct e1000_hw *hw = &adapter->shared; 2941 uint32_t intr_mask; 2942 2943 rw_enter(&adapter->chip_lock, RW_READER); 2944 2945 if (adapter->e1000g_state & E1000G_SUSPENDED) { 2946 rw_exit(&adapter->chip_lock); 2947 return (0); 2948 } 2949 2950 mutex_enter(&rx_ring->rx_lock); 2951 rx_ring->poll_flag = 0; 2952 mutex_exit(&rx_ring->rx_lock); 2953 2954 /* Rx interrupt enabling for MSI and legacy */ 2955 intr_mask = E1000_READ_REG(hw, E1000_IMS); 2956 intr_mask |= E1000_IMS_RXT0; 2957 E1000_WRITE_REG(hw, E1000_IMS, intr_mask); 2958 E1000_WRITE_FLUSH(hw); 2959 2960 /* Trigger a Rx interrupt to check Rx ring */ 2961 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 2962 E1000_WRITE_FLUSH(hw); 2963 2964 rw_exit(&adapter->chip_lock); 2965 return (0); 2966 } 2967 2968 static int 2969 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh) 2970 { 2971 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh; 2972 struct e1000g *adapter = rx_ring->adapter; 2973 struct e1000_hw *hw = &adapter->shared; 2974 2975 rw_enter(&adapter->chip_lock, RW_READER); 2976 2977 if (adapter->e1000g_state & E1000G_SUSPENDED) { 2978 rw_exit(&adapter->chip_lock); 2979 return (0); 2980 } 2981 mutex_enter(&rx_ring->rx_lock); 2982 rx_ring->poll_flag = 1; 2983 mutex_exit(&rx_ring->rx_lock); 2984 2985 /* Rx interrupt disabling for MSI and legacy */ 2986 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0); 2987 E1000_WRITE_FLUSH(hw); 2988 2989 rw_exit(&adapter->chip_lock); 2990 return (0); 2991 } 2992 2993 /* 2994 * e1000g_unicst_find - Find the slot for the specified unicast address 2995 */ 2996 static int 2997 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr) 2998 { 2999 int slot; 3000 3001 for (slot = 0; slot < Adapter->unicst_total; slot++) { 3002 if ((Adapter->unicst_addr[slot].mac.set == 1) && 3003 (bcmp(Adapter->unicst_addr[slot].mac.addr, 3004 mac_addr, ETHERADDRL) == 0)) 3005 return (slot); 3006 } 3007 3008 return (-1); 3009 } 3010 3011 /* 3012 * Entry points to add and remove a MAC address to a ring group. 3013 * The caller takes care of adding and removing the MAC addresses 3014 * to the filter via these two routines. 
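 * Each address occupies one unicst_addr[] slot, which maps to a hardware
 * receive address register programmed through e1000g_unicst_set(); passing
 * a NULL address to that routine clears the slot again.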
3015 */ 3016 3017 static int 3018 e1000g_addmac(void *arg, const uint8_t *mac_addr) 3019 { 3020 struct e1000g *Adapter = (struct e1000g *)arg; 3021 int slot, err; 3022 3023 rw_enter(&Adapter->chip_lock, RW_WRITER); 3024 3025 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3026 rw_exit(&Adapter->chip_lock); 3027 return (ECANCELED); 3028 } 3029 3030 if (e1000g_unicst_find(Adapter, mac_addr) != -1) { 3031 /* The same address is already in a slot */ 3032 rw_exit(&Adapter->chip_lock); 3033 return (0); 3034 } 3035 3036 if (Adapter->unicst_avail == 0) { 3037 /* no slots available */ 3038 rw_exit(&Adapter->chip_lock); 3039 return (ENOSPC); 3040 } 3041 3042 /* Search for a free slot */ 3043 for (slot = 0; slot < Adapter->unicst_total; slot++) { 3044 if (Adapter->unicst_addr[slot].mac.set == 0) 3045 break; 3046 } 3047 ASSERT(slot < Adapter->unicst_total); 3048 3049 err = e1000g_unicst_set(Adapter, mac_addr, slot); 3050 if (err == 0) 3051 Adapter->unicst_avail--; 3052 3053 rw_exit(&Adapter->chip_lock); 3054 3055 return (err); 3056 } 3057
3058 static int 3059 e1000g_remmac(void *arg, const uint8_t *mac_addr) 3060 { 3061 struct e1000g *Adapter = (struct e1000g *)arg; 3062 int slot, err; 3063 3064 rw_enter(&Adapter->chip_lock, RW_WRITER); 3065 3066 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3067 rw_exit(&Adapter->chip_lock); 3068 return (ECANCELED); 3069 } 3070 3071 slot = e1000g_unicst_find(Adapter, mac_addr); 3072 if (slot == -1) { 3073 rw_exit(&Adapter->chip_lock); 3074 return (EINVAL); 3075 } 3076 3077 ASSERT(Adapter->unicst_addr[slot].mac.set); 3078 3079 /* Clear this slot */ 3080 err = e1000g_unicst_set(Adapter, NULL, slot); 3081 if (err == 0) 3082 Adapter->unicst_avail++; 3083 3084 rw_exit(&Adapter->chip_lock); 3085 3086 return (err); 3087 } 3088
3089 static int 3090 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 3091 { 3092 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh; 3093 3094 mutex_enter(&rx_ring->rx_lock); 3095 rx_ring->ring_gen_num = mr_gen_num; 3096 mutex_exit(&rx_ring->rx_lock); 3097 return (0); 3098 } 3099
3100 /* 3101 * Callback function for MAC layer to register all rings. 3102 * 3103 * The hardware supports a single group with currently only one ring 3104 * available. 3105 * Though not offering virtualization ability per se, exposing the 3106 * group/ring still enables the polling and interrupt toggling. 3107 */ 3108 /* ARGSUSED */ 3109 void 3110 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index, 3111 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 3112 { 3113 struct e1000g *Adapter = (struct e1000g *)arg; 3114 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring; 3115 mac_intr_t *mintr; 3116 3117 /* 3118 * We advertised only RX group/rings, so the MAC framework shouldn't 3119 * ask for anything else.
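 * (e1000g_m_getcapab() reports mr_rnum = 1 and mr_gnum = 1 for
 * MAC_CAPAB_RINGS, so grp_index and ring_index can only be 0 here.)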
3120 */ 3121 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0); 3122 3123 rx_ring->mrh = rx_ring->mrh_init = rh; 3124 infop->mri_driver = (mac_ring_driver_t)rx_ring; 3125 infop->mri_start = e1000g_ring_start; 3126 infop->mri_stop = NULL; 3127 infop->mri_poll = e1000g_poll_ring; 3128 infop->mri_stat = e1000g_rx_ring_stat; 3129 3130 /* Ring level interrupts */ 3131 mintr = &infop->mri_intr; 3132 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 3133 mintr->mi_enable = e1000g_rx_ring_intr_enable; 3134 mintr->mi_disable = e1000g_rx_ring_intr_disable; 3135 if (Adapter->msi_enable) 3136 mintr->mi_ddi_handle = Adapter->htable[0]; 3137 } 3138 3139 /* ARGSUSED */ 3140 static void 3141 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index, 3142 mac_group_info_t *infop, mac_group_handle_t gh) 3143 { 3144 struct e1000g *Adapter = (struct e1000g *)arg; 3145 mac_intr_t *mintr; 3146 3147 /* 3148 * We advertised a single RX ring. Getting a request for anything else 3149 * signifies a bug in the MAC framework. 3150 */ 3151 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0); 3152 3153 Adapter->rx_group = gh; 3154 3155 infop->mgi_driver = (mac_group_driver_t)Adapter; 3156 infop->mgi_start = NULL; 3157 infop->mgi_stop = NULL; 3158 infop->mgi_addmac = e1000g_addmac; 3159 infop->mgi_remmac = e1000g_remmac; 3160 infop->mgi_count = 1; 3161 3162 /* Group level interrupts */ 3163 mintr = &infop->mgi_intr; 3164 mintr->mi_handle = (mac_intr_handle_t)Adapter; 3165 mintr->mi_enable = e1000g_rx_group_intr_enable; 3166 mintr->mi_disable = e1000g_rx_group_intr_disable; 3167 } 3168 3169 static void 3170 e1000g_led_blink(void *arg) 3171 { 3172 e1000g_t *e1000g = arg; 3173 3174 mutex_enter(&e1000g->e1000g_led_lock); 3175 VERIFY(e1000g->e1000g_emul_blink); 3176 if (e1000g->e1000g_emul_state) { 3177 (void) e1000_led_on(&e1000g->shared); 3178 } else { 3179 (void) e1000_led_off(&e1000g->shared); 3180 } 3181 e1000g->e1000g_emul_state = !e1000g->e1000g_emul_state; 3182 mutex_exit(&e1000g->e1000g_led_lock); 3183 } 3184 3185 static int 3186 e1000g_led_set(void *arg, mac_led_mode_t mode, uint_t flags) 3187 { 3188 e1000g_t *e1000g = arg; 3189 3190 if (flags != 0) 3191 return (EINVAL); 3192 3193 if (mode != MAC_LED_DEFAULT && 3194 mode != MAC_LED_IDENT && 3195 mode != MAC_LED_OFF && 3196 mode != MAC_LED_ON) 3197 return (ENOTSUP); 3198 3199 mutex_enter(&e1000g->e1000g_led_lock); 3200 3201 if ((mode == MAC_LED_IDENT || mode == MAC_LED_OFF || 3202 mode == MAC_LED_ON) && 3203 !e1000g->e1000g_led_setup) { 3204 if (e1000_setup_led(&e1000g->shared) != E1000_SUCCESS) { 3205 mutex_exit(&e1000g->e1000g_led_lock); 3206 return (EIO); 3207 } 3208 3209 e1000g->e1000g_led_setup = B_TRUE; 3210 } 3211 3212 if (mode != MAC_LED_IDENT && e1000g->e1000g_blink != NULL) { 3213 ddi_periodic_t id = e1000g->e1000g_blink; 3214 e1000g->e1000g_blink = NULL; 3215 mutex_exit(&e1000g->e1000g_led_lock); 3216 ddi_periodic_delete(id); 3217 mutex_enter(&e1000g->e1000g_led_lock); 3218 } 3219 3220 switch (mode) { 3221 case MAC_LED_DEFAULT: 3222 if (e1000g->e1000g_led_setup) { 3223 if (e1000_cleanup_led(&e1000g->shared) != 3224 E1000_SUCCESS) { 3225 mutex_exit(&e1000g->e1000g_led_lock); 3226 return (EIO); 3227 } 3228 e1000g->e1000g_led_setup = B_FALSE; 3229 } 3230 break; 3231 case MAC_LED_IDENT: 3232 if (e1000g->e1000g_emul_blink) { 3233 if (e1000g->e1000g_blink != NULL) 3234 break; 3235 3236 /* 3237 * Note, we use a 200 ms period here as that's what 3238 * section 10.1.3 8254x Intel Manual (PCI/PCI-X Family 3239 * of Gigabit Ethernet 
Controllers Software Developer's 3240 * Manual) indicates that the optional blink hardware 3241 * operates at. 3242 */ 3243 e1000g->e1000g_blink = 3244 ddi_periodic_add(e1000g_led_blink, e1000g, 3245 200ULL * (NANOSEC / MILLISEC), DDI_IPL_0); 3246 } else if (e1000_blink_led(&e1000g->shared) != E1000_SUCCESS) { 3247 mutex_exit(&e1000g->e1000g_led_lock); 3248 return (EIO); 3249 } 3250 break; 3251 case MAC_LED_OFF: 3252 if (e1000_led_off(&e1000g->shared) != E1000_SUCCESS) { 3253 mutex_exit(&e1000g->e1000g_led_lock); 3254 return (EIO); 3255 } 3256 break; 3257 case MAC_LED_ON: 3258 if (e1000_led_on(&e1000g->shared) != E1000_SUCCESS) { 3259 mutex_exit(&e1000g->e1000g_led_lock); 3260 return (EIO); 3261 } 3262 break; 3263 default: 3264 mutex_exit(&e1000g->e1000g_led_lock); 3265 return (ENOTSUP); 3266 } 3267 3268 mutex_exit(&e1000g->e1000g_led_lock); 3269 return (0); 3270 3271 } 3272 3273 static boolean_t 3274 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3275 { 3276 struct e1000g *Adapter = (struct e1000g *)arg; 3277 3278 switch (cap) { 3279 case MAC_CAPAB_HCKSUM: { 3280 uint32_t *txflags = cap_data; 3281 3282 if (Adapter->tx_hcksum_enable) 3283 *txflags = HCKSUM_IPHDRCKSUM | 3284 HCKSUM_INET_PARTIAL; 3285 else 3286 return (B_FALSE); 3287 break; 3288 } 3289 3290 case MAC_CAPAB_LSO: { 3291 mac_capab_lso_t *cap_lso = cap_data; 3292 3293 if (Adapter->lso_enable) { 3294 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 3295 cap_lso->lso_basic_tcp_ipv4.lso_max = 3296 E1000_LSO_MAXLEN; 3297 } else 3298 return (B_FALSE); 3299 break; 3300 } 3301 case MAC_CAPAB_RINGS: { 3302 mac_capab_rings_t *cap_rings = cap_data; 3303 3304 /* No TX rings exposed yet */ 3305 if (cap_rings->mr_type != MAC_RING_TYPE_RX) 3306 return (B_FALSE); 3307 3308 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC; 3309 cap_rings->mr_rnum = 1; 3310 cap_rings->mr_gnum = 1; 3311 cap_rings->mr_rget = e1000g_fill_ring; 3312 cap_rings->mr_gget = e1000g_fill_group; 3313 break; 3314 } 3315 case MAC_CAPAB_LED: { 3316 mac_capab_led_t *cap_led = cap_data; 3317 3318 cap_led->mcl_flags = 0; 3319 cap_led->mcl_modes = MAC_LED_DEFAULT; 3320 if (Adapter->shared.mac.ops.blink_led != NULL && 3321 Adapter->shared.mac.ops.blink_led != 3322 e1000_null_ops_generic) { 3323 cap_led->mcl_modes |= MAC_LED_IDENT; 3324 } 3325 3326 if (Adapter->shared.mac.ops.led_off != NULL && 3327 Adapter->shared.mac.ops.led_off != 3328 e1000_null_ops_generic) { 3329 cap_led->mcl_modes |= MAC_LED_OFF; 3330 } 3331 3332 if (Adapter->shared.mac.ops.led_on != NULL && 3333 Adapter->shared.mac.ops.led_on != 3334 e1000_null_ops_generic) { 3335 cap_led->mcl_modes |= MAC_LED_ON; 3336 } 3337 3338 /* 3339 * Some hardware doesn't support blinking natively as they're 3340 * missing the optional blink circuit. If they have both off and 3341 * on then we'll emulate it ourselves. 3342 */ 3343 if (((cap_led->mcl_modes & MAC_LED_IDENT) == 0) && 3344 ((cap_led->mcl_modes & MAC_LED_OFF) != 0) && 3345 ((cap_led->mcl_modes & MAC_LED_ON) != 0)) { 3346 cap_led->mcl_modes |= MAC_LED_IDENT; 3347 Adapter->e1000g_emul_blink = B_TRUE; 3348 } 3349 3350 cap_led->mcl_set = e1000g_led_set; 3351 break; 3352 } 3353 default: 3354 return (B_FALSE); 3355 } 3356 return (B_TRUE); 3357 } 3358 3359 static boolean_t 3360 e1000g_param_locked(mac_prop_id_t pr_num) 3361 { 3362 /* 3363 * All en_* parameters are locked (read-only) while 3364 * the device is in any sort of loopback mode ... 
3365 */ 3366 switch (pr_num) { 3367 case MAC_PROP_EN_1000FDX_CAP: 3368 case MAC_PROP_EN_1000HDX_CAP: 3369 case MAC_PROP_EN_100FDX_CAP: 3370 case MAC_PROP_EN_100HDX_CAP: 3371 case MAC_PROP_EN_10FDX_CAP: 3372 case MAC_PROP_EN_10HDX_CAP: 3373 case MAC_PROP_AUTONEG: 3374 case MAC_PROP_FLOWCTRL: 3375 return (B_TRUE); 3376 } 3377 return (B_FALSE); 3378 } 3379 3380 /* 3381 * callback function for set/get of properties 3382 */ 3383 static int 3384 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3385 uint_t pr_valsize, const void *pr_val) 3386 { 3387 struct e1000g *Adapter = arg; 3388 struct e1000_hw *hw = &Adapter->shared; 3389 struct e1000_fc_info *fc = &Adapter->shared.fc; 3390 int err = 0; 3391 link_flowctrl_t flowctrl; 3392 uint32_t cur_mtu, new_mtu; 3393 3394 rw_enter(&Adapter->chip_lock, RW_WRITER); 3395 3396 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3397 rw_exit(&Adapter->chip_lock); 3398 return (ECANCELED); 3399 } 3400 3401 if (Adapter->loopback_mode != E1000G_LB_NONE && 3402 e1000g_param_locked(pr_num)) { 3403 /* 3404 * All en_* parameters are locked (read-only) 3405 * while the device is in any sort of loopback mode. 3406 */ 3407 rw_exit(&Adapter->chip_lock); 3408 return (EBUSY); 3409 } 3410 3411 switch (pr_num) { 3412 case MAC_PROP_EN_1000FDX_CAP: 3413 if (hw->phy.media_type != e1000_media_type_copper) { 3414 err = ENOTSUP; 3415 break; 3416 } 3417 Adapter->param_en_1000fdx = *(uint8_t *)pr_val; 3418 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val; 3419 goto reset; 3420 case MAC_PROP_EN_100FDX_CAP: 3421 if (hw->phy.media_type != e1000_media_type_copper) { 3422 err = ENOTSUP; 3423 break; 3424 } 3425 Adapter->param_en_100fdx = *(uint8_t *)pr_val; 3426 Adapter->param_adv_100fdx = *(uint8_t *)pr_val; 3427 goto reset; 3428 case MAC_PROP_EN_100HDX_CAP: 3429 if (hw->phy.media_type != e1000_media_type_copper) { 3430 err = ENOTSUP; 3431 break; 3432 } 3433 Adapter->param_en_100hdx = *(uint8_t *)pr_val; 3434 Adapter->param_adv_100hdx = *(uint8_t *)pr_val; 3435 goto reset; 3436 case MAC_PROP_EN_10FDX_CAP: 3437 if (hw->phy.media_type != e1000_media_type_copper) { 3438 err = ENOTSUP; 3439 break; 3440 } 3441 Adapter->param_en_10fdx = *(uint8_t *)pr_val; 3442 Adapter->param_adv_10fdx = *(uint8_t *)pr_val; 3443 goto reset; 3444 case MAC_PROP_EN_10HDX_CAP: 3445 if (hw->phy.media_type != e1000_media_type_copper) { 3446 err = ENOTSUP; 3447 break; 3448 } 3449 Adapter->param_en_10hdx = *(uint8_t *)pr_val; 3450 Adapter->param_adv_10hdx = *(uint8_t *)pr_val; 3451 goto reset; 3452 case MAC_PROP_AUTONEG: 3453 if (hw->phy.media_type != e1000_media_type_copper) { 3454 err = ENOTSUP; 3455 break; 3456 } 3457 Adapter->param_adv_autoneg = *(uint8_t *)pr_val; 3458 goto reset; 3459 case MAC_PROP_FLOWCTRL: 3460 fc->send_xon = B_TRUE; 3461 bcopy(pr_val, &flowctrl, sizeof (flowctrl)); 3462 3463 switch (flowctrl) { 3464 default: 3465 err = EINVAL; 3466 break; 3467 case LINK_FLOWCTRL_NONE: 3468 fc->requested_mode = e1000_fc_none; 3469 break; 3470 case LINK_FLOWCTRL_RX: 3471 fc->requested_mode = e1000_fc_rx_pause; 3472 break; 3473 case LINK_FLOWCTRL_TX: 3474 fc->requested_mode = e1000_fc_tx_pause; 3475 break; 3476 case LINK_FLOWCTRL_BI: 3477 fc->requested_mode = e1000_fc_full; 3478 break; 3479 } 3480 reset: 3481 if (err == 0) { 3482 /* check PCH limits & reset the link */ 3483 e1000g_pch_limits(Adapter); 3484 if (e1000g_reset_link(Adapter) != DDI_SUCCESS) 3485 err = EINVAL; 3486 } 3487 break; 3488 case MAC_PROP_ADV_1000FDX_CAP: 3489 case MAC_PROP_ADV_1000HDX_CAP: 3490 case MAC_PROP_ADV_100FDX_CAP: 
3491 case MAC_PROP_ADV_100HDX_CAP: 3492 case MAC_PROP_ADV_10FDX_CAP: 3493 case MAC_PROP_ADV_10HDX_CAP: 3494 case MAC_PROP_EN_1000HDX_CAP: 3495 case MAC_PROP_STATUS: 3496 case MAC_PROP_SPEED: 3497 case MAC_PROP_DUPLEX: 3498 case MAC_PROP_MEDIA: 3499 err = ENOTSUP; /* read-only prop. Can't set this. */ 3500 break; 3501 case MAC_PROP_MTU: 3502 /* adapter must be stopped for an MTU change */ 3503 if (Adapter->e1000g_state & E1000G_STARTED) { 3504 err = EBUSY; 3505 break; 3506 } 3507 3508 cur_mtu = Adapter->default_mtu; 3509 3510 /* get new requested MTU */ 3511 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 3512 if (new_mtu == cur_mtu) { 3513 err = 0; 3514 break; 3515 } 3516 3517 if ((new_mtu < DEFAULT_MTU) || 3518 (new_mtu > Adapter->max_mtu)) { 3519 err = EINVAL; 3520 break; 3521 } 3522 3523 /* inform MAC framework of new MTU */ 3524 err = mac_maxsdu_update(Adapter->mh, new_mtu); 3525 3526 if (err == 0) { 3527 Adapter->default_mtu = new_mtu; 3528 Adapter->max_frame_size = 3529 e1000g_mtu2maxframe(new_mtu); 3530 3531 /* 3532 * check PCH limits & set buffer sizes to 3533 * match new MTU 3534 */ 3535 e1000g_pch_limits(Adapter); 3536 e1000g_set_bufsize(Adapter); 3537 3538 /* 3539 * decrease the number of descriptors and free 3540 * packets for jumbo frames to reduce tx/rx 3541 * resource consumption 3542 */ 3543 if (Adapter->max_frame_size >= 3544 (FRAME_SIZE_UPTO_4K)) { 3545 if (Adapter->tx_desc_num_flag == 0) 3546 Adapter->tx_desc_num = 3547 DEFAULT_JUMBO_NUM_TX_DESC; 3548 3549 if (Adapter->rx_desc_num_flag == 0) 3550 Adapter->rx_desc_num = 3551 DEFAULT_JUMBO_NUM_RX_DESC; 3552 3553 if (Adapter->tx_buf_num_flag == 0) 3554 Adapter->tx_freelist_num = 3555 DEFAULT_JUMBO_NUM_TX_BUF; 3556 3557 if (Adapter->rx_buf_num_flag == 0) 3558 Adapter->rx_freelist_limit = 3559 DEFAULT_JUMBO_NUM_RX_BUF; 3560 } else { 3561 if (Adapter->tx_desc_num_flag == 0) 3562 Adapter->tx_desc_num = 3563 DEFAULT_NUM_TX_DESCRIPTOR; 3564 3565 if (Adapter->rx_desc_num_flag == 0) 3566 Adapter->rx_desc_num = 3567 DEFAULT_NUM_RX_DESCRIPTOR; 3568 3569 if (Adapter->tx_buf_num_flag == 0) 3570 Adapter->tx_freelist_num = 3571 DEFAULT_NUM_TX_FREELIST; 3572 3573 if (Adapter->rx_buf_num_flag == 0) 3574 Adapter->rx_freelist_limit = 3575 DEFAULT_NUM_RX_FREELIST; 3576 } 3577 } 3578 break; 3579 case MAC_PROP_PRIVATE: 3580 err = e1000g_set_priv_prop(Adapter, pr_name, 3581 pr_valsize, pr_val); 3582 break; 3583 default: 3584 err = ENOTSUP; 3585 break; 3586 } 3587 rw_exit(&Adapter->chip_lock); 3588 return (err); 3589 } 3590 3591 static int 3592 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3593 uint_t pr_valsize, void *pr_val) 3594 { 3595 struct e1000g *Adapter = arg; 3596 struct e1000_hw *hw = &Adapter->shared; 3597 struct e1000_fc_info *fc = &Adapter->shared.fc; 3598 int err = 0; 3599 link_flowctrl_t flowctrl; 3600 uint64_t tmp = 0; 3601 3602 switch (pr_num) { 3603 case MAC_PROP_DUPLEX: 3604 ASSERT(pr_valsize >= sizeof (link_duplex_t)); 3605 bcopy(&Adapter->link_duplex, pr_val, 3606 sizeof (link_duplex_t)); 3607 break; 3608 case MAC_PROP_SPEED: 3609 ASSERT(pr_valsize >= sizeof (uint64_t)); 3610 tmp = Adapter->link_speed * 1000000ull; 3611 bcopy(&tmp, pr_val, sizeof (tmp)); 3612 break; 3613 case MAC_PROP_AUTONEG: 3614 *(uint8_t *)pr_val = Adapter->param_adv_autoneg; 3615 break; 3616 case MAC_PROP_FLOWCTRL: 3617 ASSERT(pr_valsize >= sizeof (link_flowctrl_t)); 3618 switch (fc->current_mode) { 3619 case e1000_fc_none: 3620 flowctrl = LINK_FLOWCTRL_NONE; 3621 break; 3622 case e1000_fc_rx_pause: 3623 flowctrl = LINK_FLOWCTRL_RX; 
3624 break; 3625 case e1000_fc_tx_pause: 3626 flowctrl = LINK_FLOWCTRL_TX; 3627 break; 3628 case e1000_fc_full: 3629 flowctrl = LINK_FLOWCTRL_BI; 3630 break; 3631 } 3632 bcopy(&flowctrl, pr_val, sizeof (flowctrl)); 3633 break; 3634 case MAC_PROP_ADV_1000FDX_CAP: 3635 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx; 3636 break; 3637 case MAC_PROP_EN_1000FDX_CAP: 3638 *(uint8_t *)pr_val = Adapter->param_en_1000fdx; 3639 break; 3640 case MAC_PROP_ADV_1000HDX_CAP: 3641 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx; 3642 break; 3643 case MAC_PROP_EN_1000HDX_CAP: 3644 *(uint8_t *)pr_val = Adapter->param_en_1000hdx; 3645 break; 3646 case MAC_PROP_ADV_100FDX_CAP: 3647 *(uint8_t *)pr_val = Adapter->param_adv_100fdx; 3648 break; 3649 case MAC_PROP_EN_100FDX_CAP: 3650 *(uint8_t *)pr_val = Adapter->param_en_100fdx; 3651 break; 3652 case MAC_PROP_ADV_100HDX_CAP: 3653 *(uint8_t *)pr_val = Adapter->param_adv_100hdx; 3654 break; 3655 case MAC_PROP_EN_100HDX_CAP: 3656 *(uint8_t *)pr_val = Adapter->param_en_100hdx; 3657 break; 3658 case MAC_PROP_ADV_10FDX_CAP: 3659 *(uint8_t *)pr_val = Adapter->param_adv_10fdx; 3660 break; 3661 case MAC_PROP_EN_10FDX_CAP: 3662 *(uint8_t *)pr_val = Adapter->param_en_10fdx; 3663 break; 3664 case MAC_PROP_ADV_10HDX_CAP: 3665 *(uint8_t *)pr_val = Adapter->param_adv_10hdx; 3666 break; 3667 case MAC_PROP_EN_10HDX_CAP: 3668 *(uint8_t *)pr_val = Adapter->param_en_10hdx; 3669 break; 3670 case MAC_PROP_ADV_100T4_CAP: 3671 case MAC_PROP_EN_100T4_CAP: 3672 *(uint8_t *)pr_val = Adapter->param_adv_100t4; 3673 break; 3674 case MAC_PROP_MEDIA: 3675 *(mac_ether_media_t *)pr_val = e1000_link_to_media(hw, 3676 Adapter->link_speed); 3677 break; 3678 case MAC_PROP_PRIVATE: 3679 err = e1000g_get_priv_prop(Adapter, pr_name, 3680 pr_valsize, pr_val); 3681 break; 3682 default: 3683 err = ENOTSUP; 3684 break; 3685 } 3686 3687 return (err); 3688 } 3689 3690 static void 3691 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3692 mac_prop_info_handle_t prh) 3693 { 3694 struct e1000g *Adapter = arg; 3695 struct e1000_hw *hw = &Adapter->shared; 3696 3697 switch (pr_num) { 3698 case MAC_PROP_DUPLEX: 3699 case MAC_PROP_SPEED: 3700 case MAC_PROP_ADV_1000FDX_CAP: 3701 case MAC_PROP_ADV_1000HDX_CAP: 3702 case MAC_PROP_ADV_100FDX_CAP: 3703 case MAC_PROP_ADV_100HDX_CAP: 3704 case MAC_PROP_ADV_10FDX_CAP: 3705 case MAC_PROP_ADV_10HDX_CAP: 3706 case MAC_PROP_ADV_100T4_CAP: 3707 case MAC_PROP_EN_100T4_CAP: 3708 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3709 break; 3710 3711 case MAC_PROP_EN_1000FDX_CAP: 3712 if (hw->phy.media_type != e1000_media_type_copper) { 3713 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3714 } else { 3715 mac_prop_info_set_default_uint8(prh, 3716 ((Adapter->phy_ext_status & 3717 IEEE_ESR_1000T_FD_CAPS) || 3718 (Adapter->phy_ext_status & 3719 IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0); 3720 } 3721 break; 3722 3723 case MAC_PROP_EN_100FDX_CAP: 3724 if (hw->phy.media_type != e1000_media_type_copper) { 3725 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3726 } else { 3727 mac_prop_info_set_default_uint8(prh, 3728 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 3729 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 3730 ? 1 : 0); 3731 } 3732 break; 3733 3734 case MAC_PROP_EN_100HDX_CAP: 3735 if (hw->phy.media_type != e1000_media_type_copper) { 3736 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3737 } else { 3738 mac_prop_info_set_default_uint8(prh, 3739 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) || 3740 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) 3741 ? 
1 : 0); 3742 } 3743 break; 3744 3745 case MAC_PROP_EN_10FDX_CAP: 3746 if (hw->phy.media_type != e1000_media_type_copper) { 3747 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3748 } else { 3749 mac_prop_info_set_default_uint8(prh, 3750 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0); 3751 } 3752 break; 3753 3754 case MAC_PROP_EN_10HDX_CAP: 3755 if (hw->phy.media_type != e1000_media_type_copper) { 3756 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3757 } else { 3758 mac_prop_info_set_default_uint8(prh, 3759 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0); 3760 } 3761 break; 3762 3763 case MAC_PROP_EN_1000HDX_CAP: 3764 if (hw->phy.media_type != e1000_media_type_copper) 3765 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3766 break; 3767 3768 case MAC_PROP_AUTONEG: 3769 if (hw->phy.media_type != e1000_media_type_copper) { 3770 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3771 } else { 3772 mac_prop_info_set_default_uint8(prh, 3773 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) 3774 ? 1 : 0); 3775 } 3776 break; 3777 3778 case MAC_PROP_FLOWCTRL: 3779 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI); 3780 break; 3781 3782 case MAC_PROP_MTU: { 3783 struct e1000_mac_info *mac = &Adapter->shared.mac; 3784 struct e1000_phy_info *phy = &Adapter->shared.phy; 3785 uint32_t max; 3786 3787 /* some MAC types do not support jumbo frames */ 3788 if ((mac->type == e1000_ich8lan) || 3789 ((mac->type == e1000_ich9lan) && (phy->type == 3790 e1000_phy_ife))) { 3791 max = DEFAULT_MTU; 3792 } else { 3793 max = Adapter->max_mtu; 3794 } 3795 3796 mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max); 3797 break; 3798 } 3799 case MAC_PROP_PRIVATE: { 3800 char valstr[64]; 3801 int value; 3802 3803 if (strcmp(pr_name, "_adv_pause_cap") == 0 || 3804 strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 3805 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3806 return; 3807 } else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3808 value = DEFAULT_TX_BCOPY_THRESHOLD; 3809 } else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3810 value = DEFAULT_TX_INTR_ENABLE; 3811 } else if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3812 value = DEFAULT_TX_INTR_DELAY; 3813 } else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3814 value = DEFAULT_TX_INTR_ABS_DELAY; 3815 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3816 value = DEFAULT_RX_BCOPY_THRESHOLD; 3817 } else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3818 value = DEFAULT_RX_LIMIT_ON_INTR; 3819 } else if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3820 value = DEFAULT_RX_INTR_DELAY; 3821 } else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3822 value = DEFAULT_RX_INTR_ABS_DELAY; 3823 } else if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3824 value = DEFAULT_INTR_THROTTLING; 3825 } else if (strcmp(pr_name, "_intr_adaptive") == 0) { 3826 value = 1; 3827 } else { 3828 return; 3829 } 3830 3831 (void) snprintf(valstr, sizeof (valstr), "%d", value); 3832 mac_prop_info_set_default_str(prh, valstr); 3833 break; 3834 } 3835 } 3836 } 3837 3838 /* ARGSUSED2 */ 3839 static int 3840 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name, 3841 uint_t pr_valsize, const void *pr_val) 3842 { 3843 int err = 0; 3844 long result; 3845 struct e1000_hw *hw = &Adapter->shared; 3846 3847 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3848 if (pr_val == NULL) { 3849 err = EINVAL; 3850 return (err); 3851 } 3852 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3853 if (result < MIN_TX_BCOPY_THRESHOLD || 3854 result > 
MAX_TX_BCOPY_THRESHOLD) 3855 err = EINVAL; 3856 else { 3857 Adapter->tx_bcopy_thresh = (uint32_t)result; 3858 } 3859 return (err); 3860 } 3861 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3862 if (pr_val == NULL) { 3863 err = EINVAL; 3864 return (err); 3865 } 3866 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3867 if (result < 0 || result > 1) 3868 err = EINVAL; 3869 else { 3870 Adapter->tx_intr_enable = (result == 1) ? 3871 B_TRUE: B_FALSE; 3872 if (Adapter->tx_intr_enable) 3873 e1000g_mask_tx_interrupt(Adapter); 3874 else 3875 e1000g_clear_tx_interrupt(Adapter); 3876 if (e1000g_check_acc_handle( 3877 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3878 ddi_fm_service_impact(Adapter->dip, 3879 DDI_SERVICE_DEGRADED); 3880 err = EIO; 3881 } 3882 } 3883 return (err); 3884 } 3885 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3886 if (pr_val == NULL) { 3887 err = EINVAL; 3888 return (err); 3889 } 3890 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3891 if (result < MIN_TX_INTR_DELAY || 3892 result > MAX_TX_INTR_DELAY) 3893 err = EINVAL; 3894 else { 3895 Adapter->tx_intr_delay = (uint32_t)result; 3896 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay); 3897 if (e1000g_check_acc_handle( 3898 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3899 ddi_fm_service_impact(Adapter->dip, 3900 DDI_SERVICE_DEGRADED); 3901 err = EIO; 3902 } 3903 } 3904 return (err); 3905 } 3906 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3907 if (pr_val == NULL) { 3908 err = EINVAL; 3909 return (err); 3910 } 3911 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3912 if (result < MIN_TX_INTR_ABS_DELAY || 3913 result > MAX_TX_INTR_ABS_DELAY) 3914 err = EINVAL; 3915 else { 3916 Adapter->tx_intr_abs_delay = (uint32_t)result; 3917 E1000_WRITE_REG(hw, E1000_TADV, 3918 Adapter->tx_intr_abs_delay); 3919 if (e1000g_check_acc_handle( 3920 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3921 ddi_fm_service_impact(Adapter->dip, 3922 DDI_SERVICE_DEGRADED); 3923 err = EIO; 3924 } 3925 } 3926 return (err); 3927 } 3928 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3929 if (pr_val == NULL) { 3930 err = EINVAL; 3931 return (err); 3932 } 3933 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3934 if (result < MIN_RX_BCOPY_THRESHOLD || 3935 result > MAX_RX_BCOPY_THRESHOLD) 3936 err = EINVAL; 3937 else 3938 Adapter->rx_bcopy_thresh = (uint32_t)result; 3939 return (err); 3940 } 3941 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3942 if (pr_val == NULL) { 3943 err = EINVAL; 3944 return (err); 3945 } 3946 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3947 if (result < MIN_RX_LIMIT_ON_INTR || 3948 result > MAX_RX_LIMIT_ON_INTR) 3949 err = EINVAL; 3950 else 3951 Adapter->rx_limit_onintr = (uint32_t)result; 3952 return (err); 3953 } 3954 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3955 if (pr_val == NULL) { 3956 err = EINVAL; 3957 return (err); 3958 } 3959 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3960 if (result < MIN_RX_INTR_DELAY || 3961 result > MAX_RX_INTR_DELAY) 3962 err = EINVAL; 3963 else { 3964 Adapter->rx_intr_delay = (uint32_t)result; 3965 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay); 3966 if (e1000g_check_acc_handle( 3967 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3968 ddi_fm_service_impact(Adapter->dip, 3969 DDI_SERVICE_DEGRADED); 3970 err = EIO; 3971 } 3972 } 3973 return (err); 3974 } 3975 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3976 if (pr_val == NULL) { 3977 err = EINVAL; 3978 return (err); 3979 } 3980 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 
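/* As with the other delay properties above: range-check the parsed value, program the E1000_RADV register below and verify register access through the FMA handle check. */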
3981 if (result < MIN_RX_INTR_ABS_DELAY || 3982 result > MAX_RX_INTR_ABS_DELAY) 3983 err = EINVAL; 3984 else { 3985 Adapter->rx_intr_abs_delay = (uint32_t)result; 3986 E1000_WRITE_REG(hw, E1000_RADV, 3987 Adapter->rx_intr_abs_delay); 3988 if (e1000g_check_acc_handle( 3989 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3990 ddi_fm_service_impact(Adapter->dip, 3991 DDI_SERVICE_DEGRADED); 3992 err = EIO; 3993 } 3994 } 3995 return (err); 3996 } 3997 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3998 if (pr_val == NULL) { 3999 err = EINVAL; 4000 return (err); 4001 } 4002 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4003 if (result < MIN_INTR_THROTTLING || 4004 result > MAX_INTR_THROTTLING) 4005 err = EINVAL; 4006 else { 4007 if (hw->mac.type >= e1000_82540) { 4008 Adapter->intr_throttling_rate = 4009 (uint32_t)result; 4010 E1000_WRITE_REG(hw, E1000_ITR, 4011 Adapter->intr_throttling_rate); 4012 if (e1000g_check_acc_handle( 4013 Adapter->osdep.reg_handle) != DDI_FM_OK) { 4014 ddi_fm_service_impact(Adapter->dip, 4015 DDI_SERVICE_DEGRADED); 4016 err = EIO; 4017 } 4018 } else 4019 err = EINVAL; 4020 } 4021 return (err); 4022 } 4023 if (strcmp(pr_name, "_intr_adaptive") == 0) { 4024 if (pr_val == NULL) { 4025 err = EINVAL; 4026 return (err); 4027 } 4028 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4029 if (result < 0 || result > 1) 4030 err = EINVAL; 4031 else { 4032 if (hw->mac.type >= e1000_82540) { 4033 Adapter->intr_adaptive = (result == 1) ? 4034 B_TRUE : B_FALSE; 4035 } else { 4036 err = EINVAL; 4037 } 4038 } 4039 return (err); 4040 } 4041 return (ENOTSUP); 4042 } 4043 4044 static int 4045 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name, 4046 uint_t pr_valsize, void *pr_val) 4047 { 4048 int err = ENOTSUP; 4049 int value; 4050 4051 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 4052 value = Adapter->param_adv_pause; 4053 err = 0; 4054 goto done; 4055 } 4056 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 4057 value = Adapter->param_adv_asym_pause; 4058 err = 0; 4059 goto done; 4060 } 4061 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 4062 value = Adapter->tx_bcopy_thresh; 4063 err = 0; 4064 goto done; 4065 } 4066 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 4067 value = Adapter->tx_intr_enable; 4068 err = 0; 4069 goto done; 4070 } 4071 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 4072 value = Adapter->tx_intr_delay; 4073 err = 0; 4074 goto done; 4075 } 4076 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 4077 value = Adapter->tx_intr_abs_delay; 4078 err = 0; 4079 goto done; 4080 } 4081 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 4082 value = Adapter->rx_bcopy_thresh; 4083 err = 0; 4084 goto done; 4085 } 4086 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 4087 value = Adapter->rx_limit_onintr; 4088 err = 0; 4089 goto done; 4090 } 4091 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 4092 value = Adapter->rx_intr_delay; 4093 err = 0; 4094 goto done; 4095 } 4096 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 4097 value = Adapter->rx_intr_abs_delay; 4098 err = 0; 4099 goto done; 4100 } 4101 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 4102 value = Adapter->intr_throttling_rate; 4103 err = 0; 4104 goto done; 4105 } 4106 if (strcmp(pr_name, "_intr_adaptive") == 0) { 4107 value = Adapter->intr_adaptive; 4108 err = 0; 4109 goto done; 4110 } 4111 done: 4112 if (err == 0) { 4113 (void) snprintf(pr_val, pr_valsize, "%d", value); 4114 } 4115 return (err); 4116 } 4117 4118 /* 4119 * e1000g_get_conf - get configurations set in 
e1000g.conf 4120 * This routine gets user-configured values out of the configuration 4121 * file e1000g.conf. 4122 * 4123 * For each configurable value, there is a minimum, a maximum, and a 4124 * default. 4125 * If the user does not configure a value, use the default. 4126 * If the user configures below the minimum, use the minimum. 4127 * If the user configures above the maximum, use the maximum. 4128 */ 4129 static void 4130 e1000g_get_conf(struct e1000g *Adapter) 4131 { 4132 struct e1000_hw *hw = &Adapter->shared; 4133 boolean_t tbi_compatibility = B_FALSE; 4134 boolean_t is_jumbo = B_FALSE; 4135 int propval; 4136 /* 4137 * decrease the number of descriptors and free packets 4138 * for jumbo frames to reduce tx/rx resource consumption 4139 */ 4140 if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) { 4141 is_jumbo = B_TRUE; 4142 } 4143 4144 /* 4145 * get each configurable property from e1000g.conf 4146 */ 4147 4148 /* 4149 * NumTxDescriptors 4150 */ 4151 Adapter->tx_desc_num_flag = 4152 e1000g_get_prop(Adapter, "NumTxDescriptors", 4153 MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR, 4154 is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC 4155 : DEFAULT_NUM_TX_DESCRIPTOR, &propval); 4156 Adapter->tx_desc_num = propval; 4157 4158 /* 4159 * NumRxDescriptors 4160 */ 4161 Adapter->rx_desc_num_flag = 4162 e1000g_get_prop(Adapter, "NumRxDescriptors", 4163 MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR, 4164 is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC 4165 : DEFAULT_NUM_RX_DESCRIPTOR, &propval); 4166 Adapter->rx_desc_num = propval; 4167 4168 /* 4169 * NumRxFreeList 4170 */ 4171 Adapter->rx_buf_num_flag = 4172 e1000g_get_prop(Adapter, "NumRxFreeList", 4173 MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST, 4174 is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF 4175 : DEFAULT_NUM_RX_FREELIST, &propval); 4176 Adapter->rx_freelist_limit = propval; 4177 4178 /* 4179 * NumTxPacketList 4180 */ 4181 Adapter->tx_buf_num_flag = 4182 e1000g_get_prop(Adapter, "NumTxPacketList", 4183 MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST, 4184 is_jumbo ?
DEFAULT_JUMBO_NUM_TX_BUF 4185 : DEFAULT_NUM_TX_FREELIST, &propval); 4186 Adapter->tx_freelist_num = propval; 4187 4188 /* 4189 * FlowControl 4190 */ 4191 hw->fc.send_xon = B_TRUE; 4192 (void) e1000g_get_prop(Adapter, "FlowControl", 4193 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval); 4194 hw->fc.requested_mode = propval; 4195 /* 4 is the setting that says "let the eeprom decide" */ 4196 if (hw->fc.requested_mode == 4) 4197 hw->fc.requested_mode = e1000_fc_default; 4198 4199 /* 4200 * Max Num Receive Packets on Interrupt 4201 */ 4202 (void) e1000g_get_prop(Adapter, "MaxNumReceivePackets", 4203 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR, 4204 DEFAULT_RX_LIMIT_ON_INTR, &propval); 4205 Adapter->rx_limit_onintr = propval; 4206 4207 /* 4208 * PHY master slave setting 4209 */ 4210 (void) e1000g_get_prop(Adapter, "SetMasterSlave", 4211 e1000_ms_hw_default, e1000_ms_auto, 4212 e1000_ms_hw_default, &propval); 4213 hw->phy.ms_type = propval; 4214 4215 /* 4216 * Parameter which controls TBI mode workaround, which is only 4217 * needed on certain switches such as Cisco 6500/Foundry 4218 */ 4219 (void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable", 4220 0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval); 4221 tbi_compatibility = (propval == 1); 4222 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility); 4223 4224 /* 4225 * MSI Enable 4226 */ 4227 (void) e1000g_get_prop(Adapter, "MSIEnable", 4228 0, 1, DEFAULT_MSI_ENABLE, &propval); 4229 Adapter->msi_enable = (propval == 1); 4230 4231 /* 4232 * Interrupt Throttling Rate 4233 */ 4234 (void) e1000g_get_prop(Adapter, "intr_throttling_rate", 4235 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING, 4236 DEFAULT_INTR_THROTTLING, &propval); 4237 Adapter->intr_throttling_rate = propval; 4238 4239 /* 4240 * Adaptive Interrupt Blanking Enable/Disable 4241 * It is enabled by default 4242 */ 4243 (void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1, 4244 &propval); 4245 Adapter->intr_adaptive = (propval == 1); 4246 4247 /* 4248 * Hardware checksum enable/disable parameter 4249 */ 4250 (void) e1000g_get_prop(Adapter, "tx_hcksum_enable", 4251 0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval); 4252 Adapter->tx_hcksum_enable = (propval == 1); 4253 /* 4254 * Checksum on/off selection via global parameters. 4255 * 4256 * If the chip is flagged as not capable of (correctly) 4257 * handling checksumming, we don't enable it on either 4258 * Rx or Tx side. Otherwise, we take this chip's settings 4259 * from the patchable global defaults. 4260 * 4261 * We advertise our capabilities only if TX offload is 4262 * enabled. On receive, the stack will accept checksummed 4263 * packets anyway, even if we haven't said we can deliver 4264 * them. 4265 */ 4266 switch (hw->mac.type) { 4267 case e1000_82540: 4268 case e1000_82544: 4269 case e1000_82545: 4270 case e1000_82545_rev_3: 4271 case e1000_82546: 4272 case e1000_82546_rev_3: 4273 case e1000_82571: 4274 case e1000_82572: 4275 case e1000_82573: 4276 case e1000_80003es2lan: 4277 break; 4278 /* 4279 * For the following Intel PRO/1000 chipsets, we have not 4280 * tested the hardware checksum offload capability, so we 4281 * disable the capability for them. 4282 * e1000_82542, 4283 * e1000_82543, 4284 * e1000_82541, 4285 * e1000_82541_rev_2, 4286 * e1000_82547, 4287 * e1000_82547_rev_2, 4288 */ 4289 default: 4290 Adapter->tx_hcksum_enable = B_FALSE; 4291 } 4292 4293 /* 4294 * Large Send Offloading(LSO) Enable/Disable 4295 * If the tx hardware checksum is not enabled, LSO should be 4296 * disabled. 
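 * (LSO-generated segments depend on the hardware inserting the TCP/IP checksums, which is why the dependency is enforced a few lines below.)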
4297 */ 4298 (void) e1000g_get_prop(Adapter, "lso_enable", 4299 0, 1, DEFAULT_LSO_ENABLE, &propval); 4300 Adapter->lso_enable = (propval == 1); 4301 4302 switch (hw->mac.type) { 4303 case e1000_82546: 4304 case e1000_82546_rev_3: 4305 if (Adapter->lso_enable) 4306 Adapter->lso_premature_issue = B_TRUE; 4307 /* FALLTHRU */ 4308 case e1000_82571: 4309 case e1000_82572: 4310 case e1000_82573: 4311 case e1000_80003es2lan: 4312 break; 4313 default: 4314 Adapter->lso_enable = B_FALSE; 4315 } 4316 4317 if (!Adapter->tx_hcksum_enable) { 4318 Adapter->lso_premature_issue = B_FALSE; 4319 Adapter->lso_enable = B_FALSE; 4320 } 4321 4322 /* 4323 * If mem_workaround_82546 is enabled, the rx buffer allocated by 4324 * e1000_82545, e1000_82546 and e1000_82546_rev_3 4325 * will not cross 64k boundary. 4326 */ 4327 (void) e1000g_get_prop(Adapter, "mem_workaround_82546", 4328 0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval); 4329 Adapter->mem_workaround_82546 = (propval == 1); 4330 4331 /* 4332 * Max number of multicast addresses 4333 */ 4334 (void) e1000g_get_prop(Adapter, "mcast_max_num", 4335 MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32, 4336 &propval); 4337 Adapter->mcast_max_num = propval; 4338 } 4339 4340 /* 4341 * e1000g_get_prop - routine to read properties 4342 * 4343 * Get a user-configured property value out of the configuration 4344 * file e1000g.conf. 4345 * 4346 * Caller provides name of the property, a default value, a minimum 4347 * value, a maximum value and a pointer to the returned property 4348 * value. 4349 * 4350 * Return B_TRUE if the configured value of the property is not a default 4351 * value, otherwise return B_FALSE. 4352 */ 4353 static boolean_t 4354 e1000g_get_prop(struct e1000g *Adapter, /* pointer to per-adapter structure */ 4355 char *propname, /* name of the property */ 4356 int minval, /* minimum acceptable value */ 4357 int maxval, /* maximum acceptable value */ 4358 int defval, /* default value */ 4359 int *propvalue) /* property value returned to caller */ 4360 { 4361 int propval; /* value returned for requested property */ 4362 int *props; /* pointer to array of properties returned */ 4363 uint_t nprops; /* number of property values returned */ 4364 boolean_t ret = B_TRUE; 4365 4366 /* 4367 * get the array of properties from the config file 4368 */ 4369 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip, 4370 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) { 4371 /* got some properties, test if we got enough */ 4372 if (Adapter->instance < nprops) { 4373 propval = props[Adapter->instance]; 4374 } else { 4375 /* not enough properties configured */ 4376 propval = defval; 4377 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4378 "Not Enough %s values found in e1000g.conf" 4379 " - set to %d\n", 4380 propname, propval); 4381 ret = B_FALSE; 4382 } 4383 4384 /* free memory allocated for properties */ 4385 ddi_prop_free(props); 4386 4387 } else { 4388 propval = defval; 4389 ret = B_FALSE; 4390 } 4391 4392 /* 4393 * enforce limits 4394 */ 4395 if (propval > maxval) { 4396 propval = maxval; 4397 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4398 "Too High %s value in e1000g.conf - set to %d\n", 4399 propname, propval); 4400 } 4401 4402 if (propval < minval) { 4403 propval = minval; 4404 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4405 "Too Low %s value in e1000g.conf - set to %d\n", 4406 propname, propval); 4407 } 4408 4409 *propvalue = propval; 4410 return (ret); 4411 } 4412 4413 static boolean_t 4414 e1000g_link_check(struct e1000g *Adapter) 4415 {
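/* Sample the current link state. On a transition to link-up, record the negotiated speed/duplex, choose the per-speed tx stall threshold and apply the 82571/82572 TARC(0) adjustment; on a transition to link-down, engage the IGP PHY SmartSpeed workaround. Returns B_TRUE when the reported link state changed. */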
4416 uint16_t speed, duplex, phydata; 4417 boolean_t link_changed = B_FALSE; 4418 struct e1000_hw *hw; 4419 uint32_t reg_tarc; 4420 4421 hw = &Adapter->shared; 4422 4423 if (e1000g_link_up(Adapter)) { 4424 /* 4425 * The Link is up, check whether it was marked as down earlier 4426 */ 4427 if (Adapter->link_state != LINK_STATE_UP) { 4428 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex); 4429 Adapter->link_speed = speed; 4430 Adapter->link_duplex = duplex; 4431 Adapter->link_state = LINK_STATE_UP; 4432 link_changed = B_TRUE; 4433 4434 if (Adapter->link_speed == SPEED_1000) 4435 Adapter->stall_threshold = TX_STALL_TIME_2S; 4436 else 4437 Adapter->stall_threshold = TX_STALL_TIME_8S; 4438 4439 Adapter->tx_link_down_timeout = 0; 4440 4441 if ((hw->mac.type == e1000_82571) || 4442 (hw->mac.type == e1000_82572)) { 4443 reg_tarc = E1000_READ_REG(hw, E1000_TARC(0)); 4444 if (speed == SPEED_1000) 4445 reg_tarc |= (1 << 21); 4446 else 4447 reg_tarc &= ~(1 << 21); 4448 E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc); 4449 } 4450 } 4451 Adapter->smartspeed = 0; 4452 } else { 4453 if (Adapter->link_state != LINK_STATE_DOWN) { 4454 Adapter->link_speed = 0; 4455 Adapter->link_duplex = 0; 4456 Adapter->link_state = LINK_STATE_DOWN; 4457 link_changed = B_TRUE; 4458 4459 /* 4460 * SmartSpeed workaround for Tabor/TanaX, When the 4461 * driver loses link disable auto master/slave 4462 * resolution. 4463 */ 4464 if (hw->phy.type == e1000_phy_igp) { 4465 (void) e1000_read_phy_reg(hw, 4466 PHY_1000T_CTRL, &phydata); 4467 phydata |= CR_1000T_MS_ENABLE; 4468 (void) e1000_write_phy_reg(hw, 4469 PHY_1000T_CTRL, phydata); 4470 } 4471 } else { 4472 e1000g_smartspeed(Adapter); 4473 } 4474 4475 if (Adapter->e1000g_state & E1000G_STARTED) { 4476 if (Adapter->tx_link_down_timeout < 4477 MAX_TX_LINK_DOWN_TIMEOUT) { 4478 Adapter->tx_link_down_timeout++; 4479 } else if (Adapter->tx_link_down_timeout == 4480 MAX_TX_LINK_DOWN_TIMEOUT) { 4481 e1000g_tx_clean(Adapter); 4482 Adapter->tx_link_down_timeout++; 4483 } 4484 } 4485 } 4486 4487 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 4488 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4489 4490 return (link_changed); 4491 } 4492 4493 /* 4494 * e1000g_reset_link - Using the link properties to setup the link 4495 */ 4496 int 4497 e1000g_reset_link(struct e1000g *Adapter) 4498 { 4499 struct e1000_mac_info *mac; 4500 struct e1000_phy_info *phy; 4501 struct e1000_hw *hw; 4502 boolean_t invalid; 4503 4504 mac = &Adapter->shared.mac; 4505 phy = &Adapter->shared.phy; 4506 hw = &Adapter->shared; 4507 invalid = B_FALSE; 4508 4509 if (hw->phy.media_type != e1000_media_type_copper) 4510 goto out; 4511 4512 if (Adapter->param_adv_autoneg == 1) { 4513 mac->autoneg = B_TRUE; 4514 phy->autoneg_advertised = 0; 4515 4516 /* 4517 * 1000hdx is not supported for autonegotiation 4518 */ 4519 if (Adapter->param_adv_1000fdx == 1) 4520 phy->autoneg_advertised |= ADVERTISE_1000_FULL; 4521 4522 if (Adapter->param_adv_100fdx == 1) 4523 phy->autoneg_advertised |= ADVERTISE_100_FULL; 4524 4525 if (Adapter->param_adv_100hdx == 1) 4526 phy->autoneg_advertised |= ADVERTISE_100_HALF; 4527 4528 if (Adapter->param_adv_10fdx == 1) 4529 phy->autoneg_advertised |= ADVERTISE_10_FULL; 4530 4531 if (Adapter->param_adv_10hdx == 1) 4532 phy->autoneg_advertised |= ADVERTISE_10_HALF; 4533 4534 if (phy->autoneg_advertised == 0) 4535 invalid = B_TRUE; 4536 } else { 4537 mac->autoneg = B_FALSE; 4538 4539 /* 4540 * For Intel copper cards, 1000fdx and 1000hdx are not 4541 * supported for forced 
link 4542 */ 4543 if (Adapter->param_adv_100fdx == 1) 4544 mac->forced_speed_duplex = ADVERTISE_100_FULL; 4545 else if (Adapter->param_adv_100hdx == 1) 4546 mac->forced_speed_duplex = ADVERTISE_100_HALF; 4547 else if (Adapter->param_adv_10fdx == 1) 4548 mac->forced_speed_duplex = ADVERTISE_10_FULL; 4549 else if (Adapter->param_adv_10hdx == 1) 4550 mac->forced_speed_duplex = ADVERTISE_10_HALF; 4551 else 4552 invalid = B_TRUE; 4553 4554 } 4555 4556 if (invalid) { 4557 e1000g_log(Adapter, CE_WARN, 4558 "Invalid link settings. Setup link to " 4559 "support autonegotiation with all link capabilities."); 4560 mac->autoneg = B_TRUE; 4561 phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 4562 } 4563 4564 out: 4565 return (e1000_setup_link(&Adapter->shared)); 4566 } 4567 4568 static void 4569 e1000g_timer_tx_resched(struct e1000g *Adapter) 4570 { 4571 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 4572 4573 rw_enter(&Adapter->chip_lock, RW_READER); 4574 4575 if (tx_ring->resched_needed && 4576 ((ddi_get_lbolt() - tx_ring->resched_timestamp) > 4577 drv_usectohz(1000000)) && 4578 (Adapter->e1000g_state & E1000G_STARTED) && 4579 (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) { 4580 tx_ring->resched_needed = B_FALSE; 4581 mac_tx_update(Adapter->mh); 4582 E1000G_STAT(tx_ring->stat_reschedule); 4583 E1000G_STAT(tx_ring->stat_timer_reschedule); 4584 } 4585 4586 rw_exit(&Adapter->chip_lock); 4587 } 4588 4589 static void 4590 e1000g_local_timer(void *ws) 4591 { 4592 struct e1000g *Adapter = (struct e1000g *)ws; 4593 struct e1000_hw *hw; 4594 e1000g_ether_addr_t ether_addr; 4595 boolean_t link_changed; 4596 4597 hw = &Adapter->shared; 4598 4599 if (Adapter->e1000g_state & E1000G_ERROR) { 4600 rw_enter(&Adapter->chip_lock, RW_WRITER); 4601 Adapter->e1000g_state &= ~E1000G_ERROR; 4602 rw_exit(&Adapter->chip_lock); 4603 4604 Adapter->reset_count++; 4605 if (e1000g_global_reset(Adapter)) { 4606 ddi_fm_service_impact(Adapter->dip, 4607 DDI_SERVICE_RESTORED); 4608 e1000g_timer_tx_resched(Adapter); 4609 } else 4610 ddi_fm_service_impact(Adapter->dip, 4611 DDI_SERVICE_LOST); 4612 return; 4613 } 4614 4615 if (e1000g_stall_check(Adapter)) { 4616 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 4617 "Tx stall detected. Activate automatic recovery.\n"); 4618 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL); 4619 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 4620 Adapter->reset_count++; 4621 if (e1000g_reset_adapter(Adapter)) { 4622 ddi_fm_service_impact(Adapter->dip, 4623 DDI_SERVICE_RESTORED); 4624 e1000g_timer_tx_resched(Adapter); 4625 } 4626 return; 4627 } 4628 4629 link_changed = B_FALSE; 4630 rw_enter(&Adapter->chip_lock, RW_READER); 4631 if (Adapter->link_complete) 4632 link_changed = e1000g_link_check(Adapter); 4633 rw_exit(&Adapter->chip_lock); 4634 4635 if (link_changed) { 4636 if (!Adapter->reset_flag && 4637 (Adapter->e1000g_state & E1000G_STARTED) && 4638 !(Adapter->e1000g_state & E1000G_SUSPENDED)) 4639 mac_link_update(Adapter->mh, Adapter->link_state); 4640 if (Adapter->link_state == LINK_STATE_UP) 4641 Adapter->reset_flag = B_FALSE; 4642 } 4643 /* 4644 * Workaround for esb2. Data stuck in fifo on a link 4645 * down event. Reset the adapter to recover it. 4646 */ 4647 if (Adapter->esb2_workaround) { 4648 Adapter->esb2_workaround = B_FALSE; 4649 (void) e1000g_reset_adapter(Adapter); 4650 return; 4651 } 4652 4653 /* 4654 * With 82571 controllers, any locally administered address will 4655 * be overwritten when there is a reset on the other port. 4656 * Detect this circumstance and correct it. 
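 * The receive address register pair (RAR0) is read back below and compared byte-by-byte, in reversed (network) order, against hw->mac.addr; on any mismatch the address is re-programmed with e1000_rar_set().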
4657 */ 4658 if ((hw->mac.type == e1000_82571) && 4659 (e1000_get_laa_state_82571(hw) == B_TRUE)) { 4660 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0); 4661 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1); 4662 4663 ether_addr.reg.low = ntohl(ether_addr.reg.low); 4664 ether_addr.reg.high = ntohl(ether_addr.reg.high); 4665 4666 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) || 4667 (ether_addr.mac.addr[4] != hw->mac.addr[1]) || 4668 (ether_addr.mac.addr[3] != hw->mac.addr[2]) || 4669 (ether_addr.mac.addr[2] != hw->mac.addr[3]) || 4670 (ether_addr.mac.addr[1] != hw->mac.addr[4]) || 4671 (ether_addr.mac.addr[0] != hw->mac.addr[5])) { 4672 (void) e1000_rar_set(hw, hw->mac.addr, 0); 4673 } 4674 } 4675 4676 /* 4677 * Long TTL workaround for 82541/82547 4678 */ 4679 (void) e1000_igp_ttl_workaround_82547(hw); 4680 4681 /* 4682 * Check for Adaptive IFS settings If there are lots of collisions 4683 * change the value in steps... 4684 * These properties should only be set for 10/100 4685 */ 4686 if ((hw->phy.media_type == e1000_media_type_copper) && 4687 ((Adapter->link_speed == SPEED_100) || 4688 (Adapter->link_speed == SPEED_10))) { 4689 e1000_update_adaptive(hw); 4690 } 4691 /* 4692 * Set Timer Interrupts 4693 */ 4694 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 4695 4696 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 4697 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4698 else 4699 e1000g_timer_tx_resched(Adapter); 4700 4701 restart_watchdog_timer(Adapter); 4702 } 4703 4704 /* 4705 * The function e1000g_link_timer() is called when the timer for link setup 4706 * is expired, which indicates the completion of the link setup. The link 4707 * state will not be updated until the link setup is completed. And the 4708 * link state will not be sent to the upper layer through mac_link_update() 4709 * in this function. It will be updated in the local timer routine or the 4710 * interrupt service routine after the interface is started (plumbed). 
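 * (Accordingly, the timer handler below only marks link_complete and clears link_tid under link_lock; it does not call mac_link_update() itself.)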
4711 */ 4712 static void 4713 e1000g_link_timer(void *arg) 4714 { 4715 struct e1000g *Adapter = (struct e1000g *)arg; 4716 4717 mutex_enter(&Adapter->link_lock); 4718 Adapter->link_complete = B_TRUE; 4719 Adapter->link_tid = 0; 4720 mutex_exit(&Adapter->link_lock); 4721 } 4722 4723 /* 4724 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf 4725 * 4726 * This function reads the forced speed and duplex for 10/100 Mbps speeds 4727 * and also for 1000 Mbps speeds from the e1000g.conf file 4728 */ 4729 static void 4730 e1000g_force_speed_duplex(struct e1000g *Adapter) 4731 { 4732 int forced; 4733 int propval; 4734 struct e1000_mac_info *mac = &Adapter->shared.mac; 4735 struct e1000_phy_info *phy = &Adapter->shared.phy; 4736 4737 /* 4738 * get value out of config file 4739 */ 4740 (void) e1000g_get_prop(Adapter, "ForceSpeedDuplex", 4741 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced); 4742 4743 switch (forced) { 4744 case GDIAG_10_HALF: 4745 /* 4746 * Disable Auto Negotiation 4747 */ 4748 mac->autoneg = B_FALSE; 4749 mac->forced_speed_duplex = ADVERTISE_10_HALF; 4750 break; 4751 case GDIAG_10_FULL: 4752 /* 4753 * Disable Auto Negotiation 4754 */ 4755 mac->autoneg = B_FALSE; 4756 mac->forced_speed_duplex = ADVERTISE_10_FULL; 4757 break; 4758 case GDIAG_100_HALF: 4759 /* 4760 * Disable Auto Negotiation 4761 */ 4762 mac->autoneg = B_FALSE; 4763 mac->forced_speed_duplex = ADVERTISE_100_HALF; 4764 break; 4765 case GDIAG_100_FULL: 4766 /* 4767 * Disable Auto Negotiation 4768 */ 4769 mac->autoneg = B_FALSE; 4770 mac->forced_speed_duplex = ADVERTISE_100_FULL; 4771 break; 4772 case GDIAG_1000_FULL: 4773 /* 4774 * The gigabit spec requires autonegotiation. Therefore, 4775 * when the user wants to force the speed to 1000Mbps, we 4776 * enable AutoNeg, but only allow the hardware to advertise 4777 * 1000Mbps. This is different from 10/100 operation, where 4778 * we are allowed to link without any negotiation. 4779 */ 4780 mac->autoneg = B_TRUE; 4781 phy->autoneg_advertised = ADVERTISE_1000_FULL; 4782 break; 4783 default: /* obey the setting of AutoNegAdvertised */ 4784 mac->autoneg = B_TRUE; 4785 (void) e1000g_get_prop(Adapter, "AutoNegAdvertised", 4786 0, AUTONEG_ADVERTISE_SPEED_DEFAULT, 4787 AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval); 4788 phy->autoneg_advertised = (uint16_t)propval; 4789 break; 4790 } /* switch */ 4791 } 4792 4793 /* 4794 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf 4795 * 4796 * This function reads MaxFrameSize from e1000g.conf 4797 */ 4798 static void 4799 e1000g_get_max_frame_size(struct e1000g *Adapter) 4800 { 4801 int max_frame; 4802 4803 /* 4804 * get value out of config file 4805 */ 4806 (void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0, 4807 &max_frame); 4808 4809 switch (max_frame) { 4810 case 0: 4811 Adapter->default_mtu = ETHERMTU; 4812 break; 4813 case 1: 4814 Adapter->default_mtu = FRAME_SIZE_UPTO_4K - 4815 sizeof (struct ether_vlan_header) - ETHERFCSL; 4816 break; 4817 case 2: 4818 Adapter->default_mtu = FRAME_SIZE_UPTO_8K - 4819 sizeof (struct ether_vlan_header) - ETHERFCSL; 4820 break; 4821 case 3: 4822 Adapter->default_mtu = FRAME_SIZE_UPTO_16K - 4823 sizeof (struct ether_vlan_header) - ETHERFCSL; 4824 break; 4825 default: 4826 Adapter->default_mtu = ETHERMTU; 4827 break; 4828 } /* switch */ 4829 4830 /* 4831 * If the user-configured MTU is larger than the device's maximum MTU, 4832 * the MTU is set to the device's maximum value.
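 * As a worked example: assuming FRAME_SIZE_UPTO_4K is 4096 bytes, sizeof (struct ether_vlan_header) is 18 and ETHERFCSL is 4, the MaxFrameSize=1 case above yields a default MTU of 4096 - 18 - 4 = 4074 bytes before this clamp is applied.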
4833 */ 4834 if (Adapter->default_mtu > Adapter->max_mtu) 4835 Adapter->default_mtu = Adapter->max_mtu; 4836 4837 Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu); 4838 } 4839 4840 /* 4841 * e1000g_pch_limits - Apply limits of the PCH silicon type 4842 * 4843 * At any frame size larger than the ethernet default, 4844 * prevent linking at 10/100 speeds. 4845 */ 4846 static void 4847 e1000g_pch_limits(struct e1000g *Adapter) 4848 { 4849 struct e1000_hw *hw = &Adapter->shared; 4850 4851 /* only applies to PCH silicon type */ 4852 if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan) 4853 return; 4854 4855 /* only applies to frames larger than ethernet default */ 4856 if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) { 4857 hw->mac.autoneg = B_TRUE; 4858 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL; 4859 4860 Adapter->param_adv_autoneg = 1; 4861 Adapter->param_adv_1000fdx = 1; 4862 4863 Adapter->param_adv_100fdx = 0; 4864 Adapter->param_adv_100hdx = 0; 4865 Adapter->param_adv_10fdx = 0; 4866 Adapter->param_adv_10hdx = 0; 4867 4868 e1000g_param_sync(Adapter); 4869 } 4870 } 4871 4872 /* 4873 * e1000g_mtu2maxframe - convert given MTU to maximum frame size 4874 */ 4875 static uint32_t 4876 e1000g_mtu2maxframe(uint32_t mtu) 4877 { 4878 uint32_t maxframe; 4879 4880 maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL; 4881 4882 return (maxframe); 4883 } 4884 4885 static void 4886 arm_watchdog_timer(struct e1000g *Adapter) 4887 { 4888 Adapter->watchdog_tid = 4889 timeout(e1000g_local_timer, 4890 (void *)Adapter, 1 * drv_usectohz(1000000)); 4891 } 4892 #pragma inline(arm_watchdog_timer) 4893 4894 static void 4895 enable_watchdog_timer(struct e1000g *Adapter) 4896 { 4897 mutex_enter(&Adapter->watchdog_lock); 4898 4899 if (!Adapter->watchdog_timer_enabled) { 4900 Adapter->watchdog_timer_enabled = B_TRUE; 4901 Adapter->watchdog_timer_started = B_TRUE; 4902 arm_watchdog_timer(Adapter); 4903 } 4904 4905 mutex_exit(&Adapter->watchdog_lock); 4906 } 4907 4908 static void 4909 disable_watchdog_timer(struct e1000g *Adapter) 4910 { 4911 timeout_id_t tid; 4912 4913 mutex_enter(&Adapter->watchdog_lock); 4914 4915 Adapter->watchdog_timer_enabled = B_FALSE; 4916 Adapter->watchdog_timer_started = B_FALSE; 4917 tid = Adapter->watchdog_tid; 4918 Adapter->watchdog_tid = 0; 4919 4920 mutex_exit(&Adapter->watchdog_lock); 4921 4922 if (tid != 0) 4923 (void) untimeout(tid); 4924 } 4925 4926 static void 4927 start_watchdog_timer(struct e1000g *Adapter) 4928 { 4929 mutex_enter(&Adapter->watchdog_lock); 4930 4931 if (Adapter->watchdog_timer_enabled) { 4932 if (!Adapter->watchdog_timer_started) { 4933 Adapter->watchdog_timer_started = B_TRUE; 4934 arm_watchdog_timer(Adapter); 4935 } 4936 } 4937 4938 mutex_exit(&Adapter->watchdog_lock); 4939 } 4940 4941 static void 4942 restart_watchdog_timer(struct e1000g *Adapter) 4943 { 4944 mutex_enter(&Adapter->watchdog_lock); 4945 4946 if (Adapter->watchdog_timer_started) 4947 arm_watchdog_timer(Adapter); 4948 4949 mutex_exit(&Adapter->watchdog_lock); 4950 } 4951 4952 static void 4953 stop_watchdog_timer(struct e1000g *Adapter) 4954 { 4955 timeout_id_t tid; 4956 4957 mutex_enter(&Adapter->watchdog_lock); 4958 4959 Adapter->watchdog_timer_started = B_FALSE; 4960 tid = Adapter->watchdog_tid; 4961 Adapter->watchdog_tid = 0; 4962 4963 mutex_exit(&Adapter->watchdog_lock); 4964 4965 if (tid != 0) 4966 (void) untimeout(tid); 4967 } 4968 4969 static void 4970 stop_link_timer(struct e1000g *Adapter) 4971 { 4972 timeout_id_t tid; 4973 4974 /* Disable the 
link timer */ 4975 mutex_enter(&Adapter->link_lock); 4976 4977 tid = Adapter->link_tid; 4978 Adapter->link_tid = 0; 4979 4980 mutex_exit(&Adapter->link_lock); 4981 4982 if (tid != 0) 4983 (void) untimeout(tid); 4984 } 4985 4986 static void 4987 stop_82547_timer(e1000g_tx_ring_t *tx_ring) 4988 { 4989 timeout_id_t tid; 4990 4991 /* Disable the tx timer for 82547 chipset */ 4992 mutex_enter(&tx_ring->tx_lock); 4993 4994 tx_ring->timer_enable_82547 = B_FALSE; 4995 tid = tx_ring->timer_id_82547; 4996 tx_ring->timer_id_82547 = 0; 4997 4998 mutex_exit(&tx_ring->tx_lock); 4999 5000 if (tid != 0) 5001 (void) untimeout(tid); 5002 } 5003 5004 void 5005 e1000g_clear_interrupt(struct e1000g *Adapter) 5006 { 5007 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 5008 0xffffffff & ~E1000_IMS_RXSEQ); 5009 } 5010 5011 void 5012 e1000g_mask_interrupt(struct e1000g *Adapter) 5013 { 5014 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, 5015 IMS_ENABLE_MASK & ~E1000_IMS_TXDW); 5016 5017 if (Adapter->tx_intr_enable) 5018 e1000g_mask_tx_interrupt(Adapter); 5019 } 5020 5021 /* 5022 * This routine is called by e1000g_quiesce(), therefore must not block. 5023 */ 5024 void 5025 e1000g_clear_all_interrupts(struct e1000g *Adapter) 5026 { 5027 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff); 5028 } 5029 5030 void 5031 e1000g_mask_tx_interrupt(struct e1000g *Adapter) 5032 { 5033 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW); 5034 } 5035 5036 void 5037 e1000g_clear_tx_interrupt(struct e1000g *Adapter) 5038 { 5039 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW); 5040 } 5041 5042 static void 5043 e1000g_smartspeed(struct e1000g *Adapter) 5044 { 5045 struct e1000_hw *hw = &Adapter->shared; 5046 uint16_t phy_status; 5047 uint16_t phy_ctrl; 5048 5049 /* 5050 * If we're not T-or-T, or we're not autoneg'ing, or we're not 5051 * advertising 1000Full, we don't even use the workaround 5052 */ 5053 if ((hw->phy.type != e1000_phy_igp) || 5054 !hw->mac.autoneg || 5055 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)) 5056 return; 5057 5058 /* 5059 * True if this is the first call of this function or after every 5060 * 30 seconds of not having link 5061 */ 5062 if (Adapter->smartspeed == 0) { 5063 /* 5064 * If Master/Slave config fault is asserted twice, we 5065 * assume back-to-back 5066 */ 5067 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 5068 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 5069 return; 5070 5071 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 5072 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 5073 return; 5074 /* 5075 * We're assuming back-2-back because our status register 5076 * insists! there's a fault in the master/slave 5077 * relationship that was "negotiated" 5078 */ 5079 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 5080 /* 5081 * Is the phy configured for manual configuration of 5082 * master/slave? 5083 */ 5084 if (phy_ctrl & CR_1000T_MS_ENABLE) { 5085 /* 5086 * Yes. 
Then disable manual configuration (enable 5087 * auto configuration) of master/slave 5088 */ 5089 phy_ctrl &= ~CR_1000T_MS_ENABLE; 5090 (void) e1000_write_phy_reg(hw, 5091 PHY_1000T_CTRL, phy_ctrl); 5092 /* 5093 * Effectively starting the clock 5094 */ 5095 Adapter->smartspeed++; 5096 /* 5097 * Restart autonegotiation 5098 */ 5099 if (!e1000_phy_setup_autoneg(hw) && 5100 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 5101 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 5102 MII_CR_RESTART_AUTO_NEG); 5103 (void) e1000_write_phy_reg(hw, 5104 PHY_CONTROL, phy_ctrl); 5105 } 5106 } 5107 return; 5108 /* 5109 * Has 6 seconds transpired still without link? Remember, 5110 * you should reset the smartspeed counter once you obtain 5111 * link 5112 */ 5113 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 5114 /* 5115 * Yes. Remember, we did at the start determine that 5116 * there's a master/slave configuration fault, so we're 5117 * still assuming there's someone on the other end, but we 5118 * just haven't yet been able to talk to it. We then 5119 * re-enable auto configuration of master/slave to see if 5120 * we're running 2/3 pair cables. 5121 */ 5122 /* 5123 * If still no link, perhaps using 2/3 pair cable 5124 */ 5125 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 5126 phy_ctrl |= CR_1000T_MS_ENABLE; 5127 (void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 5128 /* 5129 * Restart autoneg with phy enabled for manual 5130 * configuration of master/slave 5131 */ 5132 if (!e1000_phy_setup_autoneg(hw) && 5133 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 5134 phy_ctrl |= 5135 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 5136 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl); 5137 } 5138 /* 5139 * Hopefully, there are no more faults and we've obtained 5140 * link as a result. 5141 */ 5142 } 5143 /* 5144 * Restart process after E1000_SMARTSPEED_MAX iterations (30 5145 * seconds) 5146 */ 5147 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 5148 Adapter->smartspeed = 0; 5149 } 5150 5151 static boolean_t 5152 is_valid_mac_addr(uint8_t *mac_addr) 5153 { 5154 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 5155 const uint8_t addr_test2[6] = 5156 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 5157 5158 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 5159 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 5160 return (B_FALSE); 5161 5162 return (B_TRUE); 5163 } 5164 5165 /* 5166 * e1000g_stall_check - check for tx stall 5167 * 5168 * This function checks if the adapter is stalled (in transmit). 5169 * 5170 * It is called each time the watchdog timeout is invoked. 5171 * If the transmit descriptor reclaim continuously fails, 5172 * the watchdog value will increment by 1. If the watchdog 5173 * value exceeds the threshold, the adapter is assumed to 5174 * have stalled and need to be reset. 
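 * In the code below this reduces to triggering a reclaim pass with e1000g_recycle() and then sampling Adapter->stall_flag, which the reclaim path is expected to set once the stall threshold is exceeded.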
5175 */ 5176 static boolean_t 5177 e1000g_stall_check(struct e1000g *Adapter) 5178 { 5179 e1000g_tx_ring_t *tx_ring; 5180 5181 tx_ring = Adapter->tx_ring; 5182 5183 if (Adapter->link_state != LINK_STATE_UP) 5184 return (B_FALSE); 5185 5186 (void) e1000g_recycle(tx_ring); 5187 5188 if (Adapter->stall_flag) 5189 return (B_TRUE); 5190 5191 return (B_FALSE); 5192 } 5193 5194 #ifdef E1000G_DEBUG 5195 static enum ioc_reply 5196 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp) 5197 { 5198 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd); 5199 e1000g_peekpoke_t *ppd; 5200 uint64_t mem_va; 5201 uint64_t maxoff; 5202 boolean_t peek; 5203 5204 switch (iocp->ioc_cmd) { 5205 5206 case E1000G_IOC_REG_PEEK: 5207 peek = B_TRUE; 5208 break; 5209 5210 case E1000G_IOC_REG_POKE: 5211 peek = B_FALSE; 5212 break; 5213 5214 default: 5215 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL, 5216 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n", 5217 iocp->ioc_cmd); 5218 return (IOC_INVAL); 5219 } 5220 5221 /* 5222 * Validate format of ioctl 5223 */ 5224 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t)) 5225 return (IOC_INVAL); 5226 if (mp->b_cont == NULL) 5227 return (IOC_INVAL); 5228 5229 ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr; 5230 5231 /* 5232 * Validate request parameters 5233 */ 5234 switch (ppd->pp_acc_space) { 5235 5236 default: 5237 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL, 5238 "e1000g_diag_ioctl: invalid access space 0x%X\n", 5239 ppd->pp_acc_space); 5240 return (IOC_INVAL); 5241 5242 case E1000G_PP_SPACE_REG: 5243 /* 5244 * Memory-mapped I/O space 5245 */ 5246 ASSERT(ppd->pp_acc_size == 4); 5247 if (ppd->pp_acc_size != 4) 5248 return (IOC_INVAL); 5249 5250 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0) 5251 return (IOC_INVAL); 5252 5253 mem_va = 0; 5254 maxoff = 0x10000; 5255 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg; 5256 break; 5257 5258 case E1000G_PP_SPACE_E1000G: 5259 /* 5260 * E1000g data structure! 5261 */ 5262 mem_va = (uintptr_t)e1000gp; 5263 maxoff = sizeof (struct e1000g); 5264 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem; 5265 break; 5266 5267 } 5268 5269 if (ppd->pp_acc_offset >= maxoff) 5270 return (IOC_INVAL); 5271 5272 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff) 5273 return (IOC_INVAL); 5274 5275 /* 5276 * All OK - go! 5277 */ 5278 ppd->pp_acc_offset += mem_va; 5279 (*ppfn)(e1000gp, ppd); 5280 return (peek ?
IOC_REPLY : IOC_ACK); 5281 } 5282 5283 static void 5284 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5285 { 5286 ddi_acc_handle_t handle; 5287 uint32_t *regaddr; 5288 5289 handle = e1000gp->osdep.reg_handle; 5290 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr + 5291 (uintptr_t)ppd->pp_acc_offset); 5292 5293 ppd->pp_acc_data = ddi_get32(handle, regaddr); 5294 } 5295 5296 static void 5297 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5298 { 5299 ddi_acc_handle_t handle; 5300 uint32_t *regaddr; 5301 uint32_t value; 5302 5303 handle = e1000gp->osdep.reg_handle; 5304 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr + 5305 (uintptr_t)ppd->pp_acc_offset); 5306 value = (uint32_t)ppd->pp_acc_data; 5307 5308 ddi_put32(handle, regaddr, value); 5309 } 5310 5311 static void 5312 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5313 { 5314 uint64_t value; 5315 void *vaddr; 5316 5317 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 5318 5319 switch (ppd->pp_acc_size) { 5320 case 1: 5321 value = *(uint8_t *)vaddr; 5322 break; 5323 5324 case 2: 5325 value = *(uint16_t *)vaddr; 5326 break; 5327 5328 case 4: 5329 value = *(uint32_t *)vaddr; 5330 break; 5331 5332 case 8: 5333 value = *(uint64_t *)vaddr; 5334 break; 5335 } 5336 5337 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 5338 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n", 5339 (void *)e1000gp, (void *)ppd, value, vaddr); 5340 5341 ppd->pp_acc_data = value; 5342 } 5343 5344 static void 5345 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5346 { 5347 uint64_t value; 5348 void *vaddr; 5349 5350 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 5351 value = ppd->pp_acc_data; 5352 5353 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 5354 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n", 5355 (void *)e1000gp, (void *)ppd, value, vaddr); 5356 5357 switch (ppd->pp_acc_size) { 5358 case 1: 5359 *(uint8_t *)vaddr = (uint8_t)value; 5360 break; 5361 5362 case 2: 5363 *(uint16_t *)vaddr = (uint16_t)value; 5364 break; 5365 5366 case 4: 5367 *(uint32_t *)vaddr = (uint32_t)value; 5368 break; 5369 5370 case 8: 5371 *(uint64_t *)vaddr = (uint64_t)value; 5372 break; 5373 } 5374 } 5375 #endif 5376 5377 /* 5378 * Loopback Support 5379 */ 5380 static lb_property_t lb_normal = 5381 { normal, "normal", E1000G_LB_NONE }; 5382 static lb_property_t lb_external1000 = 5383 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 }; 5384 static lb_property_t lb_external100 = 5385 { external, "100Mbps", E1000G_LB_EXTERNAL_100 }; 5386 static lb_property_t lb_external10 = 5387 { external, "10Mbps", E1000G_LB_EXTERNAL_10 }; 5388 static lb_property_t lb_phy = 5389 { internal, "PHY", E1000G_LB_INTERNAL_PHY }; 5390 5391 static enum ioc_reply 5392 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp) 5393 { 5394 lb_info_sz_t *lbsp; 5395 lb_property_t *lbpp; 5396 struct e1000_hw *hw; 5397 uint32_t *lbmp; 5398 uint32_t size; 5399 uint32_t value; 5400 5401 hw = &Adapter->shared; 5402 5403 if (mp->b_cont == NULL) 5404 return (IOC_INVAL); 5405 5406 if (!e1000g_check_loopback_support(hw)) { 5407 e1000g_log(NULL, CE_WARN, 5408 "Loopback is not supported on e1000g%d", Adapter->instance); 5409 return (IOC_INVAL); 5410 } 5411 5412 switch (iocp->ioc_cmd) { 5413 default: 5414 return (IOC_INVAL); 5415 5416 case LB_GET_INFO_SIZE: 5417 size = sizeof (lb_info_sz_t); 5418 if (iocp->ioc_count != size) 5419 return (IOC_INVAL); 5420 5421 rw_enter(&Adapter->chip_lock, RW_WRITER); 
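/* Refresh the PHY status snapshot while holding the chip lock; the phy_status/phy_ext_status bits consulted below determine which loopback modes are advertised. */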
5422 e1000g_get_phy_state(Adapter); 5423 5424 /* 5425 * Workaround for hardware faults. In order to get a stable 5426 * state of phy, we will wait for a specific interval and 5427 * try again. The time delay is an experiential value based 5428 * on our testing. 5429 */ 5430 msec_delay(100); 5431 e1000g_get_phy_state(Adapter); 5432 rw_exit(&Adapter->chip_lock); 5433 5434 value = sizeof (lb_normal); 5435 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5436 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5437 (hw->phy.media_type == e1000_media_type_fiber) || 5438 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5439 value += sizeof (lb_phy); 5440 switch (hw->mac.type) { 5441 case e1000_82571: 5442 case e1000_82572: 5443 case e1000_80003es2lan: 5444 value += sizeof (lb_external1000); 5445 break; 5446 } 5447 } 5448 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5449 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5450 value += sizeof (lb_external100); 5451 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5452 value += sizeof (lb_external10); 5453 5454 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 5455 *lbsp = value; 5456 break; 5457 5458 case LB_GET_INFO: 5459 value = sizeof (lb_normal); 5460 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5461 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5462 (hw->phy.media_type == e1000_media_type_fiber) || 5463 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5464 value += sizeof (lb_phy); 5465 switch (hw->mac.type) { 5466 case e1000_82571: 5467 case e1000_82572: 5468 case e1000_80003es2lan: 5469 value += sizeof (lb_external1000); 5470 break; 5471 } 5472 } 5473 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5474 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5475 value += sizeof (lb_external100); 5476 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5477 value += sizeof (lb_external10); 5478 5479 size = value; 5480 if (iocp->ioc_count != size) 5481 return (IOC_INVAL); 5482 5483 value = 0; 5484 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 5485 lbpp[value++] = lb_normal; 5486 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5487 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5488 (hw->phy.media_type == e1000_media_type_fiber) || 5489 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5490 lbpp[value++] = lb_phy; 5491 switch (hw->mac.type) { 5492 case e1000_82571: 5493 case e1000_82572: 5494 case e1000_80003es2lan: 5495 lbpp[value++] = lb_external1000; 5496 break; 5497 } 5498 } 5499 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5500 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5501 lbpp[value++] = lb_external100; 5502 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5503 lbpp[value++] = lb_external10; 5504 break; 5505 5506 case LB_GET_MODE: 5507 size = sizeof (uint32_t); 5508 if (iocp->ioc_count != size) 5509 return (IOC_INVAL); 5510 5511 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 5512 *lbmp = Adapter->loopback_mode; 5513 break; 5514 5515 case LB_SET_MODE: 5516 size = 0; 5517 if (iocp->ioc_count != sizeof (uint32_t)) 5518 return (IOC_INVAL); 5519 5520 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 5521 if (!e1000g_set_loopback_mode(Adapter, *lbmp)) 5522 return (IOC_INVAL); 5523 break; 5524 } 5525 5526 iocp->ioc_count = size; 5527 iocp->ioc_error = 0; 5528 5529 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 5530 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 5531 return (IOC_INVAL); 5532 } 5533 5534 return 
(IOC_REPLY); 5535 } 5536 5537 static boolean_t 5538 e1000g_check_loopback_support(struct e1000_hw *hw) 5539 { 5540 switch (hw->mac.type) { 5541 case e1000_82540: 5542 case e1000_82545: 5543 case e1000_82545_rev_3: 5544 case e1000_82546: 5545 case e1000_82546_rev_3: 5546 case e1000_82541: 5547 case e1000_82541_rev_2: 5548 case e1000_82547: 5549 case e1000_82547_rev_2: 5550 case e1000_82571: 5551 case e1000_82572: 5552 case e1000_82573: 5553 case e1000_82574: 5554 case e1000_80003es2lan: 5555 case e1000_ich9lan: 5556 case e1000_ich10lan: 5557 return (B_TRUE); 5558 } 5559 return (B_FALSE); 5560 } 5561 5562 static boolean_t 5563 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode) 5564 { 5565 struct e1000_hw *hw; 5566 int i, times; 5567 boolean_t link_up; 5568 5569 if (mode == Adapter->loopback_mode) 5570 return (B_TRUE); 5571 5572 hw = &Adapter->shared; 5573 times = 0; 5574 5575 Adapter->loopback_mode = mode; 5576 5577 if (mode == E1000G_LB_NONE) { 5578 /* Reset the chip */ 5579 hw->phy.autoneg_wait_to_complete = B_TRUE; 5580 (void) e1000g_reset_adapter(Adapter); 5581 hw->phy.autoneg_wait_to_complete = B_FALSE; 5582 return (B_TRUE); 5583 } 5584 5585 again: 5586 5587 rw_enter(&Adapter->chip_lock, RW_WRITER); 5588 5589 switch (mode) { 5590 default: 5591 rw_exit(&Adapter->chip_lock); 5592 return (B_FALSE); 5593 5594 case E1000G_LB_EXTERNAL_1000: 5595 e1000g_set_external_loopback_1000(Adapter); 5596 break; 5597 5598 case E1000G_LB_EXTERNAL_100: 5599 e1000g_set_external_loopback_100(Adapter); 5600 break; 5601 5602 case E1000G_LB_EXTERNAL_10: 5603 e1000g_set_external_loopback_10(Adapter); 5604 break; 5605 5606 case E1000G_LB_INTERNAL_PHY: 5607 e1000g_set_internal_loopback(Adapter); 5608 break; 5609 } 5610 5611 times++; 5612 5613 rw_exit(&Adapter->chip_lock); 5614 5615 /* Wait for link up */ 5616 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--) 5617 msec_delay(100); 5618 5619 rw_enter(&Adapter->chip_lock, RW_WRITER); 5620 5621 link_up = e1000g_link_up(Adapter); 5622 5623 rw_exit(&Adapter->chip_lock); 5624 5625 if (!link_up) { 5626 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5627 "Failed to get the link up"); 5628 if (times < 2) { 5629 /* Reset the link */ 5630 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5631 "Reset the link ..."); 5632 (void) e1000g_reset_adapter(Adapter); 5633 goto again; 5634 } 5635 5636 /* 5637 * Reset driver to loopback none when set loopback failed 5638 * for the second time. 5639 */ 5640 Adapter->loopback_mode = E1000G_LB_NONE; 5641 5642 /* Reset the chip */ 5643 hw->phy.autoneg_wait_to_complete = B_TRUE; 5644 (void) e1000g_reset_adapter(Adapter); 5645 hw->phy.autoneg_wait_to_complete = B_FALSE; 5646 5647 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5648 "Set loopback mode failed, reset to loopback none"); 5649 5650 return (B_FALSE); 5651 } 5652 5653 return (B_TRUE); 5654 } 5655 5656 /* 5657 * The following loopback settings are from Intel's technical 5658 * document - "How To Loopback". All the register settings and 5659 * time delay values are directly inherited from the document 5660 * without more explanations available. 
5661 */ 5662 static void 5663 e1000g_set_internal_loopback(struct e1000g *Adapter) 5664 { 5665 struct e1000_hw *hw; 5666 uint32_t ctrl; 5667 uint32_t status; 5668 uint16_t phy_ctrl; 5669 uint16_t phy_reg; 5670 uint32_t txcw; 5671 5672 hw = &Adapter->shared; 5673 5674 /* Disable Smart Power Down */ 5675 phy_spd_state(hw, B_FALSE); 5676 5677 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); 5678 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10); 5679 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000; 5680 5681 switch (hw->mac.type) { 5682 case e1000_82540: 5683 case e1000_82545: 5684 case e1000_82545_rev_3: 5685 case e1000_82546: 5686 case e1000_82546_rev_3: 5687 case e1000_82573: 5688 /* Auto-MDI/MDIX off */ 5689 (void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 5690 /* Reset PHY to update Auto-MDI/MDIX */ 5691 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5692 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN); 5693 /* Reset PHY to auto-neg off and force 1000 */ 5694 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5695 phy_ctrl | MII_CR_RESET); 5696 /* 5697 * Disable PHY receiver for 82540/545/546 and 82573 Family. 5698 * See comments above e1000g_set_internal_loopback() for the 5699 * background. 5700 */ 5701 (void) e1000_write_phy_reg(hw, 29, 0x001F); 5702 (void) e1000_write_phy_reg(hw, 30, 0x8FFC); 5703 (void) e1000_write_phy_reg(hw, 29, 0x001A); 5704 (void) e1000_write_phy_reg(hw, 30, 0x8FF0); 5705 break; 5706 case e1000_80003es2lan: 5707 /* Force Link Up */ 5708 (void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 5709 0x1CC); 5710 /* Sets PCS loopback at 1Gbs */ 5711 (void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, 5712 0x1046); 5713 break; 5714 } 5715 5716 /* 5717 * The following registers should be set for e1000_phy_bm phy type. 5718 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy. 5719 * For others, we do not need to set these registers. 5720 */ 5721 if (hw->phy.type == e1000_phy_bm) { 5722 /* Set Default MAC Interface speed to 1GB */ 5723 (void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg); 5724 phy_reg &= ~0x0007; 5725 phy_reg |= 0x006; 5726 (void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg); 5727 /* Assert SW reset for above settings to take effect */ 5728 (void) e1000_phy_commit(hw); 5729 msec_delay(1); 5730 /* Force Full Duplex */ 5731 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg); 5732 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16), 5733 phy_reg | 0x000C); 5734 /* Set Link Up (in force link) */ 5735 (void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg); 5736 (void) e1000_write_phy_reg(hw, PHY_REG(776, 16), 5737 phy_reg | 0x0040); 5738 /* Force Link */ 5739 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg); 5740 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16), 5741 phy_reg | 0x0040); 5742 /* Set Early Link Enable */ 5743 (void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg); 5744 (void) e1000_write_phy_reg(hw, PHY_REG(769, 20), 5745 phy_reg | 0x0400); 5746 } 5747 5748 /* Set loopback */ 5749 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK); 5750 5751 msec_delay(250); 5752 5753 /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ 5754 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5755 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5756 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5757 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5758 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ 5759 E1000_CTRL_FD); /* Force Duplex to FULL */ 5760 5761 switch (hw->mac.type) { 5762 case e1000_82540: 5763 case e1000_82545: 5764 case e1000_82545_rev_3: 5765 case e1000_82546: 5766 case e1000_82546_rev_3: 5767 /* 5768 * For some serdes we'll need to commit the writes now 5769 * so that the status is updated on link 5770 */ 5771 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 5772 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5773 msec_delay(100); 5774 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5775 } 5776 5777 if (hw->phy.media_type == e1000_media_type_copper) { 5778 /* Invert Loss of Signal */ 5779 ctrl |= E1000_CTRL_ILOS; 5780 } else { 5781 /* Set ILOS on fiber nic if half duplex is detected */ 5782 status = E1000_READ_REG(hw, E1000_STATUS); 5783 if ((status & E1000_STATUS_FD) == 0) 5784 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5785 } 5786 break; 5787 5788 case e1000_82571: 5789 case e1000_82572: 5790 /* 5791 * The fiber/SerDes versions of this adapter do not contain an 5792 * accessible PHY. Therefore, loopback beyond MAC must be done 5793 * using SerDes analog loopback. 5794 */ 5795 if (hw->phy.media_type != e1000_media_type_copper) { 5796 /* Disable autoneg by setting bit 31 of TXCW to zero */ 5797 txcw = E1000_READ_REG(hw, E1000_TXCW); 5798 txcw &= ~((uint32_t)1 << 31); 5799 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 5800 5801 /* 5802 * Write 0x410 to Serdes Control register 5803 * to enable Serdes analog loopback 5804 */ 5805 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 5806 msec_delay(10); 5807 } 5808 5809 status = E1000_READ_REG(hw, E1000_STATUS); 5810 /* Set ILOS on fiber nic if half duplex is detected */ 5811 if ((hw->phy.media_type == e1000_media_type_fiber) && 5812 ((status & E1000_STATUS_FD) == 0 || 5813 (status & E1000_STATUS_LU) == 0)) 5814 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5815 else if (hw->phy.media_type == e1000_media_type_internal_serdes) 5816 ctrl |= E1000_CTRL_SLU; 5817 break; 5818 5819 case e1000_82573: 5820 ctrl |= E1000_CTRL_ILOS; 5821 break; 5822 case e1000_ich9lan: 5823 case e1000_ich10lan: 5824 ctrl |= E1000_CTRL_SLU; 5825 break; 5826 } 5827 if (hw->phy.type == e1000_phy_bm) 5828 ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS; 5829 5830 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5831 } 5832 5833 static void 5834 e1000g_set_external_loopback_1000(struct e1000g *Adapter) 5835 { 5836 struct e1000_hw *hw; 5837 uint32_t rctl; 5838 uint32_t ctrl_ext; 5839 uint32_t ctrl; 5840 uint32_t status; 5841 uint32_t txcw; 5842 uint16_t phydata; 5843 5844 hw = &Adapter->shared; 5845 5846 /* Disable Smart Power Down */ 5847 phy_spd_state(hw, B_FALSE); 5848 5849 switch (hw->mac.type) { 5850 case e1000_82571: 5851 case e1000_82572: 5852 switch (hw->phy.media_type) { 5853 case e1000_media_type_copper: 5854 /* Force link up (Must be done before the PHY writes) */ 5855 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5856 ctrl |= E1000_CTRL_SLU; /* Force Link Up */ 5857 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5858 5859 rctl = E1000_READ_REG(hw, E1000_RCTL); 5860 rctl |= (E1000_RCTL_EN | 5861 E1000_RCTL_SBP | 5862 E1000_RCTL_UPE | 5863 E1000_RCTL_MPE | 5864 E1000_RCTL_LPE | 5865 E1000_RCTL_BAM); /* 0x803E */ 5866 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 5867 5868 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 5869 
ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA | 5870 E1000_CTRL_EXT_SDP6_DATA | 5871 E1000_CTRL_EXT_SDP3_DATA | 5872 E1000_CTRL_EXT_SDP4_DIR | 5873 E1000_CTRL_EXT_SDP6_DIR | 5874 E1000_CTRL_EXT_SDP3_DIR); /* 0x0DD0 */ 5875 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 5876 5877 /* 5878 * This sequence tunes the PHY's SDP; these are not 5879 * customer-settable values. For background, see comments above 5880 * e1000g_set_internal_loopback(). 5881 */ 5882 (void) e1000_write_phy_reg(hw, 0x0, 0x140); 5883 msec_delay(10); 5884 (void) e1000_write_phy_reg(hw, 0x9, 0x1A00); 5885 (void) e1000_write_phy_reg(hw, 0x12, 0xC10); 5886 (void) e1000_write_phy_reg(hw, 0x12, 0x1C10); 5887 (void) e1000_write_phy_reg(hw, 0x1F37, 0x76); 5888 (void) e1000_write_phy_reg(hw, 0x1F33, 0x1); 5889 (void) e1000_write_phy_reg(hw, 0x1F33, 0x0); 5890 5891 (void) e1000_write_phy_reg(hw, 0x1F35, 0x65); 5892 (void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C); 5893 (void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC); 5894 (void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C); 5895 (void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC); 5896 5897 msec_delay(50); 5898 break; 5899 case e1000_media_type_fiber: 5900 case e1000_media_type_internal_serdes: 5901 status = E1000_READ_REG(hw, E1000_STATUS); 5902 if (((status & E1000_STATUS_LU) == 0) || 5903 (hw->phy.media_type == 5904 e1000_media_type_internal_serdes)) { 5905 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5906 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5907 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5908 } 5909 5910 /* Disable autoneg by setting bit 31 of TXCW to zero */ 5911 txcw = E1000_READ_REG(hw, E1000_TXCW); 5912 txcw &= ~((uint32_t)1 << 31); 5913 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 5914 5915 /* 5916 * Write 0x410 to Serdes Control register 5917 * to enable Serdes analog loopback 5918 */ 5919 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 5920 msec_delay(10); 5921 break; 5922 default: 5923 break; 5924 } 5925 break; 5926 case e1000_82574: 5927 case e1000_80003es2lan: 5928 case e1000_ich9lan: 5929 case e1000_ich10lan: 5930 (void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata); 5931 (void) e1000_write_phy_reg(hw, GG82563_REG(6, 16), 5932 phydata | (1 << 5)); 5933 Adapter->param_adv_autoneg = 1; 5934 Adapter->param_adv_1000fdx = 1; 5935 (void) e1000g_reset_link(Adapter); 5936 break; 5937 } 5938 } 5939 5940 static void 5941 e1000g_set_external_loopback_100(struct e1000g *Adapter) 5942 { 5943 struct e1000_hw *hw; 5944 uint32_t ctrl; 5945 uint16_t phy_ctrl; 5946 5947 hw = &Adapter->shared; 5948 5949 /* Disable Smart Power Down */ 5950 phy_spd_state(hw, B_FALSE); 5951 5952 phy_ctrl = (MII_CR_FULL_DUPLEX | 5953 MII_CR_SPEED_100); 5954 5955 /* Force 100/FD, reset PHY */ 5956 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5957 phy_ctrl | MII_CR_RESET); /* 0xA100 */ 5958 msec_delay(10); 5959 5960 /* Force 100/FD */ 5961 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5962 phy_ctrl); /* 0x2100 */ 5963 msec_delay(10); 5964 5965 /* Now setup the MAC to the same speed/duplex as the PHY.
*/ 5966 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5967 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5968 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */ 5969 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5970 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5971 E1000_CTRL_SPD_100 | /* Force Speed to 100 */ 5972 E1000_CTRL_FD); /* Force Duplex to FULL */ 5973 5974 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5975 } 5976 5977 static void 5978 e1000g_set_external_loopback_10(struct e1000g *Adapter) 5979 { 5980 struct e1000_hw *hw; 5981 uint32_t ctrl; 5982 uint16_t phy_ctrl; 5983 5984 hw = &Adapter->shared; 5985 5986 /* Disable Smart Power Down */ 5987 phy_spd_state(hw, B_FALSE); 5988 5989 phy_ctrl = (MII_CR_FULL_DUPLEX | 5990 MII_CR_SPEED_10); 5991 5992 /* Force 10/FD, reset PHY */ 5993 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5994 phy_ctrl | MII_CR_RESET); /* 0x8100 */ 5995 msec_delay(10); 5996 5997 /* Force 10/FD */ 5998 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5999 phy_ctrl); /* 0x0100 */ 6000 msec_delay(10); 6001 6002 /* Now setup the MAC to the same speed/duplex as the PHY. */ 6003 ctrl = E1000_READ_REG(hw, E1000_CTRL); 6004 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 6005 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */ 6006 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 6007 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 6008 E1000_CTRL_SPD_10 | /* Force Speed to 10 */ 6009 E1000_CTRL_FD); /* Force Duplex to FULL */ 6010 6011 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 6012 } 6013 6014 #ifdef __sparc 6015 static boolean_t 6016 e1000g_find_mac_address(struct e1000g *Adapter) 6017 { 6018 struct e1000_hw *hw = &Adapter->shared; 6019 uchar_t *bytes; 6020 struct ether_addr sysaddr; 6021 uint_t nelts; 6022 int err; 6023 boolean_t found = B_FALSE; 6024 6025 /* 6026 * The "vendor's factory-set address" may already have 6027 * been extracted from the chip, but if the property 6028 * "local-mac-address" is set we use that instead. 6029 * 6030 * We check whether it looks like an array of 6 6031 * bytes (which it should, if OBP set it). If we can't 6032 * make sense of it this way, we'll ignore it. 6033 */ 6034 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 6035 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 6036 if (err == DDI_PROP_SUCCESS) { 6037 if (nelts == ETHERADDRL) { 6038 while (nelts--) 6039 hw->mac.addr[nelts] = bytes[nelts]; 6040 found = B_TRUE; 6041 } 6042 ddi_prop_free(bytes); 6043 } 6044 6045 /* 6046 * Look up the OBP property "local-mac-address?". If the user has set 6047 * 'local-mac-address? = false', use "the system address" instead. 6048 */ 6049 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0, 6050 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 6051 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 6052 if (localetheraddr(NULL, &sysaddr) != 0) { 6053 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 6054 found = B_TRUE; 6055 } 6056 } 6057 ddi_prop_free(bytes); 6058 } 6059 6060 /* 6061 * Finally(!), if there's a valid "mac-address" property (created 6062 * if we netbooted from this interface), we must use this instead 6063 * of any of the above to ensure that the NFS/install server doesn't 6064 * get confused by the address changing as Solaris takes over! 
6065 */ 6066 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 6067 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 6068 if (err == DDI_PROP_SUCCESS) { 6069 if (nelts == ETHERADDRL) { 6070 while (nelts--) 6071 hw->mac.addr[nelts] = bytes[nelts]; 6072 found = B_TRUE; 6073 } 6074 ddi_prop_free(bytes); 6075 } 6076 6077 if (found) { 6078 bcopy(hw->mac.addr, hw->mac.perm_addr, 6079 ETHERADDRL); 6080 } 6081 6082 return (found); 6083 } 6084 #endif 6085 6086 static int 6087 e1000g_add_intrs(struct e1000g *Adapter) 6088 { 6089 dev_info_t *devinfo; 6090 int intr_types; 6091 int rc; 6092 6093 devinfo = Adapter->dip; 6094 6095 /* Get supported interrupt types */ 6096 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 6097 6098 if (rc != DDI_SUCCESS) { 6099 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6100 "Get supported interrupt types failed: %d\n", rc); 6101 return (DDI_FAILURE); 6102 } 6103 6104 /* 6105 * Based on Intel Technical Advisory document (TA-160), there are 6106 * cases where some older Intel PCI-X NICs may "advertise" to the OS 6107 * that they support MSI, but in fact have problems. 6108 * So we should only enable MSI for PCI-E NICs and disable MSI for old 6109 * PCI/PCI-X NICs. 6110 */ 6111 if (Adapter->shared.mac.type < e1000_82571) 6112 Adapter->msi_enable = B_FALSE; 6113 6114 if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) { 6115 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI); 6116 6117 if (rc != DDI_SUCCESS) { 6118 /* EMPTY */ 6119 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 6120 "Add MSI failed, trying Legacy interrupts\n"); 6121 } else { 6122 Adapter->intr_type = DDI_INTR_TYPE_MSI; 6123 } 6124 } 6125 6126 if ((Adapter->intr_type == 0) && 6127 (intr_types & DDI_INTR_TYPE_FIXED)) { 6128 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED); 6129 6130 if (rc != DDI_SUCCESS) { 6131 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 6132 "Add Legacy interrupts failed\n"); 6133 return (DDI_FAILURE); 6134 } 6135 6136 Adapter->intr_type = DDI_INTR_TYPE_FIXED; 6137 } 6138 6139 if (Adapter->intr_type == 0) { 6140 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 6141 "No interrupts registered\n"); 6142 return (DDI_FAILURE); 6143 } 6144 6145 return (DDI_SUCCESS); 6146 } 6147 6148 /* 6149 * e1000g_intr_add() handles MSI/Legacy interrupts 6150 */ 6151 static int 6152 e1000g_intr_add(struct e1000g *Adapter, int intr_type) 6153 { 6154 dev_info_t *devinfo; 6155 int count, avail, actual; 6156 int x, y, rc, inum = 0; 6157 int flag; 6158 ddi_intr_handler_t *intr_handler; 6159 6160 devinfo = Adapter->dip; 6161 6162 /* get number of interrupts */ 6163 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 6164 if ((rc != DDI_SUCCESS) || (count == 0)) { 6165 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 6166 "Get interrupt number failed. Return: %d, count: %d\n", 6167 rc, count); 6168 return (DDI_FAILURE); 6169 } 6170 6171 /* get number of available interrupts */ 6172 rc = ddi_intr_get_navail(devinfo, intr_type, &avail); 6173 if ((rc != DDI_SUCCESS) || (avail == 0)) { 6174 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 6175 "Get interrupt available number failed.
" 6176 "Return: %d, available: %d\n", rc, avail); 6177 return (DDI_FAILURE); 6178 } 6179 6180 if (avail < count) { 6181 /* EMPTY */ 6182 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 6183 "Interrupts count: %d, available: %d\n", 6184 count, avail); 6185 } 6186 6187 /* Allocate an array of interrupt handles */ 6188 Adapter->intr_size = count * sizeof (ddi_intr_handle_t); 6189 Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP); 6190 6191 /* Set NORMAL behavior for both MSI and FIXED interrupt */ 6192 flag = DDI_INTR_ALLOC_NORMAL; 6193 6194 /* call ddi_intr_alloc() */ 6195 rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum, 6196 count, &actual, flag); 6197 6198 if ((rc != DDI_SUCCESS) || (actual == 0)) { 6199 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6200 "Allocate interrupts failed: %d\n", rc); 6201 6202 kmem_free(Adapter->htable, Adapter->intr_size); 6203 return (DDI_FAILURE); 6204 } 6205 6206 if (actual < count) { 6207 /* EMPTY */ 6208 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 6209 "Interrupts requested: %d, received: %d\n", 6210 count, actual); 6211 } 6212 6213 Adapter->intr_cnt = actual; 6214 6215 /* Get priority for first msi, assume remaining are all the same */ 6216 rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri); 6217 6218 if (rc != DDI_SUCCESS) { 6219 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6220 "Get interrupt priority failed: %d\n", rc); 6221 6222 /* Free already allocated intr */ 6223 for (y = 0; y < actual; y++) 6224 (void) ddi_intr_free(Adapter->htable[y]); 6225 6226 kmem_free(Adapter->htable, Adapter->intr_size); 6227 return (DDI_FAILURE); 6228 } 6229 6230 /* 6231 * In Legacy Interrupt mode, for PCI-Express adapters, we should 6232 * use the interrupt service routine e1000g_intr_pciexpress() 6233 * to avoid interrupt stealing when sharing interrupt with other 6234 * devices. 
6235 */ 6236 if (Adapter->shared.mac.type < e1000_82571) 6237 intr_handler = e1000g_intr; 6238 else 6239 intr_handler = e1000g_intr_pciexpress; 6240 6241 /* Call ddi_intr_add_handler() */ 6242 for (x = 0; x < actual; x++) { 6243 rc = ddi_intr_add_handler(Adapter->htable[x], 6244 intr_handler, (caddr_t)Adapter, NULL); 6245 6246 if (rc != DDI_SUCCESS) { 6247 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6248 "Add interrupt handler failed: %d\n", rc); 6249 6250 /* Remove already added handler */ 6251 for (y = 0; y < x; y++) 6252 (void) ddi_intr_remove_handler( 6253 Adapter->htable[y]); 6254 6255 /* Free already allocated intr */ 6256 for (y = 0; y < actual; y++) 6257 (void) ddi_intr_free(Adapter->htable[y]); 6258 6259 kmem_free(Adapter->htable, Adapter->intr_size); 6260 return (DDI_FAILURE); 6261 } 6262 } 6263 6264 rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap); 6265 6266 if (rc != DDI_SUCCESS) { 6267 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6268 "Get interrupt cap failed: %d\n", rc); 6269 6270 /* Free already allocated intr */ 6271 for (y = 0; y < actual; y++) { 6272 (void) ddi_intr_remove_handler(Adapter->htable[y]); 6273 (void) ddi_intr_free(Adapter->htable[y]); 6274 } 6275 6276 kmem_free(Adapter->htable, Adapter->intr_size); 6277 return (DDI_FAILURE); 6278 } 6279 6280 return (DDI_SUCCESS); 6281 } 6282 6283 static int 6284 e1000g_rem_intrs(struct e1000g *Adapter) 6285 { 6286 int x; 6287 int rc; 6288 6289 for (x = 0; x < Adapter->intr_cnt; x++) { 6290 rc = ddi_intr_remove_handler(Adapter->htable[x]); 6291 if (rc != DDI_SUCCESS) { 6292 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6293 "Remove intr handler failed: %d\n", rc); 6294 return (DDI_FAILURE); 6295 } 6296 6297 rc = ddi_intr_free(Adapter->htable[x]); 6298 if (rc != DDI_SUCCESS) { 6299 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6300 "Free intr failed: %d\n", rc); 6301 return (DDI_FAILURE); 6302 } 6303 } 6304 6305 kmem_free(Adapter->htable, Adapter->intr_size); 6306 6307 return (DDI_SUCCESS); 6308 } 6309 6310 static int 6311 e1000g_enable_intrs(struct e1000g *Adapter) 6312 { 6313 int x; 6314 int rc; 6315 6316 /* Enable interrupts */ 6317 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) { 6318 /* Call ddi_intr_block_enable() for MSI */ 6319 rc = ddi_intr_block_enable(Adapter->htable, 6320 Adapter->intr_cnt); 6321 if (rc != DDI_SUCCESS) { 6322 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6323 "Enable block intr failed: %d\n", rc); 6324 return (DDI_FAILURE); 6325 } 6326 } else { 6327 /* Call ddi_intr_enable() for Legacy/MSI non block enable */ 6328 for (x = 0; x < Adapter->intr_cnt; x++) { 6329 rc = ddi_intr_enable(Adapter->htable[x]); 6330 if (rc != DDI_SUCCESS) { 6331 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6332 "Enable intr failed: %d\n", rc); 6333 return (DDI_FAILURE); 6334 } 6335 } 6336 } 6337 6338 return (DDI_SUCCESS); 6339 } 6340 6341 static int 6342 e1000g_disable_intrs(struct e1000g *Adapter) 6343 { 6344 int x; 6345 int rc; 6346 6347 /* Disable all interrupts */ 6348 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) { 6349 rc = ddi_intr_block_disable(Adapter->htable, 6350 Adapter->intr_cnt); 6351 if (rc != DDI_SUCCESS) { 6352 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6353 "Disable block intr failed: %d\n", rc); 6354 return (DDI_FAILURE); 6355 } 6356 } else { 6357 for (x = 0; x < Adapter->intr_cnt; x++) { 6358 rc = ddi_intr_disable(Adapter->htable[x]); 6359 if (rc != DDI_SUCCESS) { 6360 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6361 "Disable intr failed: %d\n", rc); 6362 return (DDI_FAILURE); 6363 } 
6364 } 6365 } 6366 6367 return (DDI_SUCCESS); 6368 } 6369 6370 /* 6371 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter 6372 */ 6373 static void 6374 e1000g_get_phy_state(struct e1000g *Adapter) 6375 { 6376 struct e1000_hw *hw = &Adapter->shared; 6377 6378 if (hw->phy.media_type == e1000_media_type_copper) { 6379 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl); 6380 (void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status); 6381 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, 6382 &Adapter->phy_an_adv); 6383 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, 6384 &Adapter->phy_an_exp); 6385 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, 6386 &Adapter->phy_ext_status); 6387 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, 6388 &Adapter->phy_1000t_ctrl); 6389 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, 6390 &Adapter->phy_1000t_status); 6391 (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, 6392 &Adapter->phy_lp_able); 6393 6394 Adapter->param_autoneg_cap = 6395 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0; 6396 Adapter->param_pause_cap = 6397 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; 6398 Adapter->param_asym_pause_cap = 6399 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; 6400 Adapter->param_1000fdx_cap = 6401 ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 6402 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0; 6403 Adapter->param_1000hdx_cap = 6404 ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) || 6405 (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0; 6406 Adapter->param_100t4_cap = 6407 (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0; 6408 Adapter->param_100fdx_cap = 6409 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 6410 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0; 6411 Adapter->param_100hdx_cap = 6412 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) || 6413 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0; 6414 Adapter->param_10fdx_cap = 6415 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0; 6416 Adapter->param_10hdx_cap = 6417 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0; 6418 6419 Adapter->param_adv_autoneg = hw->mac.autoneg; 6420 Adapter->param_adv_pause = 6421 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; 6422 Adapter->param_adv_asym_pause = 6423 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; 6424 Adapter->param_adv_1000hdx = 6425 (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0; 6426 Adapter->param_adv_100t4 = 6427 (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0; 6428 if (Adapter->param_adv_autoneg == 1) { 6429 Adapter->param_adv_1000fdx = 6430 (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS) 6431 ? 1 : 0; 6432 Adapter->param_adv_100fdx = 6433 (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS) 6434 ? 1 : 0; 6435 Adapter->param_adv_100hdx = 6436 (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS) 6437 ? 1 : 0; 6438 Adapter->param_adv_10fdx = 6439 (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0; 6440 Adapter->param_adv_10hdx = 6441 (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0; 6442 } 6443 6444 Adapter->param_lp_autoneg = 6445 (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0; 6446 Adapter->param_lp_pause = 6447 (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0; 6448 Adapter->param_lp_asym_pause = 6449 (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0; 6450 Adapter->param_lp_1000fdx = 6451 (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0; 6452 Adapter->param_lp_1000hdx = 6453 (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 
1 : 0; 6454 Adapter->param_lp_100t4 = 6455 (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0; 6456 Adapter->param_lp_100fdx = 6457 (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0; 6458 Adapter->param_lp_100hdx = 6459 (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0; 6460 Adapter->param_lp_10fdx = 6461 (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0; 6462 Adapter->param_lp_10hdx = 6463 (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0; 6464 } else { 6465 /* 6466 * 1Gig Fiber adapter only offers 1Gig Full Duplex. Meaning, 6467 * it can only work with 1Gig Full Duplex Link Partner. 6468 */ 6469 Adapter->param_autoneg_cap = 0; 6470 Adapter->param_pause_cap = 1; 6471 Adapter->param_asym_pause_cap = 1; 6472 Adapter->param_1000fdx_cap = 1; 6473 Adapter->param_1000hdx_cap = 0; 6474 Adapter->param_100t4_cap = 0; 6475 Adapter->param_100fdx_cap = 0; 6476 Adapter->param_100hdx_cap = 0; 6477 Adapter->param_10fdx_cap = 0; 6478 Adapter->param_10hdx_cap = 0; 6479 6480 Adapter->param_adv_autoneg = 0; 6481 Adapter->param_adv_pause = 1; 6482 Adapter->param_adv_asym_pause = 1; 6483 Adapter->param_adv_1000fdx = 1; 6484 Adapter->param_adv_1000hdx = 0; 6485 Adapter->param_adv_100t4 = 0; 6486 Adapter->param_adv_100fdx = 0; 6487 Adapter->param_adv_100hdx = 0; 6488 Adapter->param_adv_10fdx = 0; 6489 Adapter->param_adv_10hdx = 0; 6490 6491 Adapter->param_lp_autoneg = 0; 6492 Adapter->param_lp_pause = 0; 6493 Adapter->param_lp_asym_pause = 0; 6494 Adapter->param_lp_1000fdx = 0; 6495 Adapter->param_lp_1000hdx = 0; 6496 Adapter->param_lp_100t4 = 0; 6497 Adapter->param_lp_100fdx = 0; 6498 Adapter->param_lp_100hdx = 0; 6499 Adapter->param_lp_10fdx = 0; 6500 Adapter->param_lp_10hdx = 0; 6501 } 6502 } 6503 6504 /* 6505 * FMA support 6506 */ 6507 6508 int 6509 e1000g_check_acc_handle(ddi_acc_handle_t handle) 6510 { 6511 ddi_fm_error_t de; 6512 6513 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 6514 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 6515 return (de.fme_status); 6516 } 6517 6518 int 6519 e1000g_check_dma_handle(ddi_dma_handle_t handle) 6520 { 6521 ddi_fm_error_t de; 6522 6523 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 6524 return (de.fme_status); 6525 } 6526 6527 /* 6528 * The IO fault service error handling callback function 6529 */ 6530 /* ARGSUSED2 */ 6531 static int 6532 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 6533 { 6534 /* 6535 * as the driver can always deal with an error in any dma or 6536 * access handle, we can just return the fme_status value. 
6537 */ 6538 pci_ereport_post(dip, err, NULL); 6539 return (err->fme_status); 6540 } 6541 6542 static void 6543 e1000g_fm_init(struct e1000g *Adapter) 6544 { 6545 ddi_iblock_cookie_t iblk; 6546 int fma_dma_flag; 6547 6548 /* Only register with IO Fault Services if we have some capability */ 6549 if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 6550 e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 6551 } else { 6552 e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 6553 } 6554 6555 if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 6556 fma_dma_flag = 1; 6557 } else { 6558 fma_dma_flag = 0; 6559 } 6560 6561 (void) e1000g_set_fma_flags(fma_dma_flag); 6562 6563 if (Adapter->fm_capabilities) { 6564 6565 /* Register capabilities with IO Fault Services */ 6566 ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk); 6567 6568 /* 6569 * Initialize pci ereport capabilities if ereport capable 6570 */ 6571 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) || 6572 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6573 pci_ereport_setup(Adapter->dip); 6574 6575 /* 6576 * Register error callback if error callback capable 6577 */ 6578 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6579 ddi_fm_handler_register(Adapter->dip, 6580 e1000g_fm_error_cb, (void*) Adapter); 6581 } 6582 } 6583 6584 static void 6585 e1000g_fm_fini(struct e1000g *Adapter) 6586 { 6587 /* Only unregister FMA capabilities if we registered some */ 6588 if (Adapter->fm_capabilities) { 6589 6590 /* 6591 * Release any resources allocated by pci_ereport_setup() 6592 */ 6593 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) || 6594 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6595 pci_ereport_teardown(Adapter->dip); 6596 6597 /* 6598 * Un-register error callback if error callback capable 6599 */ 6600 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6601 ddi_fm_handler_unregister(Adapter->dip); 6602 6603 /* Unregister from IO Fault Services */ 6604 mutex_enter(&e1000g_rx_detach_lock); 6605 ddi_fm_fini(Adapter->dip); 6606 if (Adapter->priv_dip != NULL) { 6607 DEVI(Adapter->priv_dip)->devi_fmhdl = NULL; 6608 } 6609 mutex_exit(&e1000g_rx_detach_lock); 6610 } 6611 } 6612 6613 void 6614 e1000g_fm_ereport(struct e1000g *Adapter, char *detail) 6615 { 6616 uint64_t ena; 6617 char buf[FM_MAX_CLASS]; 6618 6619 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 6620 ena = fm_ena_generate(0, FM_ENA_FMT1); 6621 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) { 6622 ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP, 6623 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 6624 } 6625 } 6626 6627 /* 6628 * quiesce(9E) entry point. 6629 * 6630 * This function is called when the system is single-threaded at high 6631 * PIL with preemption disabled. Therefore, this function must not 6632 * block. 6633 * 6634 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 6635 * DDI_FAILURE indicates an error condition and should almost never happen.
6636 */ 6637 static int 6638 e1000g_quiesce(dev_info_t *devinfo) 6639 { 6640 struct e1000g *Adapter; 6641 6642 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 6643 6644 if (Adapter == NULL) 6645 return (DDI_FAILURE); 6646 6647 e1000g_clear_all_interrupts(Adapter); 6648 6649 (void) e1000_reset_hw(&Adapter->shared); 6650 6651 /* Setup our HW Tx Head & Tail descriptor pointers */ 6652 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0); 6653 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0); 6654 6655 /* Setup our HW Rx Head & Tail descriptor pointers */ 6656 E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0); 6657 E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0); 6658 6659 return (DDI_SUCCESS); 6660 } 6661 6662 /* 6663 * synchronize the adv* and en* parameters. 6664 * 6665 * See comments in <sys/dld.h> for details of the *_en_* 6666 * parameters. The usage of ndd for setting adv parameters will 6667 * synchronize all the en parameters with the e1000g parameters, 6668 * implicitly disabling any settings made via dladm. 6669 */ 6670 static void 6671 e1000g_param_sync(struct e1000g *Adapter) 6672 { 6673 Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx; 6674 Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx; 6675 Adapter->param_en_100fdx = Adapter->param_adv_100fdx; 6676 Adapter->param_en_100hdx = Adapter->param_adv_100hdx; 6677 Adapter->param_en_10fdx = Adapter->param_adv_10fdx; 6678 Adapter->param_en_10hdx = Adapter->param_adv_10hdx; 6679 } 6680 6681 /* 6682 * e1000g_get_driver_control - tell manageability firmware that the driver 6683 * has control. 6684 */ 6685 static void 6686 e1000g_get_driver_control(struct e1000_hw *hw) 6687 { 6688 uint32_t ctrl_ext; 6689 uint32_t swsm; 6690 6691 /* tell manageability firmware the driver has taken over */ 6692 switch (hw->mac.type) { 6693 case e1000_82573: 6694 swsm = E1000_READ_REG(hw, E1000_SWSM); 6695 E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD); 6696 break; 6697 case e1000_82571: 6698 case e1000_82572: 6699 case e1000_82574: 6700 case e1000_80003es2lan: 6701 case e1000_ich8lan: 6702 case e1000_ich9lan: 6703 case e1000_ich10lan: 6704 case e1000_pchlan: 6705 case e1000_pch2lan: 6706 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 6707 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 6708 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 6709 break; 6710 default: 6711 /* no manageability firmware: do nothing */ 6712 break; 6713 } 6714 } 6715 6716 /* 6717 * e1000g_release_driver_control - tell manageability firmware that the driver 6718 * has released control. 6719 */ 6720 static void 6721 e1000g_release_driver_control(struct e1000_hw *hw) 6722 { 6723 uint32_t ctrl_ext; 6724 uint32_t swsm; 6725 6726 /* tell manageability firmware the driver has released control */ 6727 switch (hw->mac.type) { 6728 case e1000_82573: 6729 swsm = E1000_READ_REG(hw, E1000_SWSM); 6730 E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD); 6731 break; 6732 case e1000_82571: 6733 case e1000_82572: 6734 case e1000_82574: 6735 case e1000_80003es2lan: 6736 case e1000_ich8lan: 6737 case e1000_ich9lan: 6738 case e1000_ich10lan: 6739 case e1000_pchlan: 6740 case e1000_pch2lan: 6741 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 6742 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 6743 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 6744 break; 6745 default: 6746 /* no manageability firmware: do nothing */ 6747 break; 6748 } 6749 } 6750 6751 /* 6752 * Restore e1000g promiscuous mode. 
6753 */ 6754 static void 6755 e1000g_restore_promisc(struct e1000g *Adapter) 6756 { 6757 if (Adapter->e1000g_promisc) { 6758 uint32_t rctl; 6759 6760 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL); 6761 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM); 6762 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl); 6763 } 6764 } 6765