1 /* 2 * This file is provided under a CDDLv1 license. When using or 3 * redistributing this file, you may do so under this license. 4 * In redistributing this file this license must be included 5 * and no other modification of this header file is permitted. 6 * 7 * CDDL LICENSE SUMMARY 8 * 9 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved. 10 * 11 * The contents of this file are subject to the terms of Version 12 * 1.0 of the Common Development and Distribution License (the "License"). 13 * 14 * You should have received a copy of the License with this software. 15 * You can obtain a copy of the License at 16 * http://www.opensolaris.org/os/licensing. 17 * See the License for the specific language governing permissions 18 * and limitations under the License. 19 */ 20 21 /* 22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 25 /* 26 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved. 27 * Copyright 2013 Nexenta Systems, Inc. All rights reserved. 28 * Copyright (c) 2018, Joyent, Inc. 29 * Copyright 2024 Oxide Computer Company 30 */ 31 32 /* 33 * ********************************************************************** 34 * * 35 * Module Name: * 36 * e1000g_main.c * 37 * * 38 * Abstract: * 39 * This file contains the interface routines for the solaris OS. * 40 * It has all DDI entry point routines and GLD entry point routines. * 41 * * 42 * This file also contains routines that take care of initialization * 43 * uninit routine and interrupt routine. * 44 * * 45 * ********************************************************************** 46 */ 47 48 #include <sys/dlpi.h> 49 #include <sys/mac.h> 50 #include "e1000g_sw.h" 51 #include "e1000g_debug.h" 52 53 static char ident[] = "Intel PRO/1000 Ethernet"; 54 /* LINTED E_STATIC_UNUSED */ 55 static char e1000g_version[] = "Driver Ver. 5.4.00"; 56 57 /* 58 * Proto types for DDI entry points 59 */ 60 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t); 61 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t); 62 static int e1000g_quiesce(dev_info_t *); 63 64 /* 65 * init and intr routines prototype 66 */ 67 static int e1000g_resume(dev_info_t *); 68 static int e1000g_suspend(dev_info_t *); 69 static uint_t e1000g_intr_pciexpress(caddr_t, caddr_t); 70 static uint_t e1000g_intr(caddr_t, caddr_t); 71 static void e1000g_intr_work(struct e1000g *, uint32_t); 72 static int e1000g_init(struct e1000g *); 73 static int e1000g_start(struct e1000g *, boolean_t); 74 static void e1000g_stop(struct e1000g *, boolean_t); 75 static int e1000g_m_start(void *); 76 static void e1000g_m_stop(void *); 77 static int e1000g_m_promisc(void *, boolean_t); 78 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *); 79 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *); 80 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *); 81 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t, 82 uint_t, const void *); 83 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t, 84 uint_t, void *); 85 static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t, 86 mac_prop_info_handle_t); 87 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t, 88 const void *); 89 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *); 90 static void e1000g_init_locks(struct e1000g *); 91 static void e1000g_destroy_locks(struct e1000g *); 92 static int e1000g_identify_hardware(struct e1000g *); 93 static int e1000g_regs_map(struct e1000g *); 94 static int e1000g_set_driver_params(struct e1000g *); 95 static void e1000g_set_bufsize(struct e1000g *); 96 static int e1000g_register_mac(struct e1000g *); 97 static boolean_t e1000g_rx_drain(struct e1000g *); 98 static boolean_t e1000g_tx_drain(struct e1000g *); 99 static void e1000g_init_unicst(struct e1000g *); 100 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int); 101 static int e1000g_alloc_rx_data(struct e1000g *); 102 static void e1000g_release_multicast(struct e1000g *); 103 static void e1000g_pch_limits(struct e1000g *); 104 static uint32_t e1000g_mtu2maxframe(uint32_t); 105 106 /* 107 * Local routines 108 */ 109 static boolean_t e1000g_reset_adapter(struct e1000g *); 110 static void e1000g_tx_clean(struct e1000g *); 111 static void e1000g_rx_clean(struct e1000g *); 112 static void e1000g_link_timer(void *); 113 static void e1000g_local_timer(void *); 114 static boolean_t e1000g_link_check(struct e1000g *); 115 static boolean_t e1000g_stall_check(struct e1000g *); 116 static void e1000g_smartspeed(struct e1000g *); 117 static void e1000g_get_conf(struct e1000g *); 118 static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int, 119 int *); 120 static void enable_watchdog_timer(struct e1000g *); 121 static void disable_watchdog_timer(struct e1000g *); 122 static void start_watchdog_timer(struct e1000g *); 123 static void restart_watchdog_timer(struct e1000g *); 124 static void stop_watchdog_timer(struct e1000g *); 125 static void stop_link_timer(struct e1000g *); 126 static void stop_82547_timer(e1000g_tx_ring_t *); 127 static void e1000g_force_speed_duplex(struct e1000g *); 128 static void e1000g_setup_max_mtu(struct e1000g *); 129 static void e1000g_get_max_frame_size(struct e1000g *); 130 static boolean_t is_valid_mac_addr(uint8_t *); 131 static void e1000g_unattach(dev_info_t *, struct e1000g *); 132 static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *); 133 #ifdef E1000G_DEBUG 134 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *); 135 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *); 136 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *); 137 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *); 138 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *, 139 struct iocblk *, mblk_t *); 140 #endif 141 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *, 142 struct iocblk *, mblk_t *); 143 static boolean_t e1000g_check_loopback_support(struct e1000_hw *); 144 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t); 145 static void e1000g_set_internal_loopback(struct e1000g *); 146 static void e1000g_set_external_loopback_1000(struct e1000g *); 147 static void e1000g_set_external_loopback_100(struct e1000g *); 148 static void e1000g_set_external_loopback_10(struct e1000g *); 149 static int e1000g_add_intrs(struct e1000g *); 150 static int e1000g_intr_add(struct e1000g *, int); 151 static int e1000g_rem_intrs(struct e1000g *); 152 static int e1000g_enable_intrs(struct e1000g *); 153 static int e1000g_disable_intrs(struct e1000g *); 154 static boolean_t e1000g_link_up(struct e1000g *); 155 #ifdef __sparc 156 static boolean_t e1000g_find_mac_address(struct e1000g *); 157 #endif 158 static void e1000g_get_phy_state(struct e1000g *); 159 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 160 const void *impl_data); 161 static void e1000g_fm_init(struct e1000g *Adapter); 162 static void e1000g_fm_fini(struct e1000g *Adapter); 163 static void e1000g_param_sync(struct e1000g *); 164 static void e1000g_get_driver_control(struct e1000_hw *); 165 static void e1000g_release_driver_control(struct e1000_hw *); 166 static void e1000g_restore_promisc(struct e1000g *Adapter); 167 168 char *e1000g_priv_props[] = { 169 "_tx_bcopy_threshold", 170 "_tx_interrupt_enable", 171 "_tx_intr_delay", 172 "_tx_intr_abs_delay", 173 "_rx_bcopy_threshold", 174 "_max_num_rcv_packets", 175 "_rx_intr_delay", 176 "_rx_intr_abs_delay", 177 "_intr_throttling_rate", 178 "_intr_adaptive", 179 "_adv_pause_cap", 180 "_adv_asym_pause_cap", 181 NULL 182 }; 183 184 static struct cb_ops cb_ws_ops = { 185 nulldev, /* cb_open */ 186 nulldev, /* cb_close */ 187 nodev, /* cb_strategy */ 188 nodev, /* cb_print */ 189 nodev, /* cb_dump */ 190 nodev, /* cb_read */ 191 nodev, /* cb_write */ 192 nodev, /* cb_ioctl */ 193 nodev, /* cb_devmap */ 194 nodev, /* cb_mmap */ 195 nodev, /* cb_segmap */ 196 nochpoll, /* cb_chpoll */ 197 ddi_prop_op, /* cb_prop_op */ 198 NULL, /* cb_stream */ 199 D_MP | D_HOTPLUG, /* cb_flag */ 200 CB_REV, /* cb_rev */ 201 nodev, /* cb_aread */ 202 nodev /* cb_awrite */ 203 }; 204 205 static struct dev_ops ws_ops = { 206 DEVO_REV, /* devo_rev */ 207 0, /* devo_refcnt */ 208 NULL, /* devo_getinfo */ 209 nulldev, /* devo_identify */ 210 nulldev, /* devo_probe */ 211 e1000g_attach, /* devo_attach */ 212 e1000g_detach, /* devo_detach */ 213 nodev, /* devo_reset */ 214 &cb_ws_ops, /* devo_cb_ops */ 215 NULL, /* devo_bus_ops */ 216 ddi_power, /* devo_power */ 217 e1000g_quiesce /* devo_quiesce */ 218 }; 219 220 static struct modldrv modldrv = { 221 &mod_driverops, /* Type of module. This one is a driver */ 222 ident, /* Discription string */ 223 &ws_ops, /* driver ops */ 224 }; 225 226 static struct modlinkage modlinkage = { 227 MODREV_1, &modldrv, NULL 228 }; 229 230 /* Access attributes for register mapping */ 231 static ddi_device_acc_attr_t e1000g_regs_acc_attr = { 232 DDI_DEVICE_ATTR_V1, 233 DDI_STRUCTURE_LE_ACC, 234 DDI_STRICTORDER_ACC, 235 DDI_FLAGERR_ACC 236 }; 237 238 #define E1000G_M_CALLBACK_FLAGS \ 239 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO) 240 241 static mac_callbacks_t e1000g_m_callbacks = { 242 E1000G_M_CALLBACK_FLAGS, 243 e1000g_m_stat, 244 e1000g_m_start, 245 e1000g_m_stop, 246 e1000g_m_promisc, 247 e1000g_m_multicst, 248 NULL, 249 e1000g_m_tx, 250 NULL, 251 e1000g_m_ioctl, 252 e1000g_m_getcapab, 253 NULL, 254 NULL, 255 e1000g_m_setprop, 256 e1000g_m_getprop, 257 e1000g_m_propinfo 258 }; 259 260 /* 261 * Global variables 262 */ 263 uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K; 264 uint32_t e1000g_mblks_pending = 0; 265 /* 266 * Workaround for Dynamic Reconfiguration support, for x86 platform only. 267 * Here we maintain a private dev_info list if e1000g_force_detach is 268 * enabled. If we force the driver to detach while there are still some 269 * rx buffers retained in the upper layer, we have to keep a copy of the 270 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data 271 * structure will be freed after the driver is detached. However when we 272 * finally free those rx buffers released by the upper layer, we need to 273 * refer to the dev_info to free the dma buffers. So we save a copy of 274 * the dev_info for this purpose. On x86 platform, we assume this copy 275 * of dev_info is always valid, but on SPARC platform, it could be invalid 276 * after the system board level DR operation. For this reason, the global 277 * variable e1000g_force_detach must be B_FALSE on SPARC platform. 278 */ 279 #ifdef __sparc 280 boolean_t e1000g_force_detach = B_FALSE; 281 #else 282 boolean_t e1000g_force_detach = B_TRUE; 283 #endif 284 private_devi_list_t *e1000g_private_devi_list = NULL; 285 286 /* 287 * The mutex e1000g_rx_detach_lock is defined to protect the processing of 288 * the private dev_info list, and to serialize the processing of rx buffer 289 * freeing and rx buffer recycling. 290 */ 291 kmutex_t e1000g_rx_detach_lock; 292 /* 293 * The rwlock e1000g_dma_type_lock is defined to protect the global flag 294 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA". 295 * If there are many e1000g instances, the system may run out of DVMA 296 * resources during the initialization of the instances, then the flag will 297 * be changed to "USE_DMA". Because different e1000g instances are initialized 298 * in parallel, we need to use this lock to protect the flag. 299 */ 300 krwlock_t e1000g_dma_type_lock; 301 302 /* 303 * The 82546 chipset is a dual-port device, both the ports share one eeprom. 304 * Based on the information from Intel, the 82546 chipset has some hardware 305 * problem. When one port is being reset and the other port is trying to 306 * access the eeprom, it could cause system hang or panic. To workaround this 307 * hardware problem, we use a global mutex to prevent such operations from 308 * happening simultaneously on different instances. This workaround is applied 309 * to all the devices supported by this driver. 310 */ 311 kmutex_t e1000g_nvm_lock; 312 313 /* 314 * Loadable module configuration entry points for the driver 315 */ 316 317 /* 318 * _init - module initialization 319 */ 320 int 321 _init(void) 322 { 323 int status; 324 325 mac_init_ops(&ws_ops, WSNAME); 326 status = mod_install(&modlinkage); 327 if (status != DDI_SUCCESS) 328 mac_fini_ops(&ws_ops); 329 else { 330 mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL); 331 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL); 332 mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL); 333 } 334 335 return (status); 336 } 337 338 /* 339 * _fini - module finalization 340 */ 341 int 342 _fini(void) 343 { 344 int status; 345 346 if (e1000g_mblks_pending != 0) 347 return (EBUSY); 348 349 status = mod_remove(&modlinkage); 350 if (status == DDI_SUCCESS) { 351 mac_fini_ops(&ws_ops); 352 353 if (e1000g_force_detach) { 354 private_devi_list_t *devi_node; 355 356 mutex_enter(&e1000g_rx_detach_lock); 357 while (e1000g_private_devi_list != NULL) { 358 devi_node = e1000g_private_devi_list; 359 e1000g_private_devi_list = 360 e1000g_private_devi_list->next; 361 362 kmem_free(devi_node->priv_dip, 363 sizeof (struct dev_info)); 364 kmem_free(devi_node, 365 sizeof (private_devi_list_t)); 366 } 367 mutex_exit(&e1000g_rx_detach_lock); 368 } 369 370 mutex_destroy(&e1000g_rx_detach_lock); 371 rw_destroy(&e1000g_dma_type_lock); 372 mutex_destroy(&e1000g_nvm_lock); 373 } 374 375 return (status); 376 } 377 378 /* 379 * _info - module information 380 */ 381 int 382 _info(struct modinfo *modinfop) 383 { 384 return (mod_info(&modlinkage, modinfop)); 385 } 386 387 /* 388 * e1000g_attach - driver attach 389 * 390 * This function is the device-specific initialization entry 391 * point. This entry point is required and must be written. 392 * The DDI_ATTACH command must be provided in the attach entry 393 * point. When attach() is called with cmd set to DDI_ATTACH, 394 * all normal kernel services (such as kmem_alloc(9F)) are 395 * available for use by the driver. 396 * 397 * The attach() function will be called once for each instance 398 * of the device on the system with cmd set to DDI_ATTACH. 399 * Until attach() succeeds, the only driver entry points which 400 * may be called are open(9E) and getinfo(9E). 401 */ 402 static int 403 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 404 { 405 struct e1000g *Adapter; 406 struct e1000_hw *hw; 407 struct e1000g_osdep *osdep; 408 int instance; 409 410 switch (cmd) { 411 default: 412 e1000g_log(NULL, CE_WARN, 413 "Unsupported command send to e1000g_attach... "); 414 return (DDI_FAILURE); 415 416 case DDI_RESUME: 417 return (e1000g_resume(devinfo)); 418 419 case DDI_ATTACH: 420 break; 421 } 422 423 /* 424 * get device instance number 425 */ 426 instance = ddi_get_instance(devinfo); 427 428 /* 429 * Allocate soft data structure 430 */ 431 Adapter = 432 (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP); 433 434 Adapter->dip = devinfo; 435 Adapter->instance = instance; 436 Adapter->tx_ring->adapter = Adapter; 437 Adapter->rx_ring->adapter = Adapter; 438 439 hw = &Adapter->shared; 440 osdep = &Adapter->osdep; 441 hw->back = osdep; 442 osdep->adapter = Adapter; 443 444 ddi_set_driver_private(devinfo, (caddr_t)Adapter); 445 446 /* 447 * Initialize for fma support 448 */ 449 (void) e1000g_get_prop(Adapter, "fm-capable", 450 0, 0x0f, 451 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 452 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE, 453 &Adapter->fm_capabilities); 454 e1000g_fm_init(Adapter); 455 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT; 456 457 /* 458 * PCI Configure 459 */ 460 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { 461 e1000g_log(Adapter, CE_WARN, "PCI configuration failed"); 462 goto attach_fail; 463 } 464 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; 465 466 /* 467 * Setup hardware 468 */ 469 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) { 470 e1000g_log(Adapter, CE_WARN, "Identify hardware failed"); 471 goto attach_fail; 472 } 473 474 /* 475 * Map in the device registers. 476 */ 477 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) { 478 e1000g_log(Adapter, CE_WARN, "Mapping registers failed"); 479 goto attach_fail; 480 } 481 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP; 482 483 /* 484 * Initialize driver parameters 485 */ 486 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) { 487 goto attach_fail; 488 } 489 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP; 490 491 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 492 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 493 goto attach_fail; 494 } 495 496 /* 497 * Disable ULP support 498 */ 499 (void) e1000_disable_ulp_lpt_lp(hw, TRUE); 500 501 /* 502 * Initialize interrupts 503 */ 504 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) { 505 e1000g_log(Adapter, CE_WARN, "Add interrupts failed"); 506 goto attach_fail; 507 } 508 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 509 510 /* 511 * Initialize mutex's for this device. 512 * Do this before enabling the interrupt handler and 513 * register the softint to avoid the condition where 514 * interrupt handler can try using uninitialized mutex 515 */ 516 e1000g_init_locks(Adapter); 517 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS; 518 519 /* 520 * Initialize Driver Counters 521 */ 522 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) { 523 e1000g_log(Adapter, CE_WARN, "Init stats failed"); 524 goto attach_fail; 525 } 526 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS; 527 528 /* 529 * Initialize chip hardware and software structures 530 */ 531 rw_enter(&Adapter->chip_lock, RW_WRITER); 532 if (e1000g_init(Adapter) != DDI_SUCCESS) { 533 rw_exit(&Adapter->chip_lock); 534 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed"); 535 goto attach_fail; 536 } 537 rw_exit(&Adapter->chip_lock); 538 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 539 540 /* 541 * Register the driver to the MAC 542 */ 543 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) { 544 e1000g_log(Adapter, CE_WARN, "Register MAC failed"); 545 goto attach_fail; 546 } 547 Adapter->attach_progress |= ATTACH_PROGRESS_MAC; 548 549 /* 550 * Now that mutex locks are initialized, and the chip is also 551 * initialized, enable interrupts. 552 */ 553 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) { 554 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed"); 555 goto attach_fail; 556 } 557 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 558 559 /* 560 * If e1000g_force_detach is enabled, in global private dip list, 561 * we will create a new entry, which maintains the priv_dip for DR 562 * supports after driver detached. 563 */ 564 if (e1000g_force_detach) { 565 private_devi_list_t *devi_node; 566 567 Adapter->priv_dip = 568 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP); 569 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip), 570 sizeof (struct dev_info)); 571 572 devi_node = 573 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP); 574 575 mutex_enter(&e1000g_rx_detach_lock); 576 devi_node->priv_dip = Adapter->priv_dip; 577 devi_node->flag = E1000G_PRIV_DEVI_ATTACH; 578 devi_node->pending_rx_count = 0; 579 580 Adapter->priv_devi_node = devi_node; 581 582 if (e1000g_private_devi_list == NULL) { 583 devi_node->prev = NULL; 584 devi_node->next = NULL; 585 e1000g_private_devi_list = devi_node; 586 } else { 587 devi_node->prev = NULL; 588 devi_node->next = e1000g_private_devi_list; 589 e1000g_private_devi_list->prev = devi_node; 590 e1000g_private_devi_list = devi_node; 591 } 592 mutex_exit(&e1000g_rx_detach_lock); 593 } 594 595 Adapter->e1000g_state = E1000G_INITIALIZED; 596 return (DDI_SUCCESS); 597 598 attach_fail: 599 e1000g_unattach(devinfo, Adapter); 600 return (DDI_FAILURE); 601 } 602 603 static int 604 e1000g_register_mac(struct e1000g *Adapter) 605 { 606 struct e1000_hw *hw = &Adapter->shared; 607 mac_register_t *mac; 608 int err; 609 610 if ((mac = mac_alloc(MAC_VERSION)) == NULL) 611 return (DDI_FAILURE); 612 613 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 614 mac->m_driver = Adapter; 615 mac->m_dip = Adapter->dip; 616 mac->m_src_addr = hw->mac.addr; 617 mac->m_callbacks = &e1000g_m_callbacks; 618 mac->m_min_sdu = 0; 619 mac->m_max_sdu = Adapter->default_mtu; 620 mac->m_margin = VLAN_TAGSZ; 621 mac->m_priv_props = e1000g_priv_props; 622 mac->m_v12n = MAC_VIRT_LEVEL1; 623 624 err = mac_register(mac, &Adapter->mh); 625 mac_free(mac); 626 627 return (err == 0 ? DDI_SUCCESS : DDI_FAILURE); 628 } 629 630 static int 631 e1000g_identify_hardware(struct e1000g *Adapter) 632 { 633 struct e1000_hw *hw = &Adapter->shared; 634 struct e1000g_osdep *osdep = &Adapter->osdep; 635 636 /* Get the device id */ 637 hw->vendor_id = 638 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID); 639 hw->device_id = 640 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID); 641 hw->revision_id = 642 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID); 643 hw->subsystem_device_id = 644 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID); 645 hw->subsystem_vendor_id = 646 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID); 647 648 if (e1000_set_mac_type(hw) != E1000_SUCCESS) { 649 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 650 "MAC type could not be set properly."); 651 return (DDI_FAILURE); 652 } 653 654 return (DDI_SUCCESS); 655 } 656 657 static int 658 e1000g_regs_map(struct e1000g *Adapter) 659 { 660 dev_info_t *devinfo = Adapter->dip; 661 struct e1000_hw *hw = &Adapter->shared; 662 struct e1000g_osdep *osdep = &Adapter->osdep; 663 off_t mem_size; 664 bar_info_t bar_info; 665 int offset, rnumber; 666 667 rnumber = ADAPTER_REG_SET; 668 /* Get size of adapter register memory */ 669 if (ddi_dev_regsize(devinfo, rnumber, &mem_size) != 670 DDI_SUCCESS) { 671 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 672 "ddi_dev_regsize for registers failed"); 673 return (DDI_FAILURE); 674 } 675 676 /* Map adapter register memory */ 677 if ((ddi_regs_map_setup(devinfo, rnumber, 678 (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr, 679 &osdep->reg_handle)) != DDI_SUCCESS) { 680 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 681 "ddi_regs_map_setup for registers failed"); 682 goto regs_map_fail; 683 } 684 685 /* ICH needs to map flash memory */ 686 switch (hw->mac.type) { 687 case e1000_ich8lan: 688 case e1000_ich9lan: 689 case e1000_ich10lan: 690 case e1000_pchlan: 691 case e1000_pch2lan: 692 case e1000_pch_lpt: 693 rnumber = ICH_FLASH_REG_SET; 694 695 /* get flash size */ 696 if (ddi_dev_regsize(devinfo, rnumber, 697 &mem_size) != DDI_SUCCESS) { 698 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 699 "ddi_dev_regsize for ICH flash failed"); 700 goto regs_map_fail; 701 } 702 703 /* map flash in */ 704 if (ddi_regs_map_setup(devinfo, rnumber, 705 (caddr_t *)&hw->flash_address, 0, 706 mem_size, &e1000g_regs_acc_attr, 707 &osdep->ich_flash_handle) != DDI_SUCCESS) { 708 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 709 "ddi_regs_map_setup for ICH flash failed"); 710 goto regs_map_fail; 711 } 712 break; 713 case e1000_pch_spt: 714 case e1000_pch_cnp: 715 case e1000_pch_tgp: 716 case e1000_pch_adp: 717 case e1000_pch_mtp: 718 case e1000_pch_lnp: 719 case e1000_pch_rpl: 720 case e1000_pch_arl: 721 case e1000_pch_ptp: 722 case e1000_pch_nvl: 723 /* 724 * On the SPT, the device flash is actually in BAR0, not a 725 * separate BAR. Therefore we end up setting the 726 * ich_flash_handle to be the same as the register handle. 727 * We mark the same to reduce the confusion in the other 728 * functions and macros. Though this does make the set up and 729 * tear-down path slightly more complicated. 730 */ 731 osdep->ich_flash_handle = osdep->reg_handle; 732 hw->flash_address = hw->hw_addr; 733 default: 734 break; 735 } 736 737 /* map io space */ 738 switch (hw->mac.type) { 739 case e1000_82544: 740 case e1000_82540: 741 case e1000_82545: 742 case e1000_82546: 743 case e1000_82541: 744 case e1000_82541_rev_2: 745 /* find the IO bar */ 746 rnumber = -1; 747 for (offset = PCI_CONF_BASE1; 748 offset <= PCI_CONF_BASE5; offset += 4) { 749 if (e1000g_get_bar_info(devinfo, offset, &bar_info) 750 != DDI_SUCCESS) 751 continue; 752 if (bar_info.type == E1000G_BAR_IO) { 753 rnumber = bar_info.rnumber; 754 break; 755 } 756 } 757 758 if (rnumber < 0) { 759 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 760 "No io space is found"); 761 goto regs_map_fail; 762 } 763 764 /* get io space size */ 765 if (ddi_dev_regsize(devinfo, rnumber, 766 &mem_size) != DDI_SUCCESS) { 767 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 768 "ddi_dev_regsize for io space failed"); 769 goto regs_map_fail; 770 } 771 772 /* map io space */ 773 if ((ddi_regs_map_setup(devinfo, rnumber, 774 (caddr_t *)&hw->io_base, 0, mem_size, 775 &e1000g_regs_acc_attr, 776 &osdep->io_reg_handle)) != DDI_SUCCESS) { 777 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 778 "ddi_regs_map_setup for io space failed"); 779 goto regs_map_fail; 780 } 781 break; 782 default: 783 hw->io_base = 0; 784 break; 785 } 786 787 return (DDI_SUCCESS); 788 789 regs_map_fail: 790 if (osdep->reg_handle != NULL) 791 ddi_regs_map_free(&osdep->reg_handle); 792 if (osdep->ich_flash_handle != NULL && hw->mac.type < e1000_pch_spt) 793 ddi_regs_map_free(&osdep->ich_flash_handle); 794 return (DDI_FAILURE); 795 } 796 797 static int 798 e1000g_set_driver_params(struct e1000g *Adapter) 799 { 800 struct e1000_hw *hw; 801 802 hw = &Adapter->shared; 803 804 /* Set MAC type and initialize hardware functions */ 805 if (e1000_setup_init_funcs(hw, true) != E1000_SUCCESS) { 806 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 807 "Could not setup hardware functions"); 808 return (DDI_FAILURE); 809 } 810 811 /* Get bus information */ 812 if (e1000_get_bus_info(hw) != E1000_SUCCESS) { 813 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 814 "Could not get bus information"); 815 return (DDI_FAILURE); 816 } 817 818 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word); 819 820 hw->mac.autoneg_failed = true; 821 822 /* Set the autoneg_wait_to_complete flag to B_FALSE */ 823 hw->phy.autoneg_wait_to_complete = false; 824 825 /* Adaptive IFS related changes */ 826 hw->mac.adaptive_ifs = true; 827 828 /* Enable phy init script for IGP phy of 82541/82547 */ 829 if ((hw->mac.type == e1000_82547) || 830 (hw->mac.type == e1000_82541) || 831 (hw->mac.type == e1000_82547_rev_2) || 832 (hw->mac.type == e1000_82541_rev_2)) 833 e1000_init_script_state_82541(hw, true); 834 835 /* Enable the TTL workaround for 82541/82547 */ 836 e1000_set_ttl_workaround_state_82541(hw, true); 837 838 #ifdef __sparc 839 Adapter->strip_crc = B_TRUE; 840 #else 841 Adapter->strip_crc = B_FALSE; 842 #endif 843 844 /* setup the maximum MTU size of the chip */ 845 e1000g_setup_max_mtu(Adapter); 846 847 /* Get speed/duplex settings in conf file */ 848 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL; 849 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 850 e1000g_force_speed_duplex(Adapter); 851 852 /* Get Jumbo Frames settings in conf file */ 853 e1000g_get_max_frame_size(Adapter); 854 855 /* Get conf file properties */ 856 e1000g_get_conf(Adapter); 857 858 /* enforce PCH limits */ 859 e1000g_pch_limits(Adapter); 860 861 /* Set Rx/Tx buffer size */ 862 e1000g_set_bufsize(Adapter); 863 864 /* Master Latency Timer */ 865 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER; 866 867 /* copper options */ 868 if (hw->phy.media_type == e1000_media_type_copper) { 869 hw->phy.mdix = 0; /* AUTO_ALL_MODES */ 870 hw->phy.disable_polarity_correction = false; 871 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */ 872 } 873 874 /* The initial link state should be "unknown" */ 875 Adapter->link_state = LINK_STATE_UNKNOWN; 876 877 /* Initialize rx parameters */ 878 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY; 879 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY; 880 881 /* Initialize tx parameters */ 882 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE; 883 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD; 884 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY; 885 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY; 886 887 /* Initialize rx parameters */ 888 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD; 889 890 return (DDI_SUCCESS); 891 } 892 893 static void 894 e1000g_setup_max_mtu(struct e1000g *Adapter) 895 { 896 struct e1000_mac_info *mac = &Adapter->shared.mac; 897 struct e1000_phy_info *phy = &Adapter->shared.phy; 898 899 switch (mac->type) { 900 /* types that do not support jumbo frames */ 901 case e1000_ich8lan: 902 case e1000_82573: 903 case e1000_82583: 904 Adapter->max_mtu = ETHERMTU; 905 break; 906 /* ich9 supports jumbo frames except on one phy type */ 907 case e1000_ich9lan: 908 if (phy->type == e1000_phy_ife) 909 Adapter->max_mtu = ETHERMTU; 910 else 911 Adapter->max_mtu = MAXIMUM_MTU_9K; 912 break; 913 /* pch can do jumbo frames up to 4K */ 914 case e1000_pchlan: 915 Adapter->max_mtu = MAXIMUM_MTU_4K; 916 break; 917 /* pch2 can do jumbo frames up to 9K */ 918 case e1000_pch2lan: 919 case e1000_pch_lpt: 920 case e1000_pch_spt: 921 case e1000_pch_cnp: 922 case e1000_pch_tgp: 923 case e1000_pch_adp: 924 case e1000_pch_mtp: 925 case e1000_pch_lnp: 926 case e1000_pch_rpl: 927 case e1000_pch_arl: 928 case e1000_pch_ptp: 929 case e1000_pch_nvl: 930 Adapter->max_mtu = MAXIMUM_MTU_9K; 931 break; 932 /* types with a special limit */ 933 case e1000_82571: 934 case e1000_82572: 935 case e1000_82574: 936 case e1000_80003es2lan: 937 case e1000_ich10lan: 938 if (e1000g_jumbo_mtu >= ETHERMTU && 939 e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) { 940 Adapter->max_mtu = e1000g_jumbo_mtu; 941 } else { 942 Adapter->max_mtu = MAXIMUM_MTU_9K; 943 } 944 break; 945 /* default limit is 16K */ 946 default: 947 Adapter->max_mtu = FRAME_SIZE_UPTO_16K - 948 sizeof (struct ether_vlan_header) - ETHERFCSL; 949 break; 950 } 951 } 952 953 static void 954 e1000g_set_bufsize(struct e1000g *Adapter) 955 { 956 struct e1000_mac_info *mac = &Adapter->shared.mac; 957 uint64_t rx_size; 958 uint64_t tx_size; 959 960 dev_info_t *devinfo = Adapter->dip; 961 #ifdef __sparc 962 ulong_t iommu_pagesize; 963 #endif 964 /* Get the system page size */ 965 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1); 966 967 #ifdef __sparc 968 iommu_pagesize = dvma_pagesize(devinfo); 969 if (iommu_pagesize != 0) { 970 if (Adapter->sys_page_sz == iommu_pagesize) { 971 if (iommu_pagesize > 0x4000) 972 Adapter->sys_page_sz = 0x4000; 973 } else { 974 if (Adapter->sys_page_sz > iommu_pagesize) 975 Adapter->sys_page_sz = iommu_pagesize; 976 } 977 } 978 if (Adapter->lso_enable) { 979 Adapter->dvma_page_num = E1000_LSO_MAXLEN / 980 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; 981 } else { 982 Adapter->dvma_page_num = Adapter->max_frame_size / 983 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; 984 } 985 ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM); 986 #endif 987 988 Adapter->min_frame_size = ETHERMIN + ETHERFCSL; 989 990 if (Adapter->mem_workaround_82546 && 991 ((mac->type == e1000_82545) || 992 (mac->type == e1000_82546) || 993 (mac->type == e1000_82546_rev_3))) { 994 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K; 995 } else { 996 rx_size = Adapter->max_frame_size; 997 if ((rx_size > FRAME_SIZE_UPTO_2K) && 998 (rx_size <= FRAME_SIZE_UPTO_4K)) 999 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K; 1000 else if ((rx_size > FRAME_SIZE_UPTO_4K) && 1001 (rx_size <= FRAME_SIZE_UPTO_8K)) 1002 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K; 1003 else if ((rx_size > FRAME_SIZE_UPTO_8K) && 1004 (rx_size <= FRAME_SIZE_UPTO_16K)) 1005 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K; 1006 else 1007 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K; 1008 } 1009 Adapter->rx_buffer_size += E1000G_IPALIGNROOM; 1010 1011 tx_size = Adapter->max_frame_size; 1012 if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K)) 1013 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K; 1014 else if ((tx_size > FRAME_SIZE_UPTO_4K) && 1015 (tx_size <= FRAME_SIZE_UPTO_8K)) 1016 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K; 1017 else if ((tx_size > FRAME_SIZE_UPTO_8K) && 1018 (tx_size <= FRAME_SIZE_UPTO_16K)) 1019 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K; 1020 else 1021 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K; 1022 1023 /* 1024 * For Wiseman adapters we have an requirement of having receive 1025 * buffers aligned at 256 byte boundary. Since Livengood does not 1026 * require this and forcing it for all hardwares will have 1027 * performance implications, I am making it applicable only for 1028 * Wiseman and for Jumbo frames enabled mode as rest of the time, 1029 * it is okay to have normal frames...but it does involve a 1030 * potential risk where we may loose data if buffer is not 1031 * aligned...so all wiseman boards to have 256 byte aligned 1032 * buffers 1033 */ 1034 if (mac->type < e1000_82543) 1035 Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE; 1036 else 1037 Adapter->rx_buf_align = 1; 1038 } 1039 1040 /* 1041 * e1000g_detach - driver detach 1042 * 1043 * The detach() function is the complement of the attach routine. 1044 * If cmd is set to DDI_DETACH, detach() is used to remove the 1045 * state associated with a given instance of a device node 1046 * prior to the removal of that instance from the system. 1047 * 1048 * The detach() function will be called once for each instance 1049 * of the device for which there has been a successful attach() 1050 * once there are no longer any opens on the device. 1051 * 1052 * Interrupts routine are disabled, All memory allocated by this 1053 * driver are freed. 1054 */ 1055 static int 1056 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 1057 { 1058 struct e1000g *Adapter; 1059 boolean_t rx_drain; 1060 1061 switch (cmd) { 1062 default: 1063 return (DDI_FAILURE); 1064 1065 case DDI_SUSPEND: 1066 return (e1000g_suspend(devinfo)); 1067 1068 case DDI_DETACH: 1069 break; 1070 } 1071 1072 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1073 if (Adapter == NULL) 1074 return (DDI_FAILURE); 1075 1076 rx_drain = e1000g_rx_drain(Adapter); 1077 if (!rx_drain && !e1000g_force_detach) 1078 return (DDI_FAILURE); 1079 1080 if (mac_unregister(Adapter->mh) != 0) { 1081 e1000g_log(Adapter, CE_WARN, "Unregister MAC failed"); 1082 return (DDI_FAILURE); 1083 } 1084 Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC; 1085 1086 ASSERT(!(Adapter->e1000g_state & E1000G_STARTED)); 1087 1088 if (!e1000g_force_detach && !rx_drain) 1089 return (DDI_FAILURE); 1090 1091 e1000g_unattach(devinfo, Adapter); 1092 1093 return (DDI_SUCCESS); 1094 } 1095 1096 /* 1097 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance 1098 */ 1099 void 1100 e1000g_free_priv_devi_node(private_devi_list_t *devi_node) 1101 { 1102 ASSERT(e1000g_private_devi_list != NULL); 1103 ASSERT(devi_node != NULL); 1104 1105 if (devi_node->prev != NULL) 1106 devi_node->prev->next = devi_node->next; 1107 if (devi_node->next != NULL) 1108 devi_node->next->prev = devi_node->prev; 1109 if (devi_node == e1000g_private_devi_list) 1110 e1000g_private_devi_list = devi_node->next; 1111 1112 kmem_free(devi_node->priv_dip, 1113 sizeof (struct dev_info)); 1114 kmem_free(devi_node, 1115 sizeof (private_devi_list_t)); 1116 } 1117 1118 static void 1119 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter) 1120 { 1121 private_devi_list_t *devi_node; 1122 int result; 1123 1124 if (Adapter->e1000g_blink != NULL) { 1125 ddi_periodic_delete(Adapter->e1000g_blink); 1126 Adapter->e1000g_blink = NULL; 1127 } 1128 1129 if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 1130 (void) e1000g_disable_intrs(Adapter); 1131 } 1132 1133 if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) { 1134 (void) mac_unregister(Adapter->mh); 1135 } 1136 1137 if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 1138 (void) e1000g_rem_intrs(Adapter); 1139 } 1140 1141 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) { 1142 (void) ddi_prop_remove_all(devinfo); 1143 } 1144 1145 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) { 1146 kstat_delete((kstat_t *)Adapter->e1000g_ksp); 1147 } 1148 1149 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) { 1150 stop_link_timer(Adapter); 1151 1152 mutex_enter(&e1000g_nvm_lock); 1153 result = e1000_reset_hw(&Adapter->shared); 1154 mutex_exit(&e1000g_nvm_lock); 1155 1156 if (result != E1000_SUCCESS) { 1157 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1158 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1159 } 1160 } 1161 1162 e1000g_release_multicast(Adapter); 1163 1164 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) { 1165 if (Adapter->osdep.reg_handle != NULL) 1166 ddi_regs_map_free(&Adapter->osdep.reg_handle); 1167 if (Adapter->osdep.ich_flash_handle != NULL && 1168 Adapter->shared.mac.type < e1000_pch_spt) 1169 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle); 1170 if (Adapter->osdep.io_reg_handle != NULL) 1171 ddi_regs_map_free(&Adapter->osdep.io_reg_handle); 1172 } 1173 1174 if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) { 1175 if (Adapter->osdep.cfg_handle != NULL) 1176 pci_config_teardown(&Adapter->osdep.cfg_handle); 1177 } 1178 1179 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) { 1180 e1000g_destroy_locks(Adapter); 1181 } 1182 1183 if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) { 1184 e1000g_fm_fini(Adapter); 1185 } 1186 1187 mutex_enter(&e1000g_rx_detach_lock); 1188 if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) { 1189 devi_node = Adapter->priv_devi_node; 1190 devi_node->flag |= E1000G_PRIV_DEVI_DETACH; 1191 1192 if (devi_node->pending_rx_count == 0) { 1193 e1000g_free_priv_devi_node(devi_node); 1194 } 1195 } 1196 mutex_exit(&e1000g_rx_detach_lock); 1197 1198 kmem_free((caddr_t)Adapter, sizeof (struct e1000g)); 1199 1200 /* 1201 * Another hotplug spec requirement, 1202 * run ddi_set_driver_private(devinfo, null); 1203 */ 1204 ddi_set_driver_private(devinfo, NULL); 1205 } 1206 1207 /* 1208 * Get the BAR type and rnumber for a given PCI BAR offset 1209 */ 1210 static int 1211 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info) 1212 { 1213 pci_regspec_t *regs; 1214 uint_t regs_length; 1215 int type, rnumber, rcount; 1216 1217 ASSERT((bar_offset >= PCI_CONF_BASE0) && 1218 (bar_offset <= PCI_CONF_BASE5)); 1219 1220 /* 1221 * Get the DDI "reg" property 1222 */ 1223 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 1224 DDI_PROP_DONTPASS, "reg", (int **)®s, 1225 ®s_length) != DDI_PROP_SUCCESS) { 1226 return (DDI_FAILURE); 1227 } 1228 1229 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t); 1230 /* 1231 * Check the BAR offset 1232 */ 1233 for (rnumber = 0; rnumber < rcount; ++rnumber) { 1234 if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) { 1235 type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK; 1236 break; 1237 } 1238 } 1239 1240 ddi_prop_free(regs); 1241 1242 if (rnumber >= rcount) 1243 return (DDI_FAILURE); 1244 1245 switch (type) { 1246 case PCI_ADDR_CONFIG: 1247 bar_info->type = E1000G_BAR_CONFIG; 1248 break; 1249 case PCI_ADDR_IO: 1250 bar_info->type = E1000G_BAR_IO; 1251 break; 1252 case PCI_ADDR_MEM32: 1253 bar_info->type = E1000G_BAR_MEM32; 1254 break; 1255 case PCI_ADDR_MEM64: 1256 bar_info->type = E1000G_BAR_MEM64; 1257 break; 1258 default: 1259 return (DDI_FAILURE); 1260 } 1261 bar_info->rnumber = rnumber; 1262 return (DDI_SUCCESS); 1263 } 1264 1265 static void 1266 e1000g_init_locks(struct e1000g *Adapter) 1267 { 1268 e1000g_tx_ring_t *tx_ring; 1269 e1000g_rx_ring_t *rx_ring; 1270 1271 rw_init(&Adapter->chip_lock, NULL, 1272 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1273 mutex_init(&Adapter->link_lock, NULL, 1274 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1275 mutex_init(&Adapter->watchdog_lock, NULL, 1276 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1277 1278 tx_ring = Adapter->tx_ring; 1279 1280 mutex_init(&tx_ring->tx_lock, NULL, 1281 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1282 mutex_init(&tx_ring->usedlist_lock, NULL, 1283 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1284 mutex_init(&tx_ring->freelist_lock, NULL, 1285 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1286 1287 rx_ring = Adapter->rx_ring; 1288 1289 mutex_init(&rx_ring->rx_lock, NULL, 1290 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1291 1292 mutex_init(&Adapter->e1000g_led_lock, NULL, 1293 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1294 } 1295 1296 static void 1297 e1000g_destroy_locks(struct e1000g *Adapter) 1298 { 1299 e1000g_tx_ring_t *tx_ring; 1300 e1000g_rx_ring_t *rx_ring; 1301 1302 mutex_destroy(&Adapter->e1000g_led_lock); 1303 1304 tx_ring = Adapter->tx_ring; 1305 mutex_destroy(&tx_ring->tx_lock); 1306 mutex_destroy(&tx_ring->usedlist_lock); 1307 mutex_destroy(&tx_ring->freelist_lock); 1308 1309 rx_ring = Adapter->rx_ring; 1310 mutex_destroy(&rx_ring->rx_lock); 1311 1312 mutex_destroy(&Adapter->link_lock); 1313 mutex_destroy(&Adapter->watchdog_lock); 1314 rw_destroy(&Adapter->chip_lock); 1315 1316 /* destory mutex initialized in shared code */ 1317 e1000_destroy_hw_mutex(&Adapter->shared); 1318 } 1319 1320 static int 1321 e1000g_resume(dev_info_t *devinfo) 1322 { 1323 struct e1000g *Adapter; 1324 1325 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1326 if (Adapter == NULL) 1327 e1000g_log(Adapter, CE_PANIC, 1328 "Instance pointer is null\n"); 1329 1330 if (Adapter->dip != devinfo) 1331 e1000g_log(Adapter, CE_PANIC, 1332 "Devinfo is not the same as saved devinfo\n"); 1333 1334 rw_enter(&Adapter->chip_lock, RW_WRITER); 1335 1336 if (Adapter->e1000g_state & E1000G_STARTED) { 1337 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { 1338 rw_exit(&Adapter->chip_lock); 1339 /* 1340 * We note the failure, but return success, as the 1341 * system is still usable without this controller. 1342 */ 1343 e1000g_log(Adapter, CE_WARN, 1344 "e1000g_resume: failed to restart controller\n"); 1345 return (DDI_SUCCESS); 1346 } 1347 /* Enable and start the watchdog timer */ 1348 enable_watchdog_timer(Adapter); 1349 } 1350 1351 Adapter->e1000g_state &= ~E1000G_SUSPENDED; 1352 1353 rw_exit(&Adapter->chip_lock); 1354 1355 return (DDI_SUCCESS); 1356 } 1357 1358 static int 1359 e1000g_suspend(dev_info_t *devinfo) 1360 { 1361 struct e1000g *Adapter; 1362 1363 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1364 if (Adapter == NULL) 1365 return (DDI_FAILURE); 1366 1367 rw_enter(&Adapter->chip_lock, RW_WRITER); 1368 1369 Adapter->e1000g_state |= E1000G_SUSPENDED; 1370 1371 /* if the port isn't plumbed, we can simply return */ 1372 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 1373 rw_exit(&Adapter->chip_lock); 1374 return (DDI_SUCCESS); 1375 } 1376 1377 e1000g_stop(Adapter, B_FALSE); 1378 1379 rw_exit(&Adapter->chip_lock); 1380 1381 /* Disable and stop all the timers */ 1382 disable_watchdog_timer(Adapter); 1383 stop_link_timer(Adapter); 1384 stop_82547_timer(Adapter->tx_ring); 1385 1386 return (DDI_SUCCESS); 1387 } 1388 1389 static int 1390 e1000g_init(struct e1000g *Adapter) 1391 { 1392 uint32_t pba; 1393 uint32_t high_water; 1394 struct e1000_hw *hw; 1395 clock_t link_timeout; 1396 int result; 1397 1398 hw = &Adapter->shared; 1399 1400 /* 1401 * reset to put the hardware in a known state 1402 * before we try to do anything with the eeprom 1403 */ 1404 mutex_enter(&e1000g_nvm_lock); 1405 result = e1000_reset_hw(hw); 1406 mutex_exit(&e1000g_nvm_lock); 1407 1408 if (result != E1000_SUCCESS) { 1409 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1410 goto init_fail; 1411 } 1412 1413 mutex_enter(&e1000g_nvm_lock); 1414 result = e1000_validate_nvm_checksum(hw); 1415 if (result < E1000_SUCCESS) { 1416 /* 1417 * Some PCI-E parts fail the first check due to 1418 * the link being in sleep state. Call it again, 1419 * if it fails a second time its a real issue. 1420 */ 1421 result = e1000_validate_nvm_checksum(hw); 1422 } 1423 mutex_exit(&e1000g_nvm_lock); 1424 1425 if (result < E1000_SUCCESS) { 1426 e1000g_log(Adapter, CE_WARN, 1427 "Invalid NVM checksum. Please contact " 1428 "the vendor to update the NVM."); 1429 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1430 goto init_fail; 1431 } 1432 1433 result = 0; 1434 #ifdef __sparc 1435 /* 1436 * First, we try to get the local ethernet address from OBP. If 1437 * failed, then we get it from the EEPROM of NIC card. 1438 */ 1439 result = e1000g_find_mac_address(Adapter); 1440 #endif 1441 /* Get the local ethernet address. */ 1442 if (!result) { 1443 mutex_enter(&e1000g_nvm_lock); 1444 result = e1000_read_mac_addr(hw); 1445 mutex_exit(&e1000g_nvm_lock); 1446 } 1447 1448 if (result < E1000_SUCCESS) { 1449 e1000g_log(Adapter, CE_WARN, "Read mac addr failed"); 1450 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1451 goto init_fail; 1452 } 1453 1454 /* check for valid mac address */ 1455 if (!is_valid_mac_addr(hw->mac.addr)) { 1456 e1000g_log(Adapter, CE_WARN, "Invalid mac addr"); 1457 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1458 goto init_fail; 1459 } 1460 1461 /* Set LAA state for 82571 chipset */ 1462 e1000_set_laa_state_82571(hw, true); 1463 1464 /* Master Latency Timer implementation */ 1465 if (Adapter->master_latency_timer) { 1466 pci_config_put8(Adapter->osdep.cfg_handle, 1467 PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer); 1468 } 1469 1470 if (hw->mac.type < e1000_82547) { 1471 /* 1472 * Total FIFO is 64K 1473 */ 1474 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1475 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 1476 else 1477 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 1478 } else if ((hw->mac.type == e1000_82571) || 1479 (hw->mac.type == e1000_82572) || 1480 (hw->mac.type == e1000_80003es2lan)) { 1481 /* 1482 * Total FIFO is 48K 1483 */ 1484 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1485 pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */ 1486 else 1487 pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */ 1488 } else if (hw->mac.type == e1000_82573) { 1489 pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */ 1490 } else if (hw->mac.type == e1000_82574) { 1491 /* Keep adapter default: 20K for Rx, 20K for Tx */ 1492 pba = E1000_READ_REG(hw, E1000_PBA); 1493 } else if (hw->mac.type == e1000_ich8lan) { 1494 pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */ 1495 } else if (hw->mac.type == e1000_ich9lan) { 1496 pba = E1000_PBA_10K; 1497 } else if (hw->mac.type == e1000_ich10lan) { 1498 pba = E1000_PBA_10K; 1499 } else if (hw->mac.type == e1000_pchlan) { 1500 pba = E1000_PBA_26K; 1501 } else if (hw->mac.type == e1000_pch2lan) { 1502 pba = E1000_PBA_26K; 1503 } else if (hw->mac.type == e1000_pch_lpt) { 1504 pba = E1000_PBA_26K; 1505 } else if (hw->mac.type == e1000_pch_spt) { 1506 pba = E1000_PBA_26K; 1507 } else if (hw->mac.type == e1000_pch_cnp) { 1508 pba = E1000_PBA_26K; 1509 } else if (hw->mac.type == e1000_pch_tgp) { 1510 pba = E1000_PBA_26K; 1511 } else if (hw->mac.type == e1000_pch_adp) { 1512 pba = E1000_PBA_26K; 1513 } else if (hw->mac.type == e1000_pch_mtp) { 1514 pba = E1000_PBA_26K; 1515 } else if (hw->mac.type == e1000_pch_lnp) { 1516 pba = E1000_PBA_26K; 1517 } else if (hw->mac.type == e1000_pch_rpl) { 1518 pba = E1000_PBA_26K; 1519 } else if (hw->mac.type == e1000_pch_arl) { 1520 pba = E1000_PBA_26K; 1521 } else if (hw->mac.type == e1000_pch_ptp) { 1522 pba = E1000_PBA_26K; 1523 } else if (hw->mac.type == e1000_pch_nvl) { 1524 pba = E1000_PBA_26K; 1525 } else { 1526 /* 1527 * Total FIFO is 40K 1528 */ 1529 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1530 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ 1531 else 1532 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ 1533 } 1534 E1000_WRITE_REG(hw, E1000_PBA, pba); 1535 1536 /* 1537 * These parameters set thresholds for the adapter's generation(Tx) 1538 * and response(Rx) to Ethernet PAUSE frames. These are just threshold 1539 * settings. Flow control is enabled or disabled in the configuration 1540 * file. 1541 * High-water mark is set down from the top of the rx fifo (not 1542 * sensitive to max_frame_size) and low-water is set just below 1543 * high-water mark. 1544 * The high water mark must be low enough to fit one full frame above 1545 * it in the rx FIFO. Should be the lower of: 1546 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early 1547 * receive size (assuming ERT set to E1000_ERT_2048), or the full 1548 * Rx FIFO size minus one full frame. 1549 */ 1550 high_water = min(((pba << 10) * 9 / 10), 1551 ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 || 1552 hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ? 1553 ((pba << 10) - (E1000_ERT_2048 << 3)) : 1554 ((pba << 10) - Adapter->max_frame_size))); 1555 1556 hw->fc.high_water = high_water & 0xFFF8; 1557 hw->fc.low_water = hw->fc.high_water - 8; 1558 1559 if (hw->mac.type == e1000_80003es2lan) 1560 hw->fc.pause_time = 0xFFFF; 1561 else 1562 hw->fc.pause_time = E1000_FC_PAUSE_TIME; 1563 hw->fc.send_xon = true; 1564 1565 /* 1566 * Reset the adapter hardware the second time. 1567 */ 1568 mutex_enter(&e1000g_nvm_lock); 1569 result = e1000_reset_hw(hw); 1570 mutex_exit(&e1000g_nvm_lock); 1571 1572 if (result != E1000_SUCCESS) { 1573 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1574 goto init_fail; 1575 } 1576 1577 /* disable wakeup control by default */ 1578 if (hw->mac.type >= e1000_82544) 1579 E1000_WRITE_REG(hw, E1000_WUC, 0); 1580 1581 /* 1582 * MWI should be disabled on 82546. 1583 */ 1584 if (hw->mac.type == e1000_82546) 1585 e1000_pci_clear_mwi(hw); 1586 else 1587 e1000_pci_set_mwi(hw); 1588 1589 /* 1590 * Configure/Initialize hardware 1591 */ 1592 mutex_enter(&e1000g_nvm_lock); 1593 result = e1000_init_hw(hw); 1594 mutex_exit(&e1000g_nvm_lock); 1595 1596 if (result < E1000_SUCCESS) { 1597 e1000g_log(Adapter, CE_WARN, "Initialize hw failed"); 1598 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1599 goto init_fail; 1600 } 1601 1602 /* 1603 * Restore LED settings to the default from EEPROM 1604 * to meet the standard for Sun platforms. 1605 */ 1606 (void) e1000_cleanup_led(hw); 1607 1608 /* Disable Smart Power Down */ 1609 phy_spd_state(hw, B_FALSE); 1610 1611 /* Make sure driver has control */ 1612 e1000g_get_driver_control(hw); 1613 1614 /* 1615 * Initialize unicast addresses. 1616 */ 1617 e1000g_init_unicst(Adapter); 1618 1619 /* 1620 * Setup and initialize the mctable structures. After this routine 1621 * completes Multicast table will be set 1622 */ 1623 e1000_update_mc_addr_list(hw, 1624 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 1625 msec_delay(5); 1626 1627 /* 1628 * Implement Adaptive IFS 1629 */ 1630 e1000_reset_adaptive(hw); 1631 1632 /* Setup Interrupt Throttling Register */ 1633 if (hw->mac.type >= e1000_82540) { 1634 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate); 1635 } else 1636 Adapter->intr_adaptive = B_FALSE; 1637 1638 /* Start the timer for link setup */ 1639 if (hw->mac.autoneg) 1640 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000); 1641 else 1642 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000); 1643 1644 mutex_enter(&Adapter->link_lock); 1645 if (hw->phy.autoneg_wait_to_complete) { 1646 Adapter->link_complete = B_TRUE; 1647 } else { 1648 Adapter->link_complete = B_FALSE; 1649 Adapter->link_tid = timeout(e1000g_link_timer, 1650 (void *)Adapter, link_timeout); 1651 } 1652 mutex_exit(&Adapter->link_lock); 1653 1654 /* Save the state of the phy */ 1655 e1000g_get_phy_state(Adapter); 1656 1657 e1000g_param_sync(Adapter); 1658 1659 Adapter->init_count++; 1660 1661 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 1662 goto init_fail; 1663 } 1664 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1665 goto init_fail; 1666 } 1667 1668 Adapter->poll_mode = e1000g_poll_mode; 1669 1670 return (DDI_SUCCESS); 1671 1672 init_fail: 1673 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1674 return (DDI_FAILURE); 1675 } 1676 1677 static int 1678 e1000g_alloc_rx_data(struct e1000g *Adapter) 1679 { 1680 e1000g_rx_ring_t *rx_ring; 1681 e1000g_rx_data_t *rx_data; 1682 1683 rx_ring = Adapter->rx_ring; 1684 1685 rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP); 1686 1687 if (rx_data == NULL) 1688 return (DDI_FAILURE); 1689 1690 rx_data->priv_devi_node = Adapter->priv_devi_node; 1691 rx_data->rx_ring = rx_ring; 1692 1693 mutex_init(&rx_data->freelist_lock, NULL, 1694 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1695 mutex_init(&rx_data->recycle_lock, NULL, 1696 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1697 1698 rx_ring->rx_data = rx_data; 1699 1700 return (DDI_SUCCESS); 1701 } 1702 1703 void 1704 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data) 1705 { 1706 rx_sw_packet_t *packet, *next_packet; 1707 1708 if (rx_data == NULL) 1709 return; 1710 1711 packet = rx_data->packet_area; 1712 while (packet != NULL) { 1713 next_packet = packet->next; 1714 e1000g_free_rx_sw_packet(packet, B_TRUE); 1715 packet = next_packet; 1716 } 1717 rx_data->packet_area = NULL; 1718 } 1719 1720 void 1721 e1000g_free_rx_data(e1000g_rx_data_t *rx_data) 1722 { 1723 if (rx_data == NULL) 1724 return; 1725 1726 mutex_destroy(&rx_data->freelist_lock); 1727 mutex_destroy(&rx_data->recycle_lock); 1728 1729 kmem_free(rx_data, sizeof (e1000g_rx_data_t)); 1730 } 1731 1732 /* 1733 * Check if the link is up 1734 */ 1735 static boolean_t 1736 e1000g_link_up(struct e1000g *Adapter) 1737 { 1738 struct e1000_hw *hw = &Adapter->shared; 1739 boolean_t link_up = B_FALSE; 1740 1741 /* 1742 * get_link_status is set in the interrupt handler on link-status-change 1743 * or rx sequence error interrupt. get_link_status will stay 1744 * false until the e1000_check_for_link establishes link only 1745 * for copper adapters. 1746 */ 1747 switch (hw->phy.media_type) { 1748 case e1000_media_type_copper: 1749 if (hw->mac.get_link_status) { 1750 /* 1751 * SPT and newer devices need a bit of extra time before 1752 * we ask them. 1753 */ 1754 if (hw->mac.type >= e1000_pch_spt) 1755 msec_delay(50); 1756 (void) e1000_check_for_link(hw); 1757 if ((E1000_READ_REG(hw, E1000_STATUS) & 1758 E1000_STATUS_LU)) { 1759 link_up = B_TRUE; 1760 } else { 1761 link_up = !hw->mac.get_link_status; 1762 } 1763 } else { 1764 link_up = B_TRUE; 1765 } 1766 break; 1767 case e1000_media_type_fiber: 1768 (void) e1000_check_for_link(hw); 1769 link_up = (E1000_READ_REG(hw, E1000_STATUS) & 1770 E1000_STATUS_LU); 1771 break; 1772 case e1000_media_type_internal_serdes: 1773 (void) e1000_check_for_link(hw); 1774 link_up = hw->mac.serdes_has_link; 1775 break; 1776 } 1777 1778 return (link_up); 1779 } 1780 1781 static void 1782 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp) 1783 { 1784 struct iocblk *iocp; 1785 struct e1000g *e1000gp; 1786 enum ioc_reply status; 1787 1788 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr; 1789 iocp->ioc_error = 0; 1790 e1000gp = (struct e1000g *)arg; 1791 1792 ASSERT(e1000gp); 1793 if (e1000gp == NULL) { 1794 miocnak(q, mp, 0, EINVAL); 1795 return; 1796 } 1797 1798 rw_enter(&e1000gp->chip_lock, RW_READER); 1799 if (e1000gp->e1000g_state & E1000G_SUSPENDED) { 1800 rw_exit(&e1000gp->chip_lock); 1801 miocnak(q, mp, 0, EINVAL); 1802 return; 1803 } 1804 rw_exit(&e1000gp->chip_lock); 1805 1806 switch (iocp->ioc_cmd) { 1807 1808 case LB_GET_INFO_SIZE: 1809 case LB_GET_INFO: 1810 case LB_GET_MODE: 1811 case LB_SET_MODE: 1812 status = e1000g_loopback_ioctl(e1000gp, iocp, mp); 1813 break; 1814 1815 1816 #ifdef E1000G_DEBUG 1817 case E1000G_IOC_REG_PEEK: 1818 case E1000G_IOC_REG_POKE: 1819 status = e1000g_pp_ioctl(e1000gp, iocp, mp); 1820 break; 1821 case E1000G_IOC_CHIP_RESET: 1822 e1000gp->reset_count++; 1823 if (e1000g_reset_adapter(e1000gp)) 1824 status = IOC_ACK; 1825 else 1826 status = IOC_INVAL; 1827 break; 1828 #endif 1829 default: 1830 status = IOC_INVAL; 1831 break; 1832 } 1833 1834 /* 1835 * Decide how to reply 1836 */ 1837 switch (status) { 1838 default: 1839 case IOC_INVAL: 1840 /* 1841 * Error, reply with a NAK and EINVAL or the specified error 1842 */ 1843 miocnak(q, mp, 0, iocp->ioc_error == 0 ? 1844 EINVAL : iocp->ioc_error); 1845 break; 1846 1847 case IOC_DONE: 1848 /* 1849 * OK, reply already sent 1850 */ 1851 break; 1852 1853 case IOC_ACK: 1854 /* 1855 * OK, reply with an ACK 1856 */ 1857 miocack(q, mp, 0, 0); 1858 break; 1859 1860 case IOC_REPLY: 1861 /* 1862 * OK, send prepared reply as ACK or NAK 1863 */ 1864 mp->b_datap->db_type = iocp->ioc_error == 0 ? 1865 M_IOCACK : M_IOCNAK; 1866 qreply(q, mp); 1867 break; 1868 } 1869 } 1870 1871 /* 1872 * The default value of e1000g_poll_mode == 0 assumes that the NIC is 1873 * capable of supporting only one interrupt and we shouldn't disable 1874 * the physical interrupt. In this case we let the interrupt come and 1875 * we queue the packets in the rx ring itself in case we are in polling 1876 * mode (better latency but slightly lower performance and a very 1877 * high intrrupt count in mpstat which is harmless). 1878 * 1879 * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt 1880 * which can be disabled in poll mode. This gives better overall 1881 * throughput (compared to the mode above), shows very low interrupt 1882 * count but has slightly higher latency since we pick the packets when 1883 * the poll thread does polling. 1884 * 1885 * Currently, this flag should be enabled only while doing performance 1886 * measurement or when it can be guaranteed that entire NIC going 1887 * in poll mode will not harm any traffic like cluster heartbeat etc. 1888 */ 1889 int e1000g_poll_mode = 0; 1890 1891 /* 1892 * Called from the upper layers when driver is in polling mode to 1893 * pick up any queued packets. Care should be taken to not block 1894 * this thread. 1895 */ 1896 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup) 1897 { 1898 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg; 1899 mblk_t *mp = NULL; 1900 mblk_t *tail; 1901 struct e1000g *adapter; 1902 1903 adapter = rx_ring->adapter; 1904 1905 rw_enter(&adapter->chip_lock, RW_READER); 1906 1907 if (adapter->e1000g_state & E1000G_SUSPENDED) { 1908 rw_exit(&adapter->chip_lock); 1909 return (NULL); 1910 } 1911 1912 mutex_enter(&rx_ring->rx_lock); 1913 mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup); 1914 mutex_exit(&rx_ring->rx_lock); 1915 rw_exit(&adapter->chip_lock); 1916 return (mp); 1917 } 1918 1919 static int 1920 e1000g_m_start(void *arg) 1921 { 1922 struct e1000g *Adapter = (struct e1000g *)arg; 1923 1924 rw_enter(&Adapter->chip_lock, RW_WRITER); 1925 1926 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 1927 rw_exit(&Adapter->chip_lock); 1928 return (ECANCELED); 1929 } 1930 1931 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { 1932 rw_exit(&Adapter->chip_lock); 1933 return (ENOTACTIVE); 1934 } 1935 1936 Adapter->e1000g_state |= E1000G_STARTED; 1937 1938 rw_exit(&Adapter->chip_lock); 1939 1940 /* Enable and start the watchdog timer */ 1941 enable_watchdog_timer(Adapter); 1942 1943 return (0); 1944 } 1945 1946 static int 1947 e1000g_start(struct e1000g *Adapter, boolean_t global) 1948 { 1949 e1000g_rx_data_t *rx_data; 1950 1951 if (global) { 1952 if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) { 1953 e1000g_log(Adapter, CE_WARN, "Allocate rx data failed"); 1954 goto start_fail; 1955 } 1956 1957 /* Allocate dma resources for descriptors and buffers */ 1958 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) { 1959 e1000g_log(Adapter, CE_WARN, 1960 "Alloc DMA resources failed"); 1961 goto start_fail; 1962 } 1963 Adapter->rx_buffer_setup = B_FALSE; 1964 } 1965 1966 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) { 1967 if (e1000g_init(Adapter) != DDI_SUCCESS) { 1968 e1000g_log(Adapter, CE_WARN, 1969 "Adapter initialization failed"); 1970 goto start_fail; 1971 } 1972 } 1973 1974 /* Setup and initialize the transmit structures */ 1975 e1000g_tx_setup(Adapter); 1976 msec_delay(5); 1977 1978 /* Setup and initialize the receive structures */ 1979 e1000g_rx_setup(Adapter); 1980 msec_delay(5); 1981 1982 /* Restore the e1000g promiscuous mode */ 1983 e1000g_restore_promisc(Adapter); 1984 1985 e1000g_mask_interrupt(Adapter); 1986 1987 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 1988 1989 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1990 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1991 goto start_fail; 1992 } 1993 1994 return (DDI_SUCCESS); 1995 1996 start_fail: 1997 rx_data = Adapter->rx_ring->rx_data; 1998 1999 if (global) { 2000 e1000g_release_dma_resources(Adapter); 2001 e1000g_free_rx_pending_buffers(rx_data); 2002 e1000g_free_rx_data(rx_data); 2003 } 2004 2005 mutex_enter(&e1000g_nvm_lock); 2006 (void) e1000_reset_hw(&Adapter->shared); 2007 mutex_exit(&e1000g_nvm_lock); 2008 2009 return (DDI_FAILURE); 2010 } 2011 2012 /* 2013 * The I219 has the curious property that if the descriptor rings are not 2014 * emptied before resetting the hardware or before changing the device state 2015 * based on runtime power management, it'll cause the card to hang. This can 2016 * then only be fixed by a PCI reset. As such, for the I219 and it alone, we 2017 * have to flush the rings if we're in this state. 2018 */ 2019 static void 2020 e1000g_flush_desc_rings(struct e1000g *Adapter) 2021 { 2022 struct e1000_hw *hw = &Adapter->shared; 2023 u16 hang_state; 2024 u32 fext_nvm11, tdlen; 2025 2026 /* First, disable MULR fix in FEXTNVM11 */ 2027 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11); 2028 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; 2029 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11); 2030 2031 /* do nothing if we're not in faulty state, or if the queue is empty */ 2032 tdlen = E1000_READ_REG(hw, E1000_TDLEN(0)); 2033 hang_state = pci_config_get16(Adapter->osdep.cfg_handle, 2034 PCICFG_DESC_RING_STATUS); 2035 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) 2036 return; 2037 e1000g_flush_tx_ring(Adapter); 2038 2039 /* recheck, maybe the fault is caused by the rx ring */ 2040 hang_state = pci_config_get16(Adapter->osdep.cfg_handle, 2041 PCICFG_DESC_RING_STATUS); 2042 if (hang_state & FLUSH_DESC_REQUIRED) 2043 e1000g_flush_rx_ring(Adapter); 2044 2045 } 2046 2047 static void 2048 e1000g_m_stop(void *arg) 2049 { 2050 struct e1000g *Adapter = (struct e1000g *)arg; 2051 2052 /* Drain tx sessions */ 2053 (void) e1000g_tx_drain(Adapter); 2054 2055 rw_enter(&Adapter->chip_lock, RW_WRITER); 2056 2057 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2058 rw_exit(&Adapter->chip_lock); 2059 return; 2060 } 2061 Adapter->e1000g_state &= ~E1000G_STARTED; 2062 e1000g_stop(Adapter, B_TRUE); 2063 2064 rw_exit(&Adapter->chip_lock); 2065 2066 /* Disable and stop all the timers */ 2067 disable_watchdog_timer(Adapter); 2068 stop_link_timer(Adapter); 2069 stop_82547_timer(Adapter->tx_ring); 2070 } 2071 2072 static void 2073 e1000g_stop(struct e1000g *Adapter, boolean_t global) 2074 { 2075 private_devi_list_t *devi_node; 2076 e1000g_rx_data_t *rx_data; 2077 int result; 2078 2079 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT; 2080 2081 /* Stop the chip and release pending resources */ 2082 2083 /* Tell firmware driver is no longer in control */ 2084 e1000g_release_driver_control(&Adapter->shared); 2085 2086 e1000g_clear_all_interrupts(Adapter); 2087 2088 mutex_enter(&e1000g_nvm_lock); 2089 result = e1000_reset_hw(&Adapter->shared); 2090 mutex_exit(&e1000g_nvm_lock); 2091 2092 if (result != E1000_SUCCESS) { 2093 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 2094 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 2095 } 2096 2097 mutex_enter(&Adapter->link_lock); 2098 Adapter->link_complete = B_FALSE; 2099 mutex_exit(&Adapter->link_lock); 2100 2101 /* Release resources still held by the TX descriptors */ 2102 e1000g_tx_clean(Adapter); 2103 2104 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 2105 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 2106 2107 /* Clean the pending rx jumbo packet fragment */ 2108 e1000g_rx_clean(Adapter); 2109 2110 /* 2111 * The I219, eg. the pch_spt, has bugs such that we must ensure that 2112 * rings are flushed before we do anything else. This must be done 2113 * before we release DMA resources. 2114 */ 2115 if (Adapter->shared.mac.type >= e1000_pch_spt) 2116 e1000g_flush_desc_rings(Adapter); 2117 2118 if (global) { 2119 e1000g_release_dma_resources(Adapter); 2120 2121 mutex_enter(&e1000g_rx_detach_lock); 2122 rx_data = Adapter->rx_ring->rx_data; 2123 rx_data->flag |= E1000G_RX_STOPPED; 2124 2125 if (rx_data->pending_count == 0) { 2126 e1000g_free_rx_pending_buffers(rx_data); 2127 e1000g_free_rx_data(rx_data); 2128 } else { 2129 devi_node = rx_data->priv_devi_node; 2130 if (devi_node != NULL) 2131 atomic_inc_32(&devi_node->pending_rx_count); 2132 else 2133 atomic_inc_32(&Adapter->pending_rx_count); 2134 } 2135 mutex_exit(&e1000g_rx_detach_lock); 2136 } 2137 2138 if (Adapter->link_state != LINK_STATE_UNKNOWN) { 2139 Adapter->link_state = LINK_STATE_UNKNOWN; 2140 if (!Adapter->reset_flag) 2141 mac_link_update(Adapter->mh, Adapter->link_state); 2142 } 2143 } 2144 2145 static void 2146 e1000g_rx_clean(struct e1000g *Adapter) 2147 { 2148 e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data; 2149 2150 if (rx_data == NULL) 2151 return; 2152 2153 if (rx_data->rx_mblk != NULL) { 2154 freemsg(rx_data->rx_mblk); 2155 rx_data->rx_mblk = NULL; 2156 rx_data->rx_mblk_tail = NULL; 2157 rx_data->rx_mblk_len = 0; 2158 } 2159 } 2160 2161 static void 2162 e1000g_tx_clean(struct e1000g *Adapter) 2163 { 2164 e1000g_tx_ring_t *tx_ring; 2165 p_tx_sw_packet_t packet; 2166 mblk_t *mp; 2167 mblk_t *nmp; 2168 uint32_t packet_count; 2169 2170 tx_ring = Adapter->tx_ring; 2171 2172 /* 2173 * Here we don't need to protect the lists using 2174 * the usedlist_lock and freelist_lock, for they 2175 * have been protected by the chip_lock. 2176 */ 2177 mp = NULL; 2178 nmp = NULL; 2179 packet_count = 0; 2180 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list); 2181 while (packet != NULL) { 2182 if (packet->mp != NULL) { 2183 /* Assemble the message chain */ 2184 if (mp == NULL) { 2185 mp = packet->mp; 2186 nmp = packet->mp; 2187 } else { 2188 nmp->b_next = packet->mp; 2189 nmp = packet->mp; 2190 } 2191 /* Disconnect the message from the sw packet */ 2192 packet->mp = NULL; 2193 } 2194 2195 e1000g_free_tx_swpkt(packet); 2196 packet_count++; 2197 2198 packet = (p_tx_sw_packet_t) 2199 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link); 2200 } 2201 2202 if (mp != NULL) 2203 freemsgchain(mp); 2204 2205 if (packet_count > 0) { 2206 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list); 2207 QUEUE_INIT_LIST(&tx_ring->used_list); 2208 2209 /* Setup TX descriptor pointers */ 2210 tx_ring->tbd_next = tx_ring->tbd_first; 2211 tx_ring->tbd_oldest = tx_ring->tbd_first; 2212 2213 /* Setup our HW Tx Head & Tail descriptor pointers */ 2214 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0); 2215 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0); 2216 } 2217 } 2218 2219 static boolean_t 2220 e1000g_tx_drain(struct e1000g *Adapter) 2221 { 2222 int i; 2223 boolean_t done; 2224 e1000g_tx_ring_t *tx_ring; 2225 2226 tx_ring = Adapter->tx_ring; 2227 2228 /* Allow up to 'wsdraintime' for pending xmit's to complete. */ 2229 for (i = 0; i < TX_DRAIN_TIME; i++) { 2230 mutex_enter(&tx_ring->usedlist_lock); 2231 done = IS_QUEUE_EMPTY(&tx_ring->used_list); 2232 mutex_exit(&tx_ring->usedlist_lock); 2233 2234 if (done) 2235 break; 2236 2237 msec_delay(1); 2238 } 2239 2240 return (done); 2241 } 2242 2243 static boolean_t 2244 e1000g_rx_drain(struct e1000g *Adapter) 2245 { 2246 int i; 2247 boolean_t done; 2248 2249 /* 2250 * Allow up to RX_DRAIN_TIME for pending received packets to complete. 2251 */ 2252 for (i = 0; i < RX_DRAIN_TIME; i++) { 2253 done = (Adapter->pending_rx_count == 0); 2254 2255 if (done) 2256 break; 2257 2258 msec_delay(1); 2259 } 2260 2261 return (done); 2262 } 2263 2264 static boolean_t 2265 e1000g_reset_adapter(struct e1000g *Adapter) 2266 { 2267 /* Disable and stop all the timers */ 2268 disable_watchdog_timer(Adapter); 2269 stop_link_timer(Adapter); 2270 stop_82547_timer(Adapter->tx_ring); 2271 2272 rw_enter(&Adapter->chip_lock, RW_WRITER); 2273 2274 if (Adapter->stall_flag) { 2275 Adapter->stall_flag = B_FALSE; 2276 Adapter->reset_flag = B_TRUE; 2277 } 2278 2279 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 2280 rw_exit(&Adapter->chip_lock); 2281 return (B_TRUE); 2282 } 2283 2284 e1000g_stop(Adapter, B_FALSE); 2285 2286 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { 2287 rw_exit(&Adapter->chip_lock); 2288 e1000g_log(Adapter, CE_WARN, "Reset failed"); 2289 return (B_FALSE); 2290 } 2291 2292 rw_exit(&Adapter->chip_lock); 2293 2294 /* Enable and start the watchdog timer */ 2295 enable_watchdog_timer(Adapter); 2296 2297 return (B_TRUE); 2298 } 2299 2300 boolean_t 2301 e1000g_global_reset(struct e1000g *Adapter) 2302 { 2303 /* Disable and stop all the timers */ 2304 disable_watchdog_timer(Adapter); 2305 stop_link_timer(Adapter); 2306 stop_82547_timer(Adapter->tx_ring); 2307 2308 rw_enter(&Adapter->chip_lock, RW_WRITER); 2309 2310 e1000g_stop(Adapter, B_TRUE); 2311 2312 Adapter->init_count = 0; 2313 2314 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { 2315 rw_exit(&Adapter->chip_lock); 2316 e1000g_log(Adapter, CE_WARN, "Reset failed"); 2317 return (B_FALSE); 2318 } 2319 2320 rw_exit(&Adapter->chip_lock); 2321 2322 /* Enable and start the watchdog timer */ 2323 enable_watchdog_timer(Adapter); 2324 2325 return (B_TRUE); 2326 } 2327 2328 /* 2329 * e1000g_intr_pciexpress - ISR for PCI Express chipsets 2330 * 2331 * This interrupt service routine is for PCI-Express adapters. 2332 * The ICR contents is valid only when the E1000_ICR_INT_ASSERTED 2333 * bit is set. 2334 */ 2335 static uint_t 2336 e1000g_intr_pciexpress(caddr_t arg, caddr_t arg1 __unused) 2337 { 2338 struct e1000g *Adapter; 2339 uint32_t icr; 2340 2341 Adapter = (struct e1000g *)(uintptr_t)arg; 2342 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 2343 2344 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2345 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2346 return (DDI_INTR_CLAIMED); 2347 } 2348 2349 if (icr & E1000_ICR_INT_ASSERTED) { 2350 /* 2351 * E1000_ICR_INT_ASSERTED bit was set: 2352 * Read(Clear) the ICR, claim this interrupt, 2353 * look for work to do. 2354 */ 2355 e1000g_intr_work(Adapter, icr); 2356 return (DDI_INTR_CLAIMED); 2357 } else { 2358 /* 2359 * E1000_ICR_INT_ASSERTED bit was not set: 2360 * Don't claim this interrupt, return immediately. 2361 */ 2362 return (DDI_INTR_UNCLAIMED); 2363 } 2364 } 2365 2366 /* 2367 * e1000g_intr - ISR for PCI/PCI-X chipsets 2368 * 2369 * This interrupt service routine is for PCI/PCI-X adapters. 2370 * We check the ICR contents no matter the E1000_ICR_INT_ASSERTED 2371 * bit is set or not. 2372 */ 2373 static uint_t 2374 e1000g_intr(caddr_t arg, caddr_t arg1 __unused) 2375 { 2376 struct e1000g *Adapter; 2377 uint32_t icr; 2378 2379 Adapter = (struct e1000g *)(uintptr_t)arg; 2380 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 2381 2382 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2383 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2384 return (DDI_INTR_CLAIMED); 2385 } 2386 2387 if (icr) { 2388 /* 2389 * Any bit was set in ICR: 2390 * Read(Clear) the ICR, claim this interrupt, 2391 * look for work to do. 2392 */ 2393 e1000g_intr_work(Adapter, icr); 2394 return (DDI_INTR_CLAIMED); 2395 } else { 2396 /* 2397 * No bit was set in ICR: 2398 * Don't claim this interrupt, return immediately. 2399 */ 2400 return (DDI_INTR_UNCLAIMED); 2401 } 2402 } 2403 2404 /* 2405 * e1000g_intr_work - actual processing of ISR 2406 * 2407 * Read(clear) the ICR contents and call appropriate interrupt 2408 * processing routines. 2409 */ 2410 static void 2411 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr) 2412 { 2413 struct e1000_hw *hw; 2414 hw = &Adapter->shared; 2415 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 2416 2417 Adapter->rx_pkt_cnt = 0; 2418 Adapter->tx_pkt_cnt = 0; 2419 2420 rw_enter(&Adapter->chip_lock, RW_READER); 2421 2422 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2423 rw_exit(&Adapter->chip_lock); 2424 return; 2425 } 2426 /* 2427 * Here we need to check the "e1000g_state" flag within the chip_lock to 2428 * ensure the receive routine will not execute when the adapter is 2429 * being reset. 2430 */ 2431 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 2432 rw_exit(&Adapter->chip_lock); 2433 return; 2434 } 2435 2436 if (icr & E1000_ICR_RXT0) { 2437 mblk_t *mp = NULL; 2438 mblk_t *tail = NULL; 2439 e1000g_rx_ring_t *rx_ring; 2440 2441 rx_ring = Adapter->rx_ring; 2442 mutex_enter(&rx_ring->rx_lock); 2443 /* 2444 * Sometimes with legacy interrupts, it possible that 2445 * there is a single interrupt for Rx/Tx. In which 2446 * case, if poll flag is set, we shouldn't really 2447 * be doing Rx processing. 2448 */ 2449 if (!rx_ring->poll_flag) 2450 mp = e1000g_receive(rx_ring, &tail, 2451 E1000G_CHAIN_NO_LIMIT); 2452 mutex_exit(&rx_ring->rx_lock); 2453 rw_exit(&Adapter->chip_lock); 2454 if (mp != NULL) 2455 mac_rx_ring(Adapter->mh, rx_ring->mrh, 2456 mp, rx_ring->ring_gen_num); 2457 } else 2458 rw_exit(&Adapter->chip_lock); 2459 2460 if (icr & E1000_ICR_TXDW) { 2461 if (!Adapter->tx_intr_enable) 2462 e1000g_clear_tx_interrupt(Adapter); 2463 2464 /* Recycle the tx descriptors */ 2465 rw_enter(&Adapter->chip_lock, RW_READER); 2466 (void) e1000g_recycle(tx_ring); 2467 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr); 2468 rw_exit(&Adapter->chip_lock); 2469 2470 if (tx_ring->resched_needed && 2471 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) { 2472 tx_ring->resched_needed = B_FALSE; 2473 mac_tx_update(Adapter->mh); 2474 E1000G_STAT(tx_ring->stat_reschedule); 2475 } 2476 } 2477 2478 /* 2479 * The Receive Sequence errors RXSEQ and the link status change LSC 2480 * are checked to detect that the cable has been pulled out. For 2481 * the Wiseman 2.0 silicon, the receive sequence errors interrupt 2482 * are an indication that cable is not connected. 2483 */ 2484 if ((icr & E1000_ICR_RXSEQ) || 2485 (icr & E1000_ICR_LSC) || 2486 (icr & E1000_ICR_GPI_EN1)) { 2487 boolean_t link_changed; 2488 timeout_id_t tid = 0; 2489 2490 stop_watchdog_timer(Adapter); 2491 2492 rw_enter(&Adapter->chip_lock, RW_WRITER); 2493 2494 /* 2495 * Because we got a link-status-change interrupt, force 2496 * e1000_check_for_link() to look at phy 2497 */ 2498 Adapter->shared.mac.get_link_status = B_TRUE; 2499 2500 /* e1000g_link_check takes care of link status change */ 2501 link_changed = e1000g_link_check(Adapter); 2502 2503 /* Get new phy state */ 2504 e1000g_get_phy_state(Adapter); 2505 2506 /* 2507 * If the link timer has not timed out, we'll not notify 2508 * the upper layer with any link state until the link is up. 2509 */ 2510 if (link_changed && !Adapter->link_complete) { 2511 if (Adapter->link_state == LINK_STATE_UP) { 2512 mutex_enter(&Adapter->link_lock); 2513 Adapter->link_complete = B_TRUE; 2514 tid = Adapter->link_tid; 2515 Adapter->link_tid = 0; 2516 mutex_exit(&Adapter->link_lock); 2517 } else { 2518 link_changed = B_FALSE; 2519 } 2520 } 2521 rw_exit(&Adapter->chip_lock); 2522 2523 if (link_changed) { 2524 if (tid != 0) 2525 (void) untimeout(tid); 2526 2527 /* 2528 * Workaround for esb2. Data stuck in fifo on a link 2529 * down event. Stop receiver here and reset in watchdog. 2530 */ 2531 if ((Adapter->link_state == LINK_STATE_DOWN) && 2532 (Adapter->shared.mac.type == e1000_80003es2lan)) { 2533 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL); 2534 E1000_WRITE_REG(hw, E1000_RCTL, 2535 rctl & ~E1000_RCTL_EN); 2536 e1000g_log(Adapter, CE_WARN, 2537 "ESB2 receiver disabled"); 2538 Adapter->esb2_workaround = B_TRUE; 2539 } 2540 if (!Adapter->reset_flag) 2541 mac_link_update(Adapter->mh, 2542 Adapter->link_state); 2543 if (Adapter->link_state == LINK_STATE_UP) 2544 Adapter->reset_flag = B_FALSE; 2545 } 2546 2547 start_watchdog_timer(Adapter); 2548 } 2549 } 2550 2551 static void 2552 e1000g_init_unicst(struct e1000g *Adapter) 2553 { 2554 struct e1000_hw *hw; 2555 int slot; 2556 2557 hw = &Adapter->shared; 2558 2559 if (Adapter->init_count == 0) { 2560 /* Initialize the multiple unicast addresses */ 2561 Adapter->unicst_total = min(hw->mac.rar_entry_count, 2562 MAX_NUM_UNICAST_ADDRESSES); 2563 2564 /* 2565 * The common code does not correctly calculate the number of 2566 * rar's that could be reserved by firmware for the pch_lpt and 2567 * pch_spt macs. The interface has one primary rar, and 11 2568 * additional ones. Those 11 additional ones are not always 2569 * available. According to the datasheet, we need to check a 2570 * few of the bits set in the FWSM register. If the value is 2571 * zero, everything is available. If the value is 1, none of the 2572 * additional registers are available. If the value is 2-7, only 2573 * that number are available. 2574 */ 2575 if (hw->mac.type >= e1000_pch_lpt) { 2576 uint32_t locked, rar; 2577 2578 locked = E1000_READ_REG(hw, E1000_FWSM) & 2579 E1000_FWSM_WLOCK_MAC_MASK; 2580 locked >>= E1000_FWSM_WLOCK_MAC_SHIFT; 2581 rar = 1; 2582 if (locked == 0) 2583 rar += 11; 2584 else if (locked == 1) 2585 rar += 0; 2586 else 2587 rar += locked; 2588 Adapter->unicst_total = min(rar, 2589 MAX_NUM_UNICAST_ADDRESSES); 2590 } 2591 2592 /* Workaround for an erratum of 82571 chipst */ 2593 if ((hw->mac.type == e1000_82571) && 2594 e1000_get_laa_state_82571(hw)) 2595 Adapter->unicst_total--; 2596 2597 /* VMware doesn't support multiple mac addresses properly */ 2598 if (hw->subsystem_vendor_id == 0x15ad) 2599 Adapter->unicst_total = 1; 2600 2601 Adapter->unicst_avail = Adapter->unicst_total; 2602 2603 for (slot = 0; slot < Adapter->unicst_total; slot++) { 2604 /* Clear both the flag and MAC address */ 2605 Adapter->unicst_addr[slot].reg.high = 0; 2606 Adapter->unicst_addr[slot].reg.low = 0; 2607 } 2608 } else { 2609 /* Workaround for an erratum of 82571 chipst */ 2610 if ((hw->mac.type == e1000_82571) && 2611 e1000_get_laa_state_82571(hw)) 2612 (void) e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY); 2613 2614 /* Re-configure the RAR registers */ 2615 for (slot = 0; slot < Adapter->unicst_total; slot++) 2616 if (Adapter->unicst_addr[slot].mac.set == 1) 2617 (void) e1000_rar_set(hw, 2618 Adapter->unicst_addr[slot].mac.addr, slot); 2619 } 2620 2621 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 2622 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2623 } 2624 2625 static int 2626 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr, 2627 int slot) 2628 { 2629 struct e1000_hw *hw; 2630 2631 hw = &Adapter->shared; 2632 2633 /* 2634 * The first revision of Wiseman silicon (rev 2.0) has an errata 2635 * that requires the receiver to be in reset when any of the 2636 * receive address registers (RAR regs) are accessed. The first 2637 * rev of Wiseman silicon also requires MWI to be disabled when 2638 * a global reset or a receive reset is issued. So before we 2639 * initialize the RARs, we check the rev of the Wiseman controller 2640 * and work around any necessary HW errata. 2641 */ 2642 if ((hw->mac.type == e1000_82542) && 2643 (hw->revision_id == E1000_REVISION_2)) { 2644 e1000_pci_clear_mwi(hw); 2645 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); 2646 msec_delay(5); 2647 } 2648 if (mac_addr == NULL) { 2649 E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0); 2650 E1000_WRITE_FLUSH(hw); 2651 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0); 2652 E1000_WRITE_FLUSH(hw); 2653 /* Clear both the flag and MAC address */ 2654 Adapter->unicst_addr[slot].reg.high = 0; 2655 Adapter->unicst_addr[slot].reg.low = 0; 2656 } else { 2657 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr, 2658 ETHERADDRL); 2659 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot); 2660 Adapter->unicst_addr[slot].mac.set = 1; 2661 } 2662 2663 /* Workaround for an erratum of 82571 chipst */ 2664 if (slot == 0) { 2665 if ((hw->mac.type == e1000_82571) && 2666 e1000_get_laa_state_82571(hw)) { 2667 if (mac_addr == NULL) { 2668 E1000_WRITE_REG_ARRAY(hw, E1000_RA, 2669 slot << 1, 0); 2670 E1000_WRITE_FLUSH(hw); 2671 E1000_WRITE_REG_ARRAY(hw, E1000_RA, 2672 (slot << 1) + 1, 0); 2673 E1000_WRITE_FLUSH(hw); 2674 } else { 2675 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, 2676 LAST_RAR_ENTRY); 2677 } 2678 } 2679 } 2680 2681 /* 2682 * If we are using Wiseman rev 2.0 silicon, we will have previously 2683 * put the receive in reset, and disabled MWI, to work around some 2684 * HW errata. Now we should take the receiver out of reset, and 2685 * re-enabled if MWI if it was previously enabled by the PCI BIOS. 2686 */ 2687 if ((hw->mac.type == e1000_82542) && 2688 (hw->revision_id == E1000_REVISION_2)) { 2689 E1000_WRITE_REG(hw, E1000_RCTL, 0); 2690 msec_delay(1); 2691 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 2692 e1000_pci_set_mwi(hw); 2693 e1000g_rx_setup(Adapter); 2694 } 2695 2696 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2697 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2698 return (EIO); 2699 } 2700 2701 return (0); 2702 } 2703 2704 static int 2705 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr) 2706 { 2707 struct e1000_hw *hw = &Adapter->shared; 2708 struct ether_addr *newtable; 2709 size_t new_len; 2710 size_t old_len; 2711 int res = 0; 2712 2713 if ((multiaddr[0] & 01) == 0) { 2714 res = EINVAL; 2715 e1000g_log(Adapter, CE_WARN, "Illegal multicast address"); 2716 goto done; 2717 } 2718 2719 if (Adapter->mcast_count >= Adapter->mcast_max_num) { 2720 res = ENOENT; 2721 e1000g_log(Adapter, CE_WARN, 2722 "Adapter requested more than %d mcast addresses", 2723 Adapter->mcast_max_num); 2724 goto done; 2725 } 2726 2727 2728 if (Adapter->mcast_count == Adapter->mcast_alloc_count) { 2729 old_len = Adapter->mcast_alloc_count * 2730 sizeof (struct ether_addr); 2731 new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) * 2732 sizeof (struct ether_addr); 2733 2734 newtable = kmem_alloc(new_len, KM_NOSLEEP); 2735 if (newtable == NULL) { 2736 res = ENOMEM; 2737 e1000g_log(Adapter, CE_WARN, 2738 "Not enough memory to alloc mcast table"); 2739 goto done; 2740 } 2741 2742 if (Adapter->mcast_table != NULL) { 2743 bcopy(Adapter->mcast_table, newtable, old_len); 2744 kmem_free(Adapter->mcast_table, old_len); 2745 } 2746 Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE; 2747 Adapter->mcast_table = newtable; 2748 } 2749 2750 bcopy(multiaddr, 2751 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL); 2752 Adapter->mcast_count++; 2753 2754 /* 2755 * Update the MC table in the hardware 2756 */ 2757 e1000g_clear_interrupt(Adapter); 2758 2759 e1000_update_mc_addr_list(hw, 2760 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 2761 2762 e1000g_mask_interrupt(Adapter); 2763 2764 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2765 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2766 res = EIO; 2767 } 2768 2769 done: 2770 return (res); 2771 } 2772 2773 static int 2774 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr) 2775 { 2776 struct e1000_hw *hw = &Adapter->shared; 2777 struct ether_addr *newtable; 2778 size_t new_len; 2779 size_t old_len; 2780 unsigned i; 2781 2782 for (i = 0; i < Adapter->mcast_count; i++) { 2783 if (bcmp(multiaddr, &Adapter->mcast_table[i], 2784 ETHERADDRL) == 0) { 2785 for (i++; i < Adapter->mcast_count; i++) { 2786 Adapter->mcast_table[i - 1] = 2787 Adapter->mcast_table[i]; 2788 } 2789 Adapter->mcast_count--; 2790 break; 2791 } 2792 } 2793 2794 if ((Adapter->mcast_alloc_count - Adapter->mcast_count) > 2795 MCAST_ALLOC_SIZE) { 2796 old_len = Adapter->mcast_alloc_count * 2797 sizeof (struct ether_addr); 2798 new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) * 2799 sizeof (struct ether_addr); 2800 2801 newtable = kmem_alloc(new_len, KM_NOSLEEP); 2802 if (newtable != NULL) { 2803 bcopy(Adapter->mcast_table, newtable, new_len); 2804 kmem_free(Adapter->mcast_table, old_len); 2805 2806 Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE; 2807 Adapter->mcast_table = newtable; 2808 } 2809 } 2810 2811 /* 2812 * Update the MC table in the hardware 2813 */ 2814 e1000g_clear_interrupt(Adapter); 2815 2816 e1000_update_mc_addr_list(hw, 2817 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 2818 2819 e1000g_mask_interrupt(Adapter); 2820 2821 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2822 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2823 return (EIO); 2824 } 2825 2826 return (0); 2827 } 2828 2829 static void 2830 e1000g_release_multicast(struct e1000g *Adapter) 2831 { 2832 if (Adapter->mcast_table != NULL) { 2833 kmem_free(Adapter->mcast_table, 2834 Adapter->mcast_alloc_count * sizeof (struct ether_addr)); 2835 Adapter->mcast_table = NULL; 2836 } 2837 } 2838 2839 int 2840 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr) 2841 { 2842 struct e1000g *Adapter = (struct e1000g *)arg; 2843 int result; 2844 2845 rw_enter(&Adapter->chip_lock, RW_WRITER); 2846 2847 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2848 result = ECANCELED; 2849 goto done; 2850 } 2851 2852 result = (add) ? multicst_add(Adapter, addr) 2853 : multicst_remove(Adapter, addr); 2854 2855 done: 2856 rw_exit(&Adapter->chip_lock); 2857 return (result); 2858 2859 } 2860 2861 int 2862 e1000g_m_promisc(void *arg, boolean_t on) 2863 { 2864 struct e1000g *Adapter = (struct e1000g *)arg; 2865 uint32_t rctl; 2866 2867 rw_enter(&Adapter->chip_lock, RW_WRITER); 2868 2869 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2870 rw_exit(&Adapter->chip_lock); 2871 return (ECANCELED); 2872 } 2873 2874 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL); 2875 2876 if (on) 2877 rctl |= 2878 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM); 2879 else 2880 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE)); 2881 2882 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl); 2883 2884 Adapter->e1000g_promisc = on; 2885 2886 rw_exit(&Adapter->chip_lock); 2887 2888 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2889 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2890 return (EIO); 2891 } 2892 2893 return (0); 2894 } 2895 2896 /* 2897 * Entry points to enable and disable interrupts at the granularity of 2898 * a group. 2899 * Turns the poll_mode for the whole adapter on and off to enable or 2900 * override the ring level polling control over the hardware interrupts. 2901 */ 2902 static int 2903 e1000g_rx_group_intr_enable(mac_intr_handle_t arg) 2904 { 2905 struct e1000g *adapter = (struct e1000g *)arg; 2906 e1000g_rx_ring_t *rx_ring = adapter->rx_ring; 2907 2908 /* 2909 * Later interrupts at the granularity of the this ring will 2910 * invoke mac_rx() with NULL, indicating the need for another 2911 * software classification. 2912 * We have a single ring usable per adapter now, so we only need to 2913 * reset the rx handle for that one. 2914 * When more RX rings can be used, we should update each one of them. 2915 */ 2916 mutex_enter(&rx_ring->rx_lock); 2917 rx_ring->mrh = NULL; 2918 adapter->poll_mode = B_FALSE; 2919 mutex_exit(&rx_ring->rx_lock); 2920 return (0); 2921 } 2922 2923 static int 2924 e1000g_rx_group_intr_disable(mac_intr_handle_t arg) 2925 { 2926 struct e1000g *adapter = (struct e1000g *)arg; 2927 e1000g_rx_ring_t *rx_ring = adapter->rx_ring; 2928 2929 mutex_enter(&rx_ring->rx_lock); 2930 2931 /* 2932 * Later interrupts at the granularity of the this ring will 2933 * invoke mac_rx() with the handle for this ring; 2934 */ 2935 adapter->poll_mode = B_TRUE; 2936 rx_ring->mrh = rx_ring->mrh_init; 2937 mutex_exit(&rx_ring->rx_lock); 2938 return (0); 2939 } 2940 2941 /* 2942 * Entry points to enable and disable interrupts at the granularity of 2943 * a ring. 2944 * adapter poll_mode controls whether we actually proceed with hardware 2945 * interrupt toggling. 2946 */ 2947 static int 2948 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh) 2949 { 2950 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh; 2951 struct e1000g *adapter = rx_ring->adapter; 2952 struct e1000_hw *hw = &adapter->shared; 2953 uint32_t intr_mask; 2954 2955 rw_enter(&adapter->chip_lock, RW_READER); 2956 2957 if (adapter->e1000g_state & E1000G_SUSPENDED) { 2958 rw_exit(&adapter->chip_lock); 2959 return (0); 2960 } 2961 2962 mutex_enter(&rx_ring->rx_lock); 2963 rx_ring->poll_flag = 0; 2964 mutex_exit(&rx_ring->rx_lock); 2965 2966 /* Rx interrupt enabling for MSI and legacy */ 2967 intr_mask = E1000_READ_REG(hw, E1000_IMS); 2968 intr_mask |= E1000_IMS_RXT0; 2969 E1000_WRITE_REG(hw, E1000_IMS, intr_mask); 2970 E1000_WRITE_FLUSH(hw); 2971 2972 /* Trigger a Rx interrupt to check Rx ring */ 2973 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 2974 E1000_WRITE_FLUSH(hw); 2975 2976 rw_exit(&adapter->chip_lock); 2977 return (0); 2978 } 2979 2980 static int 2981 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh) 2982 { 2983 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh; 2984 struct e1000g *adapter = rx_ring->adapter; 2985 struct e1000_hw *hw = &adapter->shared; 2986 2987 rw_enter(&adapter->chip_lock, RW_READER); 2988 2989 if (adapter->e1000g_state & E1000G_SUSPENDED) { 2990 rw_exit(&adapter->chip_lock); 2991 return (0); 2992 } 2993 mutex_enter(&rx_ring->rx_lock); 2994 rx_ring->poll_flag = 1; 2995 mutex_exit(&rx_ring->rx_lock); 2996 2997 /* Rx interrupt disabling for MSI and legacy */ 2998 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0); 2999 E1000_WRITE_FLUSH(hw); 3000 3001 rw_exit(&adapter->chip_lock); 3002 return (0); 3003 } 3004 3005 /* 3006 * e1000g_unicst_find - Find the slot for the specified unicast address 3007 */ 3008 static int 3009 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr) 3010 { 3011 int slot; 3012 3013 for (slot = 0; slot < Adapter->unicst_total; slot++) { 3014 if ((Adapter->unicst_addr[slot].mac.set == 1) && 3015 (bcmp(Adapter->unicst_addr[slot].mac.addr, 3016 mac_addr, ETHERADDRL) == 0)) 3017 return (slot); 3018 } 3019 3020 return (-1); 3021 } 3022 3023 /* 3024 * Entry points to add and remove a MAC address to a ring group. 3025 * The caller takes care of adding and removing the MAC addresses 3026 * to the filter via these two routines. 3027 */ 3028 3029 static int 3030 e1000g_addmac(void *arg, const uint8_t *mac_addr) 3031 { 3032 struct e1000g *Adapter = (struct e1000g *)arg; 3033 int slot, err; 3034 3035 rw_enter(&Adapter->chip_lock, RW_WRITER); 3036 3037 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3038 rw_exit(&Adapter->chip_lock); 3039 return (ECANCELED); 3040 } 3041 3042 if (e1000g_unicst_find(Adapter, mac_addr) != -1) { 3043 /* The same address is already in slot */ 3044 rw_exit(&Adapter->chip_lock); 3045 return (0); 3046 } 3047 3048 if (Adapter->unicst_avail == 0) { 3049 /* no slots available */ 3050 rw_exit(&Adapter->chip_lock); 3051 return (ENOSPC); 3052 } 3053 3054 /* Search for a free slot */ 3055 for (slot = 0; slot < Adapter->unicst_total; slot++) { 3056 if (Adapter->unicst_addr[slot].mac.set == 0) 3057 break; 3058 } 3059 ASSERT(slot < Adapter->unicst_total); 3060 3061 err = e1000g_unicst_set(Adapter, mac_addr, slot); 3062 if (err == 0) 3063 Adapter->unicst_avail--; 3064 3065 rw_exit(&Adapter->chip_lock); 3066 3067 return (err); 3068 } 3069 3070 static int 3071 e1000g_remmac(void *arg, const uint8_t *mac_addr) 3072 { 3073 struct e1000g *Adapter = (struct e1000g *)arg; 3074 int slot, err; 3075 3076 rw_enter(&Adapter->chip_lock, RW_WRITER); 3077 3078 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3079 rw_exit(&Adapter->chip_lock); 3080 return (ECANCELED); 3081 } 3082 3083 slot = e1000g_unicst_find(Adapter, mac_addr); 3084 if (slot == -1) { 3085 rw_exit(&Adapter->chip_lock); 3086 return (EINVAL); 3087 } 3088 3089 ASSERT(Adapter->unicst_addr[slot].mac.set); 3090 3091 /* Clear this slot */ 3092 err = e1000g_unicst_set(Adapter, NULL, slot); 3093 if (err == 0) 3094 Adapter->unicst_avail++; 3095 3096 rw_exit(&Adapter->chip_lock); 3097 3098 return (err); 3099 } 3100 3101 static int 3102 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 3103 { 3104 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh; 3105 3106 mutex_enter(&rx_ring->rx_lock); 3107 rx_ring->ring_gen_num = mr_gen_num; 3108 mutex_exit(&rx_ring->rx_lock); 3109 return (0); 3110 } 3111 3112 /* 3113 * Callback funtion for MAC layer to register all rings. 3114 * 3115 * The hardware supports a single group with currently only one ring 3116 * available. 3117 * Though not offering virtualization ability per se, exposing the 3118 * group/ring still enables the polling and interrupt toggling. 3119 */ 3120 /* ARGSUSED */ 3121 void 3122 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index, 3123 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 3124 { 3125 struct e1000g *Adapter = (struct e1000g *)arg; 3126 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring; 3127 mac_intr_t *mintr; 3128 3129 /* 3130 * We advertised only RX group/rings, so the MAC framework shouldn't 3131 * ask for any thing else. 3132 */ 3133 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0); 3134 3135 rx_ring->mrh = rx_ring->mrh_init = rh; 3136 infop->mri_driver = (mac_ring_driver_t)rx_ring; 3137 infop->mri_start = e1000g_ring_start; 3138 infop->mri_stop = NULL; 3139 infop->mri_poll = e1000g_poll_ring; 3140 infop->mri_stat = e1000g_rx_ring_stat; 3141 3142 /* Ring level interrupts */ 3143 mintr = &infop->mri_intr; 3144 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 3145 mintr->mi_enable = e1000g_rx_ring_intr_enable; 3146 mintr->mi_disable = e1000g_rx_ring_intr_disable; 3147 if (Adapter->msi_enable) 3148 mintr->mi_ddi_handle = Adapter->htable[0]; 3149 } 3150 3151 /* ARGSUSED */ 3152 static void 3153 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index, 3154 mac_group_info_t *infop, mac_group_handle_t gh) 3155 { 3156 struct e1000g *Adapter = (struct e1000g *)arg; 3157 mac_intr_t *mintr; 3158 3159 /* 3160 * We advertised a single RX ring. Getting a request for anything else 3161 * signifies a bug in the MAC framework. 3162 */ 3163 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0); 3164 3165 Adapter->rx_group = gh; 3166 3167 infop->mgi_driver = (mac_group_driver_t)Adapter; 3168 infop->mgi_start = NULL; 3169 infop->mgi_stop = NULL; 3170 infop->mgi_addmac = e1000g_addmac; 3171 infop->mgi_remmac = e1000g_remmac; 3172 infop->mgi_count = 1; 3173 3174 /* Group level interrupts */ 3175 mintr = &infop->mgi_intr; 3176 mintr->mi_handle = (mac_intr_handle_t)Adapter; 3177 mintr->mi_enable = e1000g_rx_group_intr_enable; 3178 mintr->mi_disable = e1000g_rx_group_intr_disable; 3179 } 3180 3181 static void 3182 e1000g_led_blink(void *arg) 3183 { 3184 e1000g_t *e1000g = arg; 3185 3186 mutex_enter(&e1000g->e1000g_led_lock); 3187 VERIFY(e1000g->e1000g_emul_blink); 3188 if (e1000g->e1000g_emul_state) { 3189 (void) e1000_led_on(&e1000g->shared); 3190 } else { 3191 (void) e1000_led_off(&e1000g->shared); 3192 } 3193 e1000g->e1000g_emul_state = !e1000g->e1000g_emul_state; 3194 mutex_exit(&e1000g->e1000g_led_lock); 3195 } 3196 3197 static int 3198 e1000g_led_set(void *arg, mac_led_mode_t mode, uint_t flags) 3199 { 3200 e1000g_t *e1000g = arg; 3201 3202 if (flags != 0) 3203 return (EINVAL); 3204 3205 if (mode != MAC_LED_DEFAULT && 3206 mode != MAC_LED_IDENT && 3207 mode != MAC_LED_OFF && 3208 mode != MAC_LED_ON) 3209 return (ENOTSUP); 3210 3211 mutex_enter(&e1000g->e1000g_led_lock); 3212 3213 if ((mode == MAC_LED_IDENT || mode == MAC_LED_OFF || 3214 mode == MAC_LED_ON) && 3215 !e1000g->e1000g_led_setup) { 3216 if (e1000_setup_led(&e1000g->shared) != E1000_SUCCESS) { 3217 mutex_exit(&e1000g->e1000g_led_lock); 3218 return (EIO); 3219 } 3220 3221 e1000g->e1000g_led_setup = B_TRUE; 3222 } 3223 3224 if (mode != MAC_LED_IDENT && e1000g->e1000g_blink != NULL) { 3225 ddi_periodic_t id = e1000g->e1000g_blink; 3226 e1000g->e1000g_blink = NULL; 3227 mutex_exit(&e1000g->e1000g_led_lock); 3228 ddi_periodic_delete(id); 3229 mutex_enter(&e1000g->e1000g_led_lock); 3230 } 3231 3232 switch (mode) { 3233 case MAC_LED_DEFAULT: 3234 if (e1000g->e1000g_led_setup) { 3235 if (e1000_cleanup_led(&e1000g->shared) != 3236 E1000_SUCCESS) { 3237 mutex_exit(&e1000g->e1000g_led_lock); 3238 return (EIO); 3239 } 3240 e1000g->e1000g_led_setup = B_FALSE; 3241 } 3242 break; 3243 case MAC_LED_IDENT: 3244 if (e1000g->e1000g_emul_blink) { 3245 if (e1000g->e1000g_blink != NULL) 3246 break; 3247 3248 /* 3249 * Note, we use a 200 ms period here as that's what 3250 * section 10.1.3 8254x Intel Manual (PCI/PCI-X Family 3251 * of Gigabit Ethernet Controllers Software Developer's 3252 * Manual) indicates that the optional blink hardware 3253 * operates at. 3254 */ 3255 e1000g->e1000g_blink = 3256 ddi_periodic_add(e1000g_led_blink, e1000g, 3257 200ULL * (NANOSEC / MILLISEC), DDI_IPL_0); 3258 } else if (e1000_blink_led(&e1000g->shared) != E1000_SUCCESS) { 3259 mutex_exit(&e1000g->e1000g_led_lock); 3260 return (EIO); 3261 } 3262 break; 3263 case MAC_LED_OFF: 3264 if (e1000_led_off(&e1000g->shared) != E1000_SUCCESS) { 3265 mutex_exit(&e1000g->e1000g_led_lock); 3266 return (EIO); 3267 } 3268 break; 3269 case MAC_LED_ON: 3270 if (e1000_led_on(&e1000g->shared) != E1000_SUCCESS) { 3271 mutex_exit(&e1000g->e1000g_led_lock); 3272 return (EIO); 3273 } 3274 break; 3275 default: 3276 mutex_exit(&e1000g->e1000g_led_lock); 3277 return (ENOTSUP); 3278 } 3279 3280 mutex_exit(&e1000g->e1000g_led_lock); 3281 return (0); 3282 3283 } 3284 3285 static boolean_t 3286 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3287 { 3288 struct e1000g *Adapter = (struct e1000g *)arg; 3289 3290 switch (cap) { 3291 case MAC_CAPAB_HCKSUM: { 3292 uint32_t *txflags = cap_data; 3293 3294 if (Adapter->tx_hcksum_enable) 3295 *txflags = HCKSUM_IPHDRCKSUM | 3296 HCKSUM_INET_PARTIAL; 3297 else 3298 return (B_FALSE); 3299 break; 3300 } 3301 3302 case MAC_CAPAB_LSO: { 3303 mac_capab_lso_t *cap_lso = cap_data; 3304 3305 if (Adapter->lso_enable) { 3306 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 3307 cap_lso->lso_basic_tcp_ipv4.lso_max = 3308 E1000_LSO_MAXLEN; 3309 } else 3310 return (B_FALSE); 3311 break; 3312 } 3313 case MAC_CAPAB_RINGS: { 3314 mac_capab_rings_t *cap_rings = cap_data; 3315 3316 /* No TX rings exposed yet */ 3317 if (cap_rings->mr_type != MAC_RING_TYPE_RX) 3318 return (B_FALSE); 3319 3320 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC; 3321 cap_rings->mr_rnum = 1; 3322 cap_rings->mr_gnum = 1; 3323 cap_rings->mr_rget = e1000g_fill_ring; 3324 cap_rings->mr_gget = e1000g_fill_group; 3325 break; 3326 } 3327 case MAC_CAPAB_LED: { 3328 mac_capab_led_t *cap_led = cap_data; 3329 3330 cap_led->mcl_flags = 0; 3331 cap_led->mcl_modes = MAC_LED_DEFAULT; 3332 if (Adapter->shared.mac.ops.blink_led != NULL && 3333 Adapter->shared.mac.ops.blink_led != 3334 e1000_null_ops_generic) { 3335 cap_led->mcl_modes |= MAC_LED_IDENT; 3336 } 3337 3338 if (Adapter->shared.mac.ops.led_off != NULL && 3339 Adapter->shared.mac.ops.led_off != 3340 e1000_null_ops_generic) { 3341 cap_led->mcl_modes |= MAC_LED_OFF; 3342 } 3343 3344 if (Adapter->shared.mac.ops.led_on != NULL && 3345 Adapter->shared.mac.ops.led_on != 3346 e1000_null_ops_generic) { 3347 cap_led->mcl_modes |= MAC_LED_ON; 3348 } 3349 3350 /* 3351 * Some hardware doesn't support blinking natively as they're 3352 * missing the optional blink circuit. If they have both off and 3353 * on then we'll emulate it ourselves. 3354 */ 3355 if (((cap_led->mcl_modes & MAC_LED_IDENT) == 0) && 3356 ((cap_led->mcl_modes & MAC_LED_OFF) != 0) && 3357 ((cap_led->mcl_modes & MAC_LED_ON) != 0)) { 3358 cap_led->mcl_modes |= MAC_LED_IDENT; 3359 Adapter->e1000g_emul_blink = B_TRUE; 3360 } 3361 3362 cap_led->mcl_set = e1000g_led_set; 3363 break; 3364 } 3365 default: 3366 return (B_FALSE); 3367 } 3368 return (B_TRUE); 3369 } 3370 3371 static boolean_t 3372 e1000g_param_locked(mac_prop_id_t pr_num) 3373 { 3374 /* 3375 * All en_* parameters are locked (read-only) while 3376 * the device is in any sort of loopback mode ... 3377 */ 3378 switch (pr_num) { 3379 case MAC_PROP_EN_1000FDX_CAP: 3380 case MAC_PROP_EN_1000HDX_CAP: 3381 case MAC_PROP_EN_100FDX_CAP: 3382 case MAC_PROP_EN_100HDX_CAP: 3383 case MAC_PROP_EN_10FDX_CAP: 3384 case MAC_PROP_EN_10HDX_CAP: 3385 case MAC_PROP_AUTONEG: 3386 case MAC_PROP_FLOWCTRL: 3387 return (B_TRUE); 3388 } 3389 return (B_FALSE); 3390 } 3391 3392 /* 3393 * callback function for set/get of properties 3394 */ 3395 static int 3396 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3397 uint_t pr_valsize, const void *pr_val) 3398 { 3399 struct e1000g *Adapter = arg; 3400 struct e1000_hw *hw = &Adapter->shared; 3401 struct e1000_fc_info *fc = &Adapter->shared.fc; 3402 int err = 0; 3403 link_flowctrl_t flowctrl; 3404 uint32_t cur_mtu, new_mtu; 3405 3406 rw_enter(&Adapter->chip_lock, RW_WRITER); 3407 3408 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3409 rw_exit(&Adapter->chip_lock); 3410 return (ECANCELED); 3411 } 3412 3413 if (Adapter->loopback_mode != E1000G_LB_NONE && 3414 e1000g_param_locked(pr_num)) { 3415 /* 3416 * All en_* parameters are locked (read-only) 3417 * while the device is in any sort of loopback mode. 3418 */ 3419 rw_exit(&Adapter->chip_lock); 3420 return (EBUSY); 3421 } 3422 3423 switch (pr_num) { 3424 case MAC_PROP_EN_1000FDX_CAP: 3425 if (hw->phy.media_type != e1000_media_type_copper) { 3426 err = ENOTSUP; 3427 break; 3428 } 3429 Adapter->param_en_1000fdx = *(uint8_t *)pr_val; 3430 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val; 3431 goto reset; 3432 case MAC_PROP_EN_100FDX_CAP: 3433 if (hw->phy.media_type != e1000_media_type_copper) { 3434 err = ENOTSUP; 3435 break; 3436 } 3437 Adapter->param_en_100fdx = *(uint8_t *)pr_val; 3438 Adapter->param_adv_100fdx = *(uint8_t *)pr_val; 3439 goto reset; 3440 case MAC_PROP_EN_100HDX_CAP: 3441 if (hw->phy.media_type != e1000_media_type_copper) { 3442 err = ENOTSUP; 3443 break; 3444 } 3445 Adapter->param_en_100hdx = *(uint8_t *)pr_val; 3446 Adapter->param_adv_100hdx = *(uint8_t *)pr_val; 3447 goto reset; 3448 case MAC_PROP_EN_10FDX_CAP: 3449 if (hw->phy.media_type != e1000_media_type_copper) { 3450 err = ENOTSUP; 3451 break; 3452 } 3453 Adapter->param_en_10fdx = *(uint8_t *)pr_val; 3454 Adapter->param_adv_10fdx = *(uint8_t *)pr_val; 3455 goto reset; 3456 case MAC_PROP_EN_10HDX_CAP: 3457 if (hw->phy.media_type != e1000_media_type_copper) { 3458 err = ENOTSUP; 3459 break; 3460 } 3461 Adapter->param_en_10hdx = *(uint8_t *)pr_val; 3462 Adapter->param_adv_10hdx = *(uint8_t *)pr_val; 3463 goto reset; 3464 case MAC_PROP_AUTONEG: 3465 if (hw->phy.media_type != e1000_media_type_copper) { 3466 err = ENOTSUP; 3467 break; 3468 } 3469 Adapter->param_adv_autoneg = *(uint8_t *)pr_val; 3470 goto reset; 3471 case MAC_PROP_FLOWCTRL: 3472 fc->send_xon = true; 3473 bcopy(pr_val, &flowctrl, sizeof (flowctrl)); 3474 3475 switch (flowctrl) { 3476 default: 3477 err = EINVAL; 3478 break; 3479 case LINK_FLOWCTRL_NONE: 3480 fc->requested_mode = e1000_fc_none; 3481 break; 3482 case LINK_FLOWCTRL_RX: 3483 fc->requested_mode = e1000_fc_rx_pause; 3484 break; 3485 case LINK_FLOWCTRL_TX: 3486 fc->requested_mode = e1000_fc_tx_pause; 3487 break; 3488 case LINK_FLOWCTRL_BI: 3489 fc->requested_mode = e1000_fc_full; 3490 break; 3491 } 3492 reset: 3493 if (err == 0) { 3494 /* check PCH limits & reset the link */ 3495 e1000g_pch_limits(Adapter); 3496 if (e1000g_reset_link(Adapter) != DDI_SUCCESS) 3497 err = EINVAL; 3498 } 3499 break; 3500 case MAC_PROP_ADV_1000FDX_CAP: 3501 case MAC_PROP_ADV_1000HDX_CAP: 3502 case MAC_PROP_ADV_100FDX_CAP: 3503 case MAC_PROP_ADV_100HDX_CAP: 3504 case MAC_PROP_ADV_10FDX_CAP: 3505 case MAC_PROP_ADV_10HDX_CAP: 3506 case MAC_PROP_EN_1000HDX_CAP: 3507 case MAC_PROP_STATUS: 3508 case MAC_PROP_SPEED: 3509 case MAC_PROP_DUPLEX: 3510 case MAC_PROP_MEDIA: 3511 err = ENOTSUP; /* read-only prop. Can't set this. */ 3512 break; 3513 case MAC_PROP_MTU: 3514 /* adapter must be stopped for an MTU change */ 3515 if (Adapter->e1000g_state & E1000G_STARTED) { 3516 err = EBUSY; 3517 break; 3518 } 3519 3520 cur_mtu = Adapter->default_mtu; 3521 3522 /* get new requested MTU */ 3523 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 3524 if (new_mtu == cur_mtu) { 3525 err = 0; 3526 break; 3527 } 3528 3529 if ((new_mtu < DEFAULT_MTU) || 3530 (new_mtu > Adapter->max_mtu)) { 3531 err = EINVAL; 3532 break; 3533 } 3534 3535 /* inform MAC framework of new MTU */ 3536 err = mac_maxsdu_update(Adapter->mh, new_mtu); 3537 3538 if (err == 0) { 3539 Adapter->default_mtu = new_mtu; 3540 Adapter->max_frame_size = 3541 e1000g_mtu2maxframe(new_mtu); 3542 3543 /* 3544 * check PCH limits & set buffer sizes to 3545 * match new MTU 3546 */ 3547 e1000g_pch_limits(Adapter); 3548 e1000g_set_bufsize(Adapter); 3549 3550 /* 3551 * decrease the number of descriptors and free 3552 * packets for jumbo frames to reduce tx/rx 3553 * resource consumption 3554 */ 3555 if (Adapter->max_frame_size >= 3556 (FRAME_SIZE_UPTO_4K)) { 3557 if (Adapter->tx_desc_num_flag == 0) 3558 Adapter->tx_desc_num = 3559 DEFAULT_JUMBO_NUM_TX_DESC; 3560 3561 if (Adapter->rx_desc_num_flag == 0) 3562 Adapter->rx_desc_num = 3563 DEFAULT_JUMBO_NUM_RX_DESC; 3564 3565 if (Adapter->tx_buf_num_flag == 0) 3566 Adapter->tx_freelist_num = 3567 DEFAULT_JUMBO_NUM_TX_BUF; 3568 3569 if (Adapter->rx_buf_num_flag == 0) 3570 Adapter->rx_freelist_limit = 3571 DEFAULT_JUMBO_NUM_RX_BUF; 3572 } else { 3573 if (Adapter->tx_desc_num_flag == 0) 3574 Adapter->tx_desc_num = 3575 DEFAULT_NUM_TX_DESCRIPTOR; 3576 3577 if (Adapter->rx_desc_num_flag == 0) 3578 Adapter->rx_desc_num = 3579 DEFAULT_NUM_RX_DESCRIPTOR; 3580 3581 if (Adapter->tx_buf_num_flag == 0) 3582 Adapter->tx_freelist_num = 3583 DEFAULT_NUM_TX_FREELIST; 3584 3585 if (Adapter->rx_buf_num_flag == 0) 3586 Adapter->rx_freelist_limit = 3587 DEFAULT_NUM_RX_FREELIST; 3588 } 3589 } 3590 break; 3591 case MAC_PROP_PRIVATE: 3592 err = e1000g_set_priv_prop(Adapter, pr_name, 3593 pr_valsize, pr_val); 3594 break; 3595 default: 3596 err = ENOTSUP; 3597 break; 3598 } 3599 rw_exit(&Adapter->chip_lock); 3600 return (err); 3601 } 3602 3603 static int 3604 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3605 uint_t pr_valsize, void *pr_val) 3606 { 3607 struct e1000g *Adapter = arg; 3608 struct e1000_hw *hw = &Adapter->shared; 3609 struct e1000_fc_info *fc = &Adapter->shared.fc; 3610 int err = 0; 3611 link_flowctrl_t flowctrl; 3612 uint64_t tmp = 0; 3613 3614 switch (pr_num) { 3615 case MAC_PROP_DUPLEX: 3616 ASSERT(pr_valsize >= sizeof (link_duplex_t)); 3617 bcopy(&Adapter->link_duplex, pr_val, 3618 sizeof (link_duplex_t)); 3619 break; 3620 case MAC_PROP_SPEED: 3621 ASSERT(pr_valsize >= sizeof (uint64_t)); 3622 tmp = Adapter->link_speed * 1000000ull; 3623 bcopy(&tmp, pr_val, sizeof (tmp)); 3624 break; 3625 case MAC_PROP_AUTONEG: 3626 *(uint8_t *)pr_val = Adapter->param_adv_autoneg; 3627 break; 3628 case MAC_PROP_FLOWCTRL: 3629 ASSERT(pr_valsize >= sizeof (link_flowctrl_t)); 3630 switch (fc->current_mode) { 3631 case e1000_fc_none: 3632 flowctrl = LINK_FLOWCTRL_NONE; 3633 break; 3634 case e1000_fc_rx_pause: 3635 flowctrl = LINK_FLOWCTRL_RX; 3636 break; 3637 case e1000_fc_tx_pause: 3638 flowctrl = LINK_FLOWCTRL_TX; 3639 break; 3640 case e1000_fc_full: 3641 flowctrl = LINK_FLOWCTRL_BI; 3642 break; 3643 } 3644 bcopy(&flowctrl, pr_val, sizeof (flowctrl)); 3645 break; 3646 case MAC_PROP_ADV_1000FDX_CAP: 3647 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx; 3648 break; 3649 case MAC_PROP_EN_1000FDX_CAP: 3650 *(uint8_t *)pr_val = Adapter->param_en_1000fdx; 3651 break; 3652 case MAC_PROP_ADV_1000HDX_CAP: 3653 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx; 3654 break; 3655 case MAC_PROP_EN_1000HDX_CAP: 3656 *(uint8_t *)pr_val = Adapter->param_en_1000hdx; 3657 break; 3658 case MAC_PROP_ADV_100FDX_CAP: 3659 *(uint8_t *)pr_val = Adapter->param_adv_100fdx; 3660 break; 3661 case MAC_PROP_EN_100FDX_CAP: 3662 *(uint8_t *)pr_val = Adapter->param_en_100fdx; 3663 break; 3664 case MAC_PROP_ADV_100HDX_CAP: 3665 *(uint8_t *)pr_val = Adapter->param_adv_100hdx; 3666 break; 3667 case MAC_PROP_EN_100HDX_CAP: 3668 *(uint8_t *)pr_val = Adapter->param_en_100hdx; 3669 break; 3670 case MAC_PROP_ADV_10FDX_CAP: 3671 *(uint8_t *)pr_val = Adapter->param_adv_10fdx; 3672 break; 3673 case MAC_PROP_EN_10FDX_CAP: 3674 *(uint8_t *)pr_val = Adapter->param_en_10fdx; 3675 break; 3676 case MAC_PROP_ADV_10HDX_CAP: 3677 *(uint8_t *)pr_val = Adapter->param_adv_10hdx; 3678 break; 3679 case MAC_PROP_EN_10HDX_CAP: 3680 *(uint8_t *)pr_val = Adapter->param_en_10hdx; 3681 break; 3682 case MAC_PROP_ADV_100T4_CAP: 3683 case MAC_PROP_EN_100T4_CAP: 3684 *(uint8_t *)pr_val = Adapter->param_adv_100t4; 3685 break; 3686 case MAC_PROP_MEDIA: 3687 *(mac_ether_media_t *)pr_val = e1000_link_to_media(hw, 3688 Adapter->link_speed); 3689 break; 3690 case MAC_PROP_PRIVATE: 3691 err = e1000g_get_priv_prop(Adapter, pr_name, 3692 pr_valsize, pr_val); 3693 break; 3694 default: 3695 err = ENOTSUP; 3696 break; 3697 } 3698 3699 return (err); 3700 } 3701 3702 static void 3703 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3704 mac_prop_info_handle_t prh) 3705 { 3706 struct e1000g *Adapter = arg; 3707 struct e1000_hw *hw = &Adapter->shared; 3708 3709 switch (pr_num) { 3710 case MAC_PROP_DUPLEX: 3711 case MAC_PROP_SPEED: 3712 case MAC_PROP_ADV_1000FDX_CAP: 3713 case MAC_PROP_ADV_1000HDX_CAP: 3714 case MAC_PROP_ADV_100FDX_CAP: 3715 case MAC_PROP_ADV_100HDX_CAP: 3716 case MAC_PROP_ADV_10FDX_CAP: 3717 case MAC_PROP_ADV_10HDX_CAP: 3718 case MAC_PROP_ADV_100T4_CAP: 3719 case MAC_PROP_EN_100T4_CAP: 3720 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3721 break; 3722 3723 case MAC_PROP_EN_1000FDX_CAP: 3724 if (hw->phy.media_type != e1000_media_type_copper) { 3725 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3726 } else { 3727 mac_prop_info_set_default_uint8(prh, 3728 ((Adapter->phy_ext_status & 3729 IEEE_ESR_1000T_FD_CAPS) || 3730 (Adapter->phy_ext_status & 3731 IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0); 3732 } 3733 break; 3734 3735 case MAC_PROP_EN_100FDX_CAP: 3736 if (hw->phy.media_type != e1000_media_type_copper) { 3737 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3738 } else { 3739 mac_prop_info_set_default_uint8(prh, 3740 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 3741 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 3742 ? 1 : 0); 3743 } 3744 break; 3745 3746 case MAC_PROP_EN_100HDX_CAP: 3747 if (hw->phy.media_type != e1000_media_type_copper) { 3748 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3749 } else { 3750 mac_prop_info_set_default_uint8(prh, 3751 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) || 3752 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) 3753 ? 1 : 0); 3754 } 3755 break; 3756 3757 case MAC_PROP_EN_10FDX_CAP: 3758 if (hw->phy.media_type != e1000_media_type_copper) { 3759 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3760 } else { 3761 mac_prop_info_set_default_uint8(prh, 3762 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0); 3763 } 3764 break; 3765 3766 case MAC_PROP_EN_10HDX_CAP: 3767 if (hw->phy.media_type != e1000_media_type_copper) { 3768 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3769 } else { 3770 mac_prop_info_set_default_uint8(prh, 3771 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0); 3772 } 3773 break; 3774 3775 case MAC_PROP_EN_1000HDX_CAP: 3776 if (hw->phy.media_type != e1000_media_type_copper) 3777 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3778 break; 3779 3780 case MAC_PROP_AUTONEG: 3781 if (hw->phy.media_type != e1000_media_type_copper) { 3782 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3783 } else { 3784 mac_prop_info_set_default_uint8(prh, 3785 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) 3786 ? 1 : 0); 3787 } 3788 break; 3789 3790 case MAC_PROP_FLOWCTRL: 3791 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI); 3792 break; 3793 3794 case MAC_PROP_MTU: { 3795 struct e1000_mac_info *mac = &Adapter->shared.mac; 3796 struct e1000_phy_info *phy = &Adapter->shared.phy; 3797 uint32_t max; 3798 3799 /* some MAC types do not support jumbo frames */ 3800 if ((mac->type == e1000_ich8lan) || 3801 ((mac->type == e1000_ich9lan) && (phy->type == 3802 e1000_phy_ife))) { 3803 max = DEFAULT_MTU; 3804 } else { 3805 max = Adapter->max_mtu; 3806 } 3807 3808 mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max); 3809 break; 3810 } 3811 case MAC_PROP_PRIVATE: { 3812 char valstr[64]; 3813 int value; 3814 3815 if (strcmp(pr_name, "_adv_pause_cap") == 0 || 3816 strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 3817 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3818 return; 3819 } else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3820 value = DEFAULT_TX_BCOPY_THRESHOLD; 3821 } else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3822 value = DEFAULT_TX_INTR_ENABLE; 3823 } else if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3824 value = DEFAULT_TX_INTR_DELAY; 3825 } else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3826 value = DEFAULT_TX_INTR_ABS_DELAY; 3827 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3828 value = DEFAULT_RX_BCOPY_THRESHOLD; 3829 } else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3830 value = DEFAULT_RX_LIMIT_ON_INTR; 3831 } else if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3832 value = DEFAULT_RX_INTR_DELAY; 3833 } else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3834 value = DEFAULT_RX_INTR_ABS_DELAY; 3835 } else if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3836 value = DEFAULT_INTR_THROTTLING; 3837 } else if (strcmp(pr_name, "_intr_adaptive") == 0) { 3838 value = 1; 3839 } else { 3840 return; 3841 } 3842 3843 (void) snprintf(valstr, sizeof (valstr), "%d", value); 3844 mac_prop_info_set_default_str(prh, valstr); 3845 break; 3846 } 3847 } 3848 } 3849 3850 /* ARGSUSED2 */ 3851 static int 3852 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name, 3853 uint_t pr_valsize, const void *pr_val) 3854 { 3855 int err = 0; 3856 long result; 3857 struct e1000_hw *hw = &Adapter->shared; 3858 3859 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3860 if (pr_val == NULL) { 3861 err = EINVAL; 3862 return (err); 3863 } 3864 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3865 if (result < MIN_TX_BCOPY_THRESHOLD || 3866 result > MAX_TX_BCOPY_THRESHOLD) 3867 err = EINVAL; 3868 else { 3869 Adapter->tx_bcopy_thresh = (uint32_t)result; 3870 } 3871 return (err); 3872 } 3873 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3874 if (pr_val == NULL) { 3875 err = EINVAL; 3876 return (err); 3877 } 3878 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3879 if (result < 0 || result > 1) 3880 err = EINVAL; 3881 else { 3882 Adapter->tx_intr_enable = (result == 1) ? 3883 B_TRUE: B_FALSE; 3884 if (Adapter->tx_intr_enable) 3885 e1000g_mask_tx_interrupt(Adapter); 3886 else 3887 e1000g_clear_tx_interrupt(Adapter); 3888 if (e1000g_check_acc_handle( 3889 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3890 ddi_fm_service_impact(Adapter->dip, 3891 DDI_SERVICE_DEGRADED); 3892 err = EIO; 3893 } 3894 } 3895 return (err); 3896 } 3897 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3898 if (pr_val == NULL) { 3899 err = EINVAL; 3900 return (err); 3901 } 3902 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3903 if (result < MIN_TX_INTR_DELAY || 3904 result > MAX_TX_INTR_DELAY) 3905 err = EINVAL; 3906 else { 3907 Adapter->tx_intr_delay = (uint32_t)result; 3908 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay); 3909 if (e1000g_check_acc_handle( 3910 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3911 ddi_fm_service_impact(Adapter->dip, 3912 DDI_SERVICE_DEGRADED); 3913 err = EIO; 3914 } 3915 } 3916 return (err); 3917 } 3918 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3919 if (pr_val == NULL) { 3920 err = EINVAL; 3921 return (err); 3922 } 3923 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3924 if (result < MIN_TX_INTR_ABS_DELAY || 3925 result > MAX_TX_INTR_ABS_DELAY) 3926 err = EINVAL; 3927 else { 3928 Adapter->tx_intr_abs_delay = (uint32_t)result; 3929 E1000_WRITE_REG(hw, E1000_TADV, 3930 Adapter->tx_intr_abs_delay); 3931 if (e1000g_check_acc_handle( 3932 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3933 ddi_fm_service_impact(Adapter->dip, 3934 DDI_SERVICE_DEGRADED); 3935 err = EIO; 3936 } 3937 } 3938 return (err); 3939 } 3940 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3941 if (pr_val == NULL) { 3942 err = EINVAL; 3943 return (err); 3944 } 3945 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3946 if (result < MIN_RX_BCOPY_THRESHOLD || 3947 result > MAX_RX_BCOPY_THRESHOLD) 3948 err = EINVAL; 3949 else 3950 Adapter->rx_bcopy_thresh = (uint32_t)result; 3951 return (err); 3952 } 3953 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3954 if (pr_val == NULL) { 3955 err = EINVAL; 3956 return (err); 3957 } 3958 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3959 if (result < MIN_RX_LIMIT_ON_INTR || 3960 result > MAX_RX_LIMIT_ON_INTR) 3961 err = EINVAL; 3962 else 3963 Adapter->rx_limit_onintr = (uint32_t)result; 3964 return (err); 3965 } 3966 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3967 if (pr_val == NULL) { 3968 err = EINVAL; 3969 return (err); 3970 } 3971 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3972 if (result < MIN_RX_INTR_DELAY || 3973 result > MAX_RX_INTR_DELAY) 3974 err = EINVAL; 3975 else { 3976 Adapter->rx_intr_delay = (uint32_t)result; 3977 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay); 3978 if (e1000g_check_acc_handle( 3979 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3980 ddi_fm_service_impact(Adapter->dip, 3981 DDI_SERVICE_DEGRADED); 3982 err = EIO; 3983 } 3984 } 3985 return (err); 3986 } 3987 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3988 if (pr_val == NULL) { 3989 err = EINVAL; 3990 return (err); 3991 } 3992 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3993 if (result < MIN_RX_INTR_ABS_DELAY || 3994 result > MAX_RX_INTR_ABS_DELAY) 3995 err = EINVAL; 3996 else { 3997 Adapter->rx_intr_abs_delay = (uint32_t)result; 3998 E1000_WRITE_REG(hw, E1000_RADV, 3999 Adapter->rx_intr_abs_delay); 4000 if (e1000g_check_acc_handle( 4001 Adapter->osdep.reg_handle) != DDI_FM_OK) { 4002 ddi_fm_service_impact(Adapter->dip, 4003 DDI_SERVICE_DEGRADED); 4004 err = EIO; 4005 } 4006 } 4007 return (err); 4008 } 4009 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 4010 if (pr_val == NULL) { 4011 err = EINVAL; 4012 return (err); 4013 } 4014 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4015 if (result < MIN_INTR_THROTTLING || 4016 result > MAX_INTR_THROTTLING) 4017 err = EINVAL; 4018 else { 4019 if (hw->mac.type >= e1000_82540) { 4020 Adapter->intr_throttling_rate = 4021 (uint32_t)result; 4022 E1000_WRITE_REG(hw, E1000_ITR, 4023 Adapter->intr_throttling_rate); 4024 if (e1000g_check_acc_handle( 4025 Adapter->osdep.reg_handle) != DDI_FM_OK) { 4026 ddi_fm_service_impact(Adapter->dip, 4027 DDI_SERVICE_DEGRADED); 4028 err = EIO; 4029 } 4030 } else 4031 err = EINVAL; 4032 } 4033 return (err); 4034 } 4035 if (strcmp(pr_name, "_intr_adaptive") == 0) { 4036 if (pr_val == NULL) { 4037 err = EINVAL; 4038 return (err); 4039 } 4040 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4041 if (result < 0 || result > 1) 4042 err = EINVAL; 4043 else { 4044 if (hw->mac.type >= e1000_82540) { 4045 Adapter->intr_adaptive = (result == 1) ? 4046 B_TRUE : B_FALSE; 4047 } else { 4048 err = EINVAL; 4049 } 4050 } 4051 return (err); 4052 } 4053 return (ENOTSUP); 4054 } 4055 4056 static int 4057 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name, 4058 uint_t pr_valsize, void *pr_val) 4059 { 4060 int err = ENOTSUP; 4061 int value; 4062 4063 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 4064 value = Adapter->param_adv_pause; 4065 err = 0; 4066 goto done; 4067 } 4068 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 4069 value = Adapter->param_adv_asym_pause; 4070 err = 0; 4071 goto done; 4072 } 4073 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 4074 value = Adapter->tx_bcopy_thresh; 4075 err = 0; 4076 goto done; 4077 } 4078 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 4079 value = Adapter->tx_intr_enable; 4080 err = 0; 4081 goto done; 4082 } 4083 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 4084 value = Adapter->tx_intr_delay; 4085 err = 0; 4086 goto done; 4087 } 4088 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 4089 value = Adapter->tx_intr_abs_delay; 4090 err = 0; 4091 goto done; 4092 } 4093 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 4094 value = Adapter->rx_bcopy_thresh; 4095 err = 0; 4096 goto done; 4097 } 4098 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 4099 value = Adapter->rx_limit_onintr; 4100 err = 0; 4101 goto done; 4102 } 4103 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 4104 value = Adapter->rx_intr_delay; 4105 err = 0; 4106 goto done; 4107 } 4108 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 4109 value = Adapter->rx_intr_abs_delay; 4110 err = 0; 4111 goto done; 4112 } 4113 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 4114 value = Adapter->intr_throttling_rate; 4115 err = 0; 4116 goto done; 4117 } 4118 if (strcmp(pr_name, "_intr_adaptive") == 0) { 4119 value = Adapter->intr_adaptive; 4120 err = 0; 4121 goto done; 4122 } 4123 done: 4124 if (err == 0) { 4125 (void) snprintf(pr_val, pr_valsize, "%d", value); 4126 } 4127 return (err); 4128 } 4129 4130 /* 4131 * e1000g_get_conf - get configurations set in e1000g.conf 4132 * This routine gets user-configured values out of the configuration 4133 * file e1000g.conf. 4134 * 4135 * For each configurable value, there is a minimum, a maximum, and a 4136 * default. 4137 * If user does not configure a value, use the default. 4138 * If user configures below the minimum, use the minumum. 4139 * If user configures above the maximum, use the maxumum. 4140 */ 4141 static void 4142 e1000g_get_conf(struct e1000g *Adapter) 4143 { 4144 struct e1000_hw *hw = &Adapter->shared; 4145 boolean_t tbi_compatibility = B_FALSE; 4146 boolean_t is_jumbo = B_FALSE; 4147 int propval; 4148 /* 4149 * decrease the number of descriptors and free packets 4150 * for jumbo frames to reduce tx/rx resource consumption 4151 */ 4152 if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) { 4153 is_jumbo = B_TRUE; 4154 } 4155 4156 /* 4157 * get each configurable property from e1000g.conf 4158 */ 4159 4160 /* 4161 * NumTxDescriptors 4162 */ 4163 Adapter->tx_desc_num_flag = 4164 e1000g_get_prop(Adapter, "NumTxDescriptors", 4165 MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR, 4166 is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC 4167 : DEFAULT_NUM_TX_DESCRIPTOR, &propval); 4168 Adapter->tx_desc_num = propval; 4169 4170 /* 4171 * NumRxDescriptors 4172 */ 4173 Adapter->rx_desc_num_flag = 4174 e1000g_get_prop(Adapter, "NumRxDescriptors", 4175 MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR, 4176 is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC 4177 : DEFAULT_NUM_RX_DESCRIPTOR, &propval); 4178 Adapter->rx_desc_num = propval; 4179 4180 /* 4181 * NumRxFreeList 4182 */ 4183 Adapter->rx_buf_num_flag = 4184 e1000g_get_prop(Adapter, "NumRxFreeList", 4185 MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST, 4186 is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF 4187 : DEFAULT_NUM_RX_FREELIST, &propval); 4188 Adapter->rx_freelist_limit = propval; 4189 4190 /* 4191 * NumTxPacketList 4192 */ 4193 Adapter->tx_buf_num_flag = 4194 e1000g_get_prop(Adapter, "NumTxPacketList", 4195 MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST, 4196 is_jumbo ? DEFAULT_JUMBO_NUM_TX_BUF 4197 : DEFAULT_NUM_TX_FREELIST, &propval); 4198 Adapter->tx_freelist_num = propval; 4199 4200 /* 4201 * FlowControl 4202 */ 4203 hw->fc.send_xon = true; 4204 (void) e1000g_get_prop(Adapter, "FlowControl", 4205 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval); 4206 hw->fc.requested_mode = propval; 4207 /* 4 is the setting that says "let the eeprom decide" */ 4208 if (hw->fc.requested_mode == 4) 4209 hw->fc.requested_mode = e1000_fc_default; 4210 4211 /* 4212 * Max Num Receive Packets on Interrupt 4213 */ 4214 (void) e1000g_get_prop(Adapter, "MaxNumReceivePackets", 4215 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR, 4216 DEFAULT_RX_LIMIT_ON_INTR, &propval); 4217 Adapter->rx_limit_onintr = propval; 4218 4219 /* 4220 * PHY master slave setting 4221 */ 4222 (void) e1000g_get_prop(Adapter, "SetMasterSlave", 4223 e1000_ms_hw_default, e1000_ms_auto, 4224 e1000_ms_hw_default, &propval); 4225 hw->phy.ms_type = propval; 4226 4227 /* 4228 * Parameter which controls TBI mode workaround, which is only 4229 * needed on certain switches such as Cisco 6500/Foundry 4230 */ 4231 (void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable", 4232 0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval); 4233 tbi_compatibility = (propval == 1); 4234 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility); 4235 4236 /* 4237 * MSI Enable 4238 */ 4239 (void) e1000g_get_prop(Adapter, "MSIEnable", 4240 0, 1, DEFAULT_MSI_ENABLE, &propval); 4241 Adapter->msi_enable = (propval == 1); 4242 4243 /* 4244 * Interrupt Throttling Rate 4245 */ 4246 (void) e1000g_get_prop(Adapter, "intr_throttling_rate", 4247 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING, 4248 DEFAULT_INTR_THROTTLING, &propval); 4249 Adapter->intr_throttling_rate = propval; 4250 4251 /* 4252 * Adaptive Interrupt Blanking Enable/Disable 4253 * It is enabled by default 4254 */ 4255 (void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1, 4256 &propval); 4257 Adapter->intr_adaptive = (propval == 1); 4258 4259 /* 4260 * Hardware checksum enable/disable parameter 4261 */ 4262 (void) e1000g_get_prop(Adapter, "tx_hcksum_enable", 4263 0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval); 4264 Adapter->tx_hcksum_enable = (propval == 1); 4265 /* 4266 * Checksum on/off selection via global parameters. 4267 * 4268 * If the chip is flagged as not capable of (correctly) 4269 * handling checksumming, we don't enable it on either 4270 * Rx or Tx side. Otherwise, we take this chip's settings 4271 * from the patchable global defaults. 4272 * 4273 * We advertise our capabilities only if TX offload is 4274 * enabled. On receive, the stack will accept checksummed 4275 * packets anyway, even if we haven't said we can deliver 4276 * them. 4277 */ 4278 switch (hw->mac.type) { 4279 case e1000_82540: 4280 case e1000_82544: 4281 case e1000_82545: 4282 case e1000_82545_rev_3: 4283 case e1000_82546: 4284 case e1000_82546_rev_3: 4285 case e1000_82571: 4286 case e1000_82572: 4287 case e1000_82573: 4288 case e1000_80003es2lan: 4289 break; 4290 /* 4291 * For the following Intel PRO/1000 chipsets, we have not 4292 * tested the hardware checksum offload capability, so we 4293 * disable the capability for them. 4294 * e1000_82542, 4295 * e1000_82543, 4296 * e1000_82541, 4297 * e1000_82541_rev_2, 4298 * e1000_82547, 4299 * e1000_82547_rev_2, 4300 */ 4301 default: 4302 Adapter->tx_hcksum_enable = B_FALSE; 4303 } 4304 4305 /* 4306 * Large Send Offloading(LSO) Enable/Disable 4307 * If the tx hardware checksum is not enabled, LSO should be 4308 * disabled. 4309 */ 4310 (void) e1000g_get_prop(Adapter, "lso_enable", 4311 0, 1, DEFAULT_LSO_ENABLE, &propval); 4312 Adapter->lso_enable = (propval == 1); 4313 4314 switch (hw->mac.type) { 4315 case e1000_82546: 4316 case e1000_82546_rev_3: 4317 if (Adapter->lso_enable) 4318 Adapter->lso_premature_issue = B_TRUE; 4319 /* FALLTHRU */ 4320 case e1000_82571: 4321 case e1000_82572: 4322 case e1000_82573: 4323 case e1000_80003es2lan: 4324 break; 4325 default: 4326 Adapter->lso_enable = B_FALSE; 4327 } 4328 4329 if (!Adapter->tx_hcksum_enable) { 4330 Adapter->lso_premature_issue = B_FALSE; 4331 Adapter->lso_enable = B_FALSE; 4332 } 4333 4334 /* 4335 * If mem_workaround_82546 is enabled, the rx buffer allocated by 4336 * e1000_82545, e1000_82546 and e1000_82546_rev_3 4337 * will not cross 64k boundary. 4338 */ 4339 (void) e1000g_get_prop(Adapter, "mem_workaround_82546", 4340 0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval); 4341 Adapter->mem_workaround_82546 = (propval == 1); 4342 4343 /* 4344 * Max number of multicast addresses 4345 */ 4346 (void) e1000g_get_prop(Adapter, "mcast_max_num", 4347 MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32, 4348 &propval); 4349 Adapter->mcast_max_num = propval; 4350 } 4351 4352 /* 4353 * e1000g_get_prop - routine to read properties 4354 * 4355 * Get a user-configure property value out of the configuration 4356 * file e1000g.conf. 4357 * 4358 * Caller provides name of the property, a default value, a minimum 4359 * value, a maximum value and a pointer to the returned property 4360 * value. 4361 * 4362 * Return B_TRUE if the configured value of the property is not a default 4363 * value, otherwise return B_FALSE. 4364 */ 4365 static boolean_t 4366 e1000g_get_prop(struct e1000g *Adapter, /* point to per-adapter structure */ 4367 char *propname, /* name of the property */ 4368 int minval, /* minimum acceptable value */ 4369 int maxval, /* maximim acceptable value */ 4370 int defval, /* default value */ 4371 int *propvalue) /* property value return to caller */ 4372 { 4373 int propval; /* value returned for requested property */ 4374 int *props; /* point to array of properties returned */ 4375 uint_t nprops; /* number of property value returned */ 4376 boolean_t ret = B_TRUE; 4377 4378 /* 4379 * get the array of properties from the config file 4380 */ 4381 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip, 4382 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) { 4383 /* got some properties, test if we got enough */ 4384 if (Adapter->instance < nprops) { 4385 propval = props[Adapter->instance]; 4386 } else { 4387 /* not enough properties configured */ 4388 propval = defval; 4389 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4390 "Not Enough %s values found in e1000g.conf" 4391 " - set to %d\n", 4392 propname, propval); 4393 ret = B_FALSE; 4394 } 4395 4396 /* free memory allocated for properties */ 4397 ddi_prop_free(props); 4398 4399 } else { 4400 propval = defval; 4401 ret = B_FALSE; 4402 } 4403 4404 /* 4405 * enforce limits 4406 */ 4407 if (propval > maxval) { 4408 propval = maxval; 4409 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4410 "Too High %s value in e1000g.conf - set to %d\n", 4411 propname, propval); 4412 } 4413 4414 if (propval < minval) { 4415 propval = minval; 4416 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4417 "Too Low %s value in e1000g.conf - set to %d\n", 4418 propname, propval); 4419 } 4420 4421 *propvalue = propval; 4422 return (ret); 4423 } 4424 4425 static boolean_t 4426 e1000g_link_check(struct e1000g *Adapter) 4427 { 4428 uint16_t speed, duplex, phydata; 4429 boolean_t link_changed = B_FALSE; 4430 struct e1000_hw *hw; 4431 uint32_t reg_tarc; 4432 4433 hw = &Adapter->shared; 4434 4435 if (e1000g_link_up(Adapter)) { 4436 /* 4437 * The Link is up, check whether it was marked as down earlier 4438 */ 4439 if (Adapter->link_state != LINK_STATE_UP) { 4440 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex); 4441 Adapter->link_speed = speed; 4442 Adapter->link_duplex = duplex; 4443 Adapter->link_state = LINK_STATE_UP; 4444 link_changed = B_TRUE; 4445 4446 if (Adapter->link_speed == SPEED_1000) 4447 Adapter->stall_threshold = TX_STALL_TIME_2S; 4448 else 4449 Adapter->stall_threshold = TX_STALL_TIME_8S; 4450 4451 Adapter->tx_link_down_timeout = 0; 4452 4453 if ((hw->mac.type == e1000_82571) || 4454 (hw->mac.type == e1000_82572)) { 4455 reg_tarc = E1000_READ_REG(hw, E1000_TARC(0)); 4456 if (speed == SPEED_1000) 4457 reg_tarc |= (1 << 21); 4458 else 4459 reg_tarc &= ~(1 << 21); 4460 E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc); 4461 } 4462 } 4463 Adapter->smartspeed = 0; 4464 } else { 4465 if (Adapter->link_state != LINK_STATE_DOWN) { 4466 Adapter->link_speed = 0; 4467 Adapter->link_duplex = 0; 4468 Adapter->link_state = LINK_STATE_DOWN; 4469 link_changed = B_TRUE; 4470 4471 /* 4472 * SmartSpeed workaround for Tabor/TanaX, When the 4473 * driver loses link disable auto master/slave 4474 * resolution. 4475 */ 4476 if (hw->phy.type == e1000_phy_igp) { 4477 (void) e1000_read_phy_reg(hw, 4478 PHY_1000T_CTRL, &phydata); 4479 phydata |= CR_1000T_MS_ENABLE; 4480 (void) e1000_write_phy_reg(hw, 4481 PHY_1000T_CTRL, phydata); 4482 } 4483 } else { 4484 e1000g_smartspeed(Adapter); 4485 } 4486 4487 if (Adapter->e1000g_state & E1000G_STARTED) { 4488 if (Adapter->tx_link_down_timeout < 4489 MAX_TX_LINK_DOWN_TIMEOUT) { 4490 Adapter->tx_link_down_timeout++; 4491 } else if (Adapter->tx_link_down_timeout == 4492 MAX_TX_LINK_DOWN_TIMEOUT) { 4493 e1000g_tx_clean(Adapter); 4494 Adapter->tx_link_down_timeout++; 4495 } 4496 } 4497 } 4498 4499 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 4500 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4501 4502 return (link_changed); 4503 } 4504 4505 /* 4506 * e1000g_reset_link - Using the link properties to setup the link 4507 */ 4508 int 4509 e1000g_reset_link(struct e1000g *Adapter) 4510 { 4511 struct e1000_mac_info *mac; 4512 struct e1000_phy_info *phy; 4513 struct e1000_hw *hw; 4514 boolean_t invalid; 4515 4516 mac = &Adapter->shared.mac; 4517 phy = &Adapter->shared.phy; 4518 hw = &Adapter->shared; 4519 invalid = B_FALSE; 4520 4521 if (hw->phy.media_type != e1000_media_type_copper) 4522 goto out; 4523 4524 if (Adapter->param_adv_autoneg == 1) { 4525 mac->autoneg = true; 4526 phy->autoneg_advertised = 0; 4527 4528 /* 4529 * 1000hdx is not supported for autonegotiation 4530 */ 4531 if (Adapter->param_adv_1000fdx == 1) 4532 phy->autoneg_advertised |= ADVERTISE_1000_FULL; 4533 4534 if (Adapter->param_adv_100fdx == 1) 4535 phy->autoneg_advertised |= ADVERTISE_100_FULL; 4536 4537 if (Adapter->param_adv_100hdx == 1) 4538 phy->autoneg_advertised |= ADVERTISE_100_HALF; 4539 4540 if (Adapter->param_adv_10fdx == 1) 4541 phy->autoneg_advertised |= ADVERTISE_10_FULL; 4542 4543 if (Adapter->param_adv_10hdx == 1) 4544 phy->autoneg_advertised |= ADVERTISE_10_HALF; 4545 4546 if (phy->autoneg_advertised == 0) 4547 invalid = B_TRUE; 4548 } else { 4549 mac->autoneg = false; 4550 4551 /* 4552 * For Intel copper cards, 1000fdx and 1000hdx are not 4553 * supported for forced link 4554 */ 4555 if (Adapter->param_adv_100fdx == 1) 4556 mac->forced_speed_duplex = ADVERTISE_100_FULL; 4557 else if (Adapter->param_adv_100hdx == 1) 4558 mac->forced_speed_duplex = ADVERTISE_100_HALF; 4559 else if (Adapter->param_adv_10fdx == 1) 4560 mac->forced_speed_duplex = ADVERTISE_10_FULL; 4561 else if (Adapter->param_adv_10hdx == 1) 4562 mac->forced_speed_duplex = ADVERTISE_10_HALF; 4563 else 4564 invalid = B_TRUE; 4565 4566 } 4567 4568 if (invalid) { 4569 e1000g_log(Adapter, CE_WARN, 4570 "Invalid link settings. Setup link to " 4571 "support autonegotiation with all link capabilities."); 4572 mac->autoneg = true; 4573 phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 4574 } 4575 4576 out: 4577 return (e1000_setup_link(&Adapter->shared)); 4578 } 4579 4580 static void 4581 e1000g_timer_tx_resched(struct e1000g *Adapter) 4582 { 4583 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 4584 4585 rw_enter(&Adapter->chip_lock, RW_READER); 4586 4587 if (tx_ring->resched_needed && 4588 ((ddi_get_lbolt() - tx_ring->resched_timestamp) > 4589 drv_usectohz(1000000)) && 4590 (Adapter->e1000g_state & E1000G_STARTED) && 4591 (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) { 4592 tx_ring->resched_needed = B_FALSE; 4593 mac_tx_update(Adapter->mh); 4594 E1000G_STAT(tx_ring->stat_reschedule); 4595 E1000G_STAT(tx_ring->stat_timer_reschedule); 4596 } 4597 4598 rw_exit(&Adapter->chip_lock); 4599 } 4600 4601 static void 4602 e1000g_local_timer(void *ws) 4603 { 4604 struct e1000g *Adapter = (struct e1000g *)ws; 4605 struct e1000_hw *hw; 4606 e1000g_ether_addr_t ether_addr; 4607 boolean_t link_changed; 4608 4609 hw = &Adapter->shared; 4610 4611 if (Adapter->e1000g_state & E1000G_ERROR) { 4612 rw_enter(&Adapter->chip_lock, RW_WRITER); 4613 Adapter->e1000g_state &= ~E1000G_ERROR; 4614 rw_exit(&Adapter->chip_lock); 4615 4616 Adapter->reset_count++; 4617 if (e1000g_global_reset(Adapter)) { 4618 ddi_fm_service_impact(Adapter->dip, 4619 DDI_SERVICE_RESTORED); 4620 e1000g_timer_tx_resched(Adapter); 4621 } else 4622 ddi_fm_service_impact(Adapter->dip, 4623 DDI_SERVICE_LOST); 4624 return; 4625 } 4626 4627 if (e1000g_stall_check(Adapter)) { 4628 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 4629 "Tx stall detected. Activate automatic recovery.\n"); 4630 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL); 4631 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 4632 Adapter->reset_count++; 4633 if (e1000g_reset_adapter(Adapter)) { 4634 ddi_fm_service_impact(Adapter->dip, 4635 DDI_SERVICE_RESTORED); 4636 e1000g_timer_tx_resched(Adapter); 4637 } 4638 return; 4639 } 4640 4641 link_changed = B_FALSE; 4642 rw_enter(&Adapter->chip_lock, RW_READER); 4643 if (Adapter->link_complete) 4644 link_changed = e1000g_link_check(Adapter); 4645 rw_exit(&Adapter->chip_lock); 4646 4647 if (link_changed) { 4648 if (!Adapter->reset_flag && 4649 (Adapter->e1000g_state & E1000G_STARTED) && 4650 !(Adapter->e1000g_state & E1000G_SUSPENDED)) 4651 mac_link_update(Adapter->mh, Adapter->link_state); 4652 if (Adapter->link_state == LINK_STATE_UP) 4653 Adapter->reset_flag = B_FALSE; 4654 } 4655 /* 4656 * Workaround for esb2. Data stuck in fifo on a link 4657 * down event. Reset the adapter to recover it. 4658 */ 4659 if (Adapter->esb2_workaround) { 4660 Adapter->esb2_workaround = B_FALSE; 4661 (void) e1000g_reset_adapter(Adapter); 4662 return; 4663 } 4664 4665 /* 4666 * With 82571 controllers, any locally administered address will 4667 * be overwritten when there is a reset on the other port. 4668 * Detect this circumstance and correct it. 4669 */ 4670 if ((hw->mac.type == e1000_82571) && e1000_get_laa_state_82571(hw)) { 4671 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0); 4672 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1); 4673 4674 ether_addr.reg.low = ntohl(ether_addr.reg.low); 4675 ether_addr.reg.high = ntohl(ether_addr.reg.high); 4676 4677 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) || 4678 (ether_addr.mac.addr[4] != hw->mac.addr[1]) || 4679 (ether_addr.mac.addr[3] != hw->mac.addr[2]) || 4680 (ether_addr.mac.addr[2] != hw->mac.addr[3]) || 4681 (ether_addr.mac.addr[1] != hw->mac.addr[4]) || 4682 (ether_addr.mac.addr[0] != hw->mac.addr[5])) { 4683 (void) e1000_rar_set(hw, hw->mac.addr, 0); 4684 } 4685 } 4686 4687 /* 4688 * Long TTL workaround for 82541/82547 4689 */ 4690 (void) e1000_igp_ttl_workaround_82547(hw); 4691 4692 /* 4693 * Check for Adaptive IFS settings If there are lots of collisions 4694 * change the value in steps... 4695 * These properties should only be set for 10/100 4696 */ 4697 if ((hw->phy.media_type == e1000_media_type_copper) && 4698 ((Adapter->link_speed == SPEED_100) || 4699 (Adapter->link_speed == SPEED_10))) { 4700 e1000_update_adaptive(hw); 4701 } 4702 /* 4703 * Set Timer Interrupts 4704 */ 4705 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 4706 4707 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 4708 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4709 else 4710 e1000g_timer_tx_resched(Adapter); 4711 4712 restart_watchdog_timer(Adapter); 4713 } 4714 4715 /* 4716 * The function e1000g_link_timer() is called when the timer for link setup 4717 * is expired, which indicates the completion of the link setup. The link 4718 * state will not be updated until the link setup is completed. And the 4719 * link state will not be sent to the upper layer through mac_link_update() 4720 * in this function. It will be updated in the local timer routine or the 4721 * interrupt service routine after the interface is started (plumbed). 4722 */ 4723 static void 4724 e1000g_link_timer(void *arg) 4725 { 4726 struct e1000g *Adapter = (struct e1000g *)arg; 4727 4728 mutex_enter(&Adapter->link_lock); 4729 Adapter->link_complete = B_TRUE; 4730 Adapter->link_tid = 0; 4731 mutex_exit(&Adapter->link_lock); 4732 } 4733 4734 /* 4735 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf 4736 * 4737 * This function read the forced speed and duplex for 10/100 Mbps speeds 4738 * and also for 1000 Mbps speeds from the e1000g.conf file 4739 */ 4740 static void 4741 e1000g_force_speed_duplex(struct e1000g *Adapter) 4742 { 4743 int forced; 4744 int propval; 4745 struct e1000_mac_info *mac = &Adapter->shared.mac; 4746 struct e1000_phy_info *phy = &Adapter->shared.phy; 4747 4748 /* 4749 * get value out of config file 4750 */ 4751 (void) e1000g_get_prop(Adapter, "ForceSpeedDuplex", 4752 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced); 4753 4754 switch (forced) { 4755 case GDIAG_10_HALF: 4756 /* 4757 * Disable Auto Negotiation 4758 */ 4759 mac->autoneg = false; 4760 mac->forced_speed_duplex = ADVERTISE_10_HALF; 4761 break; 4762 case GDIAG_10_FULL: 4763 /* 4764 * Disable Auto Negotiation 4765 */ 4766 mac->autoneg = false; 4767 mac->forced_speed_duplex = ADVERTISE_10_FULL; 4768 break; 4769 case GDIAG_100_HALF: 4770 /* 4771 * Disable Auto Negotiation 4772 */ 4773 mac->autoneg = false; 4774 mac->forced_speed_duplex = ADVERTISE_100_HALF; 4775 break; 4776 case GDIAG_100_FULL: 4777 /* 4778 * Disable Auto Negotiation 4779 */ 4780 mac->autoneg = false; 4781 mac->forced_speed_duplex = ADVERTISE_100_FULL; 4782 break; 4783 case GDIAG_1000_FULL: 4784 /* 4785 * The gigabit spec requires autonegotiation. Therefore, 4786 * when the user wants to force the speed to 1000Mbps, we 4787 * enable AutoNeg, but only allow the harware to advertise 4788 * 1000Mbps. This is different from 10/100 operation, where 4789 * we are allowed to link without any negotiation. 4790 */ 4791 mac->autoneg = true; 4792 phy->autoneg_advertised = ADVERTISE_1000_FULL; 4793 break; 4794 default: /* obey the setting of AutoNegAdvertised */ 4795 mac->autoneg = true; 4796 (void) e1000g_get_prop(Adapter, "AutoNegAdvertised", 4797 0, AUTONEG_ADVERTISE_SPEED_DEFAULT, 4798 AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval); 4799 phy->autoneg_advertised = (uint16_t)propval; 4800 break; 4801 } /* switch */ 4802 } 4803 4804 /* 4805 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf 4806 * 4807 * This function reads MaxFrameSize from e1000g.conf 4808 */ 4809 static void 4810 e1000g_get_max_frame_size(struct e1000g *Adapter) 4811 { 4812 int max_frame; 4813 4814 /* 4815 * get value out of config file 4816 */ 4817 (void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0, 4818 &max_frame); 4819 4820 switch (max_frame) { 4821 case 0: 4822 Adapter->default_mtu = ETHERMTU; 4823 break; 4824 case 1: 4825 Adapter->default_mtu = FRAME_SIZE_UPTO_4K - 4826 sizeof (struct ether_vlan_header) - ETHERFCSL; 4827 break; 4828 case 2: 4829 Adapter->default_mtu = FRAME_SIZE_UPTO_8K - 4830 sizeof (struct ether_vlan_header) - ETHERFCSL; 4831 break; 4832 case 3: 4833 Adapter->default_mtu = FRAME_SIZE_UPTO_16K - 4834 sizeof (struct ether_vlan_header) - ETHERFCSL; 4835 break; 4836 default: 4837 Adapter->default_mtu = ETHERMTU; 4838 break; 4839 } /* switch */ 4840 4841 /* 4842 * If the user configed MTU is larger than the deivce's maximum MTU, 4843 * the MTU is set to the deivce's maximum value. 4844 */ 4845 if (Adapter->default_mtu > Adapter->max_mtu) 4846 Adapter->default_mtu = Adapter->max_mtu; 4847 4848 Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu); 4849 } 4850 4851 /* 4852 * e1000g_pch_limits - Apply limits of the PCH silicon type 4853 * 4854 * At any frame size larger than the ethernet default, 4855 * prevent linking at 10/100 speeds. 4856 */ 4857 static void 4858 e1000g_pch_limits(struct e1000g *Adapter) 4859 { 4860 struct e1000_hw *hw = &Adapter->shared; 4861 4862 /* only applies to PCH silicon type */ 4863 if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan) 4864 return; 4865 4866 /* only applies to frames larger than ethernet default */ 4867 if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) { 4868 hw->mac.autoneg = true; 4869 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL; 4870 4871 Adapter->param_adv_autoneg = 1; 4872 Adapter->param_adv_1000fdx = 1; 4873 4874 Adapter->param_adv_100fdx = 0; 4875 Adapter->param_adv_100hdx = 0; 4876 Adapter->param_adv_10fdx = 0; 4877 Adapter->param_adv_10hdx = 0; 4878 4879 e1000g_param_sync(Adapter); 4880 } 4881 } 4882 4883 /* 4884 * e1000g_mtu2maxframe - convert given MTU to maximum frame size 4885 */ 4886 static uint32_t 4887 e1000g_mtu2maxframe(uint32_t mtu) 4888 { 4889 uint32_t maxframe; 4890 4891 maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL; 4892 4893 return (maxframe); 4894 } 4895 4896 static void 4897 arm_watchdog_timer(struct e1000g *Adapter) 4898 { 4899 Adapter->watchdog_tid = 4900 timeout(e1000g_local_timer, 4901 (void *)Adapter, 1 * drv_usectohz(1000000)); 4902 } 4903 4904 static void 4905 enable_watchdog_timer(struct e1000g *Adapter) 4906 { 4907 mutex_enter(&Adapter->watchdog_lock); 4908 4909 if (!Adapter->watchdog_timer_enabled) { 4910 Adapter->watchdog_timer_enabled = B_TRUE; 4911 Adapter->watchdog_timer_started = B_TRUE; 4912 arm_watchdog_timer(Adapter); 4913 } 4914 4915 mutex_exit(&Adapter->watchdog_lock); 4916 } 4917 4918 static void 4919 disable_watchdog_timer(struct e1000g *Adapter) 4920 { 4921 timeout_id_t tid; 4922 4923 mutex_enter(&Adapter->watchdog_lock); 4924 4925 Adapter->watchdog_timer_enabled = B_FALSE; 4926 Adapter->watchdog_timer_started = B_FALSE; 4927 tid = Adapter->watchdog_tid; 4928 Adapter->watchdog_tid = 0; 4929 4930 mutex_exit(&Adapter->watchdog_lock); 4931 4932 if (tid != 0) 4933 (void) untimeout(tid); 4934 } 4935 4936 static void 4937 start_watchdog_timer(struct e1000g *Adapter) 4938 { 4939 mutex_enter(&Adapter->watchdog_lock); 4940 4941 if (Adapter->watchdog_timer_enabled) { 4942 if (!Adapter->watchdog_timer_started) { 4943 Adapter->watchdog_timer_started = B_TRUE; 4944 arm_watchdog_timer(Adapter); 4945 } 4946 } 4947 4948 mutex_exit(&Adapter->watchdog_lock); 4949 } 4950 4951 static void 4952 restart_watchdog_timer(struct e1000g *Adapter) 4953 { 4954 mutex_enter(&Adapter->watchdog_lock); 4955 4956 if (Adapter->watchdog_timer_started) 4957 arm_watchdog_timer(Adapter); 4958 4959 mutex_exit(&Adapter->watchdog_lock); 4960 } 4961 4962 static void 4963 stop_watchdog_timer(struct e1000g *Adapter) 4964 { 4965 timeout_id_t tid; 4966 4967 mutex_enter(&Adapter->watchdog_lock); 4968 4969 Adapter->watchdog_timer_started = B_FALSE; 4970 tid = Adapter->watchdog_tid; 4971 Adapter->watchdog_tid = 0; 4972 4973 mutex_exit(&Adapter->watchdog_lock); 4974 4975 if (tid != 0) 4976 (void) untimeout(tid); 4977 } 4978 4979 static void 4980 stop_link_timer(struct e1000g *Adapter) 4981 { 4982 timeout_id_t tid; 4983 4984 /* Disable the link timer */ 4985 mutex_enter(&Adapter->link_lock); 4986 4987 tid = Adapter->link_tid; 4988 Adapter->link_tid = 0; 4989 4990 mutex_exit(&Adapter->link_lock); 4991 4992 if (tid != 0) 4993 (void) untimeout(tid); 4994 } 4995 4996 static void 4997 stop_82547_timer(e1000g_tx_ring_t *tx_ring) 4998 { 4999 timeout_id_t tid; 5000 5001 /* Disable the tx timer for 82547 chipset */ 5002 mutex_enter(&tx_ring->tx_lock); 5003 5004 tx_ring->timer_enable_82547 = B_FALSE; 5005 tid = tx_ring->timer_id_82547; 5006 tx_ring->timer_id_82547 = 0; 5007 5008 mutex_exit(&tx_ring->tx_lock); 5009 5010 if (tid != 0) 5011 (void) untimeout(tid); 5012 } 5013 5014 void 5015 e1000g_clear_interrupt(struct e1000g *Adapter) 5016 { 5017 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 5018 0xffffffff & ~E1000_IMS_RXSEQ); 5019 } 5020 5021 void 5022 e1000g_mask_interrupt(struct e1000g *Adapter) 5023 { 5024 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, 5025 IMS_ENABLE_MASK & ~E1000_IMS_TXDW); 5026 5027 if (Adapter->tx_intr_enable) 5028 e1000g_mask_tx_interrupt(Adapter); 5029 } 5030 5031 /* 5032 * This routine is called by e1000g_quiesce(), therefore must not block. 5033 */ 5034 void 5035 e1000g_clear_all_interrupts(struct e1000g *Adapter) 5036 { 5037 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff); 5038 } 5039 5040 void 5041 e1000g_mask_tx_interrupt(struct e1000g *Adapter) 5042 { 5043 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW); 5044 } 5045 5046 void 5047 e1000g_clear_tx_interrupt(struct e1000g *Adapter) 5048 { 5049 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW); 5050 } 5051 5052 static void 5053 e1000g_smartspeed(struct e1000g *Adapter) 5054 { 5055 struct e1000_hw *hw = &Adapter->shared; 5056 uint16_t phy_status; 5057 uint16_t phy_ctrl; 5058 5059 /* 5060 * If we're not T-or-T, or we're not autoneg'ing, or we're not 5061 * advertising 1000Full, we don't even use the workaround 5062 */ 5063 if ((hw->phy.type != e1000_phy_igp) || 5064 !hw->mac.autoneg || 5065 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)) 5066 return; 5067 5068 /* 5069 * True if this is the first call of this function or after every 5070 * 30 seconds of not having link 5071 */ 5072 if (Adapter->smartspeed == 0) { 5073 /* 5074 * If Master/Slave config fault is asserted twice, we 5075 * assume back-to-back 5076 */ 5077 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 5078 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 5079 return; 5080 5081 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 5082 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 5083 return; 5084 /* 5085 * We're assuming back-2-back because our status register 5086 * insists! there's a fault in the master/slave 5087 * relationship that was "negotiated" 5088 */ 5089 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 5090 /* 5091 * Is the phy configured for manual configuration of 5092 * master/slave? 5093 */ 5094 if (phy_ctrl & CR_1000T_MS_ENABLE) { 5095 /* 5096 * Yes. Then disable manual configuration (enable 5097 * auto configuration) of master/slave 5098 */ 5099 phy_ctrl &= ~CR_1000T_MS_ENABLE; 5100 (void) e1000_write_phy_reg(hw, 5101 PHY_1000T_CTRL, phy_ctrl); 5102 /* 5103 * Effectively starting the clock 5104 */ 5105 Adapter->smartspeed++; 5106 /* 5107 * Restart autonegotiation 5108 */ 5109 if (!e1000_phy_setup_autoneg(hw) && 5110 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 5111 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 5112 MII_CR_RESTART_AUTO_NEG); 5113 (void) e1000_write_phy_reg(hw, 5114 PHY_CONTROL, phy_ctrl); 5115 } 5116 } 5117 return; 5118 /* 5119 * Has 6 seconds transpired still without link? Remember, 5120 * you should reset the smartspeed counter once you obtain 5121 * link 5122 */ 5123 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 5124 /* 5125 * Yes. Remember, we did at the start determine that 5126 * there's a master/slave configuration fault, so we're 5127 * still assuming there's someone on the other end, but we 5128 * just haven't yet been able to talk to it. We then 5129 * re-enable auto configuration of master/slave to see if 5130 * we're running 2/3 pair cables. 5131 */ 5132 /* 5133 * If still no link, perhaps using 2/3 pair cable 5134 */ 5135 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 5136 phy_ctrl |= CR_1000T_MS_ENABLE; 5137 (void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 5138 /* 5139 * Restart autoneg with phy enabled for manual 5140 * configuration of master/slave 5141 */ 5142 if (!e1000_phy_setup_autoneg(hw) && 5143 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 5144 phy_ctrl |= 5145 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 5146 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl); 5147 } 5148 /* 5149 * Hopefully, there are no more faults and we've obtained 5150 * link as a result. 5151 */ 5152 } 5153 /* 5154 * Restart process after E1000_SMARTSPEED_MAX iterations (30 5155 * seconds) 5156 */ 5157 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 5158 Adapter->smartspeed = 0; 5159 } 5160 5161 static boolean_t 5162 is_valid_mac_addr(uint8_t *mac_addr) 5163 { 5164 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 5165 const uint8_t addr_test2[6] = 5166 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 5167 5168 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 5169 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 5170 return (B_FALSE); 5171 5172 return (B_TRUE); 5173 } 5174 5175 /* 5176 * e1000g_stall_check - check for tx stall 5177 * 5178 * This function checks if the adapter is stalled (in transmit). 5179 * 5180 * It is called each time the watchdog timeout is invoked. 5181 * If the transmit descriptor reclaim continuously fails, 5182 * the watchdog value will increment by 1. If the watchdog 5183 * value exceeds the threshold, the adapter is assumed to 5184 * have stalled and need to be reset. 5185 */ 5186 static boolean_t 5187 e1000g_stall_check(struct e1000g *Adapter) 5188 { 5189 e1000g_tx_ring_t *tx_ring; 5190 5191 tx_ring = Adapter->tx_ring; 5192 5193 if (Adapter->link_state != LINK_STATE_UP) 5194 return (B_FALSE); 5195 5196 (void) e1000g_recycle(tx_ring); 5197 5198 if (Adapter->stall_flag) 5199 return (B_TRUE); 5200 5201 return (B_FALSE); 5202 } 5203 5204 #ifdef E1000G_DEBUG 5205 static enum ioc_reply 5206 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp) 5207 { 5208 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd); 5209 e1000g_peekpoke_t *ppd; 5210 uint64_t mem_va; 5211 uint64_t maxoff; 5212 boolean_t peek; 5213 5214 switch (iocp->ioc_cmd) { 5215 5216 case E1000G_IOC_REG_PEEK: 5217 peek = B_TRUE; 5218 break; 5219 5220 case E1000G_IOC_REG_POKE: 5221 peek = B_FALSE; 5222 break; 5223 5224 default: 5225 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL, 5226 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n", 5227 iocp->ioc_cmd); 5228 return (IOC_INVAL); 5229 } 5230 5231 /* 5232 * Validate format of ioctl 5233 */ 5234 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t)) 5235 return (IOC_INVAL); 5236 if (mp->b_cont == NULL) 5237 return (IOC_INVAL); 5238 5239 ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr; 5240 5241 /* 5242 * Validate request parameters 5243 */ 5244 switch (ppd->pp_acc_space) { 5245 5246 default: 5247 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL, 5248 "e1000g_diag_ioctl: invalid access space 0x%X\n", 5249 ppd->pp_acc_space); 5250 return (IOC_INVAL); 5251 5252 case E1000G_PP_SPACE_REG: 5253 /* 5254 * Memory-mapped I/O space 5255 */ 5256 ASSERT(ppd->pp_acc_size == 4); 5257 if (ppd->pp_acc_size != 4) 5258 return (IOC_INVAL); 5259 5260 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0) 5261 return (IOC_INVAL); 5262 5263 mem_va = 0; 5264 maxoff = 0x10000; 5265 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg; 5266 break; 5267 5268 case E1000G_PP_SPACE_E1000G: 5269 /* 5270 * E1000g data structure! 5271 */ 5272 mem_va = (uintptr_t)e1000gp; 5273 maxoff = sizeof (struct e1000g); 5274 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem; 5275 break; 5276 5277 } 5278 5279 if (ppd->pp_acc_offset >= maxoff) 5280 return (IOC_INVAL); 5281 5282 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff) 5283 return (IOC_INVAL); 5284 5285 /* 5286 * All OK - go! 5287 */ 5288 ppd->pp_acc_offset += mem_va; 5289 (*ppfn)(e1000gp, ppd); 5290 return (peek ? IOC_REPLY : IOC_ACK); 5291 } 5292 5293 static void 5294 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5295 { 5296 ddi_acc_handle_t handle; 5297 uint32_t *regaddr; 5298 5299 handle = e1000gp->osdep.reg_handle; 5300 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr + 5301 (uintptr_t)ppd->pp_acc_offset); 5302 5303 ppd->pp_acc_data = ddi_get32(handle, regaddr); 5304 } 5305 5306 static void 5307 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5308 { 5309 ddi_acc_handle_t handle; 5310 uint32_t *regaddr; 5311 uint32_t value; 5312 5313 handle = e1000gp->osdep.reg_handle; 5314 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr + 5315 (uintptr_t)ppd->pp_acc_offset); 5316 value = (uint32_t)ppd->pp_acc_data; 5317 5318 ddi_put32(handle, regaddr, value); 5319 } 5320 5321 static void 5322 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5323 { 5324 uint64_t value; 5325 void *vaddr; 5326 5327 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 5328 5329 switch (ppd->pp_acc_size) { 5330 case 1: 5331 value = *(uint8_t *)vaddr; 5332 break; 5333 5334 case 2: 5335 value = *(uint16_t *)vaddr; 5336 break; 5337 5338 case 4: 5339 value = *(uint32_t *)vaddr; 5340 break; 5341 5342 case 8: 5343 value = *(uint64_t *)vaddr; 5344 break; 5345 } 5346 5347 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 5348 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n", 5349 (void *)e1000gp, (void *)ppd, value, vaddr); 5350 5351 ppd->pp_acc_data = value; 5352 } 5353 5354 static void 5355 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5356 { 5357 uint64_t value; 5358 void *vaddr; 5359 5360 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 5361 value = ppd->pp_acc_data; 5362 5363 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 5364 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n", 5365 (void *)e1000gp, (void *)ppd, value, vaddr); 5366 5367 switch (ppd->pp_acc_size) { 5368 case 1: 5369 *(uint8_t *)vaddr = (uint8_t)value; 5370 break; 5371 5372 case 2: 5373 *(uint16_t *)vaddr = (uint16_t)value; 5374 break; 5375 5376 case 4: 5377 *(uint32_t *)vaddr = (uint32_t)value; 5378 break; 5379 5380 case 8: 5381 *(uint64_t *)vaddr = (uint64_t)value; 5382 break; 5383 } 5384 } 5385 #endif 5386 5387 /* 5388 * Loopback Support 5389 */ 5390 static lb_property_t lb_normal = 5391 { normal, "normal", E1000G_LB_NONE }; 5392 static lb_property_t lb_external1000 = 5393 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 }; 5394 static lb_property_t lb_external100 = 5395 { external, "100Mbps", E1000G_LB_EXTERNAL_100 }; 5396 static lb_property_t lb_external10 = 5397 { external, "10Mbps", E1000G_LB_EXTERNAL_10 }; 5398 static lb_property_t lb_phy = 5399 { internal, "PHY", E1000G_LB_INTERNAL_PHY }; 5400 5401 static enum ioc_reply 5402 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp) 5403 { 5404 lb_info_sz_t *lbsp; 5405 lb_property_t *lbpp; 5406 struct e1000_hw *hw; 5407 uint32_t *lbmp; 5408 uint32_t size; 5409 uint32_t value; 5410 5411 hw = &Adapter->shared; 5412 5413 if (mp->b_cont == NULL) 5414 return (IOC_INVAL); 5415 5416 if (!e1000g_check_loopback_support(hw)) { 5417 e1000g_log(NULL, CE_WARN, 5418 "Loopback is not supported on e1000g%d", Adapter->instance); 5419 return (IOC_INVAL); 5420 } 5421 5422 switch (iocp->ioc_cmd) { 5423 default: 5424 return (IOC_INVAL); 5425 5426 case LB_GET_INFO_SIZE: 5427 size = sizeof (lb_info_sz_t); 5428 if (iocp->ioc_count != size) 5429 return (IOC_INVAL); 5430 5431 rw_enter(&Adapter->chip_lock, RW_WRITER); 5432 e1000g_get_phy_state(Adapter); 5433 5434 /* 5435 * Workaround for hardware faults. In order to get a stable 5436 * state of phy, we will wait for a specific interval and 5437 * try again. The time delay is an experiential value based 5438 * on our testing. 5439 */ 5440 msec_delay(100); 5441 e1000g_get_phy_state(Adapter); 5442 rw_exit(&Adapter->chip_lock); 5443 5444 value = sizeof (lb_normal); 5445 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5446 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5447 (hw->phy.media_type == e1000_media_type_fiber) || 5448 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5449 value += sizeof (lb_phy); 5450 switch (hw->mac.type) { 5451 case e1000_82571: 5452 case e1000_82572: 5453 case e1000_80003es2lan: 5454 value += sizeof (lb_external1000); 5455 break; 5456 } 5457 } 5458 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5459 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5460 value += sizeof (lb_external100); 5461 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5462 value += sizeof (lb_external10); 5463 5464 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 5465 *lbsp = value; 5466 break; 5467 5468 case LB_GET_INFO: 5469 value = sizeof (lb_normal); 5470 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5471 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5472 (hw->phy.media_type == e1000_media_type_fiber) || 5473 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5474 value += sizeof (lb_phy); 5475 switch (hw->mac.type) { 5476 case e1000_82571: 5477 case e1000_82572: 5478 case e1000_80003es2lan: 5479 value += sizeof (lb_external1000); 5480 break; 5481 } 5482 } 5483 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5484 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5485 value += sizeof (lb_external100); 5486 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5487 value += sizeof (lb_external10); 5488 5489 size = value; 5490 if (iocp->ioc_count != size) 5491 return (IOC_INVAL); 5492 5493 value = 0; 5494 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 5495 lbpp[value++] = lb_normal; 5496 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5497 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5498 (hw->phy.media_type == e1000_media_type_fiber) || 5499 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5500 lbpp[value++] = lb_phy; 5501 switch (hw->mac.type) { 5502 case e1000_82571: 5503 case e1000_82572: 5504 case e1000_80003es2lan: 5505 lbpp[value++] = lb_external1000; 5506 break; 5507 } 5508 } 5509 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5510 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5511 lbpp[value++] = lb_external100; 5512 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5513 lbpp[value++] = lb_external10; 5514 break; 5515 5516 case LB_GET_MODE: 5517 size = sizeof (uint32_t); 5518 if (iocp->ioc_count != size) 5519 return (IOC_INVAL); 5520 5521 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 5522 *lbmp = Adapter->loopback_mode; 5523 break; 5524 5525 case LB_SET_MODE: 5526 size = 0; 5527 if (iocp->ioc_count != sizeof (uint32_t)) 5528 return (IOC_INVAL); 5529 5530 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 5531 if (!e1000g_set_loopback_mode(Adapter, *lbmp)) 5532 return (IOC_INVAL); 5533 break; 5534 } 5535 5536 iocp->ioc_count = size; 5537 iocp->ioc_error = 0; 5538 5539 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 5540 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 5541 return (IOC_INVAL); 5542 } 5543 5544 return (IOC_REPLY); 5545 } 5546 5547 static boolean_t 5548 e1000g_check_loopback_support(struct e1000_hw *hw) 5549 { 5550 switch (hw->mac.type) { 5551 case e1000_82540: 5552 case e1000_82545: 5553 case e1000_82545_rev_3: 5554 case e1000_82546: 5555 case e1000_82546_rev_3: 5556 case e1000_82541: 5557 case e1000_82541_rev_2: 5558 case e1000_82547: 5559 case e1000_82547_rev_2: 5560 case e1000_82571: 5561 case e1000_82572: 5562 case e1000_82573: 5563 case e1000_82574: 5564 case e1000_80003es2lan: 5565 case e1000_ich9lan: 5566 case e1000_ich10lan: 5567 return (B_TRUE); 5568 } 5569 return (B_FALSE); 5570 } 5571 5572 static boolean_t 5573 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode) 5574 { 5575 struct e1000_hw *hw; 5576 int i, times; 5577 boolean_t link_up; 5578 5579 if (mode == Adapter->loopback_mode) 5580 return (B_TRUE); 5581 5582 hw = &Adapter->shared; 5583 times = 0; 5584 5585 Adapter->loopback_mode = mode; 5586 5587 if (mode == E1000G_LB_NONE) { 5588 /* Reset the chip */ 5589 hw->phy.autoneg_wait_to_complete = true; 5590 (void) e1000g_reset_adapter(Adapter); 5591 hw->phy.autoneg_wait_to_complete = false; 5592 return (B_TRUE); 5593 } 5594 5595 again: 5596 5597 rw_enter(&Adapter->chip_lock, RW_WRITER); 5598 5599 switch (mode) { 5600 default: 5601 rw_exit(&Adapter->chip_lock); 5602 return (B_FALSE); 5603 5604 case E1000G_LB_EXTERNAL_1000: 5605 e1000g_set_external_loopback_1000(Adapter); 5606 break; 5607 5608 case E1000G_LB_EXTERNAL_100: 5609 e1000g_set_external_loopback_100(Adapter); 5610 break; 5611 5612 case E1000G_LB_EXTERNAL_10: 5613 e1000g_set_external_loopback_10(Adapter); 5614 break; 5615 5616 case E1000G_LB_INTERNAL_PHY: 5617 e1000g_set_internal_loopback(Adapter); 5618 break; 5619 } 5620 5621 times++; 5622 5623 rw_exit(&Adapter->chip_lock); 5624 5625 /* Wait for link up */ 5626 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--) 5627 msec_delay(100); 5628 5629 rw_enter(&Adapter->chip_lock, RW_WRITER); 5630 5631 link_up = e1000g_link_up(Adapter); 5632 5633 rw_exit(&Adapter->chip_lock); 5634 5635 if (!link_up) { 5636 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5637 "Failed to get the link up"); 5638 if (times < 2) { 5639 /* Reset the link */ 5640 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5641 "Reset the link ..."); 5642 (void) e1000g_reset_adapter(Adapter); 5643 goto again; 5644 } 5645 5646 /* 5647 * Reset driver to loopback none when set loopback failed 5648 * for the second time. 5649 */ 5650 Adapter->loopback_mode = E1000G_LB_NONE; 5651 5652 /* Reset the chip */ 5653 hw->phy.autoneg_wait_to_complete = true; 5654 (void) e1000g_reset_adapter(Adapter); 5655 hw->phy.autoneg_wait_to_complete = false; 5656 5657 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5658 "Set loopback mode failed, reset to loopback none"); 5659 5660 return (B_FALSE); 5661 } 5662 5663 return (B_TRUE); 5664 } 5665 5666 /* 5667 * The following loopback settings are from Intel's technical 5668 * document - "How To Loopback". All the register settings and 5669 * time delay values are directly inherited from the document 5670 * without more explanations available. 5671 */ 5672 static void 5673 e1000g_set_internal_loopback(struct e1000g *Adapter) 5674 { 5675 struct e1000_hw *hw; 5676 uint32_t ctrl; 5677 uint32_t status; 5678 uint16_t phy_ctrl; 5679 uint16_t phy_reg; 5680 uint32_t txcw; 5681 5682 hw = &Adapter->shared; 5683 5684 /* Disable Smart Power Down */ 5685 phy_spd_state(hw, B_FALSE); 5686 5687 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); 5688 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10); 5689 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000; 5690 5691 switch (hw->mac.type) { 5692 case e1000_82540: 5693 case e1000_82545: 5694 case e1000_82545_rev_3: 5695 case e1000_82546: 5696 case e1000_82546_rev_3: 5697 case e1000_82573: 5698 /* Auto-MDI/MDIX off */ 5699 (void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 5700 /* Reset PHY to update Auto-MDI/MDIX */ 5701 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5702 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN); 5703 /* Reset PHY to auto-neg off and force 1000 */ 5704 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5705 phy_ctrl | MII_CR_RESET); 5706 /* 5707 * Disable PHY receiver for 82540/545/546 and 82573 Family. 5708 * See comments above e1000g_set_internal_loopback() for the 5709 * background. 5710 */ 5711 (void) e1000_write_phy_reg(hw, 29, 0x001F); 5712 (void) e1000_write_phy_reg(hw, 30, 0x8FFC); 5713 (void) e1000_write_phy_reg(hw, 29, 0x001A); 5714 (void) e1000_write_phy_reg(hw, 30, 0x8FF0); 5715 break; 5716 case e1000_80003es2lan: 5717 /* Force Link Up */ 5718 (void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 5719 0x1CC); 5720 /* Sets PCS loopback at 1Gbs */ 5721 (void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, 5722 0x1046); 5723 break; 5724 } 5725 5726 /* 5727 * The following registers should be set for e1000_phy_bm phy type. 5728 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy. 5729 * For others, we do not need to set these registers. 5730 */ 5731 if (hw->phy.type == e1000_phy_bm) { 5732 /* Set Default MAC Interface speed to 1GB */ 5733 (void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg); 5734 phy_reg &= ~0x0007; 5735 phy_reg |= 0x006; 5736 (void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg); 5737 /* Assert SW reset for above settings to take effect */ 5738 (void) e1000_phy_commit(hw); 5739 msec_delay(1); 5740 /* Force Full Duplex */ 5741 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg); 5742 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16), 5743 phy_reg | 0x000C); 5744 /* Set Link Up (in force link) */ 5745 (void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg); 5746 (void) e1000_write_phy_reg(hw, PHY_REG(776, 16), 5747 phy_reg | 0x0040); 5748 /* Force Link */ 5749 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg); 5750 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16), 5751 phy_reg | 0x0040); 5752 /* Set Early Link Enable */ 5753 (void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg); 5754 (void) e1000_write_phy_reg(hw, PHY_REG(769, 20), 5755 phy_reg | 0x0400); 5756 } 5757 5758 /* Set loopback */ 5759 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK); 5760 5761 msec_delay(250); 5762 5763 /* Now set up the MAC to the same speed/duplex as the PHY. */ 5764 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5765 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5766 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5767 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5768 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ 5769 E1000_CTRL_FD); /* Force Duplex to FULL */ 5770 5771 switch (hw->mac.type) { 5772 case e1000_82540: 5773 case e1000_82545: 5774 case e1000_82545_rev_3: 5775 case e1000_82546: 5776 case e1000_82546_rev_3: 5777 /* 5778 * For some serdes we'll need to commit the writes now 5779 * so that the status is updated on link 5780 */ 5781 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 5782 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5783 msec_delay(100); 5784 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5785 } 5786 5787 if (hw->phy.media_type == e1000_media_type_copper) { 5788 /* Invert Loss of Signal */ 5789 ctrl |= E1000_CTRL_ILOS; 5790 } else { 5791 /* Set ILOS on fiber nic if half duplex is detected */ 5792 status = E1000_READ_REG(hw, E1000_STATUS); 5793 if ((status & E1000_STATUS_FD) == 0) 5794 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5795 } 5796 break; 5797 5798 case e1000_82571: 5799 case e1000_82572: 5800 /* 5801 * The fiber/SerDes versions of this adapter do not contain an 5802 * accessible PHY. Therefore, loopback beyond MAC must be done 5803 * using SerDes analog loopback. 5804 */ 5805 if (hw->phy.media_type != e1000_media_type_copper) { 5806 /* Disable autoneg by setting bit 31 of TXCW to zero */ 5807 txcw = E1000_READ_REG(hw, E1000_TXCW); 5808 txcw &= ~((uint32_t)1 << 31); 5809 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 5810 5811 /* 5812 * Write 0x410 to Serdes Control register 5813 * to enable Serdes analog loopback 5814 */ 5815 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 5816 msec_delay(10); 5817 } 5818 5819 status = E1000_READ_REG(hw, E1000_STATUS); 5820 /* Set ILOS on fiber nic if half duplex is detected */ 5821 if ((hw->phy.media_type == e1000_media_type_fiber) && 5822 ((status & E1000_STATUS_FD) == 0 || 5823 (status & E1000_STATUS_LU) == 0)) 5824 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5825 else if (hw->phy.media_type == e1000_media_type_internal_serdes) 5826 ctrl |= E1000_CTRL_SLU; 5827 break; 5828 5829 case e1000_82573: 5830 ctrl |= E1000_CTRL_ILOS; 5831 break; 5832 case e1000_ich9lan: 5833 case e1000_ich10lan: 5834 ctrl |= E1000_CTRL_SLU; 5835 break; 5836 } 5837 if (hw->phy.type == e1000_phy_bm) 5838 ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS; 5839 5840 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5841 } 5842 5843 static void 5844 e1000g_set_external_loopback_1000(struct e1000g *Adapter) 5845 { 5846 struct e1000_hw *hw; 5847 uint32_t rctl; 5848 uint32_t ctrl_ext; 5849 uint32_t ctrl; 5850 uint32_t status; 5851 uint32_t txcw; 5852 uint16_t phydata; 5853 5854 hw = &Adapter->shared; 5855 5856 /* Disable Smart Power Down */ 5857 phy_spd_state(hw, B_FALSE); 5858 5859 switch (hw->mac.type) { 5860 case e1000_82571: 5861 case e1000_82572: 5862 switch (hw->phy.media_type) { 5863 case e1000_media_type_copper: 5864 /* Force link up (Must be done before the PHY writes) */ 5865 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5866 ctrl |= E1000_CTRL_SLU; /* Force Link Up */ 5867 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5868 5869 rctl = E1000_READ_REG(hw, E1000_RCTL); 5870 rctl |= (E1000_RCTL_EN | 5871 E1000_RCTL_SBP | 5872 E1000_RCTL_UPE | 5873 E1000_RCTL_MPE | 5874 E1000_RCTL_LPE | 5875 E1000_RCTL_BAM); /* 0x803E */ 5876 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 5877 5878 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 5879 ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA | 5880 E1000_CTRL_EXT_SDP6_DATA | 5881 E1000_CTRL_EXT_SDP3_DATA | 5882 E1000_CTRL_EXT_SDP4_DIR | 5883 E1000_CTRL_EXT_SDP6_DIR | 5884 E1000_CTRL_EXT_SDP3_DIR); /* 0x0DD0 */ 5885 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 5886 5887 /* 5888 * This sequence tunes the PHY's SDP and no customer 5889 * settable values. For background, see comments above 5890 * e1000g_set_internal_loopback(). 5891 */ 5892 (void) e1000_write_phy_reg(hw, 0x0, 0x140); 5893 msec_delay(10); 5894 (void) e1000_write_phy_reg(hw, 0x9, 0x1A00); 5895 (void) e1000_write_phy_reg(hw, 0x12, 0xC10); 5896 (void) e1000_write_phy_reg(hw, 0x12, 0x1C10); 5897 (void) e1000_write_phy_reg(hw, 0x1F37, 0x76); 5898 (void) e1000_write_phy_reg(hw, 0x1F33, 0x1); 5899 (void) e1000_write_phy_reg(hw, 0x1F33, 0x0); 5900 5901 (void) e1000_write_phy_reg(hw, 0x1F35, 0x65); 5902 (void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C); 5903 (void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC); 5904 (void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C); 5905 (void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC); 5906 5907 msec_delay(50); 5908 break; 5909 case e1000_media_type_fiber: 5910 case e1000_media_type_internal_serdes: 5911 status = E1000_READ_REG(hw, E1000_STATUS); 5912 if (((status & E1000_STATUS_LU) == 0) || 5913 (hw->phy.media_type == 5914 e1000_media_type_internal_serdes)) { 5915 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5916 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5917 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5918 } 5919 5920 /* Disable autoneg by setting bit 31 of TXCW to zero */ 5921 txcw = E1000_READ_REG(hw, E1000_TXCW); 5922 txcw &= ~((uint32_t)1 << 31); 5923 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 5924 5925 /* 5926 * Write 0x410 to Serdes Control register 5927 * to enable Serdes analog loopback 5928 */ 5929 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 5930 msec_delay(10); 5931 break; 5932 default: 5933 break; 5934 } 5935 break; 5936 case e1000_82574: 5937 case e1000_80003es2lan: 5938 case e1000_ich9lan: 5939 case e1000_ich10lan: 5940 (void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata); 5941 (void) e1000_write_phy_reg(hw, GG82563_REG(6, 16), 5942 phydata | (1 << 5)); 5943 Adapter->param_adv_autoneg = 1; 5944 Adapter->param_adv_1000fdx = 1; 5945 (void) e1000g_reset_link(Adapter); 5946 break; 5947 } 5948 } 5949 5950 static void 5951 e1000g_set_external_loopback_100(struct e1000g *Adapter) 5952 { 5953 struct e1000_hw *hw; 5954 uint32_t ctrl; 5955 uint16_t phy_ctrl; 5956 5957 hw = &Adapter->shared; 5958 5959 /* Disable Smart Power Down */ 5960 phy_spd_state(hw, B_FALSE); 5961 5962 phy_ctrl = (MII_CR_FULL_DUPLEX | 5963 MII_CR_SPEED_100); 5964 5965 /* Force 100/FD, reset PHY */ 5966 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5967 phy_ctrl | MII_CR_RESET); /* 0xA100 */ 5968 msec_delay(10); 5969 5970 /* Force 100/FD */ 5971 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5972 phy_ctrl); /* 0x2100 */ 5973 msec_delay(10); 5974 5975 /* Now setup the MAC to the same speed/duplex as the PHY. */ 5976 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5977 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5978 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */ 5979 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5980 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5981 E1000_CTRL_SPD_100 | /* Force Speed to 100 */ 5982 E1000_CTRL_FD); /* Force Duplex to FULL */ 5983 5984 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5985 } 5986 5987 static void 5988 e1000g_set_external_loopback_10(struct e1000g *Adapter) 5989 { 5990 struct e1000_hw *hw; 5991 uint32_t ctrl; 5992 uint16_t phy_ctrl; 5993 5994 hw = &Adapter->shared; 5995 5996 /* Disable Smart Power Down */ 5997 phy_spd_state(hw, B_FALSE); 5998 5999 phy_ctrl = (MII_CR_FULL_DUPLEX | 6000 MII_CR_SPEED_10); 6001 6002 /* Force 10/FD, reset PHY */ 6003 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 6004 phy_ctrl | MII_CR_RESET); /* 0x8100 */ 6005 msec_delay(10); 6006 6007 /* Force 10/FD */ 6008 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 6009 phy_ctrl); /* 0x0100 */ 6010 msec_delay(10); 6011 6012 /* Now setup the MAC to the same speed/duplex as the PHY. */ 6013 ctrl = E1000_READ_REG(hw, E1000_CTRL); 6014 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 6015 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */ 6016 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 6017 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 6018 E1000_CTRL_SPD_10 | /* Force Speed to 10 */ 6019 E1000_CTRL_FD); /* Force Duplex to FULL */ 6020 6021 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 6022 } 6023 6024 #ifdef __sparc 6025 static boolean_t 6026 e1000g_find_mac_address(struct e1000g *Adapter) 6027 { 6028 struct e1000_hw *hw = &Adapter->shared; 6029 uchar_t *bytes; 6030 struct ether_addr sysaddr; 6031 uint_t nelts; 6032 int err; 6033 boolean_t found = B_FALSE; 6034 6035 /* 6036 * The "vendor's factory-set address" may already have 6037 * been extracted from the chip, but if the property 6038 * "local-mac-address" is set we use that instead. 6039 * 6040 * We check whether it looks like an array of 6 6041 * bytes (which it should, if OBP set it). If we can't 6042 * make sense of it this way, we'll ignore it. 6043 */ 6044 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 6045 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 6046 if (err == DDI_PROP_SUCCESS) { 6047 if (nelts == ETHERADDRL) { 6048 while (nelts--) 6049 hw->mac.addr[nelts] = bytes[nelts]; 6050 found = B_TRUE; 6051 } 6052 ddi_prop_free(bytes); 6053 } 6054 6055 /* 6056 * Look up the OBP property "local-mac-address?". If the user has set 6057 * 'local-mac-address? = false', use "the system address" instead. 6058 */ 6059 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0, 6060 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 6061 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 6062 if (localetheraddr(NULL, &sysaddr) != 0) { 6063 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 6064 found = B_TRUE; 6065 } 6066 } 6067 ddi_prop_free(bytes); 6068 } 6069 6070 /* 6071 * Finally(!), if there's a valid "mac-address" property (created 6072 * if we netbooted from this interface), we must use this instead 6073 * of any of the above to ensure that the NFS/install server doesn't 6074 * get confused by the address changing as Solaris takes over! 6075 */ 6076 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 6077 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 6078 if (err == DDI_PROP_SUCCESS) { 6079 if (nelts == ETHERADDRL) { 6080 while (nelts--) 6081 hw->mac.addr[nelts] = bytes[nelts]; 6082 found = B_TRUE; 6083 } 6084 ddi_prop_free(bytes); 6085 } 6086 6087 if (found) { 6088 bcopy(hw->mac.addr, hw->mac.perm_addr, 6089 ETHERADDRL); 6090 } 6091 6092 return (found); 6093 } 6094 #endif 6095 6096 static int 6097 e1000g_add_intrs(struct e1000g *Adapter) 6098 { 6099 dev_info_t *devinfo; 6100 int intr_types; 6101 int rc; 6102 6103 devinfo = Adapter->dip; 6104 6105 /* Get supported interrupt types */ 6106 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 6107 6108 if (rc != DDI_SUCCESS) { 6109 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6110 "Get supported interrupt types failed: %d\n", rc); 6111 return (DDI_FAILURE); 6112 } 6113 6114 /* 6115 * Based on Intel Technical Advisory document (TA-160), there are some 6116 * cases where some older Intel PCI-X NICs may "advertise" to the OS 6117 * that it supports MSI, but in fact has problems. 6118 * So we should only enable MSI for PCI-E NICs and disable MSI for old 6119 * PCI/PCI-X NICs. 6120 */ 6121 if (Adapter->shared.mac.type < e1000_82571) 6122 Adapter->msi_enable = B_FALSE; 6123 6124 if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) { 6125 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI); 6126 6127 if (rc != DDI_SUCCESS) { 6128 /* EMPTY */ 6129 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 6130 "Add MSI failed, trying Legacy interrupts\n"); 6131 } else { 6132 Adapter->intr_type = DDI_INTR_TYPE_MSI; 6133 } 6134 } 6135 6136 if ((Adapter->intr_type == 0) && 6137 (intr_types & DDI_INTR_TYPE_FIXED)) { 6138 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED); 6139 6140 if (rc != DDI_SUCCESS) { 6141 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 6142 "Add Legacy interrupts failed\n"); 6143 return (DDI_FAILURE); 6144 } 6145 6146 Adapter->intr_type = DDI_INTR_TYPE_FIXED; 6147 } 6148 6149 if (Adapter->intr_type == 0) { 6150 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 6151 "No interrupts registered\n"); 6152 return (DDI_FAILURE); 6153 } 6154 6155 return (DDI_SUCCESS); 6156 } 6157 6158 /* 6159 * e1000g_intr_add() handles MSI/Legacy interrupts 6160 */ 6161 static int 6162 e1000g_intr_add(struct e1000g *Adapter, int intr_type) 6163 { 6164 dev_info_t *devinfo; 6165 int count, avail, actual; 6166 int x, y, rc, inum = 0; 6167 int flag; 6168 ddi_intr_handler_t *intr_handler; 6169 6170 devinfo = Adapter->dip; 6171 6172 /* get number of interrupts */ 6173 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 6174 if ((rc != DDI_SUCCESS) || (count == 0)) { 6175 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 6176 "Get interrupt number failed. Return: %d, count: %d\n", 6177 rc, count); 6178 return (DDI_FAILURE); 6179 } 6180 6181 /* get number of available interrupts */ 6182 rc = ddi_intr_get_navail(devinfo, intr_type, &avail); 6183 if ((rc != DDI_SUCCESS) || (avail == 0)) { 6184 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 6185 "Get interrupt available number failed. " 6186 "Return: %d, available: %d\n", rc, avail); 6187 return (DDI_FAILURE); 6188 } 6189 6190 if (avail < count) { 6191 /* EMPTY */ 6192 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 6193 "Interrupts count: %d, available: %d\n", 6194 count, avail); 6195 } 6196 6197 /* Allocate an array of interrupt handles */ 6198 Adapter->intr_size = count * sizeof (ddi_intr_handle_t); 6199 Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP); 6200 6201 /* Set NORMAL behavior for both MSI and FIXED interrupt */ 6202 flag = DDI_INTR_ALLOC_NORMAL; 6203 6204 /* call ddi_intr_alloc() */ 6205 rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum, 6206 count, &actual, flag); 6207 6208 if ((rc != DDI_SUCCESS) || (actual == 0)) { 6209 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6210 "Allocate interrupts failed: %d\n", rc); 6211 6212 kmem_free(Adapter->htable, Adapter->intr_size); 6213 return (DDI_FAILURE); 6214 } 6215 6216 if (actual < count) { 6217 /* EMPTY */ 6218 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 6219 "Interrupts requested: %d, received: %d\n", 6220 count, actual); 6221 } 6222 6223 Adapter->intr_cnt = actual; 6224 6225 /* Get priority for first msi, assume remaining are all the same */ 6226 rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri); 6227 6228 if (rc != DDI_SUCCESS) { 6229 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6230 "Get interrupt priority failed: %d\n", rc); 6231 6232 /* Free already allocated intr */ 6233 for (y = 0; y < actual; y++) 6234 (void) ddi_intr_free(Adapter->htable[y]); 6235 6236 kmem_free(Adapter->htable, Adapter->intr_size); 6237 return (DDI_FAILURE); 6238 } 6239 6240 /* 6241 * In Legacy Interrupt mode, for PCI-Express adapters, we should 6242 * use the interrupt service routine e1000g_intr_pciexpress() 6243 * to avoid interrupt stealing when sharing interrupt with other 6244 * devices. 6245 */ 6246 if (Adapter->shared.mac.type < e1000_82571) 6247 intr_handler = e1000g_intr; 6248 else 6249 intr_handler = e1000g_intr_pciexpress; 6250 6251 /* Call ddi_intr_add_handler() */ 6252 for (x = 0; x < actual; x++) { 6253 rc = ddi_intr_add_handler(Adapter->htable[x], 6254 intr_handler, (caddr_t)Adapter, NULL); 6255 6256 if (rc != DDI_SUCCESS) { 6257 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6258 "Add interrupt handler failed: %d\n", rc); 6259 6260 /* Remove already added handler */ 6261 for (y = 0; y < x; y++) 6262 (void) ddi_intr_remove_handler( 6263 Adapter->htable[y]); 6264 6265 /* Free already allocated intr */ 6266 for (y = 0; y < actual; y++) 6267 (void) ddi_intr_free(Adapter->htable[y]); 6268 6269 kmem_free(Adapter->htable, Adapter->intr_size); 6270 return (DDI_FAILURE); 6271 } 6272 } 6273 6274 rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap); 6275 6276 if (rc != DDI_SUCCESS) { 6277 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6278 "Get interrupt cap failed: %d\n", rc); 6279 6280 /* Free already allocated intr */ 6281 for (y = 0; y < actual; y++) { 6282 (void) ddi_intr_remove_handler(Adapter->htable[y]); 6283 (void) ddi_intr_free(Adapter->htable[y]); 6284 } 6285 6286 kmem_free(Adapter->htable, Adapter->intr_size); 6287 return (DDI_FAILURE); 6288 } 6289 6290 return (DDI_SUCCESS); 6291 } 6292 6293 static int 6294 e1000g_rem_intrs(struct e1000g *Adapter) 6295 { 6296 int x; 6297 int rc; 6298 6299 for (x = 0; x < Adapter->intr_cnt; x++) { 6300 rc = ddi_intr_remove_handler(Adapter->htable[x]); 6301 if (rc != DDI_SUCCESS) { 6302 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6303 "Remove intr handler failed: %d\n", rc); 6304 return (DDI_FAILURE); 6305 } 6306 6307 rc = ddi_intr_free(Adapter->htable[x]); 6308 if (rc != DDI_SUCCESS) { 6309 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6310 "Free intr failed: %d\n", rc); 6311 return (DDI_FAILURE); 6312 } 6313 } 6314 6315 kmem_free(Adapter->htable, Adapter->intr_size); 6316 6317 return (DDI_SUCCESS); 6318 } 6319 6320 static int 6321 e1000g_enable_intrs(struct e1000g *Adapter) 6322 { 6323 int x; 6324 int rc; 6325 6326 /* Enable interrupts */ 6327 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) { 6328 /* Call ddi_intr_block_enable() for MSI */ 6329 rc = ddi_intr_block_enable(Adapter->htable, 6330 Adapter->intr_cnt); 6331 if (rc != DDI_SUCCESS) { 6332 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6333 "Enable block intr failed: %d\n", rc); 6334 return (DDI_FAILURE); 6335 } 6336 } else { 6337 /* Call ddi_intr_enable() for Legacy/MSI non block enable */ 6338 for (x = 0; x < Adapter->intr_cnt; x++) { 6339 rc = ddi_intr_enable(Adapter->htable[x]); 6340 if (rc != DDI_SUCCESS) { 6341 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6342 "Enable intr failed: %d\n", rc); 6343 return (DDI_FAILURE); 6344 } 6345 } 6346 } 6347 6348 return (DDI_SUCCESS); 6349 } 6350 6351 static int 6352 e1000g_disable_intrs(struct e1000g *Adapter) 6353 { 6354 int x; 6355 int rc; 6356 6357 /* Disable all interrupts */ 6358 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) { 6359 rc = ddi_intr_block_disable(Adapter->htable, 6360 Adapter->intr_cnt); 6361 if (rc != DDI_SUCCESS) { 6362 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6363 "Disable block intr failed: %d\n", rc); 6364 return (DDI_FAILURE); 6365 } 6366 } else { 6367 for (x = 0; x < Adapter->intr_cnt; x++) { 6368 rc = ddi_intr_disable(Adapter->htable[x]); 6369 if (rc != DDI_SUCCESS) { 6370 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6371 "Disable intr failed: %d\n", rc); 6372 return (DDI_FAILURE); 6373 } 6374 } 6375 } 6376 6377 return (DDI_SUCCESS); 6378 } 6379 6380 /* 6381 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter 6382 */ 6383 static void 6384 e1000g_get_phy_state(struct e1000g *Adapter) 6385 { 6386 struct e1000_hw *hw = &Adapter->shared; 6387 6388 if (hw->phy.media_type == e1000_media_type_copper) { 6389 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl); 6390 (void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status); 6391 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, 6392 &Adapter->phy_an_adv); 6393 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, 6394 &Adapter->phy_an_exp); 6395 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, 6396 &Adapter->phy_ext_status); 6397 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, 6398 &Adapter->phy_1000t_ctrl); 6399 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, 6400 &Adapter->phy_1000t_status); 6401 (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, 6402 &Adapter->phy_lp_able); 6403 6404 Adapter->param_autoneg_cap = 6405 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0; 6406 Adapter->param_pause_cap = 6407 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; 6408 Adapter->param_asym_pause_cap = 6409 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; 6410 Adapter->param_1000fdx_cap = 6411 ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 6412 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0; 6413 Adapter->param_1000hdx_cap = 6414 ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) || 6415 (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0; 6416 Adapter->param_100t4_cap = 6417 (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0; 6418 Adapter->param_100fdx_cap = 6419 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 6420 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0; 6421 Adapter->param_100hdx_cap = 6422 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) || 6423 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0; 6424 Adapter->param_10fdx_cap = 6425 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0; 6426 Adapter->param_10hdx_cap = 6427 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0; 6428 6429 Adapter->param_adv_autoneg = hw->mac.autoneg; 6430 Adapter->param_adv_pause = 6431 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; 6432 Adapter->param_adv_asym_pause = 6433 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; 6434 Adapter->param_adv_1000hdx = 6435 (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0; 6436 Adapter->param_adv_100t4 = 6437 (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0; 6438 if (Adapter->param_adv_autoneg == 1) { 6439 Adapter->param_adv_1000fdx = 6440 (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS) 6441 ? 1 : 0; 6442 Adapter->param_adv_100fdx = 6443 (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS) 6444 ? 1 : 0; 6445 Adapter->param_adv_100hdx = 6446 (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS) 6447 ? 1 : 0; 6448 Adapter->param_adv_10fdx = 6449 (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0; 6450 Adapter->param_adv_10hdx = 6451 (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0; 6452 } 6453 6454 Adapter->param_lp_autoneg = 6455 (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0; 6456 Adapter->param_lp_pause = 6457 (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0; 6458 Adapter->param_lp_asym_pause = 6459 (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0; 6460 Adapter->param_lp_1000fdx = 6461 (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0; 6462 Adapter->param_lp_1000hdx = 6463 (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0; 6464 Adapter->param_lp_100t4 = 6465 (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0; 6466 Adapter->param_lp_100fdx = 6467 (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0; 6468 Adapter->param_lp_100hdx = 6469 (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0; 6470 Adapter->param_lp_10fdx = 6471 (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0; 6472 Adapter->param_lp_10hdx = 6473 (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0; 6474 } else { 6475 /* 6476 * 1Gig Fiber adapter only offers 1Gig Full Duplex. Meaning, 6477 * it can only work with 1Gig Full Duplex Link Partner. 6478 */ 6479 Adapter->param_autoneg_cap = 0; 6480 Adapter->param_pause_cap = 1; 6481 Adapter->param_asym_pause_cap = 1; 6482 Adapter->param_1000fdx_cap = 1; 6483 Adapter->param_1000hdx_cap = 0; 6484 Adapter->param_100t4_cap = 0; 6485 Adapter->param_100fdx_cap = 0; 6486 Adapter->param_100hdx_cap = 0; 6487 Adapter->param_10fdx_cap = 0; 6488 Adapter->param_10hdx_cap = 0; 6489 6490 Adapter->param_adv_autoneg = 0; 6491 Adapter->param_adv_pause = 1; 6492 Adapter->param_adv_asym_pause = 1; 6493 Adapter->param_adv_1000fdx = 1; 6494 Adapter->param_adv_1000hdx = 0; 6495 Adapter->param_adv_100t4 = 0; 6496 Adapter->param_adv_100fdx = 0; 6497 Adapter->param_adv_100hdx = 0; 6498 Adapter->param_adv_10fdx = 0; 6499 Adapter->param_adv_10hdx = 0; 6500 6501 Adapter->param_lp_autoneg = 0; 6502 Adapter->param_lp_pause = 0; 6503 Adapter->param_lp_asym_pause = 0; 6504 Adapter->param_lp_1000fdx = 0; 6505 Adapter->param_lp_1000hdx = 0; 6506 Adapter->param_lp_100t4 = 0; 6507 Adapter->param_lp_100fdx = 0; 6508 Adapter->param_lp_100hdx = 0; 6509 Adapter->param_lp_10fdx = 0; 6510 Adapter->param_lp_10hdx = 0; 6511 } 6512 } 6513 6514 /* 6515 * FMA support 6516 */ 6517 6518 int 6519 e1000g_check_acc_handle(ddi_acc_handle_t handle) 6520 { 6521 ddi_fm_error_t de; 6522 6523 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 6524 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 6525 return (de.fme_status); 6526 } 6527 6528 int 6529 e1000g_check_dma_handle(ddi_dma_handle_t handle) 6530 { 6531 ddi_fm_error_t de; 6532 6533 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 6534 return (de.fme_status); 6535 } 6536 6537 /* 6538 * The IO fault service error handling callback function 6539 */ 6540 /* ARGSUSED2 */ 6541 static int 6542 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 6543 { 6544 /* 6545 * as the driver can always deal with an error in any dma or 6546 * access handle, we can just return the fme_status value. 6547 */ 6548 pci_ereport_post(dip, err, NULL); 6549 return (err->fme_status); 6550 } 6551 6552 static void 6553 e1000g_fm_init(struct e1000g *Adapter) 6554 { 6555 ddi_iblock_cookie_t iblk; 6556 int fma_dma_flag; 6557 6558 /* Only register with IO Fault Services if we have some capability */ 6559 if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 6560 e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 6561 } else { 6562 e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 6563 } 6564 6565 if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 6566 fma_dma_flag = 1; 6567 } else { 6568 fma_dma_flag = 0; 6569 } 6570 6571 (void) e1000g_set_fma_flags(fma_dma_flag); 6572 6573 if (Adapter->fm_capabilities) { 6574 6575 /* Register capabilities with IO Fault Services */ 6576 ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk); 6577 6578 /* 6579 * Initialize pci ereport capabilities if ereport capable 6580 */ 6581 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) || 6582 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6583 pci_ereport_setup(Adapter->dip); 6584 6585 /* 6586 * Register error callback if error callback capable 6587 */ 6588 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6589 ddi_fm_handler_register(Adapter->dip, 6590 e1000g_fm_error_cb, (void*) Adapter); 6591 } 6592 } 6593 6594 static void 6595 e1000g_fm_fini(struct e1000g *Adapter) 6596 { 6597 /* Only unregister FMA capabilities if we registered some */ 6598 if (Adapter->fm_capabilities) { 6599 6600 /* 6601 * Release any resources allocated by pci_ereport_setup() 6602 */ 6603 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) || 6604 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6605 pci_ereport_teardown(Adapter->dip); 6606 6607 /* 6608 * Un-register error callback if error callback capable 6609 */ 6610 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6611 ddi_fm_handler_unregister(Adapter->dip); 6612 6613 /* Unregister from IO Fault Services */ 6614 mutex_enter(&e1000g_rx_detach_lock); 6615 ddi_fm_fini(Adapter->dip); 6616 if (Adapter->priv_dip != NULL) { 6617 DEVI(Adapter->priv_dip)->devi_fmhdl = NULL; 6618 } 6619 mutex_exit(&e1000g_rx_detach_lock); 6620 } 6621 } 6622 6623 void 6624 e1000g_fm_ereport(struct e1000g *Adapter, char *detail) 6625 { 6626 uint64_t ena; 6627 char buf[FM_MAX_CLASS]; 6628 6629 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 6630 ena = fm_ena_generate(0, FM_ENA_FMT1); 6631 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) { 6632 ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP, 6633 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 6634 } 6635 } 6636 6637 /* 6638 * quiesce(9E) entry point. 6639 * 6640 * This function is called when the system is single-threaded at high 6641 * PIL with preemption disabled. Therefore, this function must not be 6642 * blocked. 6643 * 6644 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 6645 * DDI_FAILURE indicates an error condition and should almost never happen. 6646 */ 6647 static int 6648 e1000g_quiesce(dev_info_t *devinfo) 6649 { 6650 struct e1000g *Adapter; 6651 6652 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 6653 6654 if (Adapter == NULL) 6655 return (DDI_FAILURE); 6656 6657 e1000g_clear_all_interrupts(Adapter); 6658 6659 (void) e1000_reset_hw(&Adapter->shared); 6660 6661 /* Setup our HW Tx Head & Tail descriptor pointers */ 6662 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0); 6663 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0); 6664 6665 /* Setup our HW Rx Head & Tail descriptor pointers */ 6666 E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0); 6667 E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0); 6668 6669 return (DDI_SUCCESS); 6670 } 6671 6672 /* 6673 * synchronize the adv* and en* parameters. 6674 * 6675 * See comments in <sys/dld.h> for details of the *_en_* 6676 * parameters. The usage of ndd for setting adv parameters will 6677 * synchronize all the en parameters with the e1000g parameters, 6678 * implicitly disabling any settings made via dladm. 6679 */ 6680 static void 6681 e1000g_param_sync(struct e1000g *Adapter) 6682 { 6683 Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx; 6684 Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx; 6685 Adapter->param_en_100fdx = Adapter->param_adv_100fdx; 6686 Adapter->param_en_100hdx = Adapter->param_adv_100hdx; 6687 Adapter->param_en_10fdx = Adapter->param_adv_10fdx; 6688 Adapter->param_en_10hdx = Adapter->param_adv_10hdx; 6689 } 6690 6691 /* 6692 * e1000g_get_driver_control - tell manageability firmware that the driver 6693 * has control. 6694 */ 6695 static void 6696 e1000g_get_driver_control(struct e1000_hw *hw) 6697 { 6698 uint32_t ctrl_ext; 6699 uint32_t swsm; 6700 6701 /* tell manageability firmware the driver has taken over */ 6702 switch (hw->mac.type) { 6703 case e1000_82573: 6704 swsm = E1000_READ_REG(hw, E1000_SWSM); 6705 E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD); 6706 break; 6707 case e1000_82571: 6708 case e1000_82572: 6709 case e1000_82574: 6710 case e1000_80003es2lan: 6711 case e1000_ich8lan: 6712 case e1000_ich9lan: 6713 case e1000_ich10lan: 6714 case e1000_pchlan: 6715 case e1000_pch2lan: 6716 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 6717 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 6718 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 6719 break; 6720 default: 6721 /* no manageability firmware: do nothing */ 6722 break; 6723 } 6724 } 6725 6726 /* 6727 * e1000g_release_driver_control - tell manageability firmware that the driver 6728 * has released control. 6729 */ 6730 static void 6731 e1000g_release_driver_control(struct e1000_hw *hw) 6732 { 6733 uint32_t ctrl_ext; 6734 uint32_t swsm; 6735 6736 /* tell manageability firmware the driver has released control */ 6737 switch (hw->mac.type) { 6738 case e1000_82573: 6739 swsm = E1000_READ_REG(hw, E1000_SWSM); 6740 E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD); 6741 break; 6742 case e1000_82571: 6743 case e1000_82572: 6744 case e1000_82574: 6745 case e1000_80003es2lan: 6746 case e1000_ich8lan: 6747 case e1000_ich9lan: 6748 case e1000_ich10lan: 6749 case e1000_pchlan: 6750 case e1000_pch2lan: 6751 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 6752 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 6753 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 6754 break; 6755 default: 6756 /* no manageability firmware: do nothing */ 6757 break; 6758 } 6759 } 6760 6761 /* 6762 * Restore e1000g promiscuous mode. 6763 */ 6764 static void 6765 e1000g_restore_promisc(struct e1000g *Adapter) 6766 { 6767 if (Adapter->e1000g_promisc) { 6768 uint32_t rctl; 6769 6770 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL); 6771 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM); 6772 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl); 6773 } 6774 } 6775