/*
 * This file is provided under a CDDLv1 license. When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 * http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright 2024 Oxide Computer Company
 */

/*
 * **********************************************************************
 *
 * Module Name:
 *   e1000g_main.c
 *
 * Abstract:
 *   This file contains the interface routines for the Solaris OS.
 *   It has all DDI entry point routines and GLD entry point routines.
 *
 *   This file also contains the initialization and uninitialization
 *   routines as well as the interrupt routine.
 *
 * **********************************************************************
 */

#include <sys/dlpi.h>
#include <sys/mac.h>
#include "e1000g_sw.h"
#include "e1000g_debug.h"

static char ident[] = "Intel PRO/1000 Ethernet";
/* LINTED E_STATIC_UNUSED */
static char e1000g_version[] = "Driver Ver. 5.4.00";

/*
 * Prototypes for DDI entry points
 */
static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
static int e1000g_quiesce(dev_info_t *);

/*
 * Prototypes for init and intr routines
 */
static int e1000g_resume(dev_info_t *);
static int e1000g_suspend(dev_info_t *);
static uint_t e1000g_intr_pciexpress(caddr_t, caddr_t);
static uint_t e1000g_intr(caddr_t, caddr_t);
static void e1000g_intr_work(struct e1000g *, uint32_t);
#pragma inline(e1000g_intr_work)
static int e1000g_init(struct e1000g *);
static int e1000g_start(struct e1000g *, boolean_t);
static void e1000g_stop(struct e1000g *, boolean_t);
static int e1000g_m_start(void *);
static void e1000g_m_stop(void *);
static int e1000g_m_promisc(void *, boolean_t);
static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, void *);
static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);
static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
    const void *);
static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *);
static void e1000g_init_locks(struct e1000g *);
static void e1000g_destroy_locks(struct e1000g *);
static int e1000g_identify_hardware(struct e1000g *);
static int e1000g_regs_map(struct e1000g *);
static int e1000g_set_driver_params(struct e1000g *);
static void e1000g_set_bufsize(struct e1000g *);
static int e1000g_register_mac(struct e1000g *);
static boolean_t e1000g_rx_drain(struct e1000g *);
static boolean_t e1000g_tx_drain(struct e1000g *);
static void e1000g_init_unicst(struct e1000g *);
static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
static int e1000g_alloc_rx_data(struct e1000g *);
static void e1000g_release_multicast(struct e1000g *);
static void e1000g_pch_limits(struct e1000g *);
static uint32_t e1000g_mtu2maxframe(uint32_t);

/*
 * Local routines
 */
static boolean_t e1000g_reset_adapter(struct e1000g *);
static void e1000g_tx_clean(struct e1000g *);
static void e1000g_rx_clean(struct e1000g *);
static void e1000g_link_timer(void *);
static void e1000g_local_timer(void *);
static boolean_t e1000g_link_check(struct e1000g *);
static boolean_t e1000g_stall_check(struct e1000g *);
static void e1000g_smartspeed(struct e1000g *);
static void e1000g_get_conf(struct e1000g *);
static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int,
    int *);
static void enable_watchdog_timer(struct e1000g *);
static void disable_watchdog_timer(struct e1000g *);
static void start_watchdog_timer(struct e1000g *);
static void restart_watchdog_timer(struct e1000g *);
static void stop_watchdog_timer(struct e1000g *);
static void stop_link_timer(struct e1000g *);
static void stop_82547_timer(e1000g_tx_ring_t *);
static void e1000g_force_speed_duplex(struct e1000g *);
static void e1000g_setup_max_mtu(struct e1000g *);
static void e1000g_get_max_frame_size(struct e1000g *);
static boolean_t is_valid_mac_addr(uint8_t *);
static void
e1000g_unattach(dev_info_t *, struct e1000g *); 133 static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *); 134 #ifdef E1000G_DEBUG 135 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *); 136 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *); 137 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *); 138 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *); 139 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *, 140 struct iocblk *, mblk_t *); 141 #endif 142 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *, 143 struct iocblk *, mblk_t *); 144 static boolean_t e1000g_check_loopback_support(struct e1000_hw *); 145 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t); 146 static void e1000g_set_internal_loopback(struct e1000g *); 147 static void e1000g_set_external_loopback_1000(struct e1000g *); 148 static void e1000g_set_external_loopback_100(struct e1000g *); 149 static void e1000g_set_external_loopback_10(struct e1000g *); 150 static int e1000g_add_intrs(struct e1000g *); 151 static int e1000g_intr_add(struct e1000g *, int); 152 static int e1000g_rem_intrs(struct e1000g *); 153 static int e1000g_enable_intrs(struct e1000g *); 154 static int e1000g_disable_intrs(struct e1000g *); 155 static boolean_t e1000g_link_up(struct e1000g *); 156 #ifdef __sparc 157 static boolean_t e1000g_find_mac_address(struct e1000g *); 158 #endif 159 static void e1000g_get_phy_state(struct e1000g *); 160 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 161 const void *impl_data); 162 static void e1000g_fm_init(struct e1000g *Adapter); 163 static void e1000g_fm_fini(struct e1000g *Adapter); 164 static void e1000g_param_sync(struct e1000g *); 165 static void e1000g_get_driver_control(struct e1000_hw *); 166 static void e1000g_release_driver_control(struct e1000_hw *); 167 static void e1000g_restore_promisc(struct e1000g *Adapter); 168 169 char *e1000g_priv_props[] = { 170 "_tx_bcopy_threshold", 171 "_tx_interrupt_enable", 172 "_tx_intr_delay", 173 "_tx_intr_abs_delay", 174 "_rx_bcopy_threshold", 175 "_max_num_rcv_packets", 176 "_rx_intr_delay", 177 "_rx_intr_abs_delay", 178 "_intr_throttling_rate", 179 "_intr_adaptive", 180 "_adv_pause_cap", 181 "_adv_asym_pause_cap", 182 NULL 183 }; 184 185 static struct cb_ops cb_ws_ops = { 186 nulldev, /* cb_open */ 187 nulldev, /* cb_close */ 188 nodev, /* cb_strategy */ 189 nodev, /* cb_print */ 190 nodev, /* cb_dump */ 191 nodev, /* cb_read */ 192 nodev, /* cb_write */ 193 nodev, /* cb_ioctl */ 194 nodev, /* cb_devmap */ 195 nodev, /* cb_mmap */ 196 nodev, /* cb_segmap */ 197 nochpoll, /* cb_chpoll */ 198 ddi_prop_op, /* cb_prop_op */ 199 NULL, /* cb_stream */ 200 D_MP | D_HOTPLUG, /* cb_flag */ 201 CB_REV, /* cb_rev */ 202 nodev, /* cb_aread */ 203 nodev /* cb_awrite */ 204 }; 205 206 static struct dev_ops ws_ops = { 207 DEVO_REV, /* devo_rev */ 208 0, /* devo_refcnt */ 209 NULL, /* devo_getinfo */ 210 nulldev, /* devo_identify */ 211 nulldev, /* devo_probe */ 212 e1000g_attach, /* devo_attach */ 213 e1000g_detach, /* devo_detach */ 214 nodev, /* devo_reset */ 215 &cb_ws_ops, /* devo_cb_ops */ 216 NULL, /* devo_bus_ops */ 217 ddi_power, /* devo_power */ 218 e1000g_quiesce /* devo_quiesce */ 219 }; 220 221 static struct modldrv modldrv = { 222 &mod_driverops, /* Type of module. 
This one is a driver */
        ident,                  /* Description string */
        &ws_ops,                /* driver ops */
};

static struct modlinkage modlinkage = {
        MODREV_1, &modldrv, NULL
};

/* Access attributes for register mapping */
static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
        DDI_DEVICE_ATTR_V1,
        DDI_STRUCTURE_LE_ACC,
        DDI_STRICTORDER_ACC,
        DDI_FLAGERR_ACC
};

#define E1000G_M_CALLBACK_FLAGS \
        (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t e1000g_m_callbacks = {
        E1000G_M_CALLBACK_FLAGS,
        e1000g_m_stat,
        e1000g_m_start,
        e1000g_m_stop,
        e1000g_m_promisc,
        e1000g_m_multicst,
        NULL,
        e1000g_m_tx,
        NULL,
        e1000g_m_ioctl,
        e1000g_m_getcapab,
        NULL,
        NULL,
        e1000g_m_setprop,
        e1000g_m_getprop,
        e1000g_m_propinfo
};

/*
 * Global variables
 */
uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K;
uint32_t e1000g_mblks_pending = 0;
/*
 * Workaround for Dynamic Reconfiguration support, for the x86 platform only.
 * Here we maintain a private dev_info list if e1000g_force_detach is
 * enabled. If we force the driver to detach while there are still some
 * rx buffers retained in the upper layer, we have to keep a copy of the
 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
 * structure will be freed after the driver is detached. However, when we
 * finally free those rx buffers released by the upper layer, we need to
 * refer to the dev_info to free the dma buffers. So we save a copy of
 * the dev_info for this purpose. On the x86 platform, we assume this copy
 * of dev_info is always valid, but on the SPARC platform, it could be
 * invalid after a system board level DR operation. For this reason, the
 * global variable e1000g_force_detach must be B_FALSE on SPARC.
 */
#ifdef __sparc
boolean_t e1000g_force_detach = B_FALSE;
#else
boolean_t e1000g_force_detach = B_TRUE;
#endif
private_devi_list_t *e1000g_private_devi_list = NULL;

/*
 * The mutex e1000g_rx_detach_lock is defined to protect the processing of
 * the private dev_info list, and to serialize the processing of rx buffer
 * freeing and rx buffer recycling.
 */
kmutex_t e1000g_rx_detach_lock;
/*
 * The rwlock e1000g_dma_type_lock is defined to protect the global flag
 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
 * If there are many e1000g instances, the system may run out of DVMA
 * resources during the initialization of the instances, in which case the
 * flag is changed to "USE_DMA". Because different e1000g instances are
 * initialized in parallel, we need to use this lock to protect the flag.
 */
krwlock_t e1000g_dma_type_lock;

/*
 * The 82546 chipset is a dual-port device, and the two ports share one
 * EEPROM. Based on the information from Intel, the 82546 chipset has a
 * hardware problem: when one port is being reset and the other port is
 * trying to access the EEPROM, it could cause a system hang or panic. To
 * work around this hardware problem, we use a global mutex to prevent such
 * operations from happening simultaneously on different instances. This
 * workaround is applied to all the devices supported by this driver.
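 *
 * As a small illustration (this simply mirrors the pattern already used
 * throughout this file, e.g. in e1000g_init() and e1000g_unattach()),
 * every reset/NVM access is bracketed by the lock:
 *
 *	mutex_enter(&e1000g_nvm_lock);
 *	result = e1000_reset_hw(hw);	-- or any other NVM operation
 *	mutex_exit(&e1000g_nvm_lock);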
311 */ 312 kmutex_t e1000g_nvm_lock; 313 314 /* 315 * Loadable module configuration entry points for the driver 316 */ 317 318 /* 319 * _init - module initialization 320 */ 321 int 322 _init(void) 323 { 324 int status; 325 326 mac_init_ops(&ws_ops, WSNAME); 327 status = mod_install(&modlinkage); 328 if (status != DDI_SUCCESS) 329 mac_fini_ops(&ws_ops); 330 else { 331 mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL); 332 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL); 333 mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL); 334 } 335 336 return (status); 337 } 338 339 /* 340 * _fini - module finalization 341 */ 342 int 343 _fini(void) 344 { 345 int status; 346 347 if (e1000g_mblks_pending != 0) 348 return (EBUSY); 349 350 status = mod_remove(&modlinkage); 351 if (status == DDI_SUCCESS) { 352 mac_fini_ops(&ws_ops); 353 354 if (e1000g_force_detach) { 355 private_devi_list_t *devi_node; 356 357 mutex_enter(&e1000g_rx_detach_lock); 358 while (e1000g_private_devi_list != NULL) { 359 devi_node = e1000g_private_devi_list; 360 e1000g_private_devi_list = 361 e1000g_private_devi_list->next; 362 363 kmem_free(devi_node->priv_dip, 364 sizeof (struct dev_info)); 365 kmem_free(devi_node, 366 sizeof (private_devi_list_t)); 367 } 368 mutex_exit(&e1000g_rx_detach_lock); 369 } 370 371 mutex_destroy(&e1000g_rx_detach_lock); 372 rw_destroy(&e1000g_dma_type_lock); 373 mutex_destroy(&e1000g_nvm_lock); 374 } 375 376 return (status); 377 } 378 379 /* 380 * _info - module information 381 */ 382 int 383 _info(struct modinfo *modinfop) 384 { 385 return (mod_info(&modlinkage, modinfop)); 386 } 387 388 /* 389 * e1000g_attach - driver attach 390 * 391 * This function is the device-specific initialization entry 392 * point. This entry point is required and must be written. 393 * The DDI_ATTACH command must be provided in the attach entry 394 * point. When attach() is called with cmd set to DDI_ATTACH, 395 * all normal kernel services (such as kmem_alloc(9F)) are 396 * available for use by the driver. 397 * 398 * The attach() function will be called once for each instance 399 * of the device on the system with cmd set to DDI_ATTACH. 400 * Until attach() succeeds, the only driver entry points which 401 * may be called are open(9E) and getinfo(9E). 402 */ 403 static int 404 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 405 { 406 struct e1000g *Adapter; 407 struct e1000_hw *hw; 408 struct e1000g_osdep *osdep; 409 int instance; 410 411 switch (cmd) { 412 default: 413 e1000g_log(NULL, CE_WARN, 414 "Unsupported command send to e1000g_attach... 
"); 415 return (DDI_FAILURE); 416 417 case DDI_RESUME: 418 return (e1000g_resume(devinfo)); 419 420 case DDI_ATTACH: 421 break; 422 } 423 424 /* 425 * get device instance number 426 */ 427 instance = ddi_get_instance(devinfo); 428 429 /* 430 * Allocate soft data structure 431 */ 432 Adapter = 433 (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP); 434 435 Adapter->dip = devinfo; 436 Adapter->instance = instance; 437 Adapter->tx_ring->adapter = Adapter; 438 Adapter->rx_ring->adapter = Adapter; 439 440 hw = &Adapter->shared; 441 osdep = &Adapter->osdep; 442 hw->back = osdep; 443 osdep->adapter = Adapter; 444 445 ddi_set_driver_private(devinfo, (caddr_t)Adapter); 446 447 /* 448 * Initialize for fma support 449 */ 450 (void) e1000g_get_prop(Adapter, "fm-capable", 451 0, 0x0f, 452 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 453 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE, 454 &Adapter->fm_capabilities); 455 e1000g_fm_init(Adapter); 456 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT; 457 458 /* 459 * PCI Configure 460 */ 461 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { 462 e1000g_log(Adapter, CE_WARN, "PCI configuration failed"); 463 goto attach_fail; 464 } 465 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; 466 467 /* 468 * Setup hardware 469 */ 470 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) { 471 e1000g_log(Adapter, CE_WARN, "Identify hardware failed"); 472 goto attach_fail; 473 } 474 475 /* 476 * Map in the device registers. 477 */ 478 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) { 479 e1000g_log(Adapter, CE_WARN, "Mapping registers failed"); 480 goto attach_fail; 481 } 482 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP; 483 484 /* 485 * Initialize driver parameters 486 */ 487 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) { 488 goto attach_fail; 489 } 490 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP; 491 492 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 493 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 494 goto attach_fail; 495 } 496 497 /* 498 * Disable ULP support 499 */ 500 (void) e1000_disable_ulp_lpt_lp(hw, TRUE); 501 502 /* 503 * Initialize interrupts 504 */ 505 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) { 506 e1000g_log(Adapter, CE_WARN, "Add interrupts failed"); 507 goto attach_fail; 508 } 509 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 510 511 /* 512 * Initialize mutex's for this device. 
513 * Do this before enabling the interrupt handler and 514 * register the softint to avoid the condition where 515 * interrupt handler can try using uninitialized mutex 516 */ 517 e1000g_init_locks(Adapter); 518 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS; 519 520 /* 521 * Initialize Driver Counters 522 */ 523 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) { 524 e1000g_log(Adapter, CE_WARN, "Init stats failed"); 525 goto attach_fail; 526 } 527 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS; 528 529 /* 530 * Initialize chip hardware and software structures 531 */ 532 rw_enter(&Adapter->chip_lock, RW_WRITER); 533 if (e1000g_init(Adapter) != DDI_SUCCESS) { 534 rw_exit(&Adapter->chip_lock); 535 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed"); 536 goto attach_fail; 537 } 538 rw_exit(&Adapter->chip_lock); 539 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 540 541 /* 542 * Register the driver to the MAC 543 */ 544 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) { 545 e1000g_log(Adapter, CE_WARN, "Register MAC failed"); 546 goto attach_fail; 547 } 548 Adapter->attach_progress |= ATTACH_PROGRESS_MAC; 549 550 /* 551 * Now that mutex locks are initialized, and the chip is also 552 * initialized, enable interrupts. 553 */ 554 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) { 555 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed"); 556 goto attach_fail; 557 } 558 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 559 560 /* 561 * If e1000g_force_detach is enabled, in global private dip list, 562 * we will create a new entry, which maintains the priv_dip for DR 563 * supports after driver detached. 564 */ 565 if (e1000g_force_detach) { 566 private_devi_list_t *devi_node; 567 568 Adapter->priv_dip = 569 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP); 570 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip), 571 sizeof (struct dev_info)); 572 573 devi_node = 574 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP); 575 576 mutex_enter(&e1000g_rx_detach_lock); 577 devi_node->priv_dip = Adapter->priv_dip; 578 devi_node->flag = E1000G_PRIV_DEVI_ATTACH; 579 devi_node->pending_rx_count = 0; 580 581 Adapter->priv_devi_node = devi_node; 582 583 if (e1000g_private_devi_list == NULL) { 584 devi_node->prev = NULL; 585 devi_node->next = NULL; 586 e1000g_private_devi_list = devi_node; 587 } else { 588 devi_node->prev = NULL; 589 devi_node->next = e1000g_private_devi_list; 590 e1000g_private_devi_list->prev = devi_node; 591 e1000g_private_devi_list = devi_node; 592 } 593 mutex_exit(&e1000g_rx_detach_lock); 594 } 595 596 Adapter->e1000g_state = E1000G_INITIALIZED; 597 return (DDI_SUCCESS); 598 599 attach_fail: 600 e1000g_unattach(devinfo, Adapter); 601 return (DDI_FAILURE); 602 } 603 604 static int 605 e1000g_register_mac(struct e1000g *Adapter) 606 { 607 struct e1000_hw *hw = &Adapter->shared; 608 mac_register_t *mac; 609 int err; 610 611 if ((mac = mac_alloc(MAC_VERSION)) == NULL) 612 return (DDI_FAILURE); 613 614 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 615 mac->m_driver = Adapter; 616 mac->m_dip = Adapter->dip; 617 mac->m_src_addr = hw->mac.addr; 618 mac->m_callbacks = &e1000g_m_callbacks; 619 mac->m_min_sdu = 0; 620 mac->m_max_sdu = Adapter->default_mtu; 621 mac->m_margin = VLAN_TAGSZ; 622 mac->m_priv_props = e1000g_priv_props; 623 mac->m_v12n = MAC_VIRT_LEVEL1; 624 625 err = mac_register(mac, &Adapter->mh); 626 mac_free(mac); 627 628 return (err == 0 ? 
DDI_SUCCESS : DDI_FAILURE); 629 } 630 631 static int 632 e1000g_identify_hardware(struct e1000g *Adapter) 633 { 634 struct e1000_hw *hw = &Adapter->shared; 635 struct e1000g_osdep *osdep = &Adapter->osdep; 636 637 /* Get the device id */ 638 hw->vendor_id = 639 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID); 640 hw->device_id = 641 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID); 642 hw->revision_id = 643 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID); 644 hw->subsystem_device_id = 645 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID); 646 hw->subsystem_vendor_id = 647 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID); 648 649 if (e1000_set_mac_type(hw) != E1000_SUCCESS) { 650 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 651 "MAC type could not be set properly."); 652 return (DDI_FAILURE); 653 } 654 655 return (DDI_SUCCESS); 656 } 657 658 static int 659 e1000g_regs_map(struct e1000g *Adapter) 660 { 661 dev_info_t *devinfo = Adapter->dip; 662 struct e1000_hw *hw = &Adapter->shared; 663 struct e1000g_osdep *osdep = &Adapter->osdep; 664 off_t mem_size; 665 bar_info_t bar_info; 666 int offset, rnumber; 667 668 rnumber = ADAPTER_REG_SET; 669 /* Get size of adapter register memory */ 670 if (ddi_dev_regsize(devinfo, rnumber, &mem_size) != 671 DDI_SUCCESS) { 672 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 673 "ddi_dev_regsize for registers failed"); 674 return (DDI_FAILURE); 675 } 676 677 /* Map adapter register memory */ 678 if ((ddi_regs_map_setup(devinfo, rnumber, 679 (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr, 680 &osdep->reg_handle)) != DDI_SUCCESS) { 681 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 682 "ddi_regs_map_setup for registers failed"); 683 goto regs_map_fail; 684 } 685 686 /* ICH needs to map flash memory */ 687 switch (hw->mac.type) { 688 case e1000_ich8lan: 689 case e1000_ich9lan: 690 case e1000_ich10lan: 691 case e1000_pchlan: 692 case e1000_pch2lan: 693 case e1000_pch_lpt: 694 rnumber = ICH_FLASH_REG_SET; 695 696 /* get flash size */ 697 if (ddi_dev_regsize(devinfo, rnumber, 698 &mem_size) != DDI_SUCCESS) { 699 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 700 "ddi_dev_regsize for ICH flash failed"); 701 goto regs_map_fail; 702 } 703 704 /* map flash in */ 705 if (ddi_regs_map_setup(devinfo, rnumber, 706 (caddr_t *)&hw->flash_address, 0, 707 mem_size, &e1000g_regs_acc_attr, 708 &osdep->ich_flash_handle) != DDI_SUCCESS) { 709 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 710 "ddi_regs_map_setup for ICH flash failed"); 711 goto regs_map_fail; 712 } 713 break; 714 case e1000_pch_spt: 715 case e1000_pch_cnp: 716 case e1000_pch_tgp: 717 case e1000_pch_adp: 718 case e1000_pch_mtp: 719 case e1000_pch_lnp: 720 case e1000_pch_rpl: 721 case e1000_pch_arl: 722 case e1000_pch_ptp: 723 case e1000_pch_nvl: 724 /* 725 * On the SPT, the device flash is actually in BAR0, not a 726 * separate BAR. Therefore we end up setting the 727 * ich_flash_handle to be the same as the register handle. 728 * We mark the same to reduce the confusion in the other 729 * functions and macros. Though this does make the set up and 730 * tear-down path slightly more complicated. 
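		 *
		 * (A practical consequence, visible in the regs_map_fail
		 * path below and in e1000g_unattach(): ich_flash_handle is
		 * only passed to ddi_regs_map_free() for MAC types older
		 * than e1000_pch_spt, so the aliased handle is never freed
		 * twice.)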
731 */ 732 osdep->ich_flash_handle = osdep->reg_handle; 733 hw->flash_address = hw->hw_addr; 734 default: 735 break; 736 } 737 738 /* map io space */ 739 switch (hw->mac.type) { 740 case e1000_82544: 741 case e1000_82540: 742 case e1000_82545: 743 case e1000_82546: 744 case e1000_82541: 745 case e1000_82541_rev_2: 746 /* find the IO bar */ 747 rnumber = -1; 748 for (offset = PCI_CONF_BASE1; 749 offset <= PCI_CONF_BASE5; offset += 4) { 750 if (e1000g_get_bar_info(devinfo, offset, &bar_info) 751 != DDI_SUCCESS) 752 continue; 753 if (bar_info.type == E1000G_BAR_IO) { 754 rnumber = bar_info.rnumber; 755 break; 756 } 757 } 758 759 if (rnumber < 0) { 760 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 761 "No io space is found"); 762 goto regs_map_fail; 763 } 764 765 /* get io space size */ 766 if (ddi_dev_regsize(devinfo, rnumber, 767 &mem_size) != DDI_SUCCESS) { 768 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 769 "ddi_dev_regsize for io space failed"); 770 goto regs_map_fail; 771 } 772 773 /* map io space */ 774 if ((ddi_regs_map_setup(devinfo, rnumber, 775 (caddr_t *)&hw->io_base, 0, mem_size, 776 &e1000g_regs_acc_attr, 777 &osdep->io_reg_handle)) != DDI_SUCCESS) { 778 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 779 "ddi_regs_map_setup for io space failed"); 780 goto regs_map_fail; 781 } 782 break; 783 default: 784 hw->io_base = 0; 785 break; 786 } 787 788 return (DDI_SUCCESS); 789 790 regs_map_fail: 791 if (osdep->reg_handle != NULL) 792 ddi_regs_map_free(&osdep->reg_handle); 793 if (osdep->ich_flash_handle != NULL && hw->mac.type < e1000_pch_spt) 794 ddi_regs_map_free(&osdep->ich_flash_handle); 795 return (DDI_FAILURE); 796 } 797 798 static int 799 e1000g_set_driver_params(struct e1000g *Adapter) 800 { 801 struct e1000_hw *hw; 802 803 hw = &Adapter->shared; 804 805 /* Set MAC type and initialize hardware functions */ 806 if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) { 807 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 808 "Could not setup hardware functions"); 809 return (DDI_FAILURE); 810 } 811 812 /* Get bus information */ 813 if (e1000_get_bus_info(hw) != E1000_SUCCESS) { 814 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 815 "Could not get bus information"); 816 return (DDI_FAILURE); 817 } 818 819 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word); 820 821 hw->mac.autoneg_failed = B_TRUE; 822 823 /* Set the autoneg_wait_to_complete flag to B_FALSE */ 824 hw->phy.autoneg_wait_to_complete = B_FALSE; 825 826 /* Adaptive IFS related changes */ 827 hw->mac.adaptive_ifs = B_TRUE; 828 829 /* Enable phy init script for IGP phy of 82541/82547 */ 830 if ((hw->mac.type == e1000_82547) || 831 (hw->mac.type == e1000_82541) || 832 (hw->mac.type == e1000_82547_rev_2) || 833 (hw->mac.type == e1000_82541_rev_2)) 834 e1000_init_script_state_82541(hw, B_TRUE); 835 836 /* Enable the TTL workaround for 82541/82547 */ 837 e1000_set_ttl_workaround_state_82541(hw, B_TRUE); 838 839 #ifdef __sparc 840 Adapter->strip_crc = B_TRUE; 841 #else 842 Adapter->strip_crc = B_FALSE; 843 #endif 844 845 /* setup the maximum MTU size of the chip */ 846 e1000g_setup_max_mtu(Adapter); 847 848 /* Get speed/duplex settings in conf file */ 849 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL; 850 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 851 e1000g_force_speed_duplex(Adapter); 852 853 /* Get Jumbo Frames settings in conf file */ 854 e1000g_get_max_frame_size(Adapter); 855 856 /* Get conf file properties */ 857 e1000g_get_conf(Adapter); 858 859 /* enforce PCH limits */ 860 e1000g_pch_limits(Adapter); 861 862 /* Set Rx/Tx buffer size 
*/ 863 e1000g_set_bufsize(Adapter); 864 865 /* Master Latency Timer */ 866 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER; 867 868 /* copper options */ 869 if (hw->phy.media_type == e1000_media_type_copper) { 870 hw->phy.mdix = 0; /* AUTO_ALL_MODES */ 871 hw->phy.disable_polarity_correction = B_FALSE; 872 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */ 873 } 874 875 /* The initial link state should be "unknown" */ 876 Adapter->link_state = LINK_STATE_UNKNOWN; 877 878 /* Initialize rx parameters */ 879 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY; 880 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY; 881 882 /* Initialize tx parameters */ 883 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE; 884 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD; 885 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY; 886 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY; 887 888 /* Initialize rx parameters */ 889 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD; 890 891 return (DDI_SUCCESS); 892 } 893 894 static void 895 e1000g_setup_max_mtu(struct e1000g *Adapter) 896 { 897 struct e1000_mac_info *mac = &Adapter->shared.mac; 898 struct e1000_phy_info *phy = &Adapter->shared.phy; 899 900 switch (mac->type) { 901 /* types that do not support jumbo frames */ 902 case e1000_ich8lan: 903 case e1000_82573: 904 case e1000_82583: 905 Adapter->max_mtu = ETHERMTU; 906 break; 907 /* ich9 supports jumbo frames except on one phy type */ 908 case e1000_ich9lan: 909 if (phy->type == e1000_phy_ife) 910 Adapter->max_mtu = ETHERMTU; 911 else 912 Adapter->max_mtu = MAXIMUM_MTU_9K; 913 break; 914 /* pch can do jumbo frames up to 4K */ 915 case e1000_pchlan: 916 Adapter->max_mtu = MAXIMUM_MTU_4K; 917 break; 918 /* pch2 can do jumbo frames up to 9K */ 919 case e1000_pch2lan: 920 case e1000_pch_lpt: 921 case e1000_pch_spt: 922 case e1000_pch_cnp: 923 case e1000_pch_tgp: 924 case e1000_pch_adp: 925 case e1000_pch_mtp: 926 case e1000_pch_lnp: 927 case e1000_pch_rpl: 928 case e1000_pch_arl: 929 case e1000_pch_ptp: 930 case e1000_pch_nvl: 931 Adapter->max_mtu = MAXIMUM_MTU_9K; 932 break; 933 /* types with a special limit */ 934 case e1000_82571: 935 case e1000_82572: 936 case e1000_82574: 937 case e1000_80003es2lan: 938 case e1000_ich10lan: 939 if (e1000g_jumbo_mtu >= ETHERMTU && 940 e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) { 941 Adapter->max_mtu = e1000g_jumbo_mtu; 942 } else { 943 Adapter->max_mtu = MAXIMUM_MTU_9K; 944 } 945 break; 946 /* default limit is 16K */ 947 default: 948 Adapter->max_mtu = FRAME_SIZE_UPTO_16K - 949 sizeof (struct ether_vlan_header) - ETHERFCSL; 950 break; 951 } 952 } 953 954 static void 955 e1000g_set_bufsize(struct e1000g *Adapter) 956 { 957 struct e1000_mac_info *mac = &Adapter->shared.mac; 958 uint64_t rx_size; 959 uint64_t tx_size; 960 961 dev_info_t *devinfo = Adapter->dip; 962 #ifdef __sparc 963 ulong_t iommu_pagesize; 964 #endif 965 /* Get the system page size */ 966 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1); 967 968 #ifdef __sparc 969 iommu_pagesize = dvma_pagesize(devinfo); 970 if (iommu_pagesize != 0) { 971 if (Adapter->sys_page_sz == iommu_pagesize) { 972 if (iommu_pagesize > 0x4000) 973 Adapter->sys_page_sz = 0x4000; 974 } else { 975 if (Adapter->sys_page_sz > iommu_pagesize) 976 Adapter->sys_page_sz = iommu_pagesize; 977 } 978 } 979 if (Adapter->lso_enable) { 980 Adapter->dvma_page_num = E1000_LSO_MAXLEN / 981 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; 982 } else { 983 Adapter->dvma_page_num = Adapter->max_frame_size / 984 
            Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
        }
        ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
#endif

        Adapter->min_frame_size = ETHERMIN + ETHERFCSL;

        if (Adapter->mem_workaround_82546 &&
            ((mac->type == e1000_82545) ||
            (mac->type == e1000_82546) ||
            (mac->type == e1000_82546_rev_3))) {
                Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
        } else {
                rx_size = Adapter->max_frame_size;
                if ((rx_size > FRAME_SIZE_UPTO_2K) &&
                    (rx_size <= FRAME_SIZE_UPTO_4K))
                        Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
                else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
                    (rx_size <= FRAME_SIZE_UPTO_8K))
                        Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
                else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
                    (rx_size <= FRAME_SIZE_UPTO_16K))
                        Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
                else
                        Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
        }
        Adapter->rx_buffer_size += E1000G_IPALIGNROOM;

        tx_size = Adapter->max_frame_size;
        if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
                Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
        else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
            (tx_size <= FRAME_SIZE_UPTO_8K))
                Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
        else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
            (tx_size <= FRAME_SIZE_UPTO_16K))
                Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
        else
                Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;

        /*
         * For Wiseman adapters we have a requirement of having receive
         * buffers aligned at a 256 byte boundary. Since Livengood does not
         * require this and forcing it for all hardware will have
         * performance implications, I am making it applicable only for
         * Wiseman and for Jumbo frames enabled mode as rest of the time,
         * it is okay to have normal frames...but it does involve a
         * potential risk where we may lose data if a buffer is not
         * aligned...so all Wiseman boards have 256 byte aligned
         * buffers
         */
        if (mac->type < e1000_82543)
                Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
        else
                Adapter->rx_buf_align = 1;
}

/*
 * e1000g_detach - driver detach
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by this
 * driver is freed.
1055 */ 1056 static int 1057 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 1058 { 1059 struct e1000g *Adapter; 1060 boolean_t rx_drain; 1061 1062 switch (cmd) { 1063 default: 1064 return (DDI_FAILURE); 1065 1066 case DDI_SUSPEND: 1067 return (e1000g_suspend(devinfo)); 1068 1069 case DDI_DETACH: 1070 break; 1071 } 1072 1073 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1074 if (Adapter == NULL) 1075 return (DDI_FAILURE); 1076 1077 rx_drain = e1000g_rx_drain(Adapter); 1078 if (!rx_drain && !e1000g_force_detach) 1079 return (DDI_FAILURE); 1080 1081 if (mac_unregister(Adapter->mh) != 0) { 1082 e1000g_log(Adapter, CE_WARN, "Unregister MAC failed"); 1083 return (DDI_FAILURE); 1084 } 1085 Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC; 1086 1087 ASSERT(!(Adapter->e1000g_state & E1000G_STARTED)); 1088 1089 if (!e1000g_force_detach && !rx_drain) 1090 return (DDI_FAILURE); 1091 1092 e1000g_unattach(devinfo, Adapter); 1093 1094 return (DDI_SUCCESS); 1095 } 1096 1097 /* 1098 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance 1099 */ 1100 void 1101 e1000g_free_priv_devi_node(private_devi_list_t *devi_node) 1102 { 1103 ASSERT(e1000g_private_devi_list != NULL); 1104 ASSERT(devi_node != NULL); 1105 1106 if (devi_node->prev != NULL) 1107 devi_node->prev->next = devi_node->next; 1108 if (devi_node->next != NULL) 1109 devi_node->next->prev = devi_node->prev; 1110 if (devi_node == e1000g_private_devi_list) 1111 e1000g_private_devi_list = devi_node->next; 1112 1113 kmem_free(devi_node->priv_dip, 1114 sizeof (struct dev_info)); 1115 kmem_free(devi_node, 1116 sizeof (private_devi_list_t)); 1117 } 1118 1119 static void 1120 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter) 1121 { 1122 private_devi_list_t *devi_node; 1123 int result; 1124 1125 if (Adapter->e1000g_blink != NULL) { 1126 ddi_periodic_delete(Adapter->e1000g_blink); 1127 Adapter->e1000g_blink = NULL; 1128 } 1129 1130 if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 1131 (void) e1000g_disable_intrs(Adapter); 1132 } 1133 1134 if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) { 1135 (void) mac_unregister(Adapter->mh); 1136 } 1137 1138 if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 1139 (void) e1000g_rem_intrs(Adapter); 1140 } 1141 1142 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) { 1143 (void) ddi_prop_remove_all(devinfo); 1144 } 1145 1146 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) { 1147 kstat_delete((kstat_t *)Adapter->e1000g_ksp); 1148 } 1149 1150 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) { 1151 stop_link_timer(Adapter); 1152 1153 mutex_enter(&e1000g_nvm_lock); 1154 result = e1000_reset_hw(&Adapter->shared); 1155 mutex_exit(&e1000g_nvm_lock); 1156 1157 if (result != E1000_SUCCESS) { 1158 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1159 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1160 } 1161 } 1162 1163 e1000g_release_multicast(Adapter); 1164 1165 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) { 1166 if (Adapter->osdep.reg_handle != NULL) 1167 ddi_regs_map_free(&Adapter->osdep.reg_handle); 1168 if (Adapter->osdep.ich_flash_handle != NULL && 1169 Adapter->shared.mac.type < e1000_pch_spt) 1170 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle); 1171 if (Adapter->osdep.io_reg_handle != NULL) 1172 ddi_regs_map_free(&Adapter->osdep.io_reg_handle); 1173 } 1174 1175 if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) { 1176 if (Adapter->osdep.cfg_handle != NULL) 1177 
pci_config_teardown(&Adapter->osdep.cfg_handle); 1178 } 1179 1180 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) { 1181 e1000g_destroy_locks(Adapter); 1182 } 1183 1184 if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) { 1185 e1000g_fm_fini(Adapter); 1186 } 1187 1188 mutex_enter(&e1000g_rx_detach_lock); 1189 if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) { 1190 devi_node = Adapter->priv_devi_node; 1191 devi_node->flag |= E1000G_PRIV_DEVI_DETACH; 1192 1193 if (devi_node->pending_rx_count == 0) { 1194 e1000g_free_priv_devi_node(devi_node); 1195 } 1196 } 1197 mutex_exit(&e1000g_rx_detach_lock); 1198 1199 kmem_free((caddr_t)Adapter, sizeof (struct e1000g)); 1200 1201 /* 1202 * Another hotplug spec requirement, 1203 * run ddi_set_driver_private(devinfo, null); 1204 */ 1205 ddi_set_driver_private(devinfo, NULL); 1206 } 1207 1208 /* 1209 * Get the BAR type and rnumber for a given PCI BAR offset 1210 */ 1211 static int 1212 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info) 1213 { 1214 pci_regspec_t *regs; 1215 uint_t regs_length; 1216 int type, rnumber, rcount; 1217 1218 ASSERT((bar_offset >= PCI_CONF_BASE0) && 1219 (bar_offset <= PCI_CONF_BASE5)); 1220 1221 /* 1222 * Get the DDI "reg" property 1223 */ 1224 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 1225 DDI_PROP_DONTPASS, "reg", (int **)®s, 1226 ®s_length) != DDI_PROP_SUCCESS) { 1227 return (DDI_FAILURE); 1228 } 1229 1230 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t); 1231 /* 1232 * Check the BAR offset 1233 */ 1234 for (rnumber = 0; rnumber < rcount; ++rnumber) { 1235 if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) { 1236 type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK; 1237 break; 1238 } 1239 } 1240 1241 ddi_prop_free(regs); 1242 1243 if (rnumber >= rcount) 1244 return (DDI_FAILURE); 1245 1246 switch (type) { 1247 case PCI_ADDR_CONFIG: 1248 bar_info->type = E1000G_BAR_CONFIG; 1249 break; 1250 case PCI_ADDR_IO: 1251 bar_info->type = E1000G_BAR_IO; 1252 break; 1253 case PCI_ADDR_MEM32: 1254 bar_info->type = E1000G_BAR_MEM32; 1255 break; 1256 case PCI_ADDR_MEM64: 1257 bar_info->type = E1000G_BAR_MEM64; 1258 break; 1259 default: 1260 return (DDI_FAILURE); 1261 } 1262 bar_info->rnumber = rnumber; 1263 return (DDI_SUCCESS); 1264 } 1265 1266 static void 1267 e1000g_init_locks(struct e1000g *Adapter) 1268 { 1269 e1000g_tx_ring_t *tx_ring; 1270 e1000g_rx_ring_t *rx_ring; 1271 1272 rw_init(&Adapter->chip_lock, NULL, 1273 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1274 mutex_init(&Adapter->link_lock, NULL, 1275 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1276 mutex_init(&Adapter->watchdog_lock, NULL, 1277 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1278 1279 tx_ring = Adapter->tx_ring; 1280 1281 mutex_init(&tx_ring->tx_lock, NULL, 1282 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1283 mutex_init(&tx_ring->usedlist_lock, NULL, 1284 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1285 mutex_init(&tx_ring->freelist_lock, NULL, 1286 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1287 1288 rx_ring = Adapter->rx_ring; 1289 1290 mutex_init(&rx_ring->rx_lock, NULL, 1291 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1292 1293 mutex_init(&Adapter->e1000g_led_lock, NULL, 1294 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1295 } 1296 1297 static void 1298 e1000g_destroy_locks(struct e1000g *Adapter) 1299 { 1300 e1000g_tx_ring_t *tx_ring; 1301 e1000g_rx_ring_t *rx_ring; 1302 1303 mutex_destroy(&Adapter->e1000g_led_lock); 1304 1305 tx_ring = Adapter->tx_ring; 1306 
mutex_destroy(&tx_ring->tx_lock); 1307 mutex_destroy(&tx_ring->usedlist_lock); 1308 mutex_destroy(&tx_ring->freelist_lock); 1309 1310 rx_ring = Adapter->rx_ring; 1311 mutex_destroy(&rx_ring->rx_lock); 1312 1313 mutex_destroy(&Adapter->link_lock); 1314 mutex_destroy(&Adapter->watchdog_lock); 1315 rw_destroy(&Adapter->chip_lock); 1316 1317 /* destory mutex initialized in shared code */ 1318 e1000_destroy_hw_mutex(&Adapter->shared); 1319 } 1320 1321 static int 1322 e1000g_resume(dev_info_t *devinfo) 1323 { 1324 struct e1000g *Adapter; 1325 1326 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1327 if (Adapter == NULL) 1328 e1000g_log(Adapter, CE_PANIC, 1329 "Instance pointer is null\n"); 1330 1331 if (Adapter->dip != devinfo) 1332 e1000g_log(Adapter, CE_PANIC, 1333 "Devinfo is not the same as saved devinfo\n"); 1334 1335 rw_enter(&Adapter->chip_lock, RW_WRITER); 1336 1337 if (Adapter->e1000g_state & E1000G_STARTED) { 1338 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { 1339 rw_exit(&Adapter->chip_lock); 1340 /* 1341 * We note the failure, but return success, as the 1342 * system is still usable without this controller. 1343 */ 1344 e1000g_log(Adapter, CE_WARN, 1345 "e1000g_resume: failed to restart controller\n"); 1346 return (DDI_SUCCESS); 1347 } 1348 /* Enable and start the watchdog timer */ 1349 enable_watchdog_timer(Adapter); 1350 } 1351 1352 Adapter->e1000g_state &= ~E1000G_SUSPENDED; 1353 1354 rw_exit(&Adapter->chip_lock); 1355 1356 return (DDI_SUCCESS); 1357 } 1358 1359 static int 1360 e1000g_suspend(dev_info_t *devinfo) 1361 { 1362 struct e1000g *Adapter; 1363 1364 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1365 if (Adapter == NULL) 1366 return (DDI_FAILURE); 1367 1368 rw_enter(&Adapter->chip_lock, RW_WRITER); 1369 1370 Adapter->e1000g_state |= E1000G_SUSPENDED; 1371 1372 /* if the port isn't plumbed, we can simply return */ 1373 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 1374 rw_exit(&Adapter->chip_lock); 1375 return (DDI_SUCCESS); 1376 } 1377 1378 e1000g_stop(Adapter, B_FALSE); 1379 1380 rw_exit(&Adapter->chip_lock); 1381 1382 /* Disable and stop all the timers */ 1383 disable_watchdog_timer(Adapter); 1384 stop_link_timer(Adapter); 1385 stop_82547_timer(Adapter->tx_ring); 1386 1387 return (DDI_SUCCESS); 1388 } 1389 1390 static int 1391 e1000g_init(struct e1000g *Adapter) 1392 { 1393 uint32_t pba; 1394 uint32_t high_water; 1395 struct e1000_hw *hw; 1396 clock_t link_timeout; 1397 int result; 1398 1399 hw = &Adapter->shared; 1400 1401 /* 1402 * reset to put the hardware in a known state 1403 * before we try to do anything with the eeprom 1404 */ 1405 mutex_enter(&e1000g_nvm_lock); 1406 result = e1000_reset_hw(hw); 1407 mutex_exit(&e1000g_nvm_lock); 1408 1409 if (result != E1000_SUCCESS) { 1410 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1411 goto init_fail; 1412 } 1413 1414 mutex_enter(&e1000g_nvm_lock); 1415 result = e1000_validate_nvm_checksum(hw); 1416 if (result < E1000_SUCCESS) { 1417 /* 1418 * Some PCI-E parts fail the first check due to 1419 * the link being in sleep state. Call it again, 1420 * if it fails a second time its a real issue. 1421 */ 1422 result = e1000_validate_nvm_checksum(hw); 1423 } 1424 mutex_exit(&e1000g_nvm_lock); 1425 1426 if (result < E1000_SUCCESS) { 1427 e1000g_log(Adapter, CE_WARN, 1428 "Invalid NVM checksum. 
Please contact " 1429 "the vendor to update the NVM."); 1430 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1431 goto init_fail; 1432 } 1433 1434 result = 0; 1435 #ifdef __sparc 1436 /* 1437 * First, we try to get the local ethernet address from OBP. If 1438 * failed, then we get it from the EEPROM of NIC card. 1439 */ 1440 result = e1000g_find_mac_address(Adapter); 1441 #endif 1442 /* Get the local ethernet address. */ 1443 if (!result) { 1444 mutex_enter(&e1000g_nvm_lock); 1445 result = e1000_read_mac_addr(hw); 1446 mutex_exit(&e1000g_nvm_lock); 1447 } 1448 1449 if (result < E1000_SUCCESS) { 1450 e1000g_log(Adapter, CE_WARN, "Read mac addr failed"); 1451 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1452 goto init_fail; 1453 } 1454 1455 /* check for valid mac address */ 1456 if (!is_valid_mac_addr(hw->mac.addr)) { 1457 e1000g_log(Adapter, CE_WARN, "Invalid mac addr"); 1458 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1459 goto init_fail; 1460 } 1461 1462 /* Set LAA state for 82571 chipset */ 1463 e1000_set_laa_state_82571(hw, B_TRUE); 1464 1465 /* Master Latency Timer implementation */ 1466 if (Adapter->master_latency_timer) { 1467 pci_config_put8(Adapter->osdep.cfg_handle, 1468 PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer); 1469 } 1470 1471 if (hw->mac.type < e1000_82547) { 1472 /* 1473 * Total FIFO is 64K 1474 */ 1475 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1476 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 1477 else 1478 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 1479 } else if ((hw->mac.type == e1000_82571) || 1480 (hw->mac.type == e1000_82572) || 1481 (hw->mac.type == e1000_80003es2lan)) { 1482 /* 1483 * Total FIFO is 48K 1484 */ 1485 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1486 pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */ 1487 else 1488 pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */ 1489 } else if (hw->mac.type == e1000_82573) { 1490 pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */ 1491 } else if (hw->mac.type == e1000_82574) { 1492 /* Keep adapter default: 20K for Rx, 20K for Tx */ 1493 pba = E1000_READ_REG(hw, E1000_PBA); 1494 } else if (hw->mac.type == e1000_ich8lan) { 1495 pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */ 1496 } else if (hw->mac.type == e1000_ich9lan) { 1497 pba = E1000_PBA_10K; 1498 } else if (hw->mac.type == e1000_ich10lan) { 1499 pba = E1000_PBA_10K; 1500 } else if (hw->mac.type == e1000_pchlan) { 1501 pba = E1000_PBA_26K; 1502 } else if (hw->mac.type == e1000_pch2lan) { 1503 pba = E1000_PBA_26K; 1504 } else if (hw->mac.type == e1000_pch_lpt) { 1505 pba = E1000_PBA_26K; 1506 } else if (hw->mac.type == e1000_pch_spt) { 1507 pba = E1000_PBA_26K; 1508 } else if (hw->mac.type == e1000_pch_cnp) { 1509 pba = E1000_PBA_26K; 1510 } else if (hw->mac.type == e1000_pch_tgp) { 1511 pba = E1000_PBA_26K; 1512 } else if (hw->mac.type == e1000_pch_adp) { 1513 pba = E1000_PBA_26K; 1514 } else if (hw->mac.type == e1000_pch_mtp) { 1515 pba = E1000_PBA_26K; 1516 } else if (hw->mac.type == e1000_pch_lnp) { 1517 pba = E1000_PBA_26K; 1518 } else if (hw->mac.type == e1000_pch_rpl) { 1519 pba = E1000_PBA_26K; 1520 } else if (hw->mac.type == e1000_pch_arl) { 1521 pba = E1000_PBA_26K; 1522 } else if (hw->mac.type == e1000_pch_ptp) { 1523 pba = E1000_PBA_26K; 1524 } else if (hw->mac.type == e1000_pch_nvl) { 1525 pba = E1000_PBA_26K; 1526 } else { 1527 /* 1528 * Total FIFO is 40K 1529 */ 1530 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1531 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ 1532 else 1533 pba 
= E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ 1534 } 1535 E1000_WRITE_REG(hw, E1000_PBA, pba); 1536 1537 /* 1538 * These parameters set thresholds for the adapter's generation(Tx) 1539 * and response(Rx) to Ethernet PAUSE frames. These are just threshold 1540 * settings. Flow control is enabled or disabled in the configuration 1541 * file. 1542 * High-water mark is set down from the top of the rx fifo (not 1543 * sensitive to max_frame_size) and low-water is set just below 1544 * high-water mark. 1545 * The high water mark must be low enough to fit one full frame above 1546 * it in the rx FIFO. Should be the lower of: 1547 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early 1548 * receive size (assuming ERT set to E1000_ERT_2048), or the full 1549 * Rx FIFO size minus one full frame. 1550 */ 1551 high_water = min(((pba << 10) * 9 / 10), 1552 ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 || 1553 hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ? 1554 ((pba << 10) - (E1000_ERT_2048 << 3)) : 1555 ((pba << 10) - Adapter->max_frame_size))); 1556 1557 hw->fc.high_water = high_water & 0xFFF8; 1558 hw->fc.low_water = hw->fc.high_water - 8; 1559 1560 if (hw->mac.type == e1000_80003es2lan) 1561 hw->fc.pause_time = 0xFFFF; 1562 else 1563 hw->fc.pause_time = E1000_FC_PAUSE_TIME; 1564 hw->fc.send_xon = B_TRUE; 1565 1566 /* 1567 * Reset the adapter hardware the second time. 1568 */ 1569 mutex_enter(&e1000g_nvm_lock); 1570 result = e1000_reset_hw(hw); 1571 mutex_exit(&e1000g_nvm_lock); 1572 1573 if (result != E1000_SUCCESS) { 1574 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1575 goto init_fail; 1576 } 1577 1578 /* disable wakeup control by default */ 1579 if (hw->mac.type >= e1000_82544) 1580 E1000_WRITE_REG(hw, E1000_WUC, 0); 1581 1582 /* 1583 * MWI should be disabled on 82546. 1584 */ 1585 if (hw->mac.type == e1000_82546) 1586 e1000_pci_clear_mwi(hw); 1587 else 1588 e1000_pci_set_mwi(hw); 1589 1590 /* 1591 * Configure/Initialize hardware 1592 */ 1593 mutex_enter(&e1000g_nvm_lock); 1594 result = e1000_init_hw(hw); 1595 mutex_exit(&e1000g_nvm_lock); 1596 1597 if (result < E1000_SUCCESS) { 1598 e1000g_log(Adapter, CE_WARN, "Initialize hw failed"); 1599 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1600 goto init_fail; 1601 } 1602 1603 /* 1604 * Restore LED settings to the default from EEPROM 1605 * to meet the standard for Sun platforms. 1606 */ 1607 (void) e1000_cleanup_led(hw); 1608 1609 /* Disable Smart Power Down */ 1610 phy_spd_state(hw, B_FALSE); 1611 1612 /* Make sure driver has control */ 1613 e1000g_get_driver_control(hw); 1614 1615 /* 1616 * Initialize unicast addresses. 1617 */ 1618 e1000g_init_unicst(Adapter); 1619 1620 /* 1621 * Setup and initialize the mctable structures. 
After this routine 1622 * completes Multicast table will be set 1623 */ 1624 e1000_update_mc_addr_list(hw, 1625 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 1626 msec_delay(5); 1627 1628 /* 1629 * Implement Adaptive IFS 1630 */ 1631 e1000_reset_adaptive(hw); 1632 1633 /* Setup Interrupt Throttling Register */ 1634 if (hw->mac.type >= e1000_82540) { 1635 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate); 1636 } else 1637 Adapter->intr_adaptive = B_FALSE; 1638 1639 /* Start the timer for link setup */ 1640 if (hw->mac.autoneg) 1641 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000); 1642 else 1643 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000); 1644 1645 mutex_enter(&Adapter->link_lock); 1646 if (hw->phy.autoneg_wait_to_complete) { 1647 Adapter->link_complete = B_TRUE; 1648 } else { 1649 Adapter->link_complete = B_FALSE; 1650 Adapter->link_tid = timeout(e1000g_link_timer, 1651 (void *)Adapter, link_timeout); 1652 } 1653 mutex_exit(&Adapter->link_lock); 1654 1655 /* Save the state of the phy */ 1656 e1000g_get_phy_state(Adapter); 1657 1658 e1000g_param_sync(Adapter); 1659 1660 Adapter->init_count++; 1661 1662 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 1663 goto init_fail; 1664 } 1665 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1666 goto init_fail; 1667 } 1668 1669 Adapter->poll_mode = e1000g_poll_mode; 1670 1671 return (DDI_SUCCESS); 1672 1673 init_fail: 1674 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1675 return (DDI_FAILURE); 1676 } 1677 1678 static int 1679 e1000g_alloc_rx_data(struct e1000g *Adapter) 1680 { 1681 e1000g_rx_ring_t *rx_ring; 1682 e1000g_rx_data_t *rx_data; 1683 1684 rx_ring = Adapter->rx_ring; 1685 1686 rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP); 1687 1688 if (rx_data == NULL) 1689 return (DDI_FAILURE); 1690 1691 rx_data->priv_devi_node = Adapter->priv_devi_node; 1692 rx_data->rx_ring = rx_ring; 1693 1694 mutex_init(&rx_data->freelist_lock, NULL, 1695 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1696 mutex_init(&rx_data->recycle_lock, NULL, 1697 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1698 1699 rx_ring->rx_data = rx_data; 1700 1701 return (DDI_SUCCESS); 1702 } 1703 1704 void 1705 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data) 1706 { 1707 rx_sw_packet_t *packet, *next_packet; 1708 1709 if (rx_data == NULL) 1710 return; 1711 1712 packet = rx_data->packet_area; 1713 while (packet != NULL) { 1714 next_packet = packet->next; 1715 e1000g_free_rx_sw_packet(packet, B_TRUE); 1716 packet = next_packet; 1717 } 1718 rx_data->packet_area = NULL; 1719 } 1720 1721 void 1722 e1000g_free_rx_data(e1000g_rx_data_t *rx_data) 1723 { 1724 if (rx_data == NULL) 1725 return; 1726 1727 mutex_destroy(&rx_data->freelist_lock); 1728 mutex_destroy(&rx_data->recycle_lock); 1729 1730 kmem_free(rx_data, sizeof (e1000g_rx_data_t)); 1731 } 1732 1733 /* 1734 * Check if the link is up 1735 */ 1736 static boolean_t 1737 e1000g_link_up(struct e1000g *Adapter) 1738 { 1739 struct e1000_hw *hw = &Adapter->shared; 1740 boolean_t link_up = B_FALSE; 1741 1742 /* 1743 * get_link_status is set in the interrupt handler on link-status-change 1744 * or rx sequence error interrupt. get_link_status will stay 1745 * false until the e1000_check_for_link establishes link only 1746 * for copper adapters. 
1747 */ 1748 switch (hw->phy.media_type) { 1749 case e1000_media_type_copper: 1750 if (hw->mac.get_link_status) { 1751 /* 1752 * SPT and newer devices need a bit of extra time before 1753 * we ask them. 1754 */ 1755 if (hw->mac.type >= e1000_pch_spt) 1756 msec_delay(50); 1757 (void) e1000_check_for_link(hw); 1758 if ((E1000_READ_REG(hw, E1000_STATUS) & 1759 E1000_STATUS_LU)) { 1760 link_up = B_TRUE; 1761 } else { 1762 link_up = !hw->mac.get_link_status; 1763 } 1764 } else { 1765 link_up = B_TRUE; 1766 } 1767 break; 1768 case e1000_media_type_fiber: 1769 (void) e1000_check_for_link(hw); 1770 link_up = (E1000_READ_REG(hw, E1000_STATUS) & 1771 E1000_STATUS_LU); 1772 break; 1773 case e1000_media_type_internal_serdes: 1774 (void) e1000_check_for_link(hw); 1775 link_up = hw->mac.serdes_has_link; 1776 break; 1777 } 1778 1779 return (link_up); 1780 } 1781 1782 static void 1783 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp) 1784 { 1785 struct iocblk *iocp; 1786 struct e1000g *e1000gp; 1787 enum ioc_reply status; 1788 1789 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr; 1790 iocp->ioc_error = 0; 1791 e1000gp = (struct e1000g *)arg; 1792 1793 ASSERT(e1000gp); 1794 if (e1000gp == NULL) { 1795 miocnak(q, mp, 0, EINVAL); 1796 return; 1797 } 1798 1799 rw_enter(&e1000gp->chip_lock, RW_READER); 1800 if (e1000gp->e1000g_state & E1000G_SUSPENDED) { 1801 rw_exit(&e1000gp->chip_lock); 1802 miocnak(q, mp, 0, EINVAL); 1803 return; 1804 } 1805 rw_exit(&e1000gp->chip_lock); 1806 1807 switch (iocp->ioc_cmd) { 1808 1809 case LB_GET_INFO_SIZE: 1810 case LB_GET_INFO: 1811 case LB_GET_MODE: 1812 case LB_SET_MODE: 1813 status = e1000g_loopback_ioctl(e1000gp, iocp, mp); 1814 break; 1815 1816 1817 #ifdef E1000G_DEBUG 1818 case E1000G_IOC_REG_PEEK: 1819 case E1000G_IOC_REG_POKE: 1820 status = e1000g_pp_ioctl(e1000gp, iocp, mp); 1821 break; 1822 case E1000G_IOC_CHIP_RESET: 1823 e1000gp->reset_count++; 1824 if (e1000g_reset_adapter(e1000gp)) 1825 status = IOC_ACK; 1826 else 1827 status = IOC_INVAL; 1828 break; 1829 #endif 1830 default: 1831 status = IOC_INVAL; 1832 break; 1833 } 1834 1835 /* 1836 * Decide how to reply 1837 */ 1838 switch (status) { 1839 default: 1840 case IOC_INVAL: 1841 /* 1842 * Error, reply with a NAK and EINVAL or the specified error 1843 */ 1844 miocnak(q, mp, 0, iocp->ioc_error == 0 ? 1845 EINVAL : iocp->ioc_error); 1846 break; 1847 1848 case IOC_DONE: 1849 /* 1850 * OK, reply already sent 1851 */ 1852 break; 1853 1854 case IOC_ACK: 1855 /* 1856 * OK, reply with an ACK 1857 */ 1858 miocack(q, mp, 0, 0); 1859 break; 1860 1861 case IOC_REPLY: 1862 /* 1863 * OK, send prepared reply as ACK or NAK 1864 */ 1865 mp->b_datap->db_type = iocp->ioc_error == 0 ? 1866 M_IOCACK : M_IOCNAK; 1867 qreply(q, mp); 1868 break; 1869 } 1870 } 1871 1872 /* 1873 * The default value of e1000g_poll_mode == 0 assumes that the NIC is 1874 * capable of supporting only one interrupt and we shouldn't disable 1875 * the physical interrupt. In this case we let the interrupt come and 1876 * we queue the packets in the rx ring itself in case we are in polling 1877 * mode (better latency but slightly lower performance and a very 1878 * high intrrupt count in mpstat which is harmless). 1879 * 1880 * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt 1881 * which can be disabled in poll mode. This gives better overall 1882 * throughput (compared to the mode above), shows very low interrupt 1883 * count but has slightly higher latency since we pick the packets when 1884 * the poll thread does polling. 
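 *
 * For experimentation the flag is typically set at boot time through
 * /etc/system (for example: set e1000g:e1000g_poll_mode = 1); this file
 * only reads it, copying it into Adapter->poll_mode in e1000g_init().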
1885 * 1886 * Currently, this flag should be enabled only while doing performance 1887 * measurement or when it can be guaranteed that entire NIC going 1888 * in poll mode will not harm any traffic like cluster heartbeat etc. 1889 */ 1890 int e1000g_poll_mode = 0; 1891 1892 /* 1893 * Called from the upper layers when driver is in polling mode to 1894 * pick up any queued packets. Care should be taken to not block 1895 * this thread. 1896 */ 1897 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup) 1898 { 1899 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg; 1900 mblk_t *mp = NULL; 1901 mblk_t *tail; 1902 struct e1000g *adapter; 1903 1904 adapter = rx_ring->adapter; 1905 1906 rw_enter(&adapter->chip_lock, RW_READER); 1907 1908 if (adapter->e1000g_state & E1000G_SUSPENDED) { 1909 rw_exit(&adapter->chip_lock); 1910 return (NULL); 1911 } 1912 1913 mutex_enter(&rx_ring->rx_lock); 1914 mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup); 1915 mutex_exit(&rx_ring->rx_lock); 1916 rw_exit(&adapter->chip_lock); 1917 return (mp); 1918 } 1919 1920 static int 1921 e1000g_m_start(void *arg) 1922 { 1923 struct e1000g *Adapter = (struct e1000g *)arg; 1924 1925 rw_enter(&Adapter->chip_lock, RW_WRITER); 1926 1927 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 1928 rw_exit(&Adapter->chip_lock); 1929 return (ECANCELED); 1930 } 1931 1932 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { 1933 rw_exit(&Adapter->chip_lock); 1934 return (ENOTACTIVE); 1935 } 1936 1937 Adapter->e1000g_state |= E1000G_STARTED; 1938 1939 rw_exit(&Adapter->chip_lock); 1940 1941 /* Enable and start the watchdog timer */ 1942 enable_watchdog_timer(Adapter); 1943 1944 return (0); 1945 } 1946 1947 static int 1948 e1000g_start(struct e1000g *Adapter, boolean_t global) 1949 { 1950 e1000g_rx_data_t *rx_data; 1951 1952 if (global) { 1953 if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) { 1954 e1000g_log(Adapter, CE_WARN, "Allocate rx data failed"); 1955 goto start_fail; 1956 } 1957 1958 /* Allocate dma resources for descriptors and buffers */ 1959 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) { 1960 e1000g_log(Adapter, CE_WARN, 1961 "Alloc DMA resources failed"); 1962 goto start_fail; 1963 } 1964 Adapter->rx_buffer_setup = B_FALSE; 1965 } 1966 1967 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) { 1968 if (e1000g_init(Adapter) != DDI_SUCCESS) { 1969 e1000g_log(Adapter, CE_WARN, 1970 "Adapter initialization failed"); 1971 goto start_fail; 1972 } 1973 } 1974 1975 /* Setup and initialize the transmit structures */ 1976 e1000g_tx_setup(Adapter); 1977 msec_delay(5); 1978 1979 /* Setup and initialize the receive structures */ 1980 e1000g_rx_setup(Adapter); 1981 msec_delay(5); 1982 1983 /* Restore the e1000g promiscuous mode */ 1984 e1000g_restore_promisc(Adapter); 1985 1986 e1000g_mask_interrupt(Adapter); 1987 1988 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 1989 1990 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1991 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1992 goto start_fail; 1993 } 1994 1995 return (DDI_SUCCESS); 1996 1997 start_fail: 1998 rx_data = Adapter->rx_ring->rx_data; 1999 2000 if (global) { 2001 e1000g_release_dma_resources(Adapter); 2002 e1000g_free_rx_pending_buffers(rx_data); 2003 e1000g_free_rx_data(rx_data); 2004 } 2005 2006 mutex_enter(&e1000g_nvm_lock); 2007 (void) e1000_reset_hw(&Adapter->shared); 2008 mutex_exit(&e1000g_nvm_lock); 2009 2010 return (DDI_FAILURE); 2011 } 2012 2013 /* 2014 * The I219 has the curious property that if the descriptor 
rings are not 2015 * emptied before resetting the hardware or before changing the device state 2016 * based on runtime power management, it'll cause the card to hang. This can 2017 * then only be fixed by a PCI reset. As such, for the I219 and it alone, we 2018 * have to flush the rings if we're in this state. 2019 */ 2020 static void 2021 e1000g_flush_desc_rings(struct e1000g *Adapter) 2022 { 2023 struct e1000_hw *hw = &Adapter->shared; 2024 u16 hang_state; 2025 u32 fext_nvm11, tdlen; 2026 2027 /* First, disable MULR fix in FEXTNVM11 */ 2028 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11); 2029 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; 2030 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11); 2031 2032 /* do nothing if we're not in faulty state, or if the queue is empty */ 2033 tdlen = E1000_READ_REG(hw, E1000_TDLEN(0)); 2034 hang_state = pci_config_get16(Adapter->osdep.cfg_handle, 2035 PCICFG_DESC_RING_STATUS); 2036 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) 2037 return; 2038 e1000g_flush_tx_ring(Adapter); 2039 2040 /* recheck, maybe the fault is caused by the rx ring */ 2041 hang_state = pci_config_get16(Adapter->osdep.cfg_handle, 2042 PCICFG_DESC_RING_STATUS); 2043 if (hang_state & FLUSH_DESC_REQUIRED) 2044 e1000g_flush_rx_ring(Adapter); 2045 2046 } 2047 2048 static void 2049 e1000g_m_stop(void *arg) 2050 { 2051 struct e1000g *Adapter = (struct e1000g *)arg; 2052 2053 /* Drain tx sessions */ 2054 (void) e1000g_tx_drain(Adapter); 2055 2056 rw_enter(&Adapter->chip_lock, RW_WRITER); 2057 2058 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2059 rw_exit(&Adapter->chip_lock); 2060 return; 2061 } 2062 Adapter->e1000g_state &= ~E1000G_STARTED; 2063 e1000g_stop(Adapter, B_TRUE); 2064 2065 rw_exit(&Adapter->chip_lock); 2066 2067 /* Disable and stop all the timers */ 2068 disable_watchdog_timer(Adapter); 2069 stop_link_timer(Adapter); 2070 stop_82547_timer(Adapter->tx_ring); 2071 } 2072 2073 static void 2074 e1000g_stop(struct e1000g *Adapter, boolean_t global) 2075 { 2076 private_devi_list_t *devi_node; 2077 e1000g_rx_data_t *rx_data; 2078 int result; 2079 2080 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT; 2081 2082 /* Stop the chip and release pending resources */ 2083 2084 /* Tell firmware driver is no longer in control */ 2085 e1000g_release_driver_control(&Adapter->shared); 2086 2087 e1000g_clear_all_interrupts(Adapter); 2088 2089 mutex_enter(&e1000g_nvm_lock); 2090 result = e1000_reset_hw(&Adapter->shared); 2091 mutex_exit(&e1000g_nvm_lock); 2092 2093 if (result != E1000_SUCCESS) { 2094 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 2095 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 2096 } 2097 2098 mutex_enter(&Adapter->link_lock); 2099 Adapter->link_complete = B_FALSE; 2100 mutex_exit(&Adapter->link_lock); 2101 2102 /* Release resources still held by the TX descriptors */ 2103 e1000g_tx_clean(Adapter); 2104 2105 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 2106 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 2107 2108 /* Clean the pending rx jumbo packet fragment */ 2109 e1000g_rx_clean(Adapter); 2110 2111 /* 2112 * The I219, eg. the pch_spt, has bugs such that we must ensure that 2113 * rings are flushed before we do anything else. This must be done 2114 * before we release DMA resources. 
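 *
 * Editorial note on ordering: e1000g_flush_desc_rings() only acts when
 * the FLUSH_DESC_REQUIRED bit is set in the PCICFG_DESC_RING_STATUS
 * config word and TDLEN(0) is non-zero, and it must run before
 * e1000g_release_dma_resources() below tears down the descriptor
 * memory.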
2115 */ 2116 if (Adapter->shared.mac.type >= e1000_pch_spt) 2117 e1000g_flush_desc_rings(Adapter); 2118 2119 if (global) { 2120 e1000g_release_dma_resources(Adapter); 2121 2122 mutex_enter(&e1000g_rx_detach_lock); 2123 rx_data = Adapter->rx_ring->rx_data; 2124 rx_data->flag |= E1000G_RX_STOPPED; 2125 2126 if (rx_data->pending_count == 0) { 2127 e1000g_free_rx_pending_buffers(rx_data); 2128 e1000g_free_rx_data(rx_data); 2129 } else { 2130 devi_node = rx_data->priv_devi_node; 2131 if (devi_node != NULL) 2132 atomic_inc_32(&devi_node->pending_rx_count); 2133 else 2134 atomic_inc_32(&Adapter->pending_rx_count); 2135 } 2136 mutex_exit(&e1000g_rx_detach_lock); 2137 } 2138 2139 if (Adapter->link_state != LINK_STATE_UNKNOWN) { 2140 Adapter->link_state = LINK_STATE_UNKNOWN; 2141 if (!Adapter->reset_flag) 2142 mac_link_update(Adapter->mh, Adapter->link_state); 2143 } 2144 } 2145 2146 static void 2147 e1000g_rx_clean(struct e1000g *Adapter) 2148 { 2149 e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data; 2150 2151 if (rx_data == NULL) 2152 return; 2153 2154 if (rx_data->rx_mblk != NULL) { 2155 freemsg(rx_data->rx_mblk); 2156 rx_data->rx_mblk = NULL; 2157 rx_data->rx_mblk_tail = NULL; 2158 rx_data->rx_mblk_len = 0; 2159 } 2160 } 2161 2162 static void 2163 e1000g_tx_clean(struct e1000g *Adapter) 2164 { 2165 e1000g_tx_ring_t *tx_ring; 2166 p_tx_sw_packet_t packet; 2167 mblk_t *mp; 2168 mblk_t *nmp; 2169 uint32_t packet_count; 2170 2171 tx_ring = Adapter->tx_ring; 2172 2173 /* 2174 * Here we don't need to protect the lists using 2175 * the usedlist_lock and freelist_lock, for they 2176 * have been protected by the chip_lock. 2177 */ 2178 mp = NULL; 2179 nmp = NULL; 2180 packet_count = 0; 2181 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list); 2182 while (packet != NULL) { 2183 if (packet->mp != NULL) { 2184 /* Assemble the message chain */ 2185 if (mp == NULL) { 2186 mp = packet->mp; 2187 nmp = packet->mp; 2188 } else { 2189 nmp->b_next = packet->mp; 2190 nmp = packet->mp; 2191 } 2192 /* Disconnect the message from the sw packet */ 2193 packet->mp = NULL; 2194 } 2195 2196 e1000g_free_tx_swpkt(packet); 2197 packet_count++; 2198 2199 packet = (p_tx_sw_packet_t) 2200 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link); 2201 } 2202 2203 if (mp != NULL) 2204 freemsgchain(mp); 2205 2206 if (packet_count > 0) { 2207 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list); 2208 QUEUE_INIT_LIST(&tx_ring->used_list); 2209 2210 /* Setup TX descriptor pointers */ 2211 tx_ring->tbd_next = tx_ring->tbd_first; 2212 tx_ring->tbd_oldest = tx_ring->tbd_first; 2213 2214 /* Setup our HW Tx Head & Tail descriptor pointers */ 2215 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0); 2216 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0); 2217 } 2218 } 2219 2220 static boolean_t 2221 e1000g_tx_drain(struct e1000g *Adapter) 2222 { 2223 int i; 2224 boolean_t done; 2225 e1000g_tx_ring_t *tx_ring; 2226 2227 tx_ring = Adapter->tx_ring; 2228 2229 /* Allow up to 'wsdraintime' for pending xmit's to complete. */ 2230 for (i = 0; i < TX_DRAIN_TIME; i++) { 2231 mutex_enter(&tx_ring->usedlist_lock); 2232 done = IS_QUEUE_EMPTY(&tx_ring->used_list); 2233 mutex_exit(&tx_ring->usedlist_lock); 2234 2235 if (done) 2236 break; 2237 2238 msec_delay(1); 2239 } 2240 2241 return (done); 2242 } 2243 2244 static boolean_t 2245 e1000g_rx_drain(struct e1000g *Adapter) 2246 { 2247 int i; 2248 boolean_t done; 2249 2250 /* 2251 * Allow up to RX_DRAIN_TIME for pending received packets to complete. 
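 *
 * Illustrative timing: the loop below re-checks pending_rx_count once
 * per millisecond, so the worst case is roughly RX_DRAIN_TIME * 1 ms
 * before giving up and returning B_FALSE.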
2252 */ 2253 for (i = 0; i < RX_DRAIN_TIME; i++) { 2254 done = (Adapter->pending_rx_count == 0); 2255 2256 if (done) 2257 break; 2258 2259 msec_delay(1); 2260 } 2261 2262 return (done); 2263 } 2264 2265 static boolean_t 2266 e1000g_reset_adapter(struct e1000g *Adapter) 2267 { 2268 /* Disable and stop all the timers */ 2269 disable_watchdog_timer(Adapter); 2270 stop_link_timer(Adapter); 2271 stop_82547_timer(Adapter->tx_ring); 2272 2273 rw_enter(&Adapter->chip_lock, RW_WRITER); 2274 2275 if (Adapter->stall_flag) { 2276 Adapter->stall_flag = B_FALSE; 2277 Adapter->reset_flag = B_TRUE; 2278 } 2279 2280 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 2281 rw_exit(&Adapter->chip_lock); 2282 return (B_TRUE); 2283 } 2284 2285 e1000g_stop(Adapter, B_FALSE); 2286 2287 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { 2288 rw_exit(&Adapter->chip_lock); 2289 e1000g_log(Adapter, CE_WARN, "Reset failed"); 2290 return (B_FALSE); 2291 } 2292 2293 rw_exit(&Adapter->chip_lock); 2294 2295 /* Enable and start the watchdog timer */ 2296 enable_watchdog_timer(Adapter); 2297 2298 return (B_TRUE); 2299 } 2300 2301 boolean_t 2302 e1000g_global_reset(struct e1000g *Adapter) 2303 { 2304 /* Disable and stop all the timers */ 2305 disable_watchdog_timer(Adapter); 2306 stop_link_timer(Adapter); 2307 stop_82547_timer(Adapter->tx_ring); 2308 2309 rw_enter(&Adapter->chip_lock, RW_WRITER); 2310 2311 e1000g_stop(Adapter, B_TRUE); 2312 2313 Adapter->init_count = 0; 2314 2315 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { 2316 rw_exit(&Adapter->chip_lock); 2317 e1000g_log(Adapter, CE_WARN, "Reset failed"); 2318 return (B_FALSE); 2319 } 2320 2321 rw_exit(&Adapter->chip_lock); 2322 2323 /* Enable and start the watchdog timer */ 2324 enable_watchdog_timer(Adapter); 2325 2326 return (B_TRUE); 2327 } 2328 2329 /* 2330 * e1000g_intr_pciexpress - ISR for PCI Express chipsets 2331 * 2332 * This interrupt service routine is for PCI-Express adapters. 2333 * The ICR contents is valid only when the E1000_ICR_INT_ASSERTED 2334 * bit is set. 2335 */ 2336 static uint_t 2337 e1000g_intr_pciexpress(caddr_t arg, caddr_t arg1 __unused) 2338 { 2339 struct e1000g *Adapter; 2340 uint32_t icr; 2341 2342 Adapter = (struct e1000g *)(uintptr_t)arg; 2343 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 2344 2345 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2346 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2347 return (DDI_INTR_CLAIMED); 2348 } 2349 2350 if (icr & E1000_ICR_INT_ASSERTED) { 2351 /* 2352 * E1000_ICR_INT_ASSERTED bit was set: 2353 * Read(Clear) the ICR, claim this interrupt, 2354 * look for work to do. 2355 */ 2356 e1000g_intr_work(Adapter, icr); 2357 return (DDI_INTR_CLAIMED); 2358 } else { 2359 /* 2360 * E1000_ICR_INT_ASSERTED bit was not set: 2361 * Don't claim this interrupt, return immediately. 2362 */ 2363 return (DDI_INTR_UNCLAIMED); 2364 } 2365 } 2366 2367 /* 2368 * e1000g_intr - ISR for PCI/PCI-X chipsets 2369 * 2370 * This interrupt service routine is for PCI/PCI-X adapters. 2371 * We check the ICR contents no matter the E1000_ICR_INT_ASSERTED 2372 * bit is set or not. 
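 *
 * Editorial contrast with e1000g_intr_pciexpress() above: on PCI/PCI-X
 * the read of ICR both returns and clears the pending causes, so any
 * non-zero value is claimed and handed to e1000g_intr_work(); only a
 * zero ICR returns DDI_INTR_UNCLAIMED so a shared interrupt can be
 * passed on.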
2373 */ 2374 static uint_t 2375 e1000g_intr(caddr_t arg, caddr_t arg1 __unused) 2376 { 2377 struct e1000g *Adapter; 2378 uint32_t icr; 2379 2380 Adapter = (struct e1000g *)(uintptr_t)arg; 2381 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 2382 2383 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2384 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2385 return (DDI_INTR_CLAIMED); 2386 } 2387 2388 if (icr) { 2389 /* 2390 * Any bit was set in ICR: 2391 * Read(Clear) the ICR, claim this interrupt, 2392 * look for work to do. 2393 */ 2394 e1000g_intr_work(Adapter, icr); 2395 return (DDI_INTR_CLAIMED); 2396 } else { 2397 /* 2398 * No bit was set in ICR: 2399 * Don't claim this interrupt, return immediately. 2400 */ 2401 return (DDI_INTR_UNCLAIMED); 2402 } 2403 } 2404 2405 /* 2406 * e1000g_intr_work - actual processing of ISR 2407 * 2408 * Read(clear) the ICR contents and call appropriate interrupt 2409 * processing routines. 2410 */ 2411 static void 2412 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr) 2413 { 2414 struct e1000_hw *hw; 2415 hw = &Adapter->shared; 2416 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 2417 2418 Adapter->rx_pkt_cnt = 0; 2419 Adapter->tx_pkt_cnt = 0; 2420 2421 rw_enter(&Adapter->chip_lock, RW_READER); 2422 2423 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2424 rw_exit(&Adapter->chip_lock); 2425 return; 2426 } 2427 /* 2428 * Here we need to check the "e1000g_state" flag within the chip_lock to 2429 * ensure the receive routine will not execute when the adapter is 2430 * being reset. 2431 */ 2432 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 2433 rw_exit(&Adapter->chip_lock); 2434 return; 2435 } 2436 2437 if (icr & E1000_ICR_RXT0) { 2438 mblk_t *mp = NULL; 2439 mblk_t *tail = NULL; 2440 e1000g_rx_ring_t *rx_ring; 2441 2442 rx_ring = Adapter->rx_ring; 2443 mutex_enter(&rx_ring->rx_lock); 2444 /* 2445 * Sometimes with legacy interrupts, it possible that 2446 * there is a single interrupt for Rx/Tx. In which 2447 * case, if poll flag is set, we shouldn't really 2448 * be doing Rx processing. 2449 */ 2450 if (!rx_ring->poll_flag) 2451 mp = e1000g_receive(rx_ring, &tail, 2452 E1000G_CHAIN_NO_LIMIT); 2453 mutex_exit(&rx_ring->rx_lock); 2454 rw_exit(&Adapter->chip_lock); 2455 if (mp != NULL) 2456 mac_rx_ring(Adapter->mh, rx_ring->mrh, 2457 mp, rx_ring->ring_gen_num); 2458 } else 2459 rw_exit(&Adapter->chip_lock); 2460 2461 if (icr & E1000_ICR_TXDW) { 2462 if (!Adapter->tx_intr_enable) 2463 e1000g_clear_tx_interrupt(Adapter); 2464 2465 /* Recycle the tx descriptors */ 2466 rw_enter(&Adapter->chip_lock, RW_READER); 2467 (void) e1000g_recycle(tx_ring); 2468 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr); 2469 rw_exit(&Adapter->chip_lock); 2470 2471 if (tx_ring->resched_needed && 2472 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) { 2473 tx_ring->resched_needed = B_FALSE; 2474 mac_tx_update(Adapter->mh); 2475 E1000G_STAT(tx_ring->stat_reschedule); 2476 } 2477 } 2478 2479 /* 2480 * The Receive Sequence errors RXSEQ and the link status change LSC 2481 * are checked to detect that the cable has been pulled out. For 2482 * the Wiseman 2.0 silicon, the receive sequence errors interrupt 2483 * are an indication that cable is not connected. 
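 *
 * Editorial note: the handling below stops the watchdog, forces
 * shared.mac.get_link_status to B_TRUE so that e1000g_link_check() and
 * e1000_check_for_link() re-read the PHY rather than trusting cached
 * state, and then restarts the watchdog once the new state has been
 * reported.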
2484 */ 2485 if ((icr & E1000_ICR_RXSEQ) || 2486 (icr & E1000_ICR_LSC) || 2487 (icr & E1000_ICR_GPI_EN1)) { 2488 boolean_t link_changed; 2489 timeout_id_t tid = 0; 2490 2491 stop_watchdog_timer(Adapter); 2492 2493 rw_enter(&Adapter->chip_lock, RW_WRITER); 2494 2495 /* 2496 * Because we got a link-status-change interrupt, force 2497 * e1000_check_for_link() to look at phy 2498 */ 2499 Adapter->shared.mac.get_link_status = B_TRUE; 2500 2501 /* e1000g_link_check takes care of link status change */ 2502 link_changed = e1000g_link_check(Adapter); 2503 2504 /* Get new phy state */ 2505 e1000g_get_phy_state(Adapter); 2506 2507 /* 2508 * If the link timer has not timed out, we'll not notify 2509 * the upper layer with any link state until the link is up. 2510 */ 2511 if (link_changed && !Adapter->link_complete) { 2512 if (Adapter->link_state == LINK_STATE_UP) { 2513 mutex_enter(&Adapter->link_lock); 2514 Adapter->link_complete = B_TRUE; 2515 tid = Adapter->link_tid; 2516 Adapter->link_tid = 0; 2517 mutex_exit(&Adapter->link_lock); 2518 } else { 2519 link_changed = B_FALSE; 2520 } 2521 } 2522 rw_exit(&Adapter->chip_lock); 2523 2524 if (link_changed) { 2525 if (tid != 0) 2526 (void) untimeout(tid); 2527 2528 /* 2529 * Workaround for esb2. Data stuck in fifo on a link 2530 * down event. Stop receiver here and reset in watchdog. 2531 */ 2532 if ((Adapter->link_state == LINK_STATE_DOWN) && 2533 (Adapter->shared.mac.type == e1000_80003es2lan)) { 2534 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL); 2535 E1000_WRITE_REG(hw, E1000_RCTL, 2536 rctl & ~E1000_RCTL_EN); 2537 e1000g_log(Adapter, CE_WARN, 2538 "ESB2 receiver disabled"); 2539 Adapter->esb2_workaround = B_TRUE; 2540 } 2541 if (!Adapter->reset_flag) 2542 mac_link_update(Adapter->mh, 2543 Adapter->link_state); 2544 if (Adapter->link_state == LINK_STATE_UP) 2545 Adapter->reset_flag = B_FALSE; 2546 } 2547 2548 start_watchdog_timer(Adapter); 2549 } 2550 } 2551 2552 static void 2553 e1000g_init_unicst(struct e1000g *Adapter) 2554 { 2555 struct e1000_hw *hw; 2556 int slot; 2557 2558 hw = &Adapter->shared; 2559 2560 if (Adapter->init_count == 0) { 2561 /* Initialize the multiple unicast addresses */ 2562 Adapter->unicst_total = min(hw->mac.rar_entry_count, 2563 MAX_NUM_UNICAST_ADDRESSES); 2564 2565 /* 2566 * The common code does not correctly calculate the number of 2567 * rar's that could be reserved by firmware for the pch_lpt and 2568 * pch_spt macs. The interface has one primary rar, and 11 2569 * additional ones. Those 11 additional ones are not always 2570 * available. According to the datasheet, we need to check a 2571 * few of the bits set in the FWSM register. If the value is 2572 * zero, everything is available. If the value is 1, none of the 2573 * additional registers are available. If the value is 2-7, only 2574 * that number are available. 
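 *
 * Worked example (illustrative): the code below starts from the one
 * primary RAR and adds the unlocked extras, so
 *
 *	WLOCK_MAC == 0  ->  1 + 11 = 12 usable RARs
 *	WLOCK_MAC == 1  ->  1 +  0 =  1 (primary only)
 *	WLOCK_MAC == 3  ->  1 +  3 =  4
 *
 * with the result then capped at MAX_NUM_UNICAST_ADDRESSES.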
2575 */ 2576 if (hw->mac.type >= e1000_pch_lpt) { 2577 uint32_t locked, rar; 2578 2579 locked = E1000_READ_REG(hw, E1000_FWSM) & 2580 E1000_FWSM_WLOCK_MAC_MASK; 2581 locked >>= E1000_FWSM_WLOCK_MAC_SHIFT; 2582 rar = 1; 2583 if (locked == 0) 2584 rar += 11; 2585 else if (locked == 1) 2586 rar += 0; 2587 else 2588 rar += locked; 2589 Adapter->unicst_total = min(rar, 2590 MAX_NUM_UNICAST_ADDRESSES); 2591 } 2592 2593 /* Workaround for an erratum of 82571 chipset */ 2594 if ((hw->mac.type == e1000_82571) && 2595 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2596 Adapter->unicst_total--; 2597 2598 /* VMware doesn't support multiple mac addresses properly */ 2599 if (hw->subsystem_vendor_id == 0x15ad) 2600 Adapter->unicst_total = 1; 2601 2602 Adapter->unicst_avail = Adapter->unicst_total; 2603 2604 for (slot = 0; slot < Adapter->unicst_total; slot++) { 2605 /* Clear both the flag and MAC address */ 2606 Adapter->unicst_addr[slot].reg.high = 0; 2607 Adapter->unicst_addr[slot].reg.low = 0; 2608 } 2609 } else { 2610 /* Workaround for an erratum of 82571 chipset */ 2611 if ((hw->mac.type == e1000_82571) && 2612 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2613 (void) e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY); 2614 2615 /* Re-configure the RAR registers */ 2616 for (slot = 0; slot < Adapter->unicst_total; slot++) 2617 if (Adapter->unicst_addr[slot].mac.set == 1) 2618 (void) e1000_rar_set(hw, 2619 Adapter->unicst_addr[slot].mac.addr, slot); 2620 } 2621 2622 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 2623 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2624 } 2625 2626 static int 2627 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr, 2628 int slot) 2629 { 2630 struct e1000_hw *hw; 2631 2632 hw = &Adapter->shared; 2633 2634 /* 2635 * The first revision of Wiseman silicon (rev 2.0) has an erratum 2636 * that requires the receiver to be in reset when any of the 2637 * receive address registers (RAR regs) are accessed. The first 2638 * rev of Wiseman silicon also requires MWI to be disabled when 2639 * a global reset or a receive reset is issued. So before we 2640 * initialize the RARs, we check the rev of the Wiseman controller 2641 * and work around any necessary HW errata.
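 *
 * Editorial summary of the workaround applied below: the receiver is
 * put into reset (RCTL = E1000_RCTL_RST) and MWI is cleared before the
 * RARs are touched; afterwards the receiver is taken out of reset, MWI
 * is restored if the PCI command word had it enabled, and
 * e1000g_rx_setup() is re-run.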
2642 */ 2643 if ((hw->mac.type == e1000_82542) && 2644 (hw->revision_id == E1000_REVISION_2)) { 2645 e1000_pci_clear_mwi(hw); 2646 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); 2647 msec_delay(5); 2648 } 2649 if (mac_addr == NULL) { 2650 E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0); 2651 E1000_WRITE_FLUSH(hw); 2652 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0); 2653 E1000_WRITE_FLUSH(hw); 2654 /* Clear both the flag and MAC address */ 2655 Adapter->unicst_addr[slot].reg.high = 0; 2656 Adapter->unicst_addr[slot].reg.low = 0; 2657 } else { 2658 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr, 2659 ETHERADDRL); 2660 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot); 2661 Adapter->unicst_addr[slot].mac.set = 1; 2662 } 2663 2664 /* Workaround for an erratum of 82571 chipst */ 2665 if (slot == 0) { 2666 if ((hw->mac.type == e1000_82571) && 2667 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2668 if (mac_addr == NULL) { 2669 E1000_WRITE_REG_ARRAY(hw, E1000_RA, 2670 slot << 1, 0); 2671 E1000_WRITE_FLUSH(hw); 2672 E1000_WRITE_REG_ARRAY(hw, E1000_RA, 2673 (slot << 1) + 1, 0); 2674 E1000_WRITE_FLUSH(hw); 2675 } else { 2676 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, 2677 LAST_RAR_ENTRY); 2678 } 2679 } 2680 2681 /* 2682 * If we are using Wiseman rev 2.0 silicon, we will have previously 2683 * put the receive in reset, and disabled MWI, to work around some 2684 * HW errata. Now we should take the receiver out of reset, and 2685 * re-enabled if MWI if it was previously enabled by the PCI BIOS. 2686 */ 2687 if ((hw->mac.type == e1000_82542) && 2688 (hw->revision_id == E1000_REVISION_2)) { 2689 E1000_WRITE_REG(hw, E1000_RCTL, 0); 2690 msec_delay(1); 2691 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 2692 e1000_pci_set_mwi(hw); 2693 e1000g_rx_setup(Adapter); 2694 } 2695 2696 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2697 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2698 return (EIO); 2699 } 2700 2701 return (0); 2702 } 2703 2704 static int 2705 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr) 2706 { 2707 struct e1000_hw *hw = &Adapter->shared; 2708 struct ether_addr *newtable; 2709 size_t new_len; 2710 size_t old_len; 2711 int res = 0; 2712 2713 if ((multiaddr[0] & 01) == 0) { 2714 res = EINVAL; 2715 e1000g_log(Adapter, CE_WARN, "Illegal multicast address"); 2716 goto done; 2717 } 2718 2719 if (Adapter->mcast_count >= Adapter->mcast_max_num) { 2720 res = ENOENT; 2721 e1000g_log(Adapter, CE_WARN, 2722 "Adapter requested more than %d mcast addresses", 2723 Adapter->mcast_max_num); 2724 goto done; 2725 } 2726 2727 2728 if (Adapter->mcast_count == Adapter->mcast_alloc_count) { 2729 old_len = Adapter->mcast_alloc_count * 2730 sizeof (struct ether_addr); 2731 new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) * 2732 sizeof (struct ether_addr); 2733 2734 newtable = kmem_alloc(new_len, KM_NOSLEEP); 2735 if (newtable == NULL) { 2736 res = ENOMEM; 2737 e1000g_log(Adapter, CE_WARN, 2738 "Not enough memory to alloc mcast table"); 2739 goto done; 2740 } 2741 2742 if (Adapter->mcast_table != NULL) { 2743 bcopy(Adapter->mcast_table, newtable, old_len); 2744 kmem_free(Adapter->mcast_table, old_len); 2745 } 2746 Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE; 2747 Adapter->mcast_table = newtable; 2748 } 2749 2750 bcopy(multiaddr, 2751 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL); 2752 Adapter->mcast_count++; 2753 2754 /* 2755 * Update the MC table in the hardware 2756 */ 2757 e1000g_clear_interrupt(Adapter); 2758 2759 
e1000_update_mc_addr_list(hw, 2760 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 2761 2762 e1000g_mask_interrupt(Adapter); 2763 2764 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2765 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2766 res = EIO; 2767 } 2768 2769 done: 2770 return (res); 2771 } 2772 2773 static int 2774 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr) 2775 { 2776 struct e1000_hw *hw = &Adapter->shared; 2777 struct ether_addr *newtable; 2778 size_t new_len; 2779 size_t old_len; 2780 unsigned i; 2781 2782 for (i = 0; i < Adapter->mcast_count; i++) { 2783 if (bcmp(multiaddr, &Adapter->mcast_table[i], 2784 ETHERADDRL) == 0) { 2785 for (i++; i < Adapter->mcast_count; i++) { 2786 Adapter->mcast_table[i - 1] = 2787 Adapter->mcast_table[i]; 2788 } 2789 Adapter->mcast_count--; 2790 break; 2791 } 2792 } 2793 2794 if ((Adapter->mcast_alloc_count - Adapter->mcast_count) > 2795 MCAST_ALLOC_SIZE) { 2796 old_len = Adapter->mcast_alloc_count * 2797 sizeof (struct ether_addr); 2798 new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) * 2799 sizeof (struct ether_addr); 2800 2801 newtable = kmem_alloc(new_len, KM_NOSLEEP); 2802 if (newtable != NULL) { 2803 bcopy(Adapter->mcast_table, newtable, new_len); 2804 kmem_free(Adapter->mcast_table, old_len); 2805 2806 Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE; 2807 Adapter->mcast_table = newtable; 2808 } 2809 } 2810 2811 /* 2812 * Update the MC table in the hardware 2813 */ 2814 e1000g_clear_interrupt(Adapter); 2815 2816 e1000_update_mc_addr_list(hw, 2817 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 2818 2819 e1000g_mask_interrupt(Adapter); 2820 2821 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2822 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2823 return (EIO); 2824 } 2825 2826 return (0); 2827 } 2828 2829 static void 2830 e1000g_release_multicast(struct e1000g *Adapter) 2831 { 2832 if (Adapter->mcast_table != NULL) { 2833 kmem_free(Adapter->mcast_table, 2834 Adapter->mcast_alloc_count * sizeof (struct ether_addr)); 2835 Adapter->mcast_table = NULL; 2836 } 2837 } 2838 2839 int 2840 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr) 2841 { 2842 struct e1000g *Adapter = (struct e1000g *)arg; 2843 int result; 2844 2845 rw_enter(&Adapter->chip_lock, RW_WRITER); 2846 2847 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2848 result = ECANCELED; 2849 goto done; 2850 } 2851 2852 result = (add) ? 
multicst_add(Adapter, addr) 2853 : multicst_remove(Adapter, addr); 2854 2855 done: 2856 rw_exit(&Adapter->chip_lock); 2857 return (result); 2858 2859 } 2860 2861 int 2862 e1000g_m_promisc(void *arg, boolean_t on) 2863 { 2864 struct e1000g *Adapter = (struct e1000g *)arg; 2865 uint32_t rctl; 2866 2867 rw_enter(&Adapter->chip_lock, RW_WRITER); 2868 2869 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2870 rw_exit(&Adapter->chip_lock); 2871 return (ECANCELED); 2872 } 2873 2874 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL); 2875 2876 if (on) 2877 rctl |= 2878 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM); 2879 else 2880 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE)); 2881 2882 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl); 2883 2884 Adapter->e1000g_promisc = on; 2885 2886 rw_exit(&Adapter->chip_lock); 2887 2888 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2889 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2890 return (EIO); 2891 } 2892 2893 return (0); 2894 } 2895 2896 /* 2897 * Entry points to enable and disable interrupts at the granularity of 2898 * a group. 2899 * Turns the poll_mode for the whole adapter on and off to enable or 2900 * override the ring level polling control over the hardware interrupts. 2901 */ 2902 static int 2903 e1000g_rx_group_intr_enable(mac_intr_handle_t arg) 2904 { 2905 struct e1000g *adapter = (struct e1000g *)arg; 2906 e1000g_rx_ring_t *rx_ring = adapter->rx_ring; 2907 2908 /* 2909 * Later interrupts at the granularity of the this ring will 2910 * invoke mac_rx() with NULL, indicating the need for another 2911 * software classification. 2912 * We have a single ring usable per adapter now, so we only need to 2913 * reset the rx handle for that one. 2914 * When more RX rings can be used, we should update each one of them. 2915 */ 2916 mutex_enter(&rx_ring->rx_lock); 2917 rx_ring->mrh = NULL; 2918 adapter->poll_mode = B_FALSE; 2919 mutex_exit(&rx_ring->rx_lock); 2920 return (0); 2921 } 2922 2923 static int 2924 e1000g_rx_group_intr_disable(mac_intr_handle_t arg) 2925 { 2926 struct e1000g *adapter = (struct e1000g *)arg; 2927 e1000g_rx_ring_t *rx_ring = adapter->rx_ring; 2928 2929 mutex_enter(&rx_ring->rx_lock); 2930 2931 /* 2932 * Later interrupts at the granularity of the this ring will 2933 * invoke mac_rx() with the handle for this ring; 2934 */ 2935 adapter->poll_mode = B_TRUE; 2936 rx_ring->mrh = rx_ring->mrh_init; 2937 mutex_exit(&rx_ring->rx_lock); 2938 return (0); 2939 } 2940 2941 /* 2942 * Entry points to enable and disable interrupts at the granularity of 2943 * a ring. 2944 * adapter poll_mode controls whether we actually proceed with hardware 2945 * interrupt toggling. 
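 *
 * Illustrative sequence (assuming the MAC layer drives polling this
 * way): mi_disable -> e1000g_rx_ring_intr_disable() sets poll_flag and
 * masks RXT0 via IMC; packets are then pulled through
 * e1000g_poll_ring(); mi_enable -> e1000g_rx_ring_intr_enable() clears
 * poll_flag, unmasks RXT0 via IMS and writes ICS to trigger a catch-up
 * interrupt.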
2946 */ 2947 static int 2948 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh) 2949 { 2950 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh; 2951 struct e1000g *adapter = rx_ring->adapter; 2952 struct e1000_hw *hw = &adapter->shared; 2953 uint32_t intr_mask; 2954 2955 rw_enter(&adapter->chip_lock, RW_READER); 2956 2957 if (adapter->e1000g_state & E1000G_SUSPENDED) { 2958 rw_exit(&adapter->chip_lock); 2959 return (0); 2960 } 2961 2962 mutex_enter(&rx_ring->rx_lock); 2963 rx_ring->poll_flag = 0; 2964 mutex_exit(&rx_ring->rx_lock); 2965 2966 /* Rx interrupt enabling for MSI and legacy */ 2967 intr_mask = E1000_READ_REG(hw, E1000_IMS); 2968 intr_mask |= E1000_IMS_RXT0; 2969 E1000_WRITE_REG(hw, E1000_IMS, intr_mask); 2970 E1000_WRITE_FLUSH(hw); 2971 2972 /* Trigger a Rx interrupt to check Rx ring */ 2973 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 2974 E1000_WRITE_FLUSH(hw); 2975 2976 rw_exit(&adapter->chip_lock); 2977 return (0); 2978 } 2979 2980 static int 2981 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh) 2982 { 2983 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh; 2984 struct e1000g *adapter = rx_ring->adapter; 2985 struct e1000_hw *hw = &adapter->shared; 2986 2987 rw_enter(&adapter->chip_lock, RW_READER); 2988 2989 if (adapter->e1000g_state & E1000G_SUSPENDED) { 2990 rw_exit(&adapter->chip_lock); 2991 return (0); 2992 } 2993 mutex_enter(&rx_ring->rx_lock); 2994 rx_ring->poll_flag = 1; 2995 mutex_exit(&rx_ring->rx_lock); 2996 2997 /* Rx interrupt disabling for MSI and legacy */ 2998 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0); 2999 E1000_WRITE_FLUSH(hw); 3000 3001 rw_exit(&adapter->chip_lock); 3002 return (0); 3003 } 3004 3005 /* 3006 * e1000g_unicst_find - Find the slot for the specified unicast address 3007 */ 3008 static int 3009 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr) 3010 { 3011 int slot; 3012 3013 for (slot = 0; slot < Adapter->unicst_total; slot++) { 3014 if ((Adapter->unicst_addr[slot].mac.set == 1) && 3015 (bcmp(Adapter->unicst_addr[slot].mac.addr, 3016 mac_addr, ETHERADDRL) == 0)) 3017 return (slot); 3018 } 3019 3020 return (-1); 3021 } 3022 3023 /* 3024 * Entry points to add and remove a MAC address to a ring group. 3025 * The caller takes care of adding and removing the MAC addresses 3026 * to the filter via these two routines. 
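 *
 * Illustrative bookkeeping: e1000g_addmac() below searches for a free
 * unicst_addr[] slot, programs it through e1000g_unicst_set() and
 * decrements unicst_avail; e1000g_remmac() clears the slot (by passing
 * a NULL mac_addr) and increments unicst_avail again.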
3027 */ 3028 3029 static int 3030 e1000g_addmac(void *arg, const uint8_t *mac_addr) 3031 { 3032 struct e1000g *Adapter = (struct e1000g *)arg; 3033 int slot, err; 3034 3035 rw_enter(&Adapter->chip_lock, RW_WRITER); 3036 3037 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3038 rw_exit(&Adapter->chip_lock); 3039 return (ECANCELED); 3040 } 3041 3042 if (e1000g_unicst_find(Adapter, mac_addr) != -1) { 3043 /* The same address is already in slot */ 3044 rw_exit(&Adapter->chip_lock); 3045 return (0); 3046 } 3047 3048 if (Adapter->unicst_avail == 0) { 3049 /* no slots available */ 3050 rw_exit(&Adapter->chip_lock); 3051 return (ENOSPC); 3052 } 3053 3054 /* Search for a free slot */ 3055 for (slot = 0; slot < Adapter->unicst_total; slot++) { 3056 if (Adapter->unicst_addr[slot].mac.set == 0) 3057 break; 3058 } 3059 ASSERT(slot < Adapter->unicst_total); 3060 3061 err = e1000g_unicst_set(Adapter, mac_addr, slot); 3062 if (err == 0) 3063 Adapter->unicst_avail--; 3064 3065 rw_exit(&Adapter->chip_lock); 3066 3067 return (err); 3068 } 3069 3070 static int 3071 e1000g_remmac(void *arg, const uint8_t *mac_addr) 3072 { 3073 struct e1000g *Adapter = (struct e1000g *)arg; 3074 int slot, err; 3075 3076 rw_enter(&Adapter->chip_lock, RW_WRITER); 3077 3078 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3079 rw_exit(&Adapter->chip_lock); 3080 return (ECANCELED); 3081 } 3082 3083 slot = e1000g_unicst_find(Adapter, mac_addr); 3084 if (slot == -1) { 3085 rw_exit(&Adapter->chip_lock); 3086 return (EINVAL); 3087 } 3088 3089 ASSERT(Adapter->unicst_addr[slot].mac.set); 3090 3091 /* Clear this slot */ 3092 err = e1000g_unicst_set(Adapter, NULL, slot); 3093 if (err == 0) 3094 Adapter->unicst_avail++; 3095 3096 rw_exit(&Adapter->chip_lock); 3097 3098 return (err); 3099 } 3100 3101 static int 3102 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 3103 { 3104 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh; 3105 3106 mutex_enter(&rx_ring->rx_lock); 3107 rx_ring->ring_gen_num = mr_gen_num; 3108 mutex_exit(&rx_ring->rx_lock); 3109 return (0); 3110 } 3111 3112 /* 3113 * Callback funtion for MAC layer to register all rings. 3114 * 3115 * The hardware supports a single group with currently only one ring 3116 * available. 3117 * Though not offering virtualization ability per se, exposing the 3118 * group/ring still enables the polling and interrupt toggling. 3119 */ 3120 /* ARGSUSED */ 3121 void 3122 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index, 3123 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 3124 { 3125 struct e1000g *Adapter = (struct e1000g *)arg; 3126 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring; 3127 mac_intr_t *mintr; 3128 3129 /* 3130 * We advertised only RX group/rings, so the MAC framework shouldn't 3131 * ask for any thing else. 
3132 */ 3133 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0); 3134 3135 rx_ring->mrh = rx_ring->mrh_init = rh; 3136 infop->mri_driver = (mac_ring_driver_t)rx_ring; 3137 infop->mri_start = e1000g_ring_start; 3138 infop->mri_stop = NULL; 3139 infop->mri_poll = e1000g_poll_ring; 3140 infop->mri_stat = e1000g_rx_ring_stat; 3141 3142 /* Ring level interrupts */ 3143 mintr = &infop->mri_intr; 3144 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 3145 mintr->mi_enable = e1000g_rx_ring_intr_enable; 3146 mintr->mi_disable = e1000g_rx_ring_intr_disable; 3147 if (Adapter->msi_enable) 3148 mintr->mi_ddi_handle = Adapter->htable[0]; 3149 } 3150 3151 /* ARGSUSED */ 3152 static void 3153 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index, 3154 mac_group_info_t *infop, mac_group_handle_t gh) 3155 { 3156 struct e1000g *Adapter = (struct e1000g *)arg; 3157 mac_intr_t *mintr; 3158 3159 /* 3160 * We advertised a single RX ring. Getting a request for anything else 3161 * signifies a bug in the MAC framework. 3162 */ 3163 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0); 3164 3165 Adapter->rx_group = gh; 3166 3167 infop->mgi_driver = (mac_group_driver_t)Adapter; 3168 infop->mgi_start = NULL; 3169 infop->mgi_stop = NULL; 3170 infop->mgi_addmac = e1000g_addmac; 3171 infop->mgi_remmac = e1000g_remmac; 3172 infop->mgi_count = 1; 3173 3174 /* Group level interrupts */ 3175 mintr = &infop->mgi_intr; 3176 mintr->mi_handle = (mac_intr_handle_t)Adapter; 3177 mintr->mi_enable = e1000g_rx_group_intr_enable; 3178 mintr->mi_disable = e1000g_rx_group_intr_disable; 3179 } 3180 3181 static void 3182 e1000g_led_blink(void *arg) 3183 { 3184 e1000g_t *e1000g = arg; 3185 3186 mutex_enter(&e1000g->e1000g_led_lock); 3187 VERIFY(e1000g->e1000g_emul_blink); 3188 if (e1000g->e1000g_emul_state) { 3189 (void) e1000_led_on(&e1000g->shared); 3190 } else { 3191 (void) e1000_led_off(&e1000g->shared); 3192 } 3193 e1000g->e1000g_emul_state = !e1000g->e1000g_emul_state; 3194 mutex_exit(&e1000g->e1000g_led_lock); 3195 } 3196 3197 static int 3198 e1000g_led_set(void *arg, mac_led_mode_t mode, uint_t flags) 3199 { 3200 e1000g_t *e1000g = arg; 3201 3202 if (flags != 0) 3203 return (EINVAL); 3204 3205 if (mode != MAC_LED_DEFAULT && 3206 mode != MAC_LED_IDENT && 3207 mode != MAC_LED_OFF && 3208 mode != MAC_LED_ON) 3209 return (ENOTSUP); 3210 3211 mutex_enter(&e1000g->e1000g_led_lock); 3212 3213 if ((mode == MAC_LED_IDENT || mode == MAC_LED_OFF || 3214 mode == MAC_LED_ON) && 3215 !e1000g->e1000g_led_setup) { 3216 if (e1000_setup_led(&e1000g->shared) != E1000_SUCCESS) { 3217 mutex_exit(&e1000g->e1000g_led_lock); 3218 return (EIO); 3219 } 3220 3221 e1000g->e1000g_led_setup = B_TRUE; 3222 } 3223 3224 if (mode != MAC_LED_IDENT && e1000g->e1000g_blink != NULL) { 3225 ddi_periodic_t id = e1000g->e1000g_blink; 3226 e1000g->e1000g_blink = NULL; 3227 mutex_exit(&e1000g->e1000g_led_lock); 3228 ddi_periodic_delete(id); 3229 mutex_enter(&e1000g->e1000g_led_lock); 3230 } 3231 3232 switch (mode) { 3233 case MAC_LED_DEFAULT: 3234 if (e1000g->e1000g_led_setup) { 3235 if (e1000_cleanup_led(&e1000g->shared) != 3236 E1000_SUCCESS) { 3237 mutex_exit(&e1000g->e1000g_led_lock); 3238 return (EIO); 3239 } 3240 e1000g->e1000g_led_setup = B_FALSE; 3241 } 3242 break; 3243 case MAC_LED_IDENT: 3244 if (e1000g->e1000g_emul_blink) { 3245 if (e1000g->e1000g_blink != NULL) 3246 break; 3247 3248 /* 3249 * Note, we use a 200 ms period here as that's what 3250 * section 10.1.3 8254x Intel Manual (PCI/PCI-X Family 3251 * of Gigabit Ethernet 
Controllers Software Developer's 3252 * Manual) indicates that the optional blink hardware 3253 * operates at. 3254 */ 3255 e1000g->e1000g_blink = 3256 ddi_periodic_add(e1000g_led_blink, e1000g, 3257 200ULL * (NANOSEC / MILLISEC), DDI_IPL_0); 3258 } else if (e1000_blink_led(&e1000g->shared) != E1000_SUCCESS) { 3259 mutex_exit(&e1000g->e1000g_led_lock); 3260 return (EIO); 3261 } 3262 break; 3263 case MAC_LED_OFF: 3264 if (e1000_led_off(&e1000g->shared) != E1000_SUCCESS) { 3265 mutex_exit(&e1000g->e1000g_led_lock); 3266 return (EIO); 3267 } 3268 break; 3269 case MAC_LED_ON: 3270 if (e1000_led_on(&e1000g->shared) != E1000_SUCCESS) { 3271 mutex_exit(&e1000g->e1000g_led_lock); 3272 return (EIO); 3273 } 3274 break; 3275 default: 3276 mutex_exit(&e1000g->e1000g_led_lock); 3277 return (ENOTSUP); 3278 } 3279 3280 mutex_exit(&e1000g->e1000g_led_lock); 3281 return (0); 3282 3283 } 3284 3285 static boolean_t 3286 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3287 { 3288 struct e1000g *Adapter = (struct e1000g *)arg; 3289 3290 switch (cap) { 3291 case MAC_CAPAB_HCKSUM: { 3292 uint32_t *txflags = cap_data; 3293 3294 if (Adapter->tx_hcksum_enable) 3295 *txflags = HCKSUM_IPHDRCKSUM | 3296 HCKSUM_INET_PARTIAL; 3297 else 3298 return (B_FALSE); 3299 break; 3300 } 3301 3302 case MAC_CAPAB_LSO: { 3303 mac_capab_lso_t *cap_lso = cap_data; 3304 3305 if (Adapter->lso_enable) { 3306 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 3307 cap_lso->lso_basic_tcp_ipv4.lso_max = 3308 E1000_LSO_MAXLEN; 3309 } else 3310 return (B_FALSE); 3311 break; 3312 } 3313 case MAC_CAPAB_RINGS: { 3314 mac_capab_rings_t *cap_rings = cap_data; 3315 3316 /* No TX rings exposed yet */ 3317 if (cap_rings->mr_type != MAC_RING_TYPE_RX) 3318 return (B_FALSE); 3319 3320 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC; 3321 cap_rings->mr_rnum = 1; 3322 cap_rings->mr_gnum = 1; 3323 cap_rings->mr_rget = e1000g_fill_ring; 3324 cap_rings->mr_gget = e1000g_fill_group; 3325 break; 3326 } 3327 case MAC_CAPAB_LED: { 3328 mac_capab_led_t *cap_led = cap_data; 3329 3330 cap_led->mcl_flags = 0; 3331 cap_led->mcl_modes = MAC_LED_DEFAULT; 3332 if (Adapter->shared.mac.ops.blink_led != NULL && 3333 Adapter->shared.mac.ops.blink_led != 3334 e1000_null_ops_generic) { 3335 cap_led->mcl_modes |= MAC_LED_IDENT; 3336 } 3337 3338 if (Adapter->shared.mac.ops.led_off != NULL && 3339 Adapter->shared.mac.ops.led_off != 3340 e1000_null_ops_generic) { 3341 cap_led->mcl_modes |= MAC_LED_OFF; 3342 } 3343 3344 if (Adapter->shared.mac.ops.led_on != NULL && 3345 Adapter->shared.mac.ops.led_on != 3346 e1000_null_ops_generic) { 3347 cap_led->mcl_modes |= MAC_LED_ON; 3348 } 3349 3350 /* 3351 * Some hardware doesn't support blinking natively as they're 3352 * missing the optional blink circuit. If they have both off and 3353 * on then we'll emulate it ourselves. 3354 */ 3355 if (((cap_led->mcl_modes & MAC_LED_IDENT) == 0) && 3356 ((cap_led->mcl_modes & MAC_LED_OFF) != 0) && 3357 ((cap_led->mcl_modes & MAC_LED_ON) != 0)) { 3358 cap_led->mcl_modes |= MAC_LED_IDENT; 3359 Adapter->e1000g_emul_blink = B_TRUE; 3360 } 3361 3362 cap_led->mcl_set = e1000g_led_set; 3363 break; 3364 } 3365 default: 3366 return (B_FALSE); 3367 } 3368 return (B_TRUE); 3369 } 3370 3371 static boolean_t 3372 e1000g_param_locked(mac_prop_id_t pr_num) 3373 { 3374 /* 3375 * All en_* parameters are locked (read-only) while 3376 * the device is in any sort of loopback mode ... 
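 *
 * Illustrative effect (assuming the usual dladm administration path):
 * while a loopback mode is active, a request such as
 *
 *	dladm set-linkprop -p en_1000fdx_cap=0 e1000g0
 *
 * reaches e1000g_m_setprop(), which sees e1000g_param_locked() return
 * B_TRUE and rejects the change with EBUSY.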
3377 */ 3378 switch (pr_num) { 3379 case MAC_PROP_EN_1000FDX_CAP: 3380 case MAC_PROP_EN_1000HDX_CAP: 3381 case MAC_PROP_EN_100FDX_CAP: 3382 case MAC_PROP_EN_100HDX_CAP: 3383 case MAC_PROP_EN_10FDX_CAP: 3384 case MAC_PROP_EN_10HDX_CAP: 3385 case MAC_PROP_AUTONEG: 3386 case MAC_PROP_FLOWCTRL: 3387 return (B_TRUE); 3388 } 3389 return (B_FALSE); 3390 } 3391 3392 /* 3393 * callback function for set/get of properties 3394 */ 3395 static int 3396 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3397 uint_t pr_valsize, const void *pr_val) 3398 { 3399 struct e1000g *Adapter = arg; 3400 struct e1000_hw *hw = &Adapter->shared; 3401 struct e1000_fc_info *fc = &Adapter->shared.fc; 3402 int err = 0; 3403 link_flowctrl_t flowctrl; 3404 uint32_t cur_mtu, new_mtu; 3405 3406 rw_enter(&Adapter->chip_lock, RW_WRITER); 3407 3408 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3409 rw_exit(&Adapter->chip_lock); 3410 return (ECANCELED); 3411 } 3412 3413 if (Adapter->loopback_mode != E1000G_LB_NONE && 3414 e1000g_param_locked(pr_num)) { 3415 /* 3416 * All en_* parameters are locked (read-only) 3417 * while the device is in any sort of loopback mode. 3418 */ 3419 rw_exit(&Adapter->chip_lock); 3420 return (EBUSY); 3421 } 3422 3423 switch (pr_num) { 3424 case MAC_PROP_EN_1000FDX_CAP: 3425 if (hw->phy.media_type != e1000_media_type_copper) { 3426 err = ENOTSUP; 3427 break; 3428 } 3429 Adapter->param_en_1000fdx = *(uint8_t *)pr_val; 3430 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val; 3431 goto reset; 3432 case MAC_PROP_EN_100FDX_CAP: 3433 if (hw->phy.media_type != e1000_media_type_copper) { 3434 err = ENOTSUP; 3435 break; 3436 } 3437 Adapter->param_en_100fdx = *(uint8_t *)pr_val; 3438 Adapter->param_adv_100fdx = *(uint8_t *)pr_val; 3439 goto reset; 3440 case MAC_PROP_EN_100HDX_CAP: 3441 if (hw->phy.media_type != e1000_media_type_copper) { 3442 err = ENOTSUP; 3443 break; 3444 } 3445 Adapter->param_en_100hdx = *(uint8_t *)pr_val; 3446 Adapter->param_adv_100hdx = *(uint8_t *)pr_val; 3447 goto reset; 3448 case MAC_PROP_EN_10FDX_CAP: 3449 if (hw->phy.media_type != e1000_media_type_copper) { 3450 err = ENOTSUP; 3451 break; 3452 } 3453 Adapter->param_en_10fdx = *(uint8_t *)pr_val; 3454 Adapter->param_adv_10fdx = *(uint8_t *)pr_val; 3455 goto reset; 3456 case MAC_PROP_EN_10HDX_CAP: 3457 if (hw->phy.media_type != e1000_media_type_copper) { 3458 err = ENOTSUP; 3459 break; 3460 } 3461 Adapter->param_en_10hdx = *(uint8_t *)pr_val; 3462 Adapter->param_adv_10hdx = *(uint8_t *)pr_val; 3463 goto reset; 3464 case MAC_PROP_AUTONEG: 3465 if (hw->phy.media_type != e1000_media_type_copper) { 3466 err = ENOTSUP; 3467 break; 3468 } 3469 Adapter->param_adv_autoneg = *(uint8_t *)pr_val; 3470 goto reset; 3471 case MAC_PROP_FLOWCTRL: 3472 fc->send_xon = B_TRUE; 3473 bcopy(pr_val, &flowctrl, sizeof (flowctrl)); 3474 3475 switch (flowctrl) { 3476 default: 3477 err = EINVAL; 3478 break; 3479 case LINK_FLOWCTRL_NONE: 3480 fc->requested_mode = e1000_fc_none; 3481 break; 3482 case LINK_FLOWCTRL_RX: 3483 fc->requested_mode = e1000_fc_rx_pause; 3484 break; 3485 case LINK_FLOWCTRL_TX: 3486 fc->requested_mode = e1000_fc_tx_pause; 3487 break; 3488 case LINK_FLOWCTRL_BI: 3489 fc->requested_mode = e1000_fc_full; 3490 break; 3491 } 3492 reset: 3493 if (err == 0) { 3494 /* check PCH limits & reset the link */ 3495 e1000g_pch_limits(Adapter); 3496 if (e1000g_reset_link(Adapter) != DDI_SUCCESS) 3497 err = EINVAL; 3498 } 3499 break; 3500 case MAC_PROP_ADV_1000FDX_CAP: 3501 case MAC_PROP_ADV_1000HDX_CAP: 3502 case MAC_PROP_ADV_100FDX_CAP: 
3503 case MAC_PROP_ADV_100HDX_CAP: 3504 case MAC_PROP_ADV_10FDX_CAP: 3505 case MAC_PROP_ADV_10HDX_CAP: 3506 case MAC_PROP_EN_1000HDX_CAP: 3507 case MAC_PROP_STATUS: 3508 case MAC_PROP_SPEED: 3509 case MAC_PROP_DUPLEX: 3510 case MAC_PROP_MEDIA: 3511 err = ENOTSUP; /* read-only prop. Can't set this. */ 3512 break; 3513 case MAC_PROP_MTU: 3514 /* adapter must be stopped for an MTU change */ 3515 if (Adapter->e1000g_state & E1000G_STARTED) { 3516 err = EBUSY; 3517 break; 3518 } 3519 3520 cur_mtu = Adapter->default_mtu; 3521 3522 /* get new requested MTU */ 3523 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 3524 if (new_mtu == cur_mtu) { 3525 err = 0; 3526 break; 3527 } 3528 3529 if ((new_mtu < DEFAULT_MTU) || 3530 (new_mtu > Adapter->max_mtu)) { 3531 err = EINVAL; 3532 break; 3533 } 3534 3535 /* inform MAC framework of new MTU */ 3536 err = mac_maxsdu_update(Adapter->mh, new_mtu); 3537 3538 if (err == 0) { 3539 Adapter->default_mtu = new_mtu; 3540 Adapter->max_frame_size = 3541 e1000g_mtu2maxframe(new_mtu); 3542 3543 /* 3544 * check PCH limits & set buffer sizes to 3545 * match new MTU 3546 */ 3547 e1000g_pch_limits(Adapter); 3548 e1000g_set_bufsize(Adapter); 3549 3550 /* 3551 * decrease the number of descriptors and free 3552 * packets for jumbo frames to reduce tx/rx 3553 * resource consumption 3554 */ 3555 if (Adapter->max_frame_size >= 3556 (FRAME_SIZE_UPTO_4K)) { 3557 if (Adapter->tx_desc_num_flag == 0) 3558 Adapter->tx_desc_num = 3559 DEFAULT_JUMBO_NUM_TX_DESC; 3560 3561 if (Adapter->rx_desc_num_flag == 0) 3562 Adapter->rx_desc_num = 3563 DEFAULT_JUMBO_NUM_RX_DESC; 3564 3565 if (Adapter->tx_buf_num_flag == 0) 3566 Adapter->tx_freelist_num = 3567 DEFAULT_JUMBO_NUM_TX_BUF; 3568 3569 if (Adapter->rx_buf_num_flag == 0) 3570 Adapter->rx_freelist_limit = 3571 DEFAULT_JUMBO_NUM_RX_BUF; 3572 } else { 3573 if (Adapter->tx_desc_num_flag == 0) 3574 Adapter->tx_desc_num = 3575 DEFAULT_NUM_TX_DESCRIPTOR; 3576 3577 if (Adapter->rx_desc_num_flag == 0) 3578 Adapter->rx_desc_num = 3579 DEFAULT_NUM_RX_DESCRIPTOR; 3580 3581 if (Adapter->tx_buf_num_flag == 0) 3582 Adapter->tx_freelist_num = 3583 DEFAULT_NUM_TX_FREELIST; 3584 3585 if (Adapter->rx_buf_num_flag == 0) 3586 Adapter->rx_freelist_limit = 3587 DEFAULT_NUM_RX_FREELIST; 3588 } 3589 } 3590 break; 3591 case MAC_PROP_PRIVATE: 3592 err = e1000g_set_priv_prop(Adapter, pr_name, 3593 pr_valsize, pr_val); 3594 break; 3595 default: 3596 err = ENOTSUP; 3597 break; 3598 } 3599 rw_exit(&Adapter->chip_lock); 3600 return (err); 3601 } 3602 3603 static int 3604 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3605 uint_t pr_valsize, void *pr_val) 3606 { 3607 struct e1000g *Adapter = arg; 3608 struct e1000_hw *hw = &Adapter->shared; 3609 struct e1000_fc_info *fc = &Adapter->shared.fc; 3610 int err = 0; 3611 link_flowctrl_t flowctrl; 3612 uint64_t tmp = 0; 3613 3614 switch (pr_num) { 3615 case MAC_PROP_DUPLEX: 3616 ASSERT(pr_valsize >= sizeof (link_duplex_t)); 3617 bcopy(&Adapter->link_duplex, pr_val, 3618 sizeof (link_duplex_t)); 3619 break; 3620 case MAC_PROP_SPEED: 3621 ASSERT(pr_valsize >= sizeof (uint64_t)); 3622 tmp = Adapter->link_speed * 1000000ull; 3623 bcopy(&tmp, pr_val, sizeof (tmp)); 3624 break; 3625 case MAC_PROP_AUTONEG: 3626 *(uint8_t *)pr_val = Adapter->param_adv_autoneg; 3627 break; 3628 case MAC_PROP_FLOWCTRL: 3629 ASSERT(pr_valsize >= sizeof (link_flowctrl_t)); 3630 switch (fc->current_mode) { 3631 case e1000_fc_none: 3632 flowctrl = LINK_FLOWCTRL_NONE; 3633 break; 3634 case e1000_fc_rx_pause: 3635 flowctrl = LINK_FLOWCTRL_RX; 
3636 break; 3637 case e1000_fc_tx_pause: 3638 flowctrl = LINK_FLOWCTRL_TX; 3639 break; 3640 case e1000_fc_full: 3641 flowctrl = LINK_FLOWCTRL_BI; 3642 break; 3643 } 3644 bcopy(&flowctrl, pr_val, sizeof (flowctrl)); 3645 break; 3646 case MAC_PROP_ADV_1000FDX_CAP: 3647 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx; 3648 break; 3649 case MAC_PROP_EN_1000FDX_CAP: 3650 *(uint8_t *)pr_val = Adapter->param_en_1000fdx; 3651 break; 3652 case MAC_PROP_ADV_1000HDX_CAP: 3653 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx; 3654 break; 3655 case MAC_PROP_EN_1000HDX_CAP: 3656 *(uint8_t *)pr_val = Adapter->param_en_1000hdx; 3657 break; 3658 case MAC_PROP_ADV_100FDX_CAP: 3659 *(uint8_t *)pr_val = Adapter->param_adv_100fdx; 3660 break; 3661 case MAC_PROP_EN_100FDX_CAP: 3662 *(uint8_t *)pr_val = Adapter->param_en_100fdx; 3663 break; 3664 case MAC_PROP_ADV_100HDX_CAP: 3665 *(uint8_t *)pr_val = Adapter->param_adv_100hdx; 3666 break; 3667 case MAC_PROP_EN_100HDX_CAP: 3668 *(uint8_t *)pr_val = Adapter->param_en_100hdx; 3669 break; 3670 case MAC_PROP_ADV_10FDX_CAP: 3671 *(uint8_t *)pr_val = Adapter->param_adv_10fdx; 3672 break; 3673 case MAC_PROP_EN_10FDX_CAP: 3674 *(uint8_t *)pr_val = Adapter->param_en_10fdx; 3675 break; 3676 case MAC_PROP_ADV_10HDX_CAP: 3677 *(uint8_t *)pr_val = Adapter->param_adv_10hdx; 3678 break; 3679 case MAC_PROP_EN_10HDX_CAP: 3680 *(uint8_t *)pr_val = Adapter->param_en_10hdx; 3681 break; 3682 case MAC_PROP_ADV_100T4_CAP: 3683 case MAC_PROP_EN_100T4_CAP: 3684 *(uint8_t *)pr_val = Adapter->param_adv_100t4; 3685 break; 3686 case MAC_PROP_MEDIA: 3687 *(mac_ether_media_t *)pr_val = e1000_link_to_media(hw, 3688 Adapter->link_speed); 3689 break; 3690 case MAC_PROP_PRIVATE: 3691 err = e1000g_get_priv_prop(Adapter, pr_name, 3692 pr_valsize, pr_val); 3693 break; 3694 default: 3695 err = ENOTSUP; 3696 break; 3697 } 3698 3699 return (err); 3700 } 3701 3702 static void 3703 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3704 mac_prop_info_handle_t prh) 3705 { 3706 struct e1000g *Adapter = arg; 3707 struct e1000_hw *hw = &Adapter->shared; 3708 3709 switch (pr_num) { 3710 case MAC_PROP_DUPLEX: 3711 case MAC_PROP_SPEED: 3712 case MAC_PROP_ADV_1000FDX_CAP: 3713 case MAC_PROP_ADV_1000HDX_CAP: 3714 case MAC_PROP_ADV_100FDX_CAP: 3715 case MAC_PROP_ADV_100HDX_CAP: 3716 case MAC_PROP_ADV_10FDX_CAP: 3717 case MAC_PROP_ADV_10HDX_CAP: 3718 case MAC_PROP_ADV_100T4_CAP: 3719 case MAC_PROP_EN_100T4_CAP: 3720 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3721 break; 3722 3723 case MAC_PROP_EN_1000FDX_CAP: 3724 if (hw->phy.media_type != e1000_media_type_copper) { 3725 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3726 } else { 3727 mac_prop_info_set_default_uint8(prh, 3728 ((Adapter->phy_ext_status & 3729 IEEE_ESR_1000T_FD_CAPS) || 3730 (Adapter->phy_ext_status & 3731 IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0); 3732 } 3733 break; 3734 3735 case MAC_PROP_EN_100FDX_CAP: 3736 if (hw->phy.media_type != e1000_media_type_copper) { 3737 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3738 } else { 3739 mac_prop_info_set_default_uint8(prh, 3740 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 3741 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 3742 ? 1 : 0); 3743 } 3744 break; 3745 3746 case MAC_PROP_EN_100HDX_CAP: 3747 if (hw->phy.media_type != e1000_media_type_copper) { 3748 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3749 } else { 3750 mac_prop_info_set_default_uint8(prh, 3751 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) || 3752 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) 3753 ? 
1 : 0); 3754 } 3755 break; 3756 3757 case MAC_PROP_EN_10FDX_CAP: 3758 if (hw->phy.media_type != e1000_media_type_copper) { 3759 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3760 } else { 3761 mac_prop_info_set_default_uint8(prh, 3762 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0); 3763 } 3764 break; 3765 3766 case MAC_PROP_EN_10HDX_CAP: 3767 if (hw->phy.media_type != e1000_media_type_copper) { 3768 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3769 } else { 3770 mac_prop_info_set_default_uint8(prh, 3771 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0); 3772 } 3773 break; 3774 3775 case MAC_PROP_EN_1000HDX_CAP: 3776 if (hw->phy.media_type != e1000_media_type_copper) 3777 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3778 break; 3779 3780 case MAC_PROP_AUTONEG: 3781 if (hw->phy.media_type != e1000_media_type_copper) { 3782 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3783 } else { 3784 mac_prop_info_set_default_uint8(prh, 3785 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) 3786 ? 1 : 0); 3787 } 3788 break; 3789 3790 case MAC_PROP_FLOWCTRL: 3791 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI); 3792 break; 3793 3794 case MAC_PROP_MTU: { 3795 struct e1000_mac_info *mac = &Adapter->shared.mac; 3796 struct e1000_phy_info *phy = &Adapter->shared.phy; 3797 uint32_t max; 3798 3799 /* some MAC types do not support jumbo frames */ 3800 if ((mac->type == e1000_ich8lan) || 3801 ((mac->type == e1000_ich9lan) && (phy->type == 3802 e1000_phy_ife))) { 3803 max = DEFAULT_MTU; 3804 } else { 3805 max = Adapter->max_mtu; 3806 } 3807 3808 mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max); 3809 break; 3810 } 3811 case MAC_PROP_PRIVATE: { 3812 char valstr[64]; 3813 int value; 3814 3815 if (strcmp(pr_name, "_adv_pause_cap") == 0 || 3816 strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 3817 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3818 return; 3819 } else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3820 value = DEFAULT_TX_BCOPY_THRESHOLD; 3821 } else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3822 value = DEFAULT_TX_INTR_ENABLE; 3823 } else if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3824 value = DEFAULT_TX_INTR_DELAY; 3825 } else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3826 value = DEFAULT_TX_INTR_ABS_DELAY; 3827 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3828 value = DEFAULT_RX_BCOPY_THRESHOLD; 3829 } else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3830 value = DEFAULT_RX_LIMIT_ON_INTR; 3831 } else if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3832 value = DEFAULT_RX_INTR_DELAY; 3833 } else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3834 value = DEFAULT_RX_INTR_ABS_DELAY; 3835 } else if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3836 value = DEFAULT_INTR_THROTTLING; 3837 } else if (strcmp(pr_name, "_intr_adaptive") == 0) { 3838 value = 1; 3839 } else { 3840 return; 3841 } 3842 3843 (void) snprintf(valstr, sizeof (valstr), "%d", value); 3844 mac_prop_info_set_default_str(prh, valstr); 3845 break; 3846 } 3847 } 3848 } 3849 3850 /* ARGSUSED2 */ 3851 static int 3852 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name, 3853 uint_t pr_valsize, const void *pr_val) 3854 { 3855 int err = 0; 3856 long result; 3857 struct e1000_hw *hw = &Adapter->shared; 3858 3859 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3860 if (pr_val == NULL) { 3861 err = EINVAL; 3862 return (err); 3863 } 3864 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3865 if (result < MIN_TX_BCOPY_THRESHOLD || 3866 result > 
MAX_TX_BCOPY_THRESHOLD) 3867 err = EINVAL; 3868 else { 3869 Adapter->tx_bcopy_thresh = (uint32_t)result; 3870 } 3871 return (err); 3872 } 3873 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3874 if (pr_val == NULL) { 3875 err = EINVAL; 3876 return (err); 3877 } 3878 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3879 if (result < 0 || result > 1) 3880 err = EINVAL; 3881 else { 3882 Adapter->tx_intr_enable = (result == 1) ? 3883 B_TRUE: B_FALSE; 3884 if (Adapter->tx_intr_enable) 3885 e1000g_mask_tx_interrupt(Adapter); 3886 else 3887 e1000g_clear_tx_interrupt(Adapter); 3888 if (e1000g_check_acc_handle( 3889 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3890 ddi_fm_service_impact(Adapter->dip, 3891 DDI_SERVICE_DEGRADED); 3892 err = EIO; 3893 } 3894 } 3895 return (err); 3896 } 3897 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3898 if (pr_val == NULL) { 3899 err = EINVAL; 3900 return (err); 3901 } 3902 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3903 if (result < MIN_TX_INTR_DELAY || 3904 result > MAX_TX_INTR_DELAY) 3905 err = EINVAL; 3906 else { 3907 Adapter->tx_intr_delay = (uint32_t)result; 3908 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay); 3909 if (e1000g_check_acc_handle( 3910 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3911 ddi_fm_service_impact(Adapter->dip, 3912 DDI_SERVICE_DEGRADED); 3913 err = EIO; 3914 } 3915 } 3916 return (err); 3917 } 3918 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3919 if (pr_val == NULL) { 3920 err = EINVAL; 3921 return (err); 3922 } 3923 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3924 if (result < MIN_TX_INTR_ABS_DELAY || 3925 result > MAX_TX_INTR_ABS_DELAY) 3926 err = EINVAL; 3927 else { 3928 Adapter->tx_intr_abs_delay = (uint32_t)result; 3929 E1000_WRITE_REG(hw, E1000_TADV, 3930 Adapter->tx_intr_abs_delay); 3931 if (e1000g_check_acc_handle( 3932 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3933 ddi_fm_service_impact(Adapter->dip, 3934 DDI_SERVICE_DEGRADED); 3935 err = EIO; 3936 } 3937 } 3938 return (err); 3939 } 3940 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3941 if (pr_val == NULL) { 3942 err = EINVAL; 3943 return (err); 3944 } 3945 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3946 if (result < MIN_RX_BCOPY_THRESHOLD || 3947 result > MAX_RX_BCOPY_THRESHOLD) 3948 err = EINVAL; 3949 else 3950 Adapter->rx_bcopy_thresh = (uint32_t)result; 3951 return (err); 3952 } 3953 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3954 if (pr_val == NULL) { 3955 err = EINVAL; 3956 return (err); 3957 } 3958 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3959 if (result < MIN_RX_LIMIT_ON_INTR || 3960 result > MAX_RX_LIMIT_ON_INTR) 3961 err = EINVAL; 3962 else 3963 Adapter->rx_limit_onintr = (uint32_t)result; 3964 return (err); 3965 } 3966 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3967 if (pr_val == NULL) { 3968 err = EINVAL; 3969 return (err); 3970 } 3971 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3972 if (result < MIN_RX_INTR_DELAY || 3973 result > MAX_RX_INTR_DELAY) 3974 err = EINVAL; 3975 else { 3976 Adapter->rx_intr_delay = (uint32_t)result; 3977 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay); 3978 if (e1000g_check_acc_handle( 3979 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3980 ddi_fm_service_impact(Adapter->dip, 3981 DDI_SERVICE_DEGRADED); 3982 err = EIO; 3983 } 3984 } 3985 return (err); 3986 } 3987 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3988 if (pr_val == NULL) { 3989 err = EINVAL; 3990 return (err); 3991 } 3992 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 
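/* as with the other delay tunables above, range-check before programming E1000_RADV */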
3993 if (result < MIN_RX_INTR_ABS_DELAY || 3994 result > MAX_RX_INTR_ABS_DELAY) 3995 err = EINVAL; 3996 else { 3997 Adapter->rx_intr_abs_delay = (uint32_t)result; 3998 E1000_WRITE_REG(hw, E1000_RADV, 3999 Adapter->rx_intr_abs_delay); 4000 if (e1000g_check_acc_handle( 4001 Adapter->osdep.reg_handle) != DDI_FM_OK) { 4002 ddi_fm_service_impact(Adapter->dip, 4003 DDI_SERVICE_DEGRADED); 4004 err = EIO; 4005 } 4006 } 4007 return (err); 4008 } 4009 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 4010 if (pr_val == NULL) { 4011 err = EINVAL; 4012 return (err); 4013 } 4014 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4015 if (result < MIN_INTR_THROTTLING || 4016 result > MAX_INTR_THROTTLING) 4017 err = EINVAL; 4018 else { 4019 if (hw->mac.type >= e1000_82540) { 4020 Adapter->intr_throttling_rate = 4021 (uint32_t)result; 4022 E1000_WRITE_REG(hw, E1000_ITR, 4023 Adapter->intr_throttling_rate); 4024 if (e1000g_check_acc_handle( 4025 Adapter->osdep.reg_handle) != DDI_FM_OK) { 4026 ddi_fm_service_impact(Adapter->dip, 4027 DDI_SERVICE_DEGRADED); 4028 err = EIO; 4029 } 4030 } else 4031 err = EINVAL; 4032 } 4033 return (err); 4034 } 4035 if (strcmp(pr_name, "_intr_adaptive") == 0) { 4036 if (pr_val == NULL) { 4037 err = EINVAL; 4038 return (err); 4039 } 4040 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 4041 if (result < 0 || result > 1) 4042 err = EINVAL; 4043 else { 4044 if (hw->mac.type >= e1000_82540) { 4045 Adapter->intr_adaptive = (result == 1) ? 4046 B_TRUE : B_FALSE; 4047 } else { 4048 err = EINVAL; 4049 } 4050 } 4051 return (err); 4052 } 4053 return (ENOTSUP); 4054 } 4055 4056 static int 4057 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name, 4058 uint_t pr_valsize, void *pr_val) 4059 { 4060 int err = ENOTSUP; 4061 int value; 4062 4063 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 4064 value = Adapter->param_adv_pause; 4065 err = 0; 4066 goto done; 4067 } 4068 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 4069 value = Adapter->param_adv_asym_pause; 4070 err = 0; 4071 goto done; 4072 } 4073 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 4074 value = Adapter->tx_bcopy_thresh; 4075 err = 0; 4076 goto done; 4077 } 4078 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 4079 value = Adapter->tx_intr_enable; 4080 err = 0; 4081 goto done; 4082 } 4083 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 4084 value = Adapter->tx_intr_delay; 4085 err = 0; 4086 goto done; 4087 } 4088 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 4089 value = Adapter->tx_intr_abs_delay; 4090 err = 0; 4091 goto done; 4092 } 4093 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 4094 value = Adapter->rx_bcopy_thresh; 4095 err = 0; 4096 goto done; 4097 } 4098 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 4099 value = Adapter->rx_limit_onintr; 4100 err = 0; 4101 goto done; 4102 } 4103 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 4104 value = Adapter->rx_intr_delay; 4105 err = 0; 4106 goto done; 4107 } 4108 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 4109 value = Adapter->rx_intr_abs_delay; 4110 err = 0; 4111 goto done; 4112 } 4113 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 4114 value = Adapter->intr_throttling_rate; 4115 err = 0; 4116 goto done; 4117 } 4118 if (strcmp(pr_name, "_intr_adaptive") == 0) { 4119 value = Adapter->intr_adaptive; 4120 err = 0; 4121 goto done; 4122 } 4123 done: 4124 if (err == 0) { 4125 (void) snprintf(pr_val, pr_valsize, "%d", value); 4126 } 4127 return (err); 4128 } 4129 4130 /* 4131 * e1000g_get_conf - get configurations set in 
e1000g.conf 4132 * This routine gets user-configured values out of the configuration 4133 * file e1000g.conf. 4134 * 4135 * For each configurable value, there is a minimum, a maximum, and a 4136 * default. 4137 * If the user does not configure a value, use the default. 4138 * If the user configures below the minimum, use the minimum. 4139 * If the user configures above the maximum, use the maximum. 4140 */ 4141 static void 4142 e1000g_get_conf(struct e1000g *Adapter) 4143 { 4144 struct e1000_hw *hw = &Adapter->shared; 4145 boolean_t tbi_compatibility = B_FALSE; 4146 boolean_t is_jumbo = B_FALSE; 4147 int propval; 4148 /* 4149 * decrease the number of descriptors and free packets 4150 * for jumbo frames to reduce tx/rx resource consumption 4151 */ 4152 if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) { 4153 is_jumbo = B_TRUE; 4154 } 4155 4156 /* 4157 * get each configurable property from e1000g.conf 4158 */ 4159 4160 /* 4161 * NumTxDescriptors 4162 */ 4163 Adapter->tx_desc_num_flag = 4164 e1000g_get_prop(Adapter, "NumTxDescriptors", 4165 MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR, 4166 is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC 4167 : DEFAULT_NUM_TX_DESCRIPTOR, &propval); 4168 Adapter->tx_desc_num = propval; 4169 4170 /* 4171 * NumRxDescriptors 4172 */ 4173 Adapter->rx_desc_num_flag = 4174 e1000g_get_prop(Adapter, "NumRxDescriptors", 4175 MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR, 4176 is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC 4177 : DEFAULT_NUM_RX_DESCRIPTOR, &propval); 4178 Adapter->rx_desc_num = propval; 4179 4180 /* 4181 * NumRxFreeList 4182 */ 4183 Adapter->rx_buf_num_flag = 4184 e1000g_get_prop(Adapter, "NumRxFreeList", 4185 MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST, 4186 is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF 4187 : DEFAULT_NUM_RX_FREELIST, &propval); 4188 Adapter->rx_freelist_limit = propval; 4189 4190 /* 4191 * NumTxPacketList 4192 */ 4193 Adapter->tx_buf_num_flag = 4194 e1000g_get_prop(Adapter, "NumTxPacketList", 4195 MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST, 4196 is_jumbo ?
DEFAULT_JUMBO_NUM_TX_BUF 4197 : DEFAULT_NUM_TX_FREELIST, &propval); 4198 Adapter->tx_freelist_num = propval; 4199 4200 /* 4201 * FlowControl 4202 */ 4203 hw->fc.send_xon = B_TRUE; 4204 (void) e1000g_get_prop(Adapter, "FlowControl", 4205 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval); 4206 hw->fc.requested_mode = propval; 4207 /* 4 is the setting that says "let the eeprom decide" */ 4208 if (hw->fc.requested_mode == 4) 4209 hw->fc.requested_mode = e1000_fc_default; 4210 4211 /* 4212 * Max Num Receive Packets on Interrupt 4213 */ 4214 (void) e1000g_get_prop(Adapter, "MaxNumReceivePackets", 4215 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR, 4216 DEFAULT_RX_LIMIT_ON_INTR, &propval); 4217 Adapter->rx_limit_onintr = propval; 4218 4219 /* 4220 * PHY master slave setting 4221 */ 4222 (void) e1000g_get_prop(Adapter, "SetMasterSlave", 4223 e1000_ms_hw_default, e1000_ms_auto, 4224 e1000_ms_hw_default, &propval); 4225 hw->phy.ms_type = propval; 4226 4227 /* 4228 * Parameter which controls TBI mode workaround, which is only 4229 * needed on certain switches such as Cisco 6500/Foundry 4230 */ 4231 (void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable", 4232 0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval); 4233 tbi_compatibility = (propval == 1); 4234 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility); 4235 4236 /* 4237 * MSI Enable 4238 */ 4239 (void) e1000g_get_prop(Adapter, "MSIEnable", 4240 0, 1, DEFAULT_MSI_ENABLE, &propval); 4241 Adapter->msi_enable = (propval == 1); 4242 4243 /* 4244 * Interrupt Throttling Rate 4245 */ 4246 (void) e1000g_get_prop(Adapter, "intr_throttling_rate", 4247 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING, 4248 DEFAULT_INTR_THROTTLING, &propval); 4249 Adapter->intr_throttling_rate = propval; 4250 4251 /* 4252 * Adaptive Interrupt Blanking Enable/Disable 4253 * It is enabled by default 4254 */ 4255 (void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1, 4256 &propval); 4257 Adapter->intr_adaptive = (propval == 1); 4258 4259 /* 4260 * Hardware checksum enable/disable parameter 4261 */ 4262 (void) e1000g_get_prop(Adapter, "tx_hcksum_enable", 4263 0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval); 4264 Adapter->tx_hcksum_enable = (propval == 1); 4265 /* 4266 * Checksum on/off selection via global parameters. 4267 * 4268 * If the chip is flagged as not capable of (correctly) 4269 * handling checksumming, we don't enable it on either 4270 * Rx or Tx side. Otherwise, we take this chip's settings 4271 * from the patchable global defaults. 4272 * 4273 * We advertise our capabilities only if TX offload is 4274 * enabled. On receive, the stack will accept checksummed 4275 * packets anyway, even if we haven't said we can deliver 4276 * them. 4277 */ 4278 switch (hw->mac.type) { 4279 case e1000_82540: 4280 case e1000_82544: 4281 case e1000_82545: 4282 case e1000_82545_rev_3: 4283 case e1000_82546: 4284 case e1000_82546_rev_3: 4285 case e1000_82571: 4286 case e1000_82572: 4287 case e1000_82573: 4288 case e1000_80003es2lan: 4289 break; 4290 /* 4291 * For the following Intel PRO/1000 chipsets, we have not 4292 * tested the hardware checksum offload capability, so we 4293 * disable the capability for them. 4294 * e1000_82542, 4295 * e1000_82543, 4296 * e1000_82541, 4297 * e1000_82541_rev_2, 4298 * e1000_82547, 4299 * e1000_82547_rev_2, 4300 */ 4301 default: 4302 Adapter->tx_hcksum_enable = B_FALSE; 4303 } 4304 4305 /* 4306 * Large Send Offloading(LSO) Enable/Disable 4307 * If the tx hardware checksum is not enabled, LSO should be 4308 * disabled. 
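 * (As with the other tunables read here, lso_enable may be overridden from e1000g.conf; purely as an illustration, a driver.conf line such as "lso_enable=0;" would supply the value, with one array element per instance if several comma-separated values are listed, since e1000g_get_prop() indexes the looked-up array by instance.)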
4309 */ 4310 (void) e1000g_get_prop(Adapter, "lso_enable", 4311 0, 1, DEFAULT_LSO_ENABLE, &propval); 4312 Adapter->lso_enable = (propval == 1); 4313 4314 switch (hw->mac.type) { 4315 case e1000_82546: 4316 case e1000_82546_rev_3: 4317 if (Adapter->lso_enable) 4318 Adapter->lso_premature_issue = B_TRUE; 4319 /* FALLTHRU */ 4320 case e1000_82571: 4321 case e1000_82572: 4322 case e1000_82573: 4323 case e1000_80003es2lan: 4324 break; 4325 default: 4326 Adapter->lso_enable = B_FALSE; 4327 } 4328 4329 if (!Adapter->tx_hcksum_enable) { 4330 Adapter->lso_premature_issue = B_FALSE; 4331 Adapter->lso_enable = B_FALSE; 4332 } 4333 4334 /* 4335 * If mem_workaround_82546 is enabled, the rx buffer allocated by 4336 * e1000_82545, e1000_82546 and e1000_82546_rev_3 4337 * will not cross 64k boundary. 4338 */ 4339 (void) e1000g_get_prop(Adapter, "mem_workaround_82546", 4340 0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval); 4341 Adapter->mem_workaround_82546 = (propval == 1); 4342 4343 /* 4344 * Max number of multicast addresses 4345 */ 4346 (void) e1000g_get_prop(Adapter, "mcast_max_num", 4347 MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32, 4348 &propval); 4349 Adapter->mcast_max_num = propval; 4350 } 4351 4352 /* 4353 * e1000g_get_prop - routine to read properties 4354 * 4355 * Get a user-configure property value out of the configuration 4356 * file e1000g.conf. 4357 * 4358 * Caller provides name of the property, a default value, a minimum 4359 * value, a maximum value and a pointer to the returned property 4360 * value. 4361 * 4362 * Return B_TRUE if the configured value of the property is not a default 4363 * value, otherwise return B_FALSE. 4364 */ 4365 static boolean_t 4366 e1000g_get_prop(struct e1000g *Adapter, /* point to per-adapter structure */ 4367 char *propname, /* name of the property */ 4368 int minval, /* minimum acceptable value */ 4369 int maxval, /* maximim acceptable value */ 4370 int defval, /* default value */ 4371 int *propvalue) /* property value return to caller */ 4372 { 4373 int propval; /* value returned for requested property */ 4374 int *props; /* point to array of properties returned */ 4375 uint_t nprops; /* number of property value returned */ 4376 boolean_t ret = B_TRUE; 4377 4378 /* 4379 * get the array of properties from the config file 4380 */ 4381 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip, 4382 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) { 4383 /* got some properties, test if we got enough */ 4384 if (Adapter->instance < nprops) { 4385 propval = props[Adapter->instance]; 4386 } else { 4387 /* not enough properties configured */ 4388 propval = defval; 4389 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4390 "Not Enough %s values found in e1000g.conf" 4391 " - set to %d\n", 4392 propname, propval); 4393 ret = B_FALSE; 4394 } 4395 4396 /* free memory allocated for properties */ 4397 ddi_prop_free(props); 4398 4399 } else { 4400 propval = defval; 4401 ret = B_FALSE; 4402 } 4403 4404 /* 4405 * enforce limits 4406 */ 4407 if (propval > maxval) { 4408 propval = maxval; 4409 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4410 "Too High %s value in e1000g.conf - set to %d\n", 4411 propname, propval); 4412 } 4413 4414 if (propval < minval) { 4415 propval = minval; 4416 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4417 "Too Low %s value in e1000g.conf - set to %d\n", 4418 propname, propval); 4419 } 4420 4421 *propvalue = propval; 4422 return (ret); 4423 } 4424 4425 static boolean_t 4426 e1000g_link_check(struct e1000g *Adapter) 4427 { 
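/* * Poll the current link state. On a down-to-up transition, cache the * negotiated speed/duplex, pick the Tx stall threshold and update * TARC(0) bit 21 on 82571/82572 parts; on an up-to-down transition, * record the down state and, for IGP PHYs, turn off automatic * master/slave resolution (SmartSpeed workaround). Returns B_TRUE when * the link state has changed since the previous call. */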
4428 uint16_t speed, duplex, phydata; 4429 boolean_t link_changed = B_FALSE; 4430 struct e1000_hw *hw; 4431 uint32_t reg_tarc; 4432 4433 hw = &Adapter->shared; 4434 4435 if (e1000g_link_up(Adapter)) { 4436 /* 4437 * The Link is up, check whether it was marked as down earlier 4438 */ 4439 if (Adapter->link_state != LINK_STATE_UP) { 4440 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex); 4441 Adapter->link_speed = speed; 4442 Adapter->link_duplex = duplex; 4443 Adapter->link_state = LINK_STATE_UP; 4444 link_changed = B_TRUE; 4445 4446 if (Adapter->link_speed == SPEED_1000) 4447 Adapter->stall_threshold = TX_STALL_TIME_2S; 4448 else 4449 Adapter->stall_threshold = TX_STALL_TIME_8S; 4450 4451 Adapter->tx_link_down_timeout = 0; 4452 4453 if ((hw->mac.type == e1000_82571) || 4454 (hw->mac.type == e1000_82572)) { 4455 reg_tarc = E1000_READ_REG(hw, E1000_TARC(0)); 4456 if (speed == SPEED_1000) 4457 reg_tarc |= (1 << 21); 4458 else 4459 reg_tarc &= ~(1 << 21); 4460 E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc); 4461 } 4462 } 4463 Adapter->smartspeed = 0; 4464 } else { 4465 if (Adapter->link_state != LINK_STATE_DOWN) { 4466 Adapter->link_speed = 0; 4467 Adapter->link_duplex = 0; 4468 Adapter->link_state = LINK_STATE_DOWN; 4469 link_changed = B_TRUE; 4470 4471 /* 4472 * SmartSpeed workaround for Tabor/TanaX, When the 4473 * driver loses link disable auto master/slave 4474 * resolution. 4475 */ 4476 if (hw->phy.type == e1000_phy_igp) { 4477 (void) e1000_read_phy_reg(hw, 4478 PHY_1000T_CTRL, &phydata); 4479 phydata |= CR_1000T_MS_ENABLE; 4480 (void) e1000_write_phy_reg(hw, 4481 PHY_1000T_CTRL, phydata); 4482 } 4483 } else { 4484 e1000g_smartspeed(Adapter); 4485 } 4486 4487 if (Adapter->e1000g_state & E1000G_STARTED) { 4488 if (Adapter->tx_link_down_timeout < 4489 MAX_TX_LINK_DOWN_TIMEOUT) { 4490 Adapter->tx_link_down_timeout++; 4491 } else if (Adapter->tx_link_down_timeout == 4492 MAX_TX_LINK_DOWN_TIMEOUT) { 4493 e1000g_tx_clean(Adapter); 4494 Adapter->tx_link_down_timeout++; 4495 } 4496 } 4497 } 4498 4499 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 4500 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4501 4502 return (link_changed); 4503 } 4504 4505 /* 4506 * e1000g_reset_link - Using the link properties to setup the link 4507 */ 4508 int 4509 e1000g_reset_link(struct e1000g *Adapter) 4510 { 4511 struct e1000_mac_info *mac; 4512 struct e1000_phy_info *phy; 4513 struct e1000_hw *hw; 4514 boolean_t invalid; 4515 4516 mac = &Adapter->shared.mac; 4517 phy = &Adapter->shared.phy; 4518 hw = &Adapter->shared; 4519 invalid = B_FALSE; 4520 4521 if (hw->phy.media_type != e1000_media_type_copper) 4522 goto out; 4523 4524 if (Adapter->param_adv_autoneg == 1) { 4525 mac->autoneg = B_TRUE; 4526 phy->autoneg_advertised = 0; 4527 4528 /* 4529 * 1000hdx is not supported for autonegotiation 4530 */ 4531 if (Adapter->param_adv_1000fdx == 1) 4532 phy->autoneg_advertised |= ADVERTISE_1000_FULL; 4533 4534 if (Adapter->param_adv_100fdx == 1) 4535 phy->autoneg_advertised |= ADVERTISE_100_FULL; 4536 4537 if (Adapter->param_adv_100hdx == 1) 4538 phy->autoneg_advertised |= ADVERTISE_100_HALF; 4539 4540 if (Adapter->param_adv_10fdx == 1) 4541 phy->autoneg_advertised |= ADVERTISE_10_FULL; 4542 4543 if (Adapter->param_adv_10hdx == 1) 4544 phy->autoneg_advertised |= ADVERTISE_10_HALF; 4545 4546 if (phy->autoneg_advertised == 0) 4547 invalid = B_TRUE; 4548 } else { 4549 mac->autoneg = B_FALSE; 4550 4551 /* 4552 * For Intel copper cards, 1000fdx and 1000hdx are not 4553 * supported for forced 
link 4554 */ 4555 if (Adapter->param_adv_100fdx == 1) 4556 mac->forced_speed_duplex = ADVERTISE_100_FULL; 4557 else if (Adapter->param_adv_100hdx == 1) 4558 mac->forced_speed_duplex = ADVERTISE_100_HALF; 4559 else if (Adapter->param_adv_10fdx == 1) 4560 mac->forced_speed_duplex = ADVERTISE_10_FULL; 4561 else if (Adapter->param_adv_10hdx == 1) 4562 mac->forced_speed_duplex = ADVERTISE_10_HALF; 4563 else 4564 invalid = B_TRUE; 4565 4566 } 4567 4568 if (invalid) { 4569 e1000g_log(Adapter, CE_WARN, 4570 "Invalid link settings. Setup link to " 4571 "support autonegotiation with all link capabilities."); 4572 mac->autoneg = B_TRUE; 4573 phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 4574 } 4575 4576 out: 4577 return (e1000_setup_link(&Adapter->shared)); 4578 } 4579 4580 static void 4581 e1000g_timer_tx_resched(struct e1000g *Adapter) 4582 { 4583 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 4584 4585 rw_enter(&Adapter->chip_lock, RW_READER); 4586 4587 if (tx_ring->resched_needed && 4588 ((ddi_get_lbolt() - tx_ring->resched_timestamp) > 4589 drv_usectohz(1000000)) && 4590 (Adapter->e1000g_state & E1000G_STARTED) && 4591 (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) { 4592 tx_ring->resched_needed = B_FALSE; 4593 mac_tx_update(Adapter->mh); 4594 E1000G_STAT(tx_ring->stat_reschedule); 4595 E1000G_STAT(tx_ring->stat_timer_reschedule); 4596 } 4597 4598 rw_exit(&Adapter->chip_lock); 4599 } 4600 4601 static void 4602 e1000g_local_timer(void *ws) 4603 { 4604 struct e1000g *Adapter = (struct e1000g *)ws; 4605 struct e1000_hw *hw; 4606 e1000g_ether_addr_t ether_addr; 4607 boolean_t link_changed; 4608 4609 hw = &Adapter->shared; 4610 4611 if (Adapter->e1000g_state & E1000G_ERROR) { 4612 rw_enter(&Adapter->chip_lock, RW_WRITER); 4613 Adapter->e1000g_state &= ~E1000G_ERROR; 4614 rw_exit(&Adapter->chip_lock); 4615 4616 Adapter->reset_count++; 4617 if (e1000g_global_reset(Adapter)) { 4618 ddi_fm_service_impact(Adapter->dip, 4619 DDI_SERVICE_RESTORED); 4620 e1000g_timer_tx_resched(Adapter); 4621 } else 4622 ddi_fm_service_impact(Adapter->dip, 4623 DDI_SERVICE_LOST); 4624 return; 4625 } 4626 4627 if (e1000g_stall_check(Adapter)) { 4628 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 4629 "Tx stall detected. Activate automatic recovery.\n"); 4630 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL); 4631 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 4632 Adapter->reset_count++; 4633 if (e1000g_reset_adapter(Adapter)) { 4634 ddi_fm_service_impact(Adapter->dip, 4635 DDI_SERVICE_RESTORED); 4636 e1000g_timer_tx_resched(Adapter); 4637 } 4638 return; 4639 } 4640 4641 link_changed = B_FALSE; 4642 rw_enter(&Adapter->chip_lock, RW_READER); 4643 if (Adapter->link_complete) 4644 link_changed = e1000g_link_check(Adapter); 4645 rw_exit(&Adapter->chip_lock); 4646 4647 if (link_changed) { 4648 if (!Adapter->reset_flag && 4649 (Adapter->e1000g_state & E1000G_STARTED) && 4650 !(Adapter->e1000g_state & E1000G_SUSPENDED)) 4651 mac_link_update(Adapter->mh, Adapter->link_state); 4652 if (Adapter->link_state == LINK_STATE_UP) 4653 Adapter->reset_flag = B_FALSE; 4654 } 4655 /* 4656 * Workaround for esb2. Data stuck in fifo on a link 4657 * down event. Reset the adapter to recover it. 4658 */ 4659 if (Adapter->esb2_workaround) { 4660 Adapter->esb2_workaround = B_FALSE; 4661 (void) e1000g_reset_adapter(Adapter); 4662 return; 4663 } 4664 4665 /* 4666 * With 82571 controllers, any locally administered address will 4667 * be overwritten when there is a reset on the other port. 4668 * Detect this circumstance and correct it. 
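 * The check below reads RAR[0] back from the hardware, byte-swaps it and * compares it with the current hw->mac.addr; if they differ, the address * is reprogrammed with e1000_rar_set().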
4669 */ 4670 if ((hw->mac.type == e1000_82571) && 4671 (e1000_get_laa_state_82571(hw) == B_TRUE)) { 4672 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0); 4673 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1); 4674 4675 ether_addr.reg.low = ntohl(ether_addr.reg.low); 4676 ether_addr.reg.high = ntohl(ether_addr.reg.high); 4677 4678 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) || 4679 (ether_addr.mac.addr[4] != hw->mac.addr[1]) || 4680 (ether_addr.mac.addr[3] != hw->mac.addr[2]) || 4681 (ether_addr.mac.addr[2] != hw->mac.addr[3]) || 4682 (ether_addr.mac.addr[1] != hw->mac.addr[4]) || 4683 (ether_addr.mac.addr[0] != hw->mac.addr[5])) { 4684 (void) e1000_rar_set(hw, hw->mac.addr, 0); 4685 } 4686 } 4687 4688 /* 4689 * Long TTL workaround for 82541/82547 4690 */ 4691 (void) e1000_igp_ttl_workaround_82547(hw); 4692 4693 /* 4694 * Check the Adaptive IFS settings. If there are lots of collisions, 4695 * the IFS value is changed in steps. These settings should only be 4696 * applied at 10/100 speeds. 4697 */ 4698 if ((hw->phy.media_type == e1000_media_type_copper) && 4699 ((Adapter->link_speed == SPEED_100) || 4700 (Adapter->link_speed == SPEED_10))) { 4701 e1000_update_adaptive(hw); 4702 } 4703 /* 4704 * Set Timer Interrupts 4705 */ 4706 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 4707 4708 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 4709 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4710 else 4711 e1000g_timer_tx_resched(Adapter); 4712 4713 restart_watchdog_timer(Adapter); 4714 } 4715 4716 /* 4717 * The function e1000g_link_timer() is called when the timer for link setup 4718 * has expired, which indicates that link setup is complete. The link 4719 * state is not updated until link setup completes, and the link state is 4720 * not sent to the upper layer through mac_link_update() from this 4721 * function; it is reported from the local timer routine or the 4722 * interrupt service routine after the interface has been started (plumbed).
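 * The callout below only marks link setup as complete (under link_lock) * and clears the recorded timeout id; no MAC state is touched here.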
4723 */ 4724 static void 4725 e1000g_link_timer(void *arg) 4726 { 4727 struct e1000g *Adapter = (struct e1000g *)arg; 4728 4729 mutex_enter(&Adapter->link_lock); 4730 Adapter->link_complete = B_TRUE; 4731 Adapter->link_tid = 0; 4732 mutex_exit(&Adapter->link_lock); 4733 } 4734 4735 /* 4736 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf 4737 * 4738 * This function reads the forced speed and duplex for 10/100 Mbps speeds 4739 * and also for 1000 Mbps speeds from the e1000g.conf file. 4740 */ 4741 static void 4742 e1000g_force_speed_duplex(struct e1000g *Adapter) 4743 { 4744 int forced; 4745 int propval; 4746 struct e1000_mac_info *mac = &Adapter->shared.mac; 4747 struct e1000_phy_info *phy = &Adapter->shared.phy; 4748 4749 /* 4750 * get value out of config file 4751 */ 4752 (void) e1000g_get_prop(Adapter, "ForceSpeedDuplex", 4753 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced); 4754 4755 switch (forced) { 4756 case GDIAG_10_HALF: 4757 /* 4758 * Disable Auto Negotiation 4759 */ 4760 mac->autoneg = B_FALSE; 4761 mac->forced_speed_duplex = ADVERTISE_10_HALF; 4762 break; 4763 case GDIAG_10_FULL: 4764 /* 4765 * Disable Auto Negotiation 4766 */ 4767 mac->autoneg = B_FALSE; 4768 mac->forced_speed_duplex = ADVERTISE_10_FULL; 4769 break; 4770 case GDIAG_100_HALF: 4771 /* 4772 * Disable Auto Negotiation 4773 */ 4774 mac->autoneg = B_FALSE; 4775 mac->forced_speed_duplex = ADVERTISE_100_HALF; 4776 break; 4777 case GDIAG_100_FULL: 4778 /* 4779 * Disable Auto Negotiation 4780 */ 4781 mac->autoneg = B_FALSE; 4782 mac->forced_speed_duplex = ADVERTISE_100_FULL; 4783 break; 4784 case GDIAG_1000_FULL: 4785 /* 4786 * The gigabit spec requires autonegotiation. Therefore, 4787 * when the user wants to force the speed to 1000Mbps, we 4788 * enable AutoNeg, but only allow the hardware to advertise 4789 * 1000Mbps. This is different from 10/100 operation, where 4790 * we are allowed to link without any negotiation. 4791 */ 4792 mac->autoneg = B_TRUE; 4793 phy->autoneg_advertised = ADVERTISE_1000_FULL; 4794 break; 4795 default: /* obey the setting of AutoNegAdvertised */ 4796 mac->autoneg = B_TRUE; 4797 (void) e1000g_get_prop(Adapter, "AutoNegAdvertised", 4798 0, AUTONEG_ADVERTISE_SPEED_DEFAULT, 4799 AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval); 4800 phy->autoneg_advertised = (uint16_t)propval; 4801 break; 4802 } /* switch */ 4803 } 4804 4805 /* 4806 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf 4807 * 4808 * This function reads MaxFrameSize from e1000g.conf 4809 */ 4810 static void 4811 e1000g_get_max_frame_size(struct e1000g *Adapter) 4812 { 4813 int max_frame; 4814 4815 /* 4816 * get value out of config file 4817 */ 4818 (void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0, 4819 &max_frame); 4820 4821 switch (max_frame) { 4822 case 0: 4823 Adapter->default_mtu = ETHERMTU; 4824 break; 4825 case 1: 4826 Adapter->default_mtu = FRAME_SIZE_UPTO_4K - 4827 sizeof (struct ether_vlan_header) - ETHERFCSL; 4828 break; 4829 case 2: 4830 Adapter->default_mtu = FRAME_SIZE_UPTO_8K - 4831 sizeof (struct ether_vlan_header) - ETHERFCSL; 4832 break; 4833 case 3: 4834 Adapter->default_mtu = FRAME_SIZE_UPTO_16K - 4835 sizeof (struct ether_vlan_header) - ETHERFCSL; 4836 break; 4837 default: 4838 Adapter->default_mtu = ETHERMTU; 4839 break; 4840 } /* switch */ 4841 4842 /* 4843 * If the user-configured MTU is larger than the device's maximum MTU, 4844 * the MTU is set to the device's maximum value.
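 * As a worked example: the default MaxFrameSize of 0 leaves the MTU at the * standard 1500 bytes, which e1000g_mtu2maxframe() below turns into a * 1522-byte maximum frame once the (normally 18-byte) VLAN-tagged Ethernet * header and 4-byte FCS are added; settings 1, 2 and 3 select roughly 4K, * 8K and 16K frames less that same overhead.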
4845 */ 4846 if (Adapter->default_mtu > Adapter->max_mtu) 4847 Adapter->default_mtu = Adapter->max_mtu; 4848 4849 Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu); 4850 } 4851 4852 /* 4853 * e1000g_pch_limits - Apply limits of the PCH silicon type 4854 * 4855 * At any frame size larger than the ethernet default, 4856 * prevent linking at 10/100 speeds. 4857 */ 4858 static void 4859 e1000g_pch_limits(struct e1000g *Adapter) 4860 { 4861 struct e1000_hw *hw = &Adapter->shared; 4862 4863 /* only applies to PCH silicon type */ 4864 if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan) 4865 return; 4866 4867 /* only applies to frames larger than ethernet default */ 4868 if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) { 4869 hw->mac.autoneg = B_TRUE; 4870 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL; 4871 4872 Adapter->param_adv_autoneg = 1; 4873 Adapter->param_adv_1000fdx = 1; 4874 4875 Adapter->param_adv_100fdx = 0; 4876 Adapter->param_adv_100hdx = 0; 4877 Adapter->param_adv_10fdx = 0; 4878 Adapter->param_adv_10hdx = 0; 4879 4880 e1000g_param_sync(Adapter); 4881 } 4882 } 4883 4884 /* 4885 * e1000g_mtu2maxframe - convert given MTU to maximum frame size 4886 */ 4887 static uint32_t 4888 e1000g_mtu2maxframe(uint32_t mtu) 4889 { 4890 uint32_t maxframe; 4891 4892 maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL; 4893 4894 return (maxframe); 4895 } 4896 4897 static void 4898 arm_watchdog_timer(struct e1000g *Adapter) 4899 { 4900 Adapter->watchdog_tid = 4901 timeout(e1000g_local_timer, 4902 (void *)Adapter, 1 * drv_usectohz(1000000)); 4903 } 4904 #pragma inline(arm_watchdog_timer) 4905 4906 static void 4907 enable_watchdog_timer(struct e1000g *Adapter) 4908 { 4909 mutex_enter(&Adapter->watchdog_lock); 4910 4911 if (!Adapter->watchdog_timer_enabled) { 4912 Adapter->watchdog_timer_enabled = B_TRUE; 4913 Adapter->watchdog_timer_started = B_TRUE; 4914 arm_watchdog_timer(Adapter); 4915 } 4916 4917 mutex_exit(&Adapter->watchdog_lock); 4918 } 4919 4920 static void 4921 disable_watchdog_timer(struct e1000g *Adapter) 4922 { 4923 timeout_id_t tid; 4924 4925 mutex_enter(&Adapter->watchdog_lock); 4926 4927 Adapter->watchdog_timer_enabled = B_FALSE; 4928 Adapter->watchdog_timer_started = B_FALSE; 4929 tid = Adapter->watchdog_tid; 4930 Adapter->watchdog_tid = 0; 4931 4932 mutex_exit(&Adapter->watchdog_lock); 4933 4934 if (tid != 0) 4935 (void) untimeout(tid); 4936 } 4937 4938 static void 4939 start_watchdog_timer(struct e1000g *Adapter) 4940 { 4941 mutex_enter(&Adapter->watchdog_lock); 4942 4943 if (Adapter->watchdog_timer_enabled) { 4944 if (!Adapter->watchdog_timer_started) { 4945 Adapter->watchdog_timer_started = B_TRUE; 4946 arm_watchdog_timer(Adapter); 4947 } 4948 } 4949 4950 mutex_exit(&Adapter->watchdog_lock); 4951 } 4952 4953 static void 4954 restart_watchdog_timer(struct e1000g *Adapter) 4955 { 4956 mutex_enter(&Adapter->watchdog_lock); 4957 4958 if (Adapter->watchdog_timer_started) 4959 arm_watchdog_timer(Adapter); 4960 4961 mutex_exit(&Adapter->watchdog_lock); 4962 } 4963 4964 static void 4965 stop_watchdog_timer(struct e1000g *Adapter) 4966 { 4967 timeout_id_t tid; 4968 4969 mutex_enter(&Adapter->watchdog_lock); 4970 4971 Adapter->watchdog_timer_started = B_FALSE; 4972 tid = Adapter->watchdog_tid; 4973 Adapter->watchdog_tid = 0; 4974 4975 mutex_exit(&Adapter->watchdog_lock); 4976 4977 if (tid != 0) 4978 (void) untimeout(tid); 4979 } 4980 4981 static void 4982 stop_link_timer(struct e1000g *Adapter) 4983 { 4984 timeout_id_t tid; 4985 4986 /* Disable the 
link timer */ 4987 mutex_enter(&Adapter->link_lock); 4988 4989 tid = Adapter->link_tid; 4990 Adapter->link_tid = 0; 4991 4992 mutex_exit(&Adapter->link_lock); 4993 4994 if (tid != 0) 4995 (void) untimeout(tid); 4996 } 4997 4998 static void 4999 stop_82547_timer(e1000g_tx_ring_t *tx_ring) 5000 { 5001 timeout_id_t tid; 5002 5003 /* Disable the tx timer for 82547 chipset */ 5004 mutex_enter(&tx_ring->tx_lock); 5005 5006 tx_ring->timer_enable_82547 = B_FALSE; 5007 tid = tx_ring->timer_id_82547; 5008 tx_ring->timer_id_82547 = 0; 5009 5010 mutex_exit(&tx_ring->tx_lock); 5011 5012 if (tid != 0) 5013 (void) untimeout(tid); 5014 } 5015 5016 void 5017 e1000g_clear_interrupt(struct e1000g *Adapter) 5018 { 5019 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 5020 0xffffffff & ~E1000_IMS_RXSEQ); 5021 } 5022 5023 void 5024 e1000g_mask_interrupt(struct e1000g *Adapter) 5025 { 5026 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, 5027 IMS_ENABLE_MASK & ~E1000_IMS_TXDW); 5028 5029 if (Adapter->tx_intr_enable) 5030 e1000g_mask_tx_interrupt(Adapter); 5031 } 5032 5033 /* 5034 * This routine is called by e1000g_quiesce(), therefore must not block. 5035 */ 5036 void 5037 e1000g_clear_all_interrupts(struct e1000g *Adapter) 5038 { 5039 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff); 5040 } 5041 5042 void 5043 e1000g_mask_tx_interrupt(struct e1000g *Adapter) 5044 { 5045 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW); 5046 } 5047 5048 void 5049 e1000g_clear_tx_interrupt(struct e1000g *Adapter) 5050 { 5051 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW); 5052 } 5053 5054 static void 5055 e1000g_smartspeed(struct e1000g *Adapter) 5056 { 5057 struct e1000_hw *hw = &Adapter->shared; 5058 uint16_t phy_status; 5059 uint16_t phy_ctrl; 5060 5061 /* 5062 * If we're not T-or-T, or we're not autoneg'ing, or we're not 5063 * advertising 1000Full, we don't even use the workaround 5064 */ 5065 if ((hw->phy.type != e1000_phy_igp) || 5066 !hw->mac.autoneg || 5067 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)) 5068 return; 5069 5070 /* 5071 * True if this is the first call of this function or after every 5072 * 30 seconds of not having link 5073 */ 5074 if (Adapter->smartspeed == 0) { 5075 /* 5076 * If Master/Slave config fault is asserted twice, we 5077 * assume back-to-back 5078 */ 5079 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 5080 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 5081 return; 5082 5083 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 5084 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 5085 return; 5086 /* 5087 * We're assuming back-2-back because our status register 5088 * insists! there's a fault in the master/slave 5089 * relationship that was "negotiated" 5090 */ 5091 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 5092 /* 5093 * Is the phy configured for manual configuration of 5094 * master/slave? 5095 */ 5096 if (phy_ctrl & CR_1000T_MS_ENABLE) { 5097 /* 5098 * Yes. 
Then disable manual configuration (enable 5099 * auto configuration) of master/slave 5100 */ 5101 phy_ctrl &= ~CR_1000T_MS_ENABLE; 5102 (void) e1000_write_phy_reg(hw, 5103 PHY_1000T_CTRL, phy_ctrl); 5104 /* 5105 * Effectively starting the clock 5106 */ 5107 Adapter->smartspeed++; 5108 /* 5109 * Restart autonegotiation 5110 */ 5111 if (!e1000_phy_setup_autoneg(hw) && 5112 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 5113 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 5114 MII_CR_RESTART_AUTO_NEG); 5115 (void) e1000_write_phy_reg(hw, 5116 PHY_CONTROL, phy_ctrl); 5117 } 5118 } 5119 return; 5120 /* 5121 * Has 6 seconds transpired still without link? Remember, 5122 * you should reset the smartspeed counter once you obtain 5123 * link 5124 */ 5125 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 5126 /* 5127 * Yes. Remember, we did at the start determine that 5128 * there's a master/slave configuration fault, so we're 5129 * still assuming there's someone on the other end, but we 5130 * just haven't yet been able to talk to it. We then 5131 * re-enable auto configuration of master/slave to see if 5132 * we're running 2/3 pair cables. 5133 */ 5134 /* 5135 * If still no link, perhaps using 2/3 pair cable 5136 */ 5137 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 5138 phy_ctrl |= CR_1000T_MS_ENABLE; 5139 (void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 5140 /* 5141 * Restart autoneg with phy enabled for manual 5142 * configuration of master/slave 5143 */ 5144 if (!e1000_phy_setup_autoneg(hw) && 5145 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 5146 phy_ctrl |= 5147 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 5148 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl); 5149 } 5150 /* 5151 * Hopefully, there are no more faults and we've obtained 5152 * link as a result. 5153 */ 5154 } 5155 /* 5156 * Restart process after E1000_SMARTSPEED_MAX iterations (30 5157 * seconds) 5158 */ 5159 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 5160 Adapter->smartspeed = 0; 5161 } 5162 5163 static boolean_t 5164 is_valid_mac_addr(uint8_t *mac_addr) 5165 { 5166 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 5167 const uint8_t addr_test2[6] = 5168 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 5169 5170 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 5171 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 5172 return (B_FALSE); 5173 5174 return (B_TRUE); 5175 } 5176 5177 /* 5178 * e1000g_stall_check - check for tx stall 5179 * 5180 * This function checks if the adapter is stalled (in transmit). 5181 * 5182 * It is called each time the watchdog timeout is invoked. 5183 * If the transmit descriptor reclaim continuously fails, 5184 * the watchdog value will increment by 1. If the watchdog 5185 * value exceeds the threshold, the adapter is assumed to 5186 * have stalled and need to be reset. 
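 * Note that the descriptor-reclaim bookkeeping itself is done by * e1000g_recycle(); the check below is skipped while the link is down and * otherwise simply samples the stall_flag maintained by the reclaim path.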
5187 */ 5188 static boolean_t 5189 e1000g_stall_check(struct e1000g *Adapter) 5190 { 5191 e1000g_tx_ring_t *tx_ring; 5192 5193 tx_ring = Adapter->tx_ring; 5194 5195 if (Adapter->link_state != LINK_STATE_UP) 5196 return (B_FALSE); 5197 5198 (void) e1000g_recycle(tx_ring); 5199 5200 if (Adapter->stall_flag) 5201 return (B_TRUE); 5202 5203 return (B_FALSE); 5204 } 5205 5206 #ifdef E1000G_DEBUG 5207 static enum ioc_reply 5208 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp) 5209 { 5210 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd); 5211 e1000g_peekpoke_t *ppd; 5212 uint64_t mem_va; 5213 uint64_t maxoff; 5214 boolean_t peek; 5215 5216 switch (iocp->ioc_cmd) { 5217 5218 case E1000G_IOC_REG_PEEK: 5219 peek = B_TRUE; 5220 break; 5221 5222 case E1000G_IOC_REG_POKE: 5223 peek = B_FALSE; 5224 break; 5225 5226 default: 5227 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL, 5228 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n", 5229 iocp->ioc_cmd); 5230 return (IOC_INVAL); 5231 } 5232 5233 /* 5234 * Validate format of ioctl 5235 */ 5236 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t)) 5237 return (IOC_INVAL); 5238 if (mp->b_cont == NULL) 5239 return (IOC_INVAL); 5240 5241 ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr; 5242 5243 /* 5244 * Validate request parameters 5245 */ 5246 switch (ppd->pp_acc_space) { 5247 5248 default: 5249 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL, 5250 "e1000g_diag_ioctl: invalid access space 0x%X\n", 5251 ppd->pp_acc_space); 5252 return (IOC_INVAL); 5253 5254 case E1000G_PP_SPACE_REG: 5255 /* 5256 * Memory-mapped I/O space 5257 */ 5258 ASSERT(ppd->pp_acc_size == 4); 5259 if (ppd->pp_acc_size != 4) 5260 return (IOC_INVAL); 5261 5262 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0) 5263 return (IOC_INVAL); 5264 5265 mem_va = 0; 5266 maxoff = 0x10000; 5267 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg; 5268 break; 5269 5270 case E1000G_PP_SPACE_E1000G: 5271 /* 5272 * E1000g data structure! 5273 */ 5274 mem_va = (uintptr_t)e1000gp; 5275 maxoff = sizeof (struct e1000g); 5276 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem; 5277 break; 5278 5279 } 5280 5281 if (ppd->pp_acc_offset >= maxoff) 5282 return (IOC_INVAL); 5283 5284 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff) 5285 return (IOC_INVAL); 5286 5287 /* 5288 * All OK - go! 5289 */ 5290 ppd->pp_acc_offset += mem_va; 5291 (*ppfn)(e1000gp, ppd); 5292 return (peek ?
IOC_REPLY : IOC_ACK); 5293 } 5294 5295 static void 5296 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5297 { 5298 ddi_acc_handle_t handle; 5299 uint32_t *regaddr; 5300 5301 handle = e1000gp->osdep.reg_handle; 5302 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr + 5303 (uintptr_t)ppd->pp_acc_offset); 5304 5305 ppd->pp_acc_data = ddi_get32(handle, regaddr); 5306 } 5307 5308 static void 5309 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5310 { 5311 ddi_acc_handle_t handle; 5312 uint32_t *regaddr; 5313 uint32_t value; 5314 5315 handle = e1000gp->osdep.reg_handle; 5316 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr + 5317 (uintptr_t)ppd->pp_acc_offset); 5318 value = (uint32_t)ppd->pp_acc_data; 5319 5320 ddi_put32(handle, regaddr, value); 5321 } 5322 5323 static void 5324 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5325 { 5326 uint64_t value; 5327 void *vaddr; 5328 5329 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 5330 5331 switch (ppd->pp_acc_size) { 5332 case 1: 5333 value = *(uint8_t *)vaddr; 5334 break; 5335 5336 case 2: 5337 value = *(uint16_t *)vaddr; 5338 break; 5339 5340 case 4: 5341 value = *(uint32_t *)vaddr; 5342 break; 5343 5344 case 8: 5345 value = *(uint64_t *)vaddr; 5346 break; 5347 } 5348 5349 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 5350 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n", 5351 (void *)e1000gp, (void *)ppd, value, vaddr); 5352 5353 ppd->pp_acc_data = value; 5354 } 5355 5356 static void 5357 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5358 { 5359 uint64_t value; 5360 void *vaddr; 5361 5362 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 5363 value = ppd->pp_acc_data; 5364 5365 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 5366 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n", 5367 (void *)e1000gp, (void *)ppd, value, vaddr); 5368 5369 switch (ppd->pp_acc_size) { 5370 case 1: 5371 *(uint8_t *)vaddr = (uint8_t)value; 5372 break; 5373 5374 case 2: 5375 *(uint16_t *)vaddr = (uint16_t)value; 5376 break; 5377 5378 case 4: 5379 *(uint32_t *)vaddr = (uint32_t)value; 5380 break; 5381 5382 case 8: 5383 *(uint64_t *)vaddr = (uint64_t)value; 5384 break; 5385 } 5386 } 5387 #endif 5388 5389 /* 5390 * Loopback Support 5391 */ 5392 static lb_property_t lb_normal = 5393 { normal, "normal", E1000G_LB_NONE }; 5394 static lb_property_t lb_external1000 = 5395 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 }; 5396 static lb_property_t lb_external100 = 5397 { external, "100Mbps", E1000G_LB_EXTERNAL_100 }; 5398 static lb_property_t lb_external10 = 5399 { external, "10Mbps", E1000G_LB_EXTERNAL_10 }; 5400 static lb_property_t lb_phy = 5401 { internal, "PHY", E1000G_LB_INTERNAL_PHY }; 5402 5403 static enum ioc_reply 5404 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp) 5405 { 5406 lb_info_sz_t *lbsp; 5407 lb_property_t *lbpp; 5408 struct e1000_hw *hw; 5409 uint32_t *lbmp; 5410 uint32_t size; 5411 uint32_t value; 5412 5413 hw = &Adapter->shared; 5414 5415 if (mp->b_cont == NULL) 5416 return (IOC_INVAL); 5417 5418 if (!e1000g_check_loopback_support(hw)) { 5419 e1000g_log(NULL, CE_WARN, 5420 "Loopback is not supported on e1000g%d", Adapter->instance); 5421 return (IOC_INVAL); 5422 } 5423 5424 switch (iocp->ioc_cmd) { 5425 default: 5426 return (IOC_INVAL); 5427 5428 case LB_GET_INFO_SIZE: 5429 size = sizeof (lb_info_sz_t); 5430 if (iocp->ioc_count != size) 5431 return (IOC_INVAL); 5432 5433 rw_enter(&Adapter->chip_lock, RW_WRITER); 
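/* refresh the cached PHY status/extended-status bits consulted below */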
5434 e1000g_get_phy_state(Adapter); 5435 5436 /* 5437 * Workaround for hardware faults. In order to get a stable 5438 * state of phy, we will wait for a specific interval and 5439 * try again. The time delay is an experiential value based 5440 * on our testing. 5441 */ 5442 msec_delay(100); 5443 e1000g_get_phy_state(Adapter); 5444 rw_exit(&Adapter->chip_lock); 5445 5446 value = sizeof (lb_normal); 5447 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5448 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5449 (hw->phy.media_type == e1000_media_type_fiber) || 5450 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5451 value += sizeof (lb_phy); 5452 switch (hw->mac.type) { 5453 case e1000_82571: 5454 case e1000_82572: 5455 case e1000_80003es2lan: 5456 value += sizeof (lb_external1000); 5457 break; 5458 } 5459 } 5460 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5461 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5462 value += sizeof (lb_external100); 5463 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5464 value += sizeof (lb_external10); 5465 5466 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 5467 *lbsp = value; 5468 break; 5469 5470 case LB_GET_INFO: 5471 value = sizeof (lb_normal); 5472 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5473 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5474 (hw->phy.media_type == e1000_media_type_fiber) || 5475 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5476 value += sizeof (lb_phy); 5477 switch (hw->mac.type) { 5478 case e1000_82571: 5479 case e1000_82572: 5480 case e1000_80003es2lan: 5481 value += sizeof (lb_external1000); 5482 break; 5483 } 5484 } 5485 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5486 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5487 value += sizeof (lb_external100); 5488 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5489 value += sizeof (lb_external10); 5490 5491 size = value; 5492 if (iocp->ioc_count != size) 5493 return (IOC_INVAL); 5494 5495 value = 0; 5496 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 5497 lbpp[value++] = lb_normal; 5498 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5499 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5500 (hw->phy.media_type == e1000_media_type_fiber) || 5501 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5502 lbpp[value++] = lb_phy; 5503 switch (hw->mac.type) { 5504 case e1000_82571: 5505 case e1000_82572: 5506 case e1000_80003es2lan: 5507 lbpp[value++] = lb_external1000; 5508 break; 5509 } 5510 } 5511 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5512 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5513 lbpp[value++] = lb_external100; 5514 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5515 lbpp[value++] = lb_external10; 5516 break; 5517 5518 case LB_GET_MODE: 5519 size = sizeof (uint32_t); 5520 if (iocp->ioc_count != size) 5521 return (IOC_INVAL); 5522 5523 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 5524 *lbmp = Adapter->loopback_mode; 5525 break; 5526 5527 case LB_SET_MODE: 5528 size = 0; 5529 if (iocp->ioc_count != sizeof (uint32_t)) 5530 return (IOC_INVAL); 5531 5532 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 5533 if (!e1000g_set_loopback_mode(Adapter, *lbmp)) 5534 return (IOC_INVAL); 5535 break; 5536 } 5537 5538 iocp->ioc_count = size; 5539 iocp->ioc_error = 0; 5540 5541 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 5542 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 5543 return (IOC_INVAL); 5544 } 5545 5546 return 
(IOC_REPLY); 5547 } 5548 5549 static boolean_t 5550 e1000g_check_loopback_support(struct e1000_hw *hw) 5551 { 5552 switch (hw->mac.type) { 5553 case e1000_82540: 5554 case e1000_82545: 5555 case e1000_82545_rev_3: 5556 case e1000_82546: 5557 case e1000_82546_rev_3: 5558 case e1000_82541: 5559 case e1000_82541_rev_2: 5560 case e1000_82547: 5561 case e1000_82547_rev_2: 5562 case e1000_82571: 5563 case e1000_82572: 5564 case e1000_82573: 5565 case e1000_82574: 5566 case e1000_80003es2lan: 5567 case e1000_ich9lan: 5568 case e1000_ich10lan: 5569 return (B_TRUE); 5570 } 5571 return (B_FALSE); 5572 } 5573 5574 static boolean_t 5575 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode) 5576 { 5577 struct e1000_hw *hw; 5578 int i, times; 5579 boolean_t link_up; 5580 5581 if (mode == Adapter->loopback_mode) 5582 return (B_TRUE); 5583 5584 hw = &Adapter->shared; 5585 times = 0; 5586 5587 Adapter->loopback_mode = mode; 5588 5589 if (mode == E1000G_LB_NONE) { 5590 /* Reset the chip */ 5591 hw->phy.autoneg_wait_to_complete = B_TRUE; 5592 (void) e1000g_reset_adapter(Adapter); 5593 hw->phy.autoneg_wait_to_complete = B_FALSE; 5594 return (B_TRUE); 5595 } 5596 5597 again: 5598 5599 rw_enter(&Adapter->chip_lock, RW_WRITER); 5600 5601 switch (mode) { 5602 default: 5603 rw_exit(&Adapter->chip_lock); 5604 return (B_FALSE); 5605 5606 case E1000G_LB_EXTERNAL_1000: 5607 e1000g_set_external_loopback_1000(Adapter); 5608 break; 5609 5610 case E1000G_LB_EXTERNAL_100: 5611 e1000g_set_external_loopback_100(Adapter); 5612 break; 5613 5614 case E1000G_LB_EXTERNAL_10: 5615 e1000g_set_external_loopback_10(Adapter); 5616 break; 5617 5618 case E1000G_LB_INTERNAL_PHY: 5619 e1000g_set_internal_loopback(Adapter); 5620 break; 5621 } 5622 5623 times++; 5624 5625 rw_exit(&Adapter->chip_lock); 5626 5627 /* Wait for link up */ 5628 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--) 5629 msec_delay(100); 5630 5631 rw_enter(&Adapter->chip_lock, RW_WRITER); 5632 5633 link_up = e1000g_link_up(Adapter); 5634 5635 rw_exit(&Adapter->chip_lock); 5636 5637 if (!link_up) { 5638 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5639 "Failed to get the link up"); 5640 if (times < 2) { 5641 /* Reset the link */ 5642 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5643 "Reset the link ..."); 5644 (void) e1000g_reset_adapter(Adapter); 5645 goto again; 5646 } 5647 5648 /* 5649 * Reset driver to loopback none when set loopback failed 5650 * for the second time. 5651 */ 5652 Adapter->loopback_mode = E1000G_LB_NONE; 5653 5654 /* Reset the chip */ 5655 hw->phy.autoneg_wait_to_complete = B_TRUE; 5656 (void) e1000g_reset_adapter(Adapter); 5657 hw->phy.autoneg_wait_to_complete = B_FALSE; 5658 5659 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5660 "Set loopback mode failed, reset to loopback none"); 5661 5662 return (B_FALSE); 5663 } 5664 5665 return (B_TRUE); 5666 } 5667 5668 /* 5669 * The following loopback settings are from Intel's technical 5670 * document - "How To Loopback". All the register settings and 5671 * time delay values are directly inherited from the document 5672 * without more explanations available. 
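 * In outline, e1000g_set_internal_loopback() below forces the PHY to * 1000/full with its loopback bit set and then mirrors that speed/duplex * in the MAC CTRL register, adding ILOS/SLU as individual MAC types * require; the external 100/10 variants follow the same pattern at their * respective speeds.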
5673 */ 5674 static void 5675 e1000g_set_internal_loopback(struct e1000g *Adapter) 5676 { 5677 struct e1000_hw *hw; 5678 uint32_t ctrl; 5679 uint32_t status; 5680 uint16_t phy_ctrl; 5681 uint16_t phy_reg; 5682 uint32_t txcw; 5683 5684 hw = &Adapter->shared; 5685 5686 /* Disable Smart Power Down */ 5687 phy_spd_state(hw, B_FALSE); 5688 5689 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); 5690 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10); 5691 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000; 5692 5693 switch (hw->mac.type) { 5694 case e1000_82540: 5695 case e1000_82545: 5696 case e1000_82545_rev_3: 5697 case e1000_82546: 5698 case e1000_82546_rev_3: 5699 case e1000_82573: 5700 /* Auto-MDI/MDIX off */ 5701 (void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 5702 /* Reset PHY to update Auto-MDI/MDIX */ 5703 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5704 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN); 5705 /* Reset PHY to auto-neg off and force 1000 */ 5706 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5707 phy_ctrl | MII_CR_RESET); 5708 /* 5709 * Disable PHY receiver for 82540/545/546 and 82573 Family. 5710 * See comments above e1000g_set_internal_loopback() for the 5711 * background. 5712 */ 5713 (void) e1000_write_phy_reg(hw, 29, 0x001F); 5714 (void) e1000_write_phy_reg(hw, 30, 0x8FFC); 5715 (void) e1000_write_phy_reg(hw, 29, 0x001A); 5716 (void) e1000_write_phy_reg(hw, 30, 0x8FF0); 5717 break; 5718 case e1000_80003es2lan: 5719 /* Force Link Up */ 5720 (void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 5721 0x1CC); 5722 /* Sets PCS loopback at 1Gbs */ 5723 (void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, 5724 0x1046); 5725 break; 5726 } 5727 5728 /* 5729 * The following registers should be set for e1000_phy_bm phy type. 5730 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy. 5731 * For others, we do not need to set these registers. 5732 */ 5733 if (hw->phy.type == e1000_phy_bm) { 5734 /* Set Default MAC Interface speed to 1GB */ 5735 (void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg); 5736 phy_reg &= ~0x0007; 5737 phy_reg |= 0x006; 5738 (void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg); 5739 /* Assert SW reset for above settings to take effect */ 5740 (void) e1000_phy_commit(hw); 5741 msec_delay(1); 5742 /* Force Full Duplex */ 5743 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg); 5744 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16), 5745 phy_reg | 0x000C); 5746 /* Set Link Up (in force link) */ 5747 (void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg); 5748 (void) e1000_write_phy_reg(hw, PHY_REG(776, 16), 5749 phy_reg | 0x0040); 5750 /* Force Link */ 5751 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg); 5752 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16), 5753 phy_reg | 0x0040); 5754 /* Set Early Link Enable */ 5755 (void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg); 5756 (void) e1000_write_phy_reg(hw, PHY_REG(769, 20), 5757 phy_reg | 0x0400); 5758 } 5759 5760 /* Set loopback */ 5761 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK); 5762 5763 msec_delay(250); 5764 5765 /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ 5766 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5767 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5768 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5769 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5770 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ 5771 E1000_CTRL_FD); /* Force Duplex to FULL */ 5772 5773 switch (hw->mac.type) { 5774 case e1000_82540: 5775 case e1000_82545: 5776 case e1000_82545_rev_3: 5777 case e1000_82546: 5778 case e1000_82546_rev_3: 5779 /* 5780 * For some serdes we'll need to commit the writes now 5781 * so that the status is updated on link 5782 */ 5783 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 5784 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5785 msec_delay(100); 5786 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5787 } 5788 5789 if (hw->phy.media_type == e1000_media_type_copper) { 5790 /* Invert Loss of Signal */ 5791 ctrl |= E1000_CTRL_ILOS; 5792 } else { 5793 /* Set ILOS on fiber nic if half duplex is detected */ 5794 status = E1000_READ_REG(hw, E1000_STATUS); 5795 if ((status & E1000_STATUS_FD) == 0) 5796 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5797 } 5798 break; 5799 5800 case e1000_82571: 5801 case e1000_82572: 5802 /* 5803 * The fiber/SerDes versions of this adapter do not contain an 5804 * accessible PHY. Therefore, loopback beyond MAC must be done 5805 * using SerDes analog loopback. 5806 */ 5807 if (hw->phy.media_type != e1000_media_type_copper) { 5808 /* Disable autoneg by setting bit 31 of TXCW to zero */ 5809 txcw = E1000_READ_REG(hw, E1000_TXCW); 5810 txcw &= ~((uint32_t)1 << 31); 5811 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 5812 5813 /* 5814 * Write 0x410 to Serdes Control register 5815 * to enable Serdes analog loopback 5816 */ 5817 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 5818 msec_delay(10); 5819 } 5820 5821 status = E1000_READ_REG(hw, E1000_STATUS); 5822 /* Set ILOS on fiber nic if half duplex is detected */ 5823 if ((hw->phy.media_type == e1000_media_type_fiber) && 5824 ((status & E1000_STATUS_FD) == 0 || 5825 (status & E1000_STATUS_LU) == 0)) 5826 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5827 else if (hw->phy.media_type == e1000_media_type_internal_serdes) 5828 ctrl |= E1000_CTRL_SLU; 5829 break; 5830 5831 case e1000_82573: 5832 ctrl |= E1000_CTRL_ILOS; 5833 break; 5834 case e1000_ich9lan: 5835 case e1000_ich10lan: 5836 ctrl |= E1000_CTRL_SLU; 5837 break; 5838 } 5839 if (hw->phy.type == e1000_phy_bm) 5840 ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS; 5841 5842 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5843 } 5844 5845 static void 5846 e1000g_set_external_loopback_1000(struct e1000g *Adapter) 5847 { 5848 struct e1000_hw *hw; 5849 uint32_t rctl; 5850 uint32_t ctrl_ext; 5851 uint32_t ctrl; 5852 uint32_t status; 5853 uint32_t txcw; 5854 uint16_t phydata; 5855 5856 hw = &Adapter->shared; 5857 5858 /* Disable Smart Power Down */ 5859 phy_spd_state(hw, B_FALSE); 5860 5861 switch (hw->mac.type) { 5862 case e1000_82571: 5863 case e1000_82572: 5864 switch (hw->phy.media_type) { 5865 case e1000_media_type_copper: 5866 /* Force link up (Must be done before the PHY writes) */ 5867 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5868 ctrl |= E1000_CTRL_SLU; /* Force Link Up */ 5869 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5870 5871 rctl = E1000_READ_REG(hw, E1000_RCTL); 5872 rctl |= (E1000_RCTL_EN | 5873 E1000_RCTL_SBP | 5874 E1000_RCTL_UPE | 5875 E1000_RCTL_MPE | 5876 E1000_RCTL_LPE | 5877 E1000_RCTL_BAM); /* 0x803E */ 5878 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 5879 5880 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 5881 
			ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
			    E1000_CTRL_EXT_SDP6_DATA |
			    E1000_CTRL_EXT_SDP3_DATA |
			    E1000_CTRL_EXT_SDP4_DIR |
			    E1000_CTRL_EXT_SDP6_DIR |
			    E1000_CTRL_EXT_SDP3_DIR);	/* 0x0DD0 */
			E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

			/*
			 * This sequence tunes the PHY's SDP and contains no
			 * customer-settable values. For background, see the
			 * comments above e1000g_set_internal_loopback().
			 */
			(void) e1000_write_phy_reg(hw, 0x0, 0x140);
			msec_delay(10);
			(void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
			(void) e1000_write_phy_reg(hw, 0x12, 0xC10);
			(void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
			(void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
			(void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
			(void) e1000_write_phy_reg(hw, 0x1F33, 0x0);

			(void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
			(void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
			(void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
			(void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
			(void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);

			msec_delay(50);
			break;
		case e1000_media_type_fiber:
		case e1000_media_type_internal_serdes:
			status = E1000_READ_REG(hw, E1000_STATUS);
			if (((status & E1000_STATUS_LU) == 0) ||
			    (hw->phy.media_type ==
			    e1000_media_type_internal_serdes)) {
				ctrl = E1000_READ_REG(hw, E1000_CTRL);
				ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
				E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
			}

			/* Disable autoneg by setting bit 31 of TXCW to zero */
			txcw = E1000_READ_REG(hw, E1000_TXCW);
			txcw &= ~((uint32_t)1 << 31);
			E1000_WRITE_REG(hw, E1000_TXCW, txcw);

			/*
			 * Write 0x410 to Serdes Control register
			 * to enable Serdes analog loopback
			 */
			E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
			msec_delay(10);
			break;
		default:
			break;
		}
		break;
	case e1000_82574:
	case e1000_80003es2lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		(void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
		(void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
		    phydata | (1 << 5));
		Adapter->param_adv_autoneg = 1;
		Adapter->param_adv_1000fdx = 1;
		(void) e1000g_reset_link(Adapter);
		break;
	}
}

static void
e1000g_set_external_loopback_100(struct e1000g *Adapter)
{
	struct e1000_hw *hw;
	uint32_t ctrl;
	uint16_t phy_ctrl;

	hw = &Adapter->shared;

	/* Disable Smart Power Down */
	phy_spd_state(hw, B_FALSE);

	phy_ctrl = (MII_CR_FULL_DUPLEX |
	    MII_CR_SPEED_100);

	/* Force 100/FD, reset PHY */
	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
	    phy_ctrl | MII_CR_RESET);	/* 0xA100 */
	msec_delay(10);

	/* Force 100/FD */
	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
	    phy_ctrl);	/* 0x2100 */
	msec_delay(10);
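	/*
	 * A quick check of the hex annotations above, assuming the usual
	 * shared-code values (MII_CR_RESET 0x8000, MII_CR_SPEED_100 0x2000,
	 * MII_CR_FULL_DUPLEX 0x0100): the first write is
	 * 0x8000 | 0x2000 | 0x0100 = 0xA100 and the second is
	 * 0x2000 | 0x0100 = 0x2100, i.e. 100 Mb/s full duplex forced with
	 * auto-negotiation left disabled.
	 */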

	/* Now setup the MAC to the same speed/duplex as the PHY. */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
	    E1000_CTRL_SPD_100 |	/* Force Speed to 100 */
	    E1000_CTRL_FD);		/* Force Duplex to FULL */

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
}

static void
e1000g_set_external_loopback_10(struct e1000g *Adapter)
{
	struct e1000_hw *hw;
	uint32_t ctrl;
	uint16_t phy_ctrl;

	hw = &Adapter->shared;

	/* Disable Smart Power Down */
	phy_spd_state(hw, B_FALSE);

	phy_ctrl = (MII_CR_FULL_DUPLEX |
	    MII_CR_SPEED_10);

	/* Force 10/FD, reset PHY */
	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
	    phy_ctrl | MII_CR_RESET);	/* 0x8100 */
	msec_delay(10);

	/* Force 10/FD */
	(void) e1000_write_phy_reg(hw, PHY_CONTROL,
	    phy_ctrl);	/* 0x0100 */
	msec_delay(10);

	/* Now setup the MAC to the same speed/duplex as the PHY. */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
	ctrl |= (E1000_CTRL_SLU |	/* Force Link Up */
	    E1000_CTRL_FRCSPD |		/* Set the Force Speed Bit */
	    E1000_CTRL_FRCDPX |		/* Set the Force Duplex Bit */
	    E1000_CTRL_SPD_10 |		/* Force Speed to 10 */
	    E1000_CTRL_FD);		/* Force Duplex to FULL */

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
}

#ifdef __sparc
static boolean_t
e1000g_find_mac_address(struct e1000g *Adapter)
{
	struct e1000_hw *hw = &Adapter->shared;
	uchar_t *bytes;
	struct ether_addr sysaddr;
	uint_t nelts;
	int err;
	boolean_t found = B_FALSE;

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead.
	 *
	 * We check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it). If we can't
	 * make sense of it this way, we'll ignore it.
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Look up the OBP property "local-mac-address?". If the user has set
	 * 'local-mac-address? = false', use "the system address" instead.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
			if (localetheraddr(NULL, &sysaddr) != 0) {
				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
				found = B_TRUE;
			}
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as Solaris takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	if (found) {
		bcopy(hw->mac.addr, hw->mac.perm_addr,
		    ETHERADDRL);
	}

	return (found);
}
#endif

static int
e1000g_add_intrs(struct e1000g *Adapter)
{
	dev_info_t *devinfo;
	int intr_types;
	int rc;

	devinfo = Adapter->dip;

	/* Get supported interrupt types */
	rc = ddi_intr_get_supported_types(devinfo, &intr_types);

	if (rc != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Get supported interrupt types failed: %d\n", rc);
		return (DDI_FAILURE);
	}

	/*
	 * Based on Intel Technical Advisory document (TA-160), there are
	 * cases where older Intel PCI-X NICs may "advertise" to the OS that
	 * they support MSI, but in fact have problems with it. So we should
	 * only enable MSI for PCI-E NICs and disable MSI for old PCI/PCI-X
	 * NICs.
	 */
	if (Adapter->shared.mac.type < e1000_82571)
		Adapter->msi_enable = B_FALSE;

	if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);

		if (rc != DDI_SUCCESS) {
			/* EMPTY */
			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
			    "Add MSI failed, trying Legacy interrupts\n");
		} else {
			Adapter->intr_type = DDI_INTR_TYPE_MSI;
		}
	}

	if ((Adapter->intr_type == 0) &&
	    (intr_types & DDI_INTR_TYPE_FIXED)) {
		rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);

		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
			    "Add Legacy interrupts failed\n");
			return (DDI_FAILURE);
		}

		Adapter->intr_type = DDI_INTR_TYPE_FIXED;
	}

	if (Adapter->intr_type == 0) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
		    "No interrupts registered\n");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*
 * e1000g_intr_add() handles MSI/Legacy interrupts
 */
static int
e1000g_intr_add(struct e1000g *Adapter, int intr_type)
{
	dev_info_t *devinfo;
	int count, avail, actual;
	int x, y, rc, inum = 0;
	int flag;
	ddi_intr_handler_t *intr_handler;

	devinfo = Adapter->dip;

	/* get number of interrupts */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if ((rc != DDI_SUCCESS) || (count == 0)) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Get interrupt number failed. Return: %d, count: %d\n",
		    rc, count);
		return (DDI_FAILURE);
	}

	/* get number of available interrupts */
	rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
	if ((rc != DDI_SUCCESS) || (avail == 0)) {
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Get interrupt available number failed. "
		    "Return: %d, available: %d\n", rc, avail);
		return (DDI_FAILURE);
	}

	if (avail < count) {
		/* EMPTY */
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Interrupts count: %d, available: %d\n",
		    count, avail);
	}

	/* Allocate an array of interrupt handles */
	Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
	Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);

	/* Set NORMAL behavior for both MSI and FIXED interrupt */
	flag = DDI_INTR_ALLOC_NORMAL;

	/* call ddi_intr_alloc() */
	rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
	    count, &actual, flag);

	if ((rc != DDI_SUCCESS) || (actual == 0)) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Allocate interrupts failed: %d\n", rc);

		kmem_free(Adapter->htable, Adapter->intr_size);
		return (DDI_FAILURE);
	}

	if (actual < count) {
		/* EMPTY */
		E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
		    "Interrupts requested: %d, received: %d\n",
		    count, actual);
	}

	Adapter->intr_cnt = actual;

	/* Get priority for first msi, assume remaining are all the same */
	rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);

	if (rc != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Get interrupt priority failed: %d\n", rc);

		/* Free already allocated intr */
		for (y = 0; y < actual; y++)
			(void) ddi_intr_free(Adapter->htable[y]);

		kmem_free(Adapter->htable, Adapter->intr_size);
		return (DDI_FAILURE);
	}

	/*
	 * In Legacy Interrupt mode, for PCI-Express adapters, we should
	 * use the interrupt service routine e1000g_intr_pciexpress()
	 * to avoid interrupt stealing when sharing an interrupt with
	 * other devices.
	 */
	if (Adapter->shared.mac.type < e1000_82571)
		intr_handler = e1000g_intr;
	else
		intr_handler = e1000g_intr_pciexpress;

	/* Call ddi_intr_add_handler() */
	for (x = 0; x < actual; x++) {
		rc = ddi_intr_add_handler(Adapter->htable[x],
		    intr_handler, (caddr_t)Adapter, NULL);

		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Add interrupt handler failed: %d\n", rc);

			/* Remove already added handler */
			for (y = 0; y < x; y++)
				(void) ddi_intr_remove_handler(
				    Adapter->htable[y]);

			/* Free already allocated intr */
			for (y = 0; y < actual; y++)
				(void) ddi_intr_free(Adapter->htable[y]);

			kmem_free(Adapter->htable, Adapter->intr_size);
			return (DDI_FAILURE);
		}
	}

	rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);

	if (rc != DDI_SUCCESS) {
		E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
		    "Get interrupt cap failed: %d\n", rc);

		/* Free already allocated intr */
		for (y = 0; y < actual; y++) {
			(void) ddi_intr_remove_handler(Adapter->htable[y]);
			(void) ddi_intr_free(Adapter->htable[y]);
		}

		kmem_free(Adapter->htable, Adapter->intr_size);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
e1000g_rem_intrs(struct e1000g *Adapter)
{
	int x;
	int rc;

	for (x = 0; x < Adapter->intr_cnt; x++) {
		rc = ddi_intr_remove_handler(Adapter->htable[x]);
		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Remove intr handler failed: %d\n", rc);
			return (DDI_FAILURE);
		}

		rc = ddi_intr_free(Adapter->htable[x]);
		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Free intr failed: %d\n", rc);
			return (DDI_FAILURE);
		}
	}

	kmem_free(Adapter->htable, Adapter->intr_size);

	return (DDI_SUCCESS);
}

static int
e1000g_enable_intrs(struct e1000g *Adapter)
{
	int x;
	int rc;

	/* Enable interrupts */
	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
		/* Call ddi_intr_block_enable() for MSI */
		rc = ddi_intr_block_enable(Adapter->htable,
		    Adapter->intr_cnt);
		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Enable block intr failed: %d\n", rc);
			return (DDI_FAILURE);
		}
	} else {
		/* Call ddi_intr_enable() for Legacy/MSI non block enable */
		for (x = 0; x < Adapter->intr_cnt; x++) {
			rc = ddi_intr_enable(Adapter->htable[x]);
			if (rc != DDI_SUCCESS) {
				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
				    "Enable intr failed: %d\n", rc);
				return (DDI_FAILURE);
			}
		}
	}

	return (DDI_SUCCESS);
}

static int
e1000g_disable_intrs(struct e1000g *Adapter)
{
	int x;
	int rc;

	/* Disable all interrupts */
	if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
		rc = ddi_intr_block_disable(Adapter->htable,
		    Adapter->intr_cnt);
		if (rc != DDI_SUCCESS) {
			E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
			    "Disable block intr failed: %d\n", rc);
			return (DDI_FAILURE);
		}
	} else {
		for (x = 0; x < Adapter->intr_cnt; x++) {
			rc = ddi_intr_disable(Adapter->htable[x]);
			if (rc != DDI_SUCCESS) {
				E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
				    "Disable intr failed: %d\n", rc);
				return (DDI_FAILURE);
			}
		}
	}

	return (DDI_SUCCESS);
}

/*
 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter
 */
static void
e1000g_get_phy_state(struct e1000g *Adapter)
{
	struct e1000_hw *hw = &Adapter->shared;

	if (hw->phy.media_type == e1000_media_type_copper) {
		(void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
		(void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
		(void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
		    &Adapter->phy_an_adv);
		(void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP,
		    &Adapter->phy_an_exp);
		(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
		    &Adapter->phy_ext_status);
		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL,
		    &Adapter->phy_1000t_ctrl);
		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
		    &Adapter->phy_1000t_status);
		(void) e1000_read_phy_reg(hw, PHY_LP_ABILITY,
		    &Adapter->phy_lp_able);

		Adapter->param_autoneg_cap =
		    (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
		Adapter->param_pause_cap =
		    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
		Adapter->param_asym_pause_cap =
		    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
		Adapter->param_1000fdx_cap =
		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
		Adapter->param_1000hdx_cap =
		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
		    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
		Adapter->param_100t4_cap =
		    (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
		Adapter->param_100fdx_cap =
		    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
		Adapter->param_100hdx_cap =
		    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
		    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
		Adapter->param_10fdx_cap =
		    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
		Adapter->param_10hdx_cap =
		    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;

		Adapter->param_adv_autoneg = hw->mac.autoneg;
		Adapter->param_adv_pause =
		    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
		Adapter->param_adv_asym_pause =
		    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
		Adapter->param_adv_1000hdx =
		    (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
		Adapter->param_adv_100t4 =
		    (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
		if (Adapter->param_adv_autoneg == 1) {
			Adapter->param_adv_1000fdx =
			    (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS)
			    ? 1 : 0;
			Adapter->param_adv_100fdx =
			    (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS)
			    ? 1 : 0;
			Adapter->param_adv_100hdx =
			    (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS)
			    ? 1 : 0;
			Adapter->param_adv_10fdx =
			    (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
			Adapter->param_adv_10hdx =
			    (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
		}

		Adapter->param_lp_autoneg =
		    (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
		Adapter->param_lp_pause =
		    (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
		Adapter->param_lp_asym_pause =
		    (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
		Adapter->param_lp_1000fdx =
		    (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
		Adapter->param_lp_1000hdx =
		    (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
		Adapter->param_lp_100t4 =
		    (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
		Adapter->param_lp_100fdx =
		    (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
		Adapter->param_lp_100hdx =
		    (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
		Adapter->param_lp_10fdx =
		    (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
		Adapter->param_lp_10hdx =
		    (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
	} else {
		/*
		 * A 1Gig fiber adapter only offers 1Gig full duplex, meaning
		 * it can only work with a 1Gig full-duplex link partner.
		 */
		Adapter->param_autoneg_cap = 0;
		Adapter->param_pause_cap = 1;
		Adapter->param_asym_pause_cap = 1;
		Adapter->param_1000fdx_cap = 1;
		Adapter->param_1000hdx_cap = 0;
		Adapter->param_100t4_cap = 0;
		Adapter->param_100fdx_cap = 0;
		Adapter->param_100hdx_cap = 0;
		Adapter->param_10fdx_cap = 0;
		Adapter->param_10hdx_cap = 0;

		Adapter->param_adv_autoneg = 0;
		Adapter->param_adv_pause = 1;
		Adapter->param_adv_asym_pause = 1;
		Adapter->param_adv_1000fdx = 1;
		Adapter->param_adv_1000hdx = 0;
		Adapter->param_adv_100t4 = 0;
		Adapter->param_adv_100fdx = 0;
		Adapter->param_adv_100hdx = 0;
		Adapter->param_adv_10fdx = 0;
		Adapter->param_adv_10hdx = 0;

		Adapter->param_lp_autoneg = 0;
		Adapter->param_lp_pause = 0;
		Adapter->param_lp_asym_pause = 0;
		Adapter->param_lp_1000fdx = 0;
		Adapter->param_lp_1000hdx = 0;
		Adapter->param_lp_100t4 = 0;
		Adapter->param_lp_100fdx = 0;
		Adapter->param_lp_100hdx = 0;
		Adapter->param_lp_10fdx = 0;
		Adapter->param_lp_10hdx = 0;
	}
}

/*
 * FMA support
 */

int
e1000g_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}

int
e1000g_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}
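
/*
 * Typical caller pattern (a sketch; the real call sites are elsewhere in
 * this driver): after a burst of register accesses the handle is checked
 * and, on error, the fault is reported to FMA, e.g.
 *
 *	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
 *		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
 *
 * (Adapter->osdep.reg_handle is named here for illustration only.)
 */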

/*
 * The IO fault service error handling callback function
 */
/* ARGSUSED2 */
static int
e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

static void
e1000g_fm_init(struct e1000g *Adapter)
{
	ddi_iblock_cookie_t iblk;
	int fma_dma_flag;

	/* Only register with IO Fault Services if we have some capability */
	if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		fma_dma_flag = 1;
	} else {
		fma_dma_flag = 0;
	}

	(void) e1000g_set_fma_flags(fma_dma_flag);

	if (Adapter->fm_capabilities) {

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
			pci_ereport_setup(Adapter->dip);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
			ddi_fm_handler_register(Adapter->dip,
			    e1000g_fm_error_cb, (void*) Adapter);
	}
}

static void
e1000g_fm_fini(struct e1000g *Adapter)
{
	/* Only unregister FMA capabilities if we registered some */
	if (Adapter->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
			pci_ereport_teardown(Adapter->dip);

		/*
		 * Un-register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
			ddi_fm_handler_unregister(Adapter->dip);

		/* Unregister from IO Fault Services */
		mutex_enter(&e1000g_rx_detach_lock);
		ddi_fm_fini(Adapter->dip);
		if (Adapter->priv_dip != NULL) {
			DEVI(Adapter->priv_dip)->devi_fmhdl = NULL;
		}
		mutex_exit(&e1000g_rx_detach_lock);
	}
}

void
e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
		ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}
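
/*
 * Illustrative sketch of how callers elsewhere in the driver are expected to
 * use the routine above: a caller that detects an invalid device state would
 * post, e.g.,
 *
 *	e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
 *
 * The detail string is appended to DDI_FM_DEVICE, so the resulting ereport
 * class is typically of the form "device.inval_state".
 */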

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
e1000g_quiesce(dev_info_t *devinfo)
{
	struct e1000g *Adapter;

	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);

	if (Adapter == NULL)
		return (DDI_FAILURE);

	e1000g_clear_all_interrupts(Adapter);

	(void) e1000_reset_hw(&Adapter->shared);

	/* Setup our HW Tx Head & Tail descriptor pointers */
	E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
	E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);

	/* Setup our HW Rx Head & Tail descriptor pointers */
	E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
	E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);

	return (DDI_SUCCESS);
}

/*
 * synchronize the adv* and en* parameters.
 *
 * See comments in <sys/dld.h> for details of the *_en_*
 * parameters. The usage of ndd for setting adv parameters will
 * synchronize all the en parameters with the e1000g parameters,
 * implicitly disabling any settings made via dladm.
 */
static void
e1000g_param_sync(struct e1000g *Adapter)
{
	Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
	Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
	Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
	Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
	Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
	Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
}

/*
 * e1000g_get_driver_control - tell manageability firmware that the driver
 * has control.
 */
static void
e1000g_get_driver_control(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	uint32_t swsm;

	/* tell manageability firmware the driver has taken over */
	switch (hw->mac.type) {
	case e1000_82573:
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_82574:
	case e1000_80003es2lan:
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pchlan:
	case e1000_pch2lan:
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		/* no manageability firmware: do nothing */
		break;
	}
}

/*
 * e1000g_release_driver_control - tell manageability firmware that the driver
 * has released control.
 */
static void
e1000g_release_driver_control(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	uint32_t swsm;

	/* tell manageability firmware the driver has released control */
	switch (hw->mac.type) {
	case e1000_82573:
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_82574:
	case e1000_80003es2lan:
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pchlan:
	case e1000_pch2lan:
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		/* no manageability firmware: do nothing */
		break;
	}
}
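
/*
 * Background on the two routines above (a summary; the shared-code register
 * definitions are authoritative): the DRV_LOAD bit in SWSM/CTRL_EXT is the
 * handshake the manageability firmware watches. While a driver holds it, the
 * firmware is expected to leave the PHY and other shared resources to the
 * host driver, and it may reclaim them once the bit is cleared again.
 */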

/*
 * Restore e1000g promiscuous mode.
 */
static void
e1000g_restore_promisc(struct e1000g *Adapter)
{
	if (Adapter->e1000g_promisc) {
		uint32_t rctl;

		rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
		E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
	}
}