/*
 * This file is provided under a CDDLv1 license.  When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2012 DEY Storage Systems, Inc.  All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2016 Joyent, Inc.
 */

/*
 * **********************************************************************
 *									*
 * Module Name:								*
 *   e1000g_main.c							*
 *									*
 * Abstract:								*
 *   This file contains the interface routines for the Solaris OS.	*
 *   It has all DDI entry point routines and GLD entry point routines.	*
 *									*
 *   This file also contains the routines that take care of		*
 *   initialization, uninitialization and the interrupt routine.	*
 *									*
 * **********************************************************************
 */

#include <sys/dlpi.h>
#include <sys/mac.h>
#include "e1000g_sw.h"
#include "e1000g_debug.h"

static char ident[] = "Intel PRO/1000 Ethernet";
/* LINTED E_STATIC_UNUSED */
static char e1000g_version[] = "Driver Ver. 5.3.24";

/*
 * Prototypes for DDI entry points
 */
static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
static int e1000g_quiesce(dev_info_t *);

/*
 * Prototypes for the init and interrupt routines
 */
static int e1000g_resume(dev_info_t *);
static int e1000g_suspend(dev_info_t *);
static uint_t e1000g_intr_pciexpress(caddr_t);
static uint_t e1000g_intr(caddr_t);
static void e1000g_intr_work(struct e1000g *, uint32_t);
#pragma inline(e1000g_intr_work)
static int e1000g_init(struct e1000g *);
static int e1000g_start(struct e1000g *, boolean_t);
static void e1000g_stop(struct e1000g *, boolean_t);
static int e1000g_m_start(void *);
static void e1000g_m_stop(void *);
static int e1000g_m_promisc(void *, boolean_t);
static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, void *);
static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t,
    mac_prop_info_handle_t);
static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
    const void *);
static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *);
static void e1000g_init_locks(struct e1000g *);
static void e1000g_destroy_locks(struct e1000g *);
static int e1000g_identify_hardware(struct e1000g *);
static int e1000g_regs_map(struct e1000g *);
static int e1000g_set_driver_params(struct e1000g *);
static void e1000g_set_bufsize(struct e1000g *);
static int e1000g_register_mac(struct e1000g *);
static boolean_t e1000g_rx_drain(struct e1000g *);
static boolean_t e1000g_tx_drain(struct e1000g *);
static void e1000g_init_unicst(struct e1000g *);
static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
static int e1000g_alloc_rx_data(struct e1000g *);
static void e1000g_release_multicast(struct e1000g *);
static void e1000g_pch_limits(struct e1000g *);
static uint32_t e1000g_mtu2maxframe(uint32_t);

/*
 * Local routines
 */
static boolean_t e1000g_reset_adapter(struct e1000g *);
static void e1000g_tx_clean(struct e1000g *);
static void e1000g_rx_clean(struct e1000g *);
static void e1000g_link_timer(void *);
static void e1000g_local_timer(void *);
static boolean_t e1000g_link_check(struct e1000g *);
static boolean_t e1000g_stall_check(struct e1000g *);
static void e1000g_smartspeed(struct e1000g *);
static void e1000g_get_conf(struct e1000g *);
static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int,
    int *);
static void enable_watchdog_timer(struct e1000g *);
static void disable_watchdog_timer(struct e1000g *);
static void start_watchdog_timer(struct e1000g *);
static void restart_watchdog_timer(struct e1000g *);
static void stop_watchdog_timer(struct e1000g *);
static void stop_link_timer(struct e1000g *);
static void stop_82547_timer(e1000g_tx_ring_t *);
static void e1000g_force_speed_duplex(struct e1000g *);
static void e1000g_setup_max_mtu(struct e1000g *);
static void e1000g_get_max_frame_size(struct e1000g *);
static boolean_t is_valid_mac_addr(uint8_t *);
static void e1000g_unattach(dev_info_t *, struct e1000g *);
static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *);
#ifdef E1000G_DEBUG
static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
    struct iocblk *, mblk_t *);
#endif
static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
    struct iocblk *, mblk_t *);
static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
static void e1000g_set_internal_loopback(struct e1000g *);
static void e1000g_set_external_loopback_1000(struct e1000g *);
static void e1000g_set_external_loopback_100(struct e1000g *);
static void e1000g_set_external_loopback_10(struct e1000g *);
static int e1000g_add_intrs(struct e1000g *);
static int e1000g_intr_add(struct e1000g *, int);
static int e1000g_rem_intrs(struct e1000g *);
static int e1000g_enable_intrs(struct e1000g *);
static int e1000g_disable_intrs(struct e1000g *);
static boolean_t e1000g_link_up(struct e1000g *);
#ifdef __sparc
static boolean_t e1000g_find_mac_address(struct e1000g *);
#endif
static void e1000g_get_phy_state(struct e1000g *);
static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void e1000g_fm_init(struct e1000g *Adapter);
static void e1000g_fm_fini(struct e1000g *Adapter);
static void e1000g_param_sync(struct e1000g *);
static void e1000g_get_driver_control(struct e1000_hw *);
static void e1000g_release_driver_control(struct e1000_hw *);
static void e1000g_restore_promisc(struct e1000g *Adapter);

char *e1000g_priv_props[] = {
	"_tx_bcopy_threshold",
	"_tx_interrupt_enable",
	"_tx_intr_delay",
	"_tx_intr_abs_delay",
	"_rx_bcopy_threshold",
	"_max_num_rcv_packets",
	"_rx_intr_delay",
	"_rx_intr_abs_delay",
	"_intr_throttling_rate",
	"_intr_adaptive",
	"_adv_pause_cap",
	"_adv_asym_pause_cap",
	NULL
};

static struct cb_ops cb_ws_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

static struct dev_ops ws_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	e1000g_attach,		/* devo_attach */
	e1000g_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&cb_ws_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	e1000g_quiesce		/* devo_quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ident,			/* Description string */
	&ws_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

/* Access attributes for register mapping */
static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

#define	E1000G_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t e1000g_m_callbacks = {
	E1000G_M_CALLBACK_FLAGS,
	e1000g_m_stat,
	e1000g_m_start,
	e1000g_m_stop,
	e1000g_m_promisc,
	e1000g_m_multicst,
	NULL,
	e1000g_m_tx,
	NULL,
	e1000g_m_ioctl,
	e1000g_m_getcapab,
	NULL,
	NULL,
	e1000g_m_setprop,
	e1000g_m_getprop,
	e1000g_m_propinfo
};

/*
 * Global variables
 */
uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K;
uint32_t e1000g_mblks_pending = 0;
/*
 * Workaround for Dynamic Reconfiguration support, for the x86 platform only.
 * Here we maintain a private dev_info list if e1000g_force_detach is
 * enabled. If we force the driver to detach while there are still some
 * rx buffers retained in the upper layer, we have to keep a copy of the
 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
 * structure will be freed after the driver is detached. However, when we
 * finally free those rx buffers released by the upper layer, we still need
 * the dev_info to free the dma buffers, so we save a copy of it for that
 * purpose. On the x86 platform we assume this copy of the dev_info is
 * always valid, but on the SPARC platform it could become invalid after a
 * system-board-level DR operation. For this reason, the global variable
 * e1000g_force_detach must be B_FALSE on the SPARC platform.
 */
#ifdef __sparc
boolean_t e1000g_force_detach = B_FALSE;
#else
boolean_t e1000g_force_detach = B_TRUE;
#endif
private_devi_list_t *e1000g_private_devi_list = NULL;

/*
 * The mutex e1000g_rx_detach_lock protects the processing of the private
 * dev_info list, and serializes the freeing of rx buffers against the
 * recycling of rx buffers.
 */
kmutex_t e1000g_rx_detach_lock;
/*
 * The rwlock e1000g_dma_type_lock protects the global flag e1000g_dma_type.
 * For SPARC, the initial value of the flag is "USE_DVMA". If there are many
 * e1000g instances, the system may run out of DVMA resources during the
 * initialization of the instances; the flag is then changed to "USE_DMA".
 * Because different e1000g instances are initialized in parallel, we need
 * this lock to protect the flag.
 */
krwlock_t e1000g_dma_type_lock;

/*
 * The 82546 chipset is a dual-port device and both ports share one eeprom.
 * Based on information from Intel, the 82546 chipset has a hardware
 * problem: when one port is being reset while the other port is trying to
 * access the eeprom, the system can hang or panic. To work around this
 * hardware problem, we use a global mutex to prevent such operations from
 * happening simultaneously on different instances. This workaround is
 * applied to all the devices supported by this driver.
310 */ 311 kmutex_t e1000g_nvm_lock; 312 313 /* 314 * Loadable module configuration entry points for the driver 315 */ 316 317 /* 318 * _init - module initialization 319 */ 320 int 321 _init(void) 322 { 323 int status; 324 325 mac_init_ops(&ws_ops, WSNAME); 326 status = mod_install(&modlinkage); 327 if (status != DDI_SUCCESS) 328 mac_fini_ops(&ws_ops); 329 else { 330 mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL); 331 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL); 332 mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL); 333 } 334 335 return (status); 336 } 337 338 /* 339 * _fini - module finalization 340 */ 341 int 342 _fini(void) 343 { 344 int status; 345 346 if (e1000g_mblks_pending != 0) 347 return (EBUSY); 348 349 status = mod_remove(&modlinkage); 350 if (status == DDI_SUCCESS) { 351 mac_fini_ops(&ws_ops); 352 353 if (e1000g_force_detach) { 354 private_devi_list_t *devi_node; 355 356 mutex_enter(&e1000g_rx_detach_lock); 357 while (e1000g_private_devi_list != NULL) { 358 devi_node = e1000g_private_devi_list; 359 e1000g_private_devi_list = 360 e1000g_private_devi_list->next; 361 362 kmem_free(devi_node->priv_dip, 363 sizeof (struct dev_info)); 364 kmem_free(devi_node, 365 sizeof (private_devi_list_t)); 366 } 367 mutex_exit(&e1000g_rx_detach_lock); 368 } 369 370 mutex_destroy(&e1000g_rx_detach_lock); 371 rw_destroy(&e1000g_dma_type_lock); 372 mutex_destroy(&e1000g_nvm_lock); 373 } 374 375 return (status); 376 } 377 378 /* 379 * _info - module information 380 */ 381 int 382 _info(struct modinfo *modinfop) 383 { 384 return (mod_info(&modlinkage, modinfop)); 385 } 386 387 /* 388 * e1000g_attach - driver attach 389 * 390 * This function is the device-specific initialization entry 391 * point. This entry point is required and must be written. 392 * The DDI_ATTACH command must be provided in the attach entry 393 * point. When attach() is called with cmd set to DDI_ATTACH, 394 * all normal kernel services (such as kmem_alloc(9F)) are 395 * available for use by the driver. 396 * 397 * The attach() function will be called once for each instance 398 * of the device on the system with cmd set to DDI_ATTACH. 399 * Until attach() succeeds, the only driver entry points which 400 * may be called are open(9E) and getinfo(9E). 401 */ 402 static int 403 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 404 { 405 struct e1000g *Adapter; 406 struct e1000_hw *hw; 407 struct e1000g_osdep *osdep; 408 int instance; 409 410 switch (cmd) { 411 default: 412 e1000g_log(NULL, CE_WARN, 413 "Unsupported command send to e1000g_attach... 
"); 414 return (DDI_FAILURE); 415 416 case DDI_RESUME: 417 return (e1000g_resume(devinfo)); 418 419 case DDI_ATTACH: 420 break; 421 } 422 423 /* 424 * get device instance number 425 */ 426 instance = ddi_get_instance(devinfo); 427 428 /* 429 * Allocate soft data structure 430 */ 431 Adapter = 432 (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP); 433 434 Adapter->dip = devinfo; 435 Adapter->instance = instance; 436 Adapter->tx_ring->adapter = Adapter; 437 Adapter->rx_ring->adapter = Adapter; 438 439 hw = &Adapter->shared; 440 osdep = &Adapter->osdep; 441 hw->back = osdep; 442 osdep->adapter = Adapter; 443 444 ddi_set_driver_private(devinfo, (caddr_t)Adapter); 445 446 /* 447 * Initialize for fma support 448 */ 449 (void) e1000g_get_prop(Adapter, "fm-capable", 450 0, 0x0f, 451 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 452 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE, 453 &Adapter->fm_capabilities); 454 e1000g_fm_init(Adapter); 455 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT; 456 457 /* 458 * PCI Configure 459 */ 460 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { 461 e1000g_log(Adapter, CE_WARN, "PCI configuration failed"); 462 goto attach_fail; 463 } 464 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; 465 466 /* 467 * Setup hardware 468 */ 469 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) { 470 e1000g_log(Adapter, CE_WARN, "Identify hardware failed"); 471 goto attach_fail; 472 } 473 474 /* 475 * Map in the device registers. 476 */ 477 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) { 478 e1000g_log(Adapter, CE_WARN, "Mapping registers failed"); 479 goto attach_fail; 480 } 481 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP; 482 483 /* 484 * Initialize driver parameters 485 */ 486 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) { 487 goto attach_fail; 488 } 489 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP; 490 491 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 492 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 493 goto attach_fail; 494 } 495 496 /* 497 * Disable ULP support 498 */ 499 (void) e1000_disable_ulp_lpt_lp(hw, TRUE); 500 501 /* 502 * Initialize interrupts 503 */ 504 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) { 505 e1000g_log(Adapter, CE_WARN, "Add interrupts failed"); 506 goto attach_fail; 507 } 508 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 509 510 /* 511 * Initialize mutex's for this device. 
512 * Do this before enabling the interrupt handler and 513 * register the softint to avoid the condition where 514 * interrupt handler can try using uninitialized mutex 515 */ 516 e1000g_init_locks(Adapter); 517 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS; 518 519 /* 520 * Initialize Driver Counters 521 */ 522 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) { 523 e1000g_log(Adapter, CE_WARN, "Init stats failed"); 524 goto attach_fail; 525 } 526 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS; 527 528 /* 529 * Initialize chip hardware and software structures 530 */ 531 rw_enter(&Adapter->chip_lock, RW_WRITER); 532 if (e1000g_init(Adapter) != DDI_SUCCESS) { 533 rw_exit(&Adapter->chip_lock); 534 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed"); 535 goto attach_fail; 536 } 537 rw_exit(&Adapter->chip_lock); 538 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 539 540 /* 541 * Register the driver to the MAC 542 */ 543 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) { 544 e1000g_log(Adapter, CE_WARN, "Register MAC failed"); 545 goto attach_fail; 546 } 547 Adapter->attach_progress |= ATTACH_PROGRESS_MAC; 548 549 /* 550 * Now that mutex locks are initialized, and the chip is also 551 * initialized, enable interrupts. 552 */ 553 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) { 554 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed"); 555 goto attach_fail; 556 } 557 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 558 559 /* 560 * If e1000g_force_detach is enabled, in global private dip list, 561 * we will create a new entry, which maintains the priv_dip for DR 562 * supports after driver detached. 563 */ 564 if (e1000g_force_detach) { 565 private_devi_list_t *devi_node; 566 567 Adapter->priv_dip = 568 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP); 569 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip), 570 sizeof (struct dev_info)); 571 572 devi_node = 573 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP); 574 575 mutex_enter(&e1000g_rx_detach_lock); 576 devi_node->priv_dip = Adapter->priv_dip; 577 devi_node->flag = E1000G_PRIV_DEVI_ATTACH; 578 devi_node->pending_rx_count = 0; 579 580 Adapter->priv_devi_node = devi_node; 581 582 if (e1000g_private_devi_list == NULL) { 583 devi_node->prev = NULL; 584 devi_node->next = NULL; 585 e1000g_private_devi_list = devi_node; 586 } else { 587 devi_node->prev = NULL; 588 devi_node->next = e1000g_private_devi_list; 589 e1000g_private_devi_list->prev = devi_node; 590 e1000g_private_devi_list = devi_node; 591 } 592 mutex_exit(&e1000g_rx_detach_lock); 593 } 594 595 Adapter->e1000g_state = E1000G_INITIALIZED; 596 return (DDI_SUCCESS); 597 598 attach_fail: 599 e1000g_unattach(devinfo, Adapter); 600 return (DDI_FAILURE); 601 } 602 603 static int 604 e1000g_register_mac(struct e1000g *Adapter) 605 { 606 struct e1000_hw *hw = &Adapter->shared; 607 mac_register_t *mac; 608 int err; 609 610 if ((mac = mac_alloc(MAC_VERSION)) == NULL) 611 return (DDI_FAILURE); 612 613 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 614 mac->m_driver = Adapter; 615 mac->m_dip = Adapter->dip; 616 mac->m_src_addr = hw->mac.addr; 617 mac->m_callbacks = &e1000g_m_callbacks; 618 mac->m_min_sdu = 0; 619 mac->m_max_sdu = Adapter->default_mtu; 620 mac->m_margin = VLAN_TAGSZ; 621 mac->m_priv_props = e1000g_priv_props; 622 mac->m_v12n = MAC_VIRT_LEVEL1; 623 624 err = mac_register(mac, &Adapter->mh); 625 mac_free(mac); 626 627 return (err == 0 ? 
DDI_SUCCESS : DDI_FAILURE); 628 } 629 630 static int 631 e1000g_identify_hardware(struct e1000g *Adapter) 632 { 633 struct e1000_hw *hw = &Adapter->shared; 634 struct e1000g_osdep *osdep = &Adapter->osdep; 635 636 /* Get the device id */ 637 hw->vendor_id = 638 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID); 639 hw->device_id = 640 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID); 641 hw->revision_id = 642 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID); 643 hw->subsystem_device_id = 644 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID); 645 hw->subsystem_vendor_id = 646 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID); 647 648 if (e1000_set_mac_type(hw) != E1000_SUCCESS) { 649 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 650 "MAC type could not be set properly."); 651 return (DDI_FAILURE); 652 } 653 654 return (DDI_SUCCESS); 655 } 656 657 static int 658 e1000g_regs_map(struct e1000g *Adapter) 659 { 660 dev_info_t *devinfo = Adapter->dip; 661 struct e1000_hw *hw = &Adapter->shared; 662 struct e1000g_osdep *osdep = &Adapter->osdep; 663 off_t mem_size; 664 bar_info_t bar_info; 665 int offset, rnumber; 666 667 rnumber = ADAPTER_REG_SET; 668 /* Get size of adapter register memory */ 669 if (ddi_dev_regsize(devinfo, rnumber, &mem_size) != 670 DDI_SUCCESS) { 671 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 672 "ddi_dev_regsize for registers failed"); 673 return (DDI_FAILURE); 674 } 675 676 /* Map adapter register memory */ 677 if ((ddi_regs_map_setup(devinfo, rnumber, 678 (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr, 679 &osdep->reg_handle)) != DDI_SUCCESS) { 680 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 681 "ddi_regs_map_setup for registers failed"); 682 goto regs_map_fail; 683 } 684 685 /* ICH needs to map flash memory */ 686 switch (hw->mac.type) { 687 case e1000_ich8lan: 688 case e1000_ich9lan: 689 case e1000_ich10lan: 690 case e1000_pchlan: 691 case e1000_pch2lan: 692 case e1000_pch_lpt: 693 rnumber = ICH_FLASH_REG_SET; 694 695 /* get flash size */ 696 if (ddi_dev_regsize(devinfo, rnumber, 697 &mem_size) != DDI_SUCCESS) { 698 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 699 "ddi_dev_regsize for ICH flash failed"); 700 goto regs_map_fail; 701 } 702 703 /* map flash in */ 704 if (ddi_regs_map_setup(devinfo, rnumber, 705 (caddr_t *)&hw->flash_address, 0, 706 mem_size, &e1000g_regs_acc_attr, 707 &osdep->ich_flash_handle) != DDI_SUCCESS) { 708 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 709 "ddi_regs_map_setup for ICH flash failed"); 710 goto regs_map_fail; 711 } 712 break; 713 case e1000_pch_spt: 714 /* 715 * On the SPT, the device flash is actually in BAR0, not a 716 * separate BAR. Therefore we end up setting the 717 * ich_flash_handle to be the same as the register handle. 718 * We mark the same to reduce the confusion in the other 719 * functions and macros. Though this does make the set up and 720 * tear-down path slightly more complicated. 
721 */ 722 osdep->ich_flash_handle = osdep->reg_handle; 723 hw->flash_address = hw->hw_addr; 724 default: 725 break; 726 } 727 728 /* map io space */ 729 switch (hw->mac.type) { 730 case e1000_82544: 731 case e1000_82540: 732 case e1000_82545: 733 case e1000_82546: 734 case e1000_82541: 735 case e1000_82541_rev_2: 736 /* find the IO bar */ 737 rnumber = -1; 738 for (offset = PCI_CONF_BASE1; 739 offset <= PCI_CONF_BASE5; offset += 4) { 740 if (e1000g_get_bar_info(devinfo, offset, &bar_info) 741 != DDI_SUCCESS) 742 continue; 743 if (bar_info.type == E1000G_BAR_IO) { 744 rnumber = bar_info.rnumber; 745 break; 746 } 747 } 748 749 if (rnumber < 0) { 750 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 751 "No io space is found"); 752 goto regs_map_fail; 753 } 754 755 /* get io space size */ 756 if (ddi_dev_regsize(devinfo, rnumber, 757 &mem_size) != DDI_SUCCESS) { 758 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 759 "ddi_dev_regsize for io space failed"); 760 goto regs_map_fail; 761 } 762 763 /* map io space */ 764 if ((ddi_regs_map_setup(devinfo, rnumber, 765 (caddr_t *)&hw->io_base, 0, mem_size, 766 &e1000g_regs_acc_attr, 767 &osdep->io_reg_handle)) != DDI_SUCCESS) { 768 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 769 "ddi_regs_map_setup for io space failed"); 770 goto regs_map_fail; 771 } 772 break; 773 default: 774 hw->io_base = 0; 775 break; 776 } 777 778 return (DDI_SUCCESS); 779 780 regs_map_fail: 781 if (osdep->reg_handle != NULL) 782 ddi_regs_map_free(&osdep->reg_handle); 783 if (osdep->ich_flash_handle != NULL && hw->mac.type != e1000_pch_spt) 784 ddi_regs_map_free(&osdep->ich_flash_handle); 785 return (DDI_FAILURE); 786 } 787 788 static int 789 e1000g_set_driver_params(struct e1000g *Adapter) 790 { 791 struct e1000_hw *hw; 792 793 hw = &Adapter->shared; 794 795 /* Set MAC type and initialize hardware functions */ 796 if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) { 797 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 798 "Could not setup hardware functions"); 799 return (DDI_FAILURE); 800 } 801 802 /* Get bus information */ 803 if (e1000_get_bus_info(hw) != E1000_SUCCESS) { 804 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 805 "Could not get bus information"); 806 return (DDI_FAILURE); 807 } 808 809 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word); 810 811 hw->mac.autoneg_failed = B_TRUE; 812 813 /* Set the autoneg_wait_to_complete flag to B_FALSE */ 814 hw->phy.autoneg_wait_to_complete = B_FALSE; 815 816 /* Adaptive IFS related changes */ 817 hw->mac.adaptive_ifs = B_TRUE; 818 819 /* Enable phy init script for IGP phy of 82541/82547 */ 820 if ((hw->mac.type == e1000_82547) || 821 (hw->mac.type == e1000_82541) || 822 (hw->mac.type == e1000_82547_rev_2) || 823 (hw->mac.type == e1000_82541_rev_2)) 824 e1000_init_script_state_82541(hw, B_TRUE); 825 826 /* Enable the TTL workaround for 82541/82547 */ 827 e1000_set_ttl_workaround_state_82541(hw, B_TRUE); 828 829 #ifdef __sparc 830 Adapter->strip_crc = B_TRUE; 831 #else 832 Adapter->strip_crc = B_FALSE; 833 #endif 834 835 /* setup the maximum MTU size of the chip */ 836 e1000g_setup_max_mtu(Adapter); 837 838 /* Get speed/duplex settings in conf file */ 839 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL; 840 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 841 e1000g_force_speed_duplex(Adapter); 842 843 /* Get Jumbo Frames settings in conf file */ 844 e1000g_get_max_frame_size(Adapter); 845 846 /* Get conf file properties */ 847 e1000g_get_conf(Adapter); 848 849 /* enforce PCH limits */ 850 e1000g_pch_limits(Adapter); 851 852 /* Set Rx/Tx buffer 
size */ 853 e1000g_set_bufsize(Adapter); 854 855 /* Master Latency Timer */ 856 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER; 857 858 /* copper options */ 859 if (hw->phy.media_type == e1000_media_type_copper) { 860 hw->phy.mdix = 0; /* AUTO_ALL_MODES */ 861 hw->phy.disable_polarity_correction = B_FALSE; 862 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */ 863 } 864 865 /* The initial link state should be "unknown" */ 866 Adapter->link_state = LINK_STATE_UNKNOWN; 867 868 /* Initialize rx parameters */ 869 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY; 870 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY; 871 872 /* Initialize tx parameters */ 873 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE; 874 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD; 875 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY; 876 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY; 877 878 /* Initialize rx parameters */ 879 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD; 880 881 return (DDI_SUCCESS); 882 } 883 884 static void 885 e1000g_setup_max_mtu(struct e1000g *Adapter) 886 { 887 struct e1000_mac_info *mac = &Adapter->shared.mac; 888 struct e1000_phy_info *phy = &Adapter->shared.phy; 889 890 switch (mac->type) { 891 /* types that do not support jumbo frames */ 892 case e1000_ich8lan: 893 case e1000_82573: 894 case e1000_82583: 895 Adapter->max_mtu = ETHERMTU; 896 break; 897 /* ich9 supports jumbo frames except on one phy type */ 898 case e1000_ich9lan: 899 if (phy->type == e1000_phy_ife) 900 Adapter->max_mtu = ETHERMTU; 901 else 902 Adapter->max_mtu = MAXIMUM_MTU_9K; 903 break; 904 /* pch can do jumbo frames up to 4K */ 905 case e1000_pchlan: 906 Adapter->max_mtu = MAXIMUM_MTU_4K; 907 break; 908 /* pch2 can do jumbo frames up to 9K */ 909 case e1000_pch2lan: 910 case e1000_pch_lpt: 911 case e1000_pch_spt: 912 Adapter->max_mtu = MAXIMUM_MTU_9K; 913 break; 914 /* types with a special limit */ 915 case e1000_82571: 916 case e1000_82572: 917 case e1000_82574: 918 case e1000_80003es2lan: 919 case e1000_ich10lan: 920 if (e1000g_jumbo_mtu >= ETHERMTU && 921 e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) { 922 Adapter->max_mtu = e1000g_jumbo_mtu; 923 } else { 924 Adapter->max_mtu = MAXIMUM_MTU_9K; 925 } 926 break; 927 /* default limit is 16K */ 928 default: 929 Adapter->max_mtu = FRAME_SIZE_UPTO_16K - 930 sizeof (struct ether_vlan_header) - ETHERFCSL; 931 break; 932 } 933 } 934 935 static void 936 e1000g_set_bufsize(struct e1000g *Adapter) 937 { 938 struct e1000_mac_info *mac = &Adapter->shared.mac; 939 uint64_t rx_size; 940 uint64_t tx_size; 941 942 dev_info_t *devinfo = Adapter->dip; 943 #ifdef __sparc 944 ulong_t iommu_pagesize; 945 #endif 946 /* Get the system page size */ 947 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1); 948 949 #ifdef __sparc 950 iommu_pagesize = dvma_pagesize(devinfo); 951 if (iommu_pagesize != 0) { 952 if (Adapter->sys_page_sz == iommu_pagesize) { 953 if (iommu_pagesize > 0x4000) 954 Adapter->sys_page_sz = 0x4000; 955 } else { 956 if (Adapter->sys_page_sz > iommu_pagesize) 957 Adapter->sys_page_sz = iommu_pagesize; 958 } 959 } 960 if (Adapter->lso_enable) { 961 Adapter->dvma_page_num = E1000_LSO_MAXLEN / 962 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; 963 } else { 964 Adapter->dvma_page_num = Adapter->max_frame_size / 965 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; 966 } 967 ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM); 968 #endif 969 970 Adapter->min_frame_size = ETHERMIN + ETHERFCSL; 971 972 if 
(Adapter->mem_workaround_82546 &&
	    ((mac->type == e1000_82545) ||
	    (mac->type == e1000_82546) ||
	    (mac->type == e1000_82546_rev_3))) {
		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
	} else {
		rx_size = Adapter->max_frame_size;
		if ((rx_size > FRAME_SIZE_UPTO_2K) &&
		    (rx_size <= FRAME_SIZE_UPTO_4K))
			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
		else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
		    (rx_size <= FRAME_SIZE_UPTO_8K))
			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
		else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
		    (rx_size <= FRAME_SIZE_UPTO_16K))
			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
		else
			Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
	}
	Adapter->rx_buffer_size += E1000G_IPALIGNROOM;

	tx_size = Adapter->max_frame_size;
	if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
	else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
	    (tx_size <= FRAME_SIZE_UPTO_8K))
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
	else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
	    (tx_size <= FRAME_SIZE_UPTO_16K))
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
	else
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;

	/*
	 * Wiseman adapters require receive buffers to be aligned on a
	 * 256-byte boundary. Since Livengood does not require this, and
	 * forcing it for all hardware would have performance implications,
	 * we apply it only to Wiseman and to jumbo-frame mode; the rest of
	 * the time normal frames are fine. Because an unaligned buffer
	 * carries a potential risk of losing data, all Wiseman boards get
	 * 256-byte-aligned buffers.
	 */
	if (mac->type < e1000_82543)
		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
	else
		Adapter->rx_buf_align = 1;
}

/*
 * e1000g_detach - driver detach
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * The interrupt routines are disabled, and all memory allocated by this
 * driver is freed.
1036 */ 1037 static int 1038 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 1039 { 1040 struct e1000g *Adapter; 1041 boolean_t rx_drain; 1042 1043 switch (cmd) { 1044 default: 1045 return (DDI_FAILURE); 1046 1047 case DDI_SUSPEND: 1048 return (e1000g_suspend(devinfo)); 1049 1050 case DDI_DETACH: 1051 break; 1052 } 1053 1054 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1055 if (Adapter == NULL) 1056 return (DDI_FAILURE); 1057 1058 rx_drain = e1000g_rx_drain(Adapter); 1059 if (!rx_drain && !e1000g_force_detach) 1060 return (DDI_FAILURE); 1061 1062 if (mac_unregister(Adapter->mh) != 0) { 1063 e1000g_log(Adapter, CE_WARN, "Unregister MAC failed"); 1064 return (DDI_FAILURE); 1065 } 1066 Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC; 1067 1068 ASSERT(!(Adapter->e1000g_state & E1000G_STARTED)); 1069 1070 if (!e1000g_force_detach && !rx_drain) 1071 return (DDI_FAILURE); 1072 1073 e1000g_unattach(devinfo, Adapter); 1074 1075 return (DDI_SUCCESS); 1076 } 1077 1078 /* 1079 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance 1080 */ 1081 void 1082 e1000g_free_priv_devi_node(private_devi_list_t *devi_node) 1083 { 1084 ASSERT(e1000g_private_devi_list != NULL); 1085 ASSERT(devi_node != NULL); 1086 1087 if (devi_node->prev != NULL) 1088 devi_node->prev->next = devi_node->next; 1089 if (devi_node->next != NULL) 1090 devi_node->next->prev = devi_node->prev; 1091 if (devi_node == e1000g_private_devi_list) 1092 e1000g_private_devi_list = devi_node->next; 1093 1094 kmem_free(devi_node->priv_dip, 1095 sizeof (struct dev_info)); 1096 kmem_free(devi_node, 1097 sizeof (private_devi_list_t)); 1098 } 1099 1100 static void 1101 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter) 1102 { 1103 private_devi_list_t *devi_node; 1104 int result; 1105 1106 if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 1107 (void) e1000g_disable_intrs(Adapter); 1108 } 1109 1110 if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) { 1111 (void) mac_unregister(Adapter->mh); 1112 } 1113 1114 if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 1115 (void) e1000g_rem_intrs(Adapter); 1116 } 1117 1118 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) { 1119 (void) ddi_prop_remove_all(devinfo); 1120 } 1121 1122 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) { 1123 kstat_delete((kstat_t *)Adapter->e1000g_ksp); 1124 } 1125 1126 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) { 1127 stop_link_timer(Adapter); 1128 1129 mutex_enter(&e1000g_nvm_lock); 1130 result = e1000_reset_hw(&Adapter->shared); 1131 mutex_exit(&e1000g_nvm_lock); 1132 1133 if (result != E1000_SUCCESS) { 1134 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1135 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1136 } 1137 } 1138 1139 e1000g_release_multicast(Adapter); 1140 1141 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) { 1142 if (Adapter->osdep.reg_handle != NULL) 1143 ddi_regs_map_free(&Adapter->osdep.reg_handle); 1144 if (Adapter->osdep.ich_flash_handle != NULL && 1145 Adapter->shared.mac.type != e1000_pch_spt) 1146 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle); 1147 if (Adapter->osdep.io_reg_handle != NULL) 1148 ddi_regs_map_free(&Adapter->osdep.io_reg_handle); 1149 } 1150 1151 if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) { 1152 if (Adapter->osdep.cfg_handle != NULL) 1153 pci_config_teardown(&Adapter->osdep.cfg_handle); 1154 } 1155 1156 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) { 1157 e1000g_destroy_locks(Adapter); 1158 } 1159 1160 
if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) { 1161 e1000g_fm_fini(Adapter); 1162 } 1163 1164 mutex_enter(&e1000g_rx_detach_lock); 1165 if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) { 1166 devi_node = Adapter->priv_devi_node; 1167 devi_node->flag |= E1000G_PRIV_DEVI_DETACH; 1168 1169 if (devi_node->pending_rx_count == 0) { 1170 e1000g_free_priv_devi_node(devi_node); 1171 } 1172 } 1173 mutex_exit(&e1000g_rx_detach_lock); 1174 1175 kmem_free((caddr_t)Adapter, sizeof (struct e1000g)); 1176 1177 /* 1178 * Another hotplug spec requirement, 1179 * run ddi_set_driver_private(devinfo, null); 1180 */ 1181 ddi_set_driver_private(devinfo, NULL); 1182 } 1183 1184 /* 1185 * Get the BAR type and rnumber for a given PCI BAR offset 1186 */ 1187 static int 1188 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info) 1189 { 1190 pci_regspec_t *regs; 1191 uint_t regs_length; 1192 int type, rnumber, rcount; 1193 1194 ASSERT((bar_offset >= PCI_CONF_BASE0) && 1195 (bar_offset <= PCI_CONF_BASE5)); 1196 1197 /* 1198 * Get the DDI "reg" property 1199 */ 1200 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 1201 DDI_PROP_DONTPASS, "reg", (int **)®s, 1202 ®s_length) != DDI_PROP_SUCCESS) { 1203 return (DDI_FAILURE); 1204 } 1205 1206 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t); 1207 /* 1208 * Check the BAR offset 1209 */ 1210 for (rnumber = 0; rnumber < rcount; ++rnumber) { 1211 if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) { 1212 type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK; 1213 break; 1214 } 1215 } 1216 1217 ddi_prop_free(regs); 1218 1219 if (rnumber >= rcount) 1220 return (DDI_FAILURE); 1221 1222 switch (type) { 1223 case PCI_ADDR_CONFIG: 1224 bar_info->type = E1000G_BAR_CONFIG; 1225 break; 1226 case PCI_ADDR_IO: 1227 bar_info->type = E1000G_BAR_IO; 1228 break; 1229 case PCI_ADDR_MEM32: 1230 bar_info->type = E1000G_BAR_MEM32; 1231 break; 1232 case PCI_ADDR_MEM64: 1233 bar_info->type = E1000G_BAR_MEM64; 1234 break; 1235 default: 1236 return (DDI_FAILURE); 1237 } 1238 bar_info->rnumber = rnumber; 1239 return (DDI_SUCCESS); 1240 } 1241 1242 static void 1243 e1000g_init_locks(struct e1000g *Adapter) 1244 { 1245 e1000g_tx_ring_t *tx_ring; 1246 e1000g_rx_ring_t *rx_ring; 1247 1248 rw_init(&Adapter->chip_lock, NULL, 1249 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1250 mutex_init(&Adapter->link_lock, NULL, 1251 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1252 mutex_init(&Adapter->watchdog_lock, NULL, 1253 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1254 1255 tx_ring = Adapter->tx_ring; 1256 1257 mutex_init(&tx_ring->tx_lock, NULL, 1258 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1259 mutex_init(&tx_ring->usedlist_lock, NULL, 1260 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1261 mutex_init(&tx_ring->freelist_lock, NULL, 1262 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1263 1264 rx_ring = Adapter->rx_ring; 1265 1266 mutex_init(&rx_ring->rx_lock, NULL, 1267 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1268 } 1269 1270 static void 1271 e1000g_destroy_locks(struct e1000g *Adapter) 1272 { 1273 e1000g_tx_ring_t *tx_ring; 1274 e1000g_rx_ring_t *rx_ring; 1275 1276 tx_ring = Adapter->tx_ring; 1277 mutex_destroy(&tx_ring->tx_lock); 1278 mutex_destroy(&tx_ring->usedlist_lock); 1279 mutex_destroy(&tx_ring->freelist_lock); 1280 1281 rx_ring = Adapter->rx_ring; 1282 mutex_destroy(&rx_ring->rx_lock); 1283 1284 mutex_destroy(&Adapter->link_lock); 1285 mutex_destroy(&Adapter->watchdog_lock); 1286 rw_destroy(&Adapter->chip_lock); 1287 
1288 /* destory mutex initialized in shared code */ 1289 e1000_destroy_hw_mutex(&Adapter->shared); 1290 } 1291 1292 static int 1293 e1000g_resume(dev_info_t *devinfo) 1294 { 1295 struct e1000g *Adapter; 1296 1297 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1298 if (Adapter == NULL) 1299 e1000g_log(Adapter, CE_PANIC, 1300 "Instance pointer is null\n"); 1301 1302 if (Adapter->dip != devinfo) 1303 e1000g_log(Adapter, CE_PANIC, 1304 "Devinfo is not the same as saved devinfo\n"); 1305 1306 rw_enter(&Adapter->chip_lock, RW_WRITER); 1307 1308 if (Adapter->e1000g_state & E1000G_STARTED) { 1309 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { 1310 rw_exit(&Adapter->chip_lock); 1311 /* 1312 * We note the failure, but return success, as the 1313 * system is still usable without this controller. 1314 */ 1315 e1000g_log(Adapter, CE_WARN, 1316 "e1000g_resume: failed to restart controller\n"); 1317 return (DDI_SUCCESS); 1318 } 1319 /* Enable and start the watchdog timer */ 1320 enable_watchdog_timer(Adapter); 1321 } 1322 1323 Adapter->e1000g_state &= ~E1000G_SUSPENDED; 1324 1325 rw_exit(&Adapter->chip_lock); 1326 1327 return (DDI_SUCCESS); 1328 } 1329 1330 static int 1331 e1000g_suspend(dev_info_t *devinfo) 1332 { 1333 struct e1000g *Adapter; 1334 1335 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1336 if (Adapter == NULL) 1337 return (DDI_FAILURE); 1338 1339 rw_enter(&Adapter->chip_lock, RW_WRITER); 1340 1341 Adapter->e1000g_state |= E1000G_SUSPENDED; 1342 1343 /* if the port isn't plumbed, we can simply return */ 1344 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 1345 rw_exit(&Adapter->chip_lock); 1346 return (DDI_SUCCESS); 1347 } 1348 1349 e1000g_stop(Adapter, B_FALSE); 1350 1351 rw_exit(&Adapter->chip_lock); 1352 1353 /* Disable and stop all the timers */ 1354 disable_watchdog_timer(Adapter); 1355 stop_link_timer(Adapter); 1356 stop_82547_timer(Adapter->tx_ring); 1357 1358 return (DDI_SUCCESS); 1359 } 1360 1361 static int 1362 e1000g_init(struct e1000g *Adapter) 1363 { 1364 uint32_t pba; 1365 uint32_t high_water; 1366 struct e1000_hw *hw; 1367 clock_t link_timeout; 1368 int result; 1369 1370 hw = &Adapter->shared; 1371 1372 /* 1373 * reset to put the hardware in a known state 1374 * before we try to do anything with the eeprom 1375 */ 1376 mutex_enter(&e1000g_nvm_lock); 1377 result = e1000_reset_hw(hw); 1378 mutex_exit(&e1000g_nvm_lock); 1379 1380 if (result != E1000_SUCCESS) { 1381 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1382 goto init_fail; 1383 } 1384 1385 mutex_enter(&e1000g_nvm_lock); 1386 result = e1000_validate_nvm_checksum(hw); 1387 if (result < E1000_SUCCESS) { 1388 /* 1389 * Some PCI-E parts fail the first check due to 1390 * the link being in sleep state. Call it again, 1391 * if it fails a second time its a real issue. 1392 */ 1393 result = e1000_validate_nvm_checksum(hw); 1394 } 1395 mutex_exit(&e1000g_nvm_lock); 1396 1397 if (result < E1000_SUCCESS) { 1398 e1000g_log(Adapter, CE_WARN, 1399 "Invalid NVM checksum. Please contact " 1400 "the vendor to update the NVM."); 1401 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1402 goto init_fail; 1403 } 1404 1405 result = 0; 1406 #ifdef __sparc 1407 /* 1408 * First, we try to get the local ethernet address from OBP. If 1409 * failed, then we get it from the EEPROM of NIC card. 1410 */ 1411 result = e1000g_find_mac_address(Adapter); 1412 #endif 1413 /* Get the local ethernet address. 
*/ 1414 if (!result) { 1415 mutex_enter(&e1000g_nvm_lock); 1416 result = e1000_read_mac_addr(hw); 1417 mutex_exit(&e1000g_nvm_lock); 1418 } 1419 1420 if (result < E1000_SUCCESS) { 1421 e1000g_log(Adapter, CE_WARN, "Read mac addr failed"); 1422 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1423 goto init_fail; 1424 } 1425 1426 /* check for valid mac address */ 1427 if (!is_valid_mac_addr(hw->mac.addr)) { 1428 e1000g_log(Adapter, CE_WARN, "Invalid mac addr"); 1429 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1430 goto init_fail; 1431 } 1432 1433 /* Set LAA state for 82571 chipset */ 1434 e1000_set_laa_state_82571(hw, B_TRUE); 1435 1436 /* Master Latency Timer implementation */ 1437 if (Adapter->master_latency_timer) { 1438 pci_config_put8(Adapter->osdep.cfg_handle, 1439 PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer); 1440 } 1441 1442 if (hw->mac.type < e1000_82547) { 1443 /* 1444 * Total FIFO is 64K 1445 */ 1446 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1447 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 1448 else 1449 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 1450 } else if ((hw->mac.type == e1000_82571) || 1451 (hw->mac.type == e1000_82572) || 1452 (hw->mac.type == e1000_80003es2lan)) { 1453 /* 1454 * Total FIFO is 48K 1455 */ 1456 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1457 pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */ 1458 else 1459 pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */ 1460 } else if (hw->mac.type == e1000_82573) { 1461 pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */ 1462 } else if (hw->mac.type == e1000_82574) { 1463 /* Keep adapter default: 20K for Rx, 20K for Tx */ 1464 pba = E1000_READ_REG(hw, E1000_PBA); 1465 } else if (hw->mac.type == e1000_ich8lan) { 1466 pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */ 1467 } else if (hw->mac.type == e1000_ich9lan) { 1468 pba = E1000_PBA_10K; 1469 } else if (hw->mac.type == e1000_ich10lan) { 1470 pba = E1000_PBA_10K; 1471 } else if (hw->mac.type == e1000_pchlan) { 1472 pba = E1000_PBA_26K; 1473 } else if (hw->mac.type == e1000_pch2lan) { 1474 pba = E1000_PBA_26K; 1475 } else if (hw->mac.type == e1000_pch_lpt) { 1476 pba = E1000_PBA_26K; 1477 } else if (hw->mac.type == e1000_pch_spt) { 1478 pba = E1000_PBA_26K; 1479 } else { 1480 /* 1481 * Total FIFO is 40K 1482 */ 1483 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1484 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ 1485 else 1486 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ 1487 } 1488 E1000_WRITE_REG(hw, E1000_PBA, pba); 1489 1490 /* 1491 * These parameters set thresholds for the adapter's generation(Tx) 1492 * and response(Rx) to Ethernet PAUSE frames. These are just threshold 1493 * settings. Flow control is enabled or disabled in the configuration 1494 * file. 1495 * High-water mark is set down from the top of the rx fifo (not 1496 * sensitive to max_frame_size) and low-water is set just below 1497 * high-water mark. 1498 * The high water mark must be low enough to fit one full frame above 1499 * it in the rx FIFO. Should be the lower of: 1500 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early 1501 * receive size (assuming ERT set to E1000_ERT_2048), or the full 1502 * Rx FIFO size minus one full frame. 1503 */ 1504 high_water = min(((pba << 10) * 9 / 10), 1505 ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 || 1506 hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ? 
1507 ((pba << 10) - (E1000_ERT_2048 << 3)) : 1508 ((pba << 10) - Adapter->max_frame_size))); 1509 1510 hw->fc.high_water = high_water & 0xFFF8; 1511 hw->fc.low_water = hw->fc.high_water - 8; 1512 1513 if (hw->mac.type == e1000_80003es2lan) 1514 hw->fc.pause_time = 0xFFFF; 1515 else 1516 hw->fc.pause_time = E1000_FC_PAUSE_TIME; 1517 hw->fc.send_xon = B_TRUE; 1518 1519 /* 1520 * Reset the adapter hardware the second time. 1521 */ 1522 mutex_enter(&e1000g_nvm_lock); 1523 result = e1000_reset_hw(hw); 1524 mutex_exit(&e1000g_nvm_lock); 1525 1526 if (result != E1000_SUCCESS) { 1527 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1528 goto init_fail; 1529 } 1530 1531 /* disable wakeup control by default */ 1532 if (hw->mac.type >= e1000_82544) 1533 E1000_WRITE_REG(hw, E1000_WUC, 0); 1534 1535 /* 1536 * MWI should be disabled on 82546. 1537 */ 1538 if (hw->mac.type == e1000_82546) 1539 e1000_pci_clear_mwi(hw); 1540 else 1541 e1000_pci_set_mwi(hw); 1542 1543 /* 1544 * Configure/Initialize hardware 1545 */ 1546 mutex_enter(&e1000g_nvm_lock); 1547 result = e1000_init_hw(hw); 1548 mutex_exit(&e1000g_nvm_lock); 1549 1550 if (result < E1000_SUCCESS) { 1551 e1000g_log(Adapter, CE_WARN, "Initialize hw failed"); 1552 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1553 goto init_fail; 1554 } 1555 1556 /* 1557 * Restore LED settings to the default from EEPROM 1558 * to meet the standard for Sun platforms. 1559 */ 1560 (void) e1000_cleanup_led(hw); 1561 1562 /* Disable Smart Power Down */ 1563 phy_spd_state(hw, B_FALSE); 1564 1565 /* Make sure driver has control */ 1566 e1000g_get_driver_control(hw); 1567 1568 /* 1569 * Initialize unicast addresses. 1570 */ 1571 e1000g_init_unicst(Adapter); 1572 1573 /* 1574 * Setup and initialize the mctable structures. 
After this routine 1575 * completes Multicast table will be set 1576 */ 1577 e1000_update_mc_addr_list(hw, 1578 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 1579 msec_delay(5); 1580 1581 /* 1582 * Implement Adaptive IFS 1583 */ 1584 e1000_reset_adaptive(hw); 1585 1586 /* Setup Interrupt Throttling Register */ 1587 if (hw->mac.type >= e1000_82540) { 1588 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate); 1589 } else 1590 Adapter->intr_adaptive = B_FALSE; 1591 1592 /* Start the timer for link setup */ 1593 if (hw->mac.autoneg) 1594 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000); 1595 else 1596 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000); 1597 1598 mutex_enter(&Adapter->link_lock); 1599 if (hw->phy.autoneg_wait_to_complete) { 1600 Adapter->link_complete = B_TRUE; 1601 } else { 1602 Adapter->link_complete = B_FALSE; 1603 Adapter->link_tid = timeout(e1000g_link_timer, 1604 (void *)Adapter, link_timeout); 1605 } 1606 mutex_exit(&Adapter->link_lock); 1607 1608 /* Save the state of the phy */ 1609 e1000g_get_phy_state(Adapter); 1610 1611 e1000g_param_sync(Adapter); 1612 1613 Adapter->init_count++; 1614 1615 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 1616 goto init_fail; 1617 } 1618 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1619 goto init_fail; 1620 } 1621 1622 Adapter->poll_mode = e1000g_poll_mode; 1623 1624 return (DDI_SUCCESS); 1625 1626 init_fail: 1627 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1628 return (DDI_FAILURE); 1629 } 1630 1631 static int 1632 e1000g_alloc_rx_data(struct e1000g *Adapter) 1633 { 1634 e1000g_rx_ring_t *rx_ring; 1635 e1000g_rx_data_t *rx_data; 1636 1637 rx_ring = Adapter->rx_ring; 1638 1639 rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP); 1640 1641 if (rx_data == NULL) 1642 return (DDI_FAILURE); 1643 1644 rx_data->priv_devi_node = Adapter->priv_devi_node; 1645 rx_data->rx_ring = rx_ring; 1646 1647 mutex_init(&rx_data->freelist_lock, NULL, 1648 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1649 mutex_init(&rx_data->recycle_lock, NULL, 1650 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1651 1652 rx_ring->rx_data = rx_data; 1653 1654 return (DDI_SUCCESS); 1655 } 1656 1657 void 1658 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data) 1659 { 1660 rx_sw_packet_t *packet, *next_packet; 1661 1662 if (rx_data == NULL) 1663 return; 1664 1665 packet = rx_data->packet_area; 1666 while (packet != NULL) { 1667 next_packet = packet->next; 1668 e1000g_free_rx_sw_packet(packet, B_TRUE); 1669 packet = next_packet; 1670 } 1671 rx_data->packet_area = NULL; 1672 } 1673 1674 void 1675 e1000g_free_rx_data(e1000g_rx_data_t *rx_data) 1676 { 1677 if (rx_data == NULL) 1678 return; 1679 1680 mutex_destroy(&rx_data->freelist_lock); 1681 mutex_destroy(&rx_data->recycle_lock); 1682 1683 kmem_free(rx_data, sizeof (e1000g_rx_data_t)); 1684 } 1685 1686 /* 1687 * Check if the link is up 1688 */ 1689 static boolean_t 1690 e1000g_link_up(struct e1000g *Adapter) 1691 { 1692 struct e1000_hw *hw = &Adapter->shared; 1693 boolean_t link_up = B_FALSE; 1694 1695 /* 1696 * get_link_status is set in the interrupt handler on link-status-change 1697 * or rx sequence error interrupt. get_link_status will stay 1698 * false until the e1000_check_for_link establishes link only 1699 * for copper adapters. 
1700 */ 1701 switch (hw->phy.media_type) { 1702 case e1000_media_type_copper: 1703 if (hw->mac.get_link_status) { 1704 /* 1705 * SPT devices need a bit of extra time before we ask 1706 * them. 1707 */ 1708 if (hw->mac.type == e1000_pch_spt) 1709 msec_delay(50); 1710 (void) e1000_check_for_link(hw); 1711 if ((E1000_READ_REG(hw, E1000_STATUS) & 1712 E1000_STATUS_LU)) { 1713 link_up = B_TRUE; 1714 } else { 1715 link_up = !hw->mac.get_link_status; 1716 } 1717 } else { 1718 link_up = B_TRUE; 1719 } 1720 break; 1721 case e1000_media_type_fiber: 1722 (void) e1000_check_for_link(hw); 1723 link_up = (E1000_READ_REG(hw, E1000_STATUS) & 1724 E1000_STATUS_LU); 1725 break; 1726 case e1000_media_type_internal_serdes: 1727 (void) e1000_check_for_link(hw); 1728 link_up = hw->mac.serdes_has_link; 1729 break; 1730 } 1731 1732 return (link_up); 1733 } 1734 1735 static void 1736 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp) 1737 { 1738 struct iocblk *iocp; 1739 struct e1000g *e1000gp; 1740 enum ioc_reply status; 1741 1742 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr; 1743 iocp->ioc_error = 0; 1744 e1000gp = (struct e1000g *)arg; 1745 1746 ASSERT(e1000gp); 1747 if (e1000gp == NULL) { 1748 miocnak(q, mp, 0, EINVAL); 1749 return; 1750 } 1751 1752 rw_enter(&e1000gp->chip_lock, RW_READER); 1753 if (e1000gp->e1000g_state & E1000G_SUSPENDED) { 1754 rw_exit(&e1000gp->chip_lock); 1755 miocnak(q, mp, 0, EINVAL); 1756 return; 1757 } 1758 rw_exit(&e1000gp->chip_lock); 1759 1760 switch (iocp->ioc_cmd) { 1761 1762 case LB_GET_INFO_SIZE: 1763 case LB_GET_INFO: 1764 case LB_GET_MODE: 1765 case LB_SET_MODE: 1766 status = e1000g_loopback_ioctl(e1000gp, iocp, mp); 1767 break; 1768 1769 1770 #ifdef E1000G_DEBUG 1771 case E1000G_IOC_REG_PEEK: 1772 case E1000G_IOC_REG_POKE: 1773 status = e1000g_pp_ioctl(e1000gp, iocp, mp); 1774 break; 1775 case E1000G_IOC_CHIP_RESET: 1776 e1000gp->reset_count++; 1777 if (e1000g_reset_adapter(e1000gp)) 1778 status = IOC_ACK; 1779 else 1780 status = IOC_INVAL; 1781 break; 1782 #endif 1783 default: 1784 status = IOC_INVAL; 1785 break; 1786 } 1787 1788 /* 1789 * Decide how to reply 1790 */ 1791 switch (status) { 1792 default: 1793 case IOC_INVAL: 1794 /* 1795 * Error, reply with a NAK and EINVAL or the specified error 1796 */ 1797 miocnak(q, mp, 0, iocp->ioc_error == 0 ? 1798 EINVAL : iocp->ioc_error); 1799 break; 1800 1801 case IOC_DONE: 1802 /* 1803 * OK, reply already sent 1804 */ 1805 break; 1806 1807 case IOC_ACK: 1808 /* 1809 * OK, reply with an ACK 1810 */ 1811 miocack(q, mp, 0, 0); 1812 break; 1813 1814 case IOC_REPLY: 1815 /* 1816 * OK, send prepared reply as ACK or NAK 1817 */ 1818 mp->b_datap->db_type = iocp->ioc_error == 0 ? 1819 M_IOCACK : M_IOCNAK; 1820 qreply(q, mp); 1821 break; 1822 } 1823 } 1824 1825 /* 1826 * The default value of e1000g_poll_mode == 0 assumes that the NIC is 1827 * capable of supporting only one interrupt and we shouldn't disable 1828 * the physical interrupt. In this case we let the interrupt come and 1829 * we queue the packets in the rx ring itself in case we are in polling 1830 * mode (better latency but slightly lower performance and a very 1831 * high intrrupt count in mpstat which is harmless). 1832 * 1833 * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt 1834 * which can be disabled in poll mode. This gives better overall 1835 * throughput (compared to the mode above), shows very low interrupt 1836 * count but has slightly higher latency since we pick the packets when 1837 * the poll thread does polling. 
1838 * 1839 * Currently, this flag should be enabled only while doing performance 1840 * measurement or when it can be guaranteed that entire NIC going 1841 * in poll mode will not harm any traffic like cluster heartbeat etc. 1842 */ 1843 int e1000g_poll_mode = 0; 1844 1845 /* 1846 * Called from the upper layers when driver is in polling mode to 1847 * pick up any queued packets. Care should be taken to not block 1848 * this thread. 1849 */ 1850 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup) 1851 { 1852 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg; 1853 mblk_t *mp = NULL; 1854 mblk_t *tail; 1855 struct e1000g *adapter; 1856 1857 adapter = rx_ring->adapter; 1858 1859 rw_enter(&adapter->chip_lock, RW_READER); 1860 1861 if (adapter->e1000g_state & E1000G_SUSPENDED) { 1862 rw_exit(&adapter->chip_lock); 1863 return (NULL); 1864 } 1865 1866 mutex_enter(&rx_ring->rx_lock); 1867 mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup); 1868 mutex_exit(&rx_ring->rx_lock); 1869 rw_exit(&adapter->chip_lock); 1870 return (mp); 1871 } 1872 1873 static int 1874 e1000g_m_start(void *arg) 1875 { 1876 struct e1000g *Adapter = (struct e1000g *)arg; 1877 1878 rw_enter(&Adapter->chip_lock, RW_WRITER); 1879 1880 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 1881 rw_exit(&Adapter->chip_lock); 1882 return (ECANCELED); 1883 } 1884 1885 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { 1886 rw_exit(&Adapter->chip_lock); 1887 return (ENOTACTIVE); 1888 } 1889 1890 Adapter->e1000g_state |= E1000G_STARTED; 1891 1892 rw_exit(&Adapter->chip_lock); 1893 1894 /* Enable and start the watchdog timer */ 1895 enable_watchdog_timer(Adapter); 1896 1897 return (0); 1898 } 1899 1900 static int 1901 e1000g_start(struct e1000g *Adapter, boolean_t global) 1902 { 1903 e1000g_rx_data_t *rx_data; 1904 1905 if (global) { 1906 if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) { 1907 e1000g_log(Adapter, CE_WARN, "Allocate rx data failed"); 1908 goto start_fail; 1909 } 1910 1911 /* Allocate dma resources for descriptors and buffers */ 1912 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) { 1913 e1000g_log(Adapter, CE_WARN, 1914 "Alloc DMA resources failed"); 1915 goto start_fail; 1916 } 1917 Adapter->rx_buffer_setup = B_FALSE; 1918 } 1919 1920 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) { 1921 if (e1000g_init(Adapter) != DDI_SUCCESS) { 1922 e1000g_log(Adapter, CE_WARN, 1923 "Adapter initialization failed"); 1924 goto start_fail; 1925 } 1926 } 1927 1928 /* Setup and initialize the transmit structures */ 1929 e1000g_tx_setup(Adapter); 1930 msec_delay(5); 1931 1932 /* Setup and initialize the receive structures */ 1933 e1000g_rx_setup(Adapter); 1934 msec_delay(5); 1935 1936 /* Restore the e1000g promiscuous mode */ 1937 e1000g_restore_promisc(Adapter); 1938 1939 e1000g_mask_interrupt(Adapter); 1940 1941 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 1942 1943 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1944 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1945 goto start_fail; 1946 } 1947 1948 return (DDI_SUCCESS); 1949 1950 start_fail: 1951 rx_data = Adapter->rx_ring->rx_data; 1952 1953 if (global) { 1954 e1000g_release_dma_resources(Adapter); 1955 e1000g_free_rx_pending_buffers(rx_data); 1956 e1000g_free_rx_data(rx_data); 1957 } 1958 1959 mutex_enter(&e1000g_nvm_lock); 1960 (void) e1000_reset_hw(&Adapter->shared); 1961 mutex_exit(&e1000g_nvm_lock); 1962 1963 return (DDI_FAILURE); 1964 } 1965 1966 /* 1967 * The I219 has the curious property that if the descriptor 
rings are not 1968 * emptied before resetting the hardware or before changing the device state 1969 * based on runtime power management, it'll cause the card to hang. This can 1970 * then only be fixed by a PCI reset. As such, for the I219 and it alone, we 1971 * have to flush the rings if we're in this state. 1972 */ 1973 static void 1974 e1000g_flush_desc_rings(struct e1000g *Adapter) 1975 { 1976 struct e1000_hw *hw = &Adapter->shared; 1977 u16 hang_state; 1978 u32 fext_nvm11, tdlen; 1979 1980 /* First, disable MULR fix in FEXTNVM11 */ 1981 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11); 1982 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; 1983 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11); 1984 1985 /* do nothing if we're not in faulty state, or if the queue is empty */ 1986 tdlen = E1000_READ_REG(hw, E1000_TDLEN(0)); 1987 hang_state = pci_config_get16(Adapter->osdep.cfg_handle, 1988 PCICFG_DESC_RING_STATUS); 1989 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) 1990 return; 1991 e1000g_flush_tx_ring(Adapter); 1992 1993 /* recheck, maybe the fault is caused by the rx ring */ 1994 hang_state = pci_config_get16(Adapter->osdep.cfg_handle, 1995 PCICFG_DESC_RING_STATUS); 1996 if (hang_state & FLUSH_DESC_REQUIRED) 1997 e1000g_flush_rx_ring(Adapter); 1998 1999 } 2000 2001 static void 2002 e1000g_m_stop(void *arg) 2003 { 2004 struct e1000g *Adapter = (struct e1000g *)arg; 2005 2006 /* Drain tx sessions */ 2007 (void) e1000g_tx_drain(Adapter); 2008 2009 rw_enter(&Adapter->chip_lock, RW_WRITER); 2010 2011 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2012 rw_exit(&Adapter->chip_lock); 2013 return; 2014 } 2015 Adapter->e1000g_state &= ~E1000G_STARTED; 2016 e1000g_stop(Adapter, B_TRUE); 2017 2018 rw_exit(&Adapter->chip_lock); 2019 2020 /* Disable and stop all the timers */ 2021 disable_watchdog_timer(Adapter); 2022 stop_link_timer(Adapter); 2023 stop_82547_timer(Adapter->tx_ring); 2024 } 2025 2026 static void 2027 e1000g_stop(struct e1000g *Adapter, boolean_t global) 2028 { 2029 private_devi_list_t *devi_node; 2030 e1000g_rx_data_t *rx_data; 2031 int result; 2032 2033 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT; 2034 2035 /* Stop the chip and release pending resources */ 2036 2037 /* Tell firmware driver is no longer in control */ 2038 e1000g_release_driver_control(&Adapter->shared); 2039 2040 e1000g_clear_all_interrupts(Adapter); 2041 2042 mutex_enter(&e1000g_nvm_lock); 2043 result = e1000_reset_hw(&Adapter->shared); 2044 mutex_exit(&e1000g_nvm_lock); 2045 2046 if (result != E1000_SUCCESS) { 2047 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 2048 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 2049 } 2050 2051 mutex_enter(&Adapter->link_lock); 2052 Adapter->link_complete = B_FALSE; 2053 mutex_exit(&Adapter->link_lock); 2054 2055 /* Release resources still held by the TX descriptors */ 2056 e1000g_tx_clean(Adapter); 2057 2058 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 2059 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 2060 2061 /* Clean the pending rx jumbo packet fragment */ 2062 e1000g_rx_clean(Adapter); 2063 2064 /* 2065 * The I219, eg. the pch_spt, has bugs such that we must ensure that 2066 * rings are flushed before we do anything else. This must be done 2067 * before we release DMA resources. 
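 *
 * The check itself lives in e1000g_flush_desc_rings() above: apart from
 * disabling the MULR fix in FEXTNVM11, it does nothing unless the
 * FLUSH_DESC_REQUIRED bit is set in the descriptor-ring status word of
 * PCI config space and TDLEN(0) is non-zero.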
2068 */ 2069 if (Adapter->shared.mac.type == e1000_pch_spt) 2070 e1000g_flush_desc_rings(Adapter); 2071 2072 if (global) { 2073 e1000g_release_dma_resources(Adapter); 2074 2075 mutex_enter(&e1000g_rx_detach_lock); 2076 rx_data = Adapter->rx_ring->rx_data; 2077 rx_data->flag |= E1000G_RX_STOPPED; 2078 2079 if (rx_data->pending_count == 0) { 2080 e1000g_free_rx_pending_buffers(rx_data); 2081 e1000g_free_rx_data(rx_data); 2082 } else { 2083 devi_node = rx_data->priv_devi_node; 2084 if (devi_node != NULL) 2085 atomic_inc_32(&devi_node->pending_rx_count); 2086 else 2087 atomic_inc_32(&Adapter->pending_rx_count); 2088 } 2089 mutex_exit(&e1000g_rx_detach_lock); 2090 } 2091 2092 if (Adapter->link_state != LINK_STATE_UNKNOWN) { 2093 Adapter->link_state = LINK_STATE_UNKNOWN; 2094 if (!Adapter->reset_flag) 2095 mac_link_update(Adapter->mh, Adapter->link_state); 2096 } 2097 } 2098 2099 static void 2100 e1000g_rx_clean(struct e1000g *Adapter) 2101 { 2102 e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data; 2103 2104 if (rx_data == NULL) 2105 return; 2106 2107 if (rx_data->rx_mblk != NULL) { 2108 freemsg(rx_data->rx_mblk); 2109 rx_data->rx_mblk = NULL; 2110 rx_data->rx_mblk_tail = NULL; 2111 rx_data->rx_mblk_len = 0; 2112 } 2113 } 2114 2115 static void 2116 e1000g_tx_clean(struct e1000g *Adapter) 2117 { 2118 e1000g_tx_ring_t *tx_ring; 2119 p_tx_sw_packet_t packet; 2120 mblk_t *mp; 2121 mblk_t *nmp; 2122 uint32_t packet_count; 2123 2124 tx_ring = Adapter->tx_ring; 2125 2126 /* 2127 * Here we don't need to protect the lists using 2128 * the usedlist_lock and freelist_lock, for they 2129 * have been protected by the chip_lock. 2130 */ 2131 mp = NULL; 2132 nmp = NULL; 2133 packet_count = 0; 2134 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list); 2135 while (packet != NULL) { 2136 if (packet->mp != NULL) { 2137 /* Assemble the message chain */ 2138 if (mp == NULL) { 2139 mp = packet->mp; 2140 nmp = packet->mp; 2141 } else { 2142 nmp->b_next = packet->mp; 2143 nmp = packet->mp; 2144 } 2145 /* Disconnect the message from the sw packet */ 2146 packet->mp = NULL; 2147 } 2148 2149 e1000g_free_tx_swpkt(packet); 2150 packet_count++; 2151 2152 packet = (p_tx_sw_packet_t) 2153 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link); 2154 } 2155 2156 if (mp != NULL) 2157 freemsgchain(mp); 2158 2159 if (packet_count > 0) { 2160 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list); 2161 QUEUE_INIT_LIST(&tx_ring->used_list); 2162 2163 /* Setup TX descriptor pointers */ 2164 tx_ring->tbd_next = tx_ring->tbd_first; 2165 tx_ring->tbd_oldest = tx_ring->tbd_first; 2166 2167 /* Setup our HW Tx Head & Tail descriptor pointers */ 2168 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0); 2169 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0); 2170 } 2171 } 2172 2173 static boolean_t 2174 e1000g_tx_drain(struct e1000g *Adapter) 2175 { 2176 int i; 2177 boolean_t done; 2178 e1000g_tx_ring_t *tx_ring; 2179 2180 tx_ring = Adapter->tx_ring; 2181 2182 /* Allow up to 'wsdraintime' for pending xmit's to complete. */ 2183 for (i = 0; i < TX_DRAIN_TIME; i++) { 2184 mutex_enter(&tx_ring->usedlist_lock); 2185 done = IS_QUEUE_EMPTY(&tx_ring->used_list); 2186 mutex_exit(&tx_ring->usedlist_lock); 2187 2188 if (done) 2189 break; 2190 2191 msec_delay(1); 2192 } 2193 2194 return (done); 2195 } 2196 2197 static boolean_t 2198 e1000g_rx_drain(struct e1000g *Adapter) 2199 { 2200 int i; 2201 boolean_t done; 2202 2203 /* 2204 * Allow up to RX_DRAIN_TIME for pending received packets to complete. 
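 *
 * (pending_rx_count is the number of receive buffers still outstanding,
 * typically buffers loaned up the stack that have not yet come back
 * through their free callbacks, so all this loop can do is poll the
 * counter and wait.)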
2205 */ 2206 for (i = 0; i < RX_DRAIN_TIME; i++) { 2207 done = (Adapter->pending_rx_count == 0); 2208 2209 if (done) 2210 break; 2211 2212 msec_delay(1); 2213 } 2214 2215 return (done); 2216 } 2217 2218 static boolean_t 2219 e1000g_reset_adapter(struct e1000g *Adapter) 2220 { 2221 /* Disable and stop all the timers */ 2222 disable_watchdog_timer(Adapter); 2223 stop_link_timer(Adapter); 2224 stop_82547_timer(Adapter->tx_ring); 2225 2226 rw_enter(&Adapter->chip_lock, RW_WRITER); 2227 2228 if (Adapter->stall_flag) { 2229 Adapter->stall_flag = B_FALSE; 2230 Adapter->reset_flag = B_TRUE; 2231 } 2232 2233 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 2234 rw_exit(&Adapter->chip_lock); 2235 return (B_TRUE); 2236 } 2237 2238 e1000g_stop(Adapter, B_FALSE); 2239 2240 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { 2241 rw_exit(&Adapter->chip_lock); 2242 e1000g_log(Adapter, CE_WARN, "Reset failed"); 2243 return (B_FALSE); 2244 } 2245 2246 rw_exit(&Adapter->chip_lock); 2247 2248 /* Enable and start the watchdog timer */ 2249 enable_watchdog_timer(Adapter); 2250 2251 return (B_TRUE); 2252 } 2253 2254 boolean_t 2255 e1000g_global_reset(struct e1000g *Adapter) 2256 { 2257 /* Disable and stop all the timers */ 2258 disable_watchdog_timer(Adapter); 2259 stop_link_timer(Adapter); 2260 stop_82547_timer(Adapter->tx_ring); 2261 2262 rw_enter(&Adapter->chip_lock, RW_WRITER); 2263 2264 e1000g_stop(Adapter, B_TRUE); 2265 2266 Adapter->init_count = 0; 2267 2268 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { 2269 rw_exit(&Adapter->chip_lock); 2270 e1000g_log(Adapter, CE_WARN, "Reset failed"); 2271 return (B_FALSE); 2272 } 2273 2274 rw_exit(&Adapter->chip_lock); 2275 2276 /* Enable and start the watchdog timer */ 2277 enable_watchdog_timer(Adapter); 2278 2279 return (B_TRUE); 2280 } 2281 2282 /* 2283 * e1000g_intr_pciexpress - ISR for PCI Express chipsets 2284 * 2285 * This interrupt service routine is for PCI-Express adapters. 2286 * The ICR contents is valid only when the E1000_ICR_INT_ASSERTED 2287 * bit is set. 2288 */ 2289 static uint_t 2290 e1000g_intr_pciexpress(caddr_t arg) 2291 { 2292 struct e1000g *Adapter; 2293 uint32_t icr; 2294 2295 Adapter = (struct e1000g *)(uintptr_t)arg; 2296 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 2297 2298 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2299 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2300 return (DDI_INTR_CLAIMED); 2301 } 2302 2303 if (icr & E1000_ICR_INT_ASSERTED) { 2304 /* 2305 * E1000_ICR_INT_ASSERTED bit was set: 2306 * Read(Clear) the ICR, claim this interrupt, 2307 * look for work to do. 2308 */ 2309 e1000g_intr_work(Adapter, icr); 2310 return (DDI_INTR_CLAIMED); 2311 } else { 2312 /* 2313 * E1000_ICR_INT_ASSERTED bit was not set: 2314 * Don't claim this interrupt, return immediately. 2315 */ 2316 return (DDI_INTR_UNCLAIMED); 2317 } 2318 } 2319 2320 /* 2321 * e1000g_intr - ISR for PCI/PCI-X chipsets 2322 * 2323 * This interrupt service routine is for PCI/PCI-X adapters. 2324 * We check the ICR contents no matter the E1000_ICR_INT_ASSERTED 2325 * bit is set or not. 
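 *
 * On these older, pre-PCI Express MACs the driver does not rely on the
 * INT_ASSERTED bit: any non-zero ICR is taken to mean the interrupt is
 * ours, while a zero ICR leaves it unclaimed for whatever other device
 * may be sharing the interrupt line.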
2326 */ 2327 static uint_t 2328 e1000g_intr(caddr_t arg) 2329 { 2330 struct e1000g *Adapter; 2331 uint32_t icr; 2332 2333 Adapter = (struct e1000g *)(uintptr_t)arg; 2334 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 2335 2336 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2337 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2338 return (DDI_INTR_CLAIMED); 2339 } 2340 2341 if (icr) { 2342 /* 2343 * Any bit was set in ICR: 2344 * Read(Clear) the ICR, claim this interrupt, 2345 * look for work to do. 2346 */ 2347 e1000g_intr_work(Adapter, icr); 2348 return (DDI_INTR_CLAIMED); 2349 } else { 2350 /* 2351 * No bit was set in ICR: 2352 * Don't claim this interrupt, return immediately. 2353 */ 2354 return (DDI_INTR_UNCLAIMED); 2355 } 2356 } 2357 2358 /* 2359 * e1000g_intr_work - actual processing of ISR 2360 * 2361 * Read(clear) the ICR contents and call appropriate interrupt 2362 * processing routines. 2363 */ 2364 static void 2365 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr) 2366 { 2367 struct e1000_hw *hw; 2368 hw = &Adapter->shared; 2369 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 2370 2371 Adapter->rx_pkt_cnt = 0; 2372 Adapter->tx_pkt_cnt = 0; 2373 2374 rw_enter(&Adapter->chip_lock, RW_READER); 2375 2376 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2377 rw_exit(&Adapter->chip_lock); 2378 return; 2379 } 2380 /* 2381 * Here we need to check the "e1000g_state" flag within the chip_lock to 2382 * ensure the receive routine will not execute when the adapter is 2383 * being reset. 2384 */ 2385 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 2386 rw_exit(&Adapter->chip_lock); 2387 return; 2388 } 2389 2390 if (icr & E1000_ICR_RXT0) { 2391 mblk_t *mp = NULL; 2392 mblk_t *tail = NULL; 2393 e1000g_rx_ring_t *rx_ring; 2394 2395 rx_ring = Adapter->rx_ring; 2396 mutex_enter(&rx_ring->rx_lock); 2397 /* 2398 * Sometimes with legacy interrupts, it possible that 2399 * there is a single interrupt for Rx/Tx. In which 2400 * case, if poll flag is set, we shouldn't really 2401 * be doing Rx processing. 2402 */ 2403 if (!rx_ring->poll_flag) 2404 mp = e1000g_receive(rx_ring, &tail, 2405 E1000G_CHAIN_NO_LIMIT); 2406 mutex_exit(&rx_ring->rx_lock); 2407 rw_exit(&Adapter->chip_lock); 2408 if (mp != NULL) 2409 mac_rx_ring(Adapter->mh, rx_ring->mrh, 2410 mp, rx_ring->ring_gen_num); 2411 } else 2412 rw_exit(&Adapter->chip_lock); 2413 2414 if (icr & E1000_ICR_TXDW) { 2415 if (!Adapter->tx_intr_enable) 2416 e1000g_clear_tx_interrupt(Adapter); 2417 2418 /* Recycle the tx descriptors */ 2419 rw_enter(&Adapter->chip_lock, RW_READER); 2420 (void) e1000g_recycle(tx_ring); 2421 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr); 2422 rw_exit(&Adapter->chip_lock); 2423 2424 if (tx_ring->resched_needed && 2425 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) { 2426 tx_ring->resched_needed = B_FALSE; 2427 mac_tx_update(Adapter->mh); 2428 E1000G_STAT(tx_ring->stat_reschedule); 2429 } 2430 } 2431 2432 /* 2433 * The Receive Sequence errors RXSEQ and the link status change LSC 2434 * are checked to detect that the cable has been pulled out. For 2435 * the Wiseman 2.0 silicon, the receive sequence errors interrupt 2436 * are an indication that cable is not connected. 
2437 */ 2438 if ((icr & E1000_ICR_RXSEQ) || 2439 (icr & E1000_ICR_LSC) || 2440 (icr & E1000_ICR_GPI_EN1)) { 2441 boolean_t link_changed; 2442 timeout_id_t tid = 0; 2443 2444 stop_watchdog_timer(Adapter); 2445 2446 rw_enter(&Adapter->chip_lock, RW_WRITER); 2447 2448 /* 2449 * Because we got a link-status-change interrupt, force 2450 * e1000_check_for_link() to look at phy 2451 */ 2452 Adapter->shared.mac.get_link_status = B_TRUE; 2453 2454 /* e1000g_link_check takes care of link status change */ 2455 link_changed = e1000g_link_check(Adapter); 2456 2457 /* Get new phy state */ 2458 e1000g_get_phy_state(Adapter); 2459 2460 /* 2461 * If the link timer has not timed out, we'll not notify 2462 * the upper layer with any link state until the link is up. 2463 */ 2464 if (link_changed && !Adapter->link_complete) { 2465 if (Adapter->link_state == LINK_STATE_UP) { 2466 mutex_enter(&Adapter->link_lock); 2467 Adapter->link_complete = B_TRUE; 2468 tid = Adapter->link_tid; 2469 Adapter->link_tid = 0; 2470 mutex_exit(&Adapter->link_lock); 2471 } else { 2472 link_changed = B_FALSE; 2473 } 2474 } 2475 rw_exit(&Adapter->chip_lock); 2476 2477 if (link_changed) { 2478 if (tid != 0) 2479 (void) untimeout(tid); 2480 2481 /* 2482 * Workaround for esb2. Data stuck in fifo on a link 2483 * down event. Stop receiver here and reset in watchdog. 2484 */ 2485 if ((Adapter->link_state == LINK_STATE_DOWN) && 2486 (Adapter->shared.mac.type == e1000_80003es2lan)) { 2487 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL); 2488 E1000_WRITE_REG(hw, E1000_RCTL, 2489 rctl & ~E1000_RCTL_EN); 2490 e1000g_log(Adapter, CE_WARN, 2491 "ESB2 receiver disabled"); 2492 Adapter->esb2_workaround = B_TRUE; 2493 } 2494 if (!Adapter->reset_flag) 2495 mac_link_update(Adapter->mh, 2496 Adapter->link_state); 2497 if (Adapter->link_state == LINK_STATE_UP) 2498 Adapter->reset_flag = B_FALSE; 2499 } 2500 2501 start_watchdog_timer(Adapter); 2502 } 2503 } 2504 2505 static void 2506 e1000g_init_unicst(struct e1000g *Adapter) 2507 { 2508 struct e1000_hw *hw; 2509 int slot; 2510 2511 hw = &Adapter->shared; 2512 2513 if (Adapter->init_count == 0) { 2514 /* Initialize the multiple unicast addresses */ 2515 Adapter->unicst_total = min(hw->mac.rar_entry_count, 2516 MAX_NUM_UNICAST_ADDRESSES); 2517 2518 /* 2519 * The common code does not correctly calculate the number of 2520 * rar's that could be reserved by firmware for the pch_lpt and 2521 * pch_spt macs. The interface has one primary rar, and 11 2522 * additional ones. Those 11 additional ones are not always 2523 * available. According to the datasheet, we need to check a 2524 * few of the bits set in the FWSM register. If the value is 2525 * zero, everything is available. If the value is 1, none of the 2526 * additional registers are available. If the value is 2-7, only 2527 * that number are available. 
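 *
 * Worked through against the code below, that gives for example:
 *
 *	FWSM.WLOCK_MAC == 0  ->  1 + 11 = 12 usable RAR entries
 *	FWSM.WLOCK_MAC == 1  ->  1 +  0 =  1 (primary RAR only)
 *	FWSM.WLOCK_MAC == 3  ->  1 +  3 =  4 usable RAR entries
 *
 * capped in all cases by MAX_NUM_UNICAST_ADDRESSES.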
2528 		 */
2529 		if (hw->mac.type == e1000_pch_lpt ||
2530 		    hw->mac.type == e1000_pch_spt) {
2531 			uint32_t locked, rar;
2532 
2533 			locked = E1000_READ_REG(hw, E1000_FWSM) &
2534 			    E1000_FWSM_WLOCK_MAC_MASK;
2535 			locked >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2536 			rar = 1;
2537 			if (locked == 0)
2538 				rar += 11;
2539 			else if (locked == 1)
2540 				rar += 0;
2541 			else
2542 				rar += locked;
2543 			Adapter->unicst_total = min(rar,
2544 			    MAX_NUM_UNICAST_ADDRESSES);
2545 		}
2546 
2547 		/* Workaround for an erratum of the 82571 chipset */
2548 		if ((hw->mac.type == e1000_82571) &&
2549 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2550 			Adapter->unicst_total--;
2551 
2552 		/* VMware doesn't support multiple mac addresses properly */
2553 		if (hw->subsystem_vendor_id == 0x15ad)
2554 			Adapter->unicst_total = 1;
2555 
2556 		Adapter->unicst_avail = Adapter->unicst_total;
2557 
2558 		for (slot = 0; slot < Adapter->unicst_total; slot++) {
2559 			/* Clear both the flag and MAC address */
2560 			Adapter->unicst_addr[slot].reg.high = 0;
2561 			Adapter->unicst_addr[slot].reg.low = 0;
2562 		}
2563 	} else {
2564 		/* Workaround for an erratum of the 82571 chipset */
2565 		if ((hw->mac.type == e1000_82571) &&
2566 		    (e1000_get_laa_state_82571(hw) == B_TRUE))
2567 			(void) e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2568 
2569 		/* Re-configure the RAR registers */
2570 		for (slot = 0; slot < Adapter->unicst_total; slot++)
2571 			if (Adapter->unicst_addr[slot].mac.set == 1)
2572 				(void) e1000_rar_set(hw,
2573 				    Adapter->unicst_addr[slot].mac.addr, slot);
2574 	}
2575 
2576 	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2577 		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2578 }
2579 
2580 static int
2581 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2582     int slot)
2583 {
2584 	struct e1000_hw *hw;
2585 
2586 	hw = &Adapter->shared;
2587 
2588 	/*
2589 	 * The first revision of Wiseman silicon (rev 2.0) has an erratum
2590 	 * that requires the receiver to be in reset when any of the
2591 	 * receive address registers (RAR regs) are accessed. The first
2592 	 * rev of Wiseman silicon also requires MWI to be disabled when
2593 	 * a global reset or a receive reset is issued. So before we
2594 	 * initialize the RARs, we check the rev of the Wiseman controller
2595 	 * and work around any necessary HW errata.
2596 */ 2597 if ((hw->mac.type == e1000_82542) && 2598 (hw->revision_id == E1000_REVISION_2)) { 2599 e1000_pci_clear_mwi(hw); 2600 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); 2601 msec_delay(5); 2602 } 2603 if (mac_addr == NULL) { 2604 E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0); 2605 E1000_WRITE_FLUSH(hw); 2606 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0); 2607 E1000_WRITE_FLUSH(hw); 2608 /* Clear both the flag and MAC address */ 2609 Adapter->unicst_addr[slot].reg.high = 0; 2610 Adapter->unicst_addr[slot].reg.low = 0; 2611 } else { 2612 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr, 2613 ETHERADDRL); 2614 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot); 2615 Adapter->unicst_addr[slot].mac.set = 1; 2616 } 2617 2618 /* Workaround for an erratum of 82571 chipst */ 2619 if (slot == 0) { 2620 if ((hw->mac.type == e1000_82571) && 2621 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2622 if (mac_addr == NULL) { 2623 E1000_WRITE_REG_ARRAY(hw, E1000_RA, 2624 slot << 1, 0); 2625 E1000_WRITE_FLUSH(hw); 2626 E1000_WRITE_REG_ARRAY(hw, E1000_RA, 2627 (slot << 1) + 1, 0); 2628 E1000_WRITE_FLUSH(hw); 2629 } else { 2630 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, 2631 LAST_RAR_ENTRY); 2632 } 2633 } 2634 2635 /* 2636 * If we are using Wiseman rev 2.0 silicon, we will have previously 2637 * put the receive in reset, and disabled MWI, to work around some 2638 * HW errata. Now we should take the receiver out of reset, and 2639 * re-enabled if MWI if it was previously enabled by the PCI BIOS. 2640 */ 2641 if ((hw->mac.type == e1000_82542) && 2642 (hw->revision_id == E1000_REVISION_2)) { 2643 E1000_WRITE_REG(hw, E1000_RCTL, 0); 2644 msec_delay(1); 2645 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 2646 e1000_pci_set_mwi(hw); 2647 e1000g_rx_setup(Adapter); 2648 } 2649 2650 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2651 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2652 return (EIO); 2653 } 2654 2655 return (0); 2656 } 2657 2658 static int 2659 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr) 2660 { 2661 struct e1000_hw *hw = &Adapter->shared; 2662 struct ether_addr *newtable; 2663 size_t new_len; 2664 size_t old_len; 2665 int res = 0; 2666 2667 if ((multiaddr[0] & 01) == 0) { 2668 res = EINVAL; 2669 e1000g_log(Adapter, CE_WARN, "Illegal multicast address"); 2670 goto done; 2671 } 2672 2673 if (Adapter->mcast_count >= Adapter->mcast_max_num) { 2674 res = ENOENT; 2675 e1000g_log(Adapter, CE_WARN, 2676 "Adapter requested more than %d mcast addresses", 2677 Adapter->mcast_max_num); 2678 goto done; 2679 } 2680 2681 2682 if (Adapter->mcast_count == Adapter->mcast_alloc_count) { 2683 old_len = Adapter->mcast_alloc_count * 2684 sizeof (struct ether_addr); 2685 new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) * 2686 sizeof (struct ether_addr); 2687 2688 newtable = kmem_alloc(new_len, KM_NOSLEEP); 2689 if (newtable == NULL) { 2690 res = ENOMEM; 2691 e1000g_log(Adapter, CE_WARN, 2692 "Not enough memory to alloc mcast table"); 2693 goto done; 2694 } 2695 2696 if (Adapter->mcast_table != NULL) { 2697 bcopy(Adapter->mcast_table, newtable, old_len); 2698 kmem_free(Adapter->mcast_table, old_len); 2699 } 2700 Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE; 2701 Adapter->mcast_table = newtable; 2702 } 2703 2704 bcopy(multiaddr, 2705 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL); 2706 Adapter->mcast_count++; 2707 2708 /* 2709 * Update the MC table in the hardware 2710 */ 2711 e1000g_clear_interrupt(Adapter); 2712 2713 
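	/*
	 * e1000_update_mc_addr_list() is Intel shared code; it rewrites the
	 * hardware multicast filter from the mcast_table built above.
	 * Interrupts are quiesced around the update (e1000g_clear_interrupt()
	 * above, e1000g_mask_interrupt() below).
	 */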
e1000_update_mc_addr_list(hw, 2714 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 2715 2716 e1000g_mask_interrupt(Adapter); 2717 2718 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2719 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2720 res = EIO; 2721 } 2722 2723 done: 2724 return (res); 2725 } 2726 2727 static int 2728 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr) 2729 { 2730 struct e1000_hw *hw = &Adapter->shared; 2731 struct ether_addr *newtable; 2732 size_t new_len; 2733 size_t old_len; 2734 unsigned i; 2735 2736 for (i = 0; i < Adapter->mcast_count; i++) { 2737 if (bcmp(multiaddr, &Adapter->mcast_table[i], 2738 ETHERADDRL) == 0) { 2739 for (i++; i < Adapter->mcast_count; i++) { 2740 Adapter->mcast_table[i - 1] = 2741 Adapter->mcast_table[i]; 2742 } 2743 Adapter->mcast_count--; 2744 break; 2745 } 2746 } 2747 2748 if ((Adapter->mcast_alloc_count - Adapter->mcast_count) > 2749 MCAST_ALLOC_SIZE) { 2750 old_len = Adapter->mcast_alloc_count * 2751 sizeof (struct ether_addr); 2752 new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) * 2753 sizeof (struct ether_addr); 2754 2755 newtable = kmem_alloc(new_len, KM_NOSLEEP); 2756 if (newtable != NULL) { 2757 bcopy(Adapter->mcast_table, newtable, new_len); 2758 kmem_free(Adapter->mcast_table, old_len); 2759 2760 Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE; 2761 Adapter->mcast_table = newtable; 2762 } 2763 } 2764 2765 /* 2766 * Update the MC table in the hardware 2767 */ 2768 e1000g_clear_interrupt(Adapter); 2769 2770 e1000_update_mc_addr_list(hw, 2771 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 2772 2773 e1000g_mask_interrupt(Adapter); 2774 2775 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2776 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2777 return (EIO); 2778 } 2779 2780 return (0); 2781 } 2782 2783 static void 2784 e1000g_release_multicast(struct e1000g *Adapter) 2785 { 2786 if (Adapter->mcast_table != NULL) { 2787 kmem_free(Adapter->mcast_table, 2788 Adapter->mcast_alloc_count * sizeof (struct ether_addr)); 2789 Adapter->mcast_table = NULL; 2790 } 2791 } 2792 2793 int 2794 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr) 2795 { 2796 struct e1000g *Adapter = (struct e1000g *)arg; 2797 int result; 2798 2799 rw_enter(&Adapter->chip_lock, RW_WRITER); 2800 2801 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2802 result = ECANCELED; 2803 goto done; 2804 } 2805 2806 result = (add) ? 
multicst_add(Adapter, addr) 2807 : multicst_remove(Adapter, addr); 2808 2809 done: 2810 rw_exit(&Adapter->chip_lock); 2811 return (result); 2812 2813 } 2814 2815 int 2816 e1000g_m_promisc(void *arg, boolean_t on) 2817 { 2818 struct e1000g *Adapter = (struct e1000g *)arg; 2819 uint32_t rctl; 2820 2821 rw_enter(&Adapter->chip_lock, RW_WRITER); 2822 2823 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2824 rw_exit(&Adapter->chip_lock); 2825 return (ECANCELED); 2826 } 2827 2828 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL); 2829 2830 if (on) 2831 rctl |= 2832 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM); 2833 else 2834 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE)); 2835 2836 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl); 2837 2838 Adapter->e1000g_promisc = on; 2839 2840 rw_exit(&Adapter->chip_lock); 2841 2842 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2843 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2844 return (EIO); 2845 } 2846 2847 return (0); 2848 } 2849 2850 /* 2851 * Entry points to enable and disable interrupts at the granularity of 2852 * a group. 2853 * Turns the poll_mode for the whole adapter on and off to enable or 2854 * override the ring level polling control over the hardware interrupts. 2855 */ 2856 static int 2857 e1000g_rx_group_intr_enable(mac_intr_handle_t arg) 2858 { 2859 struct e1000g *adapter = (struct e1000g *)arg; 2860 e1000g_rx_ring_t *rx_ring = adapter->rx_ring; 2861 2862 /* 2863 * Later interrupts at the granularity of the this ring will 2864 * invoke mac_rx() with NULL, indicating the need for another 2865 * software classification. 2866 * We have a single ring usable per adapter now, so we only need to 2867 * reset the rx handle for that one. 2868 * When more RX rings can be used, we should update each one of them. 2869 */ 2870 mutex_enter(&rx_ring->rx_lock); 2871 rx_ring->mrh = NULL; 2872 adapter->poll_mode = B_FALSE; 2873 mutex_exit(&rx_ring->rx_lock); 2874 return (0); 2875 } 2876 2877 static int 2878 e1000g_rx_group_intr_disable(mac_intr_handle_t arg) 2879 { 2880 struct e1000g *adapter = (struct e1000g *)arg; 2881 e1000g_rx_ring_t *rx_ring = adapter->rx_ring; 2882 2883 mutex_enter(&rx_ring->rx_lock); 2884 2885 /* 2886 * Later interrupts at the granularity of the this ring will 2887 * invoke mac_rx() with the handle for this ring; 2888 */ 2889 adapter->poll_mode = B_TRUE; 2890 rx_ring->mrh = rx_ring->mrh_init; 2891 mutex_exit(&rx_ring->rx_lock); 2892 return (0); 2893 } 2894 2895 /* 2896 * Entry points to enable and disable interrupts at the granularity of 2897 * a ring. 2898 * adapter poll_mode controls whether we actually proceed with hardware 2899 * interrupt toggling. 
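 *
 * A rough sketch of the sequence the mac layer drives through these
 * entry points when it moves the ring between interrupt and poll mode:
 *
 *	e1000g_rx_ring_intr_disable(rh);    poll_flag = 1, mask RXT0
 *	mp = e1000g_poll_ring(rh, nbytes);  pick up queued packets
 *	...
 *	e1000g_rx_ring_intr_enable(rh);     poll_flag = 0, unmask RXT0 and
 *					    write ICS to catch anything that
 *					    arrived while polling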
2900 */ 2901 static int 2902 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh) 2903 { 2904 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh; 2905 struct e1000g *adapter = rx_ring->adapter; 2906 struct e1000_hw *hw = &adapter->shared; 2907 uint32_t intr_mask; 2908 2909 rw_enter(&adapter->chip_lock, RW_READER); 2910 2911 if (adapter->e1000g_state & E1000G_SUSPENDED) { 2912 rw_exit(&adapter->chip_lock); 2913 return (0); 2914 } 2915 2916 mutex_enter(&rx_ring->rx_lock); 2917 rx_ring->poll_flag = 0; 2918 mutex_exit(&rx_ring->rx_lock); 2919 2920 /* Rx interrupt enabling for MSI and legacy */ 2921 intr_mask = E1000_READ_REG(hw, E1000_IMS); 2922 intr_mask |= E1000_IMS_RXT0; 2923 E1000_WRITE_REG(hw, E1000_IMS, intr_mask); 2924 E1000_WRITE_FLUSH(hw); 2925 2926 /* Trigger a Rx interrupt to check Rx ring */ 2927 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 2928 E1000_WRITE_FLUSH(hw); 2929 2930 rw_exit(&adapter->chip_lock); 2931 return (0); 2932 } 2933 2934 static int 2935 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh) 2936 { 2937 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh; 2938 struct e1000g *adapter = rx_ring->adapter; 2939 struct e1000_hw *hw = &adapter->shared; 2940 2941 rw_enter(&adapter->chip_lock, RW_READER); 2942 2943 if (adapter->e1000g_state & E1000G_SUSPENDED) { 2944 rw_exit(&adapter->chip_lock); 2945 return (0); 2946 } 2947 mutex_enter(&rx_ring->rx_lock); 2948 rx_ring->poll_flag = 1; 2949 mutex_exit(&rx_ring->rx_lock); 2950 2951 /* Rx interrupt disabling for MSI and legacy */ 2952 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0); 2953 E1000_WRITE_FLUSH(hw); 2954 2955 rw_exit(&adapter->chip_lock); 2956 return (0); 2957 } 2958 2959 /* 2960 * e1000g_unicst_find - Find the slot for the specified unicast address 2961 */ 2962 static int 2963 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr) 2964 { 2965 int slot; 2966 2967 for (slot = 0; slot < Adapter->unicst_total; slot++) { 2968 if ((Adapter->unicst_addr[slot].mac.set == 1) && 2969 (bcmp(Adapter->unicst_addr[slot].mac.addr, 2970 mac_addr, ETHERADDRL) == 0)) 2971 return (slot); 2972 } 2973 2974 return (-1); 2975 } 2976 2977 /* 2978 * Entry points to add and remove a MAC address to a ring group. 2979 * The caller takes care of adding and removing the MAC addresses 2980 * to the filter via these two routines. 
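 *
 * (They are handed to the mac layer as the group's mgi_addmac/mgi_remmac
 * callbacks in e1000g_fill_group() below; the slot bookkeeping lives in
 * unicst_addr[] and unicst_avail, and the hardware RARs are programmed
 * through e1000g_unicst_set().)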
2981 */ 2982 2983 static int 2984 e1000g_addmac(void *arg, const uint8_t *mac_addr) 2985 { 2986 struct e1000g *Adapter = (struct e1000g *)arg; 2987 int slot, err; 2988 2989 rw_enter(&Adapter->chip_lock, RW_WRITER); 2990 2991 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2992 rw_exit(&Adapter->chip_lock); 2993 return (ECANCELED); 2994 } 2995 2996 if (e1000g_unicst_find(Adapter, mac_addr) != -1) { 2997 /* The same address is already in slot */ 2998 rw_exit(&Adapter->chip_lock); 2999 return (0); 3000 } 3001 3002 if (Adapter->unicst_avail == 0) { 3003 /* no slots available */ 3004 rw_exit(&Adapter->chip_lock); 3005 return (ENOSPC); 3006 } 3007 3008 /* Search for a free slot */ 3009 for (slot = 0; slot < Adapter->unicst_total; slot++) { 3010 if (Adapter->unicst_addr[slot].mac.set == 0) 3011 break; 3012 } 3013 ASSERT(slot < Adapter->unicst_total); 3014 3015 err = e1000g_unicst_set(Adapter, mac_addr, slot); 3016 if (err == 0) 3017 Adapter->unicst_avail--; 3018 3019 rw_exit(&Adapter->chip_lock); 3020 3021 return (err); 3022 } 3023 3024 static int 3025 e1000g_remmac(void *arg, const uint8_t *mac_addr) 3026 { 3027 struct e1000g *Adapter = (struct e1000g *)arg; 3028 int slot, err; 3029 3030 rw_enter(&Adapter->chip_lock, RW_WRITER); 3031 3032 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3033 rw_exit(&Adapter->chip_lock); 3034 return (ECANCELED); 3035 } 3036 3037 slot = e1000g_unicst_find(Adapter, mac_addr); 3038 if (slot == -1) { 3039 rw_exit(&Adapter->chip_lock); 3040 return (EINVAL); 3041 } 3042 3043 ASSERT(Adapter->unicst_addr[slot].mac.set); 3044 3045 /* Clear this slot */ 3046 err = e1000g_unicst_set(Adapter, NULL, slot); 3047 if (err == 0) 3048 Adapter->unicst_avail++; 3049 3050 rw_exit(&Adapter->chip_lock); 3051 3052 return (err); 3053 } 3054 3055 static int 3056 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 3057 { 3058 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh; 3059 3060 mutex_enter(&rx_ring->rx_lock); 3061 rx_ring->ring_gen_num = mr_gen_num; 3062 mutex_exit(&rx_ring->rx_lock); 3063 return (0); 3064 } 3065 3066 /* 3067 * Callback funtion for MAC layer to register all rings. 3068 * 3069 * The hardware supports a single group with currently only one ring 3070 * available. 3071 * Though not offering virtualization ability per se, exposing the 3072 * group/ring still enables the polling and interrupt toggling. 3073 */ 3074 /* ARGSUSED */ 3075 void 3076 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index, 3077 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 3078 { 3079 struct e1000g *Adapter = (struct e1000g *)arg; 3080 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring; 3081 mac_intr_t *mintr; 3082 3083 /* 3084 * We advertised only RX group/rings, so the MAC framework shouldn't 3085 * ask for any thing else. 
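 *
 * (The single RX group/ring pair is advertised from
 * e1000g_m_getcapab(MAC_CAPAB_RINGS) further down, which reports
 * mr_rnum = mr_gnum = 1 and points mr_rget/mr_gget at e1000g_fill_ring
 * and e1000g_fill_group.)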
3086 */ 3087 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0); 3088 3089 rx_ring->mrh = rx_ring->mrh_init = rh; 3090 infop->mri_driver = (mac_ring_driver_t)rx_ring; 3091 infop->mri_start = e1000g_ring_start; 3092 infop->mri_stop = NULL; 3093 infop->mri_poll = e1000g_poll_ring; 3094 infop->mri_stat = e1000g_rx_ring_stat; 3095 3096 /* Ring level interrupts */ 3097 mintr = &infop->mri_intr; 3098 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 3099 mintr->mi_enable = e1000g_rx_ring_intr_enable; 3100 mintr->mi_disable = e1000g_rx_ring_intr_disable; 3101 if (Adapter->msi_enable) 3102 mintr->mi_ddi_handle = Adapter->htable[0]; 3103 } 3104 3105 /* ARGSUSED */ 3106 static void 3107 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index, 3108 mac_group_info_t *infop, mac_group_handle_t gh) 3109 { 3110 struct e1000g *Adapter = (struct e1000g *)arg; 3111 mac_intr_t *mintr; 3112 3113 /* 3114 * We advertised a single RX ring. Getting a request for anything else 3115 * signifies a bug in the MAC framework. 3116 */ 3117 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0); 3118 3119 Adapter->rx_group = gh; 3120 3121 infop->mgi_driver = (mac_group_driver_t)Adapter; 3122 infop->mgi_start = NULL; 3123 infop->mgi_stop = NULL; 3124 infop->mgi_addmac = e1000g_addmac; 3125 infop->mgi_remmac = e1000g_remmac; 3126 infop->mgi_count = 1; 3127 3128 /* Group level interrupts */ 3129 mintr = &infop->mgi_intr; 3130 mintr->mi_handle = (mac_intr_handle_t)Adapter; 3131 mintr->mi_enable = e1000g_rx_group_intr_enable; 3132 mintr->mi_disable = e1000g_rx_group_intr_disable; 3133 } 3134 3135 static boolean_t 3136 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3137 { 3138 struct e1000g *Adapter = (struct e1000g *)arg; 3139 3140 switch (cap) { 3141 case MAC_CAPAB_HCKSUM: { 3142 uint32_t *txflags = cap_data; 3143 3144 if (Adapter->tx_hcksum_enable) 3145 *txflags = HCKSUM_IPHDRCKSUM | 3146 HCKSUM_INET_PARTIAL; 3147 else 3148 return (B_FALSE); 3149 break; 3150 } 3151 3152 case MAC_CAPAB_LSO: { 3153 mac_capab_lso_t *cap_lso = cap_data; 3154 3155 if (Adapter->lso_enable) { 3156 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 3157 cap_lso->lso_basic_tcp_ipv4.lso_max = 3158 E1000_LSO_MAXLEN; 3159 } else 3160 return (B_FALSE); 3161 break; 3162 } 3163 case MAC_CAPAB_RINGS: { 3164 mac_capab_rings_t *cap_rings = cap_data; 3165 3166 /* No TX rings exposed yet */ 3167 if (cap_rings->mr_type != MAC_RING_TYPE_RX) 3168 return (B_FALSE); 3169 3170 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC; 3171 cap_rings->mr_rnum = 1; 3172 cap_rings->mr_gnum = 1; 3173 cap_rings->mr_rget = e1000g_fill_ring; 3174 cap_rings->mr_gget = e1000g_fill_group; 3175 break; 3176 } 3177 default: 3178 return (B_FALSE); 3179 } 3180 return (B_TRUE); 3181 } 3182 3183 static boolean_t 3184 e1000g_param_locked(mac_prop_id_t pr_num) 3185 { 3186 /* 3187 * All en_* parameters are locked (read-only) while 3188 * the device is in any sort of loopback mode ... 
3189 */ 3190 switch (pr_num) { 3191 case MAC_PROP_EN_1000FDX_CAP: 3192 case MAC_PROP_EN_1000HDX_CAP: 3193 case MAC_PROP_EN_100FDX_CAP: 3194 case MAC_PROP_EN_100HDX_CAP: 3195 case MAC_PROP_EN_10FDX_CAP: 3196 case MAC_PROP_EN_10HDX_CAP: 3197 case MAC_PROP_AUTONEG: 3198 case MAC_PROP_FLOWCTRL: 3199 return (B_TRUE); 3200 } 3201 return (B_FALSE); 3202 } 3203 3204 /* 3205 * callback function for set/get of properties 3206 */ 3207 static int 3208 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3209 uint_t pr_valsize, const void *pr_val) 3210 { 3211 struct e1000g *Adapter = arg; 3212 struct e1000_hw *hw = &Adapter->shared; 3213 struct e1000_fc_info *fc = &Adapter->shared.fc; 3214 int err = 0; 3215 link_flowctrl_t flowctrl; 3216 uint32_t cur_mtu, new_mtu; 3217 3218 rw_enter(&Adapter->chip_lock, RW_WRITER); 3219 3220 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3221 rw_exit(&Adapter->chip_lock); 3222 return (ECANCELED); 3223 } 3224 3225 if (Adapter->loopback_mode != E1000G_LB_NONE && 3226 e1000g_param_locked(pr_num)) { 3227 /* 3228 * All en_* parameters are locked (read-only) 3229 * while the device is in any sort of loopback mode. 3230 */ 3231 rw_exit(&Adapter->chip_lock); 3232 return (EBUSY); 3233 } 3234 3235 switch (pr_num) { 3236 case MAC_PROP_EN_1000FDX_CAP: 3237 if (hw->phy.media_type != e1000_media_type_copper) { 3238 err = ENOTSUP; 3239 break; 3240 } 3241 Adapter->param_en_1000fdx = *(uint8_t *)pr_val; 3242 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val; 3243 goto reset; 3244 case MAC_PROP_EN_100FDX_CAP: 3245 if (hw->phy.media_type != e1000_media_type_copper) { 3246 err = ENOTSUP; 3247 break; 3248 } 3249 Adapter->param_en_100fdx = *(uint8_t *)pr_val; 3250 Adapter->param_adv_100fdx = *(uint8_t *)pr_val; 3251 goto reset; 3252 case MAC_PROP_EN_100HDX_CAP: 3253 if (hw->phy.media_type != e1000_media_type_copper) { 3254 err = ENOTSUP; 3255 break; 3256 } 3257 Adapter->param_en_100hdx = *(uint8_t *)pr_val; 3258 Adapter->param_adv_100hdx = *(uint8_t *)pr_val; 3259 goto reset; 3260 case MAC_PROP_EN_10FDX_CAP: 3261 if (hw->phy.media_type != e1000_media_type_copper) { 3262 err = ENOTSUP; 3263 break; 3264 } 3265 Adapter->param_en_10fdx = *(uint8_t *)pr_val; 3266 Adapter->param_adv_10fdx = *(uint8_t *)pr_val; 3267 goto reset; 3268 case MAC_PROP_EN_10HDX_CAP: 3269 if (hw->phy.media_type != e1000_media_type_copper) { 3270 err = ENOTSUP; 3271 break; 3272 } 3273 Adapter->param_en_10hdx = *(uint8_t *)pr_val; 3274 Adapter->param_adv_10hdx = *(uint8_t *)pr_val; 3275 goto reset; 3276 case MAC_PROP_AUTONEG: 3277 if (hw->phy.media_type != e1000_media_type_copper) { 3278 err = ENOTSUP; 3279 break; 3280 } 3281 Adapter->param_adv_autoneg = *(uint8_t *)pr_val; 3282 goto reset; 3283 case MAC_PROP_FLOWCTRL: 3284 fc->send_xon = B_TRUE; 3285 bcopy(pr_val, &flowctrl, sizeof (flowctrl)); 3286 3287 switch (flowctrl) { 3288 default: 3289 err = EINVAL; 3290 break; 3291 case LINK_FLOWCTRL_NONE: 3292 fc->requested_mode = e1000_fc_none; 3293 break; 3294 case LINK_FLOWCTRL_RX: 3295 fc->requested_mode = e1000_fc_rx_pause; 3296 break; 3297 case LINK_FLOWCTRL_TX: 3298 fc->requested_mode = e1000_fc_tx_pause; 3299 break; 3300 case LINK_FLOWCTRL_BI: 3301 fc->requested_mode = e1000_fc_full; 3302 break; 3303 } 3304 reset: 3305 if (err == 0) { 3306 /* check PCH limits & reset the link */ 3307 e1000g_pch_limits(Adapter); 3308 if (e1000g_reset_link(Adapter) != DDI_SUCCESS) 3309 err = EINVAL; 3310 } 3311 break; 3312 case MAC_PROP_ADV_1000FDX_CAP: 3313 case MAC_PROP_ADV_1000HDX_CAP: 3314 case MAC_PROP_ADV_100FDX_CAP: 
3315 case MAC_PROP_ADV_100HDX_CAP: 3316 case MAC_PROP_ADV_10FDX_CAP: 3317 case MAC_PROP_ADV_10HDX_CAP: 3318 case MAC_PROP_EN_1000HDX_CAP: 3319 case MAC_PROP_STATUS: 3320 case MAC_PROP_SPEED: 3321 case MAC_PROP_DUPLEX: 3322 err = ENOTSUP; /* read-only prop. Can't set this. */ 3323 break; 3324 case MAC_PROP_MTU: 3325 /* adapter must be stopped for an MTU change */ 3326 if (Adapter->e1000g_state & E1000G_STARTED) { 3327 err = EBUSY; 3328 break; 3329 } 3330 3331 cur_mtu = Adapter->default_mtu; 3332 3333 /* get new requested MTU */ 3334 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 3335 if (new_mtu == cur_mtu) { 3336 err = 0; 3337 break; 3338 } 3339 3340 if ((new_mtu < DEFAULT_MTU) || 3341 (new_mtu > Adapter->max_mtu)) { 3342 err = EINVAL; 3343 break; 3344 } 3345 3346 /* inform MAC framework of new MTU */ 3347 err = mac_maxsdu_update(Adapter->mh, new_mtu); 3348 3349 if (err == 0) { 3350 Adapter->default_mtu = new_mtu; 3351 Adapter->max_frame_size = 3352 e1000g_mtu2maxframe(new_mtu); 3353 3354 /* 3355 * check PCH limits & set buffer sizes to 3356 * match new MTU 3357 */ 3358 e1000g_pch_limits(Adapter); 3359 e1000g_set_bufsize(Adapter); 3360 3361 /* 3362 * decrease the number of descriptors and free 3363 * packets for jumbo frames to reduce tx/rx 3364 * resource consumption 3365 */ 3366 if (Adapter->max_frame_size >= 3367 (FRAME_SIZE_UPTO_4K)) { 3368 if (Adapter->tx_desc_num_flag == 0) 3369 Adapter->tx_desc_num = 3370 DEFAULT_JUMBO_NUM_TX_DESC; 3371 3372 if (Adapter->rx_desc_num_flag == 0) 3373 Adapter->rx_desc_num = 3374 DEFAULT_JUMBO_NUM_RX_DESC; 3375 3376 if (Adapter->tx_buf_num_flag == 0) 3377 Adapter->tx_freelist_num = 3378 DEFAULT_JUMBO_NUM_TX_BUF; 3379 3380 if (Adapter->rx_buf_num_flag == 0) 3381 Adapter->rx_freelist_limit = 3382 DEFAULT_JUMBO_NUM_RX_BUF; 3383 } else { 3384 if (Adapter->tx_desc_num_flag == 0) 3385 Adapter->tx_desc_num = 3386 DEFAULT_NUM_TX_DESCRIPTOR; 3387 3388 if (Adapter->rx_desc_num_flag == 0) 3389 Adapter->rx_desc_num = 3390 DEFAULT_NUM_RX_DESCRIPTOR; 3391 3392 if (Adapter->tx_buf_num_flag == 0) 3393 Adapter->tx_freelist_num = 3394 DEFAULT_NUM_TX_FREELIST; 3395 3396 if (Adapter->rx_buf_num_flag == 0) 3397 Adapter->rx_freelist_limit = 3398 DEFAULT_NUM_RX_FREELIST; 3399 } 3400 } 3401 break; 3402 case MAC_PROP_PRIVATE: 3403 err = e1000g_set_priv_prop(Adapter, pr_name, 3404 pr_valsize, pr_val); 3405 break; 3406 default: 3407 err = ENOTSUP; 3408 break; 3409 } 3410 rw_exit(&Adapter->chip_lock); 3411 return (err); 3412 } 3413 3414 static int 3415 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3416 uint_t pr_valsize, void *pr_val) 3417 { 3418 struct e1000g *Adapter = arg; 3419 struct e1000_fc_info *fc = &Adapter->shared.fc; 3420 int err = 0; 3421 link_flowctrl_t flowctrl; 3422 uint64_t tmp = 0; 3423 3424 switch (pr_num) { 3425 case MAC_PROP_DUPLEX: 3426 ASSERT(pr_valsize >= sizeof (link_duplex_t)); 3427 bcopy(&Adapter->link_duplex, pr_val, 3428 sizeof (link_duplex_t)); 3429 break; 3430 case MAC_PROP_SPEED: 3431 ASSERT(pr_valsize >= sizeof (uint64_t)); 3432 tmp = Adapter->link_speed * 1000000ull; 3433 bcopy(&tmp, pr_val, sizeof (tmp)); 3434 break; 3435 case MAC_PROP_AUTONEG: 3436 *(uint8_t *)pr_val = Adapter->param_adv_autoneg; 3437 break; 3438 case MAC_PROP_FLOWCTRL: 3439 ASSERT(pr_valsize >= sizeof (link_flowctrl_t)); 3440 switch (fc->current_mode) { 3441 case e1000_fc_none: 3442 flowctrl = LINK_FLOWCTRL_NONE; 3443 break; 3444 case e1000_fc_rx_pause: 3445 flowctrl = LINK_FLOWCTRL_RX; 3446 break; 3447 case e1000_fc_tx_pause: 3448 flowctrl = 
LINK_FLOWCTRL_TX; 3449 break; 3450 case e1000_fc_full: 3451 flowctrl = LINK_FLOWCTRL_BI; 3452 break; 3453 } 3454 bcopy(&flowctrl, pr_val, sizeof (flowctrl)); 3455 break; 3456 case MAC_PROP_ADV_1000FDX_CAP: 3457 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx; 3458 break; 3459 case MAC_PROP_EN_1000FDX_CAP: 3460 *(uint8_t *)pr_val = Adapter->param_en_1000fdx; 3461 break; 3462 case MAC_PROP_ADV_1000HDX_CAP: 3463 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx; 3464 break; 3465 case MAC_PROP_EN_1000HDX_CAP: 3466 *(uint8_t *)pr_val = Adapter->param_en_1000hdx; 3467 break; 3468 case MAC_PROP_ADV_100FDX_CAP: 3469 *(uint8_t *)pr_val = Adapter->param_adv_100fdx; 3470 break; 3471 case MAC_PROP_EN_100FDX_CAP: 3472 *(uint8_t *)pr_val = Adapter->param_en_100fdx; 3473 break; 3474 case MAC_PROP_ADV_100HDX_CAP: 3475 *(uint8_t *)pr_val = Adapter->param_adv_100hdx; 3476 break; 3477 case MAC_PROP_EN_100HDX_CAP: 3478 *(uint8_t *)pr_val = Adapter->param_en_100hdx; 3479 break; 3480 case MAC_PROP_ADV_10FDX_CAP: 3481 *(uint8_t *)pr_val = Adapter->param_adv_10fdx; 3482 break; 3483 case MAC_PROP_EN_10FDX_CAP: 3484 *(uint8_t *)pr_val = Adapter->param_en_10fdx; 3485 break; 3486 case MAC_PROP_ADV_10HDX_CAP: 3487 *(uint8_t *)pr_val = Adapter->param_adv_10hdx; 3488 break; 3489 case MAC_PROP_EN_10HDX_CAP: 3490 *(uint8_t *)pr_val = Adapter->param_en_10hdx; 3491 break; 3492 case MAC_PROP_ADV_100T4_CAP: 3493 case MAC_PROP_EN_100T4_CAP: 3494 *(uint8_t *)pr_val = Adapter->param_adv_100t4; 3495 break; 3496 case MAC_PROP_PRIVATE: 3497 err = e1000g_get_priv_prop(Adapter, pr_name, 3498 pr_valsize, pr_val); 3499 break; 3500 default: 3501 err = ENOTSUP; 3502 break; 3503 } 3504 3505 return (err); 3506 } 3507 3508 static void 3509 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3510 mac_prop_info_handle_t prh) 3511 { 3512 struct e1000g *Adapter = arg; 3513 struct e1000_hw *hw = &Adapter->shared; 3514 3515 switch (pr_num) { 3516 case MAC_PROP_DUPLEX: 3517 case MAC_PROP_SPEED: 3518 case MAC_PROP_ADV_1000FDX_CAP: 3519 case MAC_PROP_ADV_1000HDX_CAP: 3520 case MAC_PROP_ADV_100FDX_CAP: 3521 case MAC_PROP_ADV_100HDX_CAP: 3522 case MAC_PROP_ADV_10FDX_CAP: 3523 case MAC_PROP_ADV_10HDX_CAP: 3524 case MAC_PROP_ADV_100T4_CAP: 3525 case MAC_PROP_EN_100T4_CAP: 3526 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3527 break; 3528 3529 case MAC_PROP_EN_1000FDX_CAP: 3530 if (hw->phy.media_type != e1000_media_type_copper) { 3531 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3532 } else { 3533 mac_prop_info_set_default_uint8(prh, 3534 ((Adapter->phy_ext_status & 3535 IEEE_ESR_1000T_FD_CAPS) || 3536 (Adapter->phy_ext_status & 3537 IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0); 3538 } 3539 break; 3540 3541 case MAC_PROP_EN_100FDX_CAP: 3542 if (hw->phy.media_type != e1000_media_type_copper) { 3543 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3544 } else { 3545 mac_prop_info_set_default_uint8(prh, 3546 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 3547 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 3548 ? 1 : 0); 3549 } 3550 break; 3551 3552 case MAC_PROP_EN_100HDX_CAP: 3553 if (hw->phy.media_type != e1000_media_type_copper) { 3554 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3555 } else { 3556 mac_prop_info_set_default_uint8(prh, 3557 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) || 3558 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) 3559 ? 
1 : 0); 3560 } 3561 break; 3562 3563 case MAC_PROP_EN_10FDX_CAP: 3564 if (hw->phy.media_type != e1000_media_type_copper) { 3565 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3566 } else { 3567 mac_prop_info_set_default_uint8(prh, 3568 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0); 3569 } 3570 break; 3571 3572 case MAC_PROP_EN_10HDX_CAP: 3573 if (hw->phy.media_type != e1000_media_type_copper) { 3574 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3575 } else { 3576 mac_prop_info_set_default_uint8(prh, 3577 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0); 3578 } 3579 break; 3580 3581 case MAC_PROP_EN_1000HDX_CAP: 3582 if (hw->phy.media_type != e1000_media_type_copper) 3583 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3584 break; 3585 3586 case MAC_PROP_AUTONEG: 3587 if (hw->phy.media_type != e1000_media_type_copper) { 3588 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3589 } else { 3590 mac_prop_info_set_default_uint8(prh, 3591 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) 3592 ? 1 : 0); 3593 } 3594 break; 3595 3596 case MAC_PROP_FLOWCTRL: 3597 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI); 3598 break; 3599 3600 case MAC_PROP_MTU: { 3601 struct e1000_mac_info *mac = &Adapter->shared.mac; 3602 struct e1000_phy_info *phy = &Adapter->shared.phy; 3603 uint32_t max; 3604 3605 /* some MAC types do not support jumbo frames */ 3606 if ((mac->type == e1000_ich8lan) || 3607 ((mac->type == e1000_ich9lan) && (phy->type == 3608 e1000_phy_ife))) { 3609 max = DEFAULT_MTU; 3610 } else { 3611 max = Adapter->max_mtu; 3612 } 3613 3614 mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max); 3615 break; 3616 } 3617 case MAC_PROP_PRIVATE: { 3618 char valstr[64]; 3619 int value; 3620 3621 if (strcmp(pr_name, "_adv_pause_cap") == 0 || 3622 strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 3623 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3624 return; 3625 } else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3626 value = DEFAULT_TX_BCOPY_THRESHOLD; 3627 } else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3628 value = DEFAULT_TX_INTR_ENABLE; 3629 } else if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3630 value = DEFAULT_TX_INTR_DELAY; 3631 } else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3632 value = DEFAULT_TX_INTR_ABS_DELAY; 3633 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3634 value = DEFAULT_RX_BCOPY_THRESHOLD; 3635 } else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3636 value = DEFAULT_RX_LIMIT_ON_INTR; 3637 } else if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3638 value = DEFAULT_RX_INTR_DELAY; 3639 } else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3640 value = DEFAULT_RX_INTR_ABS_DELAY; 3641 } else if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3642 value = DEFAULT_INTR_THROTTLING; 3643 } else if (strcmp(pr_name, "_intr_adaptive") == 0) { 3644 value = 1; 3645 } else { 3646 return; 3647 } 3648 3649 (void) snprintf(valstr, sizeof (valstr), "%d", value); 3650 mac_prop_info_set_default_str(prh, valstr); 3651 break; 3652 } 3653 } 3654 } 3655 3656 /* ARGSUSED2 */ 3657 static int 3658 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name, 3659 uint_t pr_valsize, const void *pr_val) 3660 { 3661 int err = 0; 3662 long result; 3663 struct e1000_hw *hw = &Adapter->shared; 3664 3665 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3666 if (pr_val == NULL) { 3667 err = EINVAL; 3668 return (err); 3669 } 3670 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3671 if (result < MIN_TX_BCOPY_THRESHOLD || 3672 result > 
MAX_TX_BCOPY_THRESHOLD) 3673 err = EINVAL; 3674 else { 3675 Adapter->tx_bcopy_thresh = (uint32_t)result; 3676 } 3677 return (err); 3678 } 3679 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3680 if (pr_val == NULL) { 3681 err = EINVAL; 3682 return (err); 3683 } 3684 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3685 if (result < 0 || result > 1) 3686 err = EINVAL; 3687 else { 3688 Adapter->tx_intr_enable = (result == 1) ? 3689 B_TRUE: B_FALSE; 3690 if (Adapter->tx_intr_enable) 3691 e1000g_mask_tx_interrupt(Adapter); 3692 else 3693 e1000g_clear_tx_interrupt(Adapter); 3694 if (e1000g_check_acc_handle( 3695 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3696 ddi_fm_service_impact(Adapter->dip, 3697 DDI_SERVICE_DEGRADED); 3698 err = EIO; 3699 } 3700 } 3701 return (err); 3702 } 3703 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3704 if (pr_val == NULL) { 3705 err = EINVAL; 3706 return (err); 3707 } 3708 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3709 if (result < MIN_TX_INTR_DELAY || 3710 result > MAX_TX_INTR_DELAY) 3711 err = EINVAL; 3712 else { 3713 Adapter->tx_intr_delay = (uint32_t)result; 3714 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay); 3715 if (e1000g_check_acc_handle( 3716 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3717 ddi_fm_service_impact(Adapter->dip, 3718 DDI_SERVICE_DEGRADED); 3719 err = EIO; 3720 } 3721 } 3722 return (err); 3723 } 3724 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3725 if (pr_val == NULL) { 3726 err = EINVAL; 3727 return (err); 3728 } 3729 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3730 if (result < MIN_TX_INTR_ABS_DELAY || 3731 result > MAX_TX_INTR_ABS_DELAY) 3732 err = EINVAL; 3733 else { 3734 Adapter->tx_intr_abs_delay = (uint32_t)result; 3735 E1000_WRITE_REG(hw, E1000_TADV, 3736 Adapter->tx_intr_abs_delay); 3737 if (e1000g_check_acc_handle( 3738 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3739 ddi_fm_service_impact(Adapter->dip, 3740 DDI_SERVICE_DEGRADED); 3741 err = EIO; 3742 } 3743 } 3744 return (err); 3745 } 3746 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3747 if (pr_val == NULL) { 3748 err = EINVAL; 3749 return (err); 3750 } 3751 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3752 if (result < MIN_RX_BCOPY_THRESHOLD || 3753 result > MAX_RX_BCOPY_THRESHOLD) 3754 err = EINVAL; 3755 else 3756 Adapter->rx_bcopy_thresh = (uint32_t)result; 3757 return (err); 3758 } 3759 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3760 if (pr_val == NULL) { 3761 err = EINVAL; 3762 return (err); 3763 } 3764 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3765 if (result < MIN_RX_LIMIT_ON_INTR || 3766 result > MAX_RX_LIMIT_ON_INTR) 3767 err = EINVAL; 3768 else 3769 Adapter->rx_limit_onintr = (uint32_t)result; 3770 return (err); 3771 } 3772 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3773 if (pr_val == NULL) { 3774 err = EINVAL; 3775 return (err); 3776 } 3777 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3778 if (result < MIN_RX_INTR_DELAY || 3779 result > MAX_RX_INTR_DELAY) 3780 err = EINVAL; 3781 else { 3782 Adapter->rx_intr_delay = (uint32_t)result; 3783 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay); 3784 if (e1000g_check_acc_handle( 3785 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3786 ddi_fm_service_impact(Adapter->dip, 3787 DDI_SERVICE_DEGRADED); 3788 err = EIO; 3789 } 3790 } 3791 return (err); 3792 } 3793 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3794 if (pr_val == NULL) { 3795 err = EINVAL; 3796 return (err); 3797 } 3798 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 
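		/*
		 * (As with the other private properties in this function,
		 * the value arrives as a string -- for example from
		 * "dladm set-linkprop -p _rx_intr_abs_delay=<value> e1000gN"
		 * -- and is parsed with ddi_strtol(), range-checked, written
		 * to the register, and then the register access handle is
		 * verified for FMA.)
		 */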
3799 if (result < MIN_RX_INTR_ABS_DELAY || 3800 result > MAX_RX_INTR_ABS_DELAY) 3801 err = EINVAL; 3802 else { 3803 Adapter->rx_intr_abs_delay = (uint32_t)result; 3804 E1000_WRITE_REG(hw, E1000_RADV, 3805 Adapter->rx_intr_abs_delay); 3806 if (e1000g_check_acc_handle( 3807 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3808 ddi_fm_service_impact(Adapter->dip, 3809 DDI_SERVICE_DEGRADED); 3810 err = EIO; 3811 } 3812 } 3813 return (err); 3814 } 3815 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3816 if (pr_val == NULL) { 3817 err = EINVAL; 3818 return (err); 3819 } 3820 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3821 if (result < MIN_INTR_THROTTLING || 3822 result > MAX_INTR_THROTTLING) 3823 err = EINVAL; 3824 else { 3825 if (hw->mac.type >= e1000_82540) { 3826 Adapter->intr_throttling_rate = 3827 (uint32_t)result; 3828 E1000_WRITE_REG(hw, E1000_ITR, 3829 Adapter->intr_throttling_rate); 3830 if (e1000g_check_acc_handle( 3831 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3832 ddi_fm_service_impact(Adapter->dip, 3833 DDI_SERVICE_DEGRADED); 3834 err = EIO; 3835 } 3836 } else 3837 err = EINVAL; 3838 } 3839 return (err); 3840 } 3841 if (strcmp(pr_name, "_intr_adaptive") == 0) { 3842 if (pr_val == NULL) { 3843 err = EINVAL; 3844 return (err); 3845 } 3846 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3847 if (result < 0 || result > 1) 3848 err = EINVAL; 3849 else { 3850 if (hw->mac.type >= e1000_82540) { 3851 Adapter->intr_adaptive = (result == 1) ? 3852 B_TRUE : B_FALSE; 3853 } else { 3854 err = EINVAL; 3855 } 3856 } 3857 return (err); 3858 } 3859 return (ENOTSUP); 3860 } 3861 3862 static int 3863 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name, 3864 uint_t pr_valsize, void *pr_val) 3865 { 3866 int err = ENOTSUP; 3867 int value; 3868 3869 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 3870 value = Adapter->param_adv_pause; 3871 err = 0; 3872 goto done; 3873 } 3874 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 3875 value = Adapter->param_adv_asym_pause; 3876 err = 0; 3877 goto done; 3878 } 3879 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3880 value = Adapter->tx_bcopy_thresh; 3881 err = 0; 3882 goto done; 3883 } 3884 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3885 value = Adapter->tx_intr_enable; 3886 err = 0; 3887 goto done; 3888 } 3889 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3890 value = Adapter->tx_intr_delay; 3891 err = 0; 3892 goto done; 3893 } 3894 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3895 value = Adapter->tx_intr_abs_delay; 3896 err = 0; 3897 goto done; 3898 } 3899 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3900 value = Adapter->rx_bcopy_thresh; 3901 err = 0; 3902 goto done; 3903 } 3904 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3905 value = Adapter->rx_limit_onintr; 3906 err = 0; 3907 goto done; 3908 } 3909 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3910 value = Adapter->rx_intr_delay; 3911 err = 0; 3912 goto done; 3913 } 3914 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3915 value = Adapter->rx_intr_abs_delay; 3916 err = 0; 3917 goto done; 3918 } 3919 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3920 value = Adapter->intr_throttling_rate; 3921 err = 0; 3922 goto done; 3923 } 3924 if (strcmp(pr_name, "_intr_adaptive") == 0) { 3925 value = Adapter->intr_adaptive; 3926 err = 0; 3927 goto done; 3928 } 3929 done: 3930 if (err == 0) { 3931 (void) snprintf(pr_val, pr_valsize, "%d", value); 3932 } 3933 return (err); 3934 } 3935 3936 /* 3937 * e1000g_get_conf - get configurations set in 
e1000g.conf
3938  * This routine gets user-configured values out of the configuration
3939  * file e1000g.conf.
3940  *
3941  * For each configurable value, there is a minimum, a maximum, and a
3942  * default.
3943  * If the user does not configure a value, use the default.
3944  * If the user configures below the minimum, use the minimum.
3945  * If the user configures above the maximum, use the maximum.
3946  */
3947 static void
3948 e1000g_get_conf(struct e1000g *Adapter)
3949 {
3950 	struct e1000_hw *hw = &Adapter->shared;
3951 	boolean_t tbi_compatibility = B_FALSE;
3952 	boolean_t is_jumbo = B_FALSE;
3953 	int propval;
3954 	/*
3955 	 * decrease the number of descriptors and free packets
3956 	 * for jumbo frames to reduce tx/rx resource consumption
3957 	 */
3958 	if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) {
3959 		is_jumbo = B_TRUE;
3960 	}
3961 
3962 	/*
3963 	 * get each configurable property from e1000g.conf
3964 	 */
3965 
3966 	/*
3967 	 * NumTxDescriptors
3968 	 */
3969 	Adapter->tx_desc_num_flag =
3970 	    e1000g_get_prop(Adapter, "NumTxDescriptors",
3971 	    MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
3972 	    is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC
3973 	    : DEFAULT_NUM_TX_DESCRIPTOR, &propval);
3974 	Adapter->tx_desc_num = propval;
3975 
3976 	/*
3977 	 * NumRxDescriptors
3978 	 */
3979 	Adapter->rx_desc_num_flag =
3980 	    e1000g_get_prop(Adapter, "NumRxDescriptors",
3981 	    MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
3982 	    is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC
3983 	    : DEFAULT_NUM_RX_DESCRIPTOR, &propval);
3984 	Adapter->rx_desc_num = propval;
3985 
3986 	/*
3987 	 * NumRxFreeList
3988 	 */
3989 	Adapter->rx_buf_num_flag =
3990 	    e1000g_get_prop(Adapter, "NumRxFreeList",
3991 	    MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
3992 	    is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF
3993 	    : DEFAULT_NUM_RX_FREELIST, &propval);
3994 	Adapter->rx_freelist_limit = propval;
3995 
3996 	/*
3997 	 * NumTxPacketList
3998 	 */
3999 	Adapter->tx_buf_num_flag =
4000 	    e1000g_get_prop(Adapter, "NumTxPacketList",
4001 	    MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
4002 	    is_jumbo ?
DEFAULT_JUMBO_NUM_TX_BUF 4003 : DEFAULT_NUM_TX_FREELIST, &propval); 4004 Adapter->tx_freelist_num = propval; 4005 4006 /* 4007 * FlowControl 4008 */ 4009 hw->fc.send_xon = B_TRUE; 4010 (void) e1000g_get_prop(Adapter, "FlowControl", 4011 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval); 4012 hw->fc.requested_mode = propval; 4013 /* 4 is the setting that says "let the eeprom decide" */ 4014 if (hw->fc.requested_mode == 4) 4015 hw->fc.requested_mode = e1000_fc_default; 4016 4017 /* 4018 * Max Num Receive Packets on Interrupt 4019 */ 4020 (void) e1000g_get_prop(Adapter, "MaxNumReceivePackets", 4021 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR, 4022 DEFAULT_RX_LIMIT_ON_INTR, &propval); 4023 Adapter->rx_limit_onintr = propval; 4024 4025 /* 4026 * PHY master slave setting 4027 */ 4028 (void) e1000g_get_prop(Adapter, "SetMasterSlave", 4029 e1000_ms_hw_default, e1000_ms_auto, 4030 e1000_ms_hw_default, &propval); 4031 hw->phy.ms_type = propval; 4032 4033 /* 4034 * Parameter which controls TBI mode workaround, which is only 4035 * needed on certain switches such as Cisco 6500/Foundry 4036 */ 4037 (void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable", 4038 0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval); 4039 tbi_compatibility = (propval == 1); 4040 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility); 4041 4042 /* 4043 * MSI Enable 4044 */ 4045 (void) e1000g_get_prop(Adapter, "MSIEnable", 4046 0, 1, DEFAULT_MSI_ENABLE, &propval); 4047 Adapter->msi_enable = (propval == 1); 4048 4049 /* 4050 * Interrupt Throttling Rate 4051 */ 4052 (void) e1000g_get_prop(Adapter, "intr_throttling_rate", 4053 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING, 4054 DEFAULT_INTR_THROTTLING, &propval); 4055 Adapter->intr_throttling_rate = propval; 4056 4057 /* 4058 * Adaptive Interrupt Blanking Enable/Disable 4059 * It is enabled by default 4060 */ 4061 (void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1, 4062 &propval); 4063 Adapter->intr_adaptive = (propval == 1); 4064 4065 /* 4066 * Hardware checksum enable/disable parameter 4067 */ 4068 (void) e1000g_get_prop(Adapter, "tx_hcksum_enable", 4069 0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval); 4070 Adapter->tx_hcksum_enable = (propval == 1); 4071 /* 4072 * Checksum on/off selection via global parameters. 4073 * 4074 * If the chip is flagged as not capable of (correctly) 4075 * handling checksumming, we don't enable it on either 4076 * Rx or Tx side. Otherwise, we take this chip's settings 4077 * from the patchable global defaults. 4078 * 4079 * We advertise our capabilities only if TX offload is 4080 * enabled. On receive, the stack will accept checksummed 4081 * packets anyway, even if we haven't said we can deliver 4082 * them. 4083 */ 4084 switch (hw->mac.type) { 4085 case e1000_82540: 4086 case e1000_82544: 4087 case e1000_82545: 4088 case e1000_82545_rev_3: 4089 case e1000_82546: 4090 case e1000_82546_rev_3: 4091 case e1000_82571: 4092 case e1000_82572: 4093 case e1000_82573: 4094 case e1000_80003es2lan: 4095 break; 4096 /* 4097 * For the following Intel PRO/1000 chipsets, we have not 4098 * tested the hardware checksum offload capability, so we 4099 * disable the capability for them. 4100 * e1000_82542, 4101 * e1000_82543, 4102 * e1000_82541, 4103 * e1000_82541_rev_2, 4104 * e1000_82547, 4105 * e1000_82547_rev_2, 4106 */ 4107 default: 4108 Adapter->tx_hcksum_enable = B_FALSE; 4109 } 4110 4111 /* 4112 * Large Send Offloading(LSO) Enable/Disable 4113 * If the tx hardware checksum is not enabled, LSO should be 4114 * disabled. 
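 *
 * As an illustration only (assuming the standard driver.conf(4) syntax
 * and using the property names read by this routine), a configuration
 * that keeps LSO usable must also leave transmit checksum offload on
 * in e1000g.conf:
 *
 *	tx_hcksum_enable = 1;
 *	lso_enable = 1;
 *
 * With tx_hcksum_enable = 0, the code below clears lso_enable again
 * no matter what was configured.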
4115 */
4116 (void) e1000g_get_prop(Adapter, "lso_enable",
4117 0, 1, DEFAULT_LSO_ENABLE, &propval);
4118 Adapter->lso_enable = (propval == 1);
4119
4120 switch (hw->mac.type) {
4121 case e1000_82546:
4122 case e1000_82546_rev_3:
4123 if (Adapter->lso_enable)
4124 Adapter->lso_premature_issue = B_TRUE;
4125 /* FALLTHRU */
4126 case e1000_82571:
4127 case e1000_82572:
4128 case e1000_82573:
4129 case e1000_80003es2lan:
4130 break;
4131 default:
4132 Adapter->lso_enable = B_FALSE;
4133 }
4134
4135 if (!Adapter->tx_hcksum_enable) {
4136 Adapter->lso_premature_issue = B_FALSE;
4137 Adapter->lso_enable = B_FALSE;
4138 }
4139
4140 /*
4141 * If mem_workaround_82546 is enabled, the rx buffer allocated by
4142 * e1000_82545, e1000_82546 and e1000_82546_rev_3
4143 * will not cross 64k boundary.
4144 */
4145 (void) e1000g_get_prop(Adapter, "mem_workaround_82546",
4146 0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval);
4147 Adapter->mem_workaround_82546 = (propval == 1);
4148
4149 /*
4150 * Max number of multicast addresses
4151 */
4152 (void) e1000g_get_prop(Adapter, "mcast_max_num",
4153 MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32,
4154 &propval);
4155 Adapter->mcast_max_num = propval;
4156 }
4157
4158 /*
4159 * e1000g_get_prop - routine to read properties
4160 *
4161 * Get a user-configured property value out of the configuration
4162 * file e1000g.conf.
4163 *
4164 * Caller provides the name of the property, a default value, a minimum
4165 * value, a maximum value and a pointer to the returned property
4166 * value.
4167 *
4168 * Return B_TRUE if the configured value of the property is not a default
4169 * value, otherwise return B_FALSE.
4170 */
4171 static boolean_t
4172 e1000g_get_prop(struct e1000g *Adapter, /* pointer to per-adapter structure */
4173 char *propname, /* name of the property */
4174 int minval, /* minimum acceptable value */
4175 int maxval, /* maximum acceptable value */
4176 int defval, /* default value */
4177 int *propvalue) /* property value returned to caller */
4178 {
4179 int propval; /* value returned for requested property */
4180 int *props; /* pointer to array of properties returned */
4181 uint_t nprops; /* number of property values returned */
4182 boolean_t ret = B_TRUE;
4183
4184 /*
4185 * get the array of properties from the config file
4186 */
4187 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
4188 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
4189 /* got some properties, test if we got enough */
4190 if (Adapter->instance < nprops) {
4191 propval = props[Adapter->instance];
4192 } else {
4193 /* not enough properties configured */
4194 propval = defval;
4195 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4196 "Not Enough %s values found in e1000g.conf"
4197 " - set to %d\n",
4198 propname, propval);
4199 ret = B_FALSE;
4200 }
4201
4202 /* free memory allocated for properties */
4203 ddi_prop_free(props);
4204
4205 } else {
4206 propval = defval;
4207 ret = B_FALSE;
4208 }
4209
4210 /*
4211 * enforce limits
4212 */
4213 if (propval > maxval) {
4214 propval = maxval;
4215 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4216 "Too High %s value in e1000g.conf - set to %d\n",
4217 propname, propval);
4218 }
4219
4220 if (propval < minval) {
4221 propval = minval;
4222 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4223 "Too Low %s value in e1000g.conf - set to %d\n",
4224 propname, propval);
4225 }
4226
4227 *propvalue = propval;
4228 return (ret);
4229 }
4230
4231 static boolean_t
4232 e1000g_link_check(struct e1000g *Adapter)
4233 {
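	/*
	 * Poll the current link status. When the state differs from what
	 * was last recorded, update Adapter->link_speed, link_duplex and
	 * link_state and return B_TRUE so the caller can notify the MAC
	 * layer; otherwise return B_FALSE.
	 */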
4234 uint16_t speed, duplex, phydata; 4235 boolean_t link_changed = B_FALSE; 4236 struct e1000_hw *hw; 4237 uint32_t reg_tarc; 4238 4239 hw = &Adapter->shared; 4240 4241 if (e1000g_link_up(Adapter)) { 4242 /* 4243 * The Link is up, check whether it was marked as down earlier 4244 */ 4245 if (Adapter->link_state != LINK_STATE_UP) { 4246 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex); 4247 Adapter->link_speed = speed; 4248 Adapter->link_duplex = duplex; 4249 Adapter->link_state = LINK_STATE_UP; 4250 link_changed = B_TRUE; 4251 4252 if (Adapter->link_speed == SPEED_1000) 4253 Adapter->stall_threshold = TX_STALL_TIME_2S; 4254 else 4255 Adapter->stall_threshold = TX_STALL_TIME_8S; 4256 4257 Adapter->tx_link_down_timeout = 0; 4258 4259 if ((hw->mac.type == e1000_82571) || 4260 (hw->mac.type == e1000_82572)) { 4261 reg_tarc = E1000_READ_REG(hw, E1000_TARC(0)); 4262 if (speed == SPEED_1000) 4263 reg_tarc |= (1 << 21); 4264 else 4265 reg_tarc &= ~(1 << 21); 4266 E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc); 4267 } 4268 } 4269 Adapter->smartspeed = 0; 4270 } else { 4271 if (Adapter->link_state != LINK_STATE_DOWN) { 4272 Adapter->link_speed = 0; 4273 Adapter->link_duplex = 0; 4274 Adapter->link_state = LINK_STATE_DOWN; 4275 link_changed = B_TRUE; 4276 4277 /* 4278 * SmartSpeed workaround for Tabor/TanaX, When the 4279 * driver loses link disable auto master/slave 4280 * resolution. 4281 */ 4282 if (hw->phy.type == e1000_phy_igp) { 4283 (void) e1000_read_phy_reg(hw, 4284 PHY_1000T_CTRL, &phydata); 4285 phydata |= CR_1000T_MS_ENABLE; 4286 (void) e1000_write_phy_reg(hw, 4287 PHY_1000T_CTRL, phydata); 4288 } 4289 } else { 4290 e1000g_smartspeed(Adapter); 4291 } 4292 4293 if (Adapter->e1000g_state & E1000G_STARTED) { 4294 if (Adapter->tx_link_down_timeout < 4295 MAX_TX_LINK_DOWN_TIMEOUT) { 4296 Adapter->tx_link_down_timeout++; 4297 } else if (Adapter->tx_link_down_timeout == 4298 MAX_TX_LINK_DOWN_TIMEOUT) { 4299 e1000g_tx_clean(Adapter); 4300 Adapter->tx_link_down_timeout++; 4301 } 4302 } 4303 } 4304 4305 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 4306 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4307 4308 return (link_changed); 4309 } 4310 4311 /* 4312 * e1000g_reset_link - Using the link properties to setup the link 4313 */ 4314 int 4315 e1000g_reset_link(struct e1000g *Adapter) 4316 { 4317 struct e1000_mac_info *mac; 4318 struct e1000_phy_info *phy; 4319 struct e1000_hw *hw; 4320 boolean_t invalid; 4321 4322 mac = &Adapter->shared.mac; 4323 phy = &Adapter->shared.phy; 4324 hw = &Adapter->shared; 4325 invalid = B_FALSE; 4326 4327 if (hw->phy.media_type != e1000_media_type_copper) 4328 goto out; 4329 4330 if (Adapter->param_adv_autoneg == 1) { 4331 mac->autoneg = B_TRUE; 4332 phy->autoneg_advertised = 0; 4333 4334 /* 4335 * 1000hdx is not supported for autonegotiation 4336 */ 4337 if (Adapter->param_adv_1000fdx == 1) 4338 phy->autoneg_advertised |= ADVERTISE_1000_FULL; 4339 4340 if (Adapter->param_adv_100fdx == 1) 4341 phy->autoneg_advertised |= ADVERTISE_100_FULL; 4342 4343 if (Adapter->param_adv_100hdx == 1) 4344 phy->autoneg_advertised |= ADVERTISE_100_HALF; 4345 4346 if (Adapter->param_adv_10fdx == 1) 4347 phy->autoneg_advertised |= ADVERTISE_10_FULL; 4348 4349 if (Adapter->param_adv_10hdx == 1) 4350 phy->autoneg_advertised |= ADVERTISE_10_HALF; 4351 4352 if (phy->autoneg_advertised == 0) 4353 invalid = B_TRUE; 4354 } else { 4355 mac->autoneg = B_FALSE; 4356 4357 /* 4358 * For Intel copper cards, 1000fdx and 1000hdx are not 4359 * supported for forced 
link 4360 */ 4361 if (Adapter->param_adv_100fdx == 1) 4362 mac->forced_speed_duplex = ADVERTISE_100_FULL; 4363 else if (Adapter->param_adv_100hdx == 1) 4364 mac->forced_speed_duplex = ADVERTISE_100_HALF; 4365 else if (Adapter->param_adv_10fdx == 1) 4366 mac->forced_speed_duplex = ADVERTISE_10_FULL; 4367 else if (Adapter->param_adv_10hdx == 1) 4368 mac->forced_speed_duplex = ADVERTISE_10_HALF; 4369 else 4370 invalid = B_TRUE; 4371 4372 } 4373 4374 if (invalid) { 4375 e1000g_log(Adapter, CE_WARN, 4376 "Invalid link settings. Setup link to " 4377 "support autonegotiation with all link capabilities."); 4378 mac->autoneg = B_TRUE; 4379 phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 4380 } 4381 4382 out: 4383 return (e1000_setup_link(&Adapter->shared)); 4384 } 4385 4386 static void 4387 e1000g_timer_tx_resched(struct e1000g *Adapter) 4388 { 4389 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 4390 4391 rw_enter(&Adapter->chip_lock, RW_READER); 4392 4393 if (tx_ring->resched_needed && 4394 ((ddi_get_lbolt() - tx_ring->resched_timestamp) > 4395 drv_usectohz(1000000)) && 4396 (Adapter->e1000g_state & E1000G_STARTED) && 4397 (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) { 4398 tx_ring->resched_needed = B_FALSE; 4399 mac_tx_update(Adapter->mh); 4400 E1000G_STAT(tx_ring->stat_reschedule); 4401 E1000G_STAT(tx_ring->stat_timer_reschedule); 4402 } 4403 4404 rw_exit(&Adapter->chip_lock); 4405 } 4406 4407 static void 4408 e1000g_local_timer(void *ws) 4409 { 4410 struct e1000g *Adapter = (struct e1000g *)ws; 4411 struct e1000_hw *hw; 4412 e1000g_ether_addr_t ether_addr; 4413 boolean_t link_changed; 4414 4415 hw = &Adapter->shared; 4416 4417 if (Adapter->e1000g_state & E1000G_ERROR) { 4418 rw_enter(&Adapter->chip_lock, RW_WRITER); 4419 Adapter->e1000g_state &= ~E1000G_ERROR; 4420 rw_exit(&Adapter->chip_lock); 4421 4422 Adapter->reset_count++; 4423 if (e1000g_global_reset(Adapter)) { 4424 ddi_fm_service_impact(Adapter->dip, 4425 DDI_SERVICE_RESTORED); 4426 e1000g_timer_tx_resched(Adapter); 4427 } else 4428 ddi_fm_service_impact(Adapter->dip, 4429 DDI_SERVICE_LOST); 4430 return; 4431 } 4432 4433 if (e1000g_stall_check(Adapter)) { 4434 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 4435 "Tx stall detected. Activate automatic recovery.\n"); 4436 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL); 4437 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 4438 Adapter->reset_count++; 4439 if (e1000g_reset_adapter(Adapter)) { 4440 ddi_fm_service_impact(Adapter->dip, 4441 DDI_SERVICE_RESTORED); 4442 e1000g_timer_tx_resched(Adapter); 4443 } 4444 return; 4445 } 4446 4447 link_changed = B_FALSE; 4448 rw_enter(&Adapter->chip_lock, RW_READER); 4449 if (Adapter->link_complete) 4450 link_changed = e1000g_link_check(Adapter); 4451 rw_exit(&Adapter->chip_lock); 4452 4453 if (link_changed) { 4454 if (!Adapter->reset_flag && 4455 (Adapter->e1000g_state & E1000G_STARTED) && 4456 !(Adapter->e1000g_state & E1000G_SUSPENDED)) 4457 mac_link_update(Adapter->mh, Adapter->link_state); 4458 if (Adapter->link_state == LINK_STATE_UP) 4459 Adapter->reset_flag = B_FALSE; 4460 } 4461 /* 4462 * Workaround for esb2. Data stuck in fifo on a link 4463 * down event. Reset the adapter to recover it. 4464 */ 4465 if (Adapter->esb2_workaround) { 4466 Adapter->esb2_workaround = B_FALSE; 4467 (void) e1000g_reset_adapter(Adapter); 4468 return; 4469 } 4470 4471 /* 4472 * With 82571 controllers, any locally administered address will 4473 * be overwritten when there is a reset on the other port. 4474 * Detect this circumstance and correct it. 
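 *
 * The check below re-reads receive address register 0 (RAR0) and
 * compares it byte for byte with the stored hw->mac.addr; the register
 * halves are byte-swapped with ntohl() first, which is why the
 * comparison walks the address in reverse order. On a mismatch,
 * e1000_rar_set() simply rewrites RAR0 from hw->mac.addr.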
4475 */ 4476 if ((hw->mac.type == e1000_82571) && 4477 (e1000_get_laa_state_82571(hw) == B_TRUE)) { 4478 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0); 4479 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1); 4480 4481 ether_addr.reg.low = ntohl(ether_addr.reg.low); 4482 ether_addr.reg.high = ntohl(ether_addr.reg.high); 4483 4484 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) || 4485 (ether_addr.mac.addr[4] != hw->mac.addr[1]) || 4486 (ether_addr.mac.addr[3] != hw->mac.addr[2]) || 4487 (ether_addr.mac.addr[2] != hw->mac.addr[3]) || 4488 (ether_addr.mac.addr[1] != hw->mac.addr[4]) || 4489 (ether_addr.mac.addr[0] != hw->mac.addr[5])) { 4490 (void) e1000_rar_set(hw, hw->mac.addr, 0); 4491 } 4492 } 4493 4494 /* 4495 * Long TTL workaround for 82541/82547 4496 */ 4497 (void) e1000_igp_ttl_workaround_82547(hw); 4498 4499 /* 4500 * Check for Adaptive IFS settings If there are lots of collisions 4501 * change the value in steps... 4502 * These properties should only be set for 10/100 4503 */ 4504 if ((hw->phy.media_type == e1000_media_type_copper) && 4505 ((Adapter->link_speed == SPEED_100) || 4506 (Adapter->link_speed == SPEED_10))) { 4507 e1000_update_adaptive(hw); 4508 } 4509 /* 4510 * Set Timer Interrupts 4511 */ 4512 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 4513 4514 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 4515 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4516 else 4517 e1000g_timer_tx_resched(Adapter); 4518 4519 restart_watchdog_timer(Adapter); 4520 } 4521 4522 /* 4523 * The function e1000g_link_timer() is called when the timer for link setup 4524 * is expired, which indicates the completion of the link setup. The link 4525 * state will not be updated until the link setup is completed. And the 4526 * link state will not be sent to the upper layer through mac_link_update() 4527 * in this function. It will be updated in the local timer routine or the 4528 * interrupt service routine after the interface is started (plumbed). 
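 *
 * A minimal sketch of the arming side (the delay shown is a
 * placeholder; the actual setup code elsewhere in this driver may use
 * a different interval):
 *
 *	mutex_enter(&Adapter->link_lock);
 *	Adapter->link_complete = B_FALSE;
 *	Adapter->link_tid = timeout(e1000g_link_timer,
 *	    (void *)Adapter, drv_usectohz(link_setup_usec));
 *	mutex_exit(&Adapter->link_lock);
 *
 * stop_link_timer() later in this file cancels any pending callback by
 * clearing link_tid under link_lock and calling untimeout().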
4529 */
4530 static void
4531 e1000g_link_timer(void *arg)
4532 {
4533 struct e1000g *Adapter = (struct e1000g *)arg;
4534
4535 mutex_enter(&Adapter->link_lock);
4536 Adapter->link_complete = B_TRUE;
4537 Adapter->link_tid = 0;
4538 mutex_exit(&Adapter->link_lock);
4539 }
4540
4541 /*
4542 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
4543 *
4544 * This function reads the forced speed and duplex for 10/100 Mbps speeds
4545 * and also for 1000 Mbps speeds from the e1000g.conf file
4546 */
4547 static void
4548 e1000g_force_speed_duplex(struct e1000g *Adapter)
4549 {
4550 int forced;
4551 int propval;
4552 struct e1000_mac_info *mac = &Adapter->shared.mac;
4553 struct e1000_phy_info *phy = &Adapter->shared.phy;
4554
4555 /*
4556 * get value out of config file
4557 */
4558 (void) e1000g_get_prop(Adapter, "ForceSpeedDuplex",
4559 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced);
4560
4561 switch (forced) {
4562 case GDIAG_10_HALF:
4563 /*
4564 * Disable Auto Negotiation
4565 */
4566 mac->autoneg = B_FALSE;
4567 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4568 break;
4569 case GDIAG_10_FULL:
4570 /*
4571 * Disable Auto Negotiation
4572 */
4573 mac->autoneg = B_FALSE;
4574 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4575 break;
4576 case GDIAG_100_HALF:
4577 /*
4578 * Disable Auto Negotiation
4579 */
4580 mac->autoneg = B_FALSE;
4581 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4582 break;
4583 case GDIAG_100_FULL:
4584 /*
4585 * Disable Auto Negotiation
4586 */
4587 mac->autoneg = B_FALSE;
4588 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4589 break;
4590 case GDIAG_1000_FULL:
4591 /*
4592 * The gigabit spec requires autonegotiation. Therefore,
4593 * when the user wants to force the speed to 1000Mbps, we
4594 * enable AutoNeg, but only allow the hardware to advertise
4595 * 1000Mbps. This is different from 10/100 operation, where
4596 * we are allowed to link without any negotiation.
4597 */
4598 mac->autoneg = B_TRUE;
4599 phy->autoneg_advertised = ADVERTISE_1000_FULL;
4600 break;
4601 default: /* obey the setting of AutoNegAdvertised */
4602 mac->autoneg = B_TRUE;
4603 (void) e1000g_get_prop(Adapter, "AutoNegAdvertised",
4604 0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4605 AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval);
4606 phy->autoneg_advertised = (uint16_t)propval;
4607 break;
4608 } /* switch */
4609 }
4610
4611 /*
4612 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4613 *
4614 * This function reads MaxFrameSize from e1000g.conf
4615 */
4616 static void
4617 e1000g_get_max_frame_size(struct e1000g *Adapter)
4618 {
4619 int max_frame;
4620
4621 /*
4622 * get value out of config file
4623 */
4624 (void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0,
4625 &max_frame);
4626
4627 switch (max_frame) {
4628 case 0:
4629 Adapter->default_mtu = ETHERMTU;
4630 break;
4631 case 1:
4632 Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4633 sizeof (struct ether_vlan_header) - ETHERFCSL;
4634 break;
4635 case 2:
4636 Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4637 sizeof (struct ether_vlan_header) - ETHERFCSL;
4638 break;
4639 case 3:
4640 Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4641 sizeof (struct ether_vlan_header) - ETHERFCSL;
4642 break;
4643 default:
4644 Adapter->default_mtu = ETHERMTU;
4645 break;
4646 } /* switch */
4647
4648 /*
4649 * If the user-configured MTU is larger than the device's maximum MTU,
4650 * the MTU is set to the device's maximum value.
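 *
 * A worked example (assuming FRAME_SIZE_UPTO_8K is 8192 bytes, an
 * 18-byte VLAN-tagged Ethernet header and a 4-byte FCS): setting
 * MaxFrameSize = 2 in e1000g.conf selects a default MTU of
 * 8192 - 18 - 4 = 8170 bytes, which the check below then caps at
 * max_mtu if this device cannot go that large.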
4651 */ 4652 if (Adapter->default_mtu > Adapter->max_mtu) 4653 Adapter->default_mtu = Adapter->max_mtu; 4654 4655 Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu); 4656 } 4657 4658 /* 4659 * e1000g_pch_limits - Apply limits of the PCH silicon type 4660 * 4661 * At any frame size larger than the ethernet default, 4662 * prevent linking at 10/100 speeds. 4663 */ 4664 static void 4665 e1000g_pch_limits(struct e1000g *Adapter) 4666 { 4667 struct e1000_hw *hw = &Adapter->shared; 4668 4669 /* only applies to PCH silicon type */ 4670 if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan) 4671 return; 4672 4673 /* only applies to frames larger than ethernet default */ 4674 if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) { 4675 hw->mac.autoneg = B_TRUE; 4676 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL; 4677 4678 Adapter->param_adv_autoneg = 1; 4679 Adapter->param_adv_1000fdx = 1; 4680 4681 Adapter->param_adv_100fdx = 0; 4682 Adapter->param_adv_100hdx = 0; 4683 Adapter->param_adv_10fdx = 0; 4684 Adapter->param_adv_10hdx = 0; 4685 4686 e1000g_param_sync(Adapter); 4687 } 4688 } 4689 4690 /* 4691 * e1000g_mtu2maxframe - convert given MTU to maximum frame size 4692 */ 4693 static uint32_t 4694 e1000g_mtu2maxframe(uint32_t mtu) 4695 { 4696 uint32_t maxframe; 4697 4698 maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL; 4699 4700 return (maxframe); 4701 } 4702 4703 static void 4704 arm_watchdog_timer(struct e1000g *Adapter) 4705 { 4706 Adapter->watchdog_tid = 4707 timeout(e1000g_local_timer, 4708 (void *)Adapter, 1 * drv_usectohz(1000000)); 4709 } 4710 #pragma inline(arm_watchdog_timer) 4711 4712 static void 4713 enable_watchdog_timer(struct e1000g *Adapter) 4714 { 4715 mutex_enter(&Adapter->watchdog_lock); 4716 4717 if (!Adapter->watchdog_timer_enabled) { 4718 Adapter->watchdog_timer_enabled = B_TRUE; 4719 Adapter->watchdog_timer_started = B_TRUE; 4720 arm_watchdog_timer(Adapter); 4721 } 4722 4723 mutex_exit(&Adapter->watchdog_lock); 4724 } 4725 4726 static void 4727 disable_watchdog_timer(struct e1000g *Adapter) 4728 { 4729 timeout_id_t tid; 4730 4731 mutex_enter(&Adapter->watchdog_lock); 4732 4733 Adapter->watchdog_timer_enabled = B_FALSE; 4734 Adapter->watchdog_timer_started = B_FALSE; 4735 tid = Adapter->watchdog_tid; 4736 Adapter->watchdog_tid = 0; 4737 4738 mutex_exit(&Adapter->watchdog_lock); 4739 4740 if (tid != 0) 4741 (void) untimeout(tid); 4742 } 4743 4744 static void 4745 start_watchdog_timer(struct e1000g *Adapter) 4746 { 4747 mutex_enter(&Adapter->watchdog_lock); 4748 4749 if (Adapter->watchdog_timer_enabled) { 4750 if (!Adapter->watchdog_timer_started) { 4751 Adapter->watchdog_timer_started = B_TRUE; 4752 arm_watchdog_timer(Adapter); 4753 } 4754 } 4755 4756 mutex_exit(&Adapter->watchdog_lock); 4757 } 4758 4759 static void 4760 restart_watchdog_timer(struct e1000g *Adapter) 4761 { 4762 mutex_enter(&Adapter->watchdog_lock); 4763 4764 if (Adapter->watchdog_timer_started) 4765 arm_watchdog_timer(Adapter); 4766 4767 mutex_exit(&Adapter->watchdog_lock); 4768 } 4769 4770 static void 4771 stop_watchdog_timer(struct e1000g *Adapter) 4772 { 4773 timeout_id_t tid; 4774 4775 mutex_enter(&Adapter->watchdog_lock); 4776 4777 Adapter->watchdog_timer_started = B_FALSE; 4778 tid = Adapter->watchdog_tid; 4779 Adapter->watchdog_tid = 0; 4780 4781 mutex_exit(&Adapter->watchdog_lock); 4782 4783 if (tid != 0) 4784 (void) untimeout(tid); 4785 } 4786 4787 static void 4788 stop_link_timer(struct e1000g *Adapter) 4789 { 4790 timeout_id_t tid; 4791 4792 /* Disable the 
link timer */ 4793 mutex_enter(&Adapter->link_lock); 4794 4795 tid = Adapter->link_tid; 4796 Adapter->link_tid = 0; 4797 4798 mutex_exit(&Adapter->link_lock); 4799 4800 if (tid != 0) 4801 (void) untimeout(tid); 4802 } 4803 4804 static void 4805 stop_82547_timer(e1000g_tx_ring_t *tx_ring) 4806 { 4807 timeout_id_t tid; 4808 4809 /* Disable the tx timer for 82547 chipset */ 4810 mutex_enter(&tx_ring->tx_lock); 4811 4812 tx_ring->timer_enable_82547 = B_FALSE; 4813 tid = tx_ring->timer_id_82547; 4814 tx_ring->timer_id_82547 = 0; 4815 4816 mutex_exit(&tx_ring->tx_lock); 4817 4818 if (tid != 0) 4819 (void) untimeout(tid); 4820 } 4821 4822 void 4823 e1000g_clear_interrupt(struct e1000g *Adapter) 4824 { 4825 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 4826 0xffffffff & ~E1000_IMS_RXSEQ); 4827 } 4828 4829 void 4830 e1000g_mask_interrupt(struct e1000g *Adapter) 4831 { 4832 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, 4833 IMS_ENABLE_MASK & ~E1000_IMS_TXDW); 4834 4835 if (Adapter->tx_intr_enable) 4836 e1000g_mask_tx_interrupt(Adapter); 4837 } 4838 4839 /* 4840 * This routine is called by e1000g_quiesce(), therefore must not block. 4841 */ 4842 void 4843 e1000g_clear_all_interrupts(struct e1000g *Adapter) 4844 { 4845 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff); 4846 } 4847 4848 void 4849 e1000g_mask_tx_interrupt(struct e1000g *Adapter) 4850 { 4851 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW); 4852 } 4853 4854 void 4855 e1000g_clear_tx_interrupt(struct e1000g *Adapter) 4856 { 4857 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW); 4858 } 4859 4860 static void 4861 e1000g_smartspeed(struct e1000g *Adapter) 4862 { 4863 struct e1000_hw *hw = &Adapter->shared; 4864 uint16_t phy_status; 4865 uint16_t phy_ctrl; 4866 4867 /* 4868 * If we're not T-or-T, or we're not autoneg'ing, or we're not 4869 * advertising 1000Full, we don't even use the workaround 4870 */ 4871 if ((hw->phy.type != e1000_phy_igp) || 4872 !hw->mac.autoneg || 4873 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)) 4874 return; 4875 4876 /* 4877 * True if this is the first call of this function or after every 4878 * 30 seconds of not having link 4879 */ 4880 if (Adapter->smartspeed == 0) { 4881 /* 4882 * If Master/Slave config fault is asserted twice, we 4883 * assume back-to-back 4884 */ 4885 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4886 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4887 return; 4888 4889 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4890 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4891 return; 4892 /* 4893 * We're assuming back-2-back because our status register 4894 * insists! there's a fault in the master/slave 4895 * relationship that was "negotiated" 4896 */ 4897 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4898 /* 4899 * Is the phy configured for manual configuration of 4900 * master/slave? 4901 */ 4902 if (phy_ctrl & CR_1000T_MS_ENABLE) { 4903 /* 4904 * Yes. 
Then disable manual configuration (enable 4905 * auto configuration) of master/slave 4906 */ 4907 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4908 (void) e1000_write_phy_reg(hw, 4909 PHY_1000T_CTRL, phy_ctrl); 4910 /* 4911 * Effectively starting the clock 4912 */ 4913 Adapter->smartspeed++; 4914 /* 4915 * Restart autonegotiation 4916 */ 4917 if (!e1000_phy_setup_autoneg(hw) && 4918 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 4919 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4920 MII_CR_RESTART_AUTO_NEG); 4921 (void) e1000_write_phy_reg(hw, 4922 PHY_CONTROL, phy_ctrl); 4923 } 4924 } 4925 return; 4926 /* 4927 * Has 6 seconds transpired still without link? Remember, 4928 * you should reset the smartspeed counter once you obtain 4929 * link 4930 */ 4931 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4932 /* 4933 * Yes. Remember, we did at the start determine that 4934 * there's a master/slave configuration fault, so we're 4935 * still assuming there's someone on the other end, but we 4936 * just haven't yet been able to talk to it. We then 4937 * re-enable auto configuration of master/slave to see if 4938 * we're running 2/3 pair cables. 4939 */ 4940 /* 4941 * If still no link, perhaps using 2/3 pair cable 4942 */ 4943 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4944 phy_ctrl |= CR_1000T_MS_ENABLE; 4945 (void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 4946 /* 4947 * Restart autoneg with phy enabled for manual 4948 * configuration of master/slave 4949 */ 4950 if (!e1000_phy_setup_autoneg(hw) && 4951 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 4952 phy_ctrl |= 4953 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 4954 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl); 4955 } 4956 /* 4957 * Hopefully, there are no more faults and we've obtained 4958 * link as a result. 4959 */ 4960 } 4961 /* 4962 * Restart process after E1000_SMARTSPEED_MAX iterations (30 4963 * seconds) 4964 */ 4965 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4966 Adapter->smartspeed = 0; 4967 } 4968 4969 static boolean_t 4970 is_valid_mac_addr(uint8_t *mac_addr) 4971 { 4972 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 4973 const uint8_t addr_test2[6] = 4974 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4975 4976 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 4977 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 4978 return (B_FALSE); 4979 4980 return (B_TRUE); 4981 } 4982 4983 /* 4984 * e1000g_stall_check - check for tx stall 4985 * 4986 * This function checks if the adapter is stalled (in transmit). 4987 * 4988 * It is called each time the watchdog timeout is invoked. 4989 * If the transmit descriptor reclaim continuously fails, 4990 * the watchdog value will increment by 1. If the watchdog 4991 * value exceeds the threshold, the adapter is assumed to 4992 * have stalled and need to be reset. 
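 *
 * The threshold is speed dependent: e1000g_link_check() above picks
 * TX_STALL_TIME_2S when the link comes up at 1000 Mbps and
 * TX_STALL_TIME_8S otherwise, so slower links are given more time to
 * drain before a stall is declared.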
4993 */
4994 static boolean_t
4995 e1000g_stall_check(struct e1000g *Adapter)
4996 {
4997 e1000g_tx_ring_t *tx_ring;
4998
4999 tx_ring = Adapter->tx_ring;
5000
5001 if (Adapter->link_state != LINK_STATE_UP)
5002 return (B_FALSE);
5003
5004 (void) e1000g_recycle(tx_ring);
5005
5006 if (Adapter->stall_flag)
5007 return (B_TRUE);
5008
5009 return (B_FALSE);
5010 }
5011
5012 #ifdef E1000G_DEBUG
5013 static enum ioc_reply
5014 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
5015 {
5016 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
5017 e1000g_peekpoke_t *ppd;
5018 uint64_t mem_va;
5019 uint64_t maxoff;
5020 boolean_t peek;
5021
5022 switch (iocp->ioc_cmd) {
5023
5024 case E1000G_IOC_REG_PEEK:
5025 peek = B_TRUE;
5026 break;
5027
5028 case E1000G_IOC_REG_POKE:
5029 peek = B_FALSE;
5030 break;
5031
5032 default:
5033 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
5034 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
5035 iocp->ioc_cmd);
5036 return (IOC_INVAL);
5037 }
5038
5039 /*
5040 * Validate format of ioctl
5041 */
5042 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
5043 return (IOC_INVAL);
5044 if (mp->b_cont == NULL)
5045 return (IOC_INVAL);
5046
5047 ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
5048
5049 /*
5050 * Validate request parameters
5051 */
5052 switch (ppd->pp_acc_space) {
5053
5054 default:
5055 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
5056 "e1000g_diag_ioctl: invalid access space 0x%X\n",
5057 ppd->pp_acc_space);
5058 return (IOC_INVAL);
5059
5060 case E1000G_PP_SPACE_REG:
5061 /*
5062 * Memory-mapped I/O space
5063 */
5064 ASSERT(ppd->pp_acc_size == 4);
5065 if (ppd->pp_acc_size != 4)
5066 return (IOC_INVAL);
5067
5068 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
5069 return (IOC_INVAL);
5070
5071 mem_va = 0;
5072 maxoff = 0x10000;
5073 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
5074 break;
5075
5076 case E1000G_PP_SPACE_E1000G:
5077 /*
5078 * E1000g data structure!
5079 */
5080 mem_va = (uintptr_t)e1000gp;
5081 maxoff = sizeof (struct e1000g);
5082 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
5083 break;
5084
5085 }
5086
5087 if (ppd->pp_acc_offset >= maxoff)
5088 return (IOC_INVAL);
5089
5090 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
5091 return (IOC_INVAL);
5092
5093 /*
5094 * All OK - go!
5095 */
5096 ppd->pp_acc_offset += mem_va;
5097 (*ppfn)(e1000gp, ppd);
5098 return (peek ?
IOC_REPLY : IOC_ACK); 5099 } 5100 5101 static void 5102 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5103 { 5104 ddi_acc_handle_t handle; 5105 uint32_t *regaddr; 5106 5107 handle = e1000gp->osdep.reg_handle; 5108 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr + 5109 (uintptr_t)ppd->pp_acc_offset); 5110 5111 ppd->pp_acc_data = ddi_get32(handle, regaddr); 5112 } 5113 5114 static void 5115 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5116 { 5117 ddi_acc_handle_t handle; 5118 uint32_t *regaddr; 5119 uint32_t value; 5120 5121 handle = e1000gp->osdep.reg_handle; 5122 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr + 5123 (uintptr_t)ppd->pp_acc_offset); 5124 value = (uint32_t)ppd->pp_acc_data; 5125 5126 ddi_put32(handle, regaddr, value); 5127 } 5128 5129 static void 5130 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5131 { 5132 uint64_t value; 5133 void *vaddr; 5134 5135 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 5136 5137 switch (ppd->pp_acc_size) { 5138 case 1: 5139 value = *(uint8_t *)vaddr; 5140 break; 5141 5142 case 2: 5143 value = *(uint16_t *)vaddr; 5144 break; 5145 5146 case 4: 5147 value = *(uint32_t *)vaddr; 5148 break; 5149 5150 case 8: 5151 value = *(uint64_t *)vaddr; 5152 break; 5153 } 5154 5155 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 5156 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n", 5157 (void *)e1000gp, (void *)ppd, value, vaddr); 5158 5159 ppd->pp_acc_data = value; 5160 } 5161 5162 static void 5163 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5164 { 5165 uint64_t value; 5166 void *vaddr; 5167 5168 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 5169 value = ppd->pp_acc_data; 5170 5171 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 5172 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n", 5173 (void *)e1000gp, (void *)ppd, value, vaddr); 5174 5175 switch (ppd->pp_acc_size) { 5176 case 1: 5177 *(uint8_t *)vaddr = (uint8_t)value; 5178 break; 5179 5180 case 2: 5181 *(uint16_t *)vaddr = (uint16_t)value; 5182 break; 5183 5184 case 4: 5185 *(uint32_t *)vaddr = (uint32_t)value; 5186 break; 5187 5188 case 8: 5189 *(uint64_t *)vaddr = (uint64_t)value; 5190 break; 5191 } 5192 } 5193 #endif 5194 5195 /* 5196 * Loopback Support 5197 */ 5198 static lb_property_t lb_normal = 5199 { normal, "normal", E1000G_LB_NONE }; 5200 static lb_property_t lb_external1000 = 5201 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 }; 5202 static lb_property_t lb_external100 = 5203 { external, "100Mbps", E1000G_LB_EXTERNAL_100 }; 5204 static lb_property_t lb_external10 = 5205 { external, "10Mbps", E1000G_LB_EXTERNAL_10 }; 5206 static lb_property_t lb_phy = 5207 { internal, "PHY", E1000G_LB_INTERNAL_PHY }; 5208 5209 static enum ioc_reply 5210 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp) 5211 { 5212 lb_info_sz_t *lbsp; 5213 lb_property_t *lbpp; 5214 struct e1000_hw *hw; 5215 uint32_t *lbmp; 5216 uint32_t size; 5217 uint32_t value; 5218 5219 hw = &Adapter->shared; 5220 5221 if (mp->b_cont == NULL) 5222 return (IOC_INVAL); 5223 5224 if (!e1000g_check_loopback_support(hw)) { 5225 e1000g_log(NULL, CE_WARN, 5226 "Loopback is not supported on e1000g%d", Adapter->instance); 5227 return (IOC_INVAL); 5228 } 5229 5230 switch (iocp->ioc_cmd) { 5231 default: 5232 return (IOC_INVAL); 5233 5234 case LB_GET_INFO_SIZE: 5235 size = sizeof (lb_info_sz_t); 5236 if (iocp->ioc_count != size) 5237 return (IOC_INVAL); 5238 5239 rw_enter(&Adapter->chip_lock, RW_WRITER); 
5240 e1000g_get_phy_state(Adapter); 5241 5242 /* 5243 * Workaround for hardware faults. In order to get a stable 5244 * state of phy, we will wait for a specific interval and 5245 * try again. The time delay is an experiential value based 5246 * on our testing. 5247 */ 5248 msec_delay(100); 5249 e1000g_get_phy_state(Adapter); 5250 rw_exit(&Adapter->chip_lock); 5251 5252 value = sizeof (lb_normal); 5253 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5254 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5255 (hw->phy.media_type == e1000_media_type_fiber) || 5256 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5257 value += sizeof (lb_phy); 5258 switch (hw->mac.type) { 5259 case e1000_82571: 5260 case e1000_82572: 5261 case e1000_80003es2lan: 5262 value += sizeof (lb_external1000); 5263 break; 5264 } 5265 } 5266 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5267 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5268 value += sizeof (lb_external100); 5269 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5270 value += sizeof (lb_external10); 5271 5272 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 5273 *lbsp = value; 5274 break; 5275 5276 case LB_GET_INFO: 5277 value = sizeof (lb_normal); 5278 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5279 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5280 (hw->phy.media_type == e1000_media_type_fiber) || 5281 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5282 value += sizeof (lb_phy); 5283 switch (hw->mac.type) { 5284 case e1000_82571: 5285 case e1000_82572: 5286 case e1000_80003es2lan: 5287 value += sizeof (lb_external1000); 5288 break; 5289 } 5290 } 5291 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5292 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5293 value += sizeof (lb_external100); 5294 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5295 value += sizeof (lb_external10); 5296 5297 size = value; 5298 if (iocp->ioc_count != size) 5299 return (IOC_INVAL); 5300 5301 value = 0; 5302 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 5303 lbpp[value++] = lb_normal; 5304 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5305 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5306 (hw->phy.media_type == e1000_media_type_fiber) || 5307 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5308 lbpp[value++] = lb_phy; 5309 switch (hw->mac.type) { 5310 case e1000_82571: 5311 case e1000_82572: 5312 case e1000_80003es2lan: 5313 lbpp[value++] = lb_external1000; 5314 break; 5315 } 5316 } 5317 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5318 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5319 lbpp[value++] = lb_external100; 5320 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5321 lbpp[value++] = lb_external10; 5322 break; 5323 5324 case LB_GET_MODE: 5325 size = sizeof (uint32_t); 5326 if (iocp->ioc_count != size) 5327 return (IOC_INVAL); 5328 5329 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 5330 *lbmp = Adapter->loopback_mode; 5331 break; 5332 5333 case LB_SET_MODE: 5334 size = 0; 5335 if (iocp->ioc_count != sizeof (uint32_t)) 5336 return (IOC_INVAL); 5337 5338 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 5339 if (!e1000g_set_loopback_mode(Adapter, *lbmp)) 5340 return (IOC_INVAL); 5341 break; 5342 } 5343 5344 iocp->ioc_count = size; 5345 iocp->ioc_error = 0; 5346 5347 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 5348 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 5349 return (IOC_INVAL); 5350 } 5351 5352 return 
(IOC_REPLY); 5353 } 5354 5355 static boolean_t 5356 e1000g_check_loopback_support(struct e1000_hw *hw) 5357 { 5358 switch (hw->mac.type) { 5359 case e1000_82540: 5360 case e1000_82545: 5361 case e1000_82545_rev_3: 5362 case e1000_82546: 5363 case e1000_82546_rev_3: 5364 case e1000_82541: 5365 case e1000_82541_rev_2: 5366 case e1000_82547: 5367 case e1000_82547_rev_2: 5368 case e1000_82571: 5369 case e1000_82572: 5370 case e1000_82573: 5371 case e1000_82574: 5372 case e1000_80003es2lan: 5373 case e1000_ich9lan: 5374 case e1000_ich10lan: 5375 return (B_TRUE); 5376 } 5377 return (B_FALSE); 5378 } 5379 5380 static boolean_t 5381 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode) 5382 { 5383 struct e1000_hw *hw; 5384 int i, times; 5385 boolean_t link_up; 5386 5387 if (mode == Adapter->loopback_mode) 5388 return (B_TRUE); 5389 5390 hw = &Adapter->shared; 5391 times = 0; 5392 5393 Adapter->loopback_mode = mode; 5394 5395 if (mode == E1000G_LB_NONE) { 5396 /* Reset the chip */ 5397 hw->phy.autoneg_wait_to_complete = B_TRUE; 5398 (void) e1000g_reset_adapter(Adapter); 5399 hw->phy.autoneg_wait_to_complete = B_FALSE; 5400 return (B_TRUE); 5401 } 5402 5403 again: 5404 5405 rw_enter(&Adapter->chip_lock, RW_WRITER); 5406 5407 switch (mode) { 5408 default: 5409 rw_exit(&Adapter->chip_lock); 5410 return (B_FALSE); 5411 5412 case E1000G_LB_EXTERNAL_1000: 5413 e1000g_set_external_loopback_1000(Adapter); 5414 break; 5415 5416 case E1000G_LB_EXTERNAL_100: 5417 e1000g_set_external_loopback_100(Adapter); 5418 break; 5419 5420 case E1000G_LB_EXTERNAL_10: 5421 e1000g_set_external_loopback_10(Adapter); 5422 break; 5423 5424 case E1000G_LB_INTERNAL_PHY: 5425 e1000g_set_internal_loopback(Adapter); 5426 break; 5427 } 5428 5429 times++; 5430 5431 rw_exit(&Adapter->chip_lock); 5432 5433 /* Wait for link up */ 5434 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--) 5435 msec_delay(100); 5436 5437 rw_enter(&Adapter->chip_lock, RW_WRITER); 5438 5439 link_up = e1000g_link_up(Adapter); 5440 5441 rw_exit(&Adapter->chip_lock); 5442 5443 if (!link_up) { 5444 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5445 "Failed to get the link up"); 5446 if (times < 2) { 5447 /* Reset the link */ 5448 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5449 "Reset the link ..."); 5450 (void) e1000g_reset_adapter(Adapter); 5451 goto again; 5452 } 5453 5454 /* 5455 * Reset driver to loopback none when set loopback failed 5456 * for the second time. 5457 */ 5458 Adapter->loopback_mode = E1000G_LB_NONE; 5459 5460 /* Reset the chip */ 5461 hw->phy.autoneg_wait_to_complete = B_TRUE; 5462 (void) e1000g_reset_adapter(Adapter); 5463 hw->phy.autoneg_wait_to_complete = B_FALSE; 5464 5465 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5466 "Set loopback mode failed, reset to loopback none"); 5467 5468 return (B_FALSE); 5469 } 5470 5471 return (B_TRUE); 5472 } 5473 5474 /* 5475 * The following loopback settings are from Intel's technical 5476 * document - "How To Loopback". All the register settings and 5477 * time delay values are directly inherited from the document 5478 * without more explanations available. 
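 *
 * The helpers below share one broad pattern (sketched here; the exact
 * register values vary per MAC type): disable Smart Power Down on the
 * PHY, force the PHY to a fixed speed/duplex or set its loopback bit,
 * then program E1000_CTRL so the MAC is forced to the matching speed
 * and duplex, e.g. for gigabit:
 *
 *	ctrl = E1000_READ_REG(hw, E1000_CTRL);
 *	ctrl &= ~E1000_CTRL_SPD_SEL;
 *	ctrl |= E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
 *	    E1000_CTRL_SPD_1000 | E1000_CTRL_FD;
 *	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);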
5479 */ 5480 static void 5481 e1000g_set_internal_loopback(struct e1000g *Adapter) 5482 { 5483 struct e1000_hw *hw; 5484 uint32_t ctrl; 5485 uint32_t status; 5486 uint16_t phy_ctrl; 5487 uint16_t phy_reg; 5488 uint32_t txcw; 5489 5490 hw = &Adapter->shared; 5491 5492 /* Disable Smart Power Down */ 5493 phy_spd_state(hw, B_FALSE); 5494 5495 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); 5496 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10); 5497 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000; 5498 5499 switch (hw->mac.type) { 5500 case e1000_82540: 5501 case e1000_82545: 5502 case e1000_82545_rev_3: 5503 case e1000_82546: 5504 case e1000_82546_rev_3: 5505 case e1000_82573: 5506 /* Auto-MDI/MDIX off */ 5507 (void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 5508 /* Reset PHY to update Auto-MDI/MDIX */ 5509 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5510 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN); 5511 /* Reset PHY to auto-neg off and force 1000 */ 5512 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5513 phy_ctrl | MII_CR_RESET); 5514 /* 5515 * Disable PHY receiver for 82540/545/546 and 82573 Family. 5516 * See comments above e1000g_set_internal_loopback() for the 5517 * background. 5518 */ 5519 (void) e1000_write_phy_reg(hw, 29, 0x001F); 5520 (void) e1000_write_phy_reg(hw, 30, 0x8FFC); 5521 (void) e1000_write_phy_reg(hw, 29, 0x001A); 5522 (void) e1000_write_phy_reg(hw, 30, 0x8FF0); 5523 break; 5524 case e1000_80003es2lan: 5525 /* Force Link Up */ 5526 (void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 5527 0x1CC); 5528 /* Sets PCS loopback at 1Gbs */ 5529 (void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, 5530 0x1046); 5531 break; 5532 } 5533 5534 /* 5535 * The following registers should be set for e1000_phy_bm phy type. 5536 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy. 5537 * For others, we do not need to set these registers. 5538 */ 5539 if (hw->phy.type == e1000_phy_bm) { 5540 /* Set Default MAC Interface speed to 1GB */ 5541 (void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg); 5542 phy_reg &= ~0x0007; 5543 phy_reg |= 0x006; 5544 (void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg); 5545 /* Assert SW reset for above settings to take effect */ 5546 (void) e1000_phy_commit(hw); 5547 msec_delay(1); 5548 /* Force Full Duplex */ 5549 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg); 5550 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16), 5551 phy_reg | 0x000C); 5552 /* Set Link Up (in force link) */ 5553 (void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg); 5554 (void) e1000_write_phy_reg(hw, PHY_REG(776, 16), 5555 phy_reg | 0x0040); 5556 /* Force Link */ 5557 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg); 5558 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16), 5559 phy_reg | 0x0040); 5560 /* Set Early Link Enable */ 5561 (void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg); 5562 (void) e1000_write_phy_reg(hw, PHY_REG(769, 20), 5563 phy_reg | 0x0400); 5564 } 5565 5566 /* Set loopback */ 5567 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK); 5568 5569 msec_delay(250); 5570 5571 /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ 5572 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5573 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5574 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5575 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5576 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ 5577 E1000_CTRL_FD); /* Force Duplex to FULL */ 5578 5579 switch (hw->mac.type) { 5580 case e1000_82540: 5581 case e1000_82545: 5582 case e1000_82545_rev_3: 5583 case e1000_82546: 5584 case e1000_82546_rev_3: 5585 /* 5586 * For some serdes we'll need to commit the writes now 5587 * so that the status is updated on link 5588 */ 5589 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 5590 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5591 msec_delay(100); 5592 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5593 } 5594 5595 if (hw->phy.media_type == e1000_media_type_copper) { 5596 /* Invert Loss of Signal */ 5597 ctrl |= E1000_CTRL_ILOS; 5598 } else { 5599 /* Set ILOS on fiber nic if half duplex is detected */ 5600 status = E1000_READ_REG(hw, E1000_STATUS); 5601 if ((status & E1000_STATUS_FD) == 0) 5602 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5603 } 5604 break; 5605 5606 case e1000_82571: 5607 case e1000_82572: 5608 /* 5609 * The fiber/SerDes versions of this adapter do not contain an 5610 * accessible PHY. Therefore, loopback beyond MAC must be done 5611 * using SerDes analog loopback. 5612 */ 5613 if (hw->phy.media_type != e1000_media_type_copper) { 5614 /* Disable autoneg by setting bit 31 of TXCW to zero */ 5615 txcw = E1000_READ_REG(hw, E1000_TXCW); 5616 txcw &= ~((uint32_t)1 << 31); 5617 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 5618 5619 /* 5620 * Write 0x410 to Serdes Control register 5621 * to enable Serdes analog loopback 5622 */ 5623 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 5624 msec_delay(10); 5625 } 5626 5627 status = E1000_READ_REG(hw, E1000_STATUS); 5628 /* Set ILOS on fiber nic if half duplex is detected */ 5629 if ((hw->phy.media_type == e1000_media_type_fiber) && 5630 ((status & E1000_STATUS_FD) == 0 || 5631 (status & E1000_STATUS_LU) == 0)) 5632 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5633 else if (hw->phy.media_type == e1000_media_type_internal_serdes) 5634 ctrl |= E1000_CTRL_SLU; 5635 break; 5636 5637 case e1000_82573: 5638 ctrl |= E1000_CTRL_ILOS; 5639 break; 5640 case e1000_ich9lan: 5641 case e1000_ich10lan: 5642 ctrl |= E1000_CTRL_SLU; 5643 break; 5644 } 5645 if (hw->phy.type == e1000_phy_bm) 5646 ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS; 5647 5648 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5649 } 5650 5651 static void 5652 e1000g_set_external_loopback_1000(struct e1000g *Adapter) 5653 { 5654 struct e1000_hw *hw; 5655 uint32_t rctl; 5656 uint32_t ctrl_ext; 5657 uint32_t ctrl; 5658 uint32_t status; 5659 uint32_t txcw; 5660 uint16_t phydata; 5661 5662 hw = &Adapter->shared; 5663 5664 /* Disable Smart Power Down */ 5665 phy_spd_state(hw, B_FALSE); 5666 5667 switch (hw->mac.type) { 5668 case e1000_82571: 5669 case e1000_82572: 5670 switch (hw->phy.media_type) { 5671 case e1000_media_type_copper: 5672 /* Force link up (Must be done before the PHY writes) */ 5673 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5674 ctrl |= E1000_CTRL_SLU; /* Force Link Up */ 5675 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5676 5677 rctl = E1000_READ_REG(hw, E1000_RCTL); 5678 rctl |= (E1000_RCTL_EN | 5679 E1000_RCTL_SBP | 5680 E1000_RCTL_UPE | 5681 E1000_RCTL_MPE | 5682 E1000_RCTL_LPE | 5683 E1000_RCTL_BAM); /* 0x803E */ 5684 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 5685 5686 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 5687 
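			/*
			 * Drive SDP3/4/6 as outputs and raise their data
			 * bits (the 0x0DD0 value noted below), per the
			 * loopback application note for these copper parts.
			 */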
ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA | 5688 E1000_CTRL_EXT_SDP6_DATA | 5689 E1000_CTRL_EXT_SDP3_DATA | 5690 E1000_CTRL_EXT_SDP4_DIR | 5691 E1000_CTRL_EXT_SDP6_DIR | 5692 E1000_CTRL_EXT_SDP3_DIR); /* 0x0DD0 */ 5693 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 5694 5695 /* 5696 * This sequence tunes the PHY's SDP and no customer 5697 * settable values. For background, see comments above 5698 * e1000g_set_internal_loopback(). 5699 */ 5700 (void) e1000_write_phy_reg(hw, 0x0, 0x140); 5701 msec_delay(10); 5702 (void) e1000_write_phy_reg(hw, 0x9, 0x1A00); 5703 (void) e1000_write_phy_reg(hw, 0x12, 0xC10); 5704 (void) e1000_write_phy_reg(hw, 0x12, 0x1C10); 5705 (void) e1000_write_phy_reg(hw, 0x1F37, 0x76); 5706 (void) e1000_write_phy_reg(hw, 0x1F33, 0x1); 5707 (void) e1000_write_phy_reg(hw, 0x1F33, 0x0); 5708 5709 (void) e1000_write_phy_reg(hw, 0x1F35, 0x65); 5710 (void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C); 5711 (void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC); 5712 (void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C); 5713 (void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC); 5714 5715 msec_delay(50); 5716 break; 5717 case e1000_media_type_fiber: 5718 case e1000_media_type_internal_serdes: 5719 status = E1000_READ_REG(hw, E1000_STATUS); 5720 if (((status & E1000_STATUS_LU) == 0) || 5721 (hw->phy.media_type == 5722 e1000_media_type_internal_serdes)) { 5723 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5724 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5725 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5726 } 5727 5728 /* Disable autoneg by setting bit 31 of TXCW to zero */ 5729 txcw = E1000_READ_REG(hw, E1000_TXCW); 5730 txcw &= ~((uint32_t)1 << 31); 5731 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 5732 5733 /* 5734 * Write 0x410 to Serdes Control register 5735 * to enable Serdes analog loopback 5736 */ 5737 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 5738 msec_delay(10); 5739 break; 5740 default: 5741 break; 5742 } 5743 break; 5744 case e1000_82574: 5745 case e1000_80003es2lan: 5746 case e1000_ich9lan: 5747 case e1000_ich10lan: 5748 (void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata); 5749 (void) e1000_write_phy_reg(hw, GG82563_REG(6, 16), 5750 phydata | (1 << 5)); 5751 Adapter->param_adv_autoneg = 1; 5752 Adapter->param_adv_1000fdx = 1; 5753 (void) e1000g_reset_link(Adapter); 5754 break; 5755 } 5756 } 5757 5758 static void 5759 e1000g_set_external_loopback_100(struct e1000g *Adapter) 5760 { 5761 struct e1000_hw *hw; 5762 uint32_t ctrl; 5763 uint16_t phy_ctrl; 5764 5765 hw = &Adapter->shared; 5766 5767 /* Disable Smart Power Down */ 5768 phy_spd_state(hw, B_FALSE); 5769 5770 phy_ctrl = (MII_CR_FULL_DUPLEX | 5771 MII_CR_SPEED_100); 5772 5773 /* Force 100/FD, reset PHY */ 5774 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5775 phy_ctrl | MII_CR_RESET); /* 0xA100 */ 5776 msec_delay(10); 5777 5778 /* Force 100/FD */ 5779 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5780 phy_ctrl); /* 0x2100 */ 5781 msec_delay(10); 5782 5783 /* Now setup the MAC to the same speed/duplex as the PHY. 
*/ 5784 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5785 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5786 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */ 5787 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5788 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5789 E1000_CTRL_SPD_100 | /* Force Speed to 100 */ 5790 E1000_CTRL_FD); /* Force Duplex to FULL */ 5791 5792 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5793 } 5794 5795 static void 5796 e1000g_set_external_loopback_10(struct e1000g *Adapter) 5797 { 5798 struct e1000_hw *hw; 5799 uint32_t ctrl; 5800 uint16_t phy_ctrl; 5801 5802 hw = &Adapter->shared; 5803 5804 /* Disable Smart Power Down */ 5805 phy_spd_state(hw, B_FALSE); 5806 5807 phy_ctrl = (MII_CR_FULL_DUPLEX | 5808 MII_CR_SPEED_10); 5809 5810 /* Force 10/FD, reset PHY */ 5811 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5812 phy_ctrl | MII_CR_RESET); /* 0x8100 */ 5813 msec_delay(10); 5814 5815 /* Force 10/FD */ 5816 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5817 phy_ctrl); /* 0x0100 */ 5818 msec_delay(10); 5819 5820 /* Now setup the MAC to the same speed/duplex as the PHY. */ 5821 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5822 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5823 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */ 5824 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5825 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5826 E1000_CTRL_SPD_10 | /* Force Speed to 10 */ 5827 E1000_CTRL_FD); /* Force Duplex to FULL */ 5828 5829 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5830 } 5831 5832 #ifdef __sparc 5833 static boolean_t 5834 e1000g_find_mac_address(struct e1000g *Adapter) 5835 { 5836 struct e1000_hw *hw = &Adapter->shared; 5837 uchar_t *bytes; 5838 struct ether_addr sysaddr; 5839 uint_t nelts; 5840 int err; 5841 boolean_t found = B_FALSE; 5842 5843 /* 5844 * The "vendor's factory-set address" may already have 5845 * been extracted from the chip, but if the property 5846 * "local-mac-address" is set we use that instead. 5847 * 5848 * We check whether it looks like an array of 6 5849 * bytes (which it should, if OBP set it). If we can't 5850 * make sense of it this way, we'll ignore it. 5851 */ 5852 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 5853 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 5854 if (err == DDI_PROP_SUCCESS) { 5855 if (nelts == ETHERADDRL) { 5856 while (nelts--) 5857 hw->mac.addr[nelts] = bytes[nelts]; 5858 found = B_TRUE; 5859 } 5860 ddi_prop_free(bytes); 5861 } 5862 5863 /* 5864 * Look up the OBP property "local-mac-address?". If the user has set 5865 * 'local-mac-address? = false', use "the system address" instead. 5866 */ 5867 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0, 5868 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 5869 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 5870 if (localetheraddr(NULL, &sysaddr) != 0) { 5871 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 5872 found = B_TRUE; 5873 } 5874 } 5875 ddi_prop_free(bytes); 5876 } 5877 5878 /* 5879 * Finally(!), if there's a valid "mac-address" property (created 5880 * if we netbooted from this interface), we must use this instead 5881 * of any of the above to ensure that the NFS/install server doesn't 5882 * get confused by the address changing as Solaris takes over! 
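 *
 * To summarize the precedence implemented in this function: the
 * address already read from the chip is used unless a six-byte
 * "local-mac-address" property overrides it; the system address is
 * used instead when the OBP variable local-mac-address? is "false";
 * and a "mac-address" property left over from netbooting, when
 * present, wins over everything else.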
5883 */ 5884 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 5885 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 5886 if (err == DDI_PROP_SUCCESS) { 5887 if (nelts == ETHERADDRL) { 5888 while (nelts--) 5889 hw->mac.addr[nelts] = bytes[nelts]; 5890 found = B_TRUE; 5891 } 5892 ddi_prop_free(bytes); 5893 } 5894 5895 if (found) { 5896 bcopy(hw->mac.addr, hw->mac.perm_addr, 5897 ETHERADDRL); 5898 } 5899 5900 return (found); 5901 } 5902 #endif 5903 5904 static int 5905 e1000g_add_intrs(struct e1000g *Adapter) 5906 { 5907 dev_info_t *devinfo; 5908 int intr_types; 5909 int rc; 5910 5911 devinfo = Adapter->dip; 5912 5913 /* Get supported interrupt types */ 5914 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 5915 5916 if (rc != DDI_SUCCESS) { 5917 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5918 "Get supported interrupt types failed: %d\n", rc); 5919 return (DDI_FAILURE); 5920 } 5921 5922 /* 5923 * Based on Intel Technical Advisory document (TA-160), there are some 5924 * cases where some older Intel PCI-X NICs may "advertise" to the OS 5925 * that it supports MSI, but in fact has problems. 5926 * So we should only enable MSI for PCI-E NICs and disable MSI for old 5927 * PCI/PCI-X NICs. 5928 */ 5929 if (Adapter->shared.mac.type < e1000_82571) 5930 Adapter->msi_enable = B_FALSE; 5931 5932 if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) { 5933 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI); 5934 5935 if (rc != DDI_SUCCESS) { 5936 /* EMPTY */ 5937 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 5938 "Add MSI failed, trying Legacy interrupts\n"); 5939 } else { 5940 Adapter->intr_type = DDI_INTR_TYPE_MSI; 5941 } 5942 } 5943 5944 if ((Adapter->intr_type == 0) && 5945 (intr_types & DDI_INTR_TYPE_FIXED)) { 5946 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED); 5947 5948 if (rc != DDI_SUCCESS) { 5949 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 5950 "Add Legacy interrupts failed\n"); 5951 return (DDI_FAILURE); 5952 } 5953 5954 Adapter->intr_type = DDI_INTR_TYPE_FIXED; 5955 } 5956 5957 if (Adapter->intr_type == 0) { 5958 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 5959 "No interrupts registered\n"); 5960 return (DDI_FAILURE); 5961 } 5962 5963 return (DDI_SUCCESS); 5964 } 5965 5966 /* 5967 * e1000g_intr_add() handles MSI/Legacy interrupts 5968 */ 5969 static int 5970 e1000g_intr_add(struct e1000g *Adapter, int intr_type) 5971 { 5972 dev_info_t *devinfo; 5973 int count, avail, actual; 5974 int x, y, rc, inum = 0; 5975 int flag; 5976 ddi_intr_handler_t *intr_handler; 5977 5978 devinfo = Adapter->dip; 5979 5980 /* get number of interrupts */ 5981 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 5982 if ((rc != DDI_SUCCESS) || (count == 0)) { 5983 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 5984 "Get interrupt number failed. Return: %d, count: %d\n", 5985 rc, count); 5986 return (DDI_FAILURE); 5987 } 5988 5989 /* get number of available interrupts */ 5990 rc = ddi_intr_get_navail(devinfo, intr_type, &avail); 5991 if ((rc != DDI_SUCCESS) || (avail == 0)) { 5992 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 5993 "Get interrupt available number failed. 
" 5994 "Return: %d, available: %d\n", rc, avail); 5995 return (DDI_FAILURE); 5996 } 5997 5998 if (avail < count) { 5999 /* EMPTY */ 6000 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 6001 "Interrupts count: %d, available: %d\n", 6002 count, avail); 6003 } 6004 6005 /* Allocate an array of interrupt handles */ 6006 Adapter->intr_size = count * sizeof (ddi_intr_handle_t); 6007 Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP); 6008 6009 /* Set NORMAL behavior for both MSI and FIXED interrupt */ 6010 flag = DDI_INTR_ALLOC_NORMAL; 6011 6012 /* call ddi_intr_alloc() */ 6013 rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum, 6014 count, &actual, flag); 6015 6016 if ((rc != DDI_SUCCESS) || (actual == 0)) { 6017 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6018 "Allocate interrupts failed: %d\n", rc); 6019 6020 kmem_free(Adapter->htable, Adapter->intr_size); 6021 return (DDI_FAILURE); 6022 } 6023 6024 if (actual < count) { 6025 /* EMPTY */ 6026 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 6027 "Interrupts requested: %d, received: %d\n", 6028 count, actual); 6029 } 6030 6031 Adapter->intr_cnt = actual; 6032 6033 /* Get priority for first msi, assume remaining are all the same */ 6034 rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri); 6035 6036 if (rc != DDI_SUCCESS) { 6037 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6038 "Get interrupt priority failed: %d\n", rc); 6039 6040 /* Free already allocated intr */ 6041 for (y = 0; y < actual; y++) 6042 (void) ddi_intr_free(Adapter->htable[y]); 6043 6044 kmem_free(Adapter->htable, Adapter->intr_size); 6045 return (DDI_FAILURE); 6046 } 6047 6048 /* 6049 * In Legacy Interrupt mode, for PCI-Express adapters, we should 6050 * use the interrupt service routine e1000g_intr_pciexpress() 6051 * to avoid interrupt stealing when sharing interrupt with other 6052 * devices. 
6053 */ 6054 if (Adapter->shared.mac.type < e1000_82571) 6055 intr_handler = (ddi_intr_handler_t *)e1000g_intr; 6056 else 6057 intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress; 6058 6059 /* Call ddi_intr_add_handler() */ 6060 for (x = 0; x < actual; x++) { 6061 rc = ddi_intr_add_handler(Adapter->htable[x], 6062 intr_handler, (caddr_t)Adapter, NULL); 6063 6064 if (rc != DDI_SUCCESS) { 6065 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6066 "Add interrupt handler failed: %d\n", rc); 6067 6068 /* Remove already added handler */ 6069 for (y = 0; y < x; y++) 6070 (void) ddi_intr_remove_handler( 6071 Adapter->htable[y]); 6072 6073 /* Free already allocated intr */ 6074 for (y = 0; y < actual; y++) 6075 (void) ddi_intr_free(Adapter->htable[y]); 6076 6077 kmem_free(Adapter->htable, Adapter->intr_size); 6078 return (DDI_FAILURE); 6079 } 6080 } 6081 6082 rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap); 6083 6084 if (rc != DDI_SUCCESS) { 6085 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6086 "Get interrupt cap failed: %d\n", rc); 6087 6088 /* Free already allocated intr */ 6089 for (y = 0; y < actual; y++) { 6090 (void) ddi_intr_remove_handler(Adapter->htable[y]); 6091 (void) ddi_intr_free(Adapter->htable[y]); 6092 } 6093 6094 kmem_free(Adapter->htable, Adapter->intr_size); 6095 return (DDI_FAILURE); 6096 } 6097 6098 return (DDI_SUCCESS); 6099 } 6100 6101 static int 6102 e1000g_rem_intrs(struct e1000g *Adapter) 6103 { 6104 int x; 6105 int rc; 6106 6107 for (x = 0; x < Adapter->intr_cnt; x++) { 6108 rc = ddi_intr_remove_handler(Adapter->htable[x]); 6109 if (rc != DDI_SUCCESS) { 6110 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6111 "Remove intr handler failed: %d\n", rc); 6112 return (DDI_FAILURE); 6113 } 6114 6115 rc = ddi_intr_free(Adapter->htable[x]); 6116 if (rc != DDI_SUCCESS) { 6117 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6118 "Free intr failed: %d\n", rc); 6119 return (DDI_FAILURE); 6120 } 6121 } 6122 6123 kmem_free(Adapter->htable, Adapter->intr_size); 6124 6125 return (DDI_SUCCESS); 6126 } 6127 6128 static int 6129 e1000g_enable_intrs(struct e1000g *Adapter) 6130 { 6131 int x; 6132 int rc; 6133 6134 /* Enable interrupts */ 6135 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) { 6136 /* Call ddi_intr_block_enable() for MSI */ 6137 rc = ddi_intr_block_enable(Adapter->htable, 6138 Adapter->intr_cnt); 6139 if (rc != DDI_SUCCESS) { 6140 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6141 "Enable block intr failed: %d\n", rc); 6142 return (DDI_FAILURE); 6143 } 6144 } else { 6145 /* Call ddi_intr_enable() for Legacy/MSI non block enable */ 6146 for (x = 0; x < Adapter->intr_cnt; x++) { 6147 rc = ddi_intr_enable(Adapter->htable[x]); 6148 if (rc != DDI_SUCCESS) { 6149 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6150 "Enable intr failed: %d\n", rc); 6151 return (DDI_FAILURE); 6152 } 6153 } 6154 } 6155 6156 return (DDI_SUCCESS); 6157 } 6158 6159 static int 6160 e1000g_disable_intrs(struct e1000g *Adapter) 6161 { 6162 int x; 6163 int rc; 6164 6165 /* Disable all interrupts */ 6166 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) { 6167 rc = ddi_intr_block_disable(Adapter->htable, 6168 Adapter->intr_cnt); 6169 if (rc != DDI_SUCCESS) { 6170 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6171 "Disable block intr failed: %d\n", rc); 6172 return (DDI_FAILURE); 6173 } 6174 } else { 6175 for (x = 0; x < Adapter->intr_cnt; x++) { 6176 rc = ddi_intr_disable(Adapter->htable[x]); 6177 if (rc != DDI_SUCCESS) { 6178 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6179 "Disable intr failed: 
%d\n", rc); 6180 return (DDI_FAILURE); 6181 } 6182 } 6183 } 6184 6185 return (DDI_SUCCESS); 6186 } 6187 6188 /* 6189 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter 6190 */ 6191 static void 6192 e1000g_get_phy_state(struct e1000g *Adapter) 6193 { 6194 struct e1000_hw *hw = &Adapter->shared; 6195 6196 if (hw->phy.media_type == e1000_media_type_copper) { 6197 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl); 6198 (void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status); 6199 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, 6200 &Adapter->phy_an_adv); 6201 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, 6202 &Adapter->phy_an_exp); 6203 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, 6204 &Adapter->phy_ext_status); 6205 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, 6206 &Adapter->phy_1000t_ctrl); 6207 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, 6208 &Adapter->phy_1000t_status); 6209 (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, 6210 &Adapter->phy_lp_able); 6211 6212 Adapter->param_autoneg_cap = 6213 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0; 6214 Adapter->param_pause_cap = 6215 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; 6216 Adapter->param_asym_pause_cap = 6217 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; 6218 Adapter->param_1000fdx_cap = 6219 ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 6220 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0; 6221 Adapter->param_1000hdx_cap = 6222 ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) || 6223 (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0; 6224 Adapter->param_100t4_cap = 6225 (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0; 6226 Adapter->param_100fdx_cap = 6227 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 6228 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0; 6229 Adapter->param_100hdx_cap = 6230 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) || 6231 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0; 6232 Adapter->param_10fdx_cap = 6233 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0; 6234 Adapter->param_10hdx_cap = 6235 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0; 6236 6237 Adapter->param_adv_autoneg = hw->mac.autoneg; 6238 Adapter->param_adv_pause = 6239 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; 6240 Adapter->param_adv_asym_pause = 6241 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; 6242 Adapter->param_adv_1000hdx = 6243 (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0; 6244 Adapter->param_adv_100t4 = 6245 (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0; 6246 if (Adapter->param_adv_autoneg == 1) { 6247 Adapter->param_adv_1000fdx = 6248 (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS) 6249 ? 1 : 0; 6250 Adapter->param_adv_100fdx = 6251 (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS) 6252 ? 1 : 0; 6253 Adapter->param_adv_100hdx = 6254 (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS) 6255 ? 1 : 0; 6256 Adapter->param_adv_10fdx = 6257 (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0; 6258 Adapter->param_adv_10hdx = 6259 (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0; 6260 } 6261 6262 Adapter->param_lp_autoneg = 6263 (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0; 6264 Adapter->param_lp_pause = 6265 (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0; 6266 Adapter->param_lp_asym_pause = 6267 (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0; 6268 Adapter->param_lp_1000fdx = 6269 (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 
1 : 0; 6270 		Adapter->param_lp_1000hdx = 6271 		    (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0; 6272 		Adapter->param_lp_100t4 = 6273 		    (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0; 6274 		Adapter->param_lp_100fdx = 6275 		    (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0; 6276 		Adapter->param_lp_100hdx = 6277 		    (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0; 6278 		Adapter->param_lp_10fdx = 6279 		    (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0; 6280 		Adapter->param_lp_10hdx = 6281 		    (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0; 6282 	} else { 6283 		/* 6284 		 * A 1Gig fiber adapter only offers 1Gig full duplex, so it 6285 		 * can only link with a 1Gig full-duplex link partner. 6286 		 */ 6287 		Adapter->param_autoneg_cap = 0; 6288 		Adapter->param_pause_cap = 1; 6289 		Adapter->param_asym_pause_cap = 1; 6290 		Adapter->param_1000fdx_cap = 1; 6291 		Adapter->param_1000hdx_cap = 0; 6292 		Adapter->param_100t4_cap = 0; 6293 		Adapter->param_100fdx_cap = 0; 6294 		Adapter->param_100hdx_cap = 0; 6295 		Adapter->param_10fdx_cap = 0; 6296 		Adapter->param_10hdx_cap = 0; 6297 6298 		Adapter->param_adv_autoneg = 0; 6299 		Adapter->param_adv_pause = 1; 6300 		Adapter->param_adv_asym_pause = 1; 6301 		Adapter->param_adv_1000fdx = 1; 6302 		Adapter->param_adv_1000hdx = 0; 6303 		Adapter->param_adv_100t4 = 0; 6304 		Adapter->param_adv_100fdx = 0; 6305 		Adapter->param_adv_100hdx = 0; 6306 		Adapter->param_adv_10fdx = 0; 6307 		Adapter->param_adv_10hdx = 0; 6308 6309 		Adapter->param_lp_autoneg = 0; 6310 		Adapter->param_lp_pause = 0; 6311 		Adapter->param_lp_asym_pause = 0; 6312 		Adapter->param_lp_1000fdx = 0; 6313 		Adapter->param_lp_1000hdx = 0; 6314 		Adapter->param_lp_100t4 = 0; 6315 		Adapter->param_lp_100fdx = 0; 6316 		Adapter->param_lp_100hdx = 0; 6317 		Adapter->param_lp_10fdx = 0; 6318 		Adapter->param_lp_10hdx = 0; 6319 	} 6320 } 6321 6322 /* 6323 * FMA support 6324 */ 6325 6326 int 6327 e1000g_check_acc_handle(ddi_acc_handle_t handle) 6328 { 6329 	ddi_fm_error_t de; 6330 6331 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 6332 	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 6333 	return (de.fme_status); 6334 } 6335 6336 int 6337 e1000g_check_dma_handle(ddi_dma_handle_t handle) 6338 { 6339 	ddi_fm_error_t de; 6340 6341 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 6342 	return (de.fme_status); 6343 } 6344 6345 /* 6346 * The IO fault service error handling callback function 6347 */ 6348 /* ARGSUSED2 */ 6349 static int 6350 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 6351 { 6352 	/* 6353 	 * As the driver can always deal with an error in any DMA or 6354 	 * access handle, we can just return the fme_status value. 
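 * Before returning, the callback posts a PCI ereport so the fault is
 * still recorded by the FMA framework.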
6355 */ 6356 pci_ereport_post(dip, err, NULL); 6357 return (err->fme_status); 6358 } 6359 6360 static void 6361 e1000g_fm_init(struct e1000g *Adapter) 6362 { 6363 ddi_iblock_cookie_t iblk; 6364 int fma_dma_flag; 6365 6366 /* Only register with IO Fault Services if we have some capability */ 6367 if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 6368 e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 6369 } else { 6370 e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 6371 } 6372 6373 if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 6374 fma_dma_flag = 1; 6375 } else { 6376 fma_dma_flag = 0; 6377 } 6378 6379 (void) e1000g_set_fma_flags(fma_dma_flag); 6380 6381 if (Adapter->fm_capabilities) { 6382 6383 /* Register capabilities with IO Fault Services */ 6384 ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk); 6385 6386 /* 6387 * Initialize pci ereport capabilities if ereport capable 6388 */ 6389 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) || 6390 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6391 pci_ereport_setup(Adapter->dip); 6392 6393 /* 6394 * Register error callback if error callback capable 6395 */ 6396 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6397 ddi_fm_handler_register(Adapter->dip, 6398 e1000g_fm_error_cb, (void*) Adapter); 6399 } 6400 } 6401 6402 static void 6403 e1000g_fm_fini(struct e1000g *Adapter) 6404 { 6405 /* Only unregister FMA capabilities if we registered some */ 6406 if (Adapter->fm_capabilities) { 6407 6408 /* 6409 * Release any resources allocated by pci_ereport_setup() 6410 */ 6411 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) || 6412 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6413 pci_ereport_teardown(Adapter->dip); 6414 6415 /* 6416 * Un-register error callback if error callback capable 6417 */ 6418 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6419 ddi_fm_handler_unregister(Adapter->dip); 6420 6421 /* Unregister from IO Fault Services */ 6422 mutex_enter(&e1000g_rx_detach_lock); 6423 ddi_fm_fini(Adapter->dip); 6424 if (Adapter->priv_dip != NULL) { 6425 DEVI(Adapter->priv_dip)->devi_fmhdl = NULL; 6426 } 6427 mutex_exit(&e1000g_rx_detach_lock); 6428 } 6429 } 6430 6431 void 6432 e1000g_fm_ereport(struct e1000g *Adapter, char *detail) 6433 { 6434 uint64_t ena; 6435 char buf[FM_MAX_CLASS]; 6436 6437 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 6438 ena = fm_ena_generate(0, FM_ENA_FMT1); 6439 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) { 6440 ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP, 6441 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 6442 } 6443 } 6444 6445 /* 6446 * quiesce(9E) entry point. 6447 * 6448 * This function is called when the system is single-threaded at high 6449 * PIL with preemption disabled. Therefore, this function must not be 6450 * blocked. 6451 * 6452 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 6453 * DDI_FAILURE indicates an error condition and should almost never happen. 
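 * The implementation below takes no locks: it simply clears all
 * interrupts, resets the MAC and zeroes the Tx/Rx head and tail
 * descriptor pointers so the device initiates no further DMA.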
6454 */ 6455 static int 6456 e1000g_quiesce(dev_info_t *devinfo) 6457 { 6458 struct e1000g *Adapter; 6459 6460 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 6461 6462 if (Adapter == NULL) 6463 return (DDI_FAILURE); 6464 6465 e1000g_clear_all_interrupts(Adapter); 6466 6467 (void) e1000_reset_hw(&Adapter->shared); 6468 6469 /* Setup our HW Tx Head & Tail descriptor pointers */ 6470 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0); 6471 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0); 6472 6473 /* Setup our HW Rx Head & Tail descriptor pointers */ 6474 E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0); 6475 E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0); 6476 6477 return (DDI_SUCCESS); 6478 } 6479 6480 /* 6481 * synchronize the adv* and en* parameters. 6482 * 6483 * See comments in <sys/dld.h> for details of the *_en_* 6484 * parameters. The usage of ndd for setting adv parameters will 6485 * synchronize all the en parameters with the e1000g parameters, 6486 * implicitly disabling any settings made via dladm. 6487 */ 6488 static void 6489 e1000g_param_sync(struct e1000g *Adapter) 6490 { 6491 Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx; 6492 Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx; 6493 Adapter->param_en_100fdx = Adapter->param_adv_100fdx; 6494 Adapter->param_en_100hdx = Adapter->param_adv_100hdx; 6495 Adapter->param_en_10fdx = Adapter->param_adv_10fdx; 6496 Adapter->param_en_10hdx = Adapter->param_adv_10hdx; 6497 } 6498 6499 /* 6500 * e1000g_get_driver_control - tell manageability firmware that the driver 6501 * has control. 6502 */ 6503 static void 6504 e1000g_get_driver_control(struct e1000_hw *hw) 6505 { 6506 uint32_t ctrl_ext; 6507 uint32_t swsm; 6508 6509 /* tell manageability firmware the driver has taken over */ 6510 switch (hw->mac.type) { 6511 case e1000_82573: 6512 swsm = E1000_READ_REG(hw, E1000_SWSM); 6513 E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD); 6514 break; 6515 case e1000_82571: 6516 case e1000_82572: 6517 case e1000_82574: 6518 case e1000_80003es2lan: 6519 case e1000_ich8lan: 6520 case e1000_ich9lan: 6521 case e1000_ich10lan: 6522 case e1000_pchlan: 6523 case e1000_pch2lan: 6524 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 6525 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 6526 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 6527 break; 6528 default: 6529 /* no manageability firmware: do nothing */ 6530 break; 6531 } 6532 } 6533 6534 /* 6535 * e1000g_release_driver_control - tell manageability firmware that the driver 6536 * has released control. 6537 */ 6538 static void 6539 e1000g_release_driver_control(struct e1000_hw *hw) 6540 { 6541 uint32_t ctrl_ext; 6542 uint32_t swsm; 6543 6544 /* tell manageability firmware the driver has released control */ 6545 switch (hw->mac.type) { 6546 case e1000_82573: 6547 swsm = E1000_READ_REG(hw, E1000_SWSM); 6548 E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD); 6549 break; 6550 case e1000_82571: 6551 case e1000_82572: 6552 case e1000_82574: 6553 case e1000_80003es2lan: 6554 case e1000_ich8lan: 6555 case e1000_ich9lan: 6556 case e1000_ich10lan: 6557 case e1000_pchlan: 6558 case e1000_pch2lan: 6559 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 6560 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 6561 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 6562 break; 6563 default: 6564 /* no manageability firmware: do nothing */ 6565 break; 6566 } 6567 } 6568 6569 /* 6570 * Restore e1000g promiscuous mode. 
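 * If promiscuous mode was enabled (Adapter->e1000g_promisc), re-assert
 * the unicast promiscuous (UPE), multicast promiscuous (MPE) and
 * broadcast accept (BAM) bits in RCTL, which are typically lost when
 * the chip is reset.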
6571 */ 6572 static void 6573 e1000g_restore_promisc(struct e1000g *Adapter) 6574 { 6575 if (Adapter->e1000g_promisc) { 6576 uint32_t rctl; 6577 6578 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL); 6579 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM); 6580 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl); 6581 } 6582 } 6583