1 /* 2 * This file is provided under a CDDLv1 license. When using or 3 * redistributing this file, you may do so under this license. 4 * In redistributing this file this license must be included 5 * and no other modification of this header file is permitted. 6 * 7 * CDDL LICENSE SUMMARY 8 * 9 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved. 10 * 11 * The contents of this file are subject to the terms of Version 12 * 1.0 of the Common Development and Distribution License (the "License"). 13 * 14 * You should have received a copy of the License with this software. 15 * You can obtain a copy of the License at 16 * http://www.opensolaris.org/os/licensing. 17 * See the License for the specific language governing permissions 18 * and limitations under the License. 19 */ 20 21 /* 22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 25 /* 26 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved. 27 * Copyright 2013 Nexenta Systems, Inc. All rights reserved. 28 */ 29 30 /* 31 * ********************************************************************** 32 * * 33 * Module Name: * 34 * e1000g_main.c * 35 * * 36 * Abstract: * 37 * This file contains the interface routines for the solaris OS. * 38 * It has all DDI entry point routines and GLD entry point routines. * 39 * * 40 * This file also contains routines that take care of initialization * 41 * uninit routine and interrupt routine. * 42 * * 43 * ********************************************************************** 44 */ 45 46 #include <sys/dlpi.h> 47 #include <sys/mac.h> 48 #include "e1000g_sw.h" 49 #include "e1000g_debug.h" 50 51 static char ident[] = "Intel PRO/1000 Ethernet"; 52 /* LINTED E_STATIC_UNUSED */ 53 static char e1000g_version[] = "Driver Ver. 5.3.24"; 54 55 /* 56 * Proto types for DDI entry points 57 */ 58 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t); 59 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t); 60 static int e1000g_quiesce(dev_info_t *); 61 62 /* 63 * init and intr routines prototype 64 */ 65 static int e1000g_resume(dev_info_t *); 66 static int e1000g_suspend(dev_info_t *); 67 static uint_t e1000g_intr_pciexpress(caddr_t); 68 static uint_t e1000g_intr(caddr_t); 69 static void e1000g_intr_work(struct e1000g *, uint32_t); 70 #pragma inline(e1000g_intr_work) 71 static int e1000g_init(struct e1000g *); 72 static int e1000g_start(struct e1000g *, boolean_t); 73 static void e1000g_stop(struct e1000g *, boolean_t); 74 static int e1000g_m_start(void *); 75 static void e1000g_m_stop(void *); 76 static int e1000g_m_promisc(void *, boolean_t); 77 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *); 78 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *); 79 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *); 80 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t, 81 uint_t, const void *); 82 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t, 83 uint_t, void *); 84 static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t, 85 mac_prop_info_handle_t); 86 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t, 87 const void *); 88 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *); 89 static void e1000g_init_locks(struct e1000g *); 90 static void e1000g_destroy_locks(struct e1000g *); 91 static int e1000g_identify_hardware(struct e1000g *); 92 static int e1000g_regs_map(struct e1000g *); 93 static int e1000g_set_driver_params(struct e1000g *); 94 static void e1000g_set_bufsize(struct e1000g *); 95 static int e1000g_register_mac(struct e1000g *); 96 static boolean_t e1000g_rx_drain(struct e1000g *); 97 static boolean_t e1000g_tx_drain(struct e1000g *); 98 static void e1000g_init_unicst(struct e1000g *); 99 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int); 100 static int e1000g_alloc_rx_data(struct e1000g *); 101 static void e1000g_release_multicast(struct e1000g *); 102 static void e1000g_pch_limits(struct e1000g *); 103 static uint32_t e1000g_mtu2maxframe(uint32_t); 104 105 /* 106 * Local routines 107 */ 108 static boolean_t e1000g_reset_adapter(struct e1000g *); 109 static void e1000g_tx_clean(struct e1000g *); 110 static void e1000g_rx_clean(struct e1000g *); 111 static void e1000g_link_timer(void *); 112 static void e1000g_local_timer(void *); 113 static boolean_t e1000g_link_check(struct e1000g *); 114 static boolean_t e1000g_stall_check(struct e1000g *); 115 static void e1000g_smartspeed(struct e1000g *); 116 static void e1000g_get_conf(struct e1000g *); 117 static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int, 118 int *); 119 static void enable_watchdog_timer(struct e1000g *); 120 static void disable_watchdog_timer(struct e1000g *); 121 static void start_watchdog_timer(struct e1000g *); 122 static void restart_watchdog_timer(struct e1000g *); 123 static void stop_watchdog_timer(struct e1000g *); 124 static void stop_link_timer(struct e1000g *); 125 static void stop_82547_timer(e1000g_tx_ring_t *); 126 static void e1000g_force_speed_duplex(struct e1000g *); 127 static void e1000g_setup_max_mtu(struct e1000g *); 128 static void e1000g_get_max_frame_size(struct e1000g *); 129 static boolean_t is_valid_mac_addr(uint8_t *); 130 static void e1000g_unattach(dev_info_t *, struct e1000g *); 131 static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *); 132 #ifdef E1000G_DEBUG 133 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *); 134 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *); 135 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *); 136 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *); 137 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *, 138 struct iocblk *, mblk_t *); 139 #endif 140 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *, 141 struct iocblk *, mblk_t *); 142 static boolean_t e1000g_check_loopback_support(struct e1000_hw *); 143 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t); 144 static void e1000g_set_internal_loopback(struct e1000g *); 145 static void e1000g_set_external_loopback_1000(struct e1000g *); 146 static void e1000g_set_external_loopback_100(struct e1000g *); 147 static void e1000g_set_external_loopback_10(struct e1000g *); 148 static int e1000g_add_intrs(struct e1000g *); 149 static int e1000g_intr_add(struct e1000g *, int); 150 static int e1000g_rem_intrs(struct e1000g *); 151 static int e1000g_enable_intrs(struct e1000g *); 152 static int e1000g_disable_intrs(struct e1000g *); 153 static boolean_t e1000g_link_up(struct e1000g *); 154 #ifdef __sparc 155 static boolean_t e1000g_find_mac_address(struct e1000g *); 156 #endif 157 static void e1000g_get_phy_state(struct e1000g *); 158 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 159 const void *impl_data); 160 static void e1000g_fm_init(struct e1000g *Adapter); 161 static void e1000g_fm_fini(struct e1000g *Adapter); 162 static void e1000g_param_sync(struct e1000g *); 163 static void e1000g_get_driver_control(struct e1000_hw *); 164 static void e1000g_release_driver_control(struct e1000_hw *); 165 static void e1000g_restore_promisc(struct e1000g *Adapter); 166 167 char *e1000g_priv_props[] = { 168 "_tx_bcopy_threshold", 169 "_tx_interrupt_enable", 170 "_tx_intr_delay", 171 "_tx_intr_abs_delay", 172 "_rx_bcopy_threshold", 173 "_max_num_rcv_packets", 174 "_rx_intr_delay", 175 "_rx_intr_abs_delay", 176 "_intr_throttling_rate", 177 "_intr_adaptive", 178 "_adv_pause_cap", 179 "_adv_asym_pause_cap", 180 NULL 181 }; 182 183 static struct cb_ops cb_ws_ops = { 184 nulldev, /* cb_open */ 185 nulldev, /* cb_close */ 186 nodev, /* cb_strategy */ 187 nodev, /* cb_print */ 188 nodev, /* cb_dump */ 189 nodev, /* cb_read */ 190 nodev, /* cb_write */ 191 nodev, /* cb_ioctl */ 192 nodev, /* cb_devmap */ 193 nodev, /* cb_mmap */ 194 nodev, /* cb_segmap */ 195 nochpoll, /* cb_chpoll */ 196 ddi_prop_op, /* cb_prop_op */ 197 NULL, /* cb_stream */ 198 D_MP | D_HOTPLUG, /* cb_flag */ 199 CB_REV, /* cb_rev */ 200 nodev, /* cb_aread */ 201 nodev /* cb_awrite */ 202 }; 203 204 static struct dev_ops ws_ops = { 205 DEVO_REV, /* devo_rev */ 206 0, /* devo_refcnt */ 207 NULL, /* devo_getinfo */ 208 nulldev, /* devo_identify */ 209 nulldev, /* devo_probe */ 210 e1000g_attach, /* devo_attach */ 211 e1000g_detach, /* devo_detach */ 212 nodev, /* devo_reset */ 213 &cb_ws_ops, /* devo_cb_ops */ 214 NULL, /* devo_bus_ops */ 215 ddi_power, /* devo_power */ 216 e1000g_quiesce /* devo_quiesce */ 217 }; 218 219 static struct modldrv modldrv = { 220 &mod_driverops, /* Type of module. This one is a driver */ 221 ident, /* Discription string */ 222 &ws_ops, /* driver ops */ 223 }; 224 225 static struct modlinkage modlinkage = { 226 MODREV_1, &modldrv, NULL 227 }; 228 229 /* Access attributes for register mapping */ 230 static ddi_device_acc_attr_t e1000g_regs_acc_attr = { 231 DDI_DEVICE_ATTR_V1, 232 DDI_STRUCTURE_LE_ACC, 233 DDI_STRICTORDER_ACC, 234 DDI_FLAGERR_ACC 235 }; 236 237 #define E1000G_M_CALLBACK_FLAGS \ 238 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO) 239 240 static mac_callbacks_t e1000g_m_callbacks = { 241 E1000G_M_CALLBACK_FLAGS, 242 e1000g_m_stat, 243 e1000g_m_start, 244 e1000g_m_stop, 245 e1000g_m_promisc, 246 e1000g_m_multicst, 247 NULL, 248 e1000g_m_tx, 249 NULL, 250 e1000g_m_ioctl, 251 e1000g_m_getcapab, 252 NULL, 253 NULL, 254 e1000g_m_setprop, 255 e1000g_m_getprop, 256 e1000g_m_propinfo 257 }; 258 259 /* 260 * Global variables 261 */ 262 uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K; 263 uint32_t e1000g_mblks_pending = 0; 264 /* 265 * Workaround for Dynamic Reconfiguration support, for x86 platform only. 266 * Here we maintain a private dev_info list if e1000g_force_detach is 267 * enabled. If we force the driver to detach while there are still some 268 * rx buffers retained in the upper layer, we have to keep a copy of the 269 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data 270 * structure will be freed after the driver is detached. However when we 271 * finally free those rx buffers released by the upper layer, we need to 272 * refer to the dev_info to free the dma buffers. So we save a copy of 273 * the dev_info for this purpose. On x86 platform, we assume this copy 274 * of dev_info is always valid, but on SPARC platform, it could be invalid 275 * after the system board level DR operation. For this reason, the global 276 * variable e1000g_force_detach must be B_FALSE on SPARC platform. 277 */ 278 #ifdef __sparc 279 boolean_t e1000g_force_detach = B_FALSE; 280 #else 281 boolean_t e1000g_force_detach = B_TRUE; 282 #endif 283 private_devi_list_t *e1000g_private_devi_list = NULL; 284 285 /* 286 * The mutex e1000g_rx_detach_lock is defined to protect the processing of 287 * the private dev_info list, and to serialize the processing of rx buffer 288 * freeing and rx buffer recycling. 289 */ 290 kmutex_t e1000g_rx_detach_lock; 291 /* 292 * The rwlock e1000g_dma_type_lock is defined to protect the global flag 293 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA". 294 * If there are many e1000g instances, the system may run out of DVMA 295 * resources during the initialization of the instances, then the flag will 296 * be changed to "USE_DMA". Because different e1000g instances are initialized 297 * in parallel, we need to use this lock to protect the flag. 298 */ 299 krwlock_t e1000g_dma_type_lock; 300 301 /* 302 * The 82546 chipset is a dual-port device, both the ports share one eeprom. 303 * Based on the information from Intel, the 82546 chipset has some hardware 304 * problem. When one port is being reset and the other port is trying to 305 * access the eeprom, it could cause system hang or panic. To workaround this 306 * hardware problem, we use a global mutex to prevent such operations from 307 * happening simultaneously on different instances. This workaround is applied 308 * to all the devices supported by this driver. 309 */ 310 kmutex_t e1000g_nvm_lock; 311 312 /* 313 * Loadable module configuration entry points for the driver 314 */ 315 316 /* 317 * _init - module initialization 318 */ 319 int 320 _init(void) 321 { 322 int status; 323 324 mac_init_ops(&ws_ops, WSNAME); 325 status = mod_install(&modlinkage); 326 if (status != DDI_SUCCESS) 327 mac_fini_ops(&ws_ops); 328 else { 329 mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL); 330 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL); 331 mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL); 332 } 333 334 return (status); 335 } 336 337 /* 338 * _fini - module finalization 339 */ 340 int 341 _fini(void) 342 { 343 int status; 344 345 if (e1000g_mblks_pending != 0) 346 return (EBUSY); 347 348 status = mod_remove(&modlinkage); 349 if (status == DDI_SUCCESS) { 350 mac_fini_ops(&ws_ops); 351 352 if (e1000g_force_detach) { 353 private_devi_list_t *devi_node; 354 355 mutex_enter(&e1000g_rx_detach_lock); 356 while (e1000g_private_devi_list != NULL) { 357 devi_node = e1000g_private_devi_list; 358 e1000g_private_devi_list = 359 e1000g_private_devi_list->next; 360 361 kmem_free(devi_node->priv_dip, 362 sizeof (struct dev_info)); 363 kmem_free(devi_node, 364 sizeof (private_devi_list_t)); 365 } 366 mutex_exit(&e1000g_rx_detach_lock); 367 } 368 369 mutex_destroy(&e1000g_rx_detach_lock); 370 rw_destroy(&e1000g_dma_type_lock); 371 mutex_destroy(&e1000g_nvm_lock); 372 } 373 374 return (status); 375 } 376 377 /* 378 * _info - module information 379 */ 380 int 381 _info(struct modinfo *modinfop) 382 { 383 return (mod_info(&modlinkage, modinfop)); 384 } 385 386 /* 387 * e1000g_attach - driver attach 388 * 389 * This function is the device-specific initialization entry 390 * point. This entry point is required and must be written. 391 * The DDI_ATTACH command must be provided in the attach entry 392 * point. When attach() is called with cmd set to DDI_ATTACH, 393 * all normal kernel services (such as kmem_alloc(9F)) are 394 * available for use by the driver. 395 * 396 * The attach() function will be called once for each instance 397 * of the device on the system with cmd set to DDI_ATTACH. 398 * Until attach() succeeds, the only driver entry points which 399 * may be called are open(9E) and getinfo(9E). 400 */ 401 static int 402 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 403 { 404 struct e1000g *Adapter; 405 struct e1000_hw *hw; 406 struct e1000g_osdep *osdep; 407 int instance; 408 409 switch (cmd) { 410 default: 411 e1000g_log(NULL, CE_WARN, 412 "Unsupported command send to e1000g_attach... "); 413 return (DDI_FAILURE); 414 415 case DDI_RESUME: 416 return (e1000g_resume(devinfo)); 417 418 case DDI_ATTACH: 419 break; 420 } 421 422 /* 423 * get device instance number 424 */ 425 instance = ddi_get_instance(devinfo); 426 427 /* 428 * Allocate soft data structure 429 */ 430 Adapter = 431 (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP); 432 433 Adapter->dip = devinfo; 434 Adapter->instance = instance; 435 Adapter->tx_ring->adapter = Adapter; 436 Adapter->rx_ring->adapter = Adapter; 437 438 hw = &Adapter->shared; 439 osdep = &Adapter->osdep; 440 hw->back = osdep; 441 osdep->adapter = Adapter; 442 443 ddi_set_driver_private(devinfo, (caddr_t)Adapter); 444 445 /* 446 * Initialize for fma support 447 */ 448 (void) e1000g_get_prop(Adapter, "fm-capable", 449 0, 0x0f, 450 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 451 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE, 452 &Adapter->fm_capabilities); 453 e1000g_fm_init(Adapter); 454 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT; 455 456 /* 457 * PCI Configure 458 */ 459 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { 460 e1000g_log(Adapter, CE_WARN, "PCI configuration failed"); 461 goto attach_fail; 462 } 463 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; 464 465 /* 466 * Setup hardware 467 */ 468 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) { 469 e1000g_log(Adapter, CE_WARN, "Identify hardware failed"); 470 goto attach_fail; 471 } 472 473 /* 474 * Map in the device registers. 475 */ 476 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) { 477 e1000g_log(Adapter, CE_WARN, "Mapping registers failed"); 478 goto attach_fail; 479 } 480 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP; 481 482 /* 483 * Initialize driver parameters 484 */ 485 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) { 486 goto attach_fail; 487 } 488 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP; 489 490 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 491 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 492 goto attach_fail; 493 } 494 495 /* 496 * Initialize interrupts 497 */ 498 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) { 499 e1000g_log(Adapter, CE_WARN, "Add interrupts failed"); 500 goto attach_fail; 501 } 502 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 503 504 /* 505 * Initialize mutex's for this device. 506 * Do this before enabling the interrupt handler and 507 * register the softint to avoid the condition where 508 * interrupt handler can try using uninitialized mutex 509 */ 510 e1000g_init_locks(Adapter); 511 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS; 512 513 /* 514 * Initialize Driver Counters 515 */ 516 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) { 517 e1000g_log(Adapter, CE_WARN, "Init stats failed"); 518 goto attach_fail; 519 } 520 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS; 521 522 /* 523 * Initialize chip hardware and software structures 524 */ 525 rw_enter(&Adapter->chip_lock, RW_WRITER); 526 if (e1000g_init(Adapter) != DDI_SUCCESS) { 527 rw_exit(&Adapter->chip_lock); 528 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed"); 529 goto attach_fail; 530 } 531 rw_exit(&Adapter->chip_lock); 532 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 533 534 /* 535 * Register the driver to the MAC 536 */ 537 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) { 538 e1000g_log(Adapter, CE_WARN, "Register MAC failed"); 539 goto attach_fail; 540 } 541 Adapter->attach_progress |= ATTACH_PROGRESS_MAC; 542 543 /* 544 * Now that mutex locks are initialized, and the chip is also 545 * initialized, enable interrupts. 546 */ 547 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) { 548 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed"); 549 goto attach_fail; 550 } 551 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 552 553 /* 554 * If e1000g_force_detach is enabled, in global private dip list, 555 * we will create a new entry, which maintains the priv_dip for DR 556 * supports after driver detached. 557 */ 558 if (e1000g_force_detach) { 559 private_devi_list_t *devi_node; 560 561 Adapter->priv_dip = 562 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP); 563 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip), 564 sizeof (struct dev_info)); 565 566 devi_node = 567 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP); 568 569 mutex_enter(&e1000g_rx_detach_lock); 570 devi_node->priv_dip = Adapter->priv_dip; 571 devi_node->flag = E1000G_PRIV_DEVI_ATTACH; 572 devi_node->pending_rx_count = 0; 573 574 Adapter->priv_devi_node = devi_node; 575 576 if (e1000g_private_devi_list == NULL) { 577 devi_node->prev = NULL; 578 devi_node->next = NULL; 579 e1000g_private_devi_list = devi_node; 580 } else { 581 devi_node->prev = NULL; 582 devi_node->next = e1000g_private_devi_list; 583 e1000g_private_devi_list->prev = devi_node; 584 e1000g_private_devi_list = devi_node; 585 } 586 mutex_exit(&e1000g_rx_detach_lock); 587 } 588 589 Adapter->e1000g_state = E1000G_INITIALIZED; 590 return (DDI_SUCCESS); 591 592 attach_fail: 593 e1000g_unattach(devinfo, Adapter); 594 return (DDI_FAILURE); 595 } 596 597 static int 598 e1000g_register_mac(struct e1000g *Adapter) 599 { 600 struct e1000_hw *hw = &Adapter->shared; 601 mac_register_t *mac; 602 int err; 603 604 if ((mac = mac_alloc(MAC_VERSION)) == NULL) 605 return (DDI_FAILURE); 606 607 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 608 mac->m_driver = Adapter; 609 mac->m_dip = Adapter->dip; 610 mac->m_src_addr = hw->mac.addr; 611 mac->m_callbacks = &e1000g_m_callbacks; 612 mac->m_min_sdu = 0; 613 mac->m_max_sdu = Adapter->default_mtu; 614 mac->m_margin = VLAN_TAGSZ; 615 mac->m_priv_props = e1000g_priv_props; 616 mac->m_v12n = MAC_VIRT_LEVEL1; 617 618 err = mac_register(mac, &Adapter->mh); 619 mac_free(mac); 620 621 return (err == 0 ? DDI_SUCCESS : DDI_FAILURE); 622 } 623 624 static int 625 e1000g_identify_hardware(struct e1000g *Adapter) 626 { 627 struct e1000_hw *hw = &Adapter->shared; 628 struct e1000g_osdep *osdep = &Adapter->osdep; 629 630 /* Get the device id */ 631 hw->vendor_id = 632 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID); 633 hw->device_id = 634 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID); 635 hw->revision_id = 636 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID); 637 hw->subsystem_device_id = 638 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID); 639 hw->subsystem_vendor_id = 640 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID); 641 642 if (e1000_set_mac_type(hw) != E1000_SUCCESS) { 643 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 644 "MAC type could not be set properly."); 645 return (DDI_FAILURE); 646 } 647 648 return (DDI_SUCCESS); 649 } 650 651 static int 652 e1000g_regs_map(struct e1000g *Adapter) 653 { 654 dev_info_t *devinfo = Adapter->dip; 655 struct e1000_hw *hw = &Adapter->shared; 656 struct e1000g_osdep *osdep = &Adapter->osdep; 657 off_t mem_size; 658 bar_info_t bar_info; 659 int offset, rnumber; 660 661 rnumber = ADAPTER_REG_SET; 662 /* Get size of adapter register memory */ 663 if (ddi_dev_regsize(devinfo, rnumber, &mem_size) != 664 DDI_SUCCESS) { 665 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 666 "ddi_dev_regsize for registers failed"); 667 return (DDI_FAILURE); 668 } 669 670 /* Map adapter register memory */ 671 if ((ddi_regs_map_setup(devinfo, rnumber, 672 (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr, 673 &osdep->reg_handle)) != DDI_SUCCESS) { 674 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 675 "ddi_regs_map_setup for registers failed"); 676 goto regs_map_fail; 677 } 678 679 /* ICH needs to map flash memory */ 680 switch (hw->mac.type) { 681 case e1000_ich8lan: 682 case e1000_ich9lan: 683 case e1000_ich10lan: 684 case e1000_pchlan: 685 case e1000_pch2lan: 686 case e1000_pch_lpt: 687 rnumber = ICH_FLASH_REG_SET; 688 689 /* get flash size */ 690 if (ddi_dev_regsize(devinfo, rnumber, 691 &mem_size) != DDI_SUCCESS) { 692 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 693 "ddi_dev_regsize for ICH flash failed"); 694 goto regs_map_fail; 695 } 696 697 /* map flash in */ 698 if (ddi_regs_map_setup(devinfo, rnumber, 699 (caddr_t *)&hw->flash_address, 0, 700 mem_size, &e1000g_regs_acc_attr, 701 &osdep->ich_flash_handle) != DDI_SUCCESS) { 702 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 703 "ddi_regs_map_setup for ICH flash failed"); 704 goto regs_map_fail; 705 } 706 break; 707 default: 708 break; 709 } 710 711 /* map io space */ 712 switch (hw->mac.type) { 713 case e1000_82544: 714 case e1000_82540: 715 case e1000_82545: 716 case e1000_82546: 717 case e1000_82541: 718 case e1000_82541_rev_2: 719 /* find the IO bar */ 720 rnumber = -1; 721 for (offset = PCI_CONF_BASE1; 722 offset <= PCI_CONF_BASE5; offset += 4) { 723 if (e1000g_get_bar_info(devinfo, offset, &bar_info) 724 != DDI_SUCCESS) 725 continue; 726 if (bar_info.type == E1000G_BAR_IO) { 727 rnumber = bar_info.rnumber; 728 break; 729 } 730 } 731 732 if (rnumber < 0) { 733 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 734 "No io space is found"); 735 goto regs_map_fail; 736 } 737 738 /* get io space size */ 739 if (ddi_dev_regsize(devinfo, rnumber, 740 &mem_size) != DDI_SUCCESS) { 741 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 742 "ddi_dev_regsize for io space failed"); 743 goto regs_map_fail; 744 } 745 746 /* map io space */ 747 if ((ddi_regs_map_setup(devinfo, rnumber, 748 (caddr_t *)&hw->io_base, 0, mem_size, 749 &e1000g_regs_acc_attr, 750 &osdep->io_reg_handle)) != DDI_SUCCESS) { 751 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 752 "ddi_regs_map_setup for io space failed"); 753 goto regs_map_fail; 754 } 755 break; 756 default: 757 hw->io_base = 0; 758 break; 759 } 760 761 return (DDI_SUCCESS); 762 763 regs_map_fail: 764 if (osdep->reg_handle != NULL) 765 ddi_regs_map_free(&osdep->reg_handle); 766 if (osdep->ich_flash_handle != NULL) 767 ddi_regs_map_free(&osdep->ich_flash_handle); 768 return (DDI_FAILURE); 769 } 770 771 static int 772 e1000g_set_driver_params(struct e1000g *Adapter) 773 { 774 struct e1000_hw *hw; 775 776 hw = &Adapter->shared; 777 778 /* Set MAC type and initialize hardware functions */ 779 if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) { 780 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 781 "Could not setup hardware functions"); 782 return (DDI_FAILURE); 783 } 784 785 /* Get bus information */ 786 if (e1000_get_bus_info(hw) != E1000_SUCCESS) { 787 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 788 "Could not get bus information"); 789 return (DDI_FAILURE); 790 } 791 792 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word); 793 794 hw->mac.autoneg_failed = B_TRUE; 795 796 /* Set the autoneg_wait_to_complete flag to B_FALSE */ 797 hw->phy.autoneg_wait_to_complete = B_FALSE; 798 799 /* Adaptive IFS related changes */ 800 hw->mac.adaptive_ifs = B_TRUE; 801 802 /* Enable phy init script for IGP phy of 82541/82547 */ 803 if ((hw->mac.type == e1000_82547) || 804 (hw->mac.type == e1000_82541) || 805 (hw->mac.type == e1000_82547_rev_2) || 806 (hw->mac.type == e1000_82541_rev_2)) 807 e1000_init_script_state_82541(hw, B_TRUE); 808 809 /* Enable the TTL workaround for 82541/82547 */ 810 e1000_set_ttl_workaround_state_82541(hw, B_TRUE); 811 812 #ifdef __sparc 813 Adapter->strip_crc = B_TRUE; 814 #else 815 Adapter->strip_crc = B_FALSE; 816 #endif 817 818 /* setup the maximum MTU size of the chip */ 819 e1000g_setup_max_mtu(Adapter); 820 821 /* Get speed/duplex settings in conf file */ 822 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL; 823 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 824 e1000g_force_speed_duplex(Adapter); 825 826 /* Get Jumbo Frames settings in conf file */ 827 e1000g_get_max_frame_size(Adapter); 828 829 /* Get conf file properties */ 830 e1000g_get_conf(Adapter); 831 832 /* enforce PCH limits */ 833 e1000g_pch_limits(Adapter); 834 835 /* Set Rx/Tx buffer size */ 836 e1000g_set_bufsize(Adapter); 837 838 /* Master Latency Timer */ 839 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER; 840 841 /* copper options */ 842 if (hw->phy.media_type == e1000_media_type_copper) { 843 hw->phy.mdix = 0; /* AUTO_ALL_MODES */ 844 hw->phy.disable_polarity_correction = B_FALSE; 845 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */ 846 } 847 848 /* The initial link state should be "unknown" */ 849 Adapter->link_state = LINK_STATE_UNKNOWN; 850 851 /* Initialize rx parameters */ 852 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY; 853 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY; 854 855 /* Initialize tx parameters */ 856 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE; 857 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD; 858 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY; 859 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY; 860 861 /* Initialize rx parameters */ 862 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD; 863 864 return (DDI_SUCCESS); 865 } 866 867 static void 868 e1000g_setup_max_mtu(struct e1000g *Adapter) 869 { 870 struct e1000_mac_info *mac = &Adapter->shared.mac; 871 struct e1000_phy_info *phy = &Adapter->shared.phy; 872 873 switch (mac->type) { 874 /* types that do not support jumbo frames */ 875 case e1000_ich8lan: 876 case e1000_82573: 877 case e1000_82583: 878 Adapter->max_mtu = ETHERMTU; 879 break; 880 /* ich9 supports jumbo frames except on one phy type */ 881 case e1000_ich9lan: 882 if (phy->type == e1000_phy_ife) 883 Adapter->max_mtu = ETHERMTU; 884 else 885 Adapter->max_mtu = MAXIMUM_MTU_9K; 886 break; 887 /* pch can do jumbo frames up to 4K */ 888 case e1000_pchlan: 889 Adapter->max_mtu = MAXIMUM_MTU_4K; 890 break; 891 /* pch2 can do jumbo frames up to 9K */ 892 case e1000_pch2lan: 893 case e1000_pch_lpt: 894 Adapter->max_mtu = MAXIMUM_MTU_9K; 895 break; 896 /* types with a special limit */ 897 case e1000_82571: 898 case e1000_82572: 899 case e1000_82574: 900 case e1000_80003es2lan: 901 case e1000_ich10lan: 902 if (e1000g_jumbo_mtu >= ETHERMTU && 903 e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) { 904 Adapter->max_mtu = e1000g_jumbo_mtu; 905 } else { 906 Adapter->max_mtu = MAXIMUM_MTU_9K; 907 } 908 break; 909 /* default limit is 16K */ 910 default: 911 Adapter->max_mtu = FRAME_SIZE_UPTO_16K - 912 sizeof (struct ether_vlan_header) - ETHERFCSL; 913 break; 914 } 915 } 916 917 static void 918 e1000g_set_bufsize(struct e1000g *Adapter) 919 { 920 struct e1000_mac_info *mac = &Adapter->shared.mac; 921 uint64_t rx_size; 922 uint64_t tx_size; 923 924 dev_info_t *devinfo = Adapter->dip; 925 #ifdef __sparc 926 ulong_t iommu_pagesize; 927 #endif 928 /* Get the system page size */ 929 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1); 930 931 #ifdef __sparc 932 iommu_pagesize = dvma_pagesize(devinfo); 933 if (iommu_pagesize != 0) { 934 if (Adapter->sys_page_sz == iommu_pagesize) { 935 if (iommu_pagesize > 0x4000) 936 Adapter->sys_page_sz = 0x4000; 937 } else { 938 if (Adapter->sys_page_sz > iommu_pagesize) 939 Adapter->sys_page_sz = iommu_pagesize; 940 } 941 } 942 if (Adapter->lso_enable) { 943 Adapter->dvma_page_num = E1000_LSO_MAXLEN / 944 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; 945 } else { 946 Adapter->dvma_page_num = Adapter->max_frame_size / 947 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; 948 } 949 ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM); 950 #endif 951 952 Adapter->min_frame_size = ETHERMIN + ETHERFCSL; 953 954 if (Adapter->mem_workaround_82546 && 955 ((mac->type == e1000_82545) || 956 (mac->type == e1000_82546) || 957 (mac->type == e1000_82546_rev_3))) { 958 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K; 959 } else { 960 rx_size = Adapter->max_frame_size; 961 if ((rx_size > FRAME_SIZE_UPTO_2K) && 962 (rx_size <= FRAME_SIZE_UPTO_4K)) 963 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K; 964 else if ((rx_size > FRAME_SIZE_UPTO_4K) && 965 (rx_size <= FRAME_SIZE_UPTO_8K)) 966 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K; 967 else if ((rx_size > FRAME_SIZE_UPTO_8K) && 968 (rx_size <= FRAME_SIZE_UPTO_16K)) 969 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K; 970 else 971 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K; 972 } 973 Adapter->rx_buffer_size += E1000G_IPALIGNROOM; 974 975 tx_size = Adapter->max_frame_size; 976 if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K)) 977 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K; 978 else if ((tx_size > FRAME_SIZE_UPTO_4K) && 979 (tx_size <= FRAME_SIZE_UPTO_8K)) 980 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K; 981 else if ((tx_size > FRAME_SIZE_UPTO_8K) && 982 (tx_size <= FRAME_SIZE_UPTO_16K)) 983 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K; 984 else 985 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K; 986 987 /* 988 * For Wiseman adapters we have an requirement of having receive 989 * buffers aligned at 256 byte boundary. Since Livengood does not 990 * require this and forcing it for all hardwares will have 991 * performance implications, I am making it applicable only for 992 * Wiseman and for Jumbo frames enabled mode as rest of the time, 993 * it is okay to have normal frames...but it does involve a 994 * potential risk where we may loose data if buffer is not 995 * aligned...so all wiseman boards to have 256 byte aligned 996 * buffers 997 */ 998 if (mac->type < e1000_82543) 999 Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE; 1000 else 1001 Adapter->rx_buf_align = 1; 1002 } 1003 1004 /* 1005 * e1000g_detach - driver detach 1006 * 1007 * The detach() function is the complement of the attach routine. 1008 * If cmd is set to DDI_DETACH, detach() is used to remove the 1009 * state associated with a given instance of a device node 1010 * prior to the removal of that instance from the system. 1011 * 1012 * The detach() function will be called once for each instance 1013 * of the device for which there has been a successful attach() 1014 * once there are no longer any opens on the device. 1015 * 1016 * Interrupts routine are disabled, All memory allocated by this 1017 * driver are freed. 1018 */ 1019 static int 1020 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 1021 { 1022 struct e1000g *Adapter; 1023 boolean_t rx_drain; 1024 1025 switch (cmd) { 1026 default: 1027 return (DDI_FAILURE); 1028 1029 case DDI_SUSPEND: 1030 return (e1000g_suspend(devinfo)); 1031 1032 case DDI_DETACH: 1033 break; 1034 } 1035 1036 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1037 if (Adapter == NULL) 1038 return (DDI_FAILURE); 1039 1040 rx_drain = e1000g_rx_drain(Adapter); 1041 if (!rx_drain && !e1000g_force_detach) 1042 return (DDI_FAILURE); 1043 1044 if (mac_unregister(Adapter->mh) != 0) { 1045 e1000g_log(Adapter, CE_WARN, "Unregister MAC failed"); 1046 return (DDI_FAILURE); 1047 } 1048 Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC; 1049 1050 ASSERT(!(Adapter->e1000g_state & E1000G_STARTED)); 1051 1052 if (!e1000g_force_detach && !rx_drain) 1053 return (DDI_FAILURE); 1054 1055 e1000g_unattach(devinfo, Adapter); 1056 1057 return (DDI_SUCCESS); 1058 } 1059 1060 /* 1061 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance 1062 */ 1063 void 1064 e1000g_free_priv_devi_node(private_devi_list_t *devi_node) 1065 { 1066 ASSERT(e1000g_private_devi_list != NULL); 1067 ASSERT(devi_node != NULL); 1068 1069 if (devi_node->prev != NULL) 1070 devi_node->prev->next = devi_node->next; 1071 if (devi_node->next != NULL) 1072 devi_node->next->prev = devi_node->prev; 1073 if (devi_node == e1000g_private_devi_list) 1074 e1000g_private_devi_list = devi_node->next; 1075 1076 kmem_free(devi_node->priv_dip, 1077 sizeof (struct dev_info)); 1078 kmem_free(devi_node, 1079 sizeof (private_devi_list_t)); 1080 } 1081 1082 static void 1083 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter) 1084 { 1085 private_devi_list_t *devi_node; 1086 int result; 1087 1088 if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 1089 (void) e1000g_disable_intrs(Adapter); 1090 } 1091 1092 if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) { 1093 (void) mac_unregister(Adapter->mh); 1094 } 1095 1096 if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 1097 (void) e1000g_rem_intrs(Adapter); 1098 } 1099 1100 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) { 1101 (void) ddi_prop_remove_all(devinfo); 1102 } 1103 1104 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) { 1105 kstat_delete((kstat_t *)Adapter->e1000g_ksp); 1106 } 1107 1108 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) { 1109 stop_link_timer(Adapter); 1110 1111 mutex_enter(&e1000g_nvm_lock); 1112 result = e1000_reset_hw(&Adapter->shared); 1113 mutex_exit(&e1000g_nvm_lock); 1114 1115 if (result != E1000_SUCCESS) { 1116 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1117 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1118 } 1119 } 1120 1121 e1000g_release_multicast(Adapter); 1122 1123 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) { 1124 if (Adapter->osdep.reg_handle != NULL) 1125 ddi_regs_map_free(&Adapter->osdep.reg_handle); 1126 if (Adapter->osdep.ich_flash_handle != NULL) 1127 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle); 1128 if (Adapter->osdep.io_reg_handle != NULL) 1129 ddi_regs_map_free(&Adapter->osdep.io_reg_handle); 1130 } 1131 1132 if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) { 1133 if (Adapter->osdep.cfg_handle != NULL) 1134 pci_config_teardown(&Adapter->osdep.cfg_handle); 1135 } 1136 1137 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) { 1138 e1000g_destroy_locks(Adapter); 1139 } 1140 1141 if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) { 1142 e1000g_fm_fini(Adapter); 1143 } 1144 1145 mutex_enter(&e1000g_rx_detach_lock); 1146 if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) { 1147 devi_node = Adapter->priv_devi_node; 1148 devi_node->flag |= E1000G_PRIV_DEVI_DETACH; 1149 1150 if (devi_node->pending_rx_count == 0) { 1151 e1000g_free_priv_devi_node(devi_node); 1152 } 1153 } 1154 mutex_exit(&e1000g_rx_detach_lock); 1155 1156 kmem_free((caddr_t)Adapter, sizeof (struct e1000g)); 1157 1158 /* 1159 * Another hotplug spec requirement, 1160 * run ddi_set_driver_private(devinfo, null); 1161 */ 1162 ddi_set_driver_private(devinfo, NULL); 1163 } 1164 1165 /* 1166 * Get the BAR type and rnumber for a given PCI BAR offset 1167 */ 1168 static int 1169 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info) 1170 { 1171 pci_regspec_t *regs; 1172 uint_t regs_length; 1173 int type, rnumber, rcount; 1174 1175 ASSERT((bar_offset >= PCI_CONF_BASE0) && 1176 (bar_offset <= PCI_CONF_BASE5)); 1177 1178 /* 1179 * Get the DDI "reg" property 1180 */ 1181 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 1182 DDI_PROP_DONTPASS, "reg", (int **)®s, 1183 ®s_length) != DDI_PROP_SUCCESS) { 1184 return (DDI_FAILURE); 1185 } 1186 1187 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t); 1188 /* 1189 * Check the BAR offset 1190 */ 1191 for (rnumber = 0; rnumber < rcount; ++rnumber) { 1192 if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) { 1193 type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK; 1194 break; 1195 } 1196 } 1197 1198 ddi_prop_free(regs); 1199 1200 if (rnumber >= rcount) 1201 return (DDI_FAILURE); 1202 1203 switch (type) { 1204 case PCI_ADDR_CONFIG: 1205 bar_info->type = E1000G_BAR_CONFIG; 1206 break; 1207 case PCI_ADDR_IO: 1208 bar_info->type = E1000G_BAR_IO; 1209 break; 1210 case PCI_ADDR_MEM32: 1211 bar_info->type = E1000G_BAR_MEM32; 1212 break; 1213 case PCI_ADDR_MEM64: 1214 bar_info->type = E1000G_BAR_MEM64; 1215 break; 1216 default: 1217 return (DDI_FAILURE); 1218 } 1219 bar_info->rnumber = rnumber; 1220 return (DDI_SUCCESS); 1221 } 1222 1223 static void 1224 e1000g_init_locks(struct e1000g *Adapter) 1225 { 1226 e1000g_tx_ring_t *tx_ring; 1227 e1000g_rx_ring_t *rx_ring; 1228 1229 rw_init(&Adapter->chip_lock, NULL, 1230 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1231 mutex_init(&Adapter->link_lock, NULL, 1232 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1233 mutex_init(&Adapter->watchdog_lock, NULL, 1234 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1235 1236 tx_ring = Adapter->tx_ring; 1237 1238 mutex_init(&tx_ring->tx_lock, NULL, 1239 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1240 mutex_init(&tx_ring->usedlist_lock, NULL, 1241 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1242 mutex_init(&tx_ring->freelist_lock, NULL, 1243 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1244 1245 rx_ring = Adapter->rx_ring; 1246 1247 mutex_init(&rx_ring->rx_lock, NULL, 1248 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1249 } 1250 1251 static void 1252 e1000g_destroy_locks(struct e1000g *Adapter) 1253 { 1254 e1000g_tx_ring_t *tx_ring; 1255 e1000g_rx_ring_t *rx_ring; 1256 1257 tx_ring = Adapter->tx_ring; 1258 mutex_destroy(&tx_ring->tx_lock); 1259 mutex_destroy(&tx_ring->usedlist_lock); 1260 mutex_destroy(&tx_ring->freelist_lock); 1261 1262 rx_ring = Adapter->rx_ring; 1263 mutex_destroy(&rx_ring->rx_lock); 1264 1265 mutex_destroy(&Adapter->link_lock); 1266 mutex_destroy(&Adapter->watchdog_lock); 1267 rw_destroy(&Adapter->chip_lock); 1268 1269 /* destory mutex initialized in shared code */ 1270 e1000_destroy_hw_mutex(&Adapter->shared); 1271 } 1272 1273 static int 1274 e1000g_resume(dev_info_t *devinfo) 1275 { 1276 struct e1000g *Adapter; 1277 1278 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1279 if (Adapter == NULL) 1280 e1000g_log(Adapter, CE_PANIC, 1281 "Instance pointer is null\n"); 1282 1283 if (Adapter->dip != devinfo) 1284 e1000g_log(Adapter, CE_PANIC, 1285 "Devinfo is not the same as saved devinfo\n"); 1286 1287 rw_enter(&Adapter->chip_lock, RW_WRITER); 1288 1289 if (Adapter->e1000g_state & E1000G_STARTED) { 1290 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { 1291 rw_exit(&Adapter->chip_lock); 1292 /* 1293 * We note the failure, but return success, as the 1294 * system is still usable without this controller. 1295 */ 1296 e1000g_log(Adapter, CE_WARN, 1297 "e1000g_resume: failed to restart controller\n"); 1298 return (DDI_SUCCESS); 1299 } 1300 /* Enable and start the watchdog timer */ 1301 enable_watchdog_timer(Adapter); 1302 } 1303 1304 Adapter->e1000g_state &= ~E1000G_SUSPENDED; 1305 1306 rw_exit(&Adapter->chip_lock); 1307 1308 return (DDI_SUCCESS); 1309 } 1310 1311 static int 1312 e1000g_suspend(dev_info_t *devinfo) 1313 { 1314 struct e1000g *Adapter; 1315 1316 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1317 if (Adapter == NULL) 1318 return (DDI_FAILURE); 1319 1320 rw_enter(&Adapter->chip_lock, RW_WRITER); 1321 1322 Adapter->e1000g_state |= E1000G_SUSPENDED; 1323 1324 /* if the port isn't plumbed, we can simply return */ 1325 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 1326 rw_exit(&Adapter->chip_lock); 1327 return (DDI_SUCCESS); 1328 } 1329 1330 e1000g_stop(Adapter, B_FALSE); 1331 1332 rw_exit(&Adapter->chip_lock); 1333 1334 /* Disable and stop all the timers */ 1335 disable_watchdog_timer(Adapter); 1336 stop_link_timer(Adapter); 1337 stop_82547_timer(Adapter->tx_ring); 1338 1339 return (DDI_SUCCESS); 1340 } 1341 1342 static int 1343 e1000g_init(struct e1000g *Adapter) 1344 { 1345 uint32_t pba; 1346 uint32_t high_water; 1347 struct e1000_hw *hw; 1348 clock_t link_timeout; 1349 int result; 1350 1351 hw = &Adapter->shared; 1352 1353 /* 1354 * reset to put the hardware in a known state 1355 * before we try to do anything with the eeprom 1356 */ 1357 mutex_enter(&e1000g_nvm_lock); 1358 result = e1000_reset_hw(hw); 1359 mutex_exit(&e1000g_nvm_lock); 1360 1361 if (result != E1000_SUCCESS) { 1362 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1363 goto init_fail; 1364 } 1365 1366 mutex_enter(&e1000g_nvm_lock); 1367 result = e1000_validate_nvm_checksum(hw); 1368 if (result < E1000_SUCCESS) { 1369 /* 1370 * Some PCI-E parts fail the first check due to 1371 * the link being in sleep state. Call it again, 1372 * if it fails a second time its a real issue. 1373 */ 1374 result = e1000_validate_nvm_checksum(hw); 1375 } 1376 mutex_exit(&e1000g_nvm_lock); 1377 1378 if (result < E1000_SUCCESS) { 1379 e1000g_log(Adapter, CE_WARN, 1380 "Invalid NVM checksum. Please contact " 1381 "the vendor to update the NVM."); 1382 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1383 goto init_fail; 1384 } 1385 1386 result = 0; 1387 #ifdef __sparc 1388 /* 1389 * First, we try to get the local ethernet address from OBP. If 1390 * failed, then we get it from the EEPROM of NIC card. 1391 */ 1392 result = e1000g_find_mac_address(Adapter); 1393 #endif 1394 /* Get the local ethernet address. */ 1395 if (!result) { 1396 mutex_enter(&e1000g_nvm_lock); 1397 result = e1000_read_mac_addr(hw); 1398 mutex_exit(&e1000g_nvm_lock); 1399 } 1400 1401 if (result < E1000_SUCCESS) { 1402 e1000g_log(Adapter, CE_WARN, "Read mac addr failed"); 1403 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1404 goto init_fail; 1405 } 1406 1407 /* check for valid mac address */ 1408 if (!is_valid_mac_addr(hw->mac.addr)) { 1409 e1000g_log(Adapter, CE_WARN, "Invalid mac addr"); 1410 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1411 goto init_fail; 1412 } 1413 1414 /* Set LAA state for 82571 chipset */ 1415 e1000_set_laa_state_82571(hw, B_TRUE); 1416 1417 /* Master Latency Timer implementation */ 1418 if (Adapter->master_latency_timer) { 1419 pci_config_put8(Adapter->osdep.cfg_handle, 1420 PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer); 1421 } 1422 1423 if (hw->mac.type < e1000_82547) { 1424 /* 1425 * Total FIFO is 64K 1426 */ 1427 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1428 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 1429 else 1430 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 1431 } else if ((hw->mac.type == e1000_82571) || 1432 (hw->mac.type == e1000_82572) || 1433 (hw->mac.type == e1000_80003es2lan)) { 1434 /* 1435 * Total FIFO is 48K 1436 */ 1437 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1438 pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */ 1439 else 1440 pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */ 1441 } else if (hw->mac.type == e1000_82573) { 1442 pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */ 1443 } else if (hw->mac.type == e1000_82574) { 1444 /* Keep adapter default: 20K for Rx, 20K for Tx */ 1445 pba = E1000_READ_REG(hw, E1000_PBA); 1446 } else if (hw->mac.type == e1000_ich8lan) { 1447 pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */ 1448 } else if (hw->mac.type == e1000_ich9lan) { 1449 pba = E1000_PBA_10K; 1450 } else if (hw->mac.type == e1000_ich10lan) { 1451 pba = E1000_PBA_10K; 1452 } else if (hw->mac.type == e1000_pchlan) { 1453 pba = E1000_PBA_26K; 1454 } else if (hw->mac.type == e1000_pch2lan) { 1455 pba = E1000_PBA_26K; 1456 } else if (hw->mac.type == e1000_pch_lpt) { 1457 pba = E1000_PBA_26K; 1458 } else { 1459 /* 1460 * Total FIFO is 40K 1461 */ 1462 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1463 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ 1464 else 1465 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ 1466 } 1467 E1000_WRITE_REG(hw, E1000_PBA, pba); 1468 1469 /* 1470 * These parameters set thresholds for the adapter's generation(Tx) 1471 * and response(Rx) to Ethernet PAUSE frames. These are just threshold 1472 * settings. Flow control is enabled or disabled in the configuration 1473 * file. 1474 * High-water mark is set down from the top of the rx fifo (not 1475 * sensitive to max_frame_size) and low-water is set just below 1476 * high-water mark. 1477 * The high water mark must be low enough to fit one full frame above 1478 * it in the rx FIFO. Should be the lower of: 1479 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early 1480 * receive size (assuming ERT set to E1000_ERT_2048), or the full 1481 * Rx FIFO size minus one full frame. 1482 */ 1483 high_water = min(((pba << 10) * 9 / 10), 1484 ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 || 1485 hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ? 1486 ((pba << 10) - (E1000_ERT_2048 << 3)) : 1487 ((pba << 10) - Adapter->max_frame_size))); 1488 1489 hw->fc.high_water = high_water & 0xFFF8; 1490 hw->fc.low_water = hw->fc.high_water - 8; 1491 1492 if (hw->mac.type == e1000_80003es2lan) 1493 hw->fc.pause_time = 0xFFFF; 1494 else 1495 hw->fc.pause_time = E1000_FC_PAUSE_TIME; 1496 hw->fc.send_xon = B_TRUE; 1497 1498 /* 1499 * Reset the adapter hardware the second time. 1500 */ 1501 mutex_enter(&e1000g_nvm_lock); 1502 result = e1000_reset_hw(hw); 1503 mutex_exit(&e1000g_nvm_lock); 1504 1505 if (result != E1000_SUCCESS) { 1506 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1507 goto init_fail; 1508 } 1509 1510 /* disable wakeup control by default */ 1511 if (hw->mac.type >= e1000_82544) 1512 E1000_WRITE_REG(hw, E1000_WUC, 0); 1513 1514 /* 1515 * MWI should be disabled on 82546. 1516 */ 1517 if (hw->mac.type == e1000_82546) 1518 e1000_pci_clear_mwi(hw); 1519 else 1520 e1000_pci_set_mwi(hw); 1521 1522 /* 1523 * Configure/Initialize hardware 1524 */ 1525 mutex_enter(&e1000g_nvm_lock); 1526 result = e1000_init_hw(hw); 1527 mutex_exit(&e1000g_nvm_lock); 1528 1529 if (result < E1000_SUCCESS) { 1530 e1000g_log(Adapter, CE_WARN, "Initialize hw failed"); 1531 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1532 goto init_fail; 1533 } 1534 1535 /* 1536 * Restore LED settings to the default from EEPROM 1537 * to meet the standard for Sun platforms. 1538 */ 1539 (void) e1000_cleanup_led(hw); 1540 1541 /* Disable Smart Power Down */ 1542 phy_spd_state(hw, B_FALSE); 1543 1544 /* Make sure driver has control */ 1545 e1000g_get_driver_control(hw); 1546 1547 /* 1548 * Initialize unicast addresses. 1549 */ 1550 e1000g_init_unicst(Adapter); 1551 1552 /* 1553 * Setup and initialize the mctable structures. After this routine 1554 * completes Multicast table will be set 1555 */ 1556 e1000_update_mc_addr_list(hw, 1557 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 1558 msec_delay(5); 1559 1560 /* 1561 * Implement Adaptive IFS 1562 */ 1563 e1000_reset_adaptive(hw); 1564 1565 /* Setup Interrupt Throttling Register */ 1566 if (hw->mac.type >= e1000_82540) { 1567 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate); 1568 } else 1569 Adapter->intr_adaptive = B_FALSE; 1570 1571 /* Start the timer for link setup */ 1572 if (hw->mac.autoneg) 1573 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000); 1574 else 1575 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000); 1576 1577 mutex_enter(&Adapter->link_lock); 1578 if (hw->phy.autoneg_wait_to_complete) { 1579 Adapter->link_complete = B_TRUE; 1580 } else { 1581 Adapter->link_complete = B_FALSE; 1582 Adapter->link_tid = timeout(e1000g_link_timer, 1583 (void *)Adapter, link_timeout); 1584 } 1585 mutex_exit(&Adapter->link_lock); 1586 1587 /* Save the state of the phy */ 1588 e1000g_get_phy_state(Adapter); 1589 1590 e1000g_param_sync(Adapter); 1591 1592 Adapter->init_count++; 1593 1594 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 1595 goto init_fail; 1596 } 1597 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1598 goto init_fail; 1599 } 1600 1601 Adapter->poll_mode = e1000g_poll_mode; 1602 1603 return (DDI_SUCCESS); 1604 1605 init_fail: 1606 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1607 return (DDI_FAILURE); 1608 } 1609 1610 static int 1611 e1000g_alloc_rx_data(struct e1000g *Adapter) 1612 { 1613 e1000g_rx_ring_t *rx_ring; 1614 e1000g_rx_data_t *rx_data; 1615 1616 rx_ring = Adapter->rx_ring; 1617 1618 rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP); 1619 1620 if (rx_data == NULL) 1621 return (DDI_FAILURE); 1622 1623 rx_data->priv_devi_node = Adapter->priv_devi_node; 1624 rx_data->rx_ring = rx_ring; 1625 1626 mutex_init(&rx_data->freelist_lock, NULL, 1627 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1628 mutex_init(&rx_data->recycle_lock, NULL, 1629 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1630 1631 rx_ring->rx_data = rx_data; 1632 1633 return (DDI_SUCCESS); 1634 } 1635 1636 void 1637 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data) 1638 { 1639 rx_sw_packet_t *packet, *next_packet; 1640 1641 if (rx_data == NULL) 1642 return; 1643 1644 packet = rx_data->packet_area; 1645 while (packet != NULL) { 1646 next_packet = packet->next; 1647 e1000g_free_rx_sw_packet(packet, B_TRUE); 1648 packet = next_packet; 1649 } 1650 rx_data->packet_area = NULL; 1651 } 1652 1653 void 1654 e1000g_free_rx_data(e1000g_rx_data_t *rx_data) 1655 { 1656 if (rx_data == NULL) 1657 return; 1658 1659 mutex_destroy(&rx_data->freelist_lock); 1660 mutex_destroy(&rx_data->recycle_lock); 1661 1662 kmem_free(rx_data, sizeof (e1000g_rx_data_t)); 1663 } 1664 1665 /* 1666 * Check if the link is up 1667 */ 1668 static boolean_t 1669 e1000g_link_up(struct e1000g *Adapter) 1670 { 1671 struct e1000_hw *hw = &Adapter->shared; 1672 boolean_t link_up = B_FALSE; 1673 1674 /* 1675 * get_link_status is set in the interrupt handler on link-status-change 1676 * or rx sequence error interrupt. get_link_status will stay 1677 * false until the e1000_check_for_link establishes link only 1678 * for copper adapters. 1679 */ 1680 switch (hw->phy.media_type) { 1681 case e1000_media_type_copper: 1682 if (hw->mac.get_link_status) { 1683 (void) e1000_check_for_link(hw); 1684 if ((E1000_READ_REG(hw, E1000_STATUS) & 1685 E1000_STATUS_LU)) { 1686 link_up = B_TRUE; 1687 } else { 1688 link_up = !hw->mac.get_link_status; 1689 } 1690 } else { 1691 link_up = B_TRUE; 1692 } 1693 break; 1694 case e1000_media_type_fiber: 1695 (void) e1000_check_for_link(hw); 1696 link_up = (E1000_READ_REG(hw, E1000_STATUS) & 1697 E1000_STATUS_LU); 1698 break; 1699 case e1000_media_type_internal_serdes: 1700 (void) e1000_check_for_link(hw); 1701 link_up = hw->mac.serdes_has_link; 1702 break; 1703 } 1704 1705 return (link_up); 1706 } 1707 1708 static void 1709 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp) 1710 { 1711 struct iocblk *iocp; 1712 struct e1000g *e1000gp; 1713 enum ioc_reply status; 1714 1715 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr; 1716 iocp->ioc_error = 0; 1717 e1000gp = (struct e1000g *)arg; 1718 1719 ASSERT(e1000gp); 1720 if (e1000gp == NULL) { 1721 miocnak(q, mp, 0, EINVAL); 1722 return; 1723 } 1724 1725 rw_enter(&e1000gp->chip_lock, RW_READER); 1726 if (e1000gp->e1000g_state & E1000G_SUSPENDED) { 1727 rw_exit(&e1000gp->chip_lock); 1728 miocnak(q, mp, 0, EINVAL); 1729 return; 1730 } 1731 rw_exit(&e1000gp->chip_lock); 1732 1733 switch (iocp->ioc_cmd) { 1734 1735 case LB_GET_INFO_SIZE: 1736 case LB_GET_INFO: 1737 case LB_GET_MODE: 1738 case LB_SET_MODE: 1739 status = e1000g_loopback_ioctl(e1000gp, iocp, mp); 1740 break; 1741 1742 1743 #ifdef E1000G_DEBUG 1744 case E1000G_IOC_REG_PEEK: 1745 case E1000G_IOC_REG_POKE: 1746 status = e1000g_pp_ioctl(e1000gp, iocp, mp); 1747 break; 1748 case E1000G_IOC_CHIP_RESET: 1749 e1000gp->reset_count++; 1750 if (e1000g_reset_adapter(e1000gp)) 1751 status = IOC_ACK; 1752 else 1753 status = IOC_INVAL; 1754 break; 1755 #endif 1756 default: 1757 status = IOC_INVAL; 1758 break; 1759 } 1760 1761 /* 1762 * Decide how to reply 1763 */ 1764 switch (status) { 1765 default: 1766 case IOC_INVAL: 1767 /* 1768 * Error, reply with a NAK and EINVAL or the specified error 1769 */ 1770 miocnak(q, mp, 0, iocp->ioc_error == 0 ? 1771 EINVAL : iocp->ioc_error); 1772 break; 1773 1774 case IOC_DONE: 1775 /* 1776 * OK, reply already sent 1777 */ 1778 break; 1779 1780 case IOC_ACK: 1781 /* 1782 * OK, reply with an ACK 1783 */ 1784 miocack(q, mp, 0, 0); 1785 break; 1786 1787 case IOC_REPLY: 1788 /* 1789 * OK, send prepared reply as ACK or NAK 1790 */ 1791 mp->b_datap->db_type = iocp->ioc_error == 0 ? 1792 M_IOCACK : M_IOCNAK; 1793 qreply(q, mp); 1794 break; 1795 } 1796 } 1797 1798 /* 1799 * The default value of e1000g_poll_mode == 0 assumes that the NIC is 1800 * capable of supporting only one interrupt and we shouldn't disable 1801 * the physical interrupt. In this case we let the interrupt come and 1802 * we queue the packets in the rx ring itself in case we are in polling 1803 * mode (better latency but slightly lower performance and a very 1804 * high intrrupt count in mpstat which is harmless). 1805 * 1806 * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt 1807 * which can be disabled in poll mode. This gives better overall 1808 * throughput (compared to the mode above), shows very low interrupt 1809 * count but has slightly higher latency since we pick the packets when 1810 * the poll thread does polling. 1811 * 1812 * Currently, this flag should be enabled only while doing performance 1813 * measurement or when it can be guaranteed that entire NIC going 1814 * in poll mode will not harm any traffic like cluster heartbeat etc. 1815 */ 1816 int e1000g_poll_mode = 0; 1817 1818 /* 1819 * Called from the upper layers when driver is in polling mode to 1820 * pick up any queued packets. Care should be taken to not block 1821 * this thread. 1822 */ 1823 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup) 1824 { 1825 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg; 1826 mblk_t *mp = NULL; 1827 mblk_t *tail; 1828 struct e1000g *adapter; 1829 1830 adapter = rx_ring->adapter; 1831 1832 rw_enter(&adapter->chip_lock, RW_READER); 1833 1834 if (adapter->e1000g_state & E1000G_SUSPENDED) { 1835 rw_exit(&adapter->chip_lock); 1836 return (NULL); 1837 } 1838 1839 mutex_enter(&rx_ring->rx_lock); 1840 mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup); 1841 mutex_exit(&rx_ring->rx_lock); 1842 rw_exit(&adapter->chip_lock); 1843 return (mp); 1844 } 1845 1846 static int 1847 e1000g_m_start(void *arg) 1848 { 1849 struct e1000g *Adapter = (struct e1000g *)arg; 1850 1851 rw_enter(&Adapter->chip_lock, RW_WRITER); 1852 1853 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 1854 rw_exit(&Adapter->chip_lock); 1855 return (ECANCELED); 1856 } 1857 1858 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { 1859 rw_exit(&Adapter->chip_lock); 1860 return (ENOTACTIVE); 1861 } 1862 1863 Adapter->e1000g_state |= E1000G_STARTED; 1864 1865 rw_exit(&Adapter->chip_lock); 1866 1867 /* Enable and start the watchdog timer */ 1868 enable_watchdog_timer(Adapter); 1869 1870 return (0); 1871 } 1872 1873 static int 1874 e1000g_start(struct e1000g *Adapter, boolean_t global) 1875 { 1876 e1000g_rx_data_t *rx_data; 1877 1878 if (global) { 1879 if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) { 1880 e1000g_log(Adapter, CE_WARN, "Allocate rx data failed"); 1881 goto start_fail; 1882 } 1883 1884 /* Allocate dma resources for descriptors and buffers */ 1885 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) { 1886 e1000g_log(Adapter, CE_WARN, 1887 "Alloc DMA resources failed"); 1888 goto start_fail; 1889 } 1890 Adapter->rx_buffer_setup = B_FALSE; 1891 } 1892 1893 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) { 1894 if (e1000g_init(Adapter) != DDI_SUCCESS) { 1895 e1000g_log(Adapter, CE_WARN, 1896 "Adapter initialization failed"); 1897 goto start_fail; 1898 } 1899 } 1900 1901 /* Setup and initialize the transmit structures */ 1902 e1000g_tx_setup(Adapter); 1903 msec_delay(5); 1904 1905 /* Setup and initialize the receive structures */ 1906 e1000g_rx_setup(Adapter); 1907 msec_delay(5); 1908 1909 /* Restore the e1000g promiscuous mode */ 1910 e1000g_restore_promisc(Adapter); 1911 1912 e1000g_mask_interrupt(Adapter); 1913 1914 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 1915 1916 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1917 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1918 goto start_fail; 1919 } 1920 1921 return (DDI_SUCCESS); 1922 1923 start_fail: 1924 rx_data = Adapter->rx_ring->rx_data; 1925 1926 if (global) { 1927 e1000g_release_dma_resources(Adapter); 1928 e1000g_free_rx_pending_buffers(rx_data); 1929 e1000g_free_rx_data(rx_data); 1930 } 1931 1932 mutex_enter(&e1000g_nvm_lock); 1933 (void) e1000_reset_hw(&Adapter->shared); 1934 mutex_exit(&e1000g_nvm_lock); 1935 1936 return (DDI_FAILURE); 1937 } 1938 1939 static void 1940 e1000g_m_stop(void *arg) 1941 { 1942 struct e1000g *Adapter = (struct e1000g *)arg; 1943 1944 /* Drain tx sessions */ 1945 (void) e1000g_tx_drain(Adapter); 1946 1947 rw_enter(&Adapter->chip_lock, RW_WRITER); 1948 1949 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 1950 rw_exit(&Adapter->chip_lock); 1951 return; 1952 } 1953 Adapter->e1000g_state &= ~E1000G_STARTED; 1954 e1000g_stop(Adapter, B_TRUE); 1955 1956 rw_exit(&Adapter->chip_lock); 1957 1958 /* Disable and stop all the timers */ 1959 disable_watchdog_timer(Adapter); 1960 stop_link_timer(Adapter); 1961 stop_82547_timer(Adapter->tx_ring); 1962 } 1963 1964 static void 1965 e1000g_stop(struct e1000g *Adapter, boolean_t global) 1966 { 1967 private_devi_list_t *devi_node; 1968 e1000g_rx_data_t *rx_data; 1969 int result; 1970 1971 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT; 1972 1973 /* Stop the chip and release pending resources */ 1974 1975 /* Tell firmware driver is no longer in control */ 1976 e1000g_release_driver_control(&Adapter->shared); 1977 1978 e1000g_clear_all_interrupts(Adapter); 1979 1980 mutex_enter(&e1000g_nvm_lock); 1981 result = e1000_reset_hw(&Adapter->shared); 1982 mutex_exit(&e1000g_nvm_lock); 1983 1984 if (result != E1000_SUCCESS) { 1985 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1986 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1987 } 1988 1989 mutex_enter(&Adapter->link_lock); 1990 Adapter->link_complete = B_FALSE; 1991 mutex_exit(&Adapter->link_lock); 1992 1993 /* Release resources still held by the TX descriptors */ 1994 e1000g_tx_clean(Adapter); 1995 1996 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 1997 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1998 1999 /* Clean the pending rx jumbo packet fragment */ 2000 e1000g_rx_clean(Adapter); 2001 2002 if (global) { 2003 e1000g_release_dma_resources(Adapter); 2004 2005 mutex_enter(&e1000g_rx_detach_lock); 2006 rx_data = Adapter->rx_ring->rx_data; 2007 rx_data->flag |= E1000G_RX_STOPPED; 2008 2009 if (rx_data->pending_count == 0) { 2010 e1000g_free_rx_pending_buffers(rx_data); 2011 e1000g_free_rx_data(rx_data); 2012 } else { 2013 devi_node = rx_data->priv_devi_node; 2014 if (devi_node != NULL) 2015 atomic_inc_32(&devi_node->pending_rx_count); 2016 else 2017 atomic_inc_32(&Adapter->pending_rx_count); 2018 } 2019 mutex_exit(&e1000g_rx_detach_lock); 2020 } 2021 2022 if (Adapter->link_state != LINK_STATE_UNKNOWN) { 2023 Adapter->link_state = LINK_STATE_UNKNOWN; 2024 if (!Adapter->reset_flag) 2025 mac_link_update(Adapter->mh, Adapter->link_state); 2026 } 2027 } 2028 2029 static void 2030 e1000g_rx_clean(struct e1000g *Adapter) 2031 { 2032 e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data; 2033 2034 if (rx_data == NULL) 2035 return; 2036 2037 if (rx_data->rx_mblk != NULL) { 2038 freemsg(rx_data->rx_mblk); 2039 rx_data->rx_mblk = NULL; 2040 rx_data->rx_mblk_tail = NULL; 2041 rx_data->rx_mblk_len = 0; 2042 } 2043 } 2044 2045 static void 2046 e1000g_tx_clean(struct e1000g *Adapter) 2047 { 2048 e1000g_tx_ring_t *tx_ring; 2049 p_tx_sw_packet_t packet; 2050 mblk_t *mp; 2051 mblk_t *nmp; 2052 uint32_t packet_count; 2053 2054 tx_ring = Adapter->tx_ring; 2055 2056 /* 2057 * Here we don't need to protect the lists using 2058 * the usedlist_lock and freelist_lock, for they 2059 * have been protected by the chip_lock. 2060 */ 2061 mp = NULL; 2062 nmp = NULL; 2063 packet_count = 0; 2064 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list); 2065 while (packet != NULL) { 2066 if (packet->mp != NULL) { 2067 /* Assemble the message chain */ 2068 if (mp == NULL) { 2069 mp = packet->mp; 2070 nmp = packet->mp; 2071 } else { 2072 nmp->b_next = packet->mp; 2073 nmp = packet->mp; 2074 } 2075 /* Disconnect the message from the sw packet */ 2076 packet->mp = NULL; 2077 } 2078 2079 e1000g_free_tx_swpkt(packet); 2080 packet_count++; 2081 2082 packet = (p_tx_sw_packet_t) 2083 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link); 2084 } 2085 2086 if (mp != NULL) 2087 freemsgchain(mp); 2088 2089 if (packet_count > 0) { 2090 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list); 2091 QUEUE_INIT_LIST(&tx_ring->used_list); 2092 2093 /* Setup TX descriptor pointers */ 2094 tx_ring->tbd_next = tx_ring->tbd_first; 2095 tx_ring->tbd_oldest = tx_ring->tbd_first; 2096 2097 /* Setup our HW Tx Head & Tail descriptor pointers */ 2098 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0); 2099 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0); 2100 } 2101 } 2102 2103 static boolean_t 2104 e1000g_tx_drain(struct e1000g *Adapter) 2105 { 2106 int i; 2107 boolean_t done; 2108 e1000g_tx_ring_t *tx_ring; 2109 2110 tx_ring = Adapter->tx_ring; 2111 2112 /* Allow up to 'wsdraintime' for pending xmit's to complete. */ 2113 for (i = 0; i < TX_DRAIN_TIME; i++) { 2114 mutex_enter(&tx_ring->usedlist_lock); 2115 done = IS_QUEUE_EMPTY(&tx_ring->used_list); 2116 mutex_exit(&tx_ring->usedlist_lock); 2117 2118 if (done) 2119 break; 2120 2121 msec_delay(1); 2122 } 2123 2124 return (done); 2125 } 2126 2127 static boolean_t 2128 e1000g_rx_drain(struct e1000g *Adapter) 2129 { 2130 int i; 2131 boolean_t done; 2132 2133 /* 2134 * Allow up to RX_DRAIN_TIME for pending received packets to complete. 2135 */ 2136 for (i = 0; i < RX_DRAIN_TIME; i++) { 2137 done = (Adapter->pending_rx_count == 0); 2138 2139 if (done) 2140 break; 2141 2142 msec_delay(1); 2143 } 2144 2145 return (done); 2146 } 2147 2148 static boolean_t 2149 e1000g_reset_adapter(struct e1000g *Adapter) 2150 { 2151 /* Disable and stop all the timers */ 2152 disable_watchdog_timer(Adapter); 2153 stop_link_timer(Adapter); 2154 stop_82547_timer(Adapter->tx_ring); 2155 2156 rw_enter(&Adapter->chip_lock, RW_WRITER); 2157 2158 if (Adapter->stall_flag) { 2159 Adapter->stall_flag = B_FALSE; 2160 Adapter->reset_flag = B_TRUE; 2161 } 2162 2163 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 2164 rw_exit(&Adapter->chip_lock); 2165 return (B_TRUE); 2166 } 2167 2168 e1000g_stop(Adapter, B_FALSE); 2169 2170 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { 2171 rw_exit(&Adapter->chip_lock); 2172 e1000g_log(Adapter, CE_WARN, "Reset failed"); 2173 return (B_FALSE); 2174 } 2175 2176 rw_exit(&Adapter->chip_lock); 2177 2178 /* Enable and start the watchdog timer */ 2179 enable_watchdog_timer(Adapter); 2180 2181 return (B_TRUE); 2182 } 2183 2184 boolean_t 2185 e1000g_global_reset(struct e1000g *Adapter) 2186 { 2187 /* Disable and stop all the timers */ 2188 disable_watchdog_timer(Adapter); 2189 stop_link_timer(Adapter); 2190 stop_82547_timer(Adapter->tx_ring); 2191 2192 rw_enter(&Adapter->chip_lock, RW_WRITER); 2193 2194 e1000g_stop(Adapter, B_TRUE); 2195 2196 Adapter->init_count = 0; 2197 2198 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { 2199 rw_exit(&Adapter->chip_lock); 2200 e1000g_log(Adapter, CE_WARN, "Reset failed"); 2201 return (B_FALSE); 2202 } 2203 2204 rw_exit(&Adapter->chip_lock); 2205 2206 /* Enable and start the watchdog timer */ 2207 enable_watchdog_timer(Adapter); 2208 2209 return (B_TRUE); 2210 } 2211 2212 /* 2213 * e1000g_intr_pciexpress - ISR for PCI Express chipsets 2214 * 2215 * This interrupt service routine is for PCI-Express adapters. 2216 * The ICR contents is valid only when the E1000_ICR_INT_ASSERTED 2217 * bit is set. 2218 */ 2219 static uint_t 2220 e1000g_intr_pciexpress(caddr_t arg) 2221 { 2222 struct e1000g *Adapter; 2223 uint32_t icr; 2224 2225 Adapter = (struct e1000g *)(uintptr_t)arg; 2226 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 2227 2228 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2229 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2230 return (DDI_INTR_CLAIMED); 2231 } 2232 2233 if (icr & E1000_ICR_INT_ASSERTED) { 2234 /* 2235 * E1000_ICR_INT_ASSERTED bit was set: 2236 * Read(Clear) the ICR, claim this interrupt, 2237 * look for work to do. 2238 */ 2239 e1000g_intr_work(Adapter, icr); 2240 return (DDI_INTR_CLAIMED); 2241 } else { 2242 /* 2243 * E1000_ICR_INT_ASSERTED bit was not set: 2244 * Don't claim this interrupt, return immediately. 2245 */ 2246 return (DDI_INTR_UNCLAIMED); 2247 } 2248 } 2249 2250 /* 2251 * e1000g_intr - ISR for PCI/PCI-X chipsets 2252 * 2253 * This interrupt service routine is for PCI/PCI-X adapters. 2254 * We check the ICR contents no matter the E1000_ICR_INT_ASSERTED 2255 * bit is set or not. 2256 */ 2257 static uint_t 2258 e1000g_intr(caddr_t arg) 2259 { 2260 struct e1000g *Adapter; 2261 uint32_t icr; 2262 2263 Adapter = (struct e1000g *)(uintptr_t)arg; 2264 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 2265 2266 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2267 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2268 return (DDI_INTR_CLAIMED); 2269 } 2270 2271 if (icr) { 2272 /* 2273 * Any bit was set in ICR: 2274 * Read(Clear) the ICR, claim this interrupt, 2275 * look for work to do. 2276 */ 2277 e1000g_intr_work(Adapter, icr); 2278 return (DDI_INTR_CLAIMED); 2279 } else { 2280 /* 2281 * No bit was set in ICR: 2282 * Don't claim this interrupt, return immediately. 2283 */ 2284 return (DDI_INTR_UNCLAIMED); 2285 } 2286 } 2287 2288 /* 2289 * e1000g_intr_work - actual processing of ISR 2290 * 2291 * Read(clear) the ICR contents and call appropriate interrupt 2292 * processing routines. 2293 */ 2294 static void 2295 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr) 2296 { 2297 struct e1000_hw *hw; 2298 hw = &Adapter->shared; 2299 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 2300 2301 Adapter->rx_pkt_cnt = 0; 2302 Adapter->tx_pkt_cnt = 0; 2303 2304 rw_enter(&Adapter->chip_lock, RW_READER); 2305 2306 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2307 rw_exit(&Adapter->chip_lock); 2308 return; 2309 } 2310 /* 2311 * Here we need to check the "e1000g_state" flag within the chip_lock to 2312 * ensure the receive routine will not execute when the adapter is 2313 * being reset. 2314 */ 2315 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 2316 rw_exit(&Adapter->chip_lock); 2317 return; 2318 } 2319 2320 if (icr & E1000_ICR_RXT0) { 2321 mblk_t *mp = NULL; 2322 mblk_t *tail = NULL; 2323 e1000g_rx_ring_t *rx_ring; 2324 2325 rx_ring = Adapter->rx_ring; 2326 mutex_enter(&rx_ring->rx_lock); 2327 /* 2328 * Sometimes with legacy interrupts, it possible that 2329 * there is a single interrupt for Rx/Tx. In which 2330 * case, if poll flag is set, we shouldn't really 2331 * be doing Rx processing. 2332 */ 2333 if (!rx_ring->poll_flag) 2334 mp = e1000g_receive(rx_ring, &tail, 2335 E1000G_CHAIN_NO_LIMIT); 2336 mutex_exit(&rx_ring->rx_lock); 2337 rw_exit(&Adapter->chip_lock); 2338 if (mp != NULL) 2339 mac_rx_ring(Adapter->mh, rx_ring->mrh, 2340 mp, rx_ring->ring_gen_num); 2341 } else 2342 rw_exit(&Adapter->chip_lock); 2343 2344 if (icr & E1000_ICR_TXDW) { 2345 if (!Adapter->tx_intr_enable) 2346 e1000g_clear_tx_interrupt(Adapter); 2347 2348 /* Recycle the tx descriptors */ 2349 rw_enter(&Adapter->chip_lock, RW_READER); 2350 (void) e1000g_recycle(tx_ring); 2351 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr); 2352 rw_exit(&Adapter->chip_lock); 2353 2354 if (tx_ring->resched_needed && 2355 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) { 2356 tx_ring->resched_needed = B_FALSE; 2357 mac_tx_update(Adapter->mh); 2358 E1000G_STAT(tx_ring->stat_reschedule); 2359 } 2360 } 2361 2362 /* 2363 * The Receive Sequence errors RXSEQ and the link status change LSC 2364 * are checked to detect that the cable has been pulled out. For 2365 * the Wiseman 2.0 silicon, the receive sequence errors interrupt 2366 * are an indication that cable is not connected. 2367 */ 2368 if ((icr & E1000_ICR_RXSEQ) || 2369 (icr & E1000_ICR_LSC) || 2370 (icr & E1000_ICR_GPI_EN1)) { 2371 boolean_t link_changed; 2372 timeout_id_t tid = 0; 2373 2374 stop_watchdog_timer(Adapter); 2375 2376 rw_enter(&Adapter->chip_lock, RW_WRITER); 2377 2378 /* 2379 * Because we got a link-status-change interrupt, force 2380 * e1000_check_for_link() to look at phy 2381 */ 2382 Adapter->shared.mac.get_link_status = B_TRUE; 2383 2384 /* e1000g_link_check takes care of link status change */ 2385 link_changed = e1000g_link_check(Adapter); 2386 2387 /* Get new phy state */ 2388 e1000g_get_phy_state(Adapter); 2389 2390 /* 2391 * If the link timer has not timed out, we'll not notify 2392 * the upper layer with any link state until the link is up. 2393 */ 2394 if (link_changed && !Adapter->link_complete) { 2395 if (Adapter->link_state == LINK_STATE_UP) { 2396 mutex_enter(&Adapter->link_lock); 2397 Adapter->link_complete = B_TRUE; 2398 tid = Adapter->link_tid; 2399 Adapter->link_tid = 0; 2400 mutex_exit(&Adapter->link_lock); 2401 } else { 2402 link_changed = B_FALSE; 2403 } 2404 } 2405 rw_exit(&Adapter->chip_lock); 2406 2407 if (link_changed) { 2408 if (tid != 0) 2409 (void) untimeout(tid); 2410 2411 /* 2412 * Workaround for esb2. Data stuck in fifo on a link 2413 * down event. Stop receiver here and reset in watchdog. 2414 */ 2415 if ((Adapter->link_state == LINK_STATE_DOWN) && 2416 (Adapter->shared.mac.type == e1000_80003es2lan)) { 2417 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL); 2418 E1000_WRITE_REG(hw, E1000_RCTL, 2419 rctl & ~E1000_RCTL_EN); 2420 e1000g_log(Adapter, CE_WARN, 2421 "ESB2 receiver disabled"); 2422 Adapter->esb2_workaround = B_TRUE; 2423 } 2424 if (!Adapter->reset_flag) 2425 mac_link_update(Adapter->mh, 2426 Adapter->link_state); 2427 if (Adapter->link_state == LINK_STATE_UP) 2428 Adapter->reset_flag = B_FALSE; 2429 } 2430 2431 start_watchdog_timer(Adapter); 2432 } 2433 } 2434 2435 static void 2436 e1000g_init_unicst(struct e1000g *Adapter) 2437 { 2438 struct e1000_hw *hw; 2439 int slot; 2440 2441 hw = &Adapter->shared; 2442 2443 if (Adapter->init_count == 0) { 2444 /* Initialize the multiple unicast addresses */ 2445 Adapter->unicst_total = min(hw->mac.rar_entry_count, 2446 MAX_NUM_UNICAST_ADDRESSES); 2447 2448 /* Workaround for an erratum of 82571 chipst */ 2449 if ((hw->mac.type == e1000_82571) && 2450 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2451 Adapter->unicst_total--; 2452 2453 /* VMware doesn't support multiple mac addresses properly */ 2454 if (hw->subsystem_vendor_id == 0x15ad) 2455 Adapter->unicst_total = 1; 2456 2457 Adapter->unicst_avail = Adapter->unicst_total; 2458 2459 for (slot = 0; slot < Adapter->unicst_total; slot++) { 2460 /* Clear both the flag and MAC address */ 2461 Adapter->unicst_addr[slot].reg.high = 0; 2462 Adapter->unicst_addr[slot].reg.low = 0; 2463 } 2464 } else { 2465 /* Workaround for an erratum of 82571 chipst */ 2466 if ((hw->mac.type == e1000_82571) && 2467 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2468 e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY); 2469 2470 /* Re-configure the RAR registers */ 2471 for (slot = 0; slot < Adapter->unicst_total; slot++) 2472 if (Adapter->unicst_addr[slot].mac.set == 1) 2473 e1000_rar_set(hw, 2474 Adapter->unicst_addr[slot].mac.addr, slot); 2475 } 2476 2477 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 2478 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2479 } 2480 2481 static int 2482 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr, 2483 int slot) 2484 { 2485 struct e1000_hw *hw; 2486 2487 hw = &Adapter->shared; 2488 2489 /* 2490 * The first revision of Wiseman silicon (rev 2.0) has an errata 2491 * that requires the receiver to be in reset when any of the 2492 * receive address registers (RAR regs) are accessed. The first 2493 * rev of Wiseman silicon also requires MWI to be disabled when 2494 * a global reset or a receive reset is issued. So before we 2495 * initialize the RARs, we check the rev of the Wiseman controller 2496 * and work around any necessary HW errata. 2497 */ 2498 if ((hw->mac.type == e1000_82542) && 2499 (hw->revision_id == E1000_REVISION_2)) { 2500 e1000_pci_clear_mwi(hw); 2501 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); 2502 msec_delay(5); 2503 } 2504 if (mac_addr == NULL) { 2505 E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0); 2506 E1000_WRITE_FLUSH(hw); 2507 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0); 2508 E1000_WRITE_FLUSH(hw); 2509 /* Clear both the flag and MAC address */ 2510 Adapter->unicst_addr[slot].reg.high = 0; 2511 Adapter->unicst_addr[slot].reg.low = 0; 2512 } else { 2513 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr, 2514 ETHERADDRL); 2515 e1000_rar_set(hw, (uint8_t *)mac_addr, slot); 2516 Adapter->unicst_addr[slot].mac.set = 1; 2517 } 2518 2519 /* Workaround for an erratum of 82571 chipst */ 2520 if (slot == 0) { 2521 if ((hw->mac.type == e1000_82571) && 2522 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2523 if (mac_addr == NULL) { 2524 E1000_WRITE_REG_ARRAY(hw, E1000_RA, 2525 slot << 1, 0); 2526 E1000_WRITE_FLUSH(hw); 2527 E1000_WRITE_REG_ARRAY(hw, E1000_RA, 2528 (slot << 1) + 1, 0); 2529 E1000_WRITE_FLUSH(hw); 2530 } else { 2531 e1000_rar_set(hw, (uint8_t *)mac_addr, 2532 LAST_RAR_ENTRY); 2533 } 2534 } 2535 2536 /* 2537 * If we are using Wiseman rev 2.0 silicon, we will have previously 2538 * put the receive in reset, and disabled MWI, to work around some 2539 * HW errata. Now we should take the receiver out of reset, and 2540 * re-enabled if MWI if it was previously enabled by the PCI BIOS. 2541 */ 2542 if ((hw->mac.type == e1000_82542) && 2543 (hw->revision_id == E1000_REVISION_2)) { 2544 E1000_WRITE_REG(hw, E1000_RCTL, 0); 2545 msec_delay(1); 2546 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 2547 e1000_pci_set_mwi(hw); 2548 e1000g_rx_setup(Adapter); 2549 } 2550 2551 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2552 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2553 return (EIO); 2554 } 2555 2556 return (0); 2557 } 2558 2559 static int 2560 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr) 2561 { 2562 struct e1000_hw *hw = &Adapter->shared; 2563 struct ether_addr *newtable; 2564 size_t new_len; 2565 size_t old_len; 2566 int res = 0; 2567 2568 if ((multiaddr[0] & 01) == 0) { 2569 res = EINVAL; 2570 e1000g_log(Adapter, CE_WARN, "Illegal multicast address"); 2571 goto done; 2572 } 2573 2574 if (Adapter->mcast_count >= Adapter->mcast_max_num) { 2575 res = ENOENT; 2576 e1000g_log(Adapter, CE_WARN, 2577 "Adapter requested more than %d mcast addresses", 2578 Adapter->mcast_max_num); 2579 goto done; 2580 } 2581 2582 2583 if (Adapter->mcast_count == Adapter->mcast_alloc_count) { 2584 old_len = Adapter->mcast_alloc_count * 2585 sizeof (struct ether_addr); 2586 new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) * 2587 sizeof (struct ether_addr); 2588 2589 newtable = kmem_alloc(new_len, KM_NOSLEEP); 2590 if (newtable == NULL) { 2591 res = ENOMEM; 2592 e1000g_log(Adapter, CE_WARN, 2593 "Not enough memory to alloc mcast table"); 2594 goto done; 2595 } 2596 2597 if (Adapter->mcast_table != NULL) { 2598 bcopy(Adapter->mcast_table, newtable, old_len); 2599 kmem_free(Adapter->mcast_table, old_len); 2600 } 2601 Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE; 2602 Adapter->mcast_table = newtable; 2603 } 2604 2605 bcopy(multiaddr, 2606 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL); 2607 Adapter->mcast_count++; 2608 2609 /* 2610 * Update the MC table in the hardware 2611 */ 2612 e1000g_clear_interrupt(Adapter); 2613 2614 e1000_update_mc_addr_list(hw, 2615 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 2616 2617 e1000g_mask_interrupt(Adapter); 2618 2619 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2620 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2621 res = EIO; 2622 } 2623 2624 done: 2625 return (res); 2626 } 2627 2628 static int 2629 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr) 2630 { 2631 struct e1000_hw *hw = &Adapter->shared; 2632 struct ether_addr *newtable; 2633 size_t new_len; 2634 size_t old_len; 2635 unsigned i; 2636 2637 for (i = 0; i < Adapter->mcast_count; i++) { 2638 if (bcmp(multiaddr, &Adapter->mcast_table[i], 2639 ETHERADDRL) == 0) { 2640 for (i++; i < Adapter->mcast_count; i++) { 2641 Adapter->mcast_table[i - 1] = 2642 Adapter->mcast_table[i]; 2643 } 2644 Adapter->mcast_count--; 2645 break; 2646 } 2647 } 2648 2649 if ((Adapter->mcast_alloc_count - Adapter->mcast_count) > 2650 MCAST_ALLOC_SIZE) { 2651 old_len = Adapter->mcast_alloc_count * 2652 sizeof (struct ether_addr); 2653 new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) * 2654 sizeof (struct ether_addr); 2655 2656 newtable = kmem_alloc(new_len, KM_NOSLEEP); 2657 if (newtable != NULL) { 2658 bcopy(Adapter->mcast_table, newtable, new_len); 2659 kmem_free(Adapter->mcast_table, old_len); 2660 2661 Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE; 2662 Adapter->mcast_table = newtable; 2663 } 2664 } 2665 2666 /* 2667 * Update the MC table in the hardware 2668 */ 2669 e1000g_clear_interrupt(Adapter); 2670 2671 e1000_update_mc_addr_list(hw, 2672 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 2673 2674 e1000g_mask_interrupt(Adapter); 2675 2676 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2677 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2678 return (EIO); 2679 } 2680 2681 return (0); 2682 } 2683 2684 static void 2685 e1000g_release_multicast(struct e1000g *Adapter) 2686 { 2687 if (Adapter->mcast_table != NULL) { 2688 kmem_free(Adapter->mcast_table, 2689 Adapter->mcast_alloc_count * sizeof (struct ether_addr)); 2690 Adapter->mcast_table = NULL; 2691 } 2692 } 2693 2694 int 2695 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr) 2696 { 2697 struct e1000g *Adapter = (struct e1000g *)arg; 2698 int result; 2699 2700 rw_enter(&Adapter->chip_lock, RW_WRITER); 2701 2702 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2703 result = ECANCELED; 2704 goto done; 2705 } 2706 2707 result = (add) ? multicst_add(Adapter, addr) 2708 : multicst_remove(Adapter, addr); 2709 2710 done: 2711 rw_exit(&Adapter->chip_lock); 2712 return (result); 2713 2714 } 2715 2716 int 2717 e1000g_m_promisc(void *arg, boolean_t on) 2718 { 2719 struct e1000g *Adapter = (struct e1000g *)arg; 2720 uint32_t rctl; 2721 2722 rw_enter(&Adapter->chip_lock, RW_WRITER); 2723 2724 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2725 rw_exit(&Adapter->chip_lock); 2726 return (ECANCELED); 2727 } 2728 2729 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL); 2730 2731 if (on) 2732 rctl |= 2733 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM); 2734 else 2735 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE)); 2736 2737 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl); 2738 2739 Adapter->e1000g_promisc = on; 2740 2741 rw_exit(&Adapter->chip_lock); 2742 2743 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2744 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2745 return (EIO); 2746 } 2747 2748 return (0); 2749 } 2750 2751 /* 2752 * Entry points to enable and disable interrupts at the granularity of 2753 * a group. 2754 * Turns the poll_mode for the whole adapter on and off to enable or 2755 * override the ring level polling control over the hardware interrupts. 2756 */ 2757 static int 2758 e1000g_rx_group_intr_enable(mac_intr_handle_t arg) 2759 { 2760 struct e1000g *adapter = (struct e1000g *)arg; 2761 e1000g_rx_ring_t *rx_ring = adapter->rx_ring; 2762 2763 /* 2764 * Later interrupts at the granularity of the this ring will 2765 * invoke mac_rx() with NULL, indicating the need for another 2766 * software classification. 2767 * We have a single ring usable per adapter now, so we only need to 2768 * reset the rx handle for that one. 2769 * When more RX rings can be used, we should update each one of them. 2770 */ 2771 mutex_enter(&rx_ring->rx_lock); 2772 rx_ring->mrh = NULL; 2773 adapter->poll_mode = B_FALSE; 2774 mutex_exit(&rx_ring->rx_lock); 2775 return (0); 2776 } 2777 2778 static int 2779 e1000g_rx_group_intr_disable(mac_intr_handle_t arg) 2780 { 2781 struct e1000g *adapter = (struct e1000g *)arg; 2782 e1000g_rx_ring_t *rx_ring = adapter->rx_ring; 2783 2784 mutex_enter(&rx_ring->rx_lock); 2785 2786 /* 2787 * Later interrupts at the granularity of the this ring will 2788 * invoke mac_rx() with the handle for this ring; 2789 */ 2790 adapter->poll_mode = B_TRUE; 2791 rx_ring->mrh = rx_ring->mrh_init; 2792 mutex_exit(&rx_ring->rx_lock); 2793 return (0); 2794 } 2795 2796 /* 2797 * Entry points to enable and disable interrupts at the granularity of 2798 * a ring. 2799 * adapter poll_mode controls whether we actually proceed with hardware 2800 * interrupt toggling. 2801 */ 2802 static int 2803 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh) 2804 { 2805 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh; 2806 struct e1000g *adapter = rx_ring->adapter; 2807 struct e1000_hw *hw = &adapter->shared; 2808 uint32_t intr_mask; 2809 2810 rw_enter(&adapter->chip_lock, RW_READER); 2811 2812 if (adapter->e1000g_state & E1000G_SUSPENDED) { 2813 rw_exit(&adapter->chip_lock); 2814 return (0); 2815 } 2816 2817 mutex_enter(&rx_ring->rx_lock); 2818 rx_ring->poll_flag = 0; 2819 mutex_exit(&rx_ring->rx_lock); 2820 2821 /* Rx interrupt enabling for MSI and legacy */ 2822 intr_mask = E1000_READ_REG(hw, E1000_IMS); 2823 intr_mask |= E1000_IMS_RXT0; 2824 E1000_WRITE_REG(hw, E1000_IMS, intr_mask); 2825 E1000_WRITE_FLUSH(hw); 2826 2827 /* Trigger a Rx interrupt to check Rx ring */ 2828 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 2829 E1000_WRITE_FLUSH(hw); 2830 2831 rw_exit(&adapter->chip_lock); 2832 return (0); 2833 } 2834 2835 static int 2836 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh) 2837 { 2838 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh; 2839 struct e1000g *adapter = rx_ring->adapter; 2840 struct e1000_hw *hw = &adapter->shared; 2841 2842 rw_enter(&adapter->chip_lock, RW_READER); 2843 2844 if (adapter->e1000g_state & E1000G_SUSPENDED) { 2845 rw_exit(&adapter->chip_lock); 2846 return (0); 2847 } 2848 mutex_enter(&rx_ring->rx_lock); 2849 rx_ring->poll_flag = 1; 2850 mutex_exit(&rx_ring->rx_lock); 2851 2852 /* Rx interrupt disabling for MSI and legacy */ 2853 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0); 2854 E1000_WRITE_FLUSH(hw); 2855 2856 rw_exit(&adapter->chip_lock); 2857 return (0); 2858 } 2859 2860 /* 2861 * e1000g_unicst_find - Find the slot for the specified unicast address 2862 */ 2863 static int 2864 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr) 2865 { 2866 int slot; 2867 2868 for (slot = 0; slot < Adapter->unicst_total; slot++) { 2869 if ((Adapter->unicst_addr[slot].mac.set == 1) && 2870 (bcmp(Adapter->unicst_addr[slot].mac.addr, 2871 mac_addr, ETHERADDRL) == 0)) 2872 return (slot); 2873 } 2874 2875 return (-1); 2876 } 2877 2878 /* 2879 * Entry points to add and remove a MAC address to a ring group. 2880 * The caller takes care of adding and removing the MAC addresses 2881 * to the filter via these two routines. 2882 */ 2883 2884 static int 2885 e1000g_addmac(void *arg, const uint8_t *mac_addr) 2886 { 2887 struct e1000g *Adapter = (struct e1000g *)arg; 2888 int slot, err; 2889 2890 rw_enter(&Adapter->chip_lock, RW_WRITER); 2891 2892 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2893 rw_exit(&Adapter->chip_lock); 2894 return (ECANCELED); 2895 } 2896 2897 if (e1000g_unicst_find(Adapter, mac_addr) != -1) { 2898 /* The same address is already in slot */ 2899 rw_exit(&Adapter->chip_lock); 2900 return (0); 2901 } 2902 2903 if (Adapter->unicst_avail == 0) { 2904 /* no slots available */ 2905 rw_exit(&Adapter->chip_lock); 2906 return (ENOSPC); 2907 } 2908 2909 /* Search for a free slot */ 2910 for (slot = 0; slot < Adapter->unicst_total; slot++) { 2911 if (Adapter->unicst_addr[slot].mac.set == 0) 2912 break; 2913 } 2914 ASSERT(slot < Adapter->unicst_total); 2915 2916 err = e1000g_unicst_set(Adapter, mac_addr, slot); 2917 if (err == 0) 2918 Adapter->unicst_avail--; 2919 2920 rw_exit(&Adapter->chip_lock); 2921 2922 return (err); 2923 } 2924 2925 static int 2926 e1000g_remmac(void *arg, const uint8_t *mac_addr) 2927 { 2928 struct e1000g *Adapter = (struct e1000g *)arg; 2929 int slot, err; 2930 2931 rw_enter(&Adapter->chip_lock, RW_WRITER); 2932 2933 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2934 rw_exit(&Adapter->chip_lock); 2935 return (ECANCELED); 2936 } 2937 2938 slot = e1000g_unicst_find(Adapter, mac_addr); 2939 if (slot == -1) { 2940 rw_exit(&Adapter->chip_lock); 2941 return (EINVAL); 2942 } 2943 2944 ASSERT(Adapter->unicst_addr[slot].mac.set); 2945 2946 /* Clear this slot */ 2947 err = e1000g_unicst_set(Adapter, NULL, slot); 2948 if (err == 0) 2949 Adapter->unicst_avail++; 2950 2951 rw_exit(&Adapter->chip_lock); 2952 2953 return (err); 2954 } 2955 2956 static int 2957 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 2958 { 2959 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh; 2960 2961 mutex_enter(&rx_ring->rx_lock); 2962 rx_ring->ring_gen_num = mr_gen_num; 2963 mutex_exit(&rx_ring->rx_lock); 2964 return (0); 2965 } 2966 2967 /* 2968 * Callback funtion for MAC layer to register all rings. 2969 * 2970 * The hardware supports a single group with currently only one ring 2971 * available. 2972 * Though not offering virtualization ability per se, exposing the 2973 * group/ring still enables the polling and interrupt toggling. 2974 */ 2975 /* ARGSUSED */ 2976 void 2977 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index, 2978 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 2979 { 2980 struct e1000g *Adapter = (struct e1000g *)arg; 2981 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring; 2982 mac_intr_t *mintr; 2983 2984 /* 2985 * We advertised only RX group/rings, so the MAC framework shouldn't 2986 * ask for any thing else. 2987 */ 2988 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0); 2989 2990 rx_ring->mrh = rx_ring->mrh_init = rh; 2991 infop->mri_driver = (mac_ring_driver_t)rx_ring; 2992 infop->mri_start = e1000g_ring_start; 2993 infop->mri_stop = NULL; 2994 infop->mri_poll = e1000g_poll_ring; 2995 infop->mri_stat = e1000g_rx_ring_stat; 2996 2997 /* Ring level interrupts */ 2998 mintr = &infop->mri_intr; 2999 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 3000 mintr->mi_enable = e1000g_rx_ring_intr_enable; 3001 mintr->mi_disable = e1000g_rx_ring_intr_disable; 3002 if (Adapter->msi_enable) 3003 mintr->mi_ddi_handle = Adapter->htable[0]; 3004 } 3005 3006 /* ARGSUSED */ 3007 static void 3008 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index, 3009 mac_group_info_t *infop, mac_group_handle_t gh) 3010 { 3011 struct e1000g *Adapter = (struct e1000g *)arg; 3012 mac_intr_t *mintr; 3013 3014 /* 3015 * We advertised a single RX ring. Getting a request for anything else 3016 * signifies a bug in the MAC framework. 3017 */ 3018 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0); 3019 3020 Adapter->rx_group = gh; 3021 3022 infop->mgi_driver = (mac_group_driver_t)Adapter; 3023 infop->mgi_start = NULL; 3024 infop->mgi_stop = NULL; 3025 infop->mgi_addmac = e1000g_addmac; 3026 infop->mgi_remmac = e1000g_remmac; 3027 infop->mgi_count = 1; 3028 3029 /* Group level interrupts */ 3030 mintr = &infop->mgi_intr; 3031 mintr->mi_handle = (mac_intr_handle_t)Adapter; 3032 mintr->mi_enable = e1000g_rx_group_intr_enable; 3033 mintr->mi_disable = e1000g_rx_group_intr_disable; 3034 } 3035 3036 static boolean_t 3037 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3038 { 3039 struct e1000g *Adapter = (struct e1000g *)arg; 3040 3041 switch (cap) { 3042 case MAC_CAPAB_HCKSUM: { 3043 uint32_t *txflags = cap_data; 3044 3045 if (Adapter->tx_hcksum_enable) 3046 *txflags = HCKSUM_IPHDRCKSUM | 3047 HCKSUM_INET_PARTIAL; 3048 else 3049 return (B_FALSE); 3050 break; 3051 } 3052 3053 case MAC_CAPAB_LSO: { 3054 mac_capab_lso_t *cap_lso = cap_data; 3055 3056 if (Adapter->lso_enable) { 3057 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 3058 cap_lso->lso_basic_tcp_ipv4.lso_max = 3059 E1000_LSO_MAXLEN; 3060 } else 3061 return (B_FALSE); 3062 break; 3063 } 3064 case MAC_CAPAB_RINGS: { 3065 mac_capab_rings_t *cap_rings = cap_data; 3066 3067 /* No TX rings exposed yet */ 3068 if (cap_rings->mr_type != MAC_RING_TYPE_RX) 3069 return (B_FALSE); 3070 3071 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC; 3072 cap_rings->mr_rnum = 1; 3073 cap_rings->mr_gnum = 1; 3074 cap_rings->mr_rget = e1000g_fill_ring; 3075 cap_rings->mr_gget = e1000g_fill_group; 3076 break; 3077 } 3078 default: 3079 return (B_FALSE); 3080 } 3081 return (B_TRUE); 3082 } 3083 3084 static boolean_t 3085 e1000g_param_locked(mac_prop_id_t pr_num) 3086 { 3087 /* 3088 * All en_* parameters are locked (read-only) while 3089 * the device is in any sort of loopback mode ... 3090 */ 3091 switch (pr_num) { 3092 case MAC_PROP_EN_1000FDX_CAP: 3093 case MAC_PROP_EN_1000HDX_CAP: 3094 case MAC_PROP_EN_100FDX_CAP: 3095 case MAC_PROP_EN_100HDX_CAP: 3096 case MAC_PROP_EN_10FDX_CAP: 3097 case MAC_PROP_EN_10HDX_CAP: 3098 case MAC_PROP_AUTONEG: 3099 case MAC_PROP_FLOWCTRL: 3100 return (B_TRUE); 3101 } 3102 return (B_FALSE); 3103 } 3104 3105 /* 3106 * callback function for set/get of properties 3107 */ 3108 static int 3109 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3110 uint_t pr_valsize, const void *pr_val) 3111 { 3112 struct e1000g *Adapter = arg; 3113 struct e1000_hw *hw = &Adapter->shared; 3114 struct e1000_fc_info *fc = &Adapter->shared.fc; 3115 int err = 0; 3116 link_flowctrl_t flowctrl; 3117 uint32_t cur_mtu, new_mtu; 3118 3119 rw_enter(&Adapter->chip_lock, RW_WRITER); 3120 3121 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3122 rw_exit(&Adapter->chip_lock); 3123 return (ECANCELED); 3124 } 3125 3126 if (Adapter->loopback_mode != E1000G_LB_NONE && 3127 e1000g_param_locked(pr_num)) { 3128 /* 3129 * All en_* parameters are locked (read-only) 3130 * while the device is in any sort of loopback mode. 3131 */ 3132 rw_exit(&Adapter->chip_lock); 3133 return (EBUSY); 3134 } 3135 3136 switch (pr_num) { 3137 case MAC_PROP_EN_1000FDX_CAP: 3138 if (hw->phy.media_type != e1000_media_type_copper) { 3139 err = ENOTSUP; 3140 break; 3141 } 3142 Adapter->param_en_1000fdx = *(uint8_t *)pr_val; 3143 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val; 3144 goto reset; 3145 case MAC_PROP_EN_100FDX_CAP: 3146 if (hw->phy.media_type != e1000_media_type_copper) { 3147 err = ENOTSUP; 3148 break; 3149 } 3150 Adapter->param_en_100fdx = *(uint8_t *)pr_val; 3151 Adapter->param_adv_100fdx = *(uint8_t *)pr_val; 3152 goto reset; 3153 case MAC_PROP_EN_100HDX_CAP: 3154 if (hw->phy.media_type != e1000_media_type_copper) { 3155 err = ENOTSUP; 3156 break; 3157 } 3158 Adapter->param_en_100hdx = *(uint8_t *)pr_val; 3159 Adapter->param_adv_100hdx = *(uint8_t *)pr_val; 3160 goto reset; 3161 case MAC_PROP_EN_10FDX_CAP: 3162 if (hw->phy.media_type != e1000_media_type_copper) { 3163 err = ENOTSUP; 3164 break; 3165 } 3166 Adapter->param_en_10fdx = *(uint8_t *)pr_val; 3167 Adapter->param_adv_10fdx = *(uint8_t *)pr_val; 3168 goto reset; 3169 case MAC_PROP_EN_10HDX_CAP: 3170 if (hw->phy.media_type != e1000_media_type_copper) { 3171 err = ENOTSUP; 3172 break; 3173 } 3174 Adapter->param_en_10hdx = *(uint8_t *)pr_val; 3175 Adapter->param_adv_10hdx = *(uint8_t *)pr_val; 3176 goto reset; 3177 case MAC_PROP_AUTONEG: 3178 if (hw->phy.media_type != e1000_media_type_copper) { 3179 err = ENOTSUP; 3180 break; 3181 } 3182 Adapter->param_adv_autoneg = *(uint8_t *)pr_val; 3183 goto reset; 3184 case MAC_PROP_FLOWCTRL: 3185 fc->send_xon = B_TRUE; 3186 bcopy(pr_val, &flowctrl, sizeof (flowctrl)); 3187 3188 switch (flowctrl) { 3189 default: 3190 err = EINVAL; 3191 break; 3192 case LINK_FLOWCTRL_NONE: 3193 fc->requested_mode = e1000_fc_none; 3194 break; 3195 case LINK_FLOWCTRL_RX: 3196 fc->requested_mode = e1000_fc_rx_pause; 3197 break; 3198 case LINK_FLOWCTRL_TX: 3199 fc->requested_mode = e1000_fc_tx_pause; 3200 break; 3201 case LINK_FLOWCTRL_BI: 3202 fc->requested_mode = e1000_fc_full; 3203 break; 3204 } 3205 reset: 3206 if (err == 0) { 3207 /* check PCH limits & reset the link */ 3208 e1000g_pch_limits(Adapter); 3209 if (e1000g_reset_link(Adapter) != DDI_SUCCESS) 3210 err = EINVAL; 3211 } 3212 break; 3213 case MAC_PROP_ADV_1000FDX_CAP: 3214 case MAC_PROP_ADV_1000HDX_CAP: 3215 case MAC_PROP_ADV_100FDX_CAP: 3216 case MAC_PROP_ADV_100HDX_CAP: 3217 case MAC_PROP_ADV_10FDX_CAP: 3218 case MAC_PROP_ADV_10HDX_CAP: 3219 case MAC_PROP_EN_1000HDX_CAP: 3220 case MAC_PROP_STATUS: 3221 case MAC_PROP_SPEED: 3222 case MAC_PROP_DUPLEX: 3223 err = ENOTSUP; /* read-only prop. Can't set this. */ 3224 break; 3225 case MAC_PROP_MTU: 3226 /* adapter must be stopped for an MTU change */ 3227 if (Adapter->e1000g_state & E1000G_STARTED) { 3228 err = EBUSY; 3229 break; 3230 } 3231 3232 cur_mtu = Adapter->default_mtu; 3233 3234 /* get new requested MTU */ 3235 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 3236 if (new_mtu == cur_mtu) { 3237 err = 0; 3238 break; 3239 } 3240 3241 if ((new_mtu < DEFAULT_MTU) || 3242 (new_mtu > Adapter->max_mtu)) { 3243 err = EINVAL; 3244 break; 3245 } 3246 3247 /* inform MAC framework of new MTU */ 3248 err = mac_maxsdu_update(Adapter->mh, new_mtu); 3249 3250 if (err == 0) { 3251 Adapter->default_mtu = new_mtu; 3252 Adapter->max_frame_size = 3253 e1000g_mtu2maxframe(new_mtu); 3254 3255 /* 3256 * check PCH limits & set buffer sizes to 3257 * match new MTU 3258 */ 3259 e1000g_pch_limits(Adapter); 3260 e1000g_set_bufsize(Adapter); 3261 3262 /* 3263 * decrease the number of descriptors and free 3264 * packets for jumbo frames to reduce tx/rx 3265 * resource consumption 3266 */ 3267 if (Adapter->max_frame_size >= 3268 (FRAME_SIZE_UPTO_4K)) { 3269 if (Adapter->tx_desc_num_flag == 0) 3270 Adapter->tx_desc_num = 3271 DEFAULT_JUMBO_NUM_TX_DESC; 3272 3273 if (Adapter->rx_desc_num_flag == 0) 3274 Adapter->rx_desc_num = 3275 DEFAULT_JUMBO_NUM_RX_DESC; 3276 3277 if (Adapter->tx_buf_num_flag == 0) 3278 Adapter->tx_freelist_num = 3279 DEFAULT_JUMBO_NUM_TX_BUF; 3280 3281 if (Adapter->rx_buf_num_flag == 0) 3282 Adapter->rx_freelist_limit = 3283 DEFAULT_JUMBO_NUM_RX_BUF; 3284 } else { 3285 if (Adapter->tx_desc_num_flag == 0) 3286 Adapter->tx_desc_num = 3287 DEFAULT_NUM_TX_DESCRIPTOR; 3288 3289 if (Adapter->rx_desc_num_flag == 0) 3290 Adapter->rx_desc_num = 3291 DEFAULT_NUM_RX_DESCRIPTOR; 3292 3293 if (Adapter->tx_buf_num_flag == 0) 3294 Adapter->tx_freelist_num = 3295 DEFAULT_NUM_TX_FREELIST; 3296 3297 if (Adapter->rx_buf_num_flag == 0) 3298 Adapter->rx_freelist_limit = 3299 DEFAULT_NUM_RX_FREELIST; 3300 } 3301 } 3302 break; 3303 case MAC_PROP_PRIVATE: 3304 err = e1000g_set_priv_prop(Adapter, pr_name, 3305 pr_valsize, pr_val); 3306 break; 3307 default: 3308 err = ENOTSUP; 3309 break; 3310 } 3311 rw_exit(&Adapter->chip_lock); 3312 return (err); 3313 } 3314 3315 static int 3316 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3317 uint_t pr_valsize, void *pr_val) 3318 { 3319 struct e1000g *Adapter = arg; 3320 struct e1000_fc_info *fc = &Adapter->shared.fc; 3321 int err = 0; 3322 link_flowctrl_t flowctrl; 3323 uint64_t tmp = 0; 3324 3325 switch (pr_num) { 3326 case MAC_PROP_DUPLEX: 3327 ASSERT(pr_valsize >= sizeof (link_duplex_t)); 3328 bcopy(&Adapter->link_duplex, pr_val, 3329 sizeof (link_duplex_t)); 3330 break; 3331 case MAC_PROP_SPEED: 3332 ASSERT(pr_valsize >= sizeof (uint64_t)); 3333 tmp = Adapter->link_speed * 1000000ull; 3334 bcopy(&tmp, pr_val, sizeof (tmp)); 3335 break; 3336 case MAC_PROP_AUTONEG: 3337 *(uint8_t *)pr_val = Adapter->param_adv_autoneg; 3338 break; 3339 case MAC_PROP_FLOWCTRL: 3340 ASSERT(pr_valsize >= sizeof (link_flowctrl_t)); 3341 switch (fc->current_mode) { 3342 case e1000_fc_none: 3343 flowctrl = LINK_FLOWCTRL_NONE; 3344 break; 3345 case e1000_fc_rx_pause: 3346 flowctrl = LINK_FLOWCTRL_RX; 3347 break; 3348 case e1000_fc_tx_pause: 3349 flowctrl = LINK_FLOWCTRL_TX; 3350 break; 3351 case e1000_fc_full: 3352 flowctrl = LINK_FLOWCTRL_BI; 3353 break; 3354 } 3355 bcopy(&flowctrl, pr_val, sizeof (flowctrl)); 3356 break; 3357 case MAC_PROP_ADV_1000FDX_CAP: 3358 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx; 3359 break; 3360 case MAC_PROP_EN_1000FDX_CAP: 3361 *(uint8_t *)pr_val = Adapter->param_en_1000fdx; 3362 break; 3363 case MAC_PROP_ADV_1000HDX_CAP: 3364 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx; 3365 break; 3366 case MAC_PROP_EN_1000HDX_CAP: 3367 *(uint8_t *)pr_val = Adapter->param_en_1000hdx; 3368 break; 3369 case MAC_PROP_ADV_100FDX_CAP: 3370 *(uint8_t *)pr_val = Adapter->param_adv_100fdx; 3371 break; 3372 case MAC_PROP_EN_100FDX_CAP: 3373 *(uint8_t *)pr_val = Adapter->param_en_100fdx; 3374 break; 3375 case MAC_PROP_ADV_100HDX_CAP: 3376 *(uint8_t *)pr_val = Adapter->param_adv_100hdx; 3377 break; 3378 case MAC_PROP_EN_100HDX_CAP: 3379 *(uint8_t *)pr_val = Adapter->param_en_100hdx; 3380 break; 3381 case MAC_PROP_ADV_10FDX_CAP: 3382 *(uint8_t *)pr_val = Adapter->param_adv_10fdx; 3383 break; 3384 case MAC_PROP_EN_10FDX_CAP: 3385 *(uint8_t *)pr_val = Adapter->param_en_10fdx; 3386 break; 3387 case MAC_PROP_ADV_10HDX_CAP: 3388 *(uint8_t *)pr_val = Adapter->param_adv_10hdx; 3389 break; 3390 case MAC_PROP_EN_10HDX_CAP: 3391 *(uint8_t *)pr_val = Adapter->param_en_10hdx; 3392 break; 3393 case MAC_PROP_ADV_100T4_CAP: 3394 case MAC_PROP_EN_100T4_CAP: 3395 *(uint8_t *)pr_val = Adapter->param_adv_100t4; 3396 break; 3397 case MAC_PROP_PRIVATE: 3398 err = e1000g_get_priv_prop(Adapter, pr_name, 3399 pr_valsize, pr_val); 3400 break; 3401 default: 3402 err = ENOTSUP; 3403 break; 3404 } 3405 3406 return (err); 3407 } 3408 3409 static void 3410 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3411 mac_prop_info_handle_t prh) 3412 { 3413 struct e1000g *Adapter = arg; 3414 struct e1000_hw *hw = &Adapter->shared; 3415 3416 switch (pr_num) { 3417 case MAC_PROP_DUPLEX: 3418 case MAC_PROP_SPEED: 3419 case MAC_PROP_ADV_1000FDX_CAP: 3420 case MAC_PROP_ADV_1000HDX_CAP: 3421 case MAC_PROP_ADV_100FDX_CAP: 3422 case MAC_PROP_ADV_100HDX_CAP: 3423 case MAC_PROP_ADV_10FDX_CAP: 3424 case MAC_PROP_ADV_10HDX_CAP: 3425 case MAC_PROP_ADV_100T4_CAP: 3426 case MAC_PROP_EN_100T4_CAP: 3427 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3428 break; 3429 3430 case MAC_PROP_EN_1000FDX_CAP: 3431 if (hw->phy.media_type != e1000_media_type_copper) { 3432 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3433 } else { 3434 mac_prop_info_set_default_uint8(prh, 3435 ((Adapter->phy_ext_status & 3436 IEEE_ESR_1000T_FD_CAPS) || 3437 (Adapter->phy_ext_status & 3438 IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0); 3439 } 3440 break; 3441 3442 case MAC_PROP_EN_100FDX_CAP: 3443 if (hw->phy.media_type != e1000_media_type_copper) { 3444 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3445 } else { 3446 mac_prop_info_set_default_uint8(prh, 3447 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 3448 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 3449 ? 1 : 0); 3450 } 3451 break; 3452 3453 case MAC_PROP_EN_100HDX_CAP: 3454 if (hw->phy.media_type != e1000_media_type_copper) { 3455 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3456 } else { 3457 mac_prop_info_set_default_uint8(prh, 3458 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) || 3459 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) 3460 ? 1 : 0); 3461 } 3462 break; 3463 3464 case MAC_PROP_EN_10FDX_CAP: 3465 if (hw->phy.media_type != e1000_media_type_copper) { 3466 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3467 } else { 3468 mac_prop_info_set_default_uint8(prh, 3469 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0); 3470 } 3471 break; 3472 3473 case MAC_PROP_EN_10HDX_CAP: 3474 if (hw->phy.media_type != e1000_media_type_copper) { 3475 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3476 } else { 3477 mac_prop_info_set_default_uint8(prh, 3478 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0); 3479 } 3480 break; 3481 3482 case MAC_PROP_EN_1000HDX_CAP: 3483 if (hw->phy.media_type != e1000_media_type_copper) 3484 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3485 break; 3486 3487 case MAC_PROP_AUTONEG: 3488 if (hw->phy.media_type != e1000_media_type_copper) { 3489 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3490 } else { 3491 mac_prop_info_set_default_uint8(prh, 3492 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) 3493 ? 1 : 0); 3494 } 3495 break; 3496 3497 case MAC_PROP_FLOWCTRL: 3498 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI); 3499 break; 3500 3501 case MAC_PROP_MTU: { 3502 struct e1000_mac_info *mac = &Adapter->shared.mac; 3503 struct e1000_phy_info *phy = &Adapter->shared.phy; 3504 uint32_t max; 3505 3506 /* some MAC types do not support jumbo frames */ 3507 if ((mac->type == e1000_ich8lan) || 3508 ((mac->type == e1000_ich9lan) && (phy->type == 3509 e1000_phy_ife))) { 3510 max = DEFAULT_MTU; 3511 } else { 3512 max = Adapter->max_mtu; 3513 } 3514 3515 mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max); 3516 break; 3517 } 3518 case MAC_PROP_PRIVATE: { 3519 char valstr[64]; 3520 int value; 3521 3522 if (strcmp(pr_name, "_adv_pause_cap") == 0 || 3523 strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 3524 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3525 return; 3526 } else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3527 value = DEFAULT_TX_BCOPY_THRESHOLD; 3528 } else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3529 value = DEFAULT_TX_INTR_ENABLE; 3530 } else if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3531 value = DEFAULT_TX_INTR_DELAY; 3532 } else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3533 value = DEFAULT_TX_INTR_ABS_DELAY; 3534 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3535 value = DEFAULT_RX_BCOPY_THRESHOLD; 3536 } else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3537 value = DEFAULT_RX_LIMIT_ON_INTR; 3538 } else if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3539 value = DEFAULT_RX_INTR_DELAY; 3540 } else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3541 value = DEFAULT_RX_INTR_ABS_DELAY; 3542 } else if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3543 value = DEFAULT_INTR_THROTTLING; 3544 } else if (strcmp(pr_name, "_intr_adaptive") == 0) { 3545 value = 1; 3546 } else { 3547 return; 3548 } 3549 3550 (void) snprintf(valstr, sizeof (valstr), "%d", value); 3551 mac_prop_info_set_default_str(prh, valstr); 3552 break; 3553 } 3554 } 3555 } 3556 3557 /* ARGSUSED2 */ 3558 static int 3559 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name, 3560 uint_t pr_valsize, const void *pr_val) 3561 { 3562 int err = 0; 3563 long result; 3564 struct e1000_hw *hw = &Adapter->shared; 3565 3566 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3567 if (pr_val == NULL) { 3568 err = EINVAL; 3569 return (err); 3570 } 3571 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3572 if (result < MIN_TX_BCOPY_THRESHOLD || 3573 result > MAX_TX_BCOPY_THRESHOLD) 3574 err = EINVAL; 3575 else { 3576 Adapter->tx_bcopy_thresh = (uint32_t)result; 3577 } 3578 return (err); 3579 } 3580 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3581 if (pr_val == NULL) { 3582 err = EINVAL; 3583 return (err); 3584 } 3585 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3586 if (result < 0 || result > 1) 3587 err = EINVAL; 3588 else { 3589 Adapter->tx_intr_enable = (result == 1) ? 3590 B_TRUE: B_FALSE; 3591 if (Adapter->tx_intr_enable) 3592 e1000g_mask_tx_interrupt(Adapter); 3593 else 3594 e1000g_clear_tx_interrupt(Adapter); 3595 if (e1000g_check_acc_handle( 3596 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3597 ddi_fm_service_impact(Adapter->dip, 3598 DDI_SERVICE_DEGRADED); 3599 err = EIO; 3600 } 3601 } 3602 return (err); 3603 } 3604 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3605 if (pr_val == NULL) { 3606 err = EINVAL; 3607 return (err); 3608 } 3609 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3610 if (result < MIN_TX_INTR_DELAY || 3611 result > MAX_TX_INTR_DELAY) 3612 err = EINVAL; 3613 else { 3614 Adapter->tx_intr_delay = (uint32_t)result; 3615 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay); 3616 if (e1000g_check_acc_handle( 3617 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3618 ddi_fm_service_impact(Adapter->dip, 3619 DDI_SERVICE_DEGRADED); 3620 err = EIO; 3621 } 3622 } 3623 return (err); 3624 } 3625 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3626 if (pr_val == NULL) { 3627 err = EINVAL; 3628 return (err); 3629 } 3630 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3631 if (result < MIN_TX_INTR_ABS_DELAY || 3632 result > MAX_TX_INTR_ABS_DELAY) 3633 err = EINVAL; 3634 else { 3635 Adapter->tx_intr_abs_delay = (uint32_t)result; 3636 E1000_WRITE_REG(hw, E1000_TADV, 3637 Adapter->tx_intr_abs_delay); 3638 if (e1000g_check_acc_handle( 3639 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3640 ddi_fm_service_impact(Adapter->dip, 3641 DDI_SERVICE_DEGRADED); 3642 err = EIO; 3643 } 3644 } 3645 return (err); 3646 } 3647 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3648 if (pr_val == NULL) { 3649 err = EINVAL; 3650 return (err); 3651 } 3652 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3653 if (result < MIN_RX_BCOPY_THRESHOLD || 3654 result > MAX_RX_BCOPY_THRESHOLD) 3655 err = EINVAL; 3656 else 3657 Adapter->rx_bcopy_thresh = (uint32_t)result; 3658 return (err); 3659 } 3660 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3661 if (pr_val == NULL) { 3662 err = EINVAL; 3663 return (err); 3664 } 3665 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3666 if (result < MIN_RX_LIMIT_ON_INTR || 3667 result > MAX_RX_LIMIT_ON_INTR) 3668 err = EINVAL; 3669 else 3670 Adapter->rx_limit_onintr = (uint32_t)result; 3671 return (err); 3672 } 3673 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3674 if (pr_val == NULL) { 3675 err = EINVAL; 3676 return (err); 3677 } 3678 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3679 if (result < MIN_RX_INTR_DELAY || 3680 result > MAX_RX_INTR_DELAY) 3681 err = EINVAL; 3682 else { 3683 Adapter->rx_intr_delay = (uint32_t)result; 3684 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay); 3685 if (e1000g_check_acc_handle( 3686 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3687 ddi_fm_service_impact(Adapter->dip, 3688 DDI_SERVICE_DEGRADED); 3689 err = EIO; 3690 } 3691 } 3692 return (err); 3693 } 3694 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3695 if (pr_val == NULL) { 3696 err = EINVAL; 3697 return (err); 3698 } 3699 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3700 if (result < MIN_RX_INTR_ABS_DELAY || 3701 result > MAX_RX_INTR_ABS_DELAY) 3702 err = EINVAL; 3703 else { 3704 Adapter->rx_intr_abs_delay = (uint32_t)result; 3705 E1000_WRITE_REG(hw, E1000_RADV, 3706 Adapter->rx_intr_abs_delay); 3707 if (e1000g_check_acc_handle( 3708 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3709 ddi_fm_service_impact(Adapter->dip, 3710 DDI_SERVICE_DEGRADED); 3711 err = EIO; 3712 } 3713 } 3714 return (err); 3715 } 3716 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3717 if (pr_val == NULL) { 3718 err = EINVAL; 3719 return (err); 3720 } 3721 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3722 if (result < MIN_INTR_THROTTLING || 3723 result > MAX_INTR_THROTTLING) 3724 err = EINVAL; 3725 else { 3726 if (hw->mac.type >= e1000_82540) { 3727 Adapter->intr_throttling_rate = 3728 (uint32_t)result; 3729 E1000_WRITE_REG(hw, E1000_ITR, 3730 Adapter->intr_throttling_rate); 3731 if (e1000g_check_acc_handle( 3732 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3733 ddi_fm_service_impact(Adapter->dip, 3734 DDI_SERVICE_DEGRADED); 3735 err = EIO; 3736 } 3737 } else 3738 err = EINVAL; 3739 } 3740 return (err); 3741 } 3742 if (strcmp(pr_name, "_intr_adaptive") == 0) { 3743 if (pr_val == NULL) { 3744 err = EINVAL; 3745 return (err); 3746 } 3747 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3748 if (result < 0 || result > 1) 3749 err = EINVAL; 3750 else { 3751 if (hw->mac.type >= e1000_82540) { 3752 Adapter->intr_adaptive = (result == 1) ? 3753 B_TRUE : B_FALSE; 3754 } else { 3755 err = EINVAL; 3756 } 3757 } 3758 return (err); 3759 } 3760 return (ENOTSUP); 3761 } 3762 3763 static int 3764 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name, 3765 uint_t pr_valsize, void *pr_val) 3766 { 3767 int err = ENOTSUP; 3768 int value; 3769 3770 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 3771 value = Adapter->param_adv_pause; 3772 err = 0; 3773 goto done; 3774 } 3775 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 3776 value = Adapter->param_adv_asym_pause; 3777 err = 0; 3778 goto done; 3779 } 3780 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3781 value = Adapter->tx_bcopy_thresh; 3782 err = 0; 3783 goto done; 3784 } 3785 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3786 value = Adapter->tx_intr_enable; 3787 err = 0; 3788 goto done; 3789 } 3790 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3791 value = Adapter->tx_intr_delay; 3792 err = 0; 3793 goto done; 3794 } 3795 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3796 value = Adapter->tx_intr_abs_delay; 3797 err = 0; 3798 goto done; 3799 } 3800 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3801 value = Adapter->rx_bcopy_thresh; 3802 err = 0; 3803 goto done; 3804 } 3805 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3806 value = Adapter->rx_limit_onintr; 3807 err = 0; 3808 goto done; 3809 } 3810 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3811 value = Adapter->rx_intr_delay; 3812 err = 0; 3813 goto done; 3814 } 3815 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3816 value = Adapter->rx_intr_abs_delay; 3817 err = 0; 3818 goto done; 3819 } 3820 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3821 value = Adapter->intr_throttling_rate; 3822 err = 0; 3823 goto done; 3824 } 3825 if (strcmp(pr_name, "_intr_adaptive") == 0) { 3826 value = Adapter->intr_adaptive; 3827 err = 0; 3828 goto done; 3829 } 3830 done: 3831 if (err == 0) { 3832 (void) snprintf(pr_val, pr_valsize, "%d", value); 3833 } 3834 return (err); 3835 } 3836 3837 /* 3838 * e1000g_get_conf - get configurations set in e1000g.conf 3839 * This routine gets user-configured values out of the configuration 3840 * file e1000g.conf. 3841 * 3842 * For each configurable value, there is a minimum, a maximum, and a 3843 * default. 3844 * If user does not configure a value, use the default. 3845 * If user configures below the minimum, use the minumum. 3846 * If user configures above the maximum, use the maxumum. 3847 */ 3848 static void 3849 e1000g_get_conf(struct e1000g *Adapter) 3850 { 3851 struct e1000_hw *hw = &Adapter->shared; 3852 boolean_t tbi_compatibility = B_FALSE; 3853 boolean_t is_jumbo = B_FALSE; 3854 int propval; 3855 /* 3856 * decrease the number of descriptors and free packets 3857 * for jumbo frames to reduce tx/rx resource consumption 3858 */ 3859 if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) { 3860 is_jumbo = B_TRUE; 3861 } 3862 3863 /* 3864 * get each configurable property from e1000g.conf 3865 */ 3866 3867 /* 3868 * NumTxDescriptors 3869 */ 3870 Adapter->tx_desc_num_flag = 3871 e1000g_get_prop(Adapter, "NumTxDescriptors", 3872 MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR, 3873 is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC 3874 : DEFAULT_NUM_TX_DESCRIPTOR, &propval); 3875 Adapter->tx_desc_num = propval; 3876 3877 /* 3878 * NumRxDescriptors 3879 */ 3880 Adapter->rx_desc_num_flag = 3881 e1000g_get_prop(Adapter, "NumRxDescriptors", 3882 MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR, 3883 is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC 3884 : DEFAULT_NUM_RX_DESCRIPTOR, &propval); 3885 Adapter->rx_desc_num = propval; 3886 3887 /* 3888 * NumRxFreeList 3889 */ 3890 Adapter->rx_buf_num_flag = 3891 e1000g_get_prop(Adapter, "NumRxFreeList", 3892 MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST, 3893 is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF 3894 : DEFAULT_NUM_RX_FREELIST, &propval); 3895 Adapter->rx_freelist_limit = propval; 3896 3897 /* 3898 * NumTxPacketList 3899 */ 3900 Adapter->tx_buf_num_flag = 3901 e1000g_get_prop(Adapter, "NumTxPacketList", 3902 MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST, 3903 is_jumbo ? DEFAULT_JUMBO_NUM_TX_BUF 3904 : DEFAULT_NUM_TX_FREELIST, &propval); 3905 Adapter->tx_freelist_num = propval; 3906 3907 /* 3908 * FlowControl 3909 */ 3910 hw->fc.send_xon = B_TRUE; 3911 (void) e1000g_get_prop(Adapter, "FlowControl", 3912 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval); 3913 hw->fc.requested_mode = propval; 3914 /* 4 is the setting that says "let the eeprom decide" */ 3915 if (hw->fc.requested_mode == 4) 3916 hw->fc.requested_mode = e1000_fc_default; 3917 3918 /* 3919 * Max Num Receive Packets on Interrupt 3920 */ 3921 (void) e1000g_get_prop(Adapter, "MaxNumReceivePackets", 3922 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR, 3923 DEFAULT_RX_LIMIT_ON_INTR, &propval); 3924 Adapter->rx_limit_onintr = propval; 3925 3926 /* 3927 * PHY master slave setting 3928 */ 3929 (void) e1000g_get_prop(Adapter, "SetMasterSlave", 3930 e1000_ms_hw_default, e1000_ms_auto, 3931 e1000_ms_hw_default, &propval); 3932 hw->phy.ms_type = propval; 3933 3934 /* 3935 * Parameter which controls TBI mode workaround, which is only 3936 * needed on certain switches such as Cisco 6500/Foundry 3937 */ 3938 (void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable", 3939 0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval); 3940 tbi_compatibility = (propval == 1); 3941 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility); 3942 3943 /* 3944 * MSI Enable 3945 */ 3946 (void) e1000g_get_prop(Adapter, "MSIEnable", 3947 0, 1, DEFAULT_MSI_ENABLE, &propval); 3948 Adapter->msi_enable = (propval == 1); 3949 3950 /* 3951 * Interrupt Throttling Rate 3952 */ 3953 (void) e1000g_get_prop(Adapter, "intr_throttling_rate", 3954 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING, 3955 DEFAULT_INTR_THROTTLING, &propval); 3956 Adapter->intr_throttling_rate = propval; 3957 3958 /* 3959 * Adaptive Interrupt Blanking Enable/Disable 3960 * It is enabled by default 3961 */ 3962 (void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1, 3963 &propval); 3964 Adapter->intr_adaptive = (propval == 1); 3965 3966 /* 3967 * Hardware checksum enable/disable parameter 3968 */ 3969 (void) e1000g_get_prop(Adapter, "tx_hcksum_enable", 3970 0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval); 3971 Adapter->tx_hcksum_enable = (propval == 1); 3972 /* 3973 * Checksum on/off selection via global parameters. 3974 * 3975 * If the chip is flagged as not capable of (correctly) 3976 * handling checksumming, we don't enable it on either 3977 * Rx or Tx side. Otherwise, we take this chip's settings 3978 * from the patchable global defaults. 3979 * 3980 * We advertise our capabilities only if TX offload is 3981 * enabled. On receive, the stack will accept checksummed 3982 * packets anyway, even if we haven't said we can deliver 3983 * them. 3984 */ 3985 switch (hw->mac.type) { 3986 case e1000_82540: 3987 case e1000_82544: 3988 case e1000_82545: 3989 case e1000_82545_rev_3: 3990 case e1000_82546: 3991 case e1000_82546_rev_3: 3992 case e1000_82571: 3993 case e1000_82572: 3994 case e1000_82573: 3995 case e1000_80003es2lan: 3996 break; 3997 /* 3998 * For the following Intel PRO/1000 chipsets, we have not 3999 * tested the hardware checksum offload capability, so we 4000 * disable the capability for them. 4001 * e1000_82542, 4002 * e1000_82543, 4003 * e1000_82541, 4004 * e1000_82541_rev_2, 4005 * e1000_82547, 4006 * e1000_82547_rev_2, 4007 */ 4008 default: 4009 Adapter->tx_hcksum_enable = B_FALSE; 4010 } 4011 4012 /* 4013 * Large Send Offloading(LSO) Enable/Disable 4014 * If the tx hardware checksum is not enabled, LSO should be 4015 * disabled. 4016 */ 4017 (void) e1000g_get_prop(Adapter, "lso_enable", 4018 0, 1, DEFAULT_LSO_ENABLE, &propval); 4019 Adapter->lso_enable = (propval == 1); 4020 4021 switch (hw->mac.type) { 4022 case e1000_82546: 4023 case e1000_82546_rev_3: 4024 if (Adapter->lso_enable) 4025 Adapter->lso_premature_issue = B_TRUE; 4026 /* FALLTHRU */ 4027 case e1000_82571: 4028 case e1000_82572: 4029 case e1000_82573: 4030 case e1000_80003es2lan: 4031 break; 4032 default: 4033 Adapter->lso_enable = B_FALSE; 4034 } 4035 4036 if (!Adapter->tx_hcksum_enable) { 4037 Adapter->lso_premature_issue = B_FALSE; 4038 Adapter->lso_enable = B_FALSE; 4039 } 4040 4041 /* 4042 * If mem_workaround_82546 is enabled, the rx buffer allocated by 4043 * e1000_82545, e1000_82546 and e1000_82546_rev_3 4044 * will not cross 64k boundary. 4045 */ 4046 (void) e1000g_get_prop(Adapter, "mem_workaround_82546", 4047 0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval); 4048 Adapter->mem_workaround_82546 = (propval == 1); 4049 4050 /* 4051 * Max number of multicast addresses 4052 */ 4053 (void) e1000g_get_prop(Adapter, "mcast_max_num", 4054 MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32, 4055 &propval); 4056 Adapter->mcast_max_num = propval; 4057 } 4058 4059 /* 4060 * e1000g_get_prop - routine to read properties 4061 * 4062 * Get a user-configure property value out of the configuration 4063 * file e1000g.conf. 4064 * 4065 * Caller provides name of the property, a default value, a minimum 4066 * value, a maximum value and a pointer to the returned property 4067 * value. 4068 * 4069 * Return B_TRUE if the configured value of the property is not a default 4070 * value, otherwise return B_FALSE. 4071 */ 4072 static boolean_t 4073 e1000g_get_prop(struct e1000g *Adapter, /* point to per-adapter structure */ 4074 char *propname, /* name of the property */ 4075 int minval, /* minimum acceptable value */ 4076 int maxval, /* maximim acceptable value */ 4077 int defval, /* default value */ 4078 int *propvalue) /* property value return to caller */ 4079 { 4080 int propval; /* value returned for requested property */ 4081 int *props; /* point to array of properties returned */ 4082 uint_t nprops; /* number of property value returned */ 4083 boolean_t ret = B_TRUE; 4084 4085 /* 4086 * get the array of properties from the config file 4087 */ 4088 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip, 4089 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) { 4090 /* got some properties, test if we got enough */ 4091 if (Adapter->instance < nprops) { 4092 propval = props[Adapter->instance]; 4093 } else { 4094 /* not enough properties configured */ 4095 propval = defval; 4096 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4097 "Not Enough %s values found in e1000g.conf" 4098 " - set to %d\n", 4099 propname, propval); 4100 ret = B_FALSE; 4101 } 4102 4103 /* free memory allocated for properties */ 4104 ddi_prop_free(props); 4105 4106 } else { 4107 propval = defval; 4108 ret = B_FALSE; 4109 } 4110 4111 /* 4112 * enforce limits 4113 */ 4114 if (propval > maxval) { 4115 propval = maxval; 4116 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4117 "Too High %s value in e1000g.conf - set to %d\n", 4118 propname, propval); 4119 } 4120 4121 if (propval < minval) { 4122 propval = minval; 4123 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4124 "Too Low %s value in e1000g.conf - set to %d\n", 4125 propname, propval); 4126 } 4127 4128 *propvalue = propval; 4129 return (ret); 4130 } 4131 4132 static boolean_t 4133 e1000g_link_check(struct e1000g *Adapter) 4134 { 4135 uint16_t speed, duplex, phydata; 4136 boolean_t link_changed = B_FALSE; 4137 struct e1000_hw *hw; 4138 uint32_t reg_tarc; 4139 4140 hw = &Adapter->shared; 4141 4142 if (e1000g_link_up(Adapter)) { 4143 /* 4144 * The Link is up, check whether it was marked as down earlier 4145 */ 4146 if (Adapter->link_state != LINK_STATE_UP) { 4147 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex); 4148 Adapter->link_speed = speed; 4149 Adapter->link_duplex = duplex; 4150 Adapter->link_state = LINK_STATE_UP; 4151 link_changed = B_TRUE; 4152 4153 if (Adapter->link_speed == SPEED_1000) 4154 Adapter->stall_threshold = TX_STALL_TIME_2S; 4155 else 4156 Adapter->stall_threshold = TX_STALL_TIME_8S; 4157 4158 Adapter->tx_link_down_timeout = 0; 4159 4160 if ((hw->mac.type == e1000_82571) || 4161 (hw->mac.type == e1000_82572)) { 4162 reg_tarc = E1000_READ_REG(hw, E1000_TARC(0)); 4163 if (speed == SPEED_1000) 4164 reg_tarc |= (1 << 21); 4165 else 4166 reg_tarc &= ~(1 << 21); 4167 E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc); 4168 } 4169 } 4170 Adapter->smartspeed = 0; 4171 } else { 4172 if (Adapter->link_state != LINK_STATE_DOWN) { 4173 Adapter->link_speed = 0; 4174 Adapter->link_duplex = 0; 4175 Adapter->link_state = LINK_STATE_DOWN; 4176 link_changed = B_TRUE; 4177 4178 /* 4179 * SmartSpeed workaround for Tabor/TanaX, When the 4180 * driver loses link disable auto master/slave 4181 * resolution. 4182 */ 4183 if (hw->phy.type == e1000_phy_igp) { 4184 (void) e1000_read_phy_reg(hw, 4185 PHY_1000T_CTRL, &phydata); 4186 phydata |= CR_1000T_MS_ENABLE; 4187 (void) e1000_write_phy_reg(hw, 4188 PHY_1000T_CTRL, phydata); 4189 } 4190 } else { 4191 e1000g_smartspeed(Adapter); 4192 } 4193 4194 if (Adapter->e1000g_state & E1000G_STARTED) { 4195 if (Adapter->tx_link_down_timeout < 4196 MAX_TX_LINK_DOWN_TIMEOUT) { 4197 Adapter->tx_link_down_timeout++; 4198 } else if (Adapter->tx_link_down_timeout == 4199 MAX_TX_LINK_DOWN_TIMEOUT) { 4200 e1000g_tx_clean(Adapter); 4201 Adapter->tx_link_down_timeout++; 4202 } 4203 } 4204 } 4205 4206 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 4207 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4208 4209 return (link_changed); 4210 } 4211 4212 /* 4213 * e1000g_reset_link - Using the link properties to setup the link 4214 */ 4215 int 4216 e1000g_reset_link(struct e1000g *Adapter) 4217 { 4218 struct e1000_mac_info *mac; 4219 struct e1000_phy_info *phy; 4220 struct e1000_hw *hw; 4221 boolean_t invalid; 4222 4223 mac = &Adapter->shared.mac; 4224 phy = &Adapter->shared.phy; 4225 hw = &Adapter->shared; 4226 invalid = B_FALSE; 4227 4228 if (hw->phy.media_type != e1000_media_type_copper) 4229 goto out; 4230 4231 if (Adapter->param_adv_autoneg == 1) { 4232 mac->autoneg = B_TRUE; 4233 phy->autoneg_advertised = 0; 4234 4235 /* 4236 * 1000hdx is not supported for autonegotiation 4237 */ 4238 if (Adapter->param_adv_1000fdx == 1) 4239 phy->autoneg_advertised |= ADVERTISE_1000_FULL; 4240 4241 if (Adapter->param_adv_100fdx == 1) 4242 phy->autoneg_advertised |= ADVERTISE_100_FULL; 4243 4244 if (Adapter->param_adv_100hdx == 1) 4245 phy->autoneg_advertised |= ADVERTISE_100_HALF; 4246 4247 if (Adapter->param_adv_10fdx == 1) 4248 phy->autoneg_advertised |= ADVERTISE_10_FULL; 4249 4250 if (Adapter->param_adv_10hdx == 1) 4251 phy->autoneg_advertised |= ADVERTISE_10_HALF; 4252 4253 if (phy->autoneg_advertised == 0) 4254 invalid = B_TRUE; 4255 } else { 4256 mac->autoneg = B_FALSE; 4257 4258 /* 4259 * For Intel copper cards, 1000fdx and 1000hdx are not 4260 * supported for forced link 4261 */ 4262 if (Adapter->param_adv_100fdx == 1) 4263 mac->forced_speed_duplex = ADVERTISE_100_FULL; 4264 else if (Adapter->param_adv_100hdx == 1) 4265 mac->forced_speed_duplex = ADVERTISE_100_HALF; 4266 else if (Adapter->param_adv_10fdx == 1) 4267 mac->forced_speed_duplex = ADVERTISE_10_FULL; 4268 else if (Adapter->param_adv_10hdx == 1) 4269 mac->forced_speed_duplex = ADVERTISE_10_HALF; 4270 else 4271 invalid = B_TRUE; 4272 4273 } 4274 4275 if (invalid) { 4276 e1000g_log(Adapter, CE_WARN, 4277 "Invalid link settings. Setup link to " 4278 "support autonegotiation with all link capabilities."); 4279 mac->autoneg = B_TRUE; 4280 phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 4281 } 4282 4283 out: 4284 return (e1000_setup_link(&Adapter->shared)); 4285 } 4286 4287 static void 4288 e1000g_timer_tx_resched(struct e1000g *Adapter) 4289 { 4290 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 4291 4292 rw_enter(&Adapter->chip_lock, RW_READER); 4293 4294 if (tx_ring->resched_needed && 4295 ((ddi_get_lbolt() - tx_ring->resched_timestamp) > 4296 drv_usectohz(1000000)) && 4297 (Adapter->e1000g_state & E1000G_STARTED) && 4298 (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) { 4299 tx_ring->resched_needed = B_FALSE; 4300 mac_tx_update(Adapter->mh); 4301 E1000G_STAT(tx_ring->stat_reschedule); 4302 E1000G_STAT(tx_ring->stat_timer_reschedule); 4303 } 4304 4305 rw_exit(&Adapter->chip_lock); 4306 } 4307 4308 static void 4309 e1000g_local_timer(void *ws) 4310 { 4311 struct e1000g *Adapter = (struct e1000g *)ws; 4312 struct e1000_hw *hw; 4313 e1000g_ether_addr_t ether_addr; 4314 boolean_t link_changed; 4315 4316 hw = &Adapter->shared; 4317 4318 if (Adapter->e1000g_state & E1000G_ERROR) { 4319 rw_enter(&Adapter->chip_lock, RW_WRITER); 4320 Adapter->e1000g_state &= ~E1000G_ERROR; 4321 rw_exit(&Adapter->chip_lock); 4322 4323 Adapter->reset_count++; 4324 if (e1000g_global_reset(Adapter)) { 4325 ddi_fm_service_impact(Adapter->dip, 4326 DDI_SERVICE_RESTORED); 4327 e1000g_timer_tx_resched(Adapter); 4328 } else 4329 ddi_fm_service_impact(Adapter->dip, 4330 DDI_SERVICE_LOST); 4331 return; 4332 } 4333 4334 if (e1000g_stall_check(Adapter)) { 4335 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 4336 "Tx stall detected. Activate automatic recovery.\n"); 4337 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL); 4338 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 4339 Adapter->reset_count++; 4340 if (e1000g_reset_adapter(Adapter)) { 4341 ddi_fm_service_impact(Adapter->dip, 4342 DDI_SERVICE_RESTORED); 4343 e1000g_timer_tx_resched(Adapter); 4344 } 4345 return; 4346 } 4347 4348 link_changed = B_FALSE; 4349 rw_enter(&Adapter->chip_lock, RW_READER); 4350 if (Adapter->link_complete) 4351 link_changed = e1000g_link_check(Adapter); 4352 rw_exit(&Adapter->chip_lock); 4353 4354 if (link_changed) { 4355 if (!Adapter->reset_flag && 4356 (Adapter->e1000g_state & E1000G_STARTED) && 4357 !(Adapter->e1000g_state & E1000G_SUSPENDED)) 4358 mac_link_update(Adapter->mh, Adapter->link_state); 4359 if (Adapter->link_state == LINK_STATE_UP) 4360 Adapter->reset_flag = B_FALSE; 4361 } 4362 /* 4363 * Workaround for esb2. Data stuck in fifo on a link 4364 * down event. Reset the adapter to recover it. 4365 */ 4366 if (Adapter->esb2_workaround) { 4367 Adapter->esb2_workaround = B_FALSE; 4368 (void) e1000g_reset_adapter(Adapter); 4369 return; 4370 } 4371 4372 /* 4373 * With 82571 controllers, any locally administered address will 4374 * be overwritten when there is a reset on the other port. 4375 * Detect this circumstance and correct it. 4376 */ 4377 if ((hw->mac.type == e1000_82571) && 4378 (e1000_get_laa_state_82571(hw) == B_TRUE)) { 4379 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0); 4380 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1); 4381 4382 ether_addr.reg.low = ntohl(ether_addr.reg.low); 4383 ether_addr.reg.high = ntohl(ether_addr.reg.high); 4384 4385 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) || 4386 (ether_addr.mac.addr[4] != hw->mac.addr[1]) || 4387 (ether_addr.mac.addr[3] != hw->mac.addr[2]) || 4388 (ether_addr.mac.addr[2] != hw->mac.addr[3]) || 4389 (ether_addr.mac.addr[1] != hw->mac.addr[4]) || 4390 (ether_addr.mac.addr[0] != hw->mac.addr[5])) { 4391 e1000_rar_set(hw, hw->mac.addr, 0); 4392 } 4393 } 4394 4395 /* 4396 * Long TTL workaround for 82541/82547 4397 */ 4398 (void) e1000_igp_ttl_workaround_82547(hw); 4399 4400 /* 4401 * Check for Adaptive IFS settings If there are lots of collisions 4402 * change the value in steps... 4403 * These properties should only be set for 10/100 4404 */ 4405 if ((hw->phy.media_type == e1000_media_type_copper) && 4406 ((Adapter->link_speed == SPEED_100) || 4407 (Adapter->link_speed == SPEED_10))) { 4408 e1000_update_adaptive(hw); 4409 } 4410 /* 4411 * Set Timer Interrupts 4412 */ 4413 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 4414 4415 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 4416 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4417 else 4418 e1000g_timer_tx_resched(Adapter); 4419 4420 restart_watchdog_timer(Adapter); 4421 } 4422 4423 /* 4424 * The function e1000g_link_timer() is called when the timer for link setup 4425 * is expired, which indicates the completion of the link setup. The link 4426 * state will not be updated until the link setup is completed. And the 4427 * link state will not be sent to the upper layer through mac_link_update() 4428 * in this function. It will be updated in the local timer routine or the 4429 * interrupt service routine after the interface is started (plumbed). 4430 */ 4431 static void 4432 e1000g_link_timer(void *arg) 4433 { 4434 struct e1000g *Adapter = (struct e1000g *)arg; 4435 4436 mutex_enter(&Adapter->link_lock); 4437 Adapter->link_complete = B_TRUE; 4438 Adapter->link_tid = 0; 4439 mutex_exit(&Adapter->link_lock); 4440 } 4441 4442 /* 4443 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf 4444 * 4445 * This function read the forced speed and duplex for 10/100 Mbps speeds 4446 * and also for 1000 Mbps speeds from the e1000g.conf file 4447 */ 4448 static void 4449 e1000g_force_speed_duplex(struct e1000g *Adapter) 4450 { 4451 int forced; 4452 int propval; 4453 struct e1000_mac_info *mac = &Adapter->shared.mac; 4454 struct e1000_phy_info *phy = &Adapter->shared.phy; 4455 4456 /* 4457 * get value out of config file 4458 */ 4459 (void) e1000g_get_prop(Adapter, "ForceSpeedDuplex", 4460 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced); 4461 4462 switch (forced) { 4463 case GDIAG_10_HALF: 4464 /* 4465 * Disable Auto Negotiation 4466 */ 4467 mac->autoneg = B_FALSE; 4468 mac->forced_speed_duplex = ADVERTISE_10_HALF; 4469 break; 4470 case GDIAG_10_FULL: 4471 /* 4472 * Disable Auto Negotiation 4473 */ 4474 mac->autoneg = B_FALSE; 4475 mac->forced_speed_duplex = ADVERTISE_10_FULL; 4476 break; 4477 case GDIAG_100_HALF: 4478 /* 4479 * Disable Auto Negotiation 4480 */ 4481 mac->autoneg = B_FALSE; 4482 mac->forced_speed_duplex = ADVERTISE_100_HALF; 4483 break; 4484 case GDIAG_100_FULL: 4485 /* 4486 * Disable Auto Negotiation 4487 */ 4488 mac->autoneg = B_FALSE; 4489 mac->forced_speed_duplex = ADVERTISE_100_FULL; 4490 break; 4491 case GDIAG_1000_FULL: 4492 /* 4493 * The gigabit spec requires autonegotiation. Therefore, 4494 * when the user wants to force the speed to 1000Mbps, we 4495 * enable AutoNeg, but only allow the harware to advertise 4496 * 1000Mbps. This is different from 10/100 operation, where 4497 * we are allowed to link without any negotiation. 4498 */ 4499 mac->autoneg = B_TRUE; 4500 phy->autoneg_advertised = ADVERTISE_1000_FULL; 4501 break; 4502 default: /* obey the setting of AutoNegAdvertised */ 4503 mac->autoneg = B_TRUE; 4504 (void) e1000g_get_prop(Adapter, "AutoNegAdvertised", 4505 0, AUTONEG_ADVERTISE_SPEED_DEFAULT, 4506 AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval); 4507 phy->autoneg_advertised = (uint16_t)propval; 4508 break; 4509 } /* switch */ 4510 } 4511 4512 /* 4513 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf 4514 * 4515 * This function reads MaxFrameSize from e1000g.conf 4516 */ 4517 static void 4518 e1000g_get_max_frame_size(struct e1000g *Adapter) 4519 { 4520 int max_frame; 4521 4522 /* 4523 * get value out of config file 4524 */ 4525 (void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0, 4526 &max_frame); 4527 4528 switch (max_frame) { 4529 case 0: 4530 Adapter->default_mtu = ETHERMTU; 4531 break; 4532 case 1: 4533 Adapter->default_mtu = FRAME_SIZE_UPTO_4K - 4534 sizeof (struct ether_vlan_header) - ETHERFCSL; 4535 break; 4536 case 2: 4537 Adapter->default_mtu = FRAME_SIZE_UPTO_8K - 4538 sizeof (struct ether_vlan_header) - ETHERFCSL; 4539 break; 4540 case 3: 4541 Adapter->default_mtu = FRAME_SIZE_UPTO_16K - 4542 sizeof (struct ether_vlan_header) - ETHERFCSL; 4543 break; 4544 default: 4545 Adapter->default_mtu = ETHERMTU; 4546 break; 4547 } /* switch */ 4548 4549 /* 4550 * If the user configed MTU is larger than the deivce's maximum MTU, 4551 * the MTU is set to the deivce's maximum value. 4552 */ 4553 if (Adapter->default_mtu > Adapter->max_mtu) 4554 Adapter->default_mtu = Adapter->max_mtu; 4555 4556 Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu); 4557 } 4558 4559 /* 4560 * e1000g_pch_limits - Apply limits of the PCH silicon type 4561 * 4562 * At any frame size larger than the ethernet default, 4563 * prevent linking at 10/100 speeds. 4564 */ 4565 static void 4566 e1000g_pch_limits(struct e1000g *Adapter) 4567 { 4568 struct e1000_hw *hw = &Adapter->shared; 4569 4570 /* only applies to PCH silicon type */ 4571 if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan) 4572 return; 4573 4574 /* only applies to frames larger than ethernet default */ 4575 if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) { 4576 hw->mac.autoneg = B_TRUE; 4577 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL; 4578 4579 Adapter->param_adv_autoneg = 1; 4580 Adapter->param_adv_1000fdx = 1; 4581 4582 Adapter->param_adv_100fdx = 0; 4583 Adapter->param_adv_100hdx = 0; 4584 Adapter->param_adv_10fdx = 0; 4585 Adapter->param_adv_10hdx = 0; 4586 4587 e1000g_param_sync(Adapter); 4588 } 4589 } 4590 4591 /* 4592 * e1000g_mtu2maxframe - convert given MTU to maximum frame size 4593 */ 4594 static uint32_t 4595 e1000g_mtu2maxframe(uint32_t mtu) 4596 { 4597 uint32_t maxframe; 4598 4599 maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL; 4600 4601 return (maxframe); 4602 } 4603 4604 static void 4605 arm_watchdog_timer(struct e1000g *Adapter) 4606 { 4607 Adapter->watchdog_tid = 4608 timeout(e1000g_local_timer, 4609 (void *)Adapter, 1 * drv_usectohz(1000000)); 4610 } 4611 #pragma inline(arm_watchdog_timer) 4612 4613 static void 4614 enable_watchdog_timer(struct e1000g *Adapter) 4615 { 4616 mutex_enter(&Adapter->watchdog_lock); 4617 4618 if (!Adapter->watchdog_timer_enabled) { 4619 Adapter->watchdog_timer_enabled = B_TRUE; 4620 Adapter->watchdog_timer_started = B_TRUE; 4621 arm_watchdog_timer(Adapter); 4622 } 4623 4624 mutex_exit(&Adapter->watchdog_lock); 4625 } 4626 4627 static void 4628 disable_watchdog_timer(struct e1000g *Adapter) 4629 { 4630 timeout_id_t tid; 4631 4632 mutex_enter(&Adapter->watchdog_lock); 4633 4634 Adapter->watchdog_timer_enabled = B_FALSE; 4635 Adapter->watchdog_timer_started = B_FALSE; 4636 tid = Adapter->watchdog_tid; 4637 Adapter->watchdog_tid = 0; 4638 4639 mutex_exit(&Adapter->watchdog_lock); 4640 4641 if (tid != 0) 4642 (void) untimeout(tid); 4643 } 4644 4645 static void 4646 start_watchdog_timer(struct e1000g *Adapter) 4647 { 4648 mutex_enter(&Adapter->watchdog_lock); 4649 4650 if (Adapter->watchdog_timer_enabled) { 4651 if (!Adapter->watchdog_timer_started) { 4652 Adapter->watchdog_timer_started = B_TRUE; 4653 arm_watchdog_timer(Adapter); 4654 } 4655 } 4656 4657 mutex_exit(&Adapter->watchdog_lock); 4658 } 4659 4660 static void 4661 restart_watchdog_timer(struct e1000g *Adapter) 4662 { 4663 mutex_enter(&Adapter->watchdog_lock); 4664 4665 if (Adapter->watchdog_timer_started) 4666 arm_watchdog_timer(Adapter); 4667 4668 mutex_exit(&Adapter->watchdog_lock); 4669 } 4670 4671 static void 4672 stop_watchdog_timer(struct e1000g *Adapter) 4673 { 4674 timeout_id_t tid; 4675 4676 mutex_enter(&Adapter->watchdog_lock); 4677 4678 Adapter->watchdog_timer_started = B_FALSE; 4679 tid = Adapter->watchdog_tid; 4680 Adapter->watchdog_tid = 0; 4681 4682 mutex_exit(&Adapter->watchdog_lock); 4683 4684 if (tid != 0) 4685 (void) untimeout(tid); 4686 } 4687 4688 static void 4689 stop_link_timer(struct e1000g *Adapter) 4690 { 4691 timeout_id_t tid; 4692 4693 /* Disable the link timer */ 4694 mutex_enter(&Adapter->link_lock); 4695 4696 tid = Adapter->link_tid; 4697 Adapter->link_tid = 0; 4698 4699 mutex_exit(&Adapter->link_lock); 4700 4701 if (tid != 0) 4702 (void) untimeout(tid); 4703 } 4704 4705 static void 4706 stop_82547_timer(e1000g_tx_ring_t *tx_ring) 4707 { 4708 timeout_id_t tid; 4709 4710 /* Disable the tx timer for 82547 chipset */ 4711 mutex_enter(&tx_ring->tx_lock); 4712 4713 tx_ring->timer_enable_82547 = B_FALSE; 4714 tid = tx_ring->timer_id_82547; 4715 tx_ring->timer_id_82547 = 0; 4716 4717 mutex_exit(&tx_ring->tx_lock); 4718 4719 if (tid != 0) 4720 (void) untimeout(tid); 4721 } 4722 4723 void 4724 e1000g_clear_interrupt(struct e1000g *Adapter) 4725 { 4726 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 4727 0xffffffff & ~E1000_IMS_RXSEQ); 4728 } 4729 4730 void 4731 e1000g_mask_interrupt(struct e1000g *Adapter) 4732 { 4733 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, 4734 IMS_ENABLE_MASK & ~E1000_IMS_TXDW); 4735 4736 if (Adapter->tx_intr_enable) 4737 e1000g_mask_tx_interrupt(Adapter); 4738 } 4739 4740 /* 4741 * This routine is called by e1000g_quiesce(), therefore must not block. 4742 */ 4743 void 4744 e1000g_clear_all_interrupts(struct e1000g *Adapter) 4745 { 4746 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff); 4747 } 4748 4749 void 4750 e1000g_mask_tx_interrupt(struct e1000g *Adapter) 4751 { 4752 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW); 4753 } 4754 4755 void 4756 e1000g_clear_tx_interrupt(struct e1000g *Adapter) 4757 { 4758 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW); 4759 } 4760 4761 static void 4762 e1000g_smartspeed(struct e1000g *Adapter) 4763 { 4764 struct e1000_hw *hw = &Adapter->shared; 4765 uint16_t phy_status; 4766 uint16_t phy_ctrl; 4767 4768 /* 4769 * If we're not T-or-T, or we're not autoneg'ing, or we're not 4770 * advertising 1000Full, we don't even use the workaround 4771 */ 4772 if ((hw->phy.type != e1000_phy_igp) || 4773 !hw->mac.autoneg || 4774 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)) 4775 return; 4776 4777 /* 4778 * True if this is the first call of this function or after every 4779 * 30 seconds of not having link 4780 */ 4781 if (Adapter->smartspeed == 0) { 4782 /* 4783 * If Master/Slave config fault is asserted twice, we 4784 * assume back-to-back 4785 */ 4786 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4787 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4788 return; 4789 4790 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4791 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4792 return; 4793 /* 4794 * We're assuming back-2-back because our status register 4795 * insists! there's a fault in the master/slave 4796 * relationship that was "negotiated" 4797 */ 4798 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4799 /* 4800 * Is the phy configured for manual configuration of 4801 * master/slave? 4802 */ 4803 if (phy_ctrl & CR_1000T_MS_ENABLE) { 4804 /* 4805 * Yes. Then disable manual configuration (enable 4806 * auto configuration) of master/slave 4807 */ 4808 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4809 (void) e1000_write_phy_reg(hw, 4810 PHY_1000T_CTRL, phy_ctrl); 4811 /* 4812 * Effectively starting the clock 4813 */ 4814 Adapter->smartspeed++; 4815 /* 4816 * Restart autonegotiation 4817 */ 4818 if (!e1000_phy_setup_autoneg(hw) && 4819 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 4820 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4821 MII_CR_RESTART_AUTO_NEG); 4822 (void) e1000_write_phy_reg(hw, 4823 PHY_CONTROL, phy_ctrl); 4824 } 4825 } 4826 return; 4827 /* 4828 * Has 6 seconds transpired still without link? Remember, 4829 * you should reset the smartspeed counter once you obtain 4830 * link 4831 */ 4832 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4833 /* 4834 * Yes. Remember, we did at the start determine that 4835 * there's a master/slave configuration fault, so we're 4836 * still assuming there's someone on the other end, but we 4837 * just haven't yet been able to talk to it. We then 4838 * re-enable auto configuration of master/slave to see if 4839 * we're running 2/3 pair cables. 4840 */ 4841 /* 4842 * If still no link, perhaps using 2/3 pair cable 4843 */ 4844 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4845 phy_ctrl |= CR_1000T_MS_ENABLE; 4846 (void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 4847 /* 4848 * Restart autoneg with phy enabled for manual 4849 * configuration of master/slave 4850 */ 4851 if (!e1000_phy_setup_autoneg(hw) && 4852 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 4853 phy_ctrl |= 4854 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 4855 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl); 4856 } 4857 /* 4858 * Hopefully, there are no more faults and we've obtained 4859 * link as a result. 4860 */ 4861 } 4862 /* 4863 * Restart process after E1000_SMARTSPEED_MAX iterations (30 4864 * seconds) 4865 */ 4866 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4867 Adapter->smartspeed = 0; 4868 } 4869 4870 static boolean_t 4871 is_valid_mac_addr(uint8_t *mac_addr) 4872 { 4873 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 4874 const uint8_t addr_test2[6] = 4875 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4876 4877 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 4878 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 4879 return (B_FALSE); 4880 4881 return (B_TRUE); 4882 } 4883 4884 /* 4885 * e1000g_stall_check - check for tx stall 4886 * 4887 * This function checks if the adapter is stalled (in transmit). 4888 * 4889 * It is called each time the watchdog timeout is invoked. 4890 * If the transmit descriptor reclaim continuously fails, 4891 * the watchdog value will increment by 1. If the watchdog 4892 * value exceeds the threshold, the adapter is assumed to 4893 * have stalled and need to be reset. 4894 */ 4895 static boolean_t 4896 e1000g_stall_check(struct e1000g *Adapter) 4897 { 4898 e1000g_tx_ring_t *tx_ring; 4899 4900 tx_ring = Adapter->tx_ring; 4901 4902 if (Adapter->link_state != LINK_STATE_UP) 4903 return (B_FALSE); 4904 4905 (void) e1000g_recycle(tx_ring); 4906 4907 if (Adapter->stall_flag) 4908 return (B_TRUE); 4909 4910 return (B_FALSE); 4911 } 4912 4913 #ifdef E1000G_DEBUG 4914 static enum ioc_reply 4915 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp) 4916 { 4917 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd); 4918 e1000g_peekpoke_t *ppd; 4919 uint64_t mem_va; 4920 uint64_t maxoff; 4921 boolean_t peek; 4922 4923 switch (iocp->ioc_cmd) { 4924 4925 case E1000G_IOC_REG_PEEK: 4926 peek = B_TRUE; 4927 break; 4928 4929 case E1000G_IOC_REG_POKE: 4930 peek = B_FALSE; 4931 break; 4932 4933 deault: 4934 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL, 4935 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n", 4936 iocp->ioc_cmd); 4937 return (IOC_INVAL); 4938 } 4939 4940 /* 4941 * Validate format of ioctl 4942 */ 4943 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t)) 4944 return (IOC_INVAL); 4945 if (mp->b_cont == NULL) 4946 return (IOC_INVAL); 4947 4948 ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr; 4949 4950 /* 4951 * Validate request parameters 4952 */ 4953 switch (ppd->pp_acc_space) { 4954 4955 default: 4956 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL, 4957 "e1000g_diag_ioctl: invalid access space 0x%X\n", 4958 ppd->pp_acc_space); 4959 return (IOC_INVAL); 4960 4961 case E1000G_PP_SPACE_REG: 4962 /* 4963 * Memory-mapped I/O space 4964 */ 4965 ASSERT(ppd->pp_acc_size == 4); 4966 if (ppd->pp_acc_size != 4) 4967 return (IOC_INVAL); 4968 4969 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0) 4970 return (IOC_INVAL); 4971 4972 mem_va = 0; 4973 maxoff = 0x10000; 4974 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg; 4975 break; 4976 4977 case E1000G_PP_SPACE_E1000G: 4978 /* 4979 * E1000g data structure! 4980 */ 4981 mem_va = (uintptr_t)e1000gp; 4982 maxoff = sizeof (struct e1000g); 4983 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem; 4984 break; 4985 4986 } 4987 4988 if (ppd->pp_acc_offset >= maxoff) 4989 return (IOC_INVAL); 4990 4991 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff) 4992 return (IOC_INVAL); 4993 4994 /* 4995 * All OK - go! 4996 */ 4997 ppd->pp_acc_offset += mem_va; 4998 (*ppfn)(e1000gp, ppd); 4999 return (peek ? IOC_REPLY : IOC_ACK); 5000 } 5001 5002 static void 5003 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5004 { 5005 ddi_acc_handle_t handle; 5006 uint32_t *regaddr; 5007 5008 handle = e1000gp->osdep.reg_handle; 5009 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr + 5010 (uintptr_t)ppd->pp_acc_offset); 5011 5012 ppd->pp_acc_data = ddi_get32(handle, regaddr); 5013 } 5014 5015 static void 5016 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5017 { 5018 ddi_acc_handle_t handle; 5019 uint32_t *regaddr; 5020 uint32_t value; 5021 5022 handle = e1000gp->osdep.reg_handle; 5023 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr + 5024 (uintptr_t)ppd->pp_acc_offset); 5025 value = (uint32_t)ppd->pp_acc_data; 5026 5027 ddi_put32(handle, regaddr, value); 5028 } 5029 5030 static void 5031 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5032 { 5033 uint64_t value; 5034 void *vaddr; 5035 5036 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 5037 5038 switch (ppd->pp_acc_size) { 5039 case 1: 5040 value = *(uint8_t *)vaddr; 5041 break; 5042 5043 case 2: 5044 value = *(uint16_t *)vaddr; 5045 break; 5046 5047 case 4: 5048 value = *(uint32_t *)vaddr; 5049 break; 5050 5051 case 8: 5052 value = *(uint64_t *)vaddr; 5053 break; 5054 } 5055 5056 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 5057 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n", 5058 (void *)e1000gp, (void *)ppd, value, vaddr); 5059 5060 ppd->pp_acc_data = value; 5061 } 5062 5063 static void 5064 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5065 { 5066 uint64_t value; 5067 void *vaddr; 5068 5069 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 5070 value = ppd->pp_acc_data; 5071 5072 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 5073 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n", 5074 (void *)e1000gp, (void *)ppd, value, vaddr); 5075 5076 switch (ppd->pp_acc_size) { 5077 case 1: 5078 *(uint8_t *)vaddr = (uint8_t)value; 5079 break; 5080 5081 case 2: 5082 *(uint16_t *)vaddr = (uint16_t)value; 5083 break; 5084 5085 case 4: 5086 *(uint32_t *)vaddr = (uint32_t)value; 5087 break; 5088 5089 case 8: 5090 *(uint64_t *)vaddr = (uint64_t)value; 5091 break; 5092 } 5093 } 5094 #endif 5095 5096 /* 5097 * Loopback Support 5098 */ 5099 static lb_property_t lb_normal = 5100 { normal, "normal", E1000G_LB_NONE }; 5101 static lb_property_t lb_external1000 = 5102 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 }; 5103 static lb_property_t lb_external100 = 5104 { external, "100Mbps", E1000G_LB_EXTERNAL_100 }; 5105 static lb_property_t lb_external10 = 5106 { external, "10Mbps", E1000G_LB_EXTERNAL_10 }; 5107 static lb_property_t lb_phy = 5108 { internal, "PHY", E1000G_LB_INTERNAL_PHY }; 5109 5110 static enum ioc_reply 5111 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp) 5112 { 5113 lb_info_sz_t *lbsp; 5114 lb_property_t *lbpp; 5115 struct e1000_hw *hw; 5116 uint32_t *lbmp; 5117 uint32_t size; 5118 uint32_t value; 5119 5120 hw = &Adapter->shared; 5121 5122 if (mp->b_cont == NULL) 5123 return (IOC_INVAL); 5124 5125 if (!e1000g_check_loopback_support(hw)) { 5126 e1000g_log(NULL, CE_WARN, 5127 "Loopback is not supported on e1000g%d", Adapter->instance); 5128 return (IOC_INVAL); 5129 } 5130 5131 switch (iocp->ioc_cmd) { 5132 default: 5133 return (IOC_INVAL); 5134 5135 case LB_GET_INFO_SIZE: 5136 size = sizeof (lb_info_sz_t); 5137 if (iocp->ioc_count != size) 5138 return (IOC_INVAL); 5139 5140 rw_enter(&Adapter->chip_lock, RW_WRITER); 5141 e1000g_get_phy_state(Adapter); 5142 5143 /* 5144 * Workaround for hardware faults. In order to get a stable 5145 * state of phy, we will wait for a specific interval and 5146 * try again. The time delay is an experiential value based 5147 * on our testing. 5148 */ 5149 msec_delay(100); 5150 e1000g_get_phy_state(Adapter); 5151 rw_exit(&Adapter->chip_lock); 5152 5153 value = sizeof (lb_normal); 5154 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5155 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5156 (hw->phy.media_type == e1000_media_type_fiber) || 5157 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5158 value += sizeof (lb_phy); 5159 switch (hw->mac.type) { 5160 case e1000_82571: 5161 case e1000_82572: 5162 case e1000_80003es2lan: 5163 value += sizeof (lb_external1000); 5164 break; 5165 } 5166 } 5167 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5168 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5169 value += sizeof (lb_external100); 5170 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5171 value += sizeof (lb_external10); 5172 5173 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 5174 *lbsp = value; 5175 break; 5176 5177 case LB_GET_INFO: 5178 value = sizeof (lb_normal); 5179 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5180 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5181 (hw->phy.media_type == e1000_media_type_fiber) || 5182 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5183 value += sizeof (lb_phy); 5184 switch (hw->mac.type) { 5185 case e1000_82571: 5186 case e1000_82572: 5187 case e1000_80003es2lan: 5188 value += sizeof (lb_external1000); 5189 break; 5190 } 5191 } 5192 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5193 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5194 value += sizeof (lb_external100); 5195 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5196 value += sizeof (lb_external10); 5197 5198 size = value; 5199 if (iocp->ioc_count != size) 5200 return (IOC_INVAL); 5201 5202 value = 0; 5203 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 5204 lbpp[value++] = lb_normal; 5205 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5206 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5207 (hw->phy.media_type == e1000_media_type_fiber) || 5208 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5209 lbpp[value++] = lb_phy; 5210 switch (hw->mac.type) { 5211 case e1000_82571: 5212 case e1000_82572: 5213 case e1000_80003es2lan: 5214 lbpp[value++] = lb_external1000; 5215 break; 5216 } 5217 } 5218 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5219 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5220 lbpp[value++] = lb_external100; 5221 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5222 lbpp[value++] = lb_external10; 5223 break; 5224 5225 case LB_GET_MODE: 5226 size = sizeof (uint32_t); 5227 if (iocp->ioc_count != size) 5228 return (IOC_INVAL); 5229 5230 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 5231 *lbmp = Adapter->loopback_mode; 5232 break; 5233 5234 case LB_SET_MODE: 5235 size = 0; 5236 if (iocp->ioc_count != sizeof (uint32_t)) 5237 return (IOC_INVAL); 5238 5239 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 5240 if (!e1000g_set_loopback_mode(Adapter, *lbmp)) 5241 return (IOC_INVAL); 5242 break; 5243 } 5244 5245 iocp->ioc_count = size; 5246 iocp->ioc_error = 0; 5247 5248 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 5249 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 5250 return (IOC_INVAL); 5251 } 5252 5253 return (IOC_REPLY); 5254 } 5255 5256 static boolean_t 5257 e1000g_check_loopback_support(struct e1000_hw *hw) 5258 { 5259 switch (hw->mac.type) { 5260 case e1000_82540: 5261 case e1000_82545: 5262 case e1000_82545_rev_3: 5263 case e1000_82546: 5264 case e1000_82546_rev_3: 5265 case e1000_82541: 5266 case e1000_82541_rev_2: 5267 case e1000_82547: 5268 case e1000_82547_rev_2: 5269 case e1000_82571: 5270 case e1000_82572: 5271 case e1000_82573: 5272 case e1000_82574: 5273 case e1000_80003es2lan: 5274 case e1000_ich9lan: 5275 case e1000_ich10lan: 5276 return (B_TRUE); 5277 } 5278 return (B_FALSE); 5279 } 5280 5281 static boolean_t 5282 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode) 5283 { 5284 struct e1000_hw *hw; 5285 int i, times; 5286 boolean_t link_up; 5287 5288 if (mode == Adapter->loopback_mode) 5289 return (B_TRUE); 5290 5291 hw = &Adapter->shared; 5292 times = 0; 5293 5294 Adapter->loopback_mode = mode; 5295 5296 if (mode == E1000G_LB_NONE) { 5297 /* Reset the chip */ 5298 hw->phy.autoneg_wait_to_complete = B_TRUE; 5299 (void) e1000g_reset_adapter(Adapter); 5300 hw->phy.autoneg_wait_to_complete = B_FALSE; 5301 return (B_TRUE); 5302 } 5303 5304 again: 5305 5306 rw_enter(&Adapter->chip_lock, RW_WRITER); 5307 5308 switch (mode) { 5309 default: 5310 rw_exit(&Adapter->chip_lock); 5311 return (B_FALSE); 5312 5313 case E1000G_LB_EXTERNAL_1000: 5314 e1000g_set_external_loopback_1000(Adapter); 5315 break; 5316 5317 case E1000G_LB_EXTERNAL_100: 5318 e1000g_set_external_loopback_100(Adapter); 5319 break; 5320 5321 case E1000G_LB_EXTERNAL_10: 5322 e1000g_set_external_loopback_10(Adapter); 5323 break; 5324 5325 case E1000G_LB_INTERNAL_PHY: 5326 e1000g_set_internal_loopback(Adapter); 5327 break; 5328 } 5329 5330 times++; 5331 5332 rw_exit(&Adapter->chip_lock); 5333 5334 /* Wait for link up */ 5335 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--) 5336 msec_delay(100); 5337 5338 rw_enter(&Adapter->chip_lock, RW_WRITER); 5339 5340 link_up = e1000g_link_up(Adapter); 5341 5342 rw_exit(&Adapter->chip_lock); 5343 5344 if (!link_up) { 5345 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5346 "Failed to get the link up"); 5347 if (times < 2) { 5348 /* Reset the link */ 5349 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5350 "Reset the link ..."); 5351 (void) e1000g_reset_adapter(Adapter); 5352 goto again; 5353 } 5354 5355 /* 5356 * Reset driver to loopback none when set loopback failed 5357 * for the second time. 5358 */ 5359 Adapter->loopback_mode = E1000G_LB_NONE; 5360 5361 /* Reset the chip */ 5362 hw->phy.autoneg_wait_to_complete = B_TRUE; 5363 (void) e1000g_reset_adapter(Adapter); 5364 hw->phy.autoneg_wait_to_complete = B_FALSE; 5365 5366 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5367 "Set loopback mode failed, reset to loopback none"); 5368 5369 return (B_FALSE); 5370 } 5371 5372 return (B_TRUE); 5373 } 5374 5375 /* 5376 * The following loopback settings are from Intel's technical 5377 * document - "How To Loopback". All the register settings and 5378 * time delay values are directly inherited from the document 5379 * without more explanations available. 5380 */ 5381 static void 5382 e1000g_set_internal_loopback(struct e1000g *Adapter) 5383 { 5384 struct e1000_hw *hw; 5385 uint32_t ctrl; 5386 uint32_t status; 5387 uint16_t phy_ctrl; 5388 uint16_t phy_reg; 5389 uint32_t txcw; 5390 5391 hw = &Adapter->shared; 5392 5393 /* Disable Smart Power Down */ 5394 phy_spd_state(hw, B_FALSE); 5395 5396 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); 5397 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10); 5398 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000; 5399 5400 switch (hw->mac.type) { 5401 case e1000_82540: 5402 case e1000_82545: 5403 case e1000_82545_rev_3: 5404 case e1000_82546: 5405 case e1000_82546_rev_3: 5406 case e1000_82573: 5407 /* Auto-MDI/MDIX off */ 5408 (void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 5409 /* Reset PHY to update Auto-MDI/MDIX */ 5410 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5411 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN); 5412 /* Reset PHY to auto-neg off and force 1000 */ 5413 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5414 phy_ctrl | MII_CR_RESET); 5415 /* 5416 * Disable PHY receiver for 82540/545/546 and 82573 Family. 5417 * See comments above e1000g_set_internal_loopback() for the 5418 * background. 5419 */ 5420 (void) e1000_write_phy_reg(hw, 29, 0x001F); 5421 (void) e1000_write_phy_reg(hw, 30, 0x8FFC); 5422 (void) e1000_write_phy_reg(hw, 29, 0x001A); 5423 (void) e1000_write_phy_reg(hw, 30, 0x8FF0); 5424 break; 5425 case e1000_80003es2lan: 5426 /* Force Link Up */ 5427 (void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 5428 0x1CC); 5429 /* Sets PCS loopback at 1Gbs */ 5430 (void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, 5431 0x1046); 5432 break; 5433 } 5434 5435 /* 5436 * The following registers should be set for e1000_phy_bm phy type. 5437 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy. 5438 * For others, we do not need to set these registers. 5439 */ 5440 if (hw->phy.type == e1000_phy_bm) { 5441 /* Set Default MAC Interface speed to 1GB */ 5442 (void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg); 5443 phy_reg &= ~0x0007; 5444 phy_reg |= 0x006; 5445 (void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg); 5446 /* Assert SW reset for above settings to take effect */ 5447 (void) e1000_phy_commit(hw); 5448 msec_delay(1); 5449 /* Force Full Duplex */ 5450 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg); 5451 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16), 5452 phy_reg | 0x000C); 5453 /* Set Link Up (in force link) */ 5454 (void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg); 5455 (void) e1000_write_phy_reg(hw, PHY_REG(776, 16), 5456 phy_reg | 0x0040); 5457 /* Force Link */ 5458 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg); 5459 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16), 5460 phy_reg | 0x0040); 5461 /* Set Early Link Enable */ 5462 (void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg); 5463 (void) e1000_write_phy_reg(hw, PHY_REG(769, 20), 5464 phy_reg | 0x0400); 5465 } 5466 5467 /* Set loopback */ 5468 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK); 5469 5470 msec_delay(250); 5471 5472 /* Now set up the MAC to the same speed/duplex as the PHY. */ 5473 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5474 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5475 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5476 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5477 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ 5478 E1000_CTRL_FD); /* Force Duplex to FULL */ 5479 5480 switch (hw->mac.type) { 5481 case e1000_82540: 5482 case e1000_82545: 5483 case e1000_82545_rev_3: 5484 case e1000_82546: 5485 case e1000_82546_rev_3: 5486 /* 5487 * For some serdes we'll need to commit the writes now 5488 * so that the status is updated on link 5489 */ 5490 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 5491 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5492 msec_delay(100); 5493 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5494 } 5495 5496 if (hw->phy.media_type == e1000_media_type_copper) { 5497 /* Invert Loss of Signal */ 5498 ctrl |= E1000_CTRL_ILOS; 5499 } else { 5500 /* Set ILOS on fiber nic if half duplex is detected */ 5501 status = E1000_READ_REG(hw, E1000_STATUS); 5502 if ((status & E1000_STATUS_FD) == 0) 5503 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5504 } 5505 break; 5506 5507 case e1000_82571: 5508 case e1000_82572: 5509 /* 5510 * The fiber/SerDes versions of this adapter do not contain an 5511 * accessible PHY. Therefore, loopback beyond MAC must be done 5512 * using SerDes analog loopback. 5513 */ 5514 if (hw->phy.media_type != e1000_media_type_copper) { 5515 /* Disable autoneg by setting bit 31 of TXCW to zero */ 5516 txcw = E1000_READ_REG(hw, E1000_TXCW); 5517 txcw &= ~((uint32_t)1 << 31); 5518 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 5519 5520 /* 5521 * Write 0x410 to Serdes Control register 5522 * to enable Serdes analog loopback 5523 */ 5524 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 5525 msec_delay(10); 5526 } 5527 5528 status = E1000_READ_REG(hw, E1000_STATUS); 5529 /* Set ILOS on fiber nic if half duplex is detected */ 5530 if ((hw->phy.media_type == e1000_media_type_fiber) && 5531 ((status & E1000_STATUS_FD) == 0 || 5532 (status & E1000_STATUS_LU) == 0)) 5533 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5534 else if (hw->phy.media_type == e1000_media_type_internal_serdes) 5535 ctrl |= E1000_CTRL_SLU; 5536 break; 5537 5538 case e1000_82573: 5539 ctrl |= E1000_CTRL_ILOS; 5540 break; 5541 case e1000_ich9lan: 5542 case e1000_ich10lan: 5543 ctrl |= E1000_CTRL_SLU; 5544 break; 5545 } 5546 if (hw->phy.type == e1000_phy_bm) 5547 ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS; 5548 5549 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5550 } 5551 5552 static void 5553 e1000g_set_external_loopback_1000(struct e1000g *Adapter) 5554 { 5555 struct e1000_hw *hw; 5556 uint32_t rctl; 5557 uint32_t ctrl_ext; 5558 uint32_t ctrl; 5559 uint32_t status; 5560 uint32_t txcw; 5561 uint16_t phydata; 5562 5563 hw = &Adapter->shared; 5564 5565 /* Disable Smart Power Down */ 5566 phy_spd_state(hw, B_FALSE); 5567 5568 switch (hw->mac.type) { 5569 case e1000_82571: 5570 case e1000_82572: 5571 switch (hw->phy.media_type) { 5572 case e1000_media_type_copper: 5573 /* Force link up (Must be done before the PHY writes) */ 5574 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5575 ctrl |= E1000_CTRL_SLU; /* Force Link Up */ 5576 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5577 5578 rctl = E1000_READ_REG(hw, E1000_RCTL); 5579 rctl |= (E1000_RCTL_EN | 5580 E1000_RCTL_SBP | 5581 E1000_RCTL_UPE | 5582 E1000_RCTL_MPE | 5583 E1000_RCTL_LPE | 5584 E1000_RCTL_BAM); /* 0x803E */ 5585 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 5586 5587 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 5588 ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA | 5589 E1000_CTRL_EXT_SDP6_DATA | 5590 E1000_CTRL_EXT_SDP3_DATA | 5591 E1000_CTRL_EXT_SDP4_DIR | 5592 E1000_CTRL_EXT_SDP6_DIR | 5593 E1000_CTRL_EXT_SDP3_DIR); /* 0x0DD0 */ 5594 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 5595 5596 /* 5597 * This sequence tunes the PHY's SDP and no customer 5598 * settable values. For background, see comments above 5599 * e1000g_set_internal_loopback(). 5600 */ 5601 (void) e1000_write_phy_reg(hw, 0x0, 0x140); 5602 msec_delay(10); 5603 (void) e1000_write_phy_reg(hw, 0x9, 0x1A00); 5604 (void) e1000_write_phy_reg(hw, 0x12, 0xC10); 5605 (void) e1000_write_phy_reg(hw, 0x12, 0x1C10); 5606 (void) e1000_write_phy_reg(hw, 0x1F37, 0x76); 5607 (void) e1000_write_phy_reg(hw, 0x1F33, 0x1); 5608 (void) e1000_write_phy_reg(hw, 0x1F33, 0x0); 5609 5610 (void) e1000_write_phy_reg(hw, 0x1F35, 0x65); 5611 (void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C); 5612 (void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC); 5613 (void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C); 5614 (void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC); 5615 5616 msec_delay(50); 5617 break; 5618 case e1000_media_type_fiber: 5619 case e1000_media_type_internal_serdes: 5620 status = E1000_READ_REG(hw, E1000_STATUS); 5621 if (((status & E1000_STATUS_LU) == 0) || 5622 (hw->phy.media_type == 5623 e1000_media_type_internal_serdes)) { 5624 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5625 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5626 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5627 } 5628 5629 /* Disable autoneg by setting bit 31 of TXCW to zero */ 5630 txcw = E1000_READ_REG(hw, E1000_TXCW); 5631 txcw &= ~((uint32_t)1 << 31); 5632 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 5633 5634 /* 5635 * Write 0x410 to Serdes Control register 5636 * to enable Serdes analog loopback 5637 */ 5638 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 5639 msec_delay(10); 5640 break; 5641 default: 5642 break; 5643 } 5644 break; 5645 case e1000_82574: 5646 case e1000_80003es2lan: 5647 case e1000_ich9lan: 5648 case e1000_ich10lan: 5649 (void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata); 5650 (void) e1000_write_phy_reg(hw, GG82563_REG(6, 16), 5651 phydata | (1 << 5)); 5652 Adapter->param_adv_autoneg = 1; 5653 Adapter->param_adv_1000fdx = 1; 5654 (void) e1000g_reset_link(Adapter); 5655 break; 5656 } 5657 } 5658 5659 static void 5660 e1000g_set_external_loopback_100(struct e1000g *Adapter) 5661 { 5662 struct e1000_hw *hw; 5663 uint32_t ctrl; 5664 uint16_t phy_ctrl; 5665 5666 hw = &Adapter->shared; 5667 5668 /* Disable Smart Power Down */ 5669 phy_spd_state(hw, B_FALSE); 5670 5671 phy_ctrl = (MII_CR_FULL_DUPLEX | 5672 MII_CR_SPEED_100); 5673 5674 /* Force 100/FD, reset PHY */ 5675 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5676 phy_ctrl | MII_CR_RESET); /* 0xA100 */ 5677 msec_delay(10); 5678 5679 /* Force 100/FD */ 5680 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5681 phy_ctrl); /* 0x2100 */ 5682 msec_delay(10); 5683 5684 /* Now setup the MAC to the same speed/duplex as the PHY. */ 5685 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5686 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5687 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */ 5688 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5689 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5690 E1000_CTRL_SPD_100 | /* Force Speed to 100 */ 5691 E1000_CTRL_FD); /* Force Duplex to FULL */ 5692 5693 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5694 } 5695 5696 static void 5697 e1000g_set_external_loopback_10(struct e1000g *Adapter) 5698 { 5699 struct e1000_hw *hw; 5700 uint32_t ctrl; 5701 uint16_t phy_ctrl; 5702 5703 hw = &Adapter->shared; 5704 5705 /* Disable Smart Power Down */ 5706 phy_spd_state(hw, B_FALSE); 5707 5708 phy_ctrl = (MII_CR_FULL_DUPLEX | 5709 MII_CR_SPEED_10); 5710 5711 /* Force 10/FD, reset PHY */ 5712 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5713 phy_ctrl | MII_CR_RESET); /* 0x8100 */ 5714 msec_delay(10); 5715 5716 /* Force 10/FD */ 5717 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5718 phy_ctrl); /* 0x0100 */ 5719 msec_delay(10); 5720 5721 /* Now setup the MAC to the same speed/duplex as the PHY. */ 5722 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5723 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5724 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */ 5725 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5726 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5727 E1000_CTRL_SPD_10 | /* Force Speed to 10 */ 5728 E1000_CTRL_FD); /* Force Duplex to FULL */ 5729 5730 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5731 } 5732 5733 #ifdef __sparc 5734 static boolean_t 5735 e1000g_find_mac_address(struct e1000g *Adapter) 5736 { 5737 struct e1000_hw *hw = &Adapter->shared; 5738 uchar_t *bytes; 5739 struct ether_addr sysaddr; 5740 uint_t nelts; 5741 int err; 5742 boolean_t found = B_FALSE; 5743 5744 /* 5745 * The "vendor's factory-set address" may already have 5746 * been extracted from the chip, but if the property 5747 * "local-mac-address" is set we use that instead. 5748 * 5749 * We check whether it looks like an array of 6 5750 * bytes (which it should, if OBP set it). If we can't 5751 * make sense of it this way, we'll ignore it. 5752 */ 5753 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 5754 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 5755 if (err == DDI_PROP_SUCCESS) { 5756 if (nelts == ETHERADDRL) { 5757 while (nelts--) 5758 hw->mac.addr[nelts] = bytes[nelts]; 5759 found = B_TRUE; 5760 } 5761 ddi_prop_free(bytes); 5762 } 5763 5764 /* 5765 * Look up the OBP property "local-mac-address?". If the user has set 5766 * 'local-mac-address? = false', use "the system address" instead. 5767 */ 5768 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0, 5769 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 5770 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 5771 if (localetheraddr(NULL, &sysaddr) != 0) { 5772 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 5773 found = B_TRUE; 5774 } 5775 } 5776 ddi_prop_free(bytes); 5777 } 5778 5779 /* 5780 * Finally(!), if there's a valid "mac-address" property (created 5781 * if we netbooted from this interface), we must use this instead 5782 * of any of the above to ensure that the NFS/install server doesn't 5783 * get confused by the address changing as Solaris takes over! 5784 */ 5785 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 5786 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 5787 if (err == DDI_PROP_SUCCESS) { 5788 if (nelts == ETHERADDRL) { 5789 while (nelts--) 5790 hw->mac.addr[nelts] = bytes[nelts]; 5791 found = B_TRUE; 5792 } 5793 ddi_prop_free(bytes); 5794 } 5795 5796 if (found) { 5797 bcopy(hw->mac.addr, hw->mac.perm_addr, 5798 ETHERADDRL); 5799 } 5800 5801 return (found); 5802 } 5803 #endif 5804 5805 static int 5806 e1000g_add_intrs(struct e1000g *Adapter) 5807 { 5808 dev_info_t *devinfo; 5809 int intr_types; 5810 int rc; 5811 5812 devinfo = Adapter->dip; 5813 5814 /* Get supported interrupt types */ 5815 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 5816 5817 if (rc != DDI_SUCCESS) { 5818 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5819 "Get supported interrupt types failed: %d\n", rc); 5820 return (DDI_FAILURE); 5821 } 5822 5823 /* 5824 * Based on Intel Technical Advisory document (TA-160), there are some 5825 * cases where some older Intel PCI-X NICs may "advertise" to the OS 5826 * that it supports MSI, but in fact has problems. 5827 * So we should only enable MSI for PCI-E NICs and disable MSI for old 5828 * PCI/PCI-X NICs. 5829 */ 5830 if (Adapter->shared.mac.type < e1000_82571) 5831 Adapter->msi_enable = B_FALSE; 5832 5833 if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) { 5834 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI); 5835 5836 if (rc != DDI_SUCCESS) { 5837 /* EMPTY */ 5838 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 5839 "Add MSI failed, trying Legacy interrupts\n"); 5840 } else { 5841 Adapter->intr_type = DDI_INTR_TYPE_MSI; 5842 } 5843 } 5844 5845 if ((Adapter->intr_type == 0) && 5846 (intr_types & DDI_INTR_TYPE_FIXED)) { 5847 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED); 5848 5849 if (rc != DDI_SUCCESS) { 5850 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 5851 "Add Legacy interrupts failed\n"); 5852 return (DDI_FAILURE); 5853 } 5854 5855 Adapter->intr_type = DDI_INTR_TYPE_FIXED; 5856 } 5857 5858 if (Adapter->intr_type == 0) { 5859 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 5860 "No interrupts registered\n"); 5861 return (DDI_FAILURE); 5862 } 5863 5864 return (DDI_SUCCESS); 5865 } 5866 5867 /* 5868 * e1000g_intr_add() handles MSI/Legacy interrupts 5869 */ 5870 static int 5871 e1000g_intr_add(struct e1000g *Adapter, int intr_type) 5872 { 5873 dev_info_t *devinfo; 5874 int count, avail, actual; 5875 int x, y, rc, inum = 0; 5876 int flag; 5877 ddi_intr_handler_t *intr_handler; 5878 5879 devinfo = Adapter->dip; 5880 5881 /* get number of interrupts */ 5882 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 5883 if ((rc != DDI_SUCCESS) || (count == 0)) { 5884 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 5885 "Get interrupt number failed. Return: %d, count: %d\n", 5886 rc, count); 5887 return (DDI_FAILURE); 5888 } 5889 5890 /* get number of available interrupts */ 5891 rc = ddi_intr_get_navail(devinfo, intr_type, &avail); 5892 if ((rc != DDI_SUCCESS) || (avail == 0)) { 5893 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 5894 "Get interrupt available number failed. " 5895 "Return: %d, available: %d\n", rc, avail); 5896 return (DDI_FAILURE); 5897 } 5898 5899 if (avail < count) { 5900 /* EMPTY */ 5901 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 5902 "Interrupts count: %d, available: %d\n", 5903 count, avail); 5904 } 5905 5906 /* Allocate an array of interrupt handles */ 5907 Adapter->intr_size = count * sizeof (ddi_intr_handle_t); 5908 Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP); 5909 5910 /* Set NORMAL behavior for both MSI and FIXED interrupt */ 5911 flag = DDI_INTR_ALLOC_NORMAL; 5912 5913 /* call ddi_intr_alloc() */ 5914 rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum, 5915 count, &actual, flag); 5916 5917 if ((rc != DDI_SUCCESS) || (actual == 0)) { 5918 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5919 "Allocate interrupts failed: %d\n", rc); 5920 5921 kmem_free(Adapter->htable, Adapter->intr_size); 5922 return (DDI_FAILURE); 5923 } 5924 5925 if (actual < count) { 5926 /* EMPTY */ 5927 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 5928 "Interrupts requested: %d, received: %d\n", 5929 count, actual); 5930 } 5931 5932 Adapter->intr_cnt = actual; 5933 5934 /* Get priority for first msi, assume remaining are all the same */ 5935 rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri); 5936 5937 if (rc != DDI_SUCCESS) { 5938 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5939 "Get interrupt priority failed: %d\n", rc); 5940 5941 /* Free already allocated intr */ 5942 for (y = 0; y < actual; y++) 5943 (void) ddi_intr_free(Adapter->htable[y]); 5944 5945 kmem_free(Adapter->htable, Adapter->intr_size); 5946 return (DDI_FAILURE); 5947 } 5948 5949 /* 5950 * In Legacy Interrupt mode, for PCI-Express adapters, we should 5951 * use the interrupt service routine e1000g_intr_pciexpress() 5952 * to avoid interrupt stealing when sharing interrupt with other 5953 * devices. 5954 */ 5955 if (Adapter->shared.mac.type < e1000_82571) 5956 intr_handler = (ddi_intr_handler_t *)e1000g_intr; 5957 else 5958 intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress; 5959 5960 /* Call ddi_intr_add_handler() */ 5961 for (x = 0; x < actual; x++) { 5962 rc = ddi_intr_add_handler(Adapter->htable[x], 5963 intr_handler, (caddr_t)Adapter, NULL); 5964 5965 if (rc != DDI_SUCCESS) { 5966 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5967 "Add interrupt handler failed: %d\n", rc); 5968 5969 /* Remove already added handler */ 5970 for (y = 0; y < x; y++) 5971 (void) ddi_intr_remove_handler( 5972 Adapter->htable[y]); 5973 5974 /* Free already allocated intr */ 5975 for (y = 0; y < actual; y++) 5976 (void) ddi_intr_free(Adapter->htable[y]); 5977 5978 kmem_free(Adapter->htable, Adapter->intr_size); 5979 return (DDI_FAILURE); 5980 } 5981 } 5982 5983 rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap); 5984 5985 if (rc != DDI_SUCCESS) { 5986 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5987 "Get interrupt cap failed: %d\n", rc); 5988 5989 /* Free already allocated intr */ 5990 for (y = 0; y < actual; y++) { 5991 (void) ddi_intr_remove_handler(Adapter->htable[y]); 5992 (void) ddi_intr_free(Adapter->htable[y]); 5993 } 5994 5995 kmem_free(Adapter->htable, Adapter->intr_size); 5996 return (DDI_FAILURE); 5997 } 5998 5999 return (DDI_SUCCESS); 6000 } 6001 6002 static int 6003 e1000g_rem_intrs(struct e1000g *Adapter) 6004 { 6005 int x; 6006 int rc; 6007 6008 for (x = 0; x < Adapter->intr_cnt; x++) { 6009 rc = ddi_intr_remove_handler(Adapter->htable[x]); 6010 if (rc != DDI_SUCCESS) { 6011 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6012 "Remove intr handler failed: %d\n", rc); 6013 return (DDI_FAILURE); 6014 } 6015 6016 rc = ddi_intr_free(Adapter->htable[x]); 6017 if (rc != DDI_SUCCESS) { 6018 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6019 "Free intr failed: %d\n", rc); 6020 return (DDI_FAILURE); 6021 } 6022 } 6023 6024 kmem_free(Adapter->htable, Adapter->intr_size); 6025 6026 return (DDI_SUCCESS); 6027 } 6028 6029 static int 6030 e1000g_enable_intrs(struct e1000g *Adapter) 6031 { 6032 int x; 6033 int rc; 6034 6035 /* Enable interrupts */ 6036 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) { 6037 /* Call ddi_intr_block_enable() for MSI */ 6038 rc = ddi_intr_block_enable(Adapter->htable, 6039 Adapter->intr_cnt); 6040 if (rc != DDI_SUCCESS) { 6041 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6042 "Enable block intr failed: %d\n", rc); 6043 return (DDI_FAILURE); 6044 } 6045 } else { 6046 /* Call ddi_intr_enable() for Legacy/MSI non block enable */ 6047 for (x = 0; x < Adapter->intr_cnt; x++) { 6048 rc = ddi_intr_enable(Adapter->htable[x]); 6049 if (rc != DDI_SUCCESS) { 6050 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6051 "Enable intr failed: %d\n", rc); 6052 return (DDI_FAILURE); 6053 } 6054 } 6055 } 6056 6057 return (DDI_SUCCESS); 6058 } 6059 6060 static int 6061 e1000g_disable_intrs(struct e1000g *Adapter) 6062 { 6063 int x; 6064 int rc; 6065 6066 /* Disable all interrupts */ 6067 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) { 6068 rc = ddi_intr_block_disable(Adapter->htable, 6069 Adapter->intr_cnt); 6070 if (rc != DDI_SUCCESS) { 6071 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6072 "Disable block intr failed: %d\n", rc); 6073 return (DDI_FAILURE); 6074 } 6075 } else { 6076 for (x = 0; x < Adapter->intr_cnt; x++) { 6077 rc = ddi_intr_disable(Adapter->htable[x]); 6078 if (rc != DDI_SUCCESS) { 6079 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6080 "Disable intr failed: %d\n", rc); 6081 return (DDI_FAILURE); 6082 } 6083 } 6084 } 6085 6086 return (DDI_SUCCESS); 6087 } 6088 6089 /* 6090 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter 6091 */ 6092 static void 6093 e1000g_get_phy_state(struct e1000g *Adapter) 6094 { 6095 struct e1000_hw *hw = &Adapter->shared; 6096 6097 if (hw->phy.media_type == e1000_media_type_copper) { 6098 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl); 6099 (void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status); 6100 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, 6101 &Adapter->phy_an_adv); 6102 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, 6103 &Adapter->phy_an_exp); 6104 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, 6105 &Adapter->phy_ext_status); 6106 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, 6107 &Adapter->phy_1000t_ctrl); 6108 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, 6109 &Adapter->phy_1000t_status); 6110 (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, 6111 &Adapter->phy_lp_able); 6112 6113 Adapter->param_autoneg_cap = 6114 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0; 6115 Adapter->param_pause_cap = 6116 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; 6117 Adapter->param_asym_pause_cap = 6118 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; 6119 Adapter->param_1000fdx_cap = 6120 ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 6121 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0; 6122 Adapter->param_1000hdx_cap = 6123 ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) || 6124 (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0; 6125 Adapter->param_100t4_cap = 6126 (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0; 6127 Adapter->param_100fdx_cap = 6128 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 6129 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0; 6130 Adapter->param_100hdx_cap = 6131 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) || 6132 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0; 6133 Adapter->param_10fdx_cap = 6134 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0; 6135 Adapter->param_10hdx_cap = 6136 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0; 6137 6138 Adapter->param_adv_autoneg = hw->mac.autoneg; 6139 Adapter->param_adv_pause = 6140 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; 6141 Adapter->param_adv_asym_pause = 6142 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; 6143 Adapter->param_adv_1000hdx = 6144 (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0; 6145 Adapter->param_adv_100t4 = 6146 (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0; 6147 if (Adapter->param_adv_autoneg == 1) { 6148 Adapter->param_adv_1000fdx = 6149 (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS) 6150 ? 1 : 0; 6151 Adapter->param_adv_100fdx = 6152 (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS) 6153 ? 1 : 0; 6154 Adapter->param_adv_100hdx = 6155 (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS) 6156 ? 1 : 0; 6157 Adapter->param_adv_10fdx = 6158 (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0; 6159 Adapter->param_adv_10hdx = 6160 (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0; 6161 } 6162 6163 Adapter->param_lp_autoneg = 6164 (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0; 6165 Adapter->param_lp_pause = 6166 (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0; 6167 Adapter->param_lp_asym_pause = 6168 (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0; 6169 Adapter->param_lp_1000fdx = 6170 (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0; 6171 Adapter->param_lp_1000hdx = 6172 (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0; 6173 Adapter->param_lp_100t4 = 6174 (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0; 6175 Adapter->param_lp_100fdx = 6176 (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0; 6177 Adapter->param_lp_100hdx = 6178 (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0; 6179 Adapter->param_lp_10fdx = 6180 (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0; 6181 Adapter->param_lp_10hdx = 6182 (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0; 6183 } else { 6184 /* 6185 * 1Gig Fiber adapter only offers 1Gig Full Duplex. Meaning, 6186 * it can only work with 1Gig Full Duplex Link Partner. 6187 */ 6188 Adapter->param_autoneg_cap = 0; 6189 Adapter->param_pause_cap = 1; 6190 Adapter->param_asym_pause_cap = 1; 6191 Adapter->param_1000fdx_cap = 1; 6192 Adapter->param_1000hdx_cap = 0; 6193 Adapter->param_100t4_cap = 0; 6194 Adapter->param_100fdx_cap = 0; 6195 Adapter->param_100hdx_cap = 0; 6196 Adapter->param_10fdx_cap = 0; 6197 Adapter->param_10hdx_cap = 0; 6198 6199 Adapter->param_adv_autoneg = 0; 6200 Adapter->param_adv_pause = 1; 6201 Adapter->param_adv_asym_pause = 1; 6202 Adapter->param_adv_1000fdx = 1; 6203 Adapter->param_adv_1000hdx = 0; 6204 Adapter->param_adv_100t4 = 0; 6205 Adapter->param_adv_100fdx = 0; 6206 Adapter->param_adv_100hdx = 0; 6207 Adapter->param_adv_10fdx = 0; 6208 Adapter->param_adv_10hdx = 0; 6209 6210 Adapter->param_lp_autoneg = 0; 6211 Adapter->param_lp_pause = 0; 6212 Adapter->param_lp_asym_pause = 0; 6213 Adapter->param_lp_1000fdx = 0; 6214 Adapter->param_lp_1000hdx = 0; 6215 Adapter->param_lp_100t4 = 0; 6216 Adapter->param_lp_100fdx = 0; 6217 Adapter->param_lp_100hdx = 0; 6218 Adapter->param_lp_10fdx = 0; 6219 Adapter->param_lp_10hdx = 0; 6220 } 6221 } 6222 6223 /* 6224 * FMA support 6225 */ 6226 6227 int 6228 e1000g_check_acc_handle(ddi_acc_handle_t handle) 6229 { 6230 ddi_fm_error_t de; 6231 6232 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 6233 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 6234 return (de.fme_status); 6235 } 6236 6237 int 6238 e1000g_check_dma_handle(ddi_dma_handle_t handle) 6239 { 6240 ddi_fm_error_t de; 6241 6242 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 6243 return (de.fme_status); 6244 } 6245 6246 /* 6247 * The IO fault service error handling callback function 6248 */ 6249 /* ARGSUSED2 */ 6250 static int 6251 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 6252 { 6253 /* 6254 * as the driver can always deal with an error in any dma or 6255 * access handle, we can just return the fme_status value. 6256 */ 6257 pci_ereport_post(dip, err, NULL); 6258 return (err->fme_status); 6259 } 6260 6261 static void 6262 e1000g_fm_init(struct e1000g *Adapter) 6263 { 6264 ddi_iblock_cookie_t iblk; 6265 int fma_dma_flag; 6266 6267 /* Only register with IO Fault Services if we have some capability */ 6268 if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 6269 e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 6270 } else { 6271 e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 6272 } 6273 6274 if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 6275 fma_dma_flag = 1; 6276 } else { 6277 fma_dma_flag = 0; 6278 } 6279 6280 (void) e1000g_set_fma_flags(fma_dma_flag); 6281 6282 if (Adapter->fm_capabilities) { 6283 6284 /* Register capabilities with IO Fault Services */ 6285 ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk); 6286 6287 /* 6288 * Initialize pci ereport capabilities if ereport capable 6289 */ 6290 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) || 6291 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6292 pci_ereport_setup(Adapter->dip); 6293 6294 /* 6295 * Register error callback if error callback capable 6296 */ 6297 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6298 ddi_fm_handler_register(Adapter->dip, 6299 e1000g_fm_error_cb, (void*) Adapter); 6300 } 6301 } 6302 6303 static void 6304 e1000g_fm_fini(struct e1000g *Adapter) 6305 { 6306 /* Only unregister FMA capabilities if we registered some */ 6307 if (Adapter->fm_capabilities) { 6308 6309 /* 6310 * Release any resources allocated by pci_ereport_setup() 6311 */ 6312 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) || 6313 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6314 pci_ereport_teardown(Adapter->dip); 6315 6316 /* 6317 * Un-register error callback if error callback capable 6318 */ 6319 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 6320 ddi_fm_handler_unregister(Adapter->dip); 6321 6322 /* Unregister from IO Fault Services */ 6323 mutex_enter(&e1000g_rx_detach_lock); 6324 ddi_fm_fini(Adapter->dip); 6325 if (Adapter->priv_dip != NULL) { 6326 DEVI(Adapter->priv_dip)->devi_fmhdl = NULL; 6327 } 6328 mutex_exit(&e1000g_rx_detach_lock); 6329 } 6330 } 6331 6332 void 6333 e1000g_fm_ereport(struct e1000g *Adapter, char *detail) 6334 { 6335 uint64_t ena; 6336 char buf[FM_MAX_CLASS]; 6337 6338 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 6339 ena = fm_ena_generate(0, FM_ENA_FMT1); 6340 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) { 6341 ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP, 6342 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 6343 } 6344 } 6345 6346 /* 6347 * quiesce(9E) entry point. 6348 * 6349 * This function is called when the system is single-threaded at high 6350 * PIL with preemption disabled. Therefore, this function must not be 6351 * blocked. 6352 * 6353 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure. 6354 * DDI_FAILURE indicates an error condition and should almost never happen. 6355 */ 6356 static int 6357 e1000g_quiesce(dev_info_t *devinfo) 6358 { 6359 struct e1000g *Adapter; 6360 6361 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 6362 6363 if (Adapter == NULL) 6364 return (DDI_FAILURE); 6365 6366 e1000g_clear_all_interrupts(Adapter); 6367 6368 (void) e1000_reset_hw(&Adapter->shared); 6369 6370 /* Setup our HW Tx Head & Tail descriptor pointers */ 6371 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0); 6372 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0); 6373 6374 /* Setup our HW Rx Head & Tail descriptor pointers */ 6375 E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0); 6376 E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0); 6377 6378 return (DDI_SUCCESS); 6379 } 6380 6381 /* 6382 * synchronize the adv* and en* parameters. 6383 * 6384 * See comments in <sys/dld.h> for details of the *_en_* 6385 * parameters. The usage of ndd for setting adv parameters will 6386 * synchronize all the en parameters with the e1000g parameters, 6387 * implicitly disabling any settings made via dladm. 6388 */ 6389 static void 6390 e1000g_param_sync(struct e1000g *Adapter) 6391 { 6392 Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx; 6393 Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx; 6394 Adapter->param_en_100fdx = Adapter->param_adv_100fdx; 6395 Adapter->param_en_100hdx = Adapter->param_adv_100hdx; 6396 Adapter->param_en_10fdx = Adapter->param_adv_10fdx; 6397 Adapter->param_en_10hdx = Adapter->param_adv_10hdx; 6398 } 6399 6400 /* 6401 * e1000g_get_driver_control - tell manageability firmware that the driver 6402 * has control. 6403 */ 6404 static void 6405 e1000g_get_driver_control(struct e1000_hw *hw) 6406 { 6407 uint32_t ctrl_ext; 6408 uint32_t swsm; 6409 6410 /* tell manageability firmware the driver has taken over */ 6411 switch (hw->mac.type) { 6412 case e1000_82573: 6413 swsm = E1000_READ_REG(hw, E1000_SWSM); 6414 E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD); 6415 break; 6416 case e1000_82571: 6417 case e1000_82572: 6418 case e1000_82574: 6419 case e1000_80003es2lan: 6420 case e1000_ich8lan: 6421 case e1000_ich9lan: 6422 case e1000_ich10lan: 6423 case e1000_pchlan: 6424 case e1000_pch2lan: 6425 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 6426 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 6427 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 6428 break; 6429 default: 6430 /* no manageability firmware: do nothing */ 6431 break; 6432 } 6433 } 6434 6435 /* 6436 * e1000g_release_driver_control - tell manageability firmware that the driver 6437 * has released control. 6438 */ 6439 static void 6440 e1000g_release_driver_control(struct e1000_hw *hw) 6441 { 6442 uint32_t ctrl_ext; 6443 uint32_t swsm; 6444 6445 /* tell manageability firmware the driver has released control */ 6446 switch (hw->mac.type) { 6447 case e1000_82573: 6448 swsm = E1000_READ_REG(hw, E1000_SWSM); 6449 E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD); 6450 break; 6451 case e1000_82571: 6452 case e1000_82572: 6453 case e1000_82574: 6454 case e1000_80003es2lan: 6455 case e1000_ich8lan: 6456 case e1000_ich9lan: 6457 case e1000_ich10lan: 6458 case e1000_pchlan: 6459 case e1000_pch2lan: 6460 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 6461 E1000_WRITE_REG(hw, E1000_CTRL_EXT, 6462 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 6463 break; 6464 default: 6465 /* no manageability firmware: do nothing */ 6466 break; 6467 } 6468 } 6469 6470 /* 6471 * Restore e1000g promiscuous mode. 6472 */ 6473 static void 6474 e1000g_restore_promisc(struct e1000g *Adapter) 6475 { 6476 if (Adapter->e1000g_promisc) { 6477 uint32_t rctl; 6478 6479 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL); 6480 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM); 6481 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl); 6482 } 6483 } 6484