/*
 * This file is provided under a CDDLv1 license. When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 */

/*
 * **********************************************************************
 *									*
 * Module Name:								*
 *	e1000g_main.c							*
 *									*
 * Abstract:								*
 *	This file contains the interface routines for the Solaris OS.	*
 *	It has all DDI entry point routines and GLD entry point	*
 *	routines.							*
 *									*
 *	This file also contains routines that take care of the		*
 *	initialization, uninitialization and interrupt routines.	*
 *									*
 * **********************************************************************
 */

#include <sys/dlpi.h>
#include <sys/mac.h>
#include "e1000g_sw.h"
#include "e1000g_debug.h"

static char ident[] = "Intel PRO/1000 Ethernet";
/* LINTED E_STATIC_UNUSED */
static char e1000g_version[] = "Driver Ver. 
5.3.24"; 55 56 /* 57 * Proto types for DDI entry points 58 */ 59 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t); 60 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t); 61 static int e1000g_quiesce(dev_info_t *); 62 63 /* 64 * init and intr routines prototype 65 */ 66 static int e1000g_resume(dev_info_t *); 67 static int e1000g_suspend(dev_info_t *); 68 static uint_t e1000g_intr_pciexpress(caddr_t); 69 static uint_t e1000g_intr(caddr_t); 70 static void e1000g_intr_work(struct e1000g *, uint32_t); 71 #pragma inline(e1000g_intr_work) 72 static int e1000g_init(struct e1000g *); 73 static int e1000g_start(struct e1000g *, boolean_t); 74 static void e1000g_stop(struct e1000g *, boolean_t); 75 static int e1000g_m_start(void *); 76 static void e1000g_m_stop(void *); 77 static int e1000g_m_promisc(void *, boolean_t); 78 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *); 79 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *); 80 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *); 81 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t, 82 uint_t, const void *); 83 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t, 84 uint_t, void *); 85 static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t, 86 mac_prop_info_handle_t); 87 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t, 88 const void *); 89 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *); 90 static void e1000g_init_locks(struct e1000g *); 91 static void e1000g_destroy_locks(struct e1000g *); 92 static int e1000g_identify_hardware(struct e1000g *); 93 static int e1000g_regs_map(struct e1000g *); 94 static int e1000g_set_driver_params(struct e1000g *); 95 static void e1000g_set_bufsize(struct e1000g *); 96 static int e1000g_register_mac(struct e1000g *); 97 static boolean_t e1000g_rx_drain(struct e1000g *); 98 static boolean_t e1000g_tx_drain(struct e1000g *); 99 static void e1000g_init_unicst(struct e1000g *); 100 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int); 101 static int e1000g_alloc_rx_data(struct e1000g *); 102 static void e1000g_release_multicast(struct e1000g *); 103 static void e1000g_pch_limits(struct e1000g *); 104 static uint32_t e1000g_mtu2maxframe(uint32_t); 105 106 /* 107 * Local routines 108 */ 109 static boolean_t e1000g_reset_adapter(struct e1000g *); 110 static void e1000g_tx_clean(struct e1000g *); 111 static void e1000g_rx_clean(struct e1000g *); 112 static void e1000g_link_timer(void *); 113 static void e1000g_local_timer(void *); 114 static boolean_t e1000g_link_check(struct e1000g *); 115 static boolean_t e1000g_stall_check(struct e1000g *); 116 static void e1000g_smartspeed(struct e1000g *); 117 static void e1000g_get_conf(struct e1000g *); 118 static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int, 119 int *); 120 static void enable_watchdog_timer(struct e1000g *); 121 static void disable_watchdog_timer(struct e1000g *); 122 static void start_watchdog_timer(struct e1000g *); 123 static void restart_watchdog_timer(struct e1000g *); 124 static void stop_watchdog_timer(struct e1000g *); 125 static void stop_link_timer(struct e1000g *); 126 static void stop_82547_timer(e1000g_tx_ring_t *); 127 static void e1000g_force_speed_duplex(struct e1000g *); 128 static void e1000g_setup_max_mtu(struct e1000g *); 129 static void e1000g_get_max_frame_size(struct e1000g *); 130 static boolean_t is_valid_mac_addr(uint8_t *); 131 static void e1000g_unattach(dev_info_t *, 
struct e1000g *); 132 static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *); 133 #ifdef E1000G_DEBUG 134 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *); 135 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *); 136 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *); 137 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *); 138 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *, 139 struct iocblk *, mblk_t *); 140 #endif 141 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *, 142 struct iocblk *, mblk_t *); 143 static boolean_t e1000g_check_loopback_support(struct e1000_hw *); 144 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t); 145 static void e1000g_set_internal_loopback(struct e1000g *); 146 static void e1000g_set_external_loopback_1000(struct e1000g *); 147 static void e1000g_set_external_loopback_100(struct e1000g *); 148 static void e1000g_set_external_loopback_10(struct e1000g *); 149 static int e1000g_add_intrs(struct e1000g *); 150 static int e1000g_intr_add(struct e1000g *, int); 151 static int e1000g_rem_intrs(struct e1000g *); 152 static int e1000g_enable_intrs(struct e1000g *); 153 static int e1000g_disable_intrs(struct e1000g *); 154 static boolean_t e1000g_link_up(struct e1000g *); 155 #ifdef __sparc 156 static boolean_t e1000g_find_mac_address(struct e1000g *); 157 #endif 158 static void e1000g_get_phy_state(struct e1000g *); 159 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 160 const void *impl_data); 161 static void e1000g_fm_init(struct e1000g *Adapter); 162 static void e1000g_fm_fini(struct e1000g *Adapter); 163 static void e1000g_param_sync(struct e1000g *); 164 static void e1000g_get_driver_control(struct e1000_hw *); 165 static void e1000g_release_driver_control(struct e1000_hw *); 166 static void e1000g_restore_promisc(struct e1000g *Adapter); 167 168 char *e1000g_priv_props[] = { 169 "_tx_bcopy_threshold", 170 "_tx_interrupt_enable", 171 "_tx_intr_delay", 172 "_tx_intr_abs_delay", 173 "_rx_bcopy_threshold", 174 "_max_num_rcv_packets", 175 "_rx_intr_delay", 176 "_rx_intr_abs_delay", 177 "_intr_throttling_rate", 178 "_intr_adaptive", 179 "_adv_pause_cap", 180 "_adv_asym_pause_cap", 181 NULL 182 }; 183 184 static struct cb_ops cb_ws_ops = { 185 nulldev, /* cb_open */ 186 nulldev, /* cb_close */ 187 nodev, /* cb_strategy */ 188 nodev, /* cb_print */ 189 nodev, /* cb_dump */ 190 nodev, /* cb_read */ 191 nodev, /* cb_write */ 192 nodev, /* cb_ioctl */ 193 nodev, /* cb_devmap */ 194 nodev, /* cb_mmap */ 195 nodev, /* cb_segmap */ 196 nochpoll, /* cb_chpoll */ 197 ddi_prop_op, /* cb_prop_op */ 198 NULL, /* cb_stream */ 199 D_MP | D_HOTPLUG, /* cb_flag */ 200 CB_REV, /* cb_rev */ 201 nodev, /* cb_aread */ 202 nodev /* cb_awrite */ 203 }; 204 205 static struct dev_ops ws_ops = { 206 DEVO_REV, /* devo_rev */ 207 0, /* devo_refcnt */ 208 NULL, /* devo_getinfo */ 209 nulldev, /* devo_identify */ 210 nulldev, /* devo_probe */ 211 e1000g_attach, /* devo_attach */ 212 e1000g_detach, /* devo_detach */ 213 nodev, /* devo_reset */ 214 &cb_ws_ops, /* devo_cb_ops */ 215 NULL, /* devo_bus_ops */ 216 ddi_power, /* devo_power */ 217 e1000g_quiesce /* devo_quiesce */ 218 }; 219 220 static struct modldrv modldrv = { 221 &mod_driverops, /* Type of module. 
This one is a driver */
	ident,				/* Description string */
	&ws_ops,			/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

/* Access attributes for register mapping */
static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

#define	E1000G_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t e1000g_m_callbacks = {
	E1000G_M_CALLBACK_FLAGS,
	e1000g_m_stat,
	e1000g_m_start,
	e1000g_m_stop,
	e1000g_m_promisc,
	e1000g_m_multicst,
	NULL,
	e1000g_m_tx,
	NULL,
	e1000g_m_ioctl,
	e1000g_m_getcapab,
	NULL,
	NULL,
	e1000g_m_setprop,
	e1000g_m_getprop,
	e1000g_m_propinfo
};

/*
 * Global variables
 */
uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K;
uint32_t e1000g_mblks_pending = 0;
/*
 * Workaround for Dynamic Reconfiguration support, for x86 platform only.
 * Here we maintain a private dev_info list if e1000g_force_detach is
 * enabled. If we force the driver to detach while there are still some
 * rx buffers retained in the upper layer, we have to keep a copy of the
 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
 * structure will be freed after the driver is detached. However, when we
 * finally free those rx buffers released by the upper layer, we need to
 * refer to the dev_info to free the dma buffers, so we save a copy of
 * the dev_info for this purpose. On the x86 platform we assume this copy
 * of dev_info is always valid, but on SPARC it could become invalid
 * after a system-board-level DR operation. For this reason, the global
 * variable e1000g_force_detach must be B_FALSE on SPARC.
 */
#ifdef __sparc
boolean_t e1000g_force_detach = B_FALSE;
#else
boolean_t e1000g_force_detach = B_TRUE;
#endif
private_devi_list_t *e1000g_private_devi_list = NULL;

/*
 * The mutex e1000g_rx_detach_lock is defined to protect the processing of
 * the private dev_info list, and to serialize rx buffer freeing and
 * rx buffer recycling.
 */
kmutex_t e1000g_rx_detach_lock;
/*
 * The rwlock e1000g_dma_type_lock is defined to protect the global flag
 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
 * If there are many e1000g instances, the system may run out of DVMA
 * resources during the initialization of the instances, in which case the
 * flag is changed to "USE_DMA". Because different e1000g instances are
 * initialized in parallel, we need this lock to protect the flag.
 */
krwlock_t e1000g_dma_type_lock;

/*
 * The 82546 chipset is a dual-port device and both ports share one EEPROM.
 * According to Intel, the 82546 has a hardware problem: when one port is
 * being reset while the other port is trying to access the EEPROM, the
 * system can hang or panic. To work around this, we use a global mutex to
 * prevent such operations from happening simultaneously on different
 * instances. This workaround is applied to all the devices supported by
 * this driver.
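 *
 * As an illustration (this is the pattern used at the call sites later in
 * this file, not a quote of any one of them), NVM-sensitive operations are
 * wrapped like:
 *
 *	mutex_enter(&e1000g_nvm_lock);
 *	result = e1000_reset_hw(hw);
 *	mutex_exit(&e1000g_nvm_lock);
 *
 * so a reset issued on one port can never overlap an EEPROM access made
 * on behalf of the other port.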
310 */ 311 kmutex_t e1000g_nvm_lock; 312 313 /* 314 * Loadable module configuration entry points for the driver 315 */ 316 317 /* 318 * _init - module initialization 319 */ 320 int 321 _init(void) 322 { 323 int status; 324 325 mac_init_ops(&ws_ops, WSNAME); 326 status = mod_install(&modlinkage); 327 if (status != DDI_SUCCESS) 328 mac_fini_ops(&ws_ops); 329 else { 330 mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL); 331 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL); 332 mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL); 333 } 334 335 return (status); 336 } 337 338 /* 339 * _fini - module finalization 340 */ 341 int 342 _fini(void) 343 { 344 int status; 345 346 if (e1000g_mblks_pending != 0) 347 return (EBUSY); 348 349 status = mod_remove(&modlinkage); 350 if (status == DDI_SUCCESS) { 351 mac_fini_ops(&ws_ops); 352 353 if (e1000g_force_detach) { 354 private_devi_list_t *devi_node; 355 356 mutex_enter(&e1000g_rx_detach_lock); 357 while (e1000g_private_devi_list != NULL) { 358 devi_node = e1000g_private_devi_list; 359 e1000g_private_devi_list = 360 e1000g_private_devi_list->next; 361 362 kmem_free(devi_node->priv_dip, 363 sizeof (struct dev_info)); 364 kmem_free(devi_node, 365 sizeof (private_devi_list_t)); 366 } 367 mutex_exit(&e1000g_rx_detach_lock); 368 } 369 370 mutex_destroy(&e1000g_rx_detach_lock); 371 rw_destroy(&e1000g_dma_type_lock); 372 mutex_destroy(&e1000g_nvm_lock); 373 } 374 375 return (status); 376 } 377 378 /* 379 * _info - module information 380 */ 381 int 382 _info(struct modinfo *modinfop) 383 { 384 return (mod_info(&modlinkage, modinfop)); 385 } 386 387 /* 388 * e1000g_attach - driver attach 389 * 390 * This function is the device-specific initialization entry 391 * point. This entry point is required and must be written. 392 * The DDI_ATTACH command must be provided in the attach entry 393 * point. When attach() is called with cmd set to DDI_ATTACH, 394 * all normal kernel services (such as kmem_alloc(9F)) are 395 * available for use by the driver. 396 * 397 * The attach() function will be called once for each instance 398 * of the device on the system with cmd set to DDI_ATTACH. 399 * Until attach() succeeds, the only driver entry points which 400 * may be called are open(9E) and getinfo(9E). 401 */ 402 static int 403 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 404 { 405 struct e1000g *Adapter; 406 struct e1000_hw *hw; 407 struct e1000g_osdep *osdep; 408 int instance; 409 410 switch (cmd) { 411 default: 412 e1000g_log(NULL, CE_WARN, 413 "Unsupported command send to e1000g_attach... 
"); 414 return (DDI_FAILURE); 415 416 case DDI_RESUME: 417 return (e1000g_resume(devinfo)); 418 419 case DDI_ATTACH: 420 break; 421 } 422 423 /* 424 * get device instance number 425 */ 426 instance = ddi_get_instance(devinfo); 427 428 /* 429 * Allocate soft data structure 430 */ 431 Adapter = 432 (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP); 433 434 Adapter->dip = devinfo; 435 Adapter->instance = instance; 436 Adapter->tx_ring->adapter = Adapter; 437 Adapter->rx_ring->adapter = Adapter; 438 439 hw = &Adapter->shared; 440 osdep = &Adapter->osdep; 441 hw->back = osdep; 442 osdep->adapter = Adapter; 443 444 ddi_set_driver_private(devinfo, (caddr_t)Adapter); 445 446 /* 447 * Initialize for fma support 448 */ 449 (void) e1000g_get_prop(Adapter, "fm-capable", 450 0, 0x0f, 451 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 452 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE, 453 &Adapter->fm_capabilities); 454 e1000g_fm_init(Adapter); 455 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT; 456 457 /* 458 * PCI Configure 459 */ 460 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { 461 e1000g_log(Adapter, CE_WARN, "PCI configuration failed"); 462 goto attach_fail; 463 } 464 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; 465 466 /* 467 * Setup hardware 468 */ 469 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) { 470 e1000g_log(Adapter, CE_WARN, "Identify hardware failed"); 471 goto attach_fail; 472 } 473 474 /* 475 * Map in the device registers. 476 */ 477 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) { 478 e1000g_log(Adapter, CE_WARN, "Mapping registers failed"); 479 goto attach_fail; 480 } 481 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP; 482 483 /* 484 * Initialize driver parameters 485 */ 486 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) { 487 goto attach_fail; 488 } 489 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP; 490 491 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 492 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 493 goto attach_fail; 494 } 495 496 /* 497 * Initialize interrupts 498 */ 499 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) { 500 e1000g_log(Adapter, CE_WARN, "Add interrupts failed"); 501 goto attach_fail; 502 } 503 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 504 505 /* 506 * Initialize mutex's for this device. 
507 * Do this before enabling the interrupt handler and 508 * register the softint to avoid the condition where 509 * interrupt handler can try using uninitialized mutex 510 */ 511 e1000g_init_locks(Adapter); 512 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS; 513 514 /* 515 * Initialize Driver Counters 516 */ 517 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) { 518 e1000g_log(Adapter, CE_WARN, "Init stats failed"); 519 goto attach_fail; 520 } 521 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS; 522 523 /* 524 * Initialize chip hardware and software structures 525 */ 526 rw_enter(&Adapter->chip_lock, RW_WRITER); 527 if (e1000g_init(Adapter) != DDI_SUCCESS) { 528 rw_exit(&Adapter->chip_lock); 529 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed"); 530 goto attach_fail; 531 } 532 rw_exit(&Adapter->chip_lock); 533 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 534 535 /* 536 * Register the driver to the MAC 537 */ 538 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) { 539 e1000g_log(Adapter, CE_WARN, "Register MAC failed"); 540 goto attach_fail; 541 } 542 Adapter->attach_progress |= ATTACH_PROGRESS_MAC; 543 544 /* 545 * Now that mutex locks are initialized, and the chip is also 546 * initialized, enable interrupts. 547 */ 548 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) { 549 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed"); 550 goto attach_fail; 551 } 552 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 553 554 /* 555 * If e1000g_force_detach is enabled, in global private dip list, 556 * we will create a new entry, which maintains the priv_dip for DR 557 * supports after driver detached. 558 */ 559 if (e1000g_force_detach) { 560 private_devi_list_t *devi_node; 561 562 Adapter->priv_dip = 563 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP); 564 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip), 565 sizeof (struct dev_info)); 566 567 devi_node = 568 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP); 569 570 mutex_enter(&e1000g_rx_detach_lock); 571 devi_node->priv_dip = Adapter->priv_dip; 572 devi_node->flag = E1000G_PRIV_DEVI_ATTACH; 573 devi_node->pending_rx_count = 0; 574 575 Adapter->priv_devi_node = devi_node; 576 577 if (e1000g_private_devi_list == NULL) { 578 devi_node->prev = NULL; 579 devi_node->next = NULL; 580 e1000g_private_devi_list = devi_node; 581 } else { 582 devi_node->prev = NULL; 583 devi_node->next = e1000g_private_devi_list; 584 e1000g_private_devi_list->prev = devi_node; 585 e1000g_private_devi_list = devi_node; 586 } 587 mutex_exit(&e1000g_rx_detach_lock); 588 } 589 590 Adapter->e1000g_state = E1000G_INITIALIZED; 591 return (DDI_SUCCESS); 592 593 attach_fail: 594 e1000g_unattach(devinfo, Adapter); 595 return (DDI_FAILURE); 596 } 597 598 static int 599 e1000g_register_mac(struct e1000g *Adapter) 600 { 601 struct e1000_hw *hw = &Adapter->shared; 602 mac_register_t *mac; 603 int err; 604 605 if ((mac = mac_alloc(MAC_VERSION)) == NULL) 606 return (DDI_FAILURE); 607 608 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 609 mac->m_driver = Adapter; 610 mac->m_dip = Adapter->dip; 611 mac->m_src_addr = hw->mac.addr; 612 mac->m_callbacks = &e1000g_m_callbacks; 613 mac->m_min_sdu = 0; 614 mac->m_max_sdu = Adapter->default_mtu; 615 mac->m_margin = VLAN_TAGSZ; 616 mac->m_priv_props = e1000g_priv_props; 617 mac->m_v12n = MAC_VIRT_LEVEL1; 618 619 err = mac_register(mac, &Adapter->mh); 620 mac_free(mac); 621 622 return (err == 0 ? 
DDI_SUCCESS : DDI_FAILURE); 623 } 624 625 static int 626 e1000g_identify_hardware(struct e1000g *Adapter) 627 { 628 struct e1000_hw *hw = &Adapter->shared; 629 struct e1000g_osdep *osdep = &Adapter->osdep; 630 631 /* Get the device id */ 632 hw->vendor_id = 633 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID); 634 hw->device_id = 635 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID); 636 hw->revision_id = 637 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID); 638 hw->subsystem_device_id = 639 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID); 640 hw->subsystem_vendor_id = 641 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID); 642 643 if (e1000_set_mac_type(hw) != E1000_SUCCESS) { 644 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 645 "MAC type could not be set properly."); 646 return (DDI_FAILURE); 647 } 648 649 return (DDI_SUCCESS); 650 } 651 652 static int 653 e1000g_regs_map(struct e1000g *Adapter) 654 { 655 dev_info_t *devinfo = Adapter->dip; 656 struct e1000_hw *hw = &Adapter->shared; 657 struct e1000g_osdep *osdep = &Adapter->osdep; 658 off_t mem_size; 659 bar_info_t bar_info; 660 int offset, rnumber; 661 662 rnumber = ADAPTER_REG_SET; 663 /* Get size of adapter register memory */ 664 if (ddi_dev_regsize(devinfo, rnumber, &mem_size) != 665 DDI_SUCCESS) { 666 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 667 "ddi_dev_regsize for registers failed"); 668 return (DDI_FAILURE); 669 } 670 671 /* Map adapter register memory */ 672 if ((ddi_regs_map_setup(devinfo, rnumber, 673 (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr, 674 &osdep->reg_handle)) != DDI_SUCCESS) { 675 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 676 "ddi_regs_map_setup for registers failed"); 677 goto regs_map_fail; 678 } 679 680 /* ICH needs to map flash memory */ 681 switch (hw->mac.type) { 682 case e1000_ich8lan: 683 case e1000_ich9lan: 684 case e1000_ich10lan: 685 case e1000_pchlan: 686 case e1000_pch2lan: 687 case e1000_pch_lpt: 688 rnumber = ICH_FLASH_REG_SET; 689 690 /* get flash size */ 691 if (ddi_dev_regsize(devinfo, rnumber, 692 &mem_size) != DDI_SUCCESS) { 693 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 694 "ddi_dev_regsize for ICH flash failed"); 695 goto regs_map_fail; 696 } 697 698 /* map flash in */ 699 if (ddi_regs_map_setup(devinfo, rnumber, 700 (caddr_t *)&hw->flash_address, 0, 701 mem_size, &e1000g_regs_acc_attr, 702 &osdep->ich_flash_handle) != DDI_SUCCESS) { 703 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 704 "ddi_regs_map_setup for ICH flash failed"); 705 goto regs_map_fail; 706 } 707 break; 708 default: 709 break; 710 } 711 712 /* map io space */ 713 switch (hw->mac.type) { 714 case e1000_82544: 715 case e1000_82540: 716 case e1000_82545: 717 case e1000_82546: 718 case e1000_82541: 719 case e1000_82541_rev_2: 720 /* find the IO bar */ 721 rnumber = -1; 722 for (offset = PCI_CONF_BASE1; 723 offset <= PCI_CONF_BASE5; offset += 4) { 724 if (e1000g_get_bar_info(devinfo, offset, &bar_info) 725 != DDI_SUCCESS) 726 continue; 727 if (bar_info.type == E1000G_BAR_IO) { 728 rnumber = bar_info.rnumber; 729 break; 730 } 731 } 732 733 if (rnumber < 0) { 734 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 735 "No io space is found"); 736 goto regs_map_fail; 737 } 738 739 /* get io space size */ 740 if (ddi_dev_regsize(devinfo, rnumber, 741 &mem_size) != DDI_SUCCESS) { 742 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 743 "ddi_dev_regsize for io space failed"); 744 goto regs_map_fail; 745 } 746 747 /* map io space */ 748 if ((ddi_regs_map_setup(devinfo, rnumber, 749 (caddr_t *)&hw->io_base, 0, mem_size, 750 &e1000g_regs_acc_attr, 751 
&osdep->io_reg_handle)) != DDI_SUCCESS) { 752 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 753 "ddi_regs_map_setup for io space failed"); 754 goto regs_map_fail; 755 } 756 break; 757 default: 758 hw->io_base = 0; 759 break; 760 } 761 762 return (DDI_SUCCESS); 763 764 regs_map_fail: 765 if (osdep->reg_handle != NULL) 766 ddi_regs_map_free(&osdep->reg_handle); 767 if (osdep->ich_flash_handle != NULL) 768 ddi_regs_map_free(&osdep->ich_flash_handle); 769 return (DDI_FAILURE); 770 } 771 772 static int 773 e1000g_set_driver_params(struct e1000g *Adapter) 774 { 775 struct e1000_hw *hw; 776 777 hw = &Adapter->shared; 778 779 /* Set MAC type and initialize hardware functions */ 780 if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) { 781 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 782 "Could not setup hardware functions"); 783 return (DDI_FAILURE); 784 } 785 786 /* Get bus information */ 787 if (e1000_get_bus_info(hw) != E1000_SUCCESS) { 788 E1000G_DEBUGLOG_0(Adapter, CE_WARN, 789 "Could not get bus information"); 790 return (DDI_FAILURE); 791 } 792 793 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word); 794 795 hw->mac.autoneg_failed = B_TRUE; 796 797 /* Set the autoneg_wait_to_complete flag to B_FALSE */ 798 hw->phy.autoneg_wait_to_complete = B_FALSE; 799 800 /* Adaptive IFS related changes */ 801 hw->mac.adaptive_ifs = B_TRUE; 802 803 /* Enable phy init script for IGP phy of 82541/82547 */ 804 if ((hw->mac.type == e1000_82547) || 805 (hw->mac.type == e1000_82541) || 806 (hw->mac.type == e1000_82547_rev_2) || 807 (hw->mac.type == e1000_82541_rev_2)) 808 e1000_init_script_state_82541(hw, B_TRUE); 809 810 /* Enable the TTL workaround for 82541/82547 */ 811 e1000_set_ttl_workaround_state_82541(hw, B_TRUE); 812 813 #ifdef __sparc 814 Adapter->strip_crc = B_TRUE; 815 #else 816 Adapter->strip_crc = B_FALSE; 817 #endif 818 819 /* setup the maximum MTU size of the chip */ 820 e1000g_setup_max_mtu(Adapter); 821 822 /* Get speed/duplex settings in conf file */ 823 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL; 824 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 825 e1000g_force_speed_duplex(Adapter); 826 827 /* Get Jumbo Frames settings in conf file */ 828 e1000g_get_max_frame_size(Adapter); 829 830 /* Get conf file properties */ 831 e1000g_get_conf(Adapter); 832 833 /* enforce PCH limits */ 834 e1000g_pch_limits(Adapter); 835 836 /* Set Rx/Tx buffer size */ 837 e1000g_set_bufsize(Adapter); 838 839 /* Master Latency Timer */ 840 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER; 841 842 /* copper options */ 843 if (hw->phy.media_type == e1000_media_type_copper) { 844 hw->phy.mdix = 0; /* AUTO_ALL_MODES */ 845 hw->phy.disable_polarity_correction = B_FALSE; 846 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */ 847 } 848 849 /* The initial link state should be "unknown" */ 850 Adapter->link_state = LINK_STATE_UNKNOWN; 851 852 /* Initialize rx parameters */ 853 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY; 854 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY; 855 856 /* Initialize tx parameters */ 857 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE; 858 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD; 859 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY; 860 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY; 861 862 /* Initialize rx parameters */ 863 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD; 864 865 return (DDI_SUCCESS); 866 } 867 868 static void 869 e1000g_setup_max_mtu(struct e1000g *Adapter) 870 { 871 struct e1000_mac_info *mac = 
&Adapter->shared.mac; 872 struct e1000_phy_info *phy = &Adapter->shared.phy; 873 874 switch (mac->type) { 875 /* types that do not support jumbo frames */ 876 case e1000_ich8lan: 877 case e1000_82573: 878 case e1000_82583: 879 Adapter->max_mtu = ETHERMTU; 880 break; 881 /* ich9 supports jumbo frames except on one phy type */ 882 case e1000_ich9lan: 883 if (phy->type == e1000_phy_ife) 884 Adapter->max_mtu = ETHERMTU; 885 else 886 Adapter->max_mtu = MAXIMUM_MTU_9K; 887 break; 888 /* pch can do jumbo frames up to 4K */ 889 case e1000_pchlan: 890 Adapter->max_mtu = MAXIMUM_MTU_4K; 891 break; 892 /* pch2 can do jumbo frames up to 9K */ 893 case e1000_pch2lan: 894 case e1000_pch_lpt: 895 Adapter->max_mtu = MAXIMUM_MTU_9K; 896 break; 897 /* types with a special limit */ 898 case e1000_82571: 899 case e1000_82572: 900 case e1000_82574: 901 case e1000_80003es2lan: 902 case e1000_ich10lan: 903 if (e1000g_jumbo_mtu >= ETHERMTU && 904 e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) { 905 Adapter->max_mtu = e1000g_jumbo_mtu; 906 } else { 907 Adapter->max_mtu = MAXIMUM_MTU_9K; 908 } 909 break; 910 /* default limit is 16K */ 911 default: 912 Adapter->max_mtu = FRAME_SIZE_UPTO_16K - 913 sizeof (struct ether_vlan_header) - ETHERFCSL; 914 break; 915 } 916 } 917 918 static void 919 e1000g_set_bufsize(struct e1000g *Adapter) 920 { 921 struct e1000_mac_info *mac = &Adapter->shared.mac; 922 uint64_t rx_size; 923 uint64_t tx_size; 924 925 dev_info_t *devinfo = Adapter->dip; 926 #ifdef __sparc 927 ulong_t iommu_pagesize; 928 #endif 929 /* Get the system page size */ 930 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1); 931 932 #ifdef __sparc 933 iommu_pagesize = dvma_pagesize(devinfo); 934 if (iommu_pagesize != 0) { 935 if (Adapter->sys_page_sz == iommu_pagesize) { 936 if (iommu_pagesize > 0x4000) 937 Adapter->sys_page_sz = 0x4000; 938 } else { 939 if (Adapter->sys_page_sz > iommu_pagesize) 940 Adapter->sys_page_sz = iommu_pagesize; 941 } 942 } 943 if (Adapter->lso_enable) { 944 Adapter->dvma_page_num = E1000_LSO_MAXLEN / 945 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; 946 } else { 947 Adapter->dvma_page_num = Adapter->max_frame_size / 948 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM; 949 } 950 ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM); 951 #endif 952 953 Adapter->min_frame_size = ETHERMIN + ETHERFCSL; 954 955 if (Adapter->mem_workaround_82546 && 956 ((mac->type == e1000_82545) || 957 (mac->type == e1000_82546) || 958 (mac->type == e1000_82546_rev_3))) { 959 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K; 960 } else { 961 rx_size = Adapter->max_frame_size; 962 if ((rx_size > FRAME_SIZE_UPTO_2K) && 963 (rx_size <= FRAME_SIZE_UPTO_4K)) 964 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K; 965 else if ((rx_size > FRAME_SIZE_UPTO_4K) && 966 (rx_size <= FRAME_SIZE_UPTO_8K)) 967 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K; 968 else if ((rx_size > FRAME_SIZE_UPTO_8K) && 969 (rx_size <= FRAME_SIZE_UPTO_16K)) 970 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K; 971 else 972 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K; 973 } 974 Adapter->rx_buffer_size += E1000G_IPALIGNROOM; 975 976 tx_size = Adapter->max_frame_size; 977 if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K)) 978 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K; 979 else if ((tx_size > FRAME_SIZE_UPTO_4K) && 980 (tx_size <= FRAME_SIZE_UPTO_8K)) 981 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K; 982 else if ((tx_size > FRAME_SIZE_UPTO_8K) && 983 (tx_size <= FRAME_SIZE_UPTO_16K)) 984 
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
	else
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;

	/*
	 * Wiseman adapters require receive buffers aligned on a 256 byte
	 * boundary. Since Livengood does not require this, and forcing it
	 * for all hardware would hurt performance, we make it applicable
	 * only to Wiseman and only when jumbo frames are enabled; the rest
	 * of the time normal frames are fine. There is, however, a
	 * potential risk of losing data if the buffer is not aligned, so
	 * all Wiseman boards get 256 byte aligned buffers.
	 */
	if (mac->type < e1000_82543)
		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
	else
		Adapter->rx_buf_align = 1;
}

/*
 * e1000g_detach - driver detach
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by this
 * driver is freed.
 */
static int
e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct e1000g *Adapter;
	boolean_t rx_drain;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (e1000g_suspend(devinfo));

	case DDI_DETACH:
		break;
	}

	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
	if (Adapter == NULL)
		return (DDI_FAILURE);

	rx_drain = e1000g_rx_drain(Adapter);
	if (!rx_drain && !e1000g_force_detach)
		return (DDI_FAILURE);

	if (mac_unregister(Adapter->mh) != 0) {
		e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
		return (DDI_FAILURE);
	}
	Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;

	ASSERT(!(Adapter->e1000g_state & E1000G_STARTED));

	if (!e1000g_force_detach && !rx_drain)
		return (DDI_FAILURE);

	e1000g_unattach(devinfo, Adapter);

	return (DDI_SUCCESS);
}

/*
 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
 */
void
e1000g_free_priv_devi_node(private_devi_list_t *devi_node)
{
	ASSERT(e1000g_private_devi_list != NULL);
	ASSERT(devi_node != NULL);

	if (devi_node->prev != NULL)
		devi_node->prev->next = devi_node->next;
	if (devi_node->next != NULL)
		devi_node->next->prev = devi_node->prev;
	if (devi_node == e1000g_private_devi_list)
		e1000g_private_devi_list = devi_node->next;

	kmem_free(devi_node->priv_dip,
	    sizeof (struct dev_info));
	kmem_free(devi_node,
	    sizeof (private_devi_list_t));
}

static void
e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
{
	private_devi_list_t *devi_node;
	int result;

	if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) e1000g_disable_intrs(Adapter);
	}

	if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(Adapter->mh);
	}

	if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		(void) 
e1000g_rem_intrs(Adapter); 1099 } 1100 1101 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) { 1102 (void) ddi_prop_remove_all(devinfo); 1103 } 1104 1105 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) { 1106 kstat_delete((kstat_t *)Adapter->e1000g_ksp); 1107 } 1108 1109 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) { 1110 stop_link_timer(Adapter); 1111 1112 mutex_enter(&e1000g_nvm_lock); 1113 result = e1000_reset_hw(&Adapter->shared); 1114 mutex_exit(&e1000g_nvm_lock); 1115 1116 if (result != E1000_SUCCESS) { 1117 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1118 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1119 } 1120 } 1121 1122 e1000g_release_multicast(Adapter); 1123 1124 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) { 1125 if (Adapter->osdep.reg_handle != NULL) 1126 ddi_regs_map_free(&Adapter->osdep.reg_handle); 1127 if (Adapter->osdep.ich_flash_handle != NULL) 1128 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle); 1129 if (Adapter->osdep.io_reg_handle != NULL) 1130 ddi_regs_map_free(&Adapter->osdep.io_reg_handle); 1131 } 1132 1133 if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) { 1134 if (Adapter->osdep.cfg_handle != NULL) 1135 pci_config_teardown(&Adapter->osdep.cfg_handle); 1136 } 1137 1138 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) { 1139 e1000g_destroy_locks(Adapter); 1140 } 1141 1142 if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) { 1143 e1000g_fm_fini(Adapter); 1144 } 1145 1146 mutex_enter(&e1000g_rx_detach_lock); 1147 if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) { 1148 devi_node = Adapter->priv_devi_node; 1149 devi_node->flag |= E1000G_PRIV_DEVI_DETACH; 1150 1151 if (devi_node->pending_rx_count == 0) { 1152 e1000g_free_priv_devi_node(devi_node); 1153 } 1154 } 1155 mutex_exit(&e1000g_rx_detach_lock); 1156 1157 kmem_free((caddr_t)Adapter, sizeof (struct e1000g)); 1158 1159 /* 1160 * Another hotplug spec requirement, 1161 * run ddi_set_driver_private(devinfo, null); 1162 */ 1163 ddi_set_driver_private(devinfo, NULL); 1164 } 1165 1166 /* 1167 * Get the BAR type and rnumber for a given PCI BAR offset 1168 */ 1169 static int 1170 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info) 1171 { 1172 pci_regspec_t *regs; 1173 uint_t regs_length; 1174 int type, rnumber, rcount; 1175 1176 ASSERT((bar_offset >= PCI_CONF_BASE0) && 1177 (bar_offset <= PCI_CONF_BASE5)); 1178 1179 /* 1180 * Get the DDI "reg" property 1181 */ 1182 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, 1183 DDI_PROP_DONTPASS, "reg", (int **)®s, 1184 ®s_length) != DDI_PROP_SUCCESS) { 1185 return (DDI_FAILURE); 1186 } 1187 1188 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t); 1189 /* 1190 * Check the BAR offset 1191 */ 1192 for (rnumber = 0; rnumber < rcount; ++rnumber) { 1193 if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) { 1194 type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK; 1195 break; 1196 } 1197 } 1198 1199 ddi_prop_free(regs); 1200 1201 if (rnumber >= rcount) 1202 return (DDI_FAILURE); 1203 1204 switch (type) { 1205 case PCI_ADDR_CONFIG: 1206 bar_info->type = E1000G_BAR_CONFIG; 1207 break; 1208 case PCI_ADDR_IO: 1209 bar_info->type = E1000G_BAR_IO; 1210 break; 1211 case PCI_ADDR_MEM32: 1212 bar_info->type = E1000G_BAR_MEM32; 1213 break; 1214 case PCI_ADDR_MEM64: 1215 bar_info->type = E1000G_BAR_MEM64; 1216 break; 1217 default: 1218 return (DDI_FAILURE); 1219 } 1220 bar_info->rnumber = rnumber; 1221 return (DDI_SUCCESS); 1222 } 1223 1224 static void 1225 
e1000g_init_locks(struct e1000g *Adapter) 1226 { 1227 e1000g_tx_ring_t *tx_ring; 1228 e1000g_rx_ring_t *rx_ring; 1229 1230 rw_init(&Adapter->chip_lock, NULL, 1231 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1232 mutex_init(&Adapter->link_lock, NULL, 1233 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1234 mutex_init(&Adapter->watchdog_lock, NULL, 1235 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1236 1237 tx_ring = Adapter->tx_ring; 1238 1239 mutex_init(&tx_ring->tx_lock, NULL, 1240 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1241 mutex_init(&tx_ring->usedlist_lock, NULL, 1242 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1243 mutex_init(&tx_ring->freelist_lock, NULL, 1244 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1245 1246 rx_ring = Adapter->rx_ring; 1247 1248 mutex_init(&rx_ring->rx_lock, NULL, 1249 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1250 } 1251 1252 static void 1253 e1000g_destroy_locks(struct e1000g *Adapter) 1254 { 1255 e1000g_tx_ring_t *tx_ring; 1256 e1000g_rx_ring_t *rx_ring; 1257 1258 tx_ring = Adapter->tx_ring; 1259 mutex_destroy(&tx_ring->tx_lock); 1260 mutex_destroy(&tx_ring->usedlist_lock); 1261 mutex_destroy(&tx_ring->freelist_lock); 1262 1263 rx_ring = Adapter->rx_ring; 1264 mutex_destroy(&rx_ring->rx_lock); 1265 1266 mutex_destroy(&Adapter->link_lock); 1267 mutex_destroy(&Adapter->watchdog_lock); 1268 rw_destroy(&Adapter->chip_lock); 1269 1270 /* destory mutex initialized in shared code */ 1271 e1000_destroy_hw_mutex(&Adapter->shared); 1272 } 1273 1274 static int 1275 e1000g_resume(dev_info_t *devinfo) 1276 { 1277 struct e1000g *Adapter; 1278 1279 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1280 if (Adapter == NULL) 1281 e1000g_log(Adapter, CE_PANIC, 1282 "Instance pointer is null\n"); 1283 1284 if (Adapter->dip != devinfo) 1285 e1000g_log(Adapter, CE_PANIC, 1286 "Devinfo is not the same as saved devinfo\n"); 1287 1288 rw_enter(&Adapter->chip_lock, RW_WRITER); 1289 1290 if (Adapter->e1000g_state & E1000G_STARTED) { 1291 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { 1292 rw_exit(&Adapter->chip_lock); 1293 /* 1294 * We note the failure, but return success, as the 1295 * system is still usable without this controller. 
1296 */ 1297 e1000g_log(Adapter, CE_WARN, 1298 "e1000g_resume: failed to restart controller\n"); 1299 return (DDI_SUCCESS); 1300 } 1301 /* Enable and start the watchdog timer */ 1302 enable_watchdog_timer(Adapter); 1303 } 1304 1305 Adapter->e1000g_state &= ~E1000G_SUSPENDED; 1306 1307 rw_exit(&Adapter->chip_lock); 1308 1309 return (DDI_SUCCESS); 1310 } 1311 1312 static int 1313 e1000g_suspend(dev_info_t *devinfo) 1314 { 1315 struct e1000g *Adapter; 1316 1317 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1318 if (Adapter == NULL) 1319 return (DDI_FAILURE); 1320 1321 rw_enter(&Adapter->chip_lock, RW_WRITER); 1322 1323 Adapter->e1000g_state |= E1000G_SUSPENDED; 1324 1325 /* if the port isn't plumbed, we can simply return */ 1326 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 1327 rw_exit(&Adapter->chip_lock); 1328 return (DDI_SUCCESS); 1329 } 1330 1331 e1000g_stop(Adapter, B_FALSE); 1332 1333 rw_exit(&Adapter->chip_lock); 1334 1335 /* Disable and stop all the timers */ 1336 disable_watchdog_timer(Adapter); 1337 stop_link_timer(Adapter); 1338 stop_82547_timer(Adapter->tx_ring); 1339 1340 return (DDI_SUCCESS); 1341 } 1342 1343 static int 1344 e1000g_init(struct e1000g *Adapter) 1345 { 1346 uint32_t pba; 1347 uint32_t high_water; 1348 struct e1000_hw *hw; 1349 clock_t link_timeout; 1350 int result; 1351 1352 hw = &Adapter->shared; 1353 1354 /* 1355 * reset to put the hardware in a known state 1356 * before we try to do anything with the eeprom 1357 */ 1358 mutex_enter(&e1000g_nvm_lock); 1359 result = e1000_reset_hw(hw); 1360 mutex_exit(&e1000g_nvm_lock); 1361 1362 if (result != E1000_SUCCESS) { 1363 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1364 goto init_fail; 1365 } 1366 1367 mutex_enter(&e1000g_nvm_lock); 1368 result = e1000_validate_nvm_checksum(hw); 1369 if (result < E1000_SUCCESS) { 1370 /* 1371 * Some PCI-E parts fail the first check due to 1372 * the link being in sleep state. Call it again, 1373 * if it fails a second time its a real issue. 1374 */ 1375 result = e1000_validate_nvm_checksum(hw); 1376 } 1377 mutex_exit(&e1000g_nvm_lock); 1378 1379 if (result < E1000_SUCCESS) { 1380 e1000g_log(Adapter, CE_WARN, 1381 "Invalid NVM checksum. Please contact " 1382 "the vendor to update the NVM."); 1383 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1384 goto init_fail; 1385 } 1386 1387 result = 0; 1388 #ifdef __sparc 1389 /* 1390 * First, we try to get the local ethernet address from OBP. If 1391 * failed, then we get it from the EEPROM of NIC card. 1392 */ 1393 result = e1000g_find_mac_address(Adapter); 1394 #endif 1395 /* Get the local ethernet address. 
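	 * (A zero 'result' at this point means no address was obtained from
	 * OBP on SPARC; on other platforms 'result' is simply still zero.
	 * Either way we fall back to e1000_read_mac_addr() from the NVM,
	 * and the E1000_SUCCESS check below covers both paths.)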
*/ 1396 if (!result) { 1397 mutex_enter(&e1000g_nvm_lock); 1398 result = e1000_read_mac_addr(hw); 1399 mutex_exit(&e1000g_nvm_lock); 1400 } 1401 1402 if (result < E1000_SUCCESS) { 1403 e1000g_log(Adapter, CE_WARN, "Read mac addr failed"); 1404 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1405 goto init_fail; 1406 } 1407 1408 /* check for valid mac address */ 1409 if (!is_valid_mac_addr(hw->mac.addr)) { 1410 e1000g_log(Adapter, CE_WARN, "Invalid mac addr"); 1411 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1412 goto init_fail; 1413 } 1414 1415 /* Set LAA state for 82571 chipset */ 1416 e1000_set_laa_state_82571(hw, B_TRUE); 1417 1418 /* Master Latency Timer implementation */ 1419 if (Adapter->master_latency_timer) { 1420 pci_config_put8(Adapter->osdep.cfg_handle, 1421 PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer); 1422 } 1423 1424 if (hw->mac.type < e1000_82547) { 1425 /* 1426 * Total FIFO is 64K 1427 */ 1428 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1429 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 1430 else 1431 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 1432 } else if ((hw->mac.type == e1000_82571) || 1433 (hw->mac.type == e1000_82572) || 1434 (hw->mac.type == e1000_80003es2lan)) { 1435 /* 1436 * Total FIFO is 48K 1437 */ 1438 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1439 pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */ 1440 else 1441 pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */ 1442 } else if (hw->mac.type == e1000_82573) { 1443 pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */ 1444 } else if (hw->mac.type == e1000_82574) { 1445 /* Keep adapter default: 20K for Rx, 20K for Tx */ 1446 pba = E1000_READ_REG(hw, E1000_PBA); 1447 } else if (hw->mac.type == e1000_ich8lan) { 1448 pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */ 1449 } else if (hw->mac.type == e1000_ich9lan) { 1450 pba = E1000_PBA_10K; 1451 } else if (hw->mac.type == e1000_ich10lan) { 1452 pba = E1000_PBA_10K; 1453 } else if (hw->mac.type == e1000_pchlan) { 1454 pba = E1000_PBA_26K; 1455 } else if (hw->mac.type == e1000_pch2lan) { 1456 pba = E1000_PBA_26K; 1457 } else if (hw->mac.type == e1000_pch_lpt) { 1458 pba = E1000_PBA_26K; 1459 } else { 1460 /* 1461 * Total FIFO is 40K 1462 */ 1463 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K) 1464 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ 1465 else 1466 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ 1467 } 1468 E1000_WRITE_REG(hw, E1000_PBA, pba); 1469 1470 /* 1471 * These parameters set thresholds for the adapter's generation(Tx) 1472 * and response(Rx) to Ethernet PAUSE frames. These are just threshold 1473 * settings. Flow control is enabled or disabled in the configuration 1474 * file. 1475 * High-water mark is set down from the top of the rx fifo (not 1476 * sensitive to max_frame_size) and low-water is set just below 1477 * high-water mark. 1478 * The high water mark must be low enough to fit one full frame above 1479 * it in the rx FIFO. Should be the lower of: 1480 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early 1481 * receive size (assuming ERT set to E1000_ERT_2048), or the full 1482 * Rx FIFO size minus one full frame. 1483 */ 1484 high_water = min(((pba << 10) * 9 / 10), 1485 ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 || 1486 hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ? 
1487 ((pba << 10) - (E1000_ERT_2048 << 3)) : 1488 ((pba << 10) - Adapter->max_frame_size))); 1489 1490 hw->fc.high_water = high_water & 0xFFF8; 1491 hw->fc.low_water = hw->fc.high_water - 8; 1492 1493 if (hw->mac.type == e1000_80003es2lan) 1494 hw->fc.pause_time = 0xFFFF; 1495 else 1496 hw->fc.pause_time = E1000_FC_PAUSE_TIME; 1497 hw->fc.send_xon = B_TRUE; 1498 1499 /* 1500 * Reset the adapter hardware the second time. 1501 */ 1502 mutex_enter(&e1000g_nvm_lock); 1503 result = e1000_reset_hw(hw); 1504 mutex_exit(&e1000g_nvm_lock); 1505 1506 if (result != E1000_SUCCESS) { 1507 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1508 goto init_fail; 1509 } 1510 1511 /* disable wakeup control by default */ 1512 if (hw->mac.type >= e1000_82544) 1513 E1000_WRITE_REG(hw, E1000_WUC, 0); 1514 1515 /* 1516 * MWI should be disabled on 82546. 1517 */ 1518 if (hw->mac.type == e1000_82546) 1519 e1000_pci_clear_mwi(hw); 1520 else 1521 e1000_pci_set_mwi(hw); 1522 1523 /* 1524 * Configure/Initialize hardware 1525 */ 1526 mutex_enter(&e1000g_nvm_lock); 1527 result = e1000_init_hw(hw); 1528 mutex_exit(&e1000g_nvm_lock); 1529 1530 if (result < E1000_SUCCESS) { 1531 e1000g_log(Adapter, CE_WARN, "Initialize hw failed"); 1532 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1533 goto init_fail; 1534 } 1535 1536 /* 1537 * Restore LED settings to the default from EEPROM 1538 * to meet the standard for Sun platforms. 1539 */ 1540 (void) e1000_cleanup_led(hw); 1541 1542 /* Disable Smart Power Down */ 1543 phy_spd_state(hw, B_FALSE); 1544 1545 /* Make sure driver has control */ 1546 e1000g_get_driver_control(hw); 1547 1548 /* 1549 * Initialize unicast addresses. 1550 */ 1551 e1000g_init_unicst(Adapter); 1552 1553 /* 1554 * Setup and initialize the mctable structures. 
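	 * (That is, e1000_update_mc_addr_list() below reprograms the
	 * hardware multicast filter from Adapter->mcast_table and
	 * Adapter->mcast_count.)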
After this routine 1555 * completes Multicast table will be set 1556 */ 1557 e1000_update_mc_addr_list(hw, 1558 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 1559 msec_delay(5); 1560 1561 /* 1562 * Implement Adaptive IFS 1563 */ 1564 e1000_reset_adaptive(hw); 1565 1566 /* Setup Interrupt Throttling Register */ 1567 if (hw->mac.type >= e1000_82540) { 1568 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate); 1569 } else 1570 Adapter->intr_adaptive = B_FALSE; 1571 1572 /* Start the timer for link setup */ 1573 if (hw->mac.autoneg) 1574 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000); 1575 else 1576 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000); 1577 1578 mutex_enter(&Adapter->link_lock); 1579 if (hw->phy.autoneg_wait_to_complete) { 1580 Adapter->link_complete = B_TRUE; 1581 } else { 1582 Adapter->link_complete = B_FALSE; 1583 Adapter->link_tid = timeout(e1000g_link_timer, 1584 (void *)Adapter, link_timeout); 1585 } 1586 mutex_exit(&Adapter->link_lock); 1587 1588 /* Save the state of the phy */ 1589 e1000g_get_phy_state(Adapter); 1590 1591 e1000g_param_sync(Adapter); 1592 1593 Adapter->init_count++; 1594 1595 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 1596 goto init_fail; 1597 } 1598 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1599 goto init_fail; 1600 } 1601 1602 Adapter->poll_mode = e1000g_poll_mode; 1603 1604 return (DDI_SUCCESS); 1605 1606 init_fail: 1607 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1608 return (DDI_FAILURE); 1609 } 1610 1611 static int 1612 e1000g_alloc_rx_data(struct e1000g *Adapter) 1613 { 1614 e1000g_rx_ring_t *rx_ring; 1615 e1000g_rx_data_t *rx_data; 1616 1617 rx_ring = Adapter->rx_ring; 1618 1619 rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP); 1620 1621 if (rx_data == NULL) 1622 return (DDI_FAILURE); 1623 1624 rx_data->priv_devi_node = Adapter->priv_devi_node; 1625 rx_data->rx_ring = rx_ring; 1626 1627 mutex_init(&rx_data->freelist_lock, NULL, 1628 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1629 mutex_init(&rx_data->recycle_lock, NULL, 1630 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1631 1632 rx_ring->rx_data = rx_data; 1633 1634 return (DDI_SUCCESS); 1635 } 1636 1637 void 1638 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data) 1639 { 1640 rx_sw_packet_t *packet, *next_packet; 1641 1642 if (rx_data == NULL) 1643 return; 1644 1645 packet = rx_data->packet_area; 1646 while (packet != NULL) { 1647 next_packet = packet->next; 1648 e1000g_free_rx_sw_packet(packet, B_TRUE); 1649 packet = next_packet; 1650 } 1651 rx_data->packet_area = NULL; 1652 } 1653 1654 void 1655 e1000g_free_rx_data(e1000g_rx_data_t *rx_data) 1656 { 1657 if (rx_data == NULL) 1658 return; 1659 1660 mutex_destroy(&rx_data->freelist_lock); 1661 mutex_destroy(&rx_data->recycle_lock); 1662 1663 kmem_free(rx_data, sizeof (e1000g_rx_data_t)); 1664 } 1665 1666 /* 1667 * Check if the link is up 1668 */ 1669 static boolean_t 1670 e1000g_link_up(struct e1000g *Adapter) 1671 { 1672 struct e1000_hw *hw = &Adapter->shared; 1673 boolean_t link_up = B_FALSE; 1674 1675 /* 1676 * get_link_status is set in the interrupt handler on link-status-change 1677 * or rx sequence error interrupt. get_link_status will stay 1678 * false until the e1000_check_for_link establishes link only 1679 * for copper adapters. 
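	 * (Concretely: the copper case below calls e1000_check_for_link()
	 * only while get_link_status is set, then trusts the STATUS.LU bit
	 * and falls back to the possibly-cleared get_link_status flag;
	 * the fiber and serdes cases always call e1000_check_for_link()
	 * and read STATUS.LU or mac.serdes_has_link respectively.)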
1680 */ 1681 switch (hw->phy.media_type) { 1682 case e1000_media_type_copper: 1683 if (hw->mac.get_link_status) { 1684 (void) e1000_check_for_link(hw); 1685 if ((E1000_READ_REG(hw, E1000_STATUS) & 1686 E1000_STATUS_LU)) { 1687 link_up = B_TRUE; 1688 } else { 1689 link_up = !hw->mac.get_link_status; 1690 } 1691 } else { 1692 link_up = B_TRUE; 1693 } 1694 break; 1695 case e1000_media_type_fiber: 1696 (void) e1000_check_for_link(hw); 1697 link_up = (E1000_READ_REG(hw, E1000_STATUS) & 1698 E1000_STATUS_LU); 1699 break; 1700 case e1000_media_type_internal_serdes: 1701 (void) e1000_check_for_link(hw); 1702 link_up = hw->mac.serdes_has_link; 1703 break; 1704 } 1705 1706 return (link_up); 1707 } 1708 1709 static void 1710 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp) 1711 { 1712 struct iocblk *iocp; 1713 struct e1000g *e1000gp; 1714 enum ioc_reply status; 1715 1716 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr; 1717 iocp->ioc_error = 0; 1718 e1000gp = (struct e1000g *)arg; 1719 1720 ASSERT(e1000gp); 1721 if (e1000gp == NULL) { 1722 miocnak(q, mp, 0, EINVAL); 1723 return; 1724 } 1725 1726 rw_enter(&e1000gp->chip_lock, RW_READER); 1727 if (e1000gp->e1000g_state & E1000G_SUSPENDED) { 1728 rw_exit(&e1000gp->chip_lock); 1729 miocnak(q, mp, 0, EINVAL); 1730 return; 1731 } 1732 rw_exit(&e1000gp->chip_lock); 1733 1734 switch (iocp->ioc_cmd) { 1735 1736 case LB_GET_INFO_SIZE: 1737 case LB_GET_INFO: 1738 case LB_GET_MODE: 1739 case LB_SET_MODE: 1740 status = e1000g_loopback_ioctl(e1000gp, iocp, mp); 1741 break; 1742 1743 1744 #ifdef E1000G_DEBUG 1745 case E1000G_IOC_REG_PEEK: 1746 case E1000G_IOC_REG_POKE: 1747 status = e1000g_pp_ioctl(e1000gp, iocp, mp); 1748 break; 1749 case E1000G_IOC_CHIP_RESET: 1750 e1000gp->reset_count++; 1751 if (e1000g_reset_adapter(e1000gp)) 1752 status = IOC_ACK; 1753 else 1754 status = IOC_INVAL; 1755 break; 1756 #endif 1757 default: 1758 status = IOC_INVAL; 1759 break; 1760 } 1761 1762 /* 1763 * Decide how to reply 1764 */ 1765 switch (status) { 1766 default: 1767 case IOC_INVAL: 1768 /* 1769 * Error, reply with a NAK and EINVAL or the specified error 1770 */ 1771 miocnak(q, mp, 0, iocp->ioc_error == 0 ? 1772 EINVAL : iocp->ioc_error); 1773 break; 1774 1775 case IOC_DONE: 1776 /* 1777 * OK, reply already sent 1778 */ 1779 break; 1780 1781 case IOC_ACK: 1782 /* 1783 * OK, reply with an ACK 1784 */ 1785 miocack(q, mp, 0, 0); 1786 break; 1787 1788 case IOC_REPLY: 1789 /* 1790 * OK, send prepared reply as ACK or NAK 1791 */ 1792 mp->b_datap->db_type = iocp->ioc_error == 0 ? 1793 M_IOCACK : M_IOCNAK; 1794 qreply(q, mp); 1795 break; 1796 } 1797 } 1798 1799 /* 1800 * The default value of e1000g_poll_mode == 0 assumes that the NIC is 1801 * capable of supporting only one interrupt and we shouldn't disable 1802 * the physical interrupt. In this case we let the interrupt come and 1803 * we queue the packets in the rx ring itself in case we are in polling 1804 * mode (better latency but slightly lower performance and a very 1805 * high intrrupt count in mpstat which is harmless). 1806 * 1807 * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt 1808 * which can be disabled in poll mode. This gives better overall 1809 * throughput (compared to the mode above), shows very low interrupt 1810 * count but has slightly higher latency since we pick the packets when 1811 * the poll thread does polling. 
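 *
 * A minimal sketch of the expected usage (the actual MAC framework
 * plumbing lives outside this file): when a ring has been switched to
 * poll mode its interrupt is kept disabled and the upper layer pulls
 * packets by calling the ring's poll entry point, roughly
 *
 *	mp = e1000g_poll_ring(rx_ring, bytes_to_pickup);
 *
 * where e1000g_poll_ring() (defined below) returns a chain of received
 * mblks limited to the requested byte budget, or NULL if the adapter is
 * suspended.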
1812 * 1813 * Currently, this flag should be enabled only while doing performance 1814 * measurement or when it can be guaranteed that entire NIC going 1815 * in poll mode will not harm any traffic like cluster heartbeat etc. 1816 */ 1817 int e1000g_poll_mode = 0; 1818 1819 /* 1820 * Called from the upper layers when driver is in polling mode to 1821 * pick up any queued packets. Care should be taken to not block 1822 * this thread. 1823 */ 1824 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup) 1825 { 1826 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg; 1827 mblk_t *mp = NULL; 1828 mblk_t *tail; 1829 struct e1000g *adapter; 1830 1831 adapter = rx_ring->adapter; 1832 1833 rw_enter(&adapter->chip_lock, RW_READER); 1834 1835 if (adapter->e1000g_state & E1000G_SUSPENDED) { 1836 rw_exit(&adapter->chip_lock); 1837 return (NULL); 1838 } 1839 1840 mutex_enter(&rx_ring->rx_lock); 1841 mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup); 1842 mutex_exit(&rx_ring->rx_lock); 1843 rw_exit(&adapter->chip_lock); 1844 return (mp); 1845 } 1846 1847 static int 1848 e1000g_m_start(void *arg) 1849 { 1850 struct e1000g *Adapter = (struct e1000g *)arg; 1851 1852 rw_enter(&Adapter->chip_lock, RW_WRITER); 1853 1854 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 1855 rw_exit(&Adapter->chip_lock); 1856 return (ECANCELED); 1857 } 1858 1859 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { 1860 rw_exit(&Adapter->chip_lock); 1861 return (ENOTACTIVE); 1862 } 1863 1864 Adapter->e1000g_state |= E1000G_STARTED; 1865 1866 rw_exit(&Adapter->chip_lock); 1867 1868 /* Enable and start the watchdog timer */ 1869 enable_watchdog_timer(Adapter); 1870 1871 return (0); 1872 } 1873 1874 static int 1875 e1000g_start(struct e1000g *Adapter, boolean_t global) 1876 { 1877 e1000g_rx_data_t *rx_data; 1878 1879 if (global) { 1880 if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) { 1881 e1000g_log(Adapter, CE_WARN, "Allocate rx data failed"); 1882 goto start_fail; 1883 } 1884 1885 /* Allocate dma resources for descriptors and buffers */ 1886 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) { 1887 e1000g_log(Adapter, CE_WARN, 1888 "Alloc DMA resources failed"); 1889 goto start_fail; 1890 } 1891 Adapter->rx_buffer_setup = B_FALSE; 1892 } 1893 1894 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) { 1895 if (e1000g_init(Adapter) != DDI_SUCCESS) { 1896 e1000g_log(Adapter, CE_WARN, 1897 "Adapter initialization failed"); 1898 goto start_fail; 1899 } 1900 } 1901 1902 /* Setup and initialize the transmit structures */ 1903 e1000g_tx_setup(Adapter); 1904 msec_delay(5); 1905 1906 /* Setup and initialize the receive structures */ 1907 e1000g_rx_setup(Adapter); 1908 msec_delay(5); 1909 1910 /* Restore the e1000g promiscuous mode */ 1911 e1000g_restore_promisc(Adapter); 1912 1913 e1000g_mask_interrupt(Adapter); 1914 1915 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 1916 1917 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1918 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1919 goto start_fail; 1920 } 1921 1922 return (DDI_SUCCESS); 1923 1924 start_fail: 1925 rx_data = Adapter->rx_ring->rx_data; 1926 1927 if (global) { 1928 e1000g_release_dma_resources(Adapter); 1929 e1000g_free_rx_pending_buffers(rx_data); 1930 e1000g_free_rx_data(rx_data); 1931 } 1932 1933 mutex_enter(&e1000g_nvm_lock); 1934 (void) e1000_reset_hw(&Adapter->shared); 1935 mutex_exit(&e1000g_nvm_lock); 1936 1937 return (DDI_FAILURE); 1938 } 1939 1940 static void 1941 e1000g_m_stop(void *arg) 1942 { 1943 struct e1000g 
*Adapter = (struct e1000g *)arg; 1944 1945 /* Drain tx sessions */ 1946 (void) e1000g_tx_drain(Adapter); 1947 1948 rw_enter(&Adapter->chip_lock, RW_WRITER); 1949 1950 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 1951 rw_exit(&Adapter->chip_lock); 1952 return; 1953 } 1954 Adapter->e1000g_state &= ~E1000G_STARTED; 1955 e1000g_stop(Adapter, B_TRUE); 1956 1957 rw_exit(&Adapter->chip_lock); 1958 1959 /* Disable and stop all the timers */ 1960 disable_watchdog_timer(Adapter); 1961 stop_link_timer(Adapter); 1962 stop_82547_timer(Adapter->tx_ring); 1963 } 1964 1965 static void 1966 e1000g_stop(struct e1000g *Adapter, boolean_t global) 1967 { 1968 private_devi_list_t *devi_node; 1969 e1000g_rx_data_t *rx_data; 1970 int result; 1971 1972 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT; 1973 1974 /* Stop the chip and release pending resources */ 1975 1976 /* Tell firmware driver is no longer in control */ 1977 e1000g_release_driver_control(&Adapter->shared); 1978 1979 e1000g_clear_all_interrupts(Adapter); 1980 1981 mutex_enter(&e1000g_nvm_lock); 1982 result = e1000_reset_hw(&Adapter->shared); 1983 mutex_exit(&e1000g_nvm_lock); 1984 1985 if (result != E1000_SUCCESS) { 1986 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1987 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1988 } 1989 1990 mutex_enter(&Adapter->link_lock); 1991 Adapter->link_complete = B_FALSE; 1992 mutex_exit(&Adapter->link_lock); 1993 1994 /* Release resources still held by the TX descriptors */ 1995 e1000g_tx_clean(Adapter); 1996 1997 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 1998 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1999 2000 /* Clean the pending rx jumbo packet fragment */ 2001 e1000g_rx_clean(Adapter); 2002 2003 if (global) { 2004 e1000g_release_dma_resources(Adapter); 2005 2006 mutex_enter(&e1000g_rx_detach_lock); 2007 rx_data = Adapter->rx_ring->rx_data; 2008 rx_data->flag |= E1000G_RX_STOPPED; 2009 2010 if (rx_data->pending_count == 0) { 2011 e1000g_free_rx_pending_buffers(rx_data); 2012 e1000g_free_rx_data(rx_data); 2013 } else { 2014 devi_node = rx_data->priv_devi_node; 2015 if (devi_node != NULL) 2016 atomic_inc_32(&devi_node->pending_rx_count); 2017 else 2018 atomic_inc_32(&Adapter->pending_rx_count); 2019 } 2020 mutex_exit(&e1000g_rx_detach_lock); 2021 } 2022 2023 if (Adapter->link_state != LINK_STATE_UNKNOWN) { 2024 Adapter->link_state = LINK_STATE_UNKNOWN; 2025 if (!Adapter->reset_flag) 2026 mac_link_update(Adapter->mh, Adapter->link_state); 2027 } 2028 } 2029 2030 static void 2031 e1000g_rx_clean(struct e1000g *Adapter) 2032 { 2033 e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data; 2034 2035 if (rx_data == NULL) 2036 return; 2037 2038 if (rx_data->rx_mblk != NULL) { 2039 freemsg(rx_data->rx_mblk); 2040 rx_data->rx_mblk = NULL; 2041 rx_data->rx_mblk_tail = NULL; 2042 rx_data->rx_mblk_len = 0; 2043 } 2044 } 2045 2046 static void 2047 e1000g_tx_clean(struct e1000g *Adapter) 2048 { 2049 e1000g_tx_ring_t *tx_ring; 2050 p_tx_sw_packet_t packet; 2051 mblk_t *mp; 2052 mblk_t *nmp; 2053 uint32_t packet_count; 2054 2055 tx_ring = Adapter->tx_ring; 2056 2057 /* 2058 * Here we don't need to protect the lists using 2059 * the usedlist_lock and freelist_lock, for they 2060 * have been protected by the chip_lock. 
2061 */ 2062 mp = NULL; 2063 nmp = NULL; 2064 packet_count = 0; 2065 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list); 2066 while (packet != NULL) { 2067 if (packet->mp != NULL) { 2068 /* Assemble the message chain */ 2069 if (mp == NULL) { 2070 mp = packet->mp; 2071 nmp = packet->mp; 2072 } else { 2073 nmp->b_next = packet->mp; 2074 nmp = packet->mp; 2075 } 2076 /* Disconnect the message from the sw packet */ 2077 packet->mp = NULL; 2078 } 2079 2080 e1000g_free_tx_swpkt(packet); 2081 packet_count++; 2082 2083 packet = (p_tx_sw_packet_t) 2084 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link); 2085 } 2086 2087 if (mp != NULL) 2088 freemsgchain(mp); 2089 2090 if (packet_count > 0) { 2091 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list); 2092 QUEUE_INIT_LIST(&tx_ring->used_list); 2093 2094 /* Setup TX descriptor pointers */ 2095 tx_ring->tbd_next = tx_ring->tbd_first; 2096 tx_ring->tbd_oldest = tx_ring->tbd_first; 2097 2098 /* Setup our HW Tx Head & Tail descriptor pointers */ 2099 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0); 2100 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0); 2101 } 2102 } 2103 2104 static boolean_t 2105 e1000g_tx_drain(struct e1000g *Adapter) 2106 { 2107 int i; 2108 boolean_t done; 2109 e1000g_tx_ring_t *tx_ring; 2110 2111 tx_ring = Adapter->tx_ring; 2112 2113 /* Allow up to 'wsdraintime' for pending xmit's to complete. */ 2114 for (i = 0; i < TX_DRAIN_TIME; i++) { 2115 mutex_enter(&tx_ring->usedlist_lock); 2116 done = IS_QUEUE_EMPTY(&tx_ring->used_list); 2117 mutex_exit(&tx_ring->usedlist_lock); 2118 2119 if (done) 2120 break; 2121 2122 msec_delay(1); 2123 } 2124 2125 return (done); 2126 } 2127 2128 static boolean_t 2129 e1000g_rx_drain(struct e1000g *Adapter) 2130 { 2131 int i; 2132 boolean_t done; 2133 2134 /* 2135 * Allow up to RX_DRAIN_TIME for pending received packets to complete. 
2136 */ 2137 for (i = 0; i < RX_DRAIN_TIME; i++) { 2138 done = (Adapter->pending_rx_count == 0); 2139 2140 if (done) 2141 break; 2142 2143 msec_delay(1); 2144 } 2145 2146 return (done); 2147 } 2148 2149 static boolean_t 2150 e1000g_reset_adapter(struct e1000g *Adapter) 2151 { 2152 /* Disable and stop all the timers */ 2153 disable_watchdog_timer(Adapter); 2154 stop_link_timer(Adapter); 2155 stop_82547_timer(Adapter->tx_ring); 2156 2157 rw_enter(&Adapter->chip_lock, RW_WRITER); 2158 2159 if (Adapter->stall_flag) { 2160 Adapter->stall_flag = B_FALSE; 2161 Adapter->reset_flag = B_TRUE; 2162 } 2163 2164 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 2165 rw_exit(&Adapter->chip_lock); 2166 return (B_TRUE); 2167 } 2168 2169 e1000g_stop(Adapter, B_FALSE); 2170 2171 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) { 2172 rw_exit(&Adapter->chip_lock); 2173 e1000g_log(Adapter, CE_WARN, "Reset failed"); 2174 return (B_FALSE); 2175 } 2176 2177 rw_exit(&Adapter->chip_lock); 2178 2179 /* Enable and start the watchdog timer */ 2180 enable_watchdog_timer(Adapter); 2181 2182 return (B_TRUE); 2183 } 2184 2185 boolean_t 2186 e1000g_global_reset(struct e1000g *Adapter) 2187 { 2188 /* Disable and stop all the timers */ 2189 disable_watchdog_timer(Adapter); 2190 stop_link_timer(Adapter); 2191 stop_82547_timer(Adapter->tx_ring); 2192 2193 rw_enter(&Adapter->chip_lock, RW_WRITER); 2194 2195 e1000g_stop(Adapter, B_TRUE); 2196 2197 Adapter->init_count = 0; 2198 2199 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) { 2200 rw_exit(&Adapter->chip_lock); 2201 e1000g_log(Adapter, CE_WARN, "Reset failed"); 2202 return (B_FALSE); 2203 } 2204 2205 rw_exit(&Adapter->chip_lock); 2206 2207 /* Enable and start the watchdog timer */ 2208 enable_watchdog_timer(Adapter); 2209 2210 return (B_TRUE); 2211 } 2212 2213 /* 2214 * e1000g_intr_pciexpress - ISR for PCI Express chipsets 2215 * 2216 * This interrupt service routine is for PCI-Express adapters. 2217 * The ICR contents is valid only when the E1000_ICR_INT_ASSERTED 2218 * bit is set. 2219 */ 2220 static uint_t 2221 e1000g_intr_pciexpress(caddr_t arg) 2222 { 2223 struct e1000g *Adapter; 2224 uint32_t icr; 2225 2226 Adapter = (struct e1000g *)(uintptr_t)arg; 2227 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 2228 2229 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2230 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2231 return (DDI_INTR_CLAIMED); 2232 } 2233 2234 if (icr & E1000_ICR_INT_ASSERTED) { 2235 /* 2236 * E1000_ICR_INT_ASSERTED bit was set: 2237 * Read(Clear) the ICR, claim this interrupt, 2238 * look for work to do. 2239 */ 2240 e1000g_intr_work(Adapter, icr); 2241 return (DDI_INTR_CLAIMED); 2242 } else { 2243 /* 2244 * E1000_ICR_INT_ASSERTED bit was not set: 2245 * Don't claim this interrupt, return immediately. 2246 */ 2247 return (DDI_INTR_UNCLAIMED); 2248 } 2249 } 2250 2251 /* 2252 * e1000g_intr - ISR for PCI/PCI-X chipsets 2253 * 2254 * This interrupt service routine is for PCI/PCI-X adapters. 2255 * We check the ICR contents no matter the E1000_ICR_INT_ASSERTED 2256 * bit is set or not. 
2257 */ 2258 static uint_t 2259 e1000g_intr(caddr_t arg) 2260 { 2261 struct e1000g *Adapter; 2262 uint32_t icr; 2263 2264 Adapter = (struct e1000g *)(uintptr_t)arg; 2265 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 2266 2267 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2268 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2269 return (DDI_INTR_CLAIMED); 2270 } 2271 2272 if (icr) { 2273 /* 2274 * Any bit was set in ICR: 2275 * Read(Clear) the ICR, claim this interrupt, 2276 * look for work to do. 2277 */ 2278 e1000g_intr_work(Adapter, icr); 2279 return (DDI_INTR_CLAIMED); 2280 } else { 2281 /* 2282 * No bit was set in ICR: 2283 * Don't claim this interrupt, return immediately. 2284 */ 2285 return (DDI_INTR_UNCLAIMED); 2286 } 2287 } 2288 2289 /* 2290 * e1000g_intr_work - actual processing of ISR 2291 * 2292 * Read(clear) the ICR contents and call appropriate interrupt 2293 * processing routines. 2294 */ 2295 static void 2296 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr) 2297 { 2298 struct e1000_hw *hw; 2299 hw = &Adapter->shared; 2300 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 2301 2302 Adapter->rx_pkt_cnt = 0; 2303 Adapter->tx_pkt_cnt = 0; 2304 2305 rw_enter(&Adapter->chip_lock, RW_READER); 2306 2307 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2308 rw_exit(&Adapter->chip_lock); 2309 return; 2310 } 2311 /* 2312 * Here we need to check the "e1000g_state" flag within the chip_lock to 2313 * ensure the receive routine will not execute when the adapter is 2314 * being reset. 2315 */ 2316 if (!(Adapter->e1000g_state & E1000G_STARTED)) { 2317 rw_exit(&Adapter->chip_lock); 2318 return; 2319 } 2320 2321 if (icr & E1000_ICR_RXT0) { 2322 mblk_t *mp = NULL; 2323 mblk_t *tail = NULL; 2324 e1000g_rx_ring_t *rx_ring; 2325 2326 rx_ring = Adapter->rx_ring; 2327 mutex_enter(&rx_ring->rx_lock); 2328 /* 2329 * Sometimes with legacy interrupts, it possible that 2330 * there is a single interrupt for Rx/Tx. In which 2331 * case, if poll flag is set, we shouldn't really 2332 * be doing Rx processing. 2333 */ 2334 if (!rx_ring->poll_flag) 2335 mp = e1000g_receive(rx_ring, &tail, 2336 E1000G_CHAIN_NO_LIMIT); 2337 mutex_exit(&rx_ring->rx_lock); 2338 rw_exit(&Adapter->chip_lock); 2339 if (mp != NULL) 2340 mac_rx_ring(Adapter->mh, rx_ring->mrh, 2341 mp, rx_ring->ring_gen_num); 2342 } else 2343 rw_exit(&Adapter->chip_lock); 2344 2345 if (icr & E1000_ICR_TXDW) { 2346 if (!Adapter->tx_intr_enable) 2347 e1000g_clear_tx_interrupt(Adapter); 2348 2349 /* Recycle the tx descriptors */ 2350 rw_enter(&Adapter->chip_lock, RW_READER); 2351 (void) e1000g_recycle(tx_ring); 2352 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr); 2353 rw_exit(&Adapter->chip_lock); 2354 2355 if (tx_ring->resched_needed && 2356 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) { 2357 tx_ring->resched_needed = B_FALSE; 2358 mac_tx_update(Adapter->mh); 2359 E1000G_STAT(tx_ring->stat_reschedule); 2360 } 2361 } 2362 2363 /* 2364 * The Receive Sequence errors RXSEQ and the link status change LSC 2365 * are checked to detect that the cable has been pulled out. For 2366 * the Wiseman 2.0 silicon, the receive sequence errors interrupt 2367 * are an indication that cable is not connected. 
2368 */ 2369 if ((icr & E1000_ICR_RXSEQ) || 2370 (icr & E1000_ICR_LSC) || 2371 (icr & E1000_ICR_GPI_EN1)) { 2372 boolean_t link_changed; 2373 timeout_id_t tid = 0; 2374 2375 stop_watchdog_timer(Adapter); 2376 2377 rw_enter(&Adapter->chip_lock, RW_WRITER); 2378 2379 /* 2380 * Because we got a link-status-change interrupt, force 2381 * e1000_check_for_link() to look at phy 2382 */ 2383 Adapter->shared.mac.get_link_status = B_TRUE; 2384 2385 /* e1000g_link_check takes care of link status change */ 2386 link_changed = e1000g_link_check(Adapter); 2387 2388 /* Get new phy state */ 2389 e1000g_get_phy_state(Adapter); 2390 2391 /* 2392 * If the link timer has not timed out, we'll not notify 2393 * the upper layer with any link state until the link is up. 2394 */ 2395 if (link_changed && !Adapter->link_complete) { 2396 if (Adapter->link_state == LINK_STATE_UP) { 2397 mutex_enter(&Adapter->link_lock); 2398 Adapter->link_complete = B_TRUE; 2399 tid = Adapter->link_tid; 2400 Adapter->link_tid = 0; 2401 mutex_exit(&Adapter->link_lock); 2402 } else { 2403 link_changed = B_FALSE; 2404 } 2405 } 2406 rw_exit(&Adapter->chip_lock); 2407 2408 if (link_changed) { 2409 if (tid != 0) 2410 (void) untimeout(tid); 2411 2412 /* 2413 * Workaround for esb2. Data stuck in fifo on a link 2414 * down event. Stop receiver here and reset in watchdog. 2415 */ 2416 if ((Adapter->link_state == LINK_STATE_DOWN) && 2417 (Adapter->shared.mac.type == e1000_80003es2lan)) { 2418 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL); 2419 E1000_WRITE_REG(hw, E1000_RCTL, 2420 rctl & ~E1000_RCTL_EN); 2421 e1000g_log(Adapter, CE_WARN, 2422 "ESB2 receiver disabled"); 2423 Adapter->esb2_workaround = B_TRUE; 2424 } 2425 if (!Adapter->reset_flag) 2426 mac_link_update(Adapter->mh, 2427 Adapter->link_state); 2428 if (Adapter->link_state == LINK_STATE_UP) 2429 Adapter->reset_flag = B_FALSE; 2430 } 2431 2432 start_watchdog_timer(Adapter); 2433 } 2434 } 2435 2436 static void 2437 e1000g_init_unicst(struct e1000g *Adapter) 2438 { 2439 struct e1000_hw *hw; 2440 int slot; 2441 2442 hw = &Adapter->shared; 2443 2444 if (Adapter->init_count == 0) { 2445 /* Initialize the multiple unicast addresses */ 2446 Adapter->unicst_total = min(hw->mac.rar_entry_count, 2447 MAX_NUM_UNICAST_ADDRESSES); 2448 2449 /* 2450 * The common code does not correctly calculate the number of 2451 * rar's that could be reserved by firmware for the pch_lpt 2452 * macs. The interface has one primary rar, and 11 additional 2453 * ones. Those 11 additional ones are not always available. 2454 * According to the datasheet, we need to check a few of the 2455 * bits set in the FWSM register. If the value is zero, 2456 * everything is available. If the value is 1, none of the 2457 * additional registers are available. If the value is 2-7, only 2458 * that number are available. 
2459 */ 2460 if (hw->mac.type == e1000_pch_lpt) { 2461 uint32_t locked, rar; 2462 2463 locked = E1000_READ_REG(hw, E1000_FWSM) & 2464 E1000_FWSM_WLOCK_MAC_MASK; 2465 locked >>= E1000_FWSM_WLOCK_MAC_SHIFT; 2466 rar = 1; 2467 if (locked == 0) 2468 rar += 11; 2469 else if (locked == 1) 2470 rar += 0; 2471 else 2472 rar += locked; 2473 Adapter->unicst_total = min(rar, 2474 MAX_NUM_UNICAST_ADDRESSES); 2475 } 2476 2477 /* Workaround for an erratum of the 82571 chipset */ 2478 if ((hw->mac.type == e1000_82571) && 2479 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2480 Adapter->unicst_total--; 2481 2482 /* VMware doesn't support multiple mac addresses properly */ 2483 if (hw->subsystem_vendor_id == 0x15ad) 2484 Adapter->unicst_total = 1; 2485 2486 Adapter->unicst_avail = Adapter->unicst_total; 2487 2488 for (slot = 0; slot < Adapter->unicst_total; slot++) { 2489 /* Clear both the flag and MAC address */ 2490 Adapter->unicst_addr[slot].reg.high = 0; 2491 Adapter->unicst_addr[slot].reg.low = 0; 2492 } 2493 } else { 2494 /* Workaround for an erratum of the 82571 chipset */ 2495 if ((hw->mac.type == e1000_82571) && 2496 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2497 e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY); 2498 2499 /* Re-configure the RAR registers */ 2500 for (slot = 0; slot < Adapter->unicst_total; slot++) 2501 if (Adapter->unicst_addr[slot].mac.set == 1) 2502 e1000_rar_set(hw, 2503 Adapter->unicst_addr[slot].mac.addr, slot); 2504 } 2505 2506 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 2507 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2508 } 2509 2510 static int 2511 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr, 2512 int slot) 2513 { 2514 struct e1000_hw *hw; 2515 2516 hw = &Adapter->shared; 2517 2518 /* 2519 * The first revision of Wiseman silicon (rev 2.0) has an erratum 2520 * that requires the receiver to be in reset when any of the 2521 * receive address registers (RAR regs) are accessed. The first 2522 * rev of Wiseman silicon also requires MWI to be disabled when 2523 * a global reset or a receive reset is issued. So before we 2524 * initialize the RARs, we check the rev of the Wiseman controller 2525 * and work around any necessary HW errata.
2526 */ 2527 if ((hw->mac.type == e1000_82542) && 2528 (hw->revision_id == E1000_REVISION_2)) { 2529 e1000_pci_clear_mwi(hw); 2530 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); 2531 msec_delay(5); 2532 } 2533 if (mac_addr == NULL) { 2534 E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0); 2535 E1000_WRITE_FLUSH(hw); 2536 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0); 2537 E1000_WRITE_FLUSH(hw); 2538 /* Clear both the flag and MAC address */ 2539 Adapter->unicst_addr[slot].reg.high = 0; 2540 Adapter->unicst_addr[slot].reg.low = 0; 2541 } else { 2542 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr, 2543 ETHERADDRL); 2544 e1000_rar_set(hw, (uint8_t *)mac_addr, slot); 2545 Adapter->unicst_addr[slot].mac.set = 1; 2546 } 2547 2548 /* Workaround for an erratum of the 82571 chipset */ 2549 if (slot == 0) { 2550 if ((hw->mac.type == e1000_82571) && 2551 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2552 if (mac_addr == NULL) { 2553 E1000_WRITE_REG_ARRAY(hw, E1000_RA, 2554 slot << 1, 0); 2555 E1000_WRITE_FLUSH(hw); 2556 E1000_WRITE_REG_ARRAY(hw, E1000_RA, 2557 (slot << 1) + 1, 0); 2558 E1000_WRITE_FLUSH(hw); 2559 } else { 2560 e1000_rar_set(hw, (uint8_t *)mac_addr, 2561 LAST_RAR_ENTRY); 2562 } 2563 } 2564 2565 /* 2566 * If we are using Wiseman rev 2.0 silicon, we will have previously 2567 * put the receiver in reset, and disabled MWI, to work around some 2568 * HW errata. Now we should take the receiver out of reset, and 2569 * re-enable MWI if it was previously enabled by the PCI BIOS. 2570 */ 2571 if ((hw->mac.type == e1000_82542) && 2572 (hw->revision_id == E1000_REVISION_2)) { 2573 E1000_WRITE_REG(hw, E1000_RCTL, 0); 2574 msec_delay(1); 2575 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 2576 e1000_pci_set_mwi(hw); 2577 e1000g_rx_setup(Adapter); 2578 } 2579 2580 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2581 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2582 return (EIO); 2583 } 2584 2585 return (0); 2586 } 2587 2588 static int 2589 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr) 2590 { 2591 struct e1000_hw *hw = &Adapter->shared; 2592 struct ether_addr *newtable; 2593 size_t new_len; 2594 size_t old_len; 2595 int res = 0; 2596 2597 if ((multiaddr[0] & 01) == 0) { 2598 res = EINVAL; 2599 e1000g_log(Adapter, CE_WARN, "Illegal multicast address"); 2600 goto done; 2601 } 2602 2603 if (Adapter->mcast_count >= Adapter->mcast_max_num) { 2604 res = ENOENT; 2605 e1000g_log(Adapter, CE_WARN, 2606 "Adapter requested more than %d mcast addresses", 2607 Adapter->mcast_max_num); 2608 goto done; 2609 } 2610 2611 2612 if (Adapter->mcast_count == Adapter->mcast_alloc_count) { 2613 old_len = Adapter->mcast_alloc_count * 2614 sizeof (struct ether_addr); 2615 new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) * 2616 sizeof (struct ether_addr); 2617 2618 newtable = kmem_alloc(new_len, KM_NOSLEEP); 2619 if (newtable == NULL) { 2620 res = ENOMEM; 2621 e1000g_log(Adapter, CE_WARN, 2622 "Not enough memory to alloc mcast table"); 2623 goto done; 2624 } 2625 2626 if (Adapter->mcast_table != NULL) { 2627 bcopy(Adapter->mcast_table, newtable, old_len); 2628 kmem_free(Adapter->mcast_table, old_len); 2629 } 2630 Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE; 2631 Adapter->mcast_table = newtable; 2632 } 2633 2634 bcopy(multiaddr, 2635 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL); 2636 Adapter->mcast_count++; 2637 2638 /* 2639 * Update the MC table in the hardware 2640 */ 2641 e1000g_clear_interrupt(Adapter); 2642 2643
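/* Leave device interrupts disabled (cleared just above) while the hardware multicast table is rewritten; the e1000g_mask_interrupt() call below re-enables them once the update is complete. */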
e1000_update_mc_addr_list(hw, 2644 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 2645 2646 e1000g_mask_interrupt(Adapter); 2647 2648 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2649 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2650 res = EIO; 2651 } 2652 2653 done: 2654 return (res); 2655 } 2656 2657 static int 2658 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr) 2659 { 2660 struct e1000_hw *hw = &Adapter->shared; 2661 struct ether_addr *newtable; 2662 size_t new_len; 2663 size_t old_len; 2664 unsigned i; 2665 2666 for (i = 0; i < Adapter->mcast_count; i++) { 2667 if (bcmp(multiaddr, &Adapter->mcast_table[i], 2668 ETHERADDRL) == 0) { 2669 for (i++; i < Adapter->mcast_count; i++) { 2670 Adapter->mcast_table[i - 1] = 2671 Adapter->mcast_table[i]; 2672 } 2673 Adapter->mcast_count--; 2674 break; 2675 } 2676 } 2677 2678 if ((Adapter->mcast_alloc_count - Adapter->mcast_count) > 2679 MCAST_ALLOC_SIZE) { 2680 old_len = Adapter->mcast_alloc_count * 2681 sizeof (struct ether_addr); 2682 new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) * 2683 sizeof (struct ether_addr); 2684 2685 newtable = kmem_alloc(new_len, KM_NOSLEEP); 2686 if (newtable != NULL) { 2687 bcopy(Adapter->mcast_table, newtable, new_len); 2688 kmem_free(Adapter->mcast_table, old_len); 2689 2690 Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE; 2691 Adapter->mcast_table = newtable; 2692 } 2693 } 2694 2695 /* 2696 * Update the MC table in the hardware 2697 */ 2698 e1000g_clear_interrupt(Adapter); 2699 2700 e1000_update_mc_addr_list(hw, 2701 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count); 2702 2703 e1000g_mask_interrupt(Adapter); 2704 2705 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2706 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2707 return (EIO); 2708 } 2709 2710 return (0); 2711 } 2712 2713 static void 2714 e1000g_release_multicast(struct e1000g *Adapter) 2715 { 2716 if (Adapter->mcast_table != NULL) { 2717 kmem_free(Adapter->mcast_table, 2718 Adapter->mcast_alloc_count * sizeof (struct ether_addr)); 2719 Adapter->mcast_table = NULL; 2720 } 2721 } 2722 2723 int 2724 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr) 2725 { 2726 struct e1000g *Adapter = (struct e1000g *)arg; 2727 int result; 2728 2729 rw_enter(&Adapter->chip_lock, RW_WRITER); 2730 2731 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2732 result = ECANCELED; 2733 goto done; 2734 } 2735 2736 result = (add) ? 
multicst_add(Adapter, addr) 2737 : multicst_remove(Adapter, addr); 2738 2739 done: 2740 rw_exit(&Adapter->chip_lock); 2741 return (result); 2742 2743 } 2744 2745 int 2746 e1000g_m_promisc(void *arg, boolean_t on) 2747 { 2748 struct e1000g *Adapter = (struct e1000g *)arg; 2749 uint32_t rctl; 2750 2751 rw_enter(&Adapter->chip_lock, RW_WRITER); 2752 2753 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2754 rw_exit(&Adapter->chip_lock); 2755 return (ECANCELED); 2756 } 2757 2758 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL); 2759 2760 if (on) 2761 rctl |= 2762 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM); 2763 else 2764 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE)); 2765 2766 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl); 2767 2768 Adapter->e1000g_promisc = on; 2769 2770 rw_exit(&Adapter->chip_lock); 2771 2772 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2773 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2774 return (EIO); 2775 } 2776 2777 return (0); 2778 } 2779 2780 /* 2781 * Entry points to enable and disable interrupts at the granularity of 2782 * a group. 2783 * Turns the poll_mode for the whole adapter on and off to enable or 2784 * override the ring-level polling control over the hardware interrupts. 2785 */ 2786 static int 2787 e1000g_rx_group_intr_enable(mac_intr_handle_t arg) 2788 { 2789 struct e1000g *adapter = (struct e1000g *)arg; 2790 e1000g_rx_ring_t *rx_ring = adapter->rx_ring; 2791 2792 /* 2793 * Later interrupts at the granularity of this ring will 2794 * invoke mac_rx() with NULL, indicating the need for another 2795 * software classification. 2796 * We have a single ring usable per adapter now, so we only need to 2797 * reset the rx handle for that one. 2798 * When more RX rings can be used, we should update each one of them. 2799 */ 2800 mutex_enter(&rx_ring->rx_lock); 2801 rx_ring->mrh = NULL; 2802 adapter->poll_mode = B_FALSE; 2803 mutex_exit(&rx_ring->rx_lock); 2804 return (0); 2805 } 2806 2807 static int 2808 e1000g_rx_group_intr_disable(mac_intr_handle_t arg) 2809 { 2810 struct e1000g *adapter = (struct e1000g *)arg; 2811 e1000g_rx_ring_t *rx_ring = adapter->rx_ring; 2812 2813 mutex_enter(&rx_ring->rx_lock); 2814 2815 /* 2816 * Later interrupts at the granularity of this ring will 2817 * invoke mac_rx() with the handle for this ring. 2818 */ 2819 adapter->poll_mode = B_TRUE; 2820 rx_ring->mrh = rx_ring->mrh_init; 2821 mutex_exit(&rx_ring->rx_lock); 2822 return (0); 2823 } 2824 2825 /* 2826 * Entry points to enable and disable interrupts at the granularity of 2827 * a ring. 2828 * The adapter's poll_mode controls whether we actually proceed with hardware 2829 * interrupt toggling.
2830 */ 2831 static int 2832 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh) 2833 { 2834 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh; 2835 struct e1000g *adapter = rx_ring->adapter; 2836 struct e1000_hw *hw = &adapter->shared; 2837 uint32_t intr_mask; 2838 2839 rw_enter(&adapter->chip_lock, RW_READER); 2840 2841 if (adapter->e1000g_state & E1000G_SUSPENDED) { 2842 rw_exit(&adapter->chip_lock); 2843 return (0); 2844 } 2845 2846 mutex_enter(&rx_ring->rx_lock); 2847 rx_ring->poll_flag = 0; 2848 mutex_exit(&rx_ring->rx_lock); 2849 2850 /* Rx interrupt enabling for MSI and legacy */ 2851 intr_mask = E1000_READ_REG(hw, E1000_IMS); 2852 intr_mask |= E1000_IMS_RXT0; 2853 E1000_WRITE_REG(hw, E1000_IMS, intr_mask); 2854 E1000_WRITE_FLUSH(hw); 2855 2856 /* Trigger a Rx interrupt to check Rx ring */ 2857 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 2858 E1000_WRITE_FLUSH(hw); 2859 2860 rw_exit(&adapter->chip_lock); 2861 return (0); 2862 } 2863 2864 static int 2865 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh) 2866 { 2867 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh; 2868 struct e1000g *adapter = rx_ring->adapter; 2869 struct e1000_hw *hw = &adapter->shared; 2870 2871 rw_enter(&adapter->chip_lock, RW_READER); 2872 2873 if (adapter->e1000g_state & E1000G_SUSPENDED) { 2874 rw_exit(&adapter->chip_lock); 2875 return (0); 2876 } 2877 mutex_enter(&rx_ring->rx_lock); 2878 rx_ring->poll_flag = 1; 2879 mutex_exit(&rx_ring->rx_lock); 2880 2881 /* Rx interrupt disabling for MSI and legacy */ 2882 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0); 2883 E1000_WRITE_FLUSH(hw); 2884 2885 rw_exit(&adapter->chip_lock); 2886 return (0); 2887 } 2888 2889 /* 2890 * e1000g_unicst_find - Find the slot for the specified unicast address 2891 */ 2892 static int 2893 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr) 2894 { 2895 int slot; 2896 2897 for (slot = 0; slot < Adapter->unicst_total; slot++) { 2898 if ((Adapter->unicst_addr[slot].mac.set == 1) && 2899 (bcmp(Adapter->unicst_addr[slot].mac.addr, 2900 mac_addr, ETHERADDRL) == 0)) 2901 return (slot); 2902 } 2903 2904 return (-1); 2905 } 2906 2907 /* 2908 * Entry points to add and remove a MAC address to a ring group. 2909 * The caller takes care of adding and removing the MAC addresses 2910 * to the filter via these two routines. 
2911 */ 2912 2913 static int 2914 e1000g_addmac(void *arg, const uint8_t *mac_addr) 2915 { 2916 struct e1000g *Adapter = (struct e1000g *)arg; 2917 int slot, err; 2918 2919 rw_enter(&Adapter->chip_lock, RW_WRITER); 2920 2921 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2922 rw_exit(&Adapter->chip_lock); 2923 return (ECANCELED); 2924 } 2925 2926 if (e1000g_unicst_find(Adapter, mac_addr) != -1) { 2927 /* The same address is already in a slot */ 2928 rw_exit(&Adapter->chip_lock); 2929 return (0); 2930 } 2931 2932 if (Adapter->unicst_avail == 0) { 2933 /* no slots available */ 2934 rw_exit(&Adapter->chip_lock); 2935 return (ENOSPC); 2936 } 2937 2938 /* Search for a free slot */ 2939 for (slot = 0; slot < Adapter->unicst_total; slot++) { 2940 if (Adapter->unicst_addr[slot].mac.set == 0) 2941 break; 2942 } 2943 ASSERT(slot < Adapter->unicst_total); 2944 2945 err = e1000g_unicst_set(Adapter, mac_addr, slot); 2946 if (err == 0) 2947 Adapter->unicst_avail--; 2948 2949 rw_exit(&Adapter->chip_lock); 2950 2951 return (err); 2952 } 2953 2954 static int 2955 e1000g_remmac(void *arg, const uint8_t *mac_addr) 2956 { 2957 struct e1000g *Adapter = (struct e1000g *)arg; 2958 int slot, err; 2959 2960 rw_enter(&Adapter->chip_lock, RW_WRITER); 2961 2962 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 2963 rw_exit(&Adapter->chip_lock); 2964 return (ECANCELED); 2965 } 2966 2967 slot = e1000g_unicst_find(Adapter, mac_addr); 2968 if (slot == -1) { 2969 rw_exit(&Adapter->chip_lock); 2970 return (EINVAL); 2971 } 2972 2973 ASSERT(Adapter->unicst_addr[slot].mac.set); 2974 2975 /* Clear this slot */ 2976 err = e1000g_unicst_set(Adapter, NULL, slot); 2977 if (err == 0) 2978 Adapter->unicst_avail++; 2979 2980 rw_exit(&Adapter->chip_lock); 2981 2982 return (err); 2983 } 2984 2985 static int 2986 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 2987 { 2988 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh; 2989 2990 mutex_enter(&rx_ring->rx_lock); 2991 rx_ring->ring_gen_num = mr_gen_num; 2992 mutex_exit(&rx_ring->rx_lock); 2993 return (0); 2994 } 2995 2996 /* 2997 * Callback function for the MAC layer to register all rings. 2998 * 2999 * The hardware supports a single group with currently only one ring 3000 * available. 3001 * Though not offering virtualization ability per se, exposing the 3002 * group/ring still enables the polling and interrupt toggling. 3003 */ 3004 /* ARGSUSED */ 3005 void 3006 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index, 3007 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 3008 { 3009 struct e1000g *Adapter = (struct e1000g *)arg; 3010 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring; 3011 mac_intr_t *mintr; 3012 3013 /* 3014 * We advertised only RX group/rings, so the MAC framework shouldn't 3015 * ask for anything else.
3016 */ 3017 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0); 3018 3019 rx_ring->mrh = rx_ring->mrh_init = rh; 3020 infop->mri_driver = (mac_ring_driver_t)rx_ring; 3021 infop->mri_start = e1000g_ring_start; 3022 infop->mri_stop = NULL; 3023 infop->mri_poll = e1000g_poll_ring; 3024 infop->mri_stat = e1000g_rx_ring_stat; 3025 3026 /* Ring level interrupts */ 3027 mintr = &infop->mri_intr; 3028 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 3029 mintr->mi_enable = e1000g_rx_ring_intr_enable; 3030 mintr->mi_disable = e1000g_rx_ring_intr_disable; 3031 if (Adapter->msi_enable) 3032 mintr->mi_ddi_handle = Adapter->htable[0]; 3033 } 3034 3035 /* ARGSUSED */ 3036 static void 3037 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index, 3038 mac_group_info_t *infop, mac_group_handle_t gh) 3039 { 3040 struct e1000g *Adapter = (struct e1000g *)arg; 3041 mac_intr_t *mintr; 3042 3043 /* 3044 * We advertised a single RX ring. Getting a request for anything else 3045 * signifies a bug in the MAC framework. 3046 */ 3047 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0); 3048 3049 Adapter->rx_group = gh; 3050 3051 infop->mgi_driver = (mac_group_driver_t)Adapter; 3052 infop->mgi_start = NULL; 3053 infop->mgi_stop = NULL; 3054 infop->mgi_addmac = e1000g_addmac; 3055 infop->mgi_remmac = e1000g_remmac; 3056 infop->mgi_count = 1; 3057 3058 /* Group level interrupts */ 3059 mintr = &infop->mgi_intr; 3060 mintr->mi_handle = (mac_intr_handle_t)Adapter; 3061 mintr->mi_enable = e1000g_rx_group_intr_enable; 3062 mintr->mi_disable = e1000g_rx_group_intr_disable; 3063 } 3064 3065 static boolean_t 3066 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 3067 { 3068 struct e1000g *Adapter = (struct e1000g *)arg; 3069 3070 switch (cap) { 3071 case MAC_CAPAB_HCKSUM: { 3072 uint32_t *txflags = cap_data; 3073 3074 if (Adapter->tx_hcksum_enable) 3075 *txflags = HCKSUM_IPHDRCKSUM | 3076 HCKSUM_INET_PARTIAL; 3077 else 3078 return (B_FALSE); 3079 break; 3080 } 3081 3082 case MAC_CAPAB_LSO: { 3083 mac_capab_lso_t *cap_lso = cap_data; 3084 3085 if (Adapter->lso_enable) { 3086 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4; 3087 cap_lso->lso_basic_tcp_ipv4.lso_max = 3088 E1000_LSO_MAXLEN; 3089 } else 3090 return (B_FALSE); 3091 break; 3092 } 3093 case MAC_CAPAB_RINGS: { 3094 mac_capab_rings_t *cap_rings = cap_data; 3095 3096 /* No TX rings exposed yet */ 3097 if (cap_rings->mr_type != MAC_RING_TYPE_RX) 3098 return (B_FALSE); 3099 3100 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC; 3101 cap_rings->mr_rnum = 1; 3102 cap_rings->mr_gnum = 1; 3103 cap_rings->mr_rget = e1000g_fill_ring; 3104 cap_rings->mr_gget = e1000g_fill_group; 3105 break; 3106 } 3107 default: 3108 return (B_FALSE); 3109 } 3110 return (B_TRUE); 3111 } 3112 3113 static boolean_t 3114 e1000g_param_locked(mac_prop_id_t pr_num) 3115 { 3116 /* 3117 * All en_* parameters are locked (read-only) while 3118 * the device is in any sort of loopback mode ... 
3119 */ 3120 switch (pr_num) { 3121 case MAC_PROP_EN_1000FDX_CAP: 3122 case MAC_PROP_EN_1000HDX_CAP: 3123 case MAC_PROP_EN_100FDX_CAP: 3124 case MAC_PROP_EN_100HDX_CAP: 3125 case MAC_PROP_EN_10FDX_CAP: 3126 case MAC_PROP_EN_10HDX_CAP: 3127 case MAC_PROP_AUTONEG: 3128 case MAC_PROP_FLOWCTRL: 3129 return (B_TRUE); 3130 } 3131 return (B_FALSE); 3132 } 3133 3134 /* 3135 * callback function for set/get of properties 3136 */ 3137 static int 3138 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3139 uint_t pr_valsize, const void *pr_val) 3140 { 3141 struct e1000g *Adapter = arg; 3142 struct e1000_hw *hw = &Adapter->shared; 3143 struct e1000_fc_info *fc = &Adapter->shared.fc; 3144 int err = 0; 3145 link_flowctrl_t flowctrl; 3146 uint32_t cur_mtu, new_mtu; 3147 3148 rw_enter(&Adapter->chip_lock, RW_WRITER); 3149 3150 if (Adapter->e1000g_state & E1000G_SUSPENDED) { 3151 rw_exit(&Adapter->chip_lock); 3152 return (ECANCELED); 3153 } 3154 3155 if (Adapter->loopback_mode != E1000G_LB_NONE && 3156 e1000g_param_locked(pr_num)) { 3157 /* 3158 * All en_* parameters are locked (read-only) 3159 * while the device is in any sort of loopback mode. 3160 */ 3161 rw_exit(&Adapter->chip_lock); 3162 return (EBUSY); 3163 } 3164 3165 switch (pr_num) { 3166 case MAC_PROP_EN_1000FDX_CAP: 3167 if (hw->phy.media_type != e1000_media_type_copper) { 3168 err = ENOTSUP; 3169 break; 3170 } 3171 Adapter->param_en_1000fdx = *(uint8_t *)pr_val; 3172 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val; 3173 goto reset; 3174 case MAC_PROP_EN_100FDX_CAP: 3175 if (hw->phy.media_type != e1000_media_type_copper) { 3176 err = ENOTSUP; 3177 break; 3178 } 3179 Adapter->param_en_100fdx = *(uint8_t *)pr_val; 3180 Adapter->param_adv_100fdx = *(uint8_t *)pr_val; 3181 goto reset; 3182 case MAC_PROP_EN_100HDX_CAP: 3183 if (hw->phy.media_type != e1000_media_type_copper) { 3184 err = ENOTSUP; 3185 break; 3186 } 3187 Adapter->param_en_100hdx = *(uint8_t *)pr_val; 3188 Adapter->param_adv_100hdx = *(uint8_t *)pr_val; 3189 goto reset; 3190 case MAC_PROP_EN_10FDX_CAP: 3191 if (hw->phy.media_type != e1000_media_type_copper) { 3192 err = ENOTSUP; 3193 break; 3194 } 3195 Adapter->param_en_10fdx = *(uint8_t *)pr_val; 3196 Adapter->param_adv_10fdx = *(uint8_t *)pr_val; 3197 goto reset; 3198 case MAC_PROP_EN_10HDX_CAP: 3199 if (hw->phy.media_type != e1000_media_type_copper) { 3200 err = ENOTSUP; 3201 break; 3202 } 3203 Adapter->param_en_10hdx = *(uint8_t *)pr_val; 3204 Adapter->param_adv_10hdx = *(uint8_t *)pr_val; 3205 goto reset; 3206 case MAC_PROP_AUTONEG: 3207 if (hw->phy.media_type != e1000_media_type_copper) { 3208 err = ENOTSUP; 3209 break; 3210 } 3211 Adapter->param_adv_autoneg = *(uint8_t *)pr_val; 3212 goto reset; 3213 case MAC_PROP_FLOWCTRL: 3214 fc->send_xon = B_TRUE; 3215 bcopy(pr_val, &flowctrl, sizeof (flowctrl)); 3216 3217 switch (flowctrl) { 3218 default: 3219 err = EINVAL; 3220 break; 3221 case LINK_FLOWCTRL_NONE: 3222 fc->requested_mode = e1000_fc_none; 3223 break; 3224 case LINK_FLOWCTRL_RX: 3225 fc->requested_mode = e1000_fc_rx_pause; 3226 break; 3227 case LINK_FLOWCTRL_TX: 3228 fc->requested_mode = e1000_fc_tx_pause; 3229 break; 3230 case LINK_FLOWCTRL_BI: 3231 fc->requested_mode = e1000_fc_full; 3232 break; 3233 } 3234 reset: 3235 if (err == 0) { 3236 /* check PCH limits & reset the link */ 3237 e1000g_pch_limits(Adapter); 3238 if (e1000g_reset_link(Adapter) != DDI_SUCCESS) 3239 err = EINVAL; 3240 } 3241 break; 3242 case MAC_PROP_ADV_1000FDX_CAP: 3243 case MAC_PROP_ADV_1000HDX_CAP: 3244 case MAC_PROP_ADV_100FDX_CAP: 
3245 case MAC_PROP_ADV_100HDX_CAP: 3246 case MAC_PROP_ADV_10FDX_CAP: 3247 case MAC_PROP_ADV_10HDX_CAP: 3248 case MAC_PROP_EN_1000HDX_CAP: 3249 case MAC_PROP_STATUS: 3250 case MAC_PROP_SPEED: 3251 case MAC_PROP_DUPLEX: 3252 err = ENOTSUP; /* read-only prop. Can't set this. */ 3253 break; 3254 case MAC_PROP_MTU: 3255 /* adapter must be stopped for an MTU change */ 3256 if (Adapter->e1000g_state & E1000G_STARTED) { 3257 err = EBUSY; 3258 break; 3259 } 3260 3261 cur_mtu = Adapter->default_mtu; 3262 3263 /* get new requested MTU */ 3264 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 3265 if (new_mtu == cur_mtu) { 3266 err = 0; 3267 break; 3268 } 3269 3270 if ((new_mtu < DEFAULT_MTU) || 3271 (new_mtu > Adapter->max_mtu)) { 3272 err = EINVAL; 3273 break; 3274 } 3275 3276 /* inform MAC framework of new MTU */ 3277 err = mac_maxsdu_update(Adapter->mh, new_mtu); 3278 3279 if (err == 0) { 3280 Adapter->default_mtu = new_mtu; 3281 Adapter->max_frame_size = 3282 e1000g_mtu2maxframe(new_mtu); 3283 3284 /* 3285 * check PCH limits & set buffer sizes to 3286 * match new MTU 3287 */ 3288 e1000g_pch_limits(Adapter); 3289 e1000g_set_bufsize(Adapter); 3290 3291 /* 3292 * decrease the number of descriptors and free 3293 * packets for jumbo frames to reduce tx/rx 3294 * resource consumption 3295 */ 3296 if (Adapter->max_frame_size >= 3297 (FRAME_SIZE_UPTO_4K)) { 3298 if (Adapter->tx_desc_num_flag == 0) 3299 Adapter->tx_desc_num = 3300 DEFAULT_JUMBO_NUM_TX_DESC; 3301 3302 if (Adapter->rx_desc_num_flag == 0) 3303 Adapter->rx_desc_num = 3304 DEFAULT_JUMBO_NUM_RX_DESC; 3305 3306 if (Adapter->tx_buf_num_flag == 0) 3307 Adapter->tx_freelist_num = 3308 DEFAULT_JUMBO_NUM_TX_BUF; 3309 3310 if (Adapter->rx_buf_num_flag == 0) 3311 Adapter->rx_freelist_limit = 3312 DEFAULT_JUMBO_NUM_RX_BUF; 3313 } else { 3314 if (Adapter->tx_desc_num_flag == 0) 3315 Adapter->tx_desc_num = 3316 DEFAULT_NUM_TX_DESCRIPTOR; 3317 3318 if (Adapter->rx_desc_num_flag == 0) 3319 Adapter->rx_desc_num = 3320 DEFAULT_NUM_RX_DESCRIPTOR; 3321 3322 if (Adapter->tx_buf_num_flag == 0) 3323 Adapter->tx_freelist_num = 3324 DEFAULT_NUM_TX_FREELIST; 3325 3326 if (Adapter->rx_buf_num_flag == 0) 3327 Adapter->rx_freelist_limit = 3328 DEFAULT_NUM_RX_FREELIST; 3329 } 3330 } 3331 break; 3332 case MAC_PROP_PRIVATE: 3333 err = e1000g_set_priv_prop(Adapter, pr_name, 3334 pr_valsize, pr_val); 3335 break; 3336 default: 3337 err = ENOTSUP; 3338 break; 3339 } 3340 rw_exit(&Adapter->chip_lock); 3341 return (err); 3342 } 3343 3344 static int 3345 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3346 uint_t pr_valsize, void *pr_val) 3347 { 3348 struct e1000g *Adapter = arg; 3349 struct e1000_fc_info *fc = &Adapter->shared.fc; 3350 int err = 0; 3351 link_flowctrl_t flowctrl; 3352 uint64_t tmp = 0; 3353 3354 switch (pr_num) { 3355 case MAC_PROP_DUPLEX: 3356 ASSERT(pr_valsize >= sizeof (link_duplex_t)); 3357 bcopy(&Adapter->link_duplex, pr_val, 3358 sizeof (link_duplex_t)); 3359 break; 3360 case MAC_PROP_SPEED: 3361 ASSERT(pr_valsize >= sizeof (uint64_t)); 3362 tmp = Adapter->link_speed * 1000000ull; 3363 bcopy(&tmp, pr_val, sizeof (tmp)); 3364 break; 3365 case MAC_PROP_AUTONEG: 3366 *(uint8_t *)pr_val = Adapter->param_adv_autoneg; 3367 break; 3368 case MAC_PROP_FLOWCTRL: 3369 ASSERT(pr_valsize >= sizeof (link_flowctrl_t)); 3370 switch (fc->current_mode) { 3371 case e1000_fc_none: 3372 flowctrl = LINK_FLOWCTRL_NONE; 3373 break; 3374 case e1000_fc_rx_pause: 3375 flowctrl = LINK_FLOWCTRL_RX; 3376 break; 3377 case e1000_fc_tx_pause: 3378 flowctrl = 
LINK_FLOWCTRL_TX; 3379 break; 3380 case e1000_fc_full: 3381 flowctrl = LINK_FLOWCTRL_BI; 3382 break; 3383 } 3384 bcopy(&flowctrl, pr_val, sizeof (flowctrl)); 3385 break; 3386 case MAC_PROP_ADV_1000FDX_CAP: 3387 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx; 3388 break; 3389 case MAC_PROP_EN_1000FDX_CAP: 3390 *(uint8_t *)pr_val = Adapter->param_en_1000fdx; 3391 break; 3392 case MAC_PROP_ADV_1000HDX_CAP: 3393 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx; 3394 break; 3395 case MAC_PROP_EN_1000HDX_CAP: 3396 *(uint8_t *)pr_val = Adapter->param_en_1000hdx; 3397 break; 3398 case MAC_PROP_ADV_100FDX_CAP: 3399 *(uint8_t *)pr_val = Adapter->param_adv_100fdx; 3400 break; 3401 case MAC_PROP_EN_100FDX_CAP: 3402 *(uint8_t *)pr_val = Adapter->param_en_100fdx; 3403 break; 3404 case MAC_PROP_ADV_100HDX_CAP: 3405 *(uint8_t *)pr_val = Adapter->param_adv_100hdx; 3406 break; 3407 case MAC_PROP_EN_100HDX_CAP: 3408 *(uint8_t *)pr_val = Adapter->param_en_100hdx; 3409 break; 3410 case MAC_PROP_ADV_10FDX_CAP: 3411 *(uint8_t *)pr_val = Adapter->param_adv_10fdx; 3412 break; 3413 case MAC_PROP_EN_10FDX_CAP: 3414 *(uint8_t *)pr_val = Adapter->param_en_10fdx; 3415 break; 3416 case MAC_PROP_ADV_10HDX_CAP: 3417 *(uint8_t *)pr_val = Adapter->param_adv_10hdx; 3418 break; 3419 case MAC_PROP_EN_10HDX_CAP: 3420 *(uint8_t *)pr_val = Adapter->param_en_10hdx; 3421 break; 3422 case MAC_PROP_ADV_100T4_CAP: 3423 case MAC_PROP_EN_100T4_CAP: 3424 *(uint8_t *)pr_val = Adapter->param_adv_100t4; 3425 break; 3426 case MAC_PROP_PRIVATE: 3427 err = e1000g_get_priv_prop(Adapter, pr_name, 3428 pr_valsize, pr_val); 3429 break; 3430 default: 3431 err = ENOTSUP; 3432 break; 3433 } 3434 3435 return (err); 3436 } 3437 3438 static void 3439 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num, 3440 mac_prop_info_handle_t prh) 3441 { 3442 struct e1000g *Adapter = arg; 3443 struct e1000_hw *hw = &Adapter->shared; 3444 3445 switch (pr_num) { 3446 case MAC_PROP_DUPLEX: 3447 case MAC_PROP_SPEED: 3448 case MAC_PROP_ADV_1000FDX_CAP: 3449 case MAC_PROP_ADV_1000HDX_CAP: 3450 case MAC_PROP_ADV_100FDX_CAP: 3451 case MAC_PROP_ADV_100HDX_CAP: 3452 case MAC_PROP_ADV_10FDX_CAP: 3453 case MAC_PROP_ADV_10HDX_CAP: 3454 case MAC_PROP_ADV_100T4_CAP: 3455 case MAC_PROP_EN_100T4_CAP: 3456 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3457 break; 3458 3459 case MAC_PROP_EN_1000FDX_CAP: 3460 if (hw->phy.media_type != e1000_media_type_copper) { 3461 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3462 } else { 3463 mac_prop_info_set_default_uint8(prh, 3464 ((Adapter->phy_ext_status & 3465 IEEE_ESR_1000T_FD_CAPS) || 3466 (Adapter->phy_ext_status & 3467 IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0); 3468 } 3469 break; 3470 3471 case MAC_PROP_EN_100FDX_CAP: 3472 if (hw->phy.media_type != e1000_media_type_copper) { 3473 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3474 } else { 3475 mac_prop_info_set_default_uint8(prh, 3476 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 3477 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 3478 ? 1 : 0); 3479 } 3480 break; 3481 3482 case MAC_PROP_EN_100HDX_CAP: 3483 if (hw->phy.media_type != e1000_media_type_copper) { 3484 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3485 } else { 3486 mac_prop_info_set_default_uint8(prh, 3487 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) || 3488 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) 3489 ? 
1 : 0); 3490 } 3491 break; 3492 3493 case MAC_PROP_EN_10FDX_CAP: 3494 if (hw->phy.media_type != e1000_media_type_copper) { 3495 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3496 } else { 3497 mac_prop_info_set_default_uint8(prh, 3498 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0); 3499 } 3500 break; 3501 3502 case MAC_PROP_EN_10HDX_CAP: 3503 if (hw->phy.media_type != e1000_media_type_copper) { 3504 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3505 } else { 3506 mac_prop_info_set_default_uint8(prh, 3507 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0); 3508 } 3509 break; 3510 3511 case MAC_PROP_EN_1000HDX_CAP: 3512 if (hw->phy.media_type != e1000_media_type_copper) 3513 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3514 break; 3515 3516 case MAC_PROP_AUTONEG: 3517 if (hw->phy.media_type != e1000_media_type_copper) { 3518 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3519 } else { 3520 mac_prop_info_set_default_uint8(prh, 3521 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) 3522 ? 1 : 0); 3523 } 3524 break; 3525 3526 case MAC_PROP_FLOWCTRL: 3527 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI); 3528 break; 3529 3530 case MAC_PROP_MTU: { 3531 struct e1000_mac_info *mac = &Adapter->shared.mac; 3532 struct e1000_phy_info *phy = &Adapter->shared.phy; 3533 uint32_t max; 3534 3535 /* some MAC types do not support jumbo frames */ 3536 if ((mac->type == e1000_ich8lan) || 3537 ((mac->type == e1000_ich9lan) && (phy->type == 3538 e1000_phy_ife))) { 3539 max = DEFAULT_MTU; 3540 } else { 3541 max = Adapter->max_mtu; 3542 } 3543 3544 mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max); 3545 break; 3546 } 3547 case MAC_PROP_PRIVATE: { 3548 char valstr[64]; 3549 int value; 3550 3551 if (strcmp(pr_name, "_adv_pause_cap") == 0 || 3552 strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 3553 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ); 3554 return; 3555 } else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3556 value = DEFAULT_TX_BCOPY_THRESHOLD; 3557 } else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3558 value = DEFAULT_TX_INTR_ENABLE; 3559 } else if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3560 value = DEFAULT_TX_INTR_DELAY; 3561 } else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3562 value = DEFAULT_TX_INTR_ABS_DELAY; 3563 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3564 value = DEFAULT_RX_BCOPY_THRESHOLD; 3565 } else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3566 value = DEFAULT_RX_LIMIT_ON_INTR; 3567 } else if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3568 value = DEFAULT_RX_INTR_DELAY; 3569 } else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3570 value = DEFAULT_RX_INTR_ABS_DELAY; 3571 } else if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3572 value = DEFAULT_INTR_THROTTLING; 3573 } else if (strcmp(pr_name, "_intr_adaptive") == 0) { 3574 value = 1; 3575 } else { 3576 return; 3577 } 3578 3579 (void) snprintf(valstr, sizeof (valstr), "%d", value); 3580 mac_prop_info_set_default_str(prh, valstr); 3581 break; 3582 } 3583 } 3584 } 3585 3586 /* ARGSUSED2 */ 3587 static int 3588 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name, 3589 uint_t pr_valsize, const void *pr_val) 3590 { 3591 int err = 0; 3592 long result; 3593 struct e1000_hw *hw = &Adapter->shared; 3594 3595 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3596 if (pr_val == NULL) { 3597 err = EINVAL; 3598 return (err); 3599 } 3600 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3601 if (result < MIN_TX_BCOPY_THRESHOLD || 3602 result > 
MAX_TX_BCOPY_THRESHOLD) 3603 err = EINVAL; 3604 else { 3605 Adapter->tx_bcopy_thresh = (uint32_t)result; 3606 } 3607 return (err); 3608 } 3609 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3610 if (pr_val == NULL) { 3611 err = EINVAL; 3612 return (err); 3613 } 3614 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3615 if (result < 0 || result > 1) 3616 err = EINVAL; 3617 else { 3618 Adapter->tx_intr_enable = (result == 1) ? 3619 B_TRUE: B_FALSE; 3620 if (Adapter->tx_intr_enable) 3621 e1000g_mask_tx_interrupt(Adapter); 3622 else 3623 e1000g_clear_tx_interrupt(Adapter); 3624 if (e1000g_check_acc_handle( 3625 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3626 ddi_fm_service_impact(Adapter->dip, 3627 DDI_SERVICE_DEGRADED); 3628 err = EIO; 3629 } 3630 } 3631 return (err); 3632 } 3633 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3634 if (pr_val == NULL) { 3635 err = EINVAL; 3636 return (err); 3637 } 3638 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3639 if (result < MIN_TX_INTR_DELAY || 3640 result > MAX_TX_INTR_DELAY) 3641 err = EINVAL; 3642 else { 3643 Adapter->tx_intr_delay = (uint32_t)result; 3644 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay); 3645 if (e1000g_check_acc_handle( 3646 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3647 ddi_fm_service_impact(Adapter->dip, 3648 DDI_SERVICE_DEGRADED); 3649 err = EIO; 3650 } 3651 } 3652 return (err); 3653 } 3654 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3655 if (pr_val == NULL) { 3656 err = EINVAL; 3657 return (err); 3658 } 3659 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3660 if (result < MIN_TX_INTR_ABS_DELAY || 3661 result > MAX_TX_INTR_ABS_DELAY) 3662 err = EINVAL; 3663 else { 3664 Adapter->tx_intr_abs_delay = (uint32_t)result; 3665 E1000_WRITE_REG(hw, E1000_TADV, 3666 Adapter->tx_intr_abs_delay); 3667 if (e1000g_check_acc_handle( 3668 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3669 ddi_fm_service_impact(Adapter->dip, 3670 DDI_SERVICE_DEGRADED); 3671 err = EIO; 3672 } 3673 } 3674 return (err); 3675 } 3676 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3677 if (pr_val == NULL) { 3678 err = EINVAL; 3679 return (err); 3680 } 3681 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3682 if (result < MIN_RX_BCOPY_THRESHOLD || 3683 result > MAX_RX_BCOPY_THRESHOLD) 3684 err = EINVAL; 3685 else 3686 Adapter->rx_bcopy_thresh = (uint32_t)result; 3687 return (err); 3688 } 3689 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3690 if (pr_val == NULL) { 3691 err = EINVAL; 3692 return (err); 3693 } 3694 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3695 if (result < MIN_RX_LIMIT_ON_INTR || 3696 result > MAX_RX_LIMIT_ON_INTR) 3697 err = EINVAL; 3698 else 3699 Adapter->rx_limit_onintr = (uint32_t)result; 3700 return (err); 3701 } 3702 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3703 if (pr_val == NULL) { 3704 err = EINVAL; 3705 return (err); 3706 } 3707 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3708 if (result < MIN_RX_INTR_DELAY || 3709 result > MAX_RX_INTR_DELAY) 3710 err = EINVAL; 3711 else { 3712 Adapter->rx_intr_delay = (uint32_t)result; 3713 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay); 3714 if (e1000g_check_acc_handle( 3715 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3716 ddi_fm_service_impact(Adapter->dip, 3717 DDI_SERVICE_DEGRADED); 3718 err = EIO; 3719 } 3720 } 3721 return (err); 3722 } 3723 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3724 if (pr_val == NULL) { 3725 err = EINVAL; 3726 return (err); 3727 } 3728 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 
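/* Range-check the parsed value before it is programmed into the RADV (receive interrupt absolute delay) register below. */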
3729 if (result < MIN_RX_INTR_ABS_DELAY || 3730 result > MAX_RX_INTR_ABS_DELAY) 3731 err = EINVAL; 3732 else { 3733 Adapter->rx_intr_abs_delay = (uint32_t)result; 3734 E1000_WRITE_REG(hw, E1000_RADV, 3735 Adapter->rx_intr_abs_delay); 3736 if (e1000g_check_acc_handle( 3737 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3738 ddi_fm_service_impact(Adapter->dip, 3739 DDI_SERVICE_DEGRADED); 3740 err = EIO; 3741 } 3742 } 3743 return (err); 3744 } 3745 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3746 if (pr_val == NULL) { 3747 err = EINVAL; 3748 return (err); 3749 } 3750 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3751 if (result < MIN_INTR_THROTTLING || 3752 result > MAX_INTR_THROTTLING) 3753 err = EINVAL; 3754 else { 3755 if (hw->mac.type >= e1000_82540) { 3756 Adapter->intr_throttling_rate = 3757 (uint32_t)result; 3758 E1000_WRITE_REG(hw, E1000_ITR, 3759 Adapter->intr_throttling_rate); 3760 if (e1000g_check_acc_handle( 3761 Adapter->osdep.reg_handle) != DDI_FM_OK) { 3762 ddi_fm_service_impact(Adapter->dip, 3763 DDI_SERVICE_DEGRADED); 3764 err = EIO; 3765 } 3766 } else 3767 err = EINVAL; 3768 } 3769 return (err); 3770 } 3771 if (strcmp(pr_name, "_intr_adaptive") == 0) { 3772 if (pr_val == NULL) { 3773 err = EINVAL; 3774 return (err); 3775 } 3776 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3777 if (result < 0 || result > 1) 3778 err = EINVAL; 3779 else { 3780 if (hw->mac.type >= e1000_82540) { 3781 Adapter->intr_adaptive = (result == 1) ? 3782 B_TRUE : B_FALSE; 3783 } else { 3784 err = EINVAL; 3785 } 3786 } 3787 return (err); 3788 } 3789 return (ENOTSUP); 3790 } 3791 3792 static int 3793 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name, 3794 uint_t pr_valsize, void *pr_val) 3795 { 3796 int err = ENOTSUP; 3797 int value; 3798 3799 if (strcmp(pr_name, "_adv_pause_cap") == 0) { 3800 value = Adapter->param_adv_pause; 3801 err = 0; 3802 goto done; 3803 } 3804 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) { 3805 value = Adapter->param_adv_asym_pause; 3806 err = 0; 3807 goto done; 3808 } 3809 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3810 value = Adapter->tx_bcopy_thresh; 3811 err = 0; 3812 goto done; 3813 } 3814 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3815 value = Adapter->tx_intr_enable; 3816 err = 0; 3817 goto done; 3818 } 3819 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3820 value = Adapter->tx_intr_delay; 3821 err = 0; 3822 goto done; 3823 } 3824 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3825 value = Adapter->tx_intr_abs_delay; 3826 err = 0; 3827 goto done; 3828 } 3829 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3830 value = Adapter->rx_bcopy_thresh; 3831 err = 0; 3832 goto done; 3833 } 3834 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3835 value = Adapter->rx_limit_onintr; 3836 err = 0; 3837 goto done; 3838 } 3839 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3840 value = Adapter->rx_intr_delay; 3841 err = 0; 3842 goto done; 3843 } 3844 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3845 value = Adapter->rx_intr_abs_delay; 3846 err = 0; 3847 goto done; 3848 } 3849 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3850 value = Adapter->intr_throttling_rate; 3851 err = 0; 3852 goto done; 3853 } 3854 if (strcmp(pr_name, "_intr_adaptive") == 0) { 3855 value = Adapter->intr_adaptive; 3856 err = 0; 3857 goto done; 3858 } 3859 done: 3860 if (err == 0) { 3861 (void) snprintf(pr_val, pr_valsize, "%d", value); 3862 } 3863 return (err); 3864 } 3865 3866 /* 3867 * e1000g_get_conf - get configurations set in 
e1000g.conf 3868 * This routine gets user-configured values out of the configuration 3869 * file e1000g.conf. 3870 * 3871 * For each configurable value, there is a minimum, a maximum, and a 3872 * default. 3873 * If the user does not configure a value, use the default. 3874 * If the user configures below the minimum, use the minimum. 3875 * If the user configures above the maximum, use the maximum. * For example, a per-instance entry in e1000g.conf might read "NumTxDescriptors=2048,2048;" (one value for each driver instance). 3876 */ 3877 static void 3878 e1000g_get_conf(struct e1000g *Adapter) 3879 { 3880 struct e1000_hw *hw = &Adapter->shared; 3881 boolean_t tbi_compatibility = B_FALSE; 3882 boolean_t is_jumbo = B_FALSE; 3883 int propval; 3884 /* 3885 * decrease the number of descriptors and free packets 3886 * for jumbo frames to reduce tx/rx resource consumption 3887 */ 3888 if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) { 3889 is_jumbo = B_TRUE; 3890 } 3891 3892 /* 3893 * get each configurable property from e1000g.conf 3894 */ 3895 3896 /* 3897 * NumTxDescriptors 3898 */ 3899 Adapter->tx_desc_num_flag = 3900 e1000g_get_prop(Adapter, "NumTxDescriptors", 3901 MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR, 3902 is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC 3903 : DEFAULT_NUM_TX_DESCRIPTOR, &propval); 3904 Adapter->tx_desc_num = propval; 3905 3906 /* 3907 * NumRxDescriptors 3908 */ 3909 Adapter->rx_desc_num_flag = 3910 e1000g_get_prop(Adapter, "NumRxDescriptors", 3911 MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR, 3912 is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC 3913 : DEFAULT_NUM_RX_DESCRIPTOR, &propval); 3914 Adapter->rx_desc_num = propval; 3915 3916 /* 3917 * NumRxFreeList 3918 */ 3919 Adapter->rx_buf_num_flag = 3920 e1000g_get_prop(Adapter, "NumRxFreeList", 3921 MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST, 3922 is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF 3923 : DEFAULT_NUM_RX_FREELIST, &propval); 3924 Adapter->rx_freelist_limit = propval; 3925 3926 /* 3927 * NumTxPacketList 3928 */ 3929 Adapter->tx_buf_num_flag = 3930 e1000g_get_prop(Adapter, "NumTxPacketList", 3931 MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST, 3932 is_jumbo ?
DEFAULT_JUMBO_NUM_TX_BUF 3933 : DEFAULT_NUM_TX_FREELIST, &propval); 3934 Adapter->tx_freelist_num = propval; 3935 3936 /* 3937 * FlowControl 3938 */ 3939 hw->fc.send_xon = B_TRUE; 3940 (void) e1000g_get_prop(Adapter, "FlowControl", 3941 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval); 3942 hw->fc.requested_mode = propval; 3943 /* 4 is the setting that says "let the eeprom decide" */ 3944 if (hw->fc.requested_mode == 4) 3945 hw->fc.requested_mode = e1000_fc_default; 3946 3947 /* 3948 * Max Num Receive Packets on Interrupt 3949 */ 3950 (void) e1000g_get_prop(Adapter, "MaxNumReceivePackets", 3951 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR, 3952 DEFAULT_RX_LIMIT_ON_INTR, &propval); 3953 Adapter->rx_limit_onintr = propval; 3954 3955 /* 3956 * PHY master slave setting 3957 */ 3958 (void) e1000g_get_prop(Adapter, "SetMasterSlave", 3959 e1000_ms_hw_default, e1000_ms_auto, 3960 e1000_ms_hw_default, &propval); 3961 hw->phy.ms_type = propval; 3962 3963 /* 3964 * Parameter which controls TBI mode workaround, which is only 3965 * needed on certain switches such as Cisco 6500/Foundry 3966 */ 3967 (void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable", 3968 0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval); 3969 tbi_compatibility = (propval == 1); 3970 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility); 3971 3972 /* 3973 * MSI Enable 3974 */ 3975 (void) e1000g_get_prop(Adapter, "MSIEnable", 3976 0, 1, DEFAULT_MSI_ENABLE, &propval); 3977 Adapter->msi_enable = (propval == 1); 3978 3979 /* 3980 * Interrupt Throttling Rate 3981 */ 3982 (void) e1000g_get_prop(Adapter, "intr_throttling_rate", 3983 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING, 3984 DEFAULT_INTR_THROTTLING, &propval); 3985 Adapter->intr_throttling_rate = propval; 3986 3987 /* 3988 * Adaptive Interrupt Blanking Enable/Disable 3989 * It is enabled by default 3990 */ 3991 (void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1, 3992 &propval); 3993 Adapter->intr_adaptive = (propval == 1); 3994 3995 /* 3996 * Hardware checksum enable/disable parameter 3997 */ 3998 (void) e1000g_get_prop(Adapter, "tx_hcksum_enable", 3999 0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval); 4000 Adapter->tx_hcksum_enable = (propval == 1); 4001 /* 4002 * Checksum on/off selection via global parameters. 4003 * 4004 * If the chip is flagged as not capable of (correctly) 4005 * handling checksumming, we don't enable it on either 4006 * Rx or Tx side. Otherwise, we take this chip's settings 4007 * from the patchable global defaults. 4008 * 4009 * We advertise our capabilities only if TX offload is 4010 * enabled. On receive, the stack will accept checksummed 4011 * packets anyway, even if we haven't said we can deliver 4012 * them. 4013 */ 4014 switch (hw->mac.type) { 4015 case e1000_82540: 4016 case e1000_82544: 4017 case e1000_82545: 4018 case e1000_82545_rev_3: 4019 case e1000_82546: 4020 case e1000_82546_rev_3: 4021 case e1000_82571: 4022 case e1000_82572: 4023 case e1000_82573: 4024 case e1000_80003es2lan: 4025 break; 4026 /* 4027 * For the following Intel PRO/1000 chipsets, we have not 4028 * tested the hardware checksum offload capability, so we 4029 * disable the capability for them. 4030 * e1000_82542, 4031 * e1000_82543, 4032 * e1000_82541, 4033 * e1000_82541_rev_2, 4034 * e1000_82547, 4035 * e1000_82547_rev_2, 4036 */ 4037 default: 4038 Adapter->tx_hcksum_enable = B_FALSE; 4039 } 4040 4041 /* 4042 * Large Send Offloading(LSO) Enable/Disable 4043 * If the tx hardware checksum is not enabled, LSO should be 4044 * disabled. 
4045 */ 4046 (void) e1000g_get_prop(Adapter, "lso_enable", 4047 0, 1, DEFAULT_LSO_ENABLE, &propval); 4048 Adapter->lso_enable = (propval == 1); 4049 4050 switch (hw->mac.type) { 4051 case e1000_82546: 4052 case e1000_82546_rev_3: 4053 if (Adapter->lso_enable) 4054 Adapter->lso_premature_issue = B_TRUE; 4055 /* FALLTHRU */ 4056 case e1000_82571: 4057 case e1000_82572: 4058 case e1000_82573: 4059 case e1000_80003es2lan: 4060 break; 4061 default: 4062 Adapter->lso_enable = B_FALSE; 4063 } 4064 4065 if (!Adapter->tx_hcksum_enable) { 4066 Adapter->lso_premature_issue = B_FALSE; 4067 Adapter->lso_enable = B_FALSE; 4068 } 4069 4070 /* 4071 * If mem_workaround_82546 is enabled, the rx buffer allocated by 4072 * e1000_82545, e1000_82546 and e1000_82546_rev_3 4073 * will not cross 64k boundary. 4074 */ 4075 (void) e1000g_get_prop(Adapter, "mem_workaround_82546", 4076 0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval); 4077 Adapter->mem_workaround_82546 = (propval == 1); 4078 4079 /* 4080 * Max number of multicast addresses 4081 */ 4082 (void) e1000g_get_prop(Adapter, "mcast_max_num", 4083 MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32, 4084 &propval); 4085 Adapter->mcast_max_num = propval; 4086 } 4087 4088 /* 4089 * e1000g_get_prop - routine to read properties 4090 * 4091 * Get a user-configured property value out of the configuration 4092 * file e1000g.conf. 4093 * 4094 * Caller provides name of the property, a default value, a minimum 4095 * value, a maximum value and a pointer to the returned property 4096 * value. 4097 * 4098 * Return B_TRUE if the configured value of the property is not a default 4099 * value, otherwise return B_FALSE. 4100 */ 4101 static boolean_t 4102 e1000g_get_prop(struct e1000g *Adapter, /* point to per-adapter structure */ 4103 char *propname, /* name of the property */ 4104 int minval, /* minimum acceptable value */ 4105 int maxval, /* maximum acceptable value */ 4106 int defval, /* default value */ 4107 int *propvalue) /* property value returned to caller */ 4108 { 4109 int propval; /* value returned for requested property */ 4110 int *props; /* point to array of properties returned */ 4111 uint_t nprops; /* number of property values returned */ 4112 boolean_t ret = B_TRUE; 4113 4114 /* 4115 * get the array of properties from the config file 4116 */ 4117 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip, 4118 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) { 4119 /* got some properties, test if we got enough */ 4120 if (Adapter->instance < nprops) { 4121 propval = props[Adapter->instance]; 4122 } else { 4123 /* not enough properties configured */ 4124 propval = defval; 4125 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4126 "Not Enough %s values found in e1000g.conf" 4127 " - set to %d\n", 4128 propname, propval); 4129 ret = B_FALSE; 4130 } 4131 4132 /* free memory allocated for properties */ 4133 ddi_prop_free(props); 4134 4135 } else { 4136 propval = defval; 4137 ret = B_FALSE; 4138 } 4139 4140 /* 4141 * enforce limits 4142 */ 4143 if (propval > maxval) { 4144 propval = maxval; 4145 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4146 "Too High %s value in e1000g.conf - set to %d\n", 4147 propname, propval); 4148 } 4149 4150 if (propval < minval) { 4151 propval = minval; 4152 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 4153 "Too Low %s value in e1000g.conf - set to %d\n", 4154 propname, propval); 4155 } 4156 4157 *propvalue = propval; 4158 return (ret); 4159 } 4160 4161 static boolean_t 4162 e1000g_link_check(struct e1000g *Adapter) 4163 { 
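	/*
	 * Check the current link state.  On a link-up transition, record
	 * the negotiated speed/duplex, set the tx stall threshold and
	 * apply the 82571/82572 TARC workaround.  On a link-down
	 * transition, disable auto master/slave resolution on IGP PHYs;
	 * while the link stays down, run the SmartSpeed workaround and
	 * eventually clean up any pending tx packets.  Returns B_TRUE
	 * when the link state changed since the last check.
	 */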
4164 uint16_t speed, duplex, phydata; 4165 boolean_t link_changed = B_FALSE; 4166 struct e1000_hw *hw; 4167 uint32_t reg_tarc; 4168 4169 hw = &Adapter->shared; 4170 4171 if (e1000g_link_up(Adapter)) { 4172 /* 4173 * The Link is up, check whether it was marked as down earlier 4174 */ 4175 if (Adapter->link_state != LINK_STATE_UP) { 4176 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex); 4177 Adapter->link_speed = speed; 4178 Adapter->link_duplex = duplex; 4179 Adapter->link_state = LINK_STATE_UP; 4180 link_changed = B_TRUE; 4181 4182 if (Adapter->link_speed == SPEED_1000) 4183 Adapter->stall_threshold = TX_STALL_TIME_2S; 4184 else 4185 Adapter->stall_threshold = TX_STALL_TIME_8S; 4186 4187 Adapter->tx_link_down_timeout = 0; 4188 4189 if ((hw->mac.type == e1000_82571) || 4190 (hw->mac.type == e1000_82572)) { 4191 reg_tarc = E1000_READ_REG(hw, E1000_TARC(0)); 4192 if (speed == SPEED_1000) 4193 reg_tarc |= (1 << 21); 4194 else 4195 reg_tarc &= ~(1 << 21); 4196 E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc); 4197 } 4198 } 4199 Adapter->smartspeed = 0; 4200 } else { 4201 if (Adapter->link_state != LINK_STATE_DOWN) { 4202 Adapter->link_speed = 0; 4203 Adapter->link_duplex = 0; 4204 Adapter->link_state = LINK_STATE_DOWN; 4205 link_changed = B_TRUE; 4206 4207 /* 4208 * SmartSpeed workaround for Tabor/TanaX, When the 4209 * driver loses link disable auto master/slave 4210 * resolution. 4211 */ 4212 if (hw->phy.type == e1000_phy_igp) { 4213 (void) e1000_read_phy_reg(hw, 4214 PHY_1000T_CTRL, &phydata); 4215 phydata |= CR_1000T_MS_ENABLE; 4216 (void) e1000_write_phy_reg(hw, 4217 PHY_1000T_CTRL, phydata); 4218 } 4219 } else { 4220 e1000g_smartspeed(Adapter); 4221 } 4222 4223 if (Adapter->e1000g_state & E1000G_STARTED) { 4224 if (Adapter->tx_link_down_timeout < 4225 MAX_TX_LINK_DOWN_TIMEOUT) { 4226 Adapter->tx_link_down_timeout++; 4227 } else if (Adapter->tx_link_down_timeout == 4228 MAX_TX_LINK_DOWN_TIMEOUT) { 4229 e1000g_tx_clean(Adapter); 4230 Adapter->tx_link_down_timeout++; 4231 } 4232 } 4233 } 4234 4235 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 4236 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4237 4238 return (link_changed); 4239 } 4240 4241 /* 4242 * e1000g_reset_link - Using the link properties to setup the link 4243 */ 4244 int 4245 e1000g_reset_link(struct e1000g *Adapter) 4246 { 4247 struct e1000_mac_info *mac; 4248 struct e1000_phy_info *phy; 4249 struct e1000_hw *hw; 4250 boolean_t invalid; 4251 4252 mac = &Adapter->shared.mac; 4253 phy = &Adapter->shared.phy; 4254 hw = &Adapter->shared; 4255 invalid = B_FALSE; 4256 4257 if (hw->phy.media_type != e1000_media_type_copper) 4258 goto out; 4259 4260 if (Adapter->param_adv_autoneg == 1) { 4261 mac->autoneg = B_TRUE; 4262 phy->autoneg_advertised = 0; 4263 4264 /* 4265 * 1000hdx is not supported for autonegotiation 4266 */ 4267 if (Adapter->param_adv_1000fdx == 1) 4268 phy->autoneg_advertised |= ADVERTISE_1000_FULL; 4269 4270 if (Adapter->param_adv_100fdx == 1) 4271 phy->autoneg_advertised |= ADVERTISE_100_FULL; 4272 4273 if (Adapter->param_adv_100hdx == 1) 4274 phy->autoneg_advertised |= ADVERTISE_100_HALF; 4275 4276 if (Adapter->param_adv_10fdx == 1) 4277 phy->autoneg_advertised |= ADVERTISE_10_FULL; 4278 4279 if (Adapter->param_adv_10hdx == 1) 4280 phy->autoneg_advertised |= ADVERTISE_10_HALF; 4281 4282 if (phy->autoneg_advertised == 0) 4283 invalid = B_TRUE; 4284 } else { 4285 mac->autoneg = B_FALSE; 4286 4287 /* 4288 * For Intel copper cards, 1000fdx and 1000hdx are not 4289 * supported for forced 
link 4290 */ 4291 if (Adapter->param_adv_100fdx == 1) 4292 mac->forced_speed_duplex = ADVERTISE_100_FULL; 4293 else if (Adapter->param_adv_100hdx == 1) 4294 mac->forced_speed_duplex = ADVERTISE_100_HALF; 4295 else if (Adapter->param_adv_10fdx == 1) 4296 mac->forced_speed_duplex = ADVERTISE_10_FULL; 4297 else if (Adapter->param_adv_10hdx == 1) 4298 mac->forced_speed_duplex = ADVERTISE_10_HALF; 4299 else 4300 invalid = B_TRUE; 4301 4302 } 4303 4304 if (invalid) { 4305 e1000g_log(Adapter, CE_WARN, 4306 "Invalid link settings. Setup link to " 4307 "support autonegotiation with all link capabilities."); 4308 mac->autoneg = B_TRUE; 4309 phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 4310 } 4311 4312 out: 4313 return (e1000_setup_link(&Adapter->shared)); 4314 } 4315 4316 static void 4317 e1000g_timer_tx_resched(struct e1000g *Adapter) 4318 { 4319 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 4320 4321 rw_enter(&Adapter->chip_lock, RW_READER); 4322 4323 if (tx_ring->resched_needed && 4324 ((ddi_get_lbolt() - tx_ring->resched_timestamp) > 4325 drv_usectohz(1000000)) && 4326 (Adapter->e1000g_state & E1000G_STARTED) && 4327 (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) { 4328 tx_ring->resched_needed = B_FALSE; 4329 mac_tx_update(Adapter->mh); 4330 E1000G_STAT(tx_ring->stat_reschedule); 4331 E1000G_STAT(tx_ring->stat_timer_reschedule); 4332 } 4333 4334 rw_exit(&Adapter->chip_lock); 4335 } 4336 4337 static void 4338 e1000g_local_timer(void *ws) 4339 { 4340 struct e1000g *Adapter = (struct e1000g *)ws; 4341 struct e1000_hw *hw; 4342 e1000g_ether_addr_t ether_addr; 4343 boolean_t link_changed; 4344 4345 hw = &Adapter->shared; 4346 4347 if (Adapter->e1000g_state & E1000G_ERROR) { 4348 rw_enter(&Adapter->chip_lock, RW_WRITER); 4349 Adapter->e1000g_state &= ~E1000G_ERROR; 4350 rw_exit(&Adapter->chip_lock); 4351 4352 Adapter->reset_count++; 4353 if (e1000g_global_reset(Adapter)) { 4354 ddi_fm_service_impact(Adapter->dip, 4355 DDI_SERVICE_RESTORED); 4356 e1000g_timer_tx_resched(Adapter); 4357 } else 4358 ddi_fm_service_impact(Adapter->dip, 4359 DDI_SERVICE_LOST); 4360 return; 4361 } 4362 4363 if (e1000g_stall_check(Adapter)) { 4364 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 4365 "Tx stall detected. Activate automatic recovery.\n"); 4366 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL); 4367 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 4368 Adapter->reset_count++; 4369 if (e1000g_reset_adapter(Adapter)) { 4370 ddi_fm_service_impact(Adapter->dip, 4371 DDI_SERVICE_RESTORED); 4372 e1000g_timer_tx_resched(Adapter); 4373 } 4374 return; 4375 } 4376 4377 link_changed = B_FALSE; 4378 rw_enter(&Adapter->chip_lock, RW_READER); 4379 if (Adapter->link_complete) 4380 link_changed = e1000g_link_check(Adapter); 4381 rw_exit(&Adapter->chip_lock); 4382 4383 if (link_changed) { 4384 if (!Adapter->reset_flag && 4385 (Adapter->e1000g_state & E1000G_STARTED) && 4386 !(Adapter->e1000g_state & E1000G_SUSPENDED)) 4387 mac_link_update(Adapter->mh, Adapter->link_state); 4388 if (Adapter->link_state == LINK_STATE_UP) 4389 Adapter->reset_flag = B_FALSE; 4390 } 4391 /* 4392 * Workaround for esb2. Data stuck in fifo on a link 4393 * down event. Reset the adapter to recover it. 4394 */ 4395 if (Adapter->esb2_workaround) { 4396 Adapter->esb2_workaround = B_FALSE; 4397 (void) e1000g_reset_adapter(Adapter); 4398 return; 4399 } 4400 4401 /* 4402 * With 82571 controllers, any locally administered address will 4403 * be overwritten when there is a reset on the other port. 4404 * Detect this circumstance and correct it. 
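 * This is done by reading RAR[0] back from the hardware, byte-swapping
 * it, and comparing it against the driver's stored MAC address; on a
 * mismatch the address is reprogrammed with e1000_rar_set().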
4405 */ 4406 if ((hw->mac.type == e1000_82571) && 4407 (e1000_get_laa_state_82571(hw) == B_TRUE)) { 4408 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0); 4409 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1); 4410 4411 ether_addr.reg.low = ntohl(ether_addr.reg.low); 4412 ether_addr.reg.high = ntohl(ether_addr.reg.high); 4413 4414 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) || 4415 (ether_addr.mac.addr[4] != hw->mac.addr[1]) || 4416 (ether_addr.mac.addr[3] != hw->mac.addr[2]) || 4417 (ether_addr.mac.addr[2] != hw->mac.addr[3]) || 4418 (ether_addr.mac.addr[1] != hw->mac.addr[4]) || 4419 (ether_addr.mac.addr[0] != hw->mac.addr[5])) { 4420 e1000_rar_set(hw, hw->mac.addr, 0); 4421 } 4422 } 4423 4424 /* 4425 * Long TTL workaround for 82541/82547 4426 */ 4427 (void) e1000_igp_ttl_workaround_82547(hw); 4428 4429 /* 4430 * Check for Adaptive IFS settings If there are lots of collisions 4431 * change the value in steps... 4432 * These properties should only be set for 10/100 4433 */ 4434 if ((hw->phy.media_type == e1000_media_type_copper) && 4435 ((Adapter->link_speed == SPEED_100) || 4436 (Adapter->link_speed == SPEED_10))) { 4437 e1000_update_adaptive(hw); 4438 } 4439 /* 4440 * Set Timer Interrupts 4441 */ 4442 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 4443 4444 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 4445 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4446 else 4447 e1000g_timer_tx_resched(Adapter); 4448 4449 restart_watchdog_timer(Adapter); 4450 } 4451 4452 /* 4453 * The function e1000g_link_timer() is called when the timer for link setup 4454 * is expired, which indicates the completion of the link setup. The link 4455 * state will not be updated until the link setup is completed. And the 4456 * link state will not be sent to the upper layer through mac_link_update() 4457 * in this function. It will be updated in the local timer routine or the 4458 * interrupt service routine after the interface is started (plumbed). 
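 * It simply marks link setup as complete and clears the timeout id
 * under the link lock.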
4459 */ 4460 static void 4461 e1000g_link_timer(void *arg) 4462 { 4463 struct e1000g *Adapter = (struct e1000g *)arg; 4464 4465 mutex_enter(&Adapter->link_lock); 4466 Adapter->link_complete = B_TRUE; 4467 Adapter->link_tid = 0; 4468 mutex_exit(&Adapter->link_lock); 4469 } 4470 4471 /* 4472 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf 4473 * 4474 * This function reads the forced speed and duplex for 10/100 Mbps speeds 4475 * and also for 1000 Mbps speeds from the e1000g.conf file 4476 */ 4477 static void 4478 e1000g_force_speed_duplex(struct e1000g *Adapter) 4479 { 4480 int forced; 4481 int propval; 4482 struct e1000_mac_info *mac = &Adapter->shared.mac; 4483 struct e1000_phy_info *phy = &Adapter->shared.phy; 4484 4485 /* 4486 * get value out of config file 4487 */ 4488 (void) e1000g_get_prop(Adapter, "ForceSpeedDuplex", 4489 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced); 4490 4491 switch (forced) { 4492 case GDIAG_10_HALF: 4493 /* 4494 * Disable Auto Negotiation 4495 */ 4496 mac->autoneg = B_FALSE; 4497 mac->forced_speed_duplex = ADVERTISE_10_HALF; 4498 break; 4499 case GDIAG_10_FULL: 4500 /* 4501 * Disable Auto Negotiation 4502 */ 4503 mac->autoneg = B_FALSE; 4504 mac->forced_speed_duplex = ADVERTISE_10_FULL; 4505 break; 4506 case GDIAG_100_HALF: 4507 /* 4508 * Disable Auto Negotiation 4509 */ 4510 mac->autoneg = B_FALSE; 4511 mac->forced_speed_duplex = ADVERTISE_100_HALF; 4512 break; 4513 case GDIAG_100_FULL: 4514 /* 4515 * Disable Auto Negotiation 4516 */ 4517 mac->autoneg = B_FALSE; 4518 mac->forced_speed_duplex = ADVERTISE_100_FULL; 4519 break; 4520 case GDIAG_1000_FULL: 4521 /* 4522 * The gigabit spec requires autonegotiation. Therefore, 4523 * when the user wants to force the speed to 1000Mbps, we 4524 * enable AutoNeg, but only allow the hardware to advertise 4525 * 1000Mbps. This is different from 10/100 operation, where 4526 * we are allowed to link without any negotiation. 4527 */ 4528 mac->autoneg = B_TRUE; 4529 phy->autoneg_advertised = ADVERTISE_1000_FULL; 4530 break; 4531 default: /* obey the setting of AutoNegAdvertised */ 4532 mac->autoneg = B_TRUE; 4533 (void) e1000g_get_prop(Adapter, "AutoNegAdvertised", 4534 0, AUTONEG_ADVERTISE_SPEED_DEFAULT, 4535 AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval); 4536 phy->autoneg_advertised = (uint16_t)propval; 4537 break; 4538 } /* switch */ 4539 } 4540 4541 /* 4542 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf 4543 * 4544 * This function reads MaxFrameSize from e1000g.conf 4545 */ 4546 static void 4547 e1000g_get_max_frame_size(struct e1000g *Adapter) 4548 { 4549 int max_frame; 4550 4551 /* 4552 * get value out of config file 4553 */ 4554 (void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0, 4555 &max_frame); 4556 4557 switch (max_frame) { 4558 case 0: 4559 Adapter->default_mtu = ETHERMTU; 4560 break; 4561 case 1: 4562 Adapter->default_mtu = FRAME_SIZE_UPTO_4K - 4563 sizeof (struct ether_vlan_header) - ETHERFCSL; 4564 break; 4565 case 2: 4566 Adapter->default_mtu = FRAME_SIZE_UPTO_8K - 4567 sizeof (struct ether_vlan_header) - ETHERFCSL; 4568 break; 4569 case 3: 4570 Adapter->default_mtu = FRAME_SIZE_UPTO_16K - 4571 sizeof (struct ether_vlan_header) - ETHERFCSL; 4572 break; 4573 default: 4574 Adapter->default_mtu = ETHERMTU; 4575 break; 4576 } /* switch */ 4577 4578 /* 4579 * If the user-configured MTU is larger than the device's maximum MTU, 4580 * the MTU is set to the device's maximum value. 
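 * The maximum frame size is then derived from the MTU by adding the
 * VLAN ethernet header and FCS lengths (see e1000g_mtu2maxframe()).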
4581 */ 4582 if (Adapter->default_mtu > Adapter->max_mtu) 4583 Adapter->default_mtu = Adapter->max_mtu; 4584 4585 Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu); 4586 } 4587 4588 /* 4589 * e1000g_pch_limits - Apply limits of the PCH silicon type 4590 * 4591 * At any frame size larger than the ethernet default, 4592 * prevent linking at 10/100 speeds. 4593 */ 4594 static void 4595 e1000g_pch_limits(struct e1000g *Adapter) 4596 { 4597 struct e1000_hw *hw = &Adapter->shared; 4598 4599 /* only applies to PCH silicon type */ 4600 if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan) 4601 return; 4602 4603 /* only applies to frames larger than ethernet default */ 4604 if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) { 4605 hw->mac.autoneg = B_TRUE; 4606 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL; 4607 4608 Adapter->param_adv_autoneg = 1; 4609 Adapter->param_adv_1000fdx = 1; 4610 4611 Adapter->param_adv_100fdx = 0; 4612 Adapter->param_adv_100hdx = 0; 4613 Adapter->param_adv_10fdx = 0; 4614 Adapter->param_adv_10hdx = 0; 4615 4616 e1000g_param_sync(Adapter); 4617 } 4618 } 4619 4620 /* 4621 * e1000g_mtu2maxframe - convert given MTU to maximum frame size 4622 */ 4623 static uint32_t 4624 e1000g_mtu2maxframe(uint32_t mtu) 4625 { 4626 uint32_t maxframe; 4627 4628 maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL; 4629 4630 return (maxframe); 4631 } 4632 4633 static void 4634 arm_watchdog_timer(struct e1000g *Adapter) 4635 { 4636 Adapter->watchdog_tid = 4637 timeout(e1000g_local_timer, 4638 (void *)Adapter, 1 * drv_usectohz(1000000)); 4639 } 4640 #pragma inline(arm_watchdog_timer) 4641 4642 static void 4643 enable_watchdog_timer(struct e1000g *Adapter) 4644 { 4645 mutex_enter(&Adapter->watchdog_lock); 4646 4647 if (!Adapter->watchdog_timer_enabled) { 4648 Adapter->watchdog_timer_enabled = B_TRUE; 4649 Adapter->watchdog_timer_started = B_TRUE; 4650 arm_watchdog_timer(Adapter); 4651 } 4652 4653 mutex_exit(&Adapter->watchdog_lock); 4654 } 4655 4656 static void 4657 disable_watchdog_timer(struct e1000g *Adapter) 4658 { 4659 timeout_id_t tid; 4660 4661 mutex_enter(&Adapter->watchdog_lock); 4662 4663 Adapter->watchdog_timer_enabled = B_FALSE; 4664 Adapter->watchdog_timer_started = B_FALSE; 4665 tid = Adapter->watchdog_tid; 4666 Adapter->watchdog_tid = 0; 4667 4668 mutex_exit(&Adapter->watchdog_lock); 4669 4670 if (tid != 0) 4671 (void) untimeout(tid); 4672 } 4673 4674 static void 4675 start_watchdog_timer(struct e1000g *Adapter) 4676 { 4677 mutex_enter(&Adapter->watchdog_lock); 4678 4679 if (Adapter->watchdog_timer_enabled) { 4680 if (!Adapter->watchdog_timer_started) { 4681 Adapter->watchdog_timer_started = B_TRUE; 4682 arm_watchdog_timer(Adapter); 4683 } 4684 } 4685 4686 mutex_exit(&Adapter->watchdog_lock); 4687 } 4688 4689 static void 4690 restart_watchdog_timer(struct e1000g *Adapter) 4691 { 4692 mutex_enter(&Adapter->watchdog_lock); 4693 4694 if (Adapter->watchdog_timer_started) 4695 arm_watchdog_timer(Adapter); 4696 4697 mutex_exit(&Adapter->watchdog_lock); 4698 } 4699 4700 static void 4701 stop_watchdog_timer(struct e1000g *Adapter) 4702 { 4703 timeout_id_t tid; 4704 4705 mutex_enter(&Adapter->watchdog_lock); 4706 4707 Adapter->watchdog_timer_started = B_FALSE; 4708 tid = Adapter->watchdog_tid; 4709 Adapter->watchdog_tid = 0; 4710 4711 mutex_exit(&Adapter->watchdog_lock); 4712 4713 if (tid != 0) 4714 (void) untimeout(tid); 4715 } 4716 4717 static void 4718 stop_link_timer(struct e1000g *Adapter) 4719 { 4720 timeout_id_t tid; 4721 4722 /* Disable the 
link timer */ 4723 mutex_enter(&Adapter->link_lock); 4724 4725 tid = Adapter->link_tid; 4726 Adapter->link_tid = 0; 4727 4728 mutex_exit(&Adapter->link_lock); 4729 4730 if (tid != 0) 4731 (void) untimeout(tid); 4732 } 4733 4734 static void 4735 stop_82547_timer(e1000g_tx_ring_t *tx_ring) 4736 { 4737 timeout_id_t tid; 4738 4739 /* Disable the tx timer for 82547 chipset */ 4740 mutex_enter(&tx_ring->tx_lock); 4741 4742 tx_ring->timer_enable_82547 = B_FALSE; 4743 tid = tx_ring->timer_id_82547; 4744 tx_ring->timer_id_82547 = 0; 4745 4746 mutex_exit(&tx_ring->tx_lock); 4747 4748 if (tid != 0) 4749 (void) untimeout(tid); 4750 } 4751 4752 void 4753 e1000g_clear_interrupt(struct e1000g *Adapter) 4754 { 4755 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 4756 0xffffffff & ~E1000_IMS_RXSEQ); 4757 } 4758 4759 void 4760 e1000g_mask_interrupt(struct e1000g *Adapter) 4761 { 4762 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, 4763 IMS_ENABLE_MASK & ~E1000_IMS_TXDW); 4764 4765 if (Adapter->tx_intr_enable) 4766 e1000g_mask_tx_interrupt(Adapter); 4767 } 4768 4769 /* 4770 * This routine is called by e1000g_quiesce(), therefore must not block. 4771 */ 4772 void 4773 e1000g_clear_all_interrupts(struct e1000g *Adapter) 4774 { 4775 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff); 4776 } 4777 4778 void 4779 e1000g_mask_tx_interrupt(struct e1000g *Adapter) 4780 { 4781 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW); 4782 } 4783 4784 void 4785 e1000g_clear_tx_interrupt(struct e1000g *Adapter) 4786 { 4787 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW); 4788 } 4789 4790 static void 4791 e1000g_smartspeed(struct e1000g *Adapter) 4792 { 4793 struct e1000_hw *hw = &Adapter->shared; 4794 uint16_t phy_status; 4795 uint16_t phy_ctrl; 4796 4797 /* 4798 * If we're not T-or-T, or we're not autoneg'ing, or we're not 4799 * advertising 1000Full, we don't even use the workaround 4800 */ 4801 if ((hw->phy.type != e1000_phy_igp) || 4802 !hw->mac.autoneg || 4803 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)) 4804 return; 4805 4806 /* 4807 * True if this is the first call of this function or after every 4808 * 30 seconds of not having link 4809 */ 4810 if (Adapter->smartspeed == 0) { 4811 /* 4812 * If Master/Slave config fault is asserted twice, we 4813 * assume back-to-back 4814 */ 4815 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4816 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4817 return; 4818 4819 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4820 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4821 return; 4822 /* 4823 * We're assuming back-2-back because our status register 4824 * insists! there's a fault in the master/slave 4825 * relationship that was "negotiated" 4826 */ 4827 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4828 /* 4829 * Is the phy configured for manual configuration of 4830 * master/slave? 4831 */ 4832 if (phy_ctrl & CR_1000T_MS_ENABLE) { 4833 /* 4834 * Yes. 
Then disable manual configuration (enable 4835 * auto configuration) of master/slave 4836 */ 4837 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4838 (void) e1000_write_phy_reg(hw, 4839 PHY_1000T_CTRL, phy_ctrl); 4840 /* 4841 * Effectively starting the clock 4842 */ 4843 Adapter->smartspeed++; 4844 /* 4845 * Restart autonegotiation 4846 */ 4847 if (!e1000_phy_setup_autoneg(hw) && 4848 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 4849 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4850 MII_CR_RESTART_AUTO_NEG); 4851 (void) e1000_write_phy_reg(hw, 4852 PHY_CONTROL, phy_ctrl); 4853 } 4854 } 4855 return; 4856 /* 4857 * Has 6 seconds transpired still without link? Remember, 4858 * you should reset the smartspeed counter once you obtain 4859 * link 4860 */ 4861 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4862 /* 4863 * Yes. Remember, we did at the start determine that 4864 * there's a master/slave configuration fault, so we're 4865 * still assuming there's someone on the other end, but we 4866 * just haven't yet been able to talk to it. We then 4867 * re-enable auto configuration of master/slave to see if 4868 * we're running 2/3 pair cables. 4869 */ 4870 /* 4871 * If still no link, perhaps using 2/3 pair cable 4872 */ 4873 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4874 phy_ctrl |= CR_1000T_MS_ENABLE; 4875 (void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 4876 /* 4877 * Restart autoneg with phy enabled for manual 4878 * configuration of master/slave 4879 */ 4880 if (!e1000_phy_setup_autoneg(hw) && 4881 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 4882 phy_ctrl |= 4883 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 4884 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl); 4885 } 4886 /* 4887 * Hopefully, there are no more faults and we've obtained 4888 * link as a result. 4889 */ 4890 } 4891 /* 4892 * Restart process after E1000_SMARTSPEED_MAX iterations (30 4893 * seconds) 4894 */ 4895 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4896 Adapter->smartspeed = 0; 4897 } 4898 4899 static boolean_t 4900 is_valid_mac_addr(uint8_t *mac_addr) 4901 { 4902 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 4903 const uint8_t addr_test2[6] = 4904 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4905 4906 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 4907 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 4908 return (B_FALSE); 4909 4910 return (B_TRUE); 4911 } 4912 4913 /* 4914 * e1000g_stall_check - check for tx stall 4915 * 4916 * This function checks if the adapter is stalled (in transmit). 4917 * 4918 * It is called each time the watchdog timeout is invoked. 4919 * If the transmit descriptor reclaim continuously fails, 4920 * the watchdog value will increment by 1. If the watchdog 4921 * value exceeds the threshold, the adapter is assumed to 4922 * have stalled and need to be reset. 
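 * Note that the stall decision itself is made in the tx recycle path,
 * which sets Adapter->stall_flag; this routine just triggers a recycle
 * pass and reports that flag while the link is up.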
4923 */ 4924 static boolean_t 4925 e1000g_stall_check(struct e1000g *Adapter) 4926 { 4927 e1000g_tx_ring_t *tx_ring; 4928 4929 tx_ring = Adapter->tx_ring; 4930 4931 if (Adapter->link_state != LINK_STATE_UP) 4932 return (B_FALSE); 4933 4934 (void) e1000g_recycle(tx_ring); 4935 4936 if (Adapter->stall_flag) 4937 return (B_TRUE); 4938 4939 return (B_FALSE); 4940 } 4941 4942 #ifdef E1000G_DEBUG 4943 static enum ioc_reply 4944 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp) 4945 { 4946 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd); 4947 e1000g_peekpoke_t *ppd; 4948 uint64_t mem_va; 4949 uint64_t maxoff; 4950 boolean_t peek; 4951 4952 switch (iocp->ioc_cmd) { 4953 4954 case E1000G_IOC_REG_PEEK: 4955 peek = B_TRUE; 4956 break; 4957 4958 case E1000G_IOC_REG_POKE: 4959 peek = B_FALSE; 4960 break; 4961 4962 default: 4963 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL, 4964 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n", 4965 iocp->ioc_cmd); 4966 return (IOC_INVAL); 4967 } 4968 4969 /* 4970 * Validate format of ioctl 4971 */ 4972 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t)) 4973 return (IOC_INVAL); 4974 if (mp->b_cont == NULL) 4975 return (IOC_INVAL); 4976 4977 ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr; 4978 4979 /* 4980 * Validate request parameters 4981 */ 4982 switch (ppd->pp_acc_space) { 4983 4984 default: 4985 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL, 4986 "e1000g_diag_ioctl: invalid access space 0x%X\n", 4987 ppd->pp_acc_space); 4988 return (IOC_INVAL); 4989 4990 case E1000G_PP_SPACE_REG: 4991 /* 4992 * Memory-mapped I/O space 4993 */ 4994 ASSERT(ppd->pp_acc_size == 4); 4995 if (ppd->pp_acc_size != 4) 4996 return (IOC_INVAL); 4997 4998 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0) 4999 return (IOC_INVAL); 5000 5001 mem_va = 0; 5002 maxoff = 0x10000; 5003 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg; 5004 break; 5005 5006 case E1000G_PP_SPACE_E1000G: 5007 /* 5008 * E1000g data structure! 5009 */ 5010 mem_va = (uintptr_t)e1000gp; 5011 maxoff = sizeof (struct e1000g); 5012 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem; 5013 break; 5014 5015 } 5016 5017 if (ppd->pp_acc_offset >= maxoff) 5018 return (IOC_INVAL); 5019 5020 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff) 5021 return (IOC_INVAL); 5022 5023 /* 5024 * All OK - go! 5025 */ 5026 ppd->pp_acc_offset += mem_va; 5027 (*ppfn)(e1000gp, ppd); 5028 return (peek ? 
IOC_REPLY : IOC_ACK); 5029 } 5030 5031 static void 5032 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5033 { 5034 ddi_acc_handle_t handle; 5035 uint32_t *regaddr; 5036 5037 handle = e1000gp->osdep.reg_handle; 5038 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr + 5039 (uintptr_t)ppd->pp_acc_offset); 5040 5041 ppd->pp_acc_data = ddi_get32(handle, regaddr); 5042 } 5043 5044 static void 5045 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5046 { 5047 ddi_acc_handle_t handle; 5048 uint32_t *regaddr; 5049 uint32_t value; 5050 5051 handle = e1000gp->osdep.reg_handle; 5052 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr + 5053 (uintptr_t)ppd->pp_acc_offset); 5054 value = (uint32_t)ppd->pp_acc_data; 5055 5056 ddi_put32(handle, regaddr, value); 5057 } 5058 5059 static void 5060 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5061 { 5062 uint64_t value; 5063 void *vaddr; 5064 5065 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 5066 5067 switch (ppd->pp_acc_size) { 5068 case 1: 5069 value = *(uint8_t *)vaddr; 5070 break; 5071 5072 case 2: 5073 value = *(uint16_t *)vaddr; 5074 break; 5075 5076 case 4: 5077 value = *(uint32_t *)vaddr; 5078 break; 5079 5080 case 8: 5081 value = *(uint64_t *)vaddr; 5082 break; 5083 } 5084 5085 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 5086 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n", 5087 (void *)e1000gp, (void *)ppd, value, vaddr); 5088 5089 ppd->pp_acc_data = value; 5090 } 5091 5092 static void 5093 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 5094 { 5095 uint64_t value; 5096 void *vaddr; 5097 5098 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 5099 value = ppd->pp_acc_data; 5100 5101 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 5102 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n", 5103 (void *)e1000gp, (void *)ppd, value, vaddr); 5104 5105 switch (ppd->pp_acc_size) { 5106 case 1: 5107 *(uint8_t *)vaddr = (uint8_t)value; 5108 break; 5109 5110 case 2: 5111 *(uint16_t *)vaddr = (uint16_t)value; 5112 break; 5113 5114 case 4: 5115 *(uint32_t *)vaddr = (uint32_t)value; 5116 break; 5117 5118 case 8: 5119 *(uint64_t *)vaddr = (uint64_t)value; 5120 break; 5121 } 5122 } 5123 #endif 5124 5125 /* 5126 * Loopback Support 5127 */ 5128 static lb_property_t lb_normal = 5129 { normal, "normal", E1000G_LB_NONE }; 5130 static lb_property_t lb_external1000 = 5131 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 }; 5132 static lb_property_t lb_external100 = 5133 { external, "100Mbps", E1000G_LB_EXTERNAL_100 }; 5134 static lb_property_t lb_external10 = 5135 { external, "10Mbps", E1000G_LB_EXTERNAL_10 }; 5136 static lb_property_t lb_phy = 5137 { internal, "PHY", E1000G_LB_INTERNAL_PHY }; 5138 5139 static enum ioc_reply 5140 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp) 5141 { 5142 lb_info_sz_t *lbsp; 5143 lb_property_t *lbpp; 5144 struct e1000_hw *hw; 5145 uint32_t *lbmp; 5146 uint32_t size; 5147 uint32_t value; 5148 5149 hw = &Adapter->shared; 5150 5151 if (mp->b_cont == NULL) 5152 return (IOC_INVAL); 5153 5154 if (!e1000g_check_loopback_support(hw)) { 5155 e1000g_log(NULL, CE_WARN, 5156 "Loopback is not supported on e1000g%d", Adapter->instance); 5157 return (IOC_INVAL); 5158 } 5159 5160 switch (iocp->ioc_cmd) { 5161 default: 5162 return (IOC_INVAL); 5163 5164 case LB_GET_INFO_SIZE: 5165 size = sizeof (lb_info_sz_t); 5166 if (iocp->ioc_count != size) 5167 return (IOC_INVAL); 5168 5169 rw_enter(&Adapter->chip_lock, RW_WRITER); 
5170 e1000g_get_phy_state(Adapter); 5171 5172 /* 5173 * Workaround for hardware faults. In order to get a stable 5174 * state of phy, we will wait for a specific interval and 5175 * try again. The time delay is an experiential value based 5176 * on our testing. 5177 */ 5178 msec_delay(100); 5179 e1000g_get_phy_state(Adapter); 5180 rw_exit(&Adapter->chip_lock); 5181 5182 value = sizeof (lb_normal); 5183 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5184 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5185 (hw->phy.media_type == e1000_media_type_fiber) || 5186 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5187 value += sizeof (lb_phy); 5188 switch (hw->mac.type) { 5189 case e1000_82571: 5190 case e1000_82572: 5191 case e1000_80003es2lan: 5192 value += sizeof (lb_external1000); 5193 break; 5194 } 5195 } 5196 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5197 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5198 value += sizeof (lb_external100); 5199 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5200 value += sizeof (lb_external10); 5201 5202 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 5203 *lbsp = value; 5204 break; 5205 5206 case LB_GET_INFO: 5207 value = sizeof (lb_normal); 5208 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5209 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5210 (hw->phy.media_type == e1000_media_type_fiber) || 5211 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5212 value += sizeof (lb_phy); 5213 switch (hw->mac.type) { 5214 case e1000_82571: 5215 case e1000_82572: 5216 case e1000_80003es2lan: 5217 value += sizeof (lb_external1000); 5218 break; 5219 } 5220 } 5221 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5222 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5223 value += sizeof (lb_external100); 5224 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5225 value += sizeof (lb_external10); 5226 5227 size = value; 5228 if (iocp->ioc_count != size) 5229 return (IOC_INVAL); 5230 5231 value = 0; 5232 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 5233 lbpp[value++] = lb_normal; 5234 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5235 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 5236 (hw->phy.media_type == e1000_media_type_fiber) || 5237 (hw->phy.media_type == e1000_media_type_internal_serdes)) { 5238 lbpp[value++] = lb_phy; 5239 switch (hw->mac.type) { 5240 case e1000_82571: 5241 case e1000_82572: 5242 case e1000_80003es2lan: 5243 lbpp[value++] = lb_external1000; 5244 break; 5245 } 5246 } 5247 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5248 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 5249 lbpp[value++] = lb_external100; 5250 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 5251 lbpp[value++] = lb_external10; 5252 break; 5253 5254 case LB_GET_MODE: 5255 size = sizeof (uint32_t); 5256 if (iocp->ioc_count != size) 5257 return (IOC_INVAL); 5258 5259 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 5260 *lbmp = Adapter->loopback_mode; 5261 break; 5262 5263 case LB_SET_MODE: 5264 size = 0; 5265 if (iocp->ioc_count != sizeof (uint32_t)) 5266 return (IOC_INVAL); 5267 5268 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 5269 if (!e1000g_set_loopback_mode(Adapter, *lbmp)) 5270 return (IOC_INVAL); 5271 break; 5272 } 5273 5274 iocp->ioc_count = size; 5275 iocp->ioc_error = 0; 5276 5277 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 5278 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 5279 return (IOC_INVAL); 5280 } 5281 5282 return 
(IOC_REPLY); 5283 } 5284 5285 static boolean_t 5286 e1000g_check_loopback_support(struct e1000_hw *hw) 5287 { 5288 switch (hw->mac.type) { 5289 case e1000_82540: 5290 case e1000_82545: 5291 case e1000_82545_rev_3: 5292 case e1000_82546: 5293 case e1000_82546_rev_3: 5294 case e1000_82541: 5295 case e1000_82541_rev_2: 5296 case e1000_82547: 5297 case e1000_82547_rev_2: 5298 case e1000_82571: 5299 case e1000_82572: 5300 case e1000_82573: 5301 case e1000_82574: 5302 case e1000_80003es2lan: 5303 case e1000_ich9lan: 5304 case e1000_ich10lan: 5305 return (B_TRUE); 5306 } 5307 return (B_FALSE); 5308 } 5309 5310 static boolean_t 5311 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode) 5312 { 5313 struct e1000_hw *hw; 5314 int i, times; 5315 boolean_t link_up; 5316 5317 if (mode == Adapter->loopback_mode) 5318 return (B_TRUE); 5319 5320 hw = &Adapter->shared; 5321 times = 0; 5322 5323 Adapter->loopback_mode = mode; 5324 5325 if (mode == E1000G_LB_NONE) { 5326 /* Reset the chip */ 5327 hw->phy.autoneg_wait_to_complete = B_TRUE; 5328 (void) e1000g_reset_adapter(Adapter); 5329 hw->phy.autoneg_wait_to_complete = B_FALSE; 5330 return (B_TRUE); 5331 } 5332 5333 again: 5334 5335 rw_enter(&Adapter->chip_lock, RW_WRITER); 5336 5337 switch (mode) { 5338 default: 5339 rw_exit(&Adapter->chip_lock); 5340 return (B_FALSE); 5341 5342 case E1000G_LB_EXTERNAL_1000: 5343 e1000g_set_external_loopback_1000(Adapter); 5344 break; 5345 5346 case E1000G_LB_EXTERNAL_100: 5347 e1000g_set_external_loopback_100(Adapter); 5348 break; 5349 5350 case E1000G_LB_EXTERNAL_10: 5351 e1000g_set_external_loopback_10(Adapter); 5352 break; 5353 5354 case E1000G_LB_INTERNAL_PHY: 5355 e1000g_set_internal_loopback(Adapter); 5356 break; 5357 } 5358 5359 times++; 5360 5361 rw_exit(&Adapter->chip_lock); 5362 5363 /* Wait for link up */ 5364 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--) 5365 msec_delay(100); 5366 5367 rw_enter(&Adapter->chip_lock, RW_WRITER); 5368 5369 link_up = e1000g_link_up(Adapter); 5370 5371 rw_exit(&Adapter->chip_lock); 5372 5373 if (!link_up) { 5374 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5375 "Failed to get the link up"); 5376 if (times < 2) { 5377 /* Reset the link */ 5378 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5379 "Reset the link ..."); 5380 (void) e1000g_reset_adapter(Adapter); 5381 goto again; 5382 } 5383 5384 /* 5385 * Reset driver to loopback none when set loopback failed 5386 * for the second time. 5387 */ 5388 Adapter->loopback_mode = E1000G_LB_NONE; 5389 5390 /* Reset the chip */ 5391 hw->phy.autoneg_wait_to_complete = B_TRUE; 5392 (void) e1000g_reset_adapter(Adapter); 5393 hw->phy.autoneg_wait_to_complete = B_FALSE; 5394 5395 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 5396 "Set loopback mode failed, reset to loopback none"); 5397 5398 return (B_FALSE); 5399 } 5400 5401 return (B_TRUE); 5402 } 5403 5404 /* 5405 * The following loopback settings are from Intel's technical 5406 * document - "How To Loopback". All the register settings and 5407 * time delay values are directly inherited from the document 5408 * without more explanations available. 
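 * The routines below cover internal PHY loopback and external loopback
 * at 10, 100 and 1000 Mbps; in each case the MAC is forced to the same
 * speed/duplex that the PHY has been set to.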
5409 */ 5410 static void 5411 e1000g_set_internal_loopback(struct e1000g *Adapter) 5412 { 5413 struct e1000_hw *hw; 5414 uint32_t ctrl; 5415 uint32_t status; 5416 uint16_t phy_ctrl; 5417 uint16_t phy_reg; 5418 uint32_t txcw; 5419 5420 hw = &Adapter->shared; 5421 5422 /* Disable Smart Power Down */ 5423 phy_spd_state(hw, B_FALSE); 5424 5425 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); 5426 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10); 5427 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000; 5428 5429 switch (hw->mac.type) { 5430 case e1000_82540: 5431 case e1000_82545: 5432 case e1000_82545_rev_3: 5433 case e1000_82546: 5434 case e1000_82546_rev_3: 5435 case e1000_82573: 5436 /* Auto-MDI/MDIX off */ 5437 (void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 5438 /* Reset PHY to update Auto-MDI/MDIX */ 5439 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5440 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN); 5441 /* Reset PHY to auto-neg off and force 1000 */ 5442 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5443 phy_ctrl | MII_CR_RESET); 5444 /* 5445 * Disable PHY receiver for 82540/545/546 and 82573 Family. 5446 * See comments above e1000g_set_internal_loopback() for the 5447 * background. 5448 */ 5449 (void) e1000_write_phy_reg(hw, 29, 0x001F); 5450 (void) e1000_write_phy_reg(hw, 30, 0x8FFC); 5451 (void) e1000_write_phy_reg(hw, 29, 0x001A); 5452 (void) e1000_write_phy_reg(hw, 30, 0x8FF0); 5453 break; 5454 case e1000_80003es2lan: 5455 /* Force Link Up */ 5456 (void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, 5457 0x1CC); 5458 /* Sets PCS loopback at 1Gbs */ 5459 (void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, 5460 0x1046); 5461 break; 5462 } 5463 5464 /* 5465 * The following registers should be set for e1000_phy_bm phy type. 5466 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy. 5467 * For others, we do not need to set these registers. 5468 */ 5469 if (hw->phy.type == e1000_phy_bm) { 5470 /* Set Default MAC Interface speed to 1GB */ 5471 (void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg); 5472 phy_reg &= ~0x0007; 5473 phy_reg |= 0x006; 5474 (void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg); 5475 /* Assert SW reset for above settings to take effect */ 5476 (void) e1000_phy_commit(hw); 5477 msec_delay(1); 5478 /* Force Full Duplex */ 5479 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg); 5480 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16), 5481 phy_reg | 0x000C); 5482 /* Set Link Up (in force link) */ 5483 (void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg); 5484 (void) e1000_write_phy_reg(hw, PHY_REG(776, 16), 5485 phy_reg | 0x0040); 5486 /* Force Link */ 5487 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg); 5488 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16), 5489 phy_reg | 0x0040); 5490 /* Set Early Link Enable */ 5491 (void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg); 5492 (void) e1000_write_phy_reg(hw, PHY_REG(769, 20), 5493 phy_reg | 0x0400); 5494 } 5495 5496 /* Set loopback */ 5497 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK); 5498 5499 msec_delay(250); 5500 5501 /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ 5502 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5503 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5504 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5505 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5506 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ 5507 E1000_CTRL_FD); /* Force Duplex to FULL */ 5508 5509 switch (hw->mac.type) { 5510 case e1000_82540: 5511 case e1000_82545: 5512 case e1000_82545_rev_3: 5513 case e1000_82546: 5514 case e1000_82546_rev_3: 5515 /* 5516 * For some serdes we'll need to commit the writes now 5517 * so that the status is updated on link 5518 */ 5519 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 5520 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5521 msec_delay(100); 5522 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5523 } 5524 5525 if (hw->phy.media_type == e1000_media_type_copper) { 5526 /* Invert Loss of Signal */ 5527 ctrl |= E1000_CTRL_ILOS; 5528 } else { 5529 /* Set ILOS on fiber nic if half duplex is detected */ 5530 status = E1000_READ_REG(hw, E1000_STATUS); 5531 if ((status & E1000_STATUS_FD) == 0) 5532 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5533 } 5534 break; 5535 5536 case e1000_82571: 5537 case e1000_82572: 5538 /* 5539 * The fiber/SerDes versions of this adapter do not contain an 5540 * accessible PHY. Therefore, loopback beyond MAC must be done 5541 * using SerDes analog loopback. 5542 */ 5543 if (hw->phy.media_type != e1000_media_type_copper) { 5544 /* Disable autoneg by setting bit 31 of TXCW to zero */ 5545 txcw = E1000_READ_REG(hw, E1000_TXCW); 5546 txcw &= ~((uint32_t)1 << 31); 5547 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 5548 5549 /* 5550 * Write 0x410 to Serdes Control register 5551 * to enable Serdes analog loopback 5552 */ 5553 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 5554 msec_delay(10); 5555 } 5556 5557 status = E1000_READ_REG(hw, E1000_STATUS); 5558 /* Set ILOS on fiber nic if half duplex is detected */ 5559 if ((hw->phy.media_type == e1000_media_type_fiber) && 5560 ((status & E1000_STATUS_FD) == 0 || 5561 (status & E1000_STATUS_LU) == 0)) 5562 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5563 else if (hw->phy.media_type == e1000_media_type_internal_serdes) 5564 ctrl |= E1000_CTRL_SLU; 5565 break; 5566 5567 case e1000_82573: 5568 ctrl |= E1000_CTRL_ILOS; 5569 break; 5570 case e1000_ich9lan: 5571 case e1000_ich10lan: 5572 ctrl |= E1000_CTRL_SLU; 5573 break; 5574 } 5575 if (hw->phy.type == e1000_phy_bm) 5576 ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS; 5577 5578 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5579 } 5580 5581 static void 5582 e1000g_set_external_loopback_1000(struct e1000g *Adapter) 5583 { 5584 struct e1000_hw *hw; 5585 uint32_t rctl; 5586 uint32_t ctrl_ext; 5587 uint32_t ctrl; 5588 uint32_t status; 5589 uint32_t txcw; 5590 uint16_t phydata; 5591 5592 hw = &Adapter->shared; 5593 5594 /* Disable Smart Power Down */ 5595 phy_spd_state(hw, B_FALSE); 5596 5597 switch (hw->mac.type) { 5598 case e1000_82571: 5599 case e1000_82572: 5600 switch (hw->phy.media_type) { 5601 case e1000_media_type_copper: 5602 /* Force link up (Must be done before the PHY writes) */ 5603 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5604 ctrl |= E1000_CTRL_SLU; /* Force Link Up */ 5605 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5606 5607 rctl = E1000_READ_REG(hw, E1000_RCTL); 5608 rctl |= (E1000_RCTL_EN | 5609 E1000_RCTL_SBP | 5610 E1000_RCTL_UPE | 5611 E1000_RCTL_MPE | 5612 E1000_RCTL_LPE | 5613 E1000_RCTL_BAM); /* 0x803E */ 5614 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 5615 5616 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 5617 
ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA | 5618 E1000_CTRL_EXT_SDP6_DATA | 5619 E1000_CTRL_EXT_SDP3_DATA | 5620 E1000_CTRL_EXT_SDP4_DIR | 5621 E1000_CTRL_EXT_SDP6_DIR | 5622 E1000_CTRL_EXT_SDP3_DIR); /* 0x0DD0 */ 5623 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 5624 5625 /* 5626 * This sequence tunes the PHY's SDP and no customer 5627 * settable values. For background, see comments above 5628 * e1000g_set_internal_loopback(). 5629 */ 5630 (void) e1000_write_phy_reg(hw, 0x0, 0x140); 5631 msec_delay(10); 5632 (void) e1000_write_phy_reg(hw, 0x9, 0x1A00); 5633 (void) e1000_write_phy_reg(hw, 0x12, 0xC10); 5634 (void) e1000_write_phy_reg(hw, 0x12, 0x1C10); 5635 (void) e1000_write_phy_reg(hw, 0x1F37, 0x76); 5636 (void) e1000_write_phy_reg(hw, 0x1F33, 0x1); 5637 (void) e1000_write_phy_reg(hw, 0x1F33, 0x0); 5638 5639 (void) e1000_write_phy_reg(hw, 0x1F35, 0x65); 5640 (void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C); 5641 (void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC); 5642 (void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C); 5643 (void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC); 5644 5645 msec_delay(50); 5646 break; 5647 case e1000_media_type_fiber: 5648 case e1000_media_type_internal_serdes: 5649 status = E1000_READ_REG(hw, E1000_STATUS); 5650 if (((status & E1000_STATUS_LU) == 0) || 5651 (hw->phy.media_type == 5652 e1000_media_type_internal_serdes)) { 5653 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5654 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 5655 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5656 } 5657 5658 /* Disable autoneg by setting bit 31 of TXCW to zero */ 5659 txcw = E1000_READ_REG(hw, E1000_TXCW); 5660 txcw &= ~((uint32_t)1 << 31); 5661 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 5662 5663 /* 5664 * Write 0x410 to Serdes Control register 5665 * to enable Serdes analog loopback 5666 */ 5667 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 5668 msec_delay(10); 5669 break; 5670 default: 5671 break; 5672 } 5673 break; 5674 case e1000_82574: 5675 case e1000_80003es2lan: 5676 case e1000_ich9lan: 5677 case e1000_ich10lan: 5678 (void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata); 5679 (void) e1000_write_phy_reg(hw, GG82563_REG(6, 16), 5680 phydata | (1 << 5)); 5681 Adapter->param_adv_autoneg = 1; 5682 Adapter->param_adv_1000fdx = 1; 5683 (void) e1000g_reset_link(Adapter); 5684 break; 5685 } 5686 } 5687 5688 static void 5689 e1000g_set_external_loopback_100(struct e1000g *Adapter) 5690 { 5691 struct e1000_hw *hw; 5692 uint32_t ctrl; 5693 uint16_t phy_ctrl; 5694 5695 hw = &Adapter->shared; 5696 5697 /* Disable Smart Power Down */ 5698 phy_spd_state(hw, B_FALSE); 5699 5700 phy_ctrl = (MII_CR_FULL_DUPLEX | 5701 MII_CR_SPEED_100); 5702 5703 /* Force 100/FD, reset PHY */ 5704 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5705 phy_ctrl | MII_CR_RESET); /* 0xA100 */ 5706 msec_delay(10); 5707 5708 /* Force 100/FD */ 5709 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5710 phy_ctrl); /* 0x2100 */ 5711 msec_delay(10); 5712 5713 /* Now setup the MAC to the same speed/duplex as the PHY. 
*/ 5714 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5715 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5716 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */ 5717 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5718 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5719 E1000_CTRL_SPD_100 | /* Force Speed to 100 */ 5720 E1000_CTRL_FD); /* Force Duplex to FULL */ 5721 5722 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5723 } 5724 5725 static void 5726 e1000g_set_external_loopback_10(struct e1000g *Adapter) 5727 { 5728 struct e1000_hw *hw; 5729 uint32_t ctrl; 5730 uint16_t phy_ctrl; 5731 5732 hw = &Adapter->shared; 5733 5734 /* Disable Smart Power Down */ 5735 phy_spd_state(hw, B_FALSE); 5736 5737 phy_ctrl = (MII_CR_FULL_DUPLEX | 5738 MII_CR_SPEED_10); 5739 5740 /* Force 10/FD, reset PHY */ 5741 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5742 phy_ctrl | MII_CR_RESET); /* 0x8100 */ 5743 msec_delay(10); 5744 5745 /* Force 10/FD */ 5746 (void) e1000_write_phy_reg(hw, PHY_CONTROL, 5747 phy_ctrl); /* 0x0100 */ 5748 msec_delay(10); 5749 5750 /* Now setup the MAC to the same speed/duplex as the PHY. */ 5751 ctrl = E1000_READ_REG(hw, E1000_CTRL); 5752 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 5753 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */ 5754 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 5755 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 5756 E1000_CTRL_SPD_10 | /* Force Speed to 10 */ 5757 E1000_CTRL_FD); /* Force Duplex to FULL */ 5758 5759 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 5760 } 5761 5762 #ifdef __sparc 5763 static boolean_t 5764 e1000g_find_mac_address(struct e1000g *Adapter) 5765 { 5766 struct e1000_hw *hw = &Adapter->shared; 5767 uchar_t *bytes; 5768 struct ether_addr sysaddr; 5769 uint_t nelts; 5770 int err; 5771 boolean_t found = B_FALSE; 5772 5773 /* 5774 * The "vendor's factory-set address" may already have 5775 * been extracted from the chip, but if the property 5776 * "local-mac-address" is set we use that instead. 5777 * 5778 * We check whether it looks like an array of 6 5779 * bytes (which it should, if OBP set it). If we can't 5780 * make sense of it this way, we'll ignore it. 5781 */ 5782 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 5783 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 5784 if (err == DDI_PROP_SUCCESS) { 5785 if (nelts == ETHERADDRL) { 5786 while (nelts--) 5787 hw->mac.addr[nelts] = bytes[nelts]; 5788 found = B_TRUE; 5789 } 5790 ddi_prop_free(bytes); 5791 } 5792 5793 /* 5794 * Look up the OBP property "local-mac-address?". If the user has set 5795 * 'local-mac-address? = false', use "the system address" instead. 5796 */ 5797 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0, 5798 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 5799 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 5800 if (localetheraddr(NULL, &sysaddr) != 0) { 5801 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 5802 found = B_TRUE; 5803 } 5804 } 5805 ddi_prop_free(bytes); 5806 } 5807 5808 /* 5809 * Finally(!), if there's a valid "mac-address" property (created 5810 * if we netbooted from this interface), we must use this instead 5811 * of any of the above to ensure that the NFS/install server doesn't 5812 * get confused by the address changing as Solaris takes over! 
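 * The resulting lookup order is local-mac-address, then the system-wide
 * address when local-mac-address? is false, and finally mac-address,
 * which overrides the others.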
5813 */ 5814 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 5815 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 5816 if (err == DDI_PROP_SUCCESS) { 5817 if (nelts == ETHERADDRL) { 5818 while (nelts--) 5819 hw->mac.addr[nelts] = bytes[nelts]; 5820 found = B_TRUE; 5821 } 5822 ddi_prop_free(bytes); 5823 } 5824 5825 if (found) { 5826 bcopy(hw->mac.addr, hw->mac.perm_addr, 5827 ETHERADDRL); 5828 } 5829 5830 return (found); 5831 } 5832 #endif 5833 5834 static int 5835 e1000g_add_intrs(struct e1000g *Adapter) 5836 { 5837 dev_info_t *devinfo; 5838 int intr_types; 5839 int rc; 5840 5841 devinfo = Adapter->dip; 5842 5843 /* Get supported interrupt types */ 5844 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 5845 5846 if (rc != DDI_SUCCESS) { 5847 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5848 "Get supported interrupt types failed: %d\n", rc); 5849 return (DDI_FAILURE); 5850 } 5851 5852 /* 5853 * Based on Intel Technical Advisory document (TA-160), there are some 5854 * cases where some older Intel PCI-X NICs may "advertise" to the OS 5855 * that it supports MSI, but in fact has problems. 5856 * So we should only enable MSI for PCI-E NICs and disable MSI for old 5857 * PCI/PCI-X NICs. 5858 */ 5859 if (Adapter->shared.mac.type < e1000_82571) 5860 Adapter->msi_enable = B_FALSE; 5861 5862 if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) { 5863 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI); 5864 5865 if (rc != DDI_SUCCESS) { 5866 /* EMPTY */ 5867 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 5868 "Add MSI failed, trying Legacy interrupts\n"); 5869 } else { 5870 Adapter->intr_type = DDI_INTR_TYPE_MSI; 5871 } 5872 } 5873 5874 if ((Adapter->intr_type == 0) && 5875 (intr_types & DDI_INTR_TYPE_FIXED)) { 5876 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED); 5877 5878 if (rc != DDI_SUCCESS) { 5879 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 5880 "Add Legacy interrupts failed\n"); 5881 return (DDI_FAILURE); 5882 } 5883 5884 Adapter->intr_type = DDI_INTR_TYPE_FIXED; 5885 } 5886 5887 if (Adapter->intr_type == 0) { 5888 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 5889 "No interrupts registered\n"); 5890 return (DDI_FAILURE); 5891 } 5892 5893 return (DDI_SUCCESS); 5894 } 5895 5896 /* 5897 * e1000g_intr_add() handles MSI/Legacy interrupts 5898 */ 5899 static int 5900 e1000g_intr_add(struct e1000g *Adapter, int intr_type) 5901 { 5902 dev_info_t *devinfo; 5903 int count, avail, actual; 5904 int x, y, rc, inum = 0; 5905 int flag; 5906 ddi_intr_handler_t *intr_handler; 5907 5908 devinfo = Adapter->dip; 5909 5910 /* get number of interrupts */ 5911 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 5912 if ((rc != DDI_SUCCESS) || (count == 0)) { 5913 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 5914 "Get interrupt number failed. Return: %d, count: %d\n", 5915 rc, count); 5916 return (DDI_FAILURE); 5917 } 5918 5919 /* get number of available interrupts */ 5920 rc = ddi_intr_get_navail(devinfo, intr_type, &avail); 5921 if ((rc != DDI_SUCCESS) || (avail == 0)) { 5922 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 5923 "Get interrupt available number failed. 
" 5924 "Return: %d, available: %d\n", rc, avail); 5925 return (DDI_FAILURE); 5926 } 5927 5928 if (avail < count) { 5929 /* EMPTY */ 5930 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 5931 "Interrupts count: %d, available: %d\n", 5932 count, avail); 5933 } 5934 5935 /* Allocate an array of interrupt handles */ 5936 Adapter->intr_size = count * sizeof (ddi_intr_handle_t); 5937 Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP); 5938 5939 /* Set NORMAL behavior for both MSI and FIXED interrupt */ 5940 flag = DDI_INTR_ALLOC_NORMAL; 5941 5942 /* call ddi_intr_alloc() */ 5943 rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum, 5944 count, &actual, flag); 5945 5946 if ((rc != DDI_SUCCESS) || (actual == 0)) { 5947 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5948 "Allocate interrupts failed: %d\n", rc); 5949 5950 kmem_free(Adapter->htable, Adapter->intr_size); 5951 return (DDI_FAILURE); 5952 } 5953 5954 if (actual < count) { 5955 /* EMPTY */ 5956 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 5957 "Interrupts requested: %d, received: %d\n", 5958 count, actual); 5959 } 5960 5961 Adapter->intr_cnt = actual; 5962 5963 /* Get priority for first msi, assume remaining are all the same */ 5964 rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri); 5965 5966 if (rc != DDI_SUCCESS) { 5967 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5968 "Get interrupt priority failed: %d\n", rc); 5969 5970 /* Free already allocated intr */ 5971 for (y = 0; y < actual; y++) 5972 (void) ddi_intr_free(Adapter->htable[y]); 5973 5974 kmem_free(Adapter->htable, Adapter->intr_size); 5975 return (DDI_FAILURE); 5976 } 5977 5978 /* 5979 * In Legacy Interrupt mode, for PCI-Express adapters, we should 5980 * use the interrupt service routine e1000g_intr_pciexpress() 5981 * to avoid interrupt stealing when sharing interrupt with other 5982 * devices. 
5983 */ 5984 if (Adapter->shared.mac.type < e1000_82571) 5985 intr_handler = (ddi_intr_handler_t *)e1000g_intr; 5986 else 5987 intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress; 5988 5989 /* Call ddi_intr_add_handler() */ 5990 for (x = 0; x < actual; x++) { 5991 rc = ddi_intr_add_handler(Adapter->htable[x], 5992 intr_handler, (caddr_t)Adapter, NULL); 5993 5994 if (rc != DDI_SUCCESS) { 5995 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5996 "Add interrupt handler failed: %d\n", rc); 5997 5998 /* Remove already added handler */ 5999 for (y = 0; y < x; y++) 6000 (void) ddi_intr_remove_handler( 6001 Adapter->htable[y]); 6002 6003 /* Free already allocated intr */ 6004 for (y = 0; y < actual; y++) 6005 (void) ddi_intr_free(Adapter->htable[y]); 6006 6007 kmem_free(Adapter->htable, Adapter->intr_size); 6008 return (DDI_FAILURE); 6009 } 6010 } 6011 6012 rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap); 6013 6014 if (rc != DDI_SUCCESS) { 6015 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6016 "Get interrupt cap failed: %d\n", rc); 6017 6018 /* Free already allocated intr */ 6019 for (y = 0; y < actual; y++) { 6020 (void) ddi_intr_remove_handler(Adapter->htable[y]); 6021 (void) ddi_intr_free(Adapter->htable[y]); 6022 } 6023 6024 kmem_free(Adapter->htable, Adapter->intr_size); 6025 return (DDI_FAILURE); 6026 } 6027 6028 return (DDI_SUCCESS); 6029 } 6030 6031 static int 6032 e1000g_rem_intrs(struct e1000g *Adapter) 6033 { 6034 int x; 6035 int rc; 6036 6037 for (x = 0; x < Adapter->intr_cnt; x++) { 6038 rc = ddi_intr_remove_handler(Adapter->htable[x]); 6039 if (rc != DDI_SUCCESS) { 6040 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6041 "Remove intr handler failed: %d\n", rc); 6042 return (DDI_FAILURE); 6043 } 6044 6045 rc = ddi_intr_free(Adapter->htable[x]); 6046 if (rc != DDI_SUCCESS) { 6047 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6048 "Free intr failed: %d\n", rc); 6049 return (DDI_FAILURE); 6050 } 6051 } 6052 6053 kmem_free(Adapter->htable, Adapter->intr_size); 6054 6055 return (DDI_SUCCESS); 6056 } 6057 6058 static int 6059 e1000g_enable_intrs(struct e1000g *Adapter) 6060 { 6061 int x; 6062 int rc; 6063 6064 /* Enable interrupts */ 6065 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) { 6066 /* Call ddi_intr_block_enable() for MSI */ 6067 rc = ddi_intr_block_enable(Adapter->htable, 6068 Adapter->intr_cnt); 6069 if (rc != DDI_SUCCESS) { 6070 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6071 "Enable block intr failed: %d\n", rc); 6072 return (DDI_FAILURE); 6073 } 6074 } else { 6075 /* Call ddi_intr_enable() for Legacy/MSI non block enable */ 6076 for (x = 0; x < Adapter->intr_cnt; x++) { 6077 rc = ddi_intr_enable(Adapter->htable[x]); 6078 if (rc != DDI_SUCCESS) { 6079 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6080 "Enable intr failed: %d\n", rc); 6081 return (DDI_FAILURE); 6082 } 6083 } 6084 } 6085 6086 return (DDI_SUCCESS); 6087 } 6088 6089 static int 6090 e1000g_disable_intrs(struct e1000g *Adapter) 6091 { 6092 int x; 6093 int rc; 6094 6095 /* Disable all interrupts */ 6096 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) { 6097 rc = ddi_intr_block_disable(Adapter->htable, 6098 Adapter->intr_cnt); 6099 if (rc != DDI_SUCCESS) { 6100 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6101 "Disable block intr failed: %d\n", rc); 6102 return (DDI_FAILURE); 6103 } 6104 } else { 6105 for (x = 0; x < Adapter->intr_cnt; x++) { 6106 rc = ddi_intr_disable(Adapter->htable[x]); 6107 if (rc != DDI_SUCCESS) { 6108 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 6109 "Disable intr failed: 
%d\n", rc); 6110 return (DDI_FAILURE); 6111 } 6112 } 6113 } 6114 6115 return (DDI_SUCCESS); 6116 } 6117 6118 /* 6119 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter 6120 */ 6121 static void 6122 e1000g_get_phy_state(struct e1000g *Adapter) 6123 { 6124 struct e1000_hw *hw = &Adapter->shared; 6125 6126 if (hw->phy.media_type == e1000_media_type_copper) { 6127 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl); 6128 (void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status); 6129 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, 6130 &Adapter->phy_an_adv); 6131 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, 6132 &Adapter->phy_an_exp); 6133 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS, 6134 &Adapter->phy_ext_status); 6135 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, 6136 &Adapter->phy_1000t_ctrl); 6137 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, 6138 &Adapter->phy_1000t_status); 6139 (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY, 6140 &Adapter->phy_lp_able); 6141 6142 Adapter->param_autoneg_cap = 6143 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0; 6144 Adapter->param_pause_cap = 6145 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; 6146 Adapter->param_asym_pause_cap = 6147 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; 6148 Adapter->param_1000fdx_cap = 6149 ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 6150 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0; 6151 Adapter->param_1000hdx_cap = 6152 ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) || 6153 (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0; 6154 Adapter->param_100t4_cap = 6155 (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0; 6156 Adapter->param_100fdx_cap = 6157 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 6158 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0; 6159 Adapter->param_100hdx_cap = 6160 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) || 6161 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0; 6162 Adapter->param_10fdx_cap = 6163 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0; 6164 Adapter->param_10hdx_cap = 6165 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0; 6166 6167 Adapter->param_adv_autoneg = hw->mac.autoneg; 6168 Adapter->param_adv_pause = 6169 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; 6170 Adapter->param_adv_asym_pause = 6171 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; 6172 Adapter->param_adv_1000hdx = 6173 (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0; 6174 Adapter->param_adv_100t4 = 6175 (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0; 6176 if (Adapter->param_adv_autoneg == 1) { 6177 Adapter->param_adv_1000fdx = 6178 (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS) 6179 ? 1 : 0; 6180 Adapter->param_adv_100fdx = 6181 (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS) 6182 ? 1 : 0; 6183 Adapter->param_adv_100hdx = 6184 (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS) 6185 ? 1 : 0; 6186 Adapter->param_adv_10fdx = 6187 (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0; 6188 Adapter->param_adv_10hdx = 6189 (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0; 6190 } 6191 6192 Adapter->param_lp_autoneg = 6193 (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0; 6194 Adapter->param_lp_pause = 6195 (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0; 6196 Adapter->param_lp_asym_pause = 6197 (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0; 6198 Adapter->param_lp_1000fdx = 6199 (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 

/*
 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter
 */
static void
e1000g_get_phy_state(struct e1000g *Adapter)
{
	struct e1000_hw *hw = &Adapter->shared;

	if (hw->phy.media_type == e1000_media_type_copper) {
		(void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
		(void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
		(void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
		    &Adapter->phy_an_adv);
		(void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP,
		    &Adapter->phy_an_exp);
		(void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
		    &Adapter->phy_ext_status);
		(void) e1000_read_phy_reg(hw, PHY_1000T_CTRL,
		    &Adapter->phy_1000t_ctrl);
		(void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
		    &Adapter->phy_1000t_status);
		(void) e1000_read_phy_reg(hw, PHY_LP_ABILITY,
		    &Adapter->phy_lp_able);

		Adapter->param_autoneg_cap =
		    (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
		Adapter->param_pause_cap =
		    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
		Adapter->param_asym_pause_cap =
		    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
		Adapter->param_1000fdx_cap =
		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
		    (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
		Adapter->param_1000hdx_cap =
		    ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
		    (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
		Adapter->param_100t4_cap =
		    (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
		Adapter->param_100fdx_cap =
		    ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
		    (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
		Adapter->param_100hdx_cap =
		    ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
		    (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
		Adapter->param_10fdx_cap =
		    (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
		Adapter->param_10hdx_cap =
		    (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;

		Adapter->param_adv_autoneg = hw->mac.autoneg;
		Adapter->param_adv_pause =
		    (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
		Adapter->param_adv_asym_pause =
		    (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
		Adapter->param_adv_1000hdx =
		    (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
		Adapter->param_adv_100t4 =
		    (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
		if (Adapter->param_adv_autoneg == 1) {
			Adapter->param_adv_1000fdx =
			    (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS)
			    ? 1 : 0;
			Adapter->param_adv_100fdx =
			    (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS)
			    ? 1 : 0;
			Adapter->param_adv_100hdx =
			    (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS)
			    ? 1 : 0;
			Adapter->param_adv_10fdx =
			    (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
			Adapter->param_adv_10hdx =
			    (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
		}

		Adapter->param_lp_autoneg =
		    (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
		Adapter->param_lp_pause =
		    (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
		Adapter->param_lp_asym_pause =
		    (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
		Adapter->param_lp_1000fdx =
		    (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
		Adapter->param_lp_1000hdx =
		    (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
		Adapter->param_lp_100t4 =
		    (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
		Adapter->param_lp_100fdx =
		    (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
		Adapter->param_lp_100hdx =
		    (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
		Adapter->param_lp_10fdx =
		    (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
		Adapter->param_lp_10hdx =
		    (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
	} else {
		/*
		 * 1Gig fiber adapters only offer 1Gig full duplex, meaning
		 * they can only work with a 1Gig full-duplex link partner.
		 */
		Adapter->param_autoneg_cap = 0;
		Adapter->param_pause_cap = 1;
		Adapter->param_asym_pause_cap = 1;
		Adapter->param_1000fdx_cap = 1;
		Adapter->param_1000hdx_cap = 0;
		Adapter->param_100t4_cap = 0;
		Adapter->param_100fdx_cap = 0;
		Adapter->param_100hdx_cap = 0;
		Adapter->param_10fdx_cap = 0;
		Adapter->param_10hdx_cap = 0;

		Adapter->param_adv_autoneg = 0;
		Adapter->param_adv_pause = 1;
		Adapter->param_adv_asym_pause = 1;
		Adapter->param_adv_1000fdx = 1;
		Adapter->param_adv_1000hdx = 0;
		Adapter->param_adv_100t4 = 0;
		Adapter->param_adv_100fdx = 0;
		Adapter->param_adv_100hdx = 0;
		Adapter->param_adv_10fdx = 0;
		Adapter->param_adv_10hdx = 0;

		Adapter->param_lp_autoneg = 0;
		Adapter->param_lp_pause = 0;
		Adapter->param_lp_asym_pause = 0;
		Adapter->param_lp_1000fdx = 0;
		Adapter->param_lp_1000hdx = 0;
		Adapter->param_lp_100t4 = 0;
		Adapter->param_lp_100fdx = 0;
		Adapter->param_lp_100hdx = 0;
		Adapter->param_lp_10fdx = 0;
		Adapter->param_lp_10hdx = 0;
	}
}

/*
 * FMA support
 */

int
e1000g_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}

int
e1000g_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}
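
/*
 * Illustrative sketch (not part of the driver): the two checks above are
 * meant to be called after programmed I/O or DMA activity on a handle, and
 * a caller would typically degrade the service state so FMA can retire the
 * device if the access was flagged.  The helper below, its name, the
 * E1000G_EXAMPLE_CODE guard, and the register-handle field name
 * (osdep.reg_handle) are assumptions for illustration only.
 */
#ifdef E1000G_EXAMPLE_CODE
static int
e1000g_example_check_io(struct e1000g *Adapter)
{
	/* verify the last register accesses completed without fault */
	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
#endif	/* E1000G_EXAMPLE_CODE */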

/*
 * The IO fault service error handling callback function
 */
/* ARGSUSED2 */
static int
e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

static void
e1000g_fm_init(struct e1000g *Adapter)
{
	ddi_iblock_cookie_t iblk;
	int fma_dma_flag;

	/* Only register with IO Fault Services if we have some capability */
	if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		fma_dma_flag = 1;
	} else {
		fma_dma_flag = 0;
	}

	(void) e1000g_set_fma_flags(fma_dma_flag);

	if (Adapter->fm_capabilities) {

		/* Register capabilities with IO Fault Services */
		ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
			pci_ereport_setup(Adapter->dip);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
			ddi_fm_handler_register(Adapter->dip,
			    e1000g_fm_error_cb, (void*) Adapter);
	}
}

static void
e1000g_fm_fini(struct e1000g *Adapter)
{
	/* Only unregister FMA capabilities if we registered some */
	if (Adapter->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
			pci_ereport_teardown(Adapter->dip);

		/*
		 * Un-register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
			ddi_fm_handler_unregister(Adapter->dip);

		/* Unregister from IO Fault Services */
		mutex_enter(&e1000g_rx_detach_lock);
		ddi_fm_fini(Adapter->dip);
		if (Adapter->priv_dip != NULL) {
			DEVI(Adapter->priv_dip)->devi_fmhdl = NULL;
		}
		mutex_exit(&e1000g_rx_detach_lock);
	}
}

void
e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
		ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}
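
/*
 * Illustrative sketch (not part of the driver): e1000g_fm_ereport() above is
 * a thin wrapper, so a caller that detects a bad hardware state would pair
 * it with a service-impact report.  The helper below, its name, and the
 * E1000G_EXAMPLE_CODE guard are hypothetical; DDI_FM_DEVICE_INVAL_STATE is
 * one of the standard device ereport class suffixes.
 */
#ifdef E1000G_EXAMPLE_CODE
static void
e1000g_example_report_bad_state(struct e1000g *Adapter)
{
	/* post a device.inval_state ereport against this instance */
	e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);

	/* and tell FMA the service this device provides is degraded */
	ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
}
#endif	/* E1000G_EXAMPLE_CODE */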

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
e1000g_quiesce(dev_info_t *devinfo)
{
	struct e1000g *Adapter;

	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);

	if (Adapter == NULL)
		return (DDI_FAILURE);

	e1000g_clear_all_interrupts(Adapter);

	(void) e1000_reset_hw(&Adapter->shared);

	/* Setup our HW Tx Head & Tail descriptor pointers */
	E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
	E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);

	/* Setup our HW Rx Head & Tail descriptor pointers */
	E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
	E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);

	return (DDI_SUCCESS);
}

/*
 * synchronize the adv* and en* parameters.
 *
 * See comments in <sys/dld.h> for details of the *_en_*
 * parameters. The usage of ndd for setting adv parameters will
 * synchronize all the en parameters with the e1000g parameters,
 * implicitly disabling any settings made via dladm.
 */
static void
e1000g_param_sync(struct e1000g *Adapter)
{
	Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
	Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
	Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
	Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
	Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
	Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
}

/*
 * e1000g_get_driver_control - tell manageability firmware that the driver
 * has control.
 */
static void
e1000g_get_driver_control(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	uint32_t swsm;

	/* tell manageability firmware the driver has taken over */
	switch (hw->mac.type) {
	case e1000_82573:
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_82574:
	case e1000_80003es2lan:
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pchlan:
	case e1000_pch2lan:
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		/* no manageability firmware: do nothing */
		break;
	}
}

/*
 * e1000g_release_driver_control - tell manageability firmware that the driver
 * has released control.
 */
static void
e1000g_release_driver_control(struct e1000_hw *hw)
{
	uint32_t ctrl_ext;
	uint32_t swsm;

	/* tell manageability firmware the driver has released control */
	switch (hw->mac.type) {
	case e1000_82573:
		swsm = E1000_READ_REG(hw, E1000_SWSM);
		E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_82574:
	case e1000_80003es2lan:
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pchlan:
	case e1000_pch2lan:
		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(hw, E1000_CTRL_EXT,
		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		/* no manageability firmware: do nothing */
		break;
	}
}
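
/*
 * Illustrative sketch (not part of the driver): the DRV_LOAD handshake above
 * is normally held for as long as the driver owns the hardware, i.e. taken
 * when the interface is started and dropped when it is stopped.  The helper
 * below, its name, and the E1000G_EXAMPLE_CODE guard are hypothetical and
 * only show that bracketing.
 */
#ifdef E1000G_EXAMPLE_CODE
static void
e1000g_example_mng_ownership(struct e1000g *Adapter, boolean_t starting)
{
	if (starting)
		e1000g_get_driver_control(&Adapter->shared);
	else
		e1000g_release_driver_control(&Adapter->shared);
}
#endif	/* E1000G_EXAMPLE_CODE */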

/*
 * Restore e1000g promiscuous mode.
 */
static void
e1000g_restore_promisc(struct e1000g *Adapter)
{
	if (Adapter->e1000g_promisc) {
		uint32_t rctl;

		rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
		E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
	}
}
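
/*
 * Illustrative sketch (not part of the driver): a full chip reset clears
 * RCTL, so the cached promiscuous setting has to be re-applied once the
 * receiver has been reprogrammed.  The helper below, its name, and the
 * E1000G_EXAMPLE_CODE guard are hypothetical; in practice the restore
 * belongs in a (re)initialization path.
 */
#ifdef E1000G_EXAMPLE_CODE
static void
e1000g_example_after_reset(struct e1000g *Adapter)
{
	/* ... chip reset and receive ring re-initialization ... */

	/* re-apply unicast/multicast/broadcast promiscuous bits in RCTL */
	e1000g_restore_promisc(Adapter);
}
#endif	/* E1000G_EXAMPLE_CODE */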