/*
 * This file is provided under a CDDLv1 license. When using or
 * redistributing this file, you may do so under this license.
 * In redistributing this file this license must be included
 * and no other modification of this header file is permitted.
 *
 * CDDL LICENSE SUMMARY
 *
 * Copyright(c) 1999 - 2008 Intel Corporation. All rights reserved.
 *
 * The contents of this file are subject to the terms of Version
 * 1.0 of the Common Development and Distribution License (the "License").
 *
 * You should have received a copy of the License with this software.
 * You can obtain a copy of the License at
 * http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms of the CDDLv1.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * **********************************************************************
 * *									*
 * Module Name:								*
 *   e1000g_main.c							*
 * *									*
 * Abstract:								*
 *   This file contains the interface routines for the Solaris OS.	*
 *   It has all DDI entry point routines and GLD entry point routines.	*
 * *									*
 *   This file also contains routines that take care of		*
 *   initialization, the uninit routine and the interrupt routine.	*
 * *									*
 * **********************************************************************
 */

#include <sys/dlpi.h>
#include <sys/mac.h>
#include <sys/dld.h>
#include "e1000g_sw.h"
#include "e1000g_debug.h"

static char ident[] = "Intel PRO/1000 Ethernet 5.2.7";
static char e1000g_string[] = "Intel(R) PRO/1000 Network Connection";
static char e1000g_version[] = "Driver Ver. 5.2.7";

/*
 * Prototypes for DDI entry points
 */
static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * Prototypes for init and intr routines
 */
static int e1000g_resume(dev_info_t *);
static int e1000g_suspend(dev_info_t *);
static uint_t e1000g_intr_pciexpress(caddr_t);
static uint_t e1000g_intr(caddr_t);
static void e1000g_intr_work(struct e1000g *, uint32_t);
#pragma inline(e1000g_intr_work)
static uint32_t e1000g_get_itr(uint32_t, uint32_t, uint32_t);
#pragma inline(e1000g_get_itr)
static int e1000g_init(struct e1000g *);
static int e1000g_start(struct e1000g *, boolean_t);
static void e1000g_stop(struct e1000g *, boolean_t);
static int e1000g_m_start(void *);
static void e1000g_m_stop(void *);
static int e1000g_m_promisc(void *, boolean_t);
static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
static int e1000g_m_unicst(void *, const uint8_t *);
static int e1000g_m_unicst_add(void *, mac_multi_addr_t *);
static int e1000g_m_unicst_remove(void *, mac_addr_slot_t);
static int e1000g_m_unicst_modify(void *, mac_multi_addr_t *);
static int e1000g_m_unicst_get(void *, mac_multi_addr_t *);
static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
    uint_t, const void *);
static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
    uint_t, void *);
static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
    const void *);
static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t,
    void *);
static void e1000g_init_locks(struct e1000g *);
static void e1000g_destroy_locks(struct e1000g *);
static int e1000g_identify_hardware(struct e1000g *);
static int e1000g_regs_map(struct e1000g *);
static int e1000g_set_driver_params(struct e1000g *);
static void e1000g_set_bufsize(struct e1000g *);
static int e1000g_register_mac(struct e1000g *);
static boolean_t e1000g_rx_drain(struct e1000g *);
static boolean_t e1000g_tx_drain(struct e1000g *);
static void e1000g_init_unicst(struct e1000g *);
static int e1000g_unicst_set(struct e1000g *, const uint8_t *, mac_addr_slot_t);

/*
 * Local routines
 */
static void e1000g_tx_clean(struct e1000g *);
static void e1000g_rx_clean(struct e1000g *);
static void e1000g_link_timer(void *);
static void e1000g_local_timer(void *);
static boolean_t e1000g_link_check(struct e1000g *);
static boolean_t e1000g_stall_check(struct e1000g *);
static void e1000g_smartspeed(struct e1000g *);
static void e1000g_get_conf(struct e1000g *);
static int e1000g_get_prop(struct e1000g *, char *, int, int, int);
static void enable_watchdog_timer(struct e1000g *);
static void disable_watchdog_timer(struct e1000g *);
static void start_watchdog_timer(struct e1000g *);
static void restart_watchdog_timer(struct e1000g *);
static void stop_watchdog_timer(struct e1000g *);
static void stop_link_timer(struct e1000g *);
static void stop_82547_timer(e1000g_tx_ring_t *);
static void e1000g_force_speed_duplex(struct e1000g *);
static void e1000g_get_max_frame_size(struct e1000g *);
static boolean_t is_valid_mac_addr(uint8_t *);
static void e1000g_unattach(dev_info_t *, struct e1000g *);
#ifdef E1000G_DEBUG
static void
e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *); 130 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *); 131 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *); 132 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *); 133 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *, 134 struct iocblk *, mblk_t *); 135 #endif 136 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *, 137 struct iocblk *, mblk_t *); 138 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t); 139 static void e1000g_set_internal_loopback(struct e1000g *); 140 static void e1000g_set_external_loopback_1000(struct e1000g *); 141 static void e1000g_set_external_loopback_100(struct e1000g *); 142 static void e1000g_set_external_loopback_10(struct e1000g *); 143 static int e1000g_add_intrs(struct e1000g *); 144 static int e1000g_intr_add(struct e1000g *, int); 145 static int e1000g_rem_intrs(struct e1000g *); 146 static int e1000g_enable_intrs(struct e1000g *); 147 static int e1000g_disable_intrs(struct e1000g *); 148 static boolean_t e1000g_link_up(struct e1000g *); 149 #ifdef __sparc 150 static boolean_t e1000g_find_mac_address(struct e1000g *); 151 #endif 152 static void e1000g_get_phy_state(struct e1000g *); 153 static void e1000g_free_priv_devi_node(struct e1000g *, boolean_t); 154 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 155 const void *impl_data); 156 static void e1000g_fm_init(struct e1000g *Adapter); 157 static void e1000g_fm_fini(struct e1000g *Adapter); 158 159 static struct cb_ops cb_ws_ops = { 160 nulldev, /* cb_open */ 161 nulldev, /* cb_close */ 162 nodev, /* cb_strategy */ 163 nodev, /* cb_print */ 164 nodev, /* cb_dump */ 165 nodev, /* cb_read */ 166 nodev, /* cb_write */ 167 nodev, /* cb_ioctl */ 168 nodev, /* cb_devmap */ 169 nodev, /* cb_mmap */ 170 nodev, /* cb_segmap */ 171 nochpoll, /* cb_chpoll */ 172 ddi_prop_op, /* cb_prop_op */ 173 NULL, /* cb_stream */ 174 D_MP | D_HOTPLUG, /* cb_flag */ 175 CB_REV, /* cb_rev */ 176 nodev, /* cb_aread */ 177 nodev /* cb_awrite */ 178 }; 179 180 static struct dev_ops ws_ops = { 181 DEVO_REV, /* devo_rev */ 182 0, /* devo_refcnt */ 183 NULL, /* devo_getinfo */ 184 nulldev, /* devo_identify */ 185 nulldev, /* devo_probe */ 186 e1000g_attach, /* devo_attach */ 187 e1000g_detach, /* devo_detach */ 188 nodev, /* devo_reset */ 189 &cb_ws_ops, /* devo_cb_ops */ 190 NULL, /* devo_bus_ops */ 191 ddi_power /* devo_power */ 192 }; 193 194 static struct modldrv modldrv = { 195 &mod_driverops, /* Type of module. 
This one is a driver */ 196 ident, /* Discription string */ 197 &ws_ops, /* driver ops */ 198 }; 199 200 static struct modlinkage modlinkage = { 201 MODREV_1, &modldrv, NULL 202 }; 203 204 /* Access attributes for register mapping */ 205 static ddi_device_acc_attr_t e1000g_regs_acc_attr = { 206 DDI_DEVICE_ATTR_V0, 207 DDI_STRUCTURE_LE_ACC, 208 DDI_STRICTORDER_ACC, 209 DDI_FLAGERR_ACC 210 }; 211 212 #define E1000G_M_CALLBACK_FLAGS \ 213 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP) 214 215 static mac_callbacks_t e1000g_m_callbacks = { 216 E1000G_M_CALLBACK_FLAGS, 217 e1000g_m_stat, 218 e1000g_m_start, 219 e1000g_m_stop, 220 e1000g_m_promisc, 221 e1000g_m_multicst, 222 e1000g_m_unicst, 223 e1000g_m_tx, 224 NULL, 225 e1000g_m_ioctl, 226 e1000g_m_getcapab, 227 NULL, 228 NULL, 229 e1000g_m_setprop, 230 e1000g_m_getprop 231 }; 232 233 /* 234 * Global variables 235 */ 236 uint32_t e1000g_mblks_pending = 0; 237 /* 238 * Workaround for Dynamic Reconfiguration support, for x86 platform only. 239 * Here we maintain a private dev_info list if e1000g_force_detach is 240 * enabled. If we force the driver to detach while there are still some 241 * rx buffers retained in the upper layer, we have to keep a copy of the 242 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data 243 * structure will be freed after the driver is detached. However when we 244 * finally free those rx buffers released by the upper layer, we need to 245 * refer to the dev_info to free the dma buffers. So we save a copy of 246 * the dev_info for this purpose. On x86 platform, we assume this copy 247 * of dev_info is always valid, but on SPARC platform, it could be invalid 248 * after the system board level DR operation. For this reason, the global 249 * variable e1000g_force_detach must be B_FALSE on SPARC platform. 250 */ 251 #ifdef __sparc 252 boolean_t e1000g_force_detach = B_FALSE; 253 #else 254 boolean_t e1000g_force_detach = B_TRUE; 255 #endif 256 private_devi_list_t *e1000g_private_devi_list = NULL; 257 258 /* 259 * The rwlock is defined to protect the whole processing of rx recycling 260 * and the rx packets release in detach processing to make them mutually 261 * exclusive. 262 * The rx recycling processes different rx packets in different threads, 263 * so it will be protected with RW_READER and it won't block any other rx 264 * recycling threads. 265 * While the detach processing will be protected with RW_WRITER to make 266 * it mutually exclusive with the rx recycling. 267 */ 268 krwlock_t e1000g_rx_detach_lock; 269 /* 270 * The rwlock e1000g_dma_type_lock is defined to protect the global flag 271 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA". 272 * If there are many e1000g instances, the system may run out of DVMA 273 * resources during the initialization of the instances, then the flag will 274 * be changed to "USE_DMA". Because different e1000g instances are initialized 275 * in parallel, we need to use this lock to protect the flag. 
276 */ 277 krwlock_t e1000g_dma_type_lock; 278 279 280 /* 281 * Loadable module configuration entry points for the driver 282 */ 283 284 /* 285 * _init - module initialization 286 */ 287 int 288 _init(void) 289 { 290 int status; 291 292 mac_init_ops(&ws_ops, WSNAME); 293 status = mod_install(&modlinkage); 294 if (status != DDI_SUCCESS) 295 mac_fini_ops(&ws_ops); 296 else { 297 rw_init(&e1000g_rx_detach_lock, NULL, RW_DRIVER, NULL); 298 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL); 299 } 300 301 return (status); 302 } 303 304 /* 305 * _fini - module finalization 306 */ 307 int 308 _fini(void) 309 { 310 int status; 311 312 rw_enter(&e1000g_rx_detach_lock, RW_READER); 313 if (e1000g_mblks_pending != 0) { 314 rw_exit(&e1000g_rx_detach_lock); 315 return (EBUSY); 316 } 317 rw_exit(&e1000g_rx_detach_lock); 318 319 status = mod_remove(&modlinkage); 320 if (status == DDI_SUCCESS) { 321 mac_fini_ops(&ws_ops); 322 323 if (e1000g_force_detach) { 324 private_devi_list_t *devi_node; 325 326 rw_enter(&e1000g_rx_detach_lock, RW_WRITER); 327 while (e1000g_private_devi_list != NULL) { 328 devi_node = e1000g_private_devi_list; 329 e1000g_private_devi_list = 330 e1000g_private_devi_list->next; 331 332 kmem_free(devi_node->priv_dip, 333 sizeof (struct dev_info)); 334 kmem_free(devi_node, 335 sizeof (private_devi_list_t)); 336 } 337 rw_exit(&e1000g_rx_detach_lock); 338 } 339 340 rw_destroy(&e1000g_rx_detach_lock); 341 rw_destroy(&e1000g_dma_type_lock); 342 } 343 344 return (status); 345 } 346 347 /* 348 * _info - module information 349 */ 350 int 351 _info(struct modinfo *modinfop) 352 { 353 return (mod_info(&modlinkage, modinfop)); 354 } 355 356 /* 357 * e1000g_attach - driver attach 358 * 359 * This function is the device-specific initialization entry 360 * point. This entry point is required and must be written. 361 * The DDI_ATTACH command must be provided in the attach entry 362 * point. When attach() is called with cmd set to DDI_ATTACH, 363 * all normal kernel services (such as kmem_alloc(9F)) are 364 * available for use by the driver. 365 * 366 * The attach() function will be called once for each instance 367 * of the device on the system with cmd set to DDI_ATTACH. 368 * Until attach() succeeds, the only driver entry points which 369 * may be called are open(9E) and getinfo(9E). 370 */ 371 static int 372 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 373 { 374 struct e1000g *Adapter; 375 struct e1000_hw *hw; 376 struct e1000g_osdep *osdep; 377 int instance; 378 379 switch (cmd) { 380 default: 381 e1000g_log(NULL, CE_WARN, 382 "Unsupported command send to e1000g_attach... 
"); 383 return (DDI_FAILURE); 384 385 case DDI_RESUME: 386 return (e1000g_resume(devinfo)); 387 388 case DDI_ATTACH: 389 break; 390 } 391 392 /* 393 * get device instance number 394 */ 395 instance = ddi_get_instance(devinfo); 396 397 /* 398 * Allocate soft data structure 399 */ 400 Adapter = 401 (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP); 402 403 Adapter->dip = devinfo; 404 Adapter->instance = instance; 405 Adapter->tx_ring->adapter = Adapter; 406 Adapter->rx_ring->adapter = Adapter; 407 408 hw = &Adapter->shared; 409 osdep = &Adapter->osdep; 410 hw->back = osdep; 411 osdep->adapter = Adapter; 412 413 ddi_set_driver_private(devinfo, (caddr_t)Adapter); 414 415 /* 416 * Initialize for fma support 417 */ 418 Adapter->fm_capabilities = e1000g_get_prop(Adapter, "fm-capable", 419 0, 0x0f, 420 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 421 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 422 e1000g_fm_init(Adapter); 423 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT; 424 425 /* 426 * PCI Configure 427 */ 428 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { 429 e1000g_log(Adapter, CE_WARN, "PCI configuration failed"); 430 goto attach_fail; 431 } 432 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; 433 434 /* 435 * Setup hardware 436 */ 437 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) { 438 e1000g_log(Adapter, CE_WARN, "Identify hardware failed"); 439 goto attach_fail; 440 } 441 442 /* 443 * Map in the device registers. 444 */ 445 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) { 446 e1000g_log(Adapter, CE_WARN, "Mapping registers failed"); 447 goto attach_fail; 448 } 449 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP; 450 451 /* 452 * Initialize driver parameters 453 */ 454 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) { 455 goto attach_fail; 456 } 457 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP; 458 459 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 460 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 461 goto attach_fail; 462 } 463 464 /* 465 * Initialize interrupts 466 */ 467 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) { 468 e1000g_log(Adapter, CE_WARN, "Add interrupts failed"); 469 goto attach_fail; 470 } 471 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 472 473 /* 474 * Initialize mutex's for this device. 
475 * Do this before enabling the interrupt handler and 476 * register the softint to avoid the condition where 477 * interrupt handler can try using uninitialized mutex 478 */ 479 e1000g_init_locks(Adapter); 480 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS; 481 482 /* 483 * Initialize Driver Counters 484 */ 485 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) { 486 e1000g_log(Adapter, CE_WARN, "Init stats failed"); 487 goto attach_fail; 488 } 489 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS; 490 491 /* 492 * Initialize chip hardware and software structures 493 */ 494 if (e1000g_init(Adapter) != DDI_SUCCESS) { 495 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed"); 496 goto attach_fail; 497 } 498 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 499 500 /* 501 * Initialize NDD parameters 502 */ 503 if (e1000g_nd_init(Adapter) != DDI_SUCCESS) { 504 e1000g_log(Adapter, CE_WARN, "Init ndd failed"); 505 goto attach_fail; 506 } 507 Adapter->attach_progress |= ATTACH_PROGRESS_NDD; 508 509 /* 510 * Register the driver to the MAC 511 */ 512 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) { 513 e1000g_log(Adapter, CE_WARN, "Register MAC failed"); 514 goto attach_fail; 515 } 516 Adapter->attach_progress |= ATTACH_PROGRESS_MAC; 517 518 /* 519 * Now that mutex locks are initialized, and the chip is also 520 * initialized, enable interrupts. 521 */ 522 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) { 523 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed"); 524 goto attach_fail; 525 } 526 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 527 528 /* 529 * If e1000g_force_detach is enabled, in global private dip list, 530 * we will create a new entry, which maintains the priv_dip for DR 531 * supports after driver detached. 532 */ 533 if (e1000g_force_detach) { 534 private_devi_list_t *devi_node; 535 536 Adapter->priv_dip = 537 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP); 538 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip), 539 sizeof (struct dev_info)); 540 541 devi_node = 542 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP); 543 544 rw_enter(&e1000g_rx_detach_lock, RW_WRITER); 545 devi_node->priv_dip = Adapter->priv_dip; 546 devi_node->flag = E1000G_PRIV_DEVI_ATTACH; 547 devi_node->next = e1000g_private_devi_list; 548 e1000g_private_devi_list = devi_node; 549 rw_exit(&e1000g_rx_detach_lock); 550 } 551 552 cmn_err(CE_CONT, "!%s, %s\n", e1000g_string, e1000g_version); 553 554 return (DDI_SUCCESS); 555 556 attach_fail: 557 e1000g_unattach(devinfo, Adapter); 558 return (DDI_FAILURE); 559 } 560 561 static int 562 e1000g_register_mac(struct e1000g *Adapter) 563 { 564 struct e1000_hw *hw = &Adapter->shared; 565 mac_register_t *mac; 566 int err; 567 568 if ((mac = mac_alloc(MAC_VERSION)) == NULL) 569 return (DDI_FAILURE); 570 571 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 572 mac->m_driver = Adapter; 573 mac->m_dip = Adapter->dip; 574 mac->m_src_addr = hw->mac.addr; 575 mac->m_callbacks = &e1000g_m_callbacks; 576 mac->m_min_sdu = 0; 577 mac->m_max_sdu = Adapter->default_mtu; 578 mac->m_margin = VLAN_TAGSZ; 579 580 err = mac_register(mac, &Adapter->mh); 581 mac_free(mac); 582 583 return (err == 0 ? 
	    DDI_SUCCESS : DDI_FAILURE);
}

static int
e1000g_identify_hardware(struct e1000g *Adapter)
{
	struct e1000_hw *hw = &Adapter->shared;
	struct e1000g_osdep *osdep = &Adapter->osdep;

	/* Get the device id */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
		E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
		    "MAC type could not be set properly.");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

static int
e1000g_regs_map(struct e1000g *Adapter)
{
	dev_info_t *devinfo = Adapter->dip;
	struct e1000_hw *hw = &Adapter->shared;
	struct e1000g_osdep *osdep = &Adapter->osdep;
	off_t mem_size;

	/*
	 * First get the size of the device register set to be mapped. The
	 * second parameter is the register set we are interested in. On our
	 * Wiseman adapters, set 0 is the config registers and set 1 is the
	 * memory-mapped registers; mem_size receives the size of the
	 * memory-mapped region.
	 */
	if (ddi_dev_regsize(devinfo, 1, &mem_size) != DDI_SUCCESS) {
		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
		    "ddi_dev_regsize for registers failed");
		return (DDI_FAILURE);
	}

	if ((ddi_regs_map_setup(devinfo, 1, /* register of interest */
	    (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
	    &osdep->reg_handle)) != DDI_SUCCESS) {
		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
		    "ddi_regs_map_setup for registers failed");
		goto regs_map_fail;
	}

	/* ICH needs to map flash memory */
	if (hw->mac.type == e1000_ich8lan || hw->mac.type == e1000_ich9lan) {
		/* get flash size */
		if (ddi_dev_regsize(devinfo, ICH_FLASH_REG_SET,
		    &mem_size) != DDI_SUCCESS) {
			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
			    "ddi_dev_regsize for ICH flash failed");
			goto regs_map_fail;
		}

		/* map flash in */
		if (ddi_regs_map_setup(devinfo, ICH_FLASH_REG_SET,
		    (caddr_t *)&hw->flash_address, 0,
		    mem_size, &e1000g_regs_acc_attr,
		    &osdep->ich_flash_handle) != DDI_SUCCESS) {
			E1000G_DEBUGLOG_0(Adapter, CE_WARN,
			    "ddi_regs_map_setup for ICH flash failed");
			goto regs_map_fail;
		}
	}

	return (DDI_SUCCESS);

regs_map_fail:
	if (osdep->reg_handle != NULL)
		ddi_regs_map_free(&osdep->reg_handle);

	return (DDI_FAILURE);
}

static int
e1000g_set_driver_params(struct e1000g *Adapter)
{
	struct e1000_hw *hw;
	e1000g_tx_ring_t *tx_ring;
	uint32_t mem_bar, io_bar, bar64;

	hw = &Adapter->shared;

	/* Set MAC type and initialize hardware functions */
	if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
		    "Could not setup hardware functions");
		return (DDI_FAILURE);
	}

	/* Get bus information */
	if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
		E1000G_DEBUGLOG_0(Adapter, CE_WARN,
		    "Could not get bus information");
		return (DDI_FAILURE);
	}

	/* get mem_base addr */
	mem_bar = pci_config_get32(Adapter->osdep.cfg_handle, PCI_CONF_BASE0);
	bar64 = mem_bar & PCI_BASE_TYPE_ALL;

	/* get io_base addr */
	if (hw->mac.type >= e1000_82544) {
		if (bar64) {
701 /* IO BAR is different for 64 bit BAR mode */ 702 io_bar = pci_config_get32(Adapter->osdep.cfg_handle, 703 PCI_CONF_BASE4); 704 } else { 705 /* normal 32-bit BAR mode */ 706 io_bar = pci_config_get32(Adapter->osdep.cfg_handle, 707 PCI_CONF_BASE2); 708 } 709 hw->io_base = io_bar & PCI_BASE_IO_ADDR_M; 710 } else { 711 /* no I/O access for adapters prior to 82544 */ 712 hw->io_base = 0x0; 713 } 714 715 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word); 716 717 hw->mac.autoneg_failed = B_TRUE; 718 719 /* Set the wait_for_link flag to B_FALSE */ 720 hw->phy.wait_for_link = B_FALSE; 721 722 /* Adaptive IFS related changes */ 723 hw->mac.adaptive_ifs = B_TRUE; 724 725 /* Enable phy init script for IGP phy of 82541/82547 */ 726 if ((hw->mac.type == e1000_82547) || 727 (hw->mac.type == e1000_82541) || 728 (hw->mac.type == e1000_82547_rev_2) || 729 (hw->mac.type == e1000_82541_rev_2)) 730 e1000_init_script_state_82541(hw, B_TRUE); 731 732 /* Enable the TTL workaround for 82541/82547 */ 733 e1000_set_ttl_workaround_state_82541(hw, B_TRUE); 734 735 #ifdef __sparc 736 Adapter->strip_crc = B_TRUE; 737 #else 738 Adapter->strip_crc = B_FALSE; 739 #endif 740 741 /* Get conf file properties */ 742 e1000g_get_conf(Adapter); 743 744 /* Get speed/duplex settings in conf file */ 745 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL; 746 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 747 e1000g_force_speed_duplex(Adapter); 748 749 /* Get Jumbo Frames settings in conf file */ 750 e1000g_get_max_frame_size(Adapter); 751 752 /* Set Rx/Tx buffer size */ 753 e1000g_set_bufsize(Adapter); 754 755 /* Master Latency Timer */ 756 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER; 757 758 /* copper options */ 759 if (hw->media_type == e1000_media_type_copper) { 760 hw->phy.mdix = 0; /* AUTO_ALL_MODES */ 761 hw->phy.disable_polarity_correction = B_FALSE; 762 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */ 763 } 764 765 /* The initial link state should be "unknown" */ 766 Adapter->link_state = LINK_STATE_UNKNOWN; 767 768 /* Initialize rx parameters */ 769 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY; 770 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY; 771 772 /* Initialize tx parameters */ 773 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE; 774 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD; 775 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY; 776 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY; 777 778 tx_ring = Adapter->tx_ring; 779 tx_ring->frags_limit = 780 (hw->mac.max_frame_size / Adapter->tx_bcopy_thresh) + 2; 781 if (tx_ring->frags_limit > (MAX_TX_DESC_PER_PACKET >> 1)) 782 tx_ring->frags_limit = (MAX_TX_DESC_PER_PACKET >> 1); 783 784 /* Initialize rx parameters */ 785 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD; 786 787 return (DDI_SUCCESS); 788 } 789 790 static void 791 e1000g_set_bufsize(struct e1000g *Adapter) 792 { 793 struct e1000_mac_info *mac = &Adapter->shared.mac; 794 uint64_t rx_size; 795 uint64_t tx_size; 796 797 #ifdef __sparc 798 dev_info_t *devinfo = Adapter->dip; 799 ulong_t iommu_pagesize; 800 801 /* Get the system page size */ 802 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1); 803 iommu_pagesize = dvma_pagesize(devinfo); 804 if (iommu_pagesize != 0) { 805 if (Adapter->sys_page_sz == iommu_pagesize) { 806 if (iommu_pagesize > 0x4000) 807 Adapter->sys_page_sz = 0x4000; 808 } else { 809 if (Adapter->sys_page_sz > iommu_pagesize) 810 Adapter->sys_page_sz = iommu_pagesize; 811 } 812 } 813 
	Adapter->dvma_page_num = mac->max_frame_size /
	    Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
	ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
#endif

	mac->min_frame_size = ETHERMIN + ETHERFCSL;

	rx_size = mac->max_frame_size + E1000G_IPALIGNPRESERVEROOM;
	if ((rx_size > FRAME_SIZE_UPTO_2K) && (rx_size <= FRAME_SIZE_UPTO_4K))
		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
	else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
	    (rx_size <= FRAME_SIZE_UPTO_8K))
		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
	else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
	    (rx_size <= FRAME_SIZE_UPTO_16K))
		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
	else
		Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;

	tx_size = mac->max_frame_size;
	if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
	else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
	    (tx_size <= FRAME_SIZE_UPTO_8K))
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
	else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
	    (tx_size <= FRAME_SIZE_UPTO_16K))
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
	else
		Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;

#ifndef NO_82542_SUPPORT
	/*
	 * Wiseman adapters require receive buffers aligned on a 256-byte
	 * boundary. Since Livengood does not require this, and forcing it
	 * for all hardware would have performance implications, the
	 * alignment is applied only to Wiseman (it matters most when jumbo
	 * frames are enabled); normal frames are otherwise fine, but an
	 * unaligned buffer carries a potential risk of losing data, so all
	 * Wiseman boards get 256-byte aligned buffers.
	 */
	if (mac->type < e1000_82543)
		Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
	else
		Adapter->rx_buf_align = 1;
#endif
}

/*
 * e1000g_detach - driver detach
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupts are disabled, and all memory allocated by this
 * driver is freed.
 */
static int
e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	struct e1000g *Adapter;
	boolean_t rx_drain;

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (e1000g_suspend(devinfo));

	case DDI_DETACH:
		break;
	}

	Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
	if (Adapter == NULL)
		return (DDI_FAILURE);

	if (mac_unregister(Adapter->mh) != 0) {
		e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
		return (DDI_FAILURE);
	}
	Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;

	if (Adapter->chip_state != E1000G_STOP)
		e1000g_stop(Adapter, B_TRUE);

	rx_drain = e1000g_rx_drain(Adapter);

	/*
	 * If e1000g_force_detach is enabled, driver detach is safe.
913 * We will let e1000g_free_priv_devi_node routine determine 914 * whether we need to free the priv_dip entry for current 915 * driver instance. 916 */ 917 if (e1000g_force_detach) { 918 e1000g_free_priv_devi_node(Adapter, rx_drain); 919 } else { 920 if (!rx_drain) 921 return (DDI_FAILURE); 922 } 923 924 e1000g_unattach(devinfo, Adapter); 925 926 return (DDI_SUCCESS); 927 } 928 929 /* 930 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance 931 * 932 * If free_flag is true, that indicates the upper layer is not holding 933 * the rx buffers, we could free the priv_dip entry safely. 934 * 935 * Otherwise, we have to keep this entry even after driver detached, 936 * and we also need to mark this entry with E1000G_PRIV_DEVI_DETACH flag, 937 * so that driver could free it while all of rx buffers are returned 938 * by upper layer later. 939 */ 940 static void 941 e1000g_free_priv_devi_node(struct e1000g *Adapter, boolean_t free_flag) 942 { 943 private_devi_list_t *devi_node, *devi_del; 944 945 rw_enter(&e1000g_rx_detach_lock, RW_WRITER); 946 ASSERT(e1000g_private_devi_list != NULL); 947 ASSERT(Adapter->priv_dip != NULL); 948 949 devi_node = e1000g_private_devi_list; 950 if (devi_node->priv_dip == Adapter->priv_dip) { 951 if (free_flag) { 952 e1000g_private_devi_list = 953 devi_node->next; 954 kmem_free(devi_node->priv_dip, 955 sizeof (struct dev_info)); 956 kmem_free(devi_node, 957 sizeof (private_devi_list_t)); 958 } else { 959 ASSERT(e1000g_mblks_pending != 0); 960 devi_node->flag = 961 E1000G_PRIV_DEVI_DETACH; 962 } 963 rw_exit(&e1000g_rx_detach_lock); 964 return; 965 } 966 967 devi_node = e1000g_private_devi_list; 968 while (devi_node->next != NULL) { 969 if (devi_node->next->priv_dip == Adapter->priv_dip) { 970 if (free_flag) { 971 devi_del = devi_node->next; 972 devi_node->next = devi_del->next; 973 kmem_free(devi_del->priv_dip, 974 sizeof (struct dev_info)); 975 kmem_free(devi_del, 976 sizeof (private_devi_list_t)); 977 } else { 978 ASSERT(e1000g_mblks_pending != 0); 979 devi_node->next->flag = 980 E1000G_PRIV_DEVI_DETACH; 981 } 982 break; 983 } 984 devi_node = devi_node->next; 985 } 986 rw_exit(&e1000g_rx_detach_lock); 987 } 988 989 static void 990 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter) 991 { 992 if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 993 (void) e1000g_disable_intrs(Adapter); 994 } 995 996 if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) { 997 (void) mac_unregister(Adapter->mh); 998 } 999 1000 if (Adapter->attach_progress & ATTACH_PROGRESS_NDD) { 1001 e1000g_nd_cleanup(Adapter); 1002 } 1003 1004 if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 1005 (void) e1000g_rem_intrs(Adapter); 1006 } 1007 1008 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) { 1009 (void) ddi_prop_remove_all(devinfo); 1010 } 1011 1012 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) { 1013 kstat_delete((kstat_t *)Adapter->e1000g_ksp); 1014 } 1015 1016 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) { 1017 stop_link_timer(Adapter); 1018 if (e1000_reset_hw(&Adapter->shared) != 0) { 1019 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1020 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1021 } 1022 } 1023 1024 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) { 1025 if (Adapter->osdep.reg_handle != NULL) 1026 ddi_regs_map_free(&Adapter->osdep.reg_handle); 1027 if (Adapter->osdep.ich_flash_handle != NULL) 1028 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle); 1029 } 1030 1031 if 
(Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) { 1032 if (Adapter->osdep.cfg_handle != NULL) 1033 pci_config_teardown(&Adapter->osdep.cfg_handle); 1034 } 1035 1036 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) { 1037 e1000g_destroy_locks(Adapter); 1038 } 1039 1040 if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) { 1041 e1000g_fm_fini(Adapter); 1042 } 1043 1044 e1000_remove_device(&Adapter->shared); 1045 1046 kmem_free((caddr_t)Adapter, sizeof (struct e1000g)); 1047 1048 /* 1049 * Another hotplug spec requirement, 1050 * run ddi_set_driver_private(devinfo, null); 1051 */ 1052 ddi_set_driver_private(devinfo, NULL); 1053 } 1054 1055 static void 1056 e1000g_init_locks(struct e1000g *Adapter) 1057 { 1058 e1000g_tx_ring_t *tx_ring; 1059 e1000g_rx_ring_t *rx_ring; 1060 1061 rw_init(&Adapter->chip_lock, NULL, 1062 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1063 mutex_init(&Adapter->link_lock, NULL, 1064 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1065 mutex_init(&Adapter->watchdog_lock, NULL, 1066 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1067 1068 tx_ring = Adapter->tx_ring; 1069 1070 mutex_init(&tx_ring->tx_lock, NULL, 1071 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1072 mutex_init(&tx_ring->usedlist_lock, NULL, 1073 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1074 mutex_init(&tx_ring->freelist_lock, NULL, 1075 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1076 1077 rx_ring = Adapter->rx_ring; 1078 1079 mutex_init(&rx_ring->freelist_lock, NULL, 1080 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri)); 1081 } 1082 1083 static void 1084 e1000g_destroy_locks(struct e1000g *Adapter) 1085 { 1086 e1000g_tx_ring_t *tx_ring; 1087 e1000g_rx_ring_t *rx_ring; 1088 1089 tx_ring = Adapter->tx_ring; 1090 mutex_destroy(&tx_ring->tx_lock); 1091 mutex_destroy(&tx_ring->usedlist_lock); 1092 mutex_destroy(&tx_ring->freelist_lock); 1093 1094 rx_ring = Adapter->rx_ring; 1095 mutex_destroy(&rx_ring->freelist_lock); 1096 1097 mutex_destroy(&Adapter->link_lock); 1098 mutex_destroy(&Adapter->watchdog_lock); 1099 rw_destroy(&Adapter->chip_lock); 1100 } 1101 1102 static int 1103 e1000g_resume(dev_info_t *devinfo) 1104 { 1105 struct e1000g *Adapter; 1106 1107 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1108 if (Adapter == NULL) 1109 return (DDI_FAILURE); 1110 1111 if (e1000g_start(Adapter, B_TRUE)) 1112 return (DDI_FAILURE); 1113 1114 return (DDI_SUCCESS); 1115 } 1116 1117 static int 1118 e1000g_suspend(dev_info_t *devinfo) 1119 { 1120 struct e1000g *Adapter; 1121 1122 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo); 1123 if (Adapter == NULL) 1124 return (DDI_FAILURE); 1125 1126 e1000g_stop(Adapter, B_TRUE); 1127 1128 return (DDI_SUCCESS); 1129 } 1130 1131 static int 1132 e1000g_init(struct e1000g *Adapter) 1133 { 1134 uint32_t pba; 1135 uint32_t high_water; 1136 struct e1000_hw *hw; 1137 clock_t link_timeout; 1138 1139 hw = &Adapter->shared; 1140 1141 rw_enter(&Adapter->chip_lock, RW_WRITER); 1142 1143 /* 1144 * reset to put the hardware in a known state 1145 * before we try to do anything with the eeprom 1146 */ 1147 if (e1000_reset_hw(hw) != 0) { 1148 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1149 goto init_fail; 1150 } 1151 1152 if (e1000_validate_nvm_checksum(hw) < 0) { 1153 /* 1154 * Some PCI-E parts fail the first check due to 1155 * the link being in sleep state. Call it again, 1156 * if it fails a second time its a real issue. 
		 */
		if (e1000_validate_nvm_checksum(hw) < 0) {
			e1000g_log(Adapter, CE_WARN,
			    "Invalid NVM checksum. Please contact "
			    "the vendor to update the NVM.");
			e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}

#ifdef __sparc
	/*
	 * First, try to get the local ethernet address from OBP. If that
	 * fails, read it from the EEPROM of the NIC card.
	 */
	if (!e1000g_find_mac_address(Adapter)) {
		if (e1000_read_mac_addr(hw) < 0) {
			e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
			e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}
#else
	/* Get the local ethernet address. */
	if (e1000_read_mac_addr(hw) < 0) {
		e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}
#endif

	/* check for valid mac address */
	if (!is_valid_mac_addr(hw->mac.addr)) {
		e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/* Set LAA state for 82571 chipset */
	e1000_set_laa_state_82571(hw, B_TRUE);

	/* Master Latency Timer implementation */
	if (Adapter->master_latency_timer) {
		pci_config_put8(Adapter->osdep.cfg_handle,
		    PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
	}

	if (hw->mac.type < e1000_82547) {
		/*
		 * Total FIFO is 64K
		 */
		if (hw->mac.max_frame_size > FRAME_SIZE_UPTO_8K)
			pba = E1000_PBA_40K;	/* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K;	/* 48K for Rx, 16K for Tx */
	} else if (hw->mac.type >= e1000_82571 &&
	    hw->mac.type <= e1000_82572) {
		/*
		 * Total FIFO is 48K
		 */
		if (hw->mac.max_frame_size > FRAME_SIZE_UPTO_8K)
			pba = E1000_PBA_30K;	/* 30K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_38K;	/* 38K for Rx, 10K for Tx */
	} else if (hw->mac.type == e1000_ich8lan) {
		pba = E1000_PBA_8K;		/* 8K for Rx, 12K for Tx */
	} else if (hw->mac.type == e1000_ich9lan) {
		pba = E1000_PBA_12K;
	} else {
		/*
		 * Total FIFO is 40K
		 */
		if (hw->mac.max_frame_size > FRAME_SIZE_UPTO_8K)
			pba = E1000_PBA_22K;	/* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K;	/* 30K for Rx, 10K for Tx */
	}
	E1000_WRITE_REG(hw, E1000_PBA, pba);

	/*
	 * These parameters set thresholds for the adapter's generation (Tx)
	 * of and response (Rx) to Ethernet PAUSE frames. They are only
	 * threshold settings; flow control itself is enabled or disabled in
	 * the configuration file.
	 * The high-water mark is set down from the top of the Rx FIFO (not
	 * sensitive to max_frame_size) and the low-water mark is set just
	 * below the high-water mark.
	 * The high-water mark must be low enough to fit one full frame above
	 * it in the Rx FIFO. It should be the lower of 90% of the Rx FIFO
	 * size and either the full Rx FIFO size minus the early receive size
	 * (assuming ERT is set to E1000_ERT_2048), or the full Rx FIFO size
	 * minus one full frame.
	 */
	high_water = min(((pba << 10) * 9 / 10),
	    ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_ich9lan) ?
1252 ((pba << 10) - (E1000_ERT_2048 << 3)) : 1253 ((pba << 10) - hw->mac.max_frame_size))); 1254 1255 hw->mac.fc_high_water = high_water & 0xFFF8; 1256 hw->mac.fc_low_water = hw->mac.fc_high_water - 8; 1257 1258 if (hw->mac.type == e1000_80003es2lan) 1259 hw->mac.fc_pause_time = 0xFFFF; 1260 else 1261 hw->mac.fc_pause_time = E1000_FC_PAUSE_TIME; 1262 hw->mac.fc_send_xon = B_TRUE; 1263 hw->mac.fc = hw->mac.original_fc; 1264 1265 /* 1266 * Reset the adapter hardware the second time. 1267 */ 1268 if (e1000_reset_hw(hw) != 0) { 1269 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1270 goto init_fail; 1271 } 1272 1273 /* disable wakeup control by default */ 1274 if (hw->mac.type >= e1000_82544) 1275 E1000_WRITE_REG(hw, E1000_WUC, 0); 1276 1277 /* MWI setup */ 1278 e1000_pci_set_mwi(hw); 1279 1280 /* 1281 * Configure/Initialize hardware 1282 */ 1283 if (e1000_init_hw(hw) < 0) { 1284 e1000g_log(Adapter, CE_WARN, "Initialize hw failed"); 1285 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1286 goto init_fail; 1287 } 1288 1289 /* Disable Smart Power Down */ 1290 phy_spd_state(hw, B_FALSE); 1291 1292 /* Make sure driver has control */ 1293 e1000g_get_driver_control(hw); 1294 1295 /* 1296 * Initialize unicast addresses. 1297 */ 1298 e1000g_init_unicst(Adapter); 1299 1300 /* 1301 * Setup and initialize the mctable structures. After this routine 1302 * completes Multicast table will be set 1303 */ 1304 e1000g_setup_multicast(Adapter); 1305 msec_delay(5); 1306 1307 /* 1308 * Implement Adaptive IFS 1309 */ 1310 e1000_reset_adaptive(hw); 1311 1312 /* Setup Interrupt Throttling Register */ 1313 if (hw->mac.type >= e1000_82540) { 1314 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate); 1315 } else 1316 Adapter->intr_adaptive = B_FALSE; 1317 1318 /* Start the timer for link setup */ 1319 if (hw->mac.autoneg) 1320 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000); 1321 else 1322 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000); 1323 1324 mutex_enter(&Adapter->link_lock); 1325 if (hw->phy.wait_for_link) { 1326 Adapter->link_complete = B_TRUE; 1327 } else { 1328 Adapter->link_complete = B_FALSE; 1329 Adapter->link_tid = timeout(e1000g_link_timer, 1330 (void *)Adapter, link_timeout); 1331 } 1332 mutex_exit(&Adapter->link_lock); 1333 1334 /* Enable PCI-Ex master */ 1335 if (hw->bus.type == e1000_bus_type_pci_express) { 1336 e1000_enable_pciex_master(hw); 1337 } 1338 1339 /* Save the state of the phy */ 1340 e1000g_get_phy_state(Adapter); 1341 1342 Adapter->init_count++; 1343 1344 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) { 1345 goto init_fail; 1346 } 1347 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1348 goto init_fail; 1349 } 1350 1351 rw_exit(&Adapter->chip_lock); 1352 1353 return (DDI_SUCCESS); 1354 1355 init_fail: 1356 rw_exit(&Adapter->chip_lock); 1357 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1358 return (DDI_FAILURE); 1359 } 1360 1361 /* 1362 * Check if the link is up 1363 */ 1364 static boolean_t 1365 e1000g_link_up(struct e1000g *Adapter) 1366 { 1367 struct e1000_hw *hw; 1368 boolean_t link_up; 1369 1370 hw = &Adapter->shared; 1371 1372 e1000_check_for_link(hw); 1373 1374 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) || 1375 ((!hw->mac.get_link_status) && (hw->mac.type == e1000_82543)) || 1376 ((hw->media_type == e1000_media_type_internal_serdes) && 1377 (hw->mac.serdes_has_link))) { 1378 link_up = B_TRUE; 1379 } else { 1380 link_up = B_FALSE; 1381 } 1382 1383 return (link_up); 1384 } 1385 
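/*
 * For illustration only: a worked example of the flow-control watermark
 * arithmetic in e1000g_init() above, assuming the E1000_PBA_38K macro
 * encodes a 38 KB Rx packet buffer and a standard 1500-byte MTU
 * (max_frame_size of roughly 1522 bytes):
 *
 *	pba << 10			38 * 1024 = 38912 bytes
 *	90% of the Rx FIFO		38912 * 9 / 10 = 35020
 *	Rx FIFO minus one full frame	38912 - 1522 = 37390
 *	high_water			min(35020, 37390) = 35020
 *	fc_high_water			35020 & 0xFFF8 = 35016
 *	fc_low_water			35016 - 8 = 35008
 *
 * The actual values depend on the adapter's PBA macro definitions and on
 * the configured frame size.
 */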
1386 static void 1387 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp) 1388 { 1389 struct iocblk *iocp; 1390 struct e1000g *e1000gp; 1391 enum ioc_reply status; 1392 int err; 1393 1394 iocp = (struct iocblk *)mp->b_rptr; 1395 iocp->ioc_error = 0; 1396 e1000gp = (struct e1000g *)arg; 1397 1398 ASSERT(e1000gp); 1399 if (e1000gp == NULL) { 1400 miocnak(q, mp, 0, EINVAL); 1401 return; 1402 } 1403 1404 switch (iocp->ioc_cmd) { 1405 1406 case LB_GET_INFO_SIZE: 1407 case LB_GET_INFO: 1408 case LB_GET_MODE: 1409 case LB_SET_MODE: 1410 status = e1000g_loopback_ioctl(e1000gp, iocp, mp); 1411 break; 1412 1413 case ND_GET: 1414 case ND_SET: 1415 status = e1000g_nd_ioctl(e1000gp, q, mp, iocp); 1416 break; 1417 1418 #ifdef E1000G_DEBUG 1419 case E1000G_IOC_REG_PEEK: 1420 case E1000G_IOC_REG_POKE: 1421 status = e1000g_pp_ioctl(e1000gp, iocp, mp); 1422 break; 1423 case E1000G_IOC_CHIP_RESET: 1424 e1000gp->reset_count++; 1425 if (e1000g_reset(e1000gp)) 1426 status = IOC_ACK; 1427 else 1428 status = IOC_INVAL; 1429 break; 1430 #endif 1431 default: 1432 status = IOC_INVAL; 1433 break; 1434 } 1435 1436 /* 1437 * Decide how to reply 1438 */ 1439 switch (status) { 1440 default: 1441 case IOC_INVAL: 1442 /* 1443 * Error, reply with a NAK and EINVAL or the specified error 1444 */ 1445 miocnak(q, mp, 0, iocp->ioc_error == 0 ? 1446 EINVAL : iocp->ioc_error); 1447 break; 1448 1449 case IOC_DONE: 1450 /* 1451 * OK, reply already sent 1452 */ 1453 break; 1454 1455 case IOC_ACK: 1456 /* 1457 * OK, reply with an ACK 1458 */ 1459 miocack(q, mp, 0, 0); 1460 break; 1461 1462 case IOC_REPLY: 1463 /* 1464 * OK, send prepared reply as ACK or NAK 1465 */ 1466 mp->b_datap->db_type = iocp->ioc_error == 0 ? 1467 M_IOCACK : M_IOCNAK; 1468 qreply(q, mp); 1469 break; 1470 } 1471 } 1472 1473 static int 1474 e1000g_m_start(void *arg) 1475 { 1476 struct e1000g *Adapter = (struct e1000g *)arg; 1477 1478 return (e1000g_start(Adapter, B_TRUE)); 1479 } 1480 1481 static int 1482 e1000g_start(struct e1000g *Adapter, boolean_t global) 1483 { 1484 if (global) { 1485 /* Allocate dma resources for descriptors and buffers */ 1486 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) { 1487 e1000g_log(Adapter, CE_WARN, 1488 "Alloc DMA resources failed"); 1489 return (ENOTACTIVE); 1490 } 1491 Adapter->rx_buffer_setup = B_FALSE; 1492 } 1493 1494 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) { 1495 if (e1000g_init(Adapter) != DDI_SUCCESS) { 1496 e1000g_log(Adapter, CE_WARN, 1497 "Adapter initialization failed"); 1498 if (global) 1499 e1000g_release_dma_resources(Adapter); 1500 return (ENOTACTIVE); 1501 } 1502 } 1503 1504 rw_enter(&Adapter->chip_lock, RW_WRITER); 1505 1506 /* Setup and initialize the transmit structures */ 1507 e1000g_tx_setup(Adapter); 1508 msec_delay(5); 1509 1510 /* Setup and initialize the receive structures */ 1511 e1000g_rx_setup(Adapter); 1512 msec_delay(5); 1513 1514 e1000g_mask_interrupt(Adapter); 1515 1516 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 1517 rw_exit(&Adapter->chip_lock); 1518 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1519 return (ENOTACTIVE); 1520 } 1521 1522 Adapter->chip_state = E1000G_START; 1523 Adapter->attach_progress |= ATTACH_PROGRESS_INIT; 1524 1525 rw_exit(&Adapter->chip_lock); 1526 1527 /* Enable and start the watchdog timer */ 1528 enable_watchdog_timer(Adapter); 1529 1530 return (0); 1531 } 1532 1533 static void 1534 e1000g_m_stop(void *arg) 1535 { 1536 struct e1000g *Adapter = (struct e1000g *)arg; 1537 1538 e1000g_stop(Adapter, B_TRUE); 1539 } 
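/*
 * Reading aid (a sketch, not additional driver code): the stop/start pair
 * below composes into the reset paths further down, with the "global" flag
 * selecting whether the DMA descriptor and buffer resources are released
 * and reallocated as well:
 *
 *	e1000g_reset(Adapter)		== stop(B_FALSE) + start(B_FALSE)
 *	e1000g_global_reset(Adapter)	== stop(B_TRUE) + start(B_TRUE),
 *					   with init_count reset to 0
 *
 * The mc_start/mc_stop entry points (e1000g_m_start/e1000g_m_stop) always
 * use the global (B_TRUE) variants.
 */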
1540 1541 static void 1542 e1000g_stop(struct e1000g *Adapter, boolean_t global) 1543 { 1544 /* Set stop flags */ 1545 rw_enter(&Adapter->chip_lock, RW_WRITER); 1546 1547 Adapter->chip_state = E1000G_STOP; 1548 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT; 1549 1550 rw_exit(&Adapter->chip_lock); 1551 1552 /* Drain tx sessions */ 1553 (void) e1000g_tx_drain(Adapter); 1554 1555 /* Disable and stop all the timers */ 1556 disable_watchdog_timer(Adapter); 1557 stop_link_timer(Adapter); 1558 stop_82547_timer(Adapter->tx_ring); 1559 1560 /* Stop the chip and release pending resources */ 1561 rw_enter(&Adapter->chip_lock, RW_WRITER); 1562 1563 e1000g_clear_all_interrupts(Adapter); 1564 if (e1000_reset_hw(&Adapter->shared) != 0) { 1565 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE); 1566 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1567 } 1568 1569 /* Release resources still held by the TX descriptors */ 1570 e1000g_tx_clean(Adapter); 1571 1572 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 1573 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST); 1574 1575 /* Clean the pending rx jumbo packet fragment */ 1576 e1000g_rx_clean(Adapter); 1577 1578 rw_exit(&Adapter->chip_lock); 1579 1580 if (global) 1581 e1000g_release_dma_resources(Adapter); 1582 } 1583 1584 static void 1585 e1000g_rx_clean(struct e1000g *Adapter) 1586 { 1587 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring; 1588 1589 if (rx_ring->rx_mblk != NULL) { 1590 freemsg(rx_ring->rx_mblk); 1591 rx_ring->rx_mblk = NULL; 1592 rx_ring->rx_mblk_tail = NULL; 1593 rx_ring->rx_mblk_len = 0; 1594 } 1595 } 1596 1597 static void 1598 e1000g_tx_clean(struct e1000g *Adapter) 1599 { 1600 e1000g_tx_ring_t *tx_ring; 1601 p_tx_sw_packet_t packet; 1602 mblk_t *mp; 1603 mblk_t *nmp; 1604 uint32_t packet_count; 1605 1606 tx_ring = Adapter->tx_ring; 1607 1608 /* 1609 * Here we don't need to protect the lists using 1610 * the usedlist_lock and freelist_lock, for they 1611 * have been protected by the chip_lock. 1612 */ 1613 mp = NULL; 1614 nmp = NULL; 1615 packet_count = 0; 1616 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list); 1617 while (packet != NULL) { 1618 if (packet->mp != NULL) { 1619 /* Assemble the message chain */ 1620 if (mp == NULL) { 1621 mp = packet->mp; 1622 nmp = packet->mp; 1623 } else { 1624 nmp->b_next = packet->mp; 1625 nmp = packet->mp; 1626 } 1627 /* Disconnect the message from the sw packet */ 1628 packet->mp = NULL; 1629 } 1630 1631 e1000g_free_tx_swpkt(packet); 1632 packet_count++; 1633 1634 packet = (p_tx_sw_packet_t) 1635 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link); 1636 } 1637 1638 if (mp != NULL) 1639 freemsgchain(mp); 1640 1641 if (packet_count > 0) { 1642 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list); 1643 QUEUE_INIT_LIST(&tx_ring->used_list); 1644 1645 /* Setup TX descriptor pointers */ 1646 tx_ring->tbd_next = tx_ring->tbd_first; 1647 tx_ring->tbd_oldest = tx_ring->tbd_first; 1648 1649 /* Setup our HW Tx Head & Tail descriptor pointers */ 1650 E1000_WRITE_REG(&Adapter->shared, E1000_TDH, 0); 1651 E1000_WRITE_REG(&Adapter->shared, E1000_TDT, 0); 1652 } 1653 } 1654 1655 static boolean_t 1656 e1000g_tx_drain(struct e1000g *Adapter) 1657 { 1658 int i; 1659 boolean_t done; 1660 e1000g_tx_ring_t *tx_ring; 1661 1662 tx_ring = Adapter->tx_ring; 1663 1664 /* Allow up to 'wsdraintime' for pending xmit's to complete. 
*/ 1665 for (i = 0; i < TX_DRAIN_TIME; i++) { 1666 mutex_enter(&tx_ring->usedlist_lock); 1667 done = IS_QUEUE_EMPTY(&tx_ring->used_list); 1668 mutex_exit(&tx_ring->usedlist_lock); 1669 1670 if (done) 1671 break; 1672 1673 msec_delay(1); 1674 } 1675 1676 return (done); 1677 } 1678 1679 static boolean_t 1680 e1000g_rx_drain(struct e1000g *Adapter) 1681 { 1682 e1000g_rx_ring_t *rx_ring; 1683 p_rx_sw_packet_t packet; 1684 boolean_t done; 1685 1686 rx_ring = Adapter->rx_ring; 1687 done = B_TRUE; 1688 1689 rw_enter(&e1000g_rx_detach_lock, RW_WRITER); 1690 1691 while (rx_ring->pending_list != NULL) { 1692 packet = rx_ring->pending_list; 1693 rx_ring->pending_list = 1694 rx_ring->pending_list->next; 1695 1696 if (packet->flag == E1000G_RX_SW_STOP) { 1697 packet->flag = E1000G_RX_SW_DETACH; 1698 done = B_FALSE; 1699 } else { 1700 ASSERT(packet->flag == E1000G_RX_SW_FREE); 1701 ASSERT(packet->mp == NULL); 1702 e1000g_free_rx_sw_packet(packet); 1703 } 1704 } 1705 1706 rw_exit(&e1000g_rx_detach_lock); 1707 1708 return (done); 1709 } 1710 1711 boolean_t 1712 e1000g_reset(struct e1000g *Adapter) 1713 { 1714 e1000g_stop(Adapter, B_FALSE); 1715 1716 if (e1000g_start(Adapter, B_FALSE)) { 1717 e1000g_log(Adapter, CE_WARN, "Reset failed"); 1718 return (B_FALSE); 1719 } 1720 1721 return (B_TRUE); 1722 } 1723 1724 boolean_t 1725 e1000g_global_reset(struct e1000g *Adapter) 1726 { 1727 e1000g_stop(Adapter, B_TRUE); 1728 1729 Adapter->init_count = 0; 1730 1731 if (e1000g_start(Adapter, B_TRUE)) { 1732 e1000g_log(Adapter, CE_WARN, "Reset failed"); 1733 return (B_FALSE); 1734 } 1735 1736 return (B_TRUE); 1737 } 1738 1739 /* 1740 * e1000g_intr_pciexpress - ISR for PCI Express chipsets 1741 * 1742 * This interrupt service routine is for PCI-Express adapters. 1743 * The ICR contents is valid only when the E1000_ICR_INT_ASSERTED 1744 * bit is set. 1745 */ 1746 static uint_t 1747 e1000g_intr_pciexpress(caddr_t arg) 1748 { 1749 struct e1000g *Adapter; 1750 uint32_t icr; 1751 1752 Adapter = (struct e1000g *)arg; 1753 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 1754 1755 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 1756 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 1757 1758 if (icr & E1000_ICR_INT_ASSERTED) { 1759 /* 1760 * E1000_ICR_INT_ASSERTED bit was set: 1761 * Read(Clear) the ICR, claim this interrupt, 1762 * look for work to do. 1763 */ 1764 e1000g_intr_work(Adapter, icr); 1765 return (DDI_INTR_CLAIMED); 1766 } else { 1767 /* 1768 * E1000_ICR_INT_ASSERTED bit was not set: 1769 * Don't claim this interrupt, return immediately. 1770 */ 1771 return (DDI_INTR_UNCLAIMED); 1772 } 1773 } 1774 1775 /* 1776 * e1000g_intr - ISR for PCI/PCI-X chipsets 1777 * 1778 * This interrupt service routine is for PCI/PCI-X adapters. 1779 * We check the ICR contents no matter the E1000_ICR_INT_ASSERTED 1780 * bit is set or not. 1781 */ 1782 static uint_t 1783 e1000g_intr(caddr_t arg) 1784 { 1785 struct e1000g *Adapter; 1786 uint32_t icr; 1787 1788 Adapter = (struct e1000g *)arg; 1789 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR); 1790 1791 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 1792 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 1793 1794 if (icr) { 1795 /* 1796 * Any bit was set in ICR: 1797 * Read(Clear) the ICR, claim this interrupt, 1798 * look for work to do. 1799 */ 1800 e1000g_intr_work(Adapter, icr); 1801 return (DDI_INTR_CLAIMED); 1802 } else { 1803 /* 1804 * No bit was set in ICR: 1805 * Don't claim this interrupt, return immediately. 
1806 */ 1807 return (DDI_INTR_UNCLAIMED); 1808 } 1809 } 1810 1811 /* 1812 * e1000g_intr_work - actual processing of ISR 1813 * 1814 * Read(clear) the ICR contents and call appropriate interrupt 1815 * processing routines. 1816 */ 1817 static void 1818 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr) 1819 { 1820 struct e1000_hw *hw; 1821 hw = &Adapter->shared; 1822 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 1823 uint32_t itr; 1824 1825 Adapter->rx_pkt_cnt = 0; 1826 Adapter->tx_pkt_cnt = 0; 1827 1828 rw_enter(&Adapter->chip_lock, RW_READER); 1829 /* 1830 * Here we need to check the "chip_state" flag within the chip_lock to 1831 * ensure the receive routine will not execute when the adapter is 1832 * being reset. 1833 */ 1834 if (Adapter->chip_state != E1000G_START) { 1835 rw_exit(&Adapter->chip_lock); 1836 return; 1837 } 1838 1839 if (icr & E1000_ICR_RXT0) { 1840 mblk_t *mp; 1841 1842 mp = e1000g_receive(Adapter); 1843 1844 rw_exit(&Adapter->chip_lock); 1845 1846 if (mp != NULL) 1847 mac_rx(Adapter->mh, Adapter->mrh, mp); 1848 } else 1849 rw_exit(&Adapter->chip_lock); 1850 1851 if (icr & E1000_ICR_TXDW) { 1852 if (!Adapter->tx_intr_enable) 1853 e1000g_clear_tx_interrupt(Adapter); 1854 1855 /* Recycle the tx descriptors */ 1856 rw_enter(&Adapter->chip_lock, RW_READER); 1857 e1000g_recycle(tx_ring); 1858 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr); 1859 rw_exit(&Adapter->chip_lock); 1860 1861 /* Schedule the re-transmit */ 1862 if (tx_ring->resched_needed && 1863 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) { 1864 tx_ring->resched_needed = B_FALSE; 1865 mac_tx_update(Adapter->mh); 1866 E1000G_STAT(tx_ring->stat_reschedule); 1867 } 1868 } 1869 1870 if (Adapter->intr_adaptive) { 1871 itr = e1000g_get_itr(Adapter->rx_pkt_cnt, Adapter->tx_pkt_cnt, 1872 Adapter->intr_throttling_rate); 1873 if (itr) { 1874 E1000_WRITE_REG(hw, E1000_ITR, itr); 1875 Adapter->intr_throttling_rate = itr; 1876 } 1877 } 1878 1879 /* 1880 * The Receive Sequence errors RXSEQ and the link status change LSC 1881 * are checked to detect that the cable has been pulled out. For 1882 * the Wiseman 2.0 silicon, the receive sequence errors interrupt 1883 * are an indication that cable is not connected. 1884 */ 1885 if ((icr & E1000_ICR_RXSEQ) || 1886 (icr & E1000_ICR_LSC) || 1887 (icr & E1000_ICR_GPI_EN1)) { 1888 boolean_t link_changed; 1889 timeout_id_t tid = 0; 1890 1891 stop_watchdog_timer(Adapter); 1892 1893 rw_enter(&Adapter->chip_lock, RW_WRITER); 1894 1895 /* 1896 * Because we got a link-status-change interrupt, force 1897 * e1000_check_for_link() to look at phy 1898 */ 1899 Adapter->shared.mac.get_link_status = B_TRUE; 1900 1901 /* e1000g_link_check takes care of link status change */ 1902 link_changed = e1000g_link_check(Adapter); 1903 1904 /* Get new phy state */ 1905 e1000g_get_phy_state(Adapter); 1906 1907 /* 1908 * If the link timer has not timed out, we'll not notify 1909 * the upper layer with any link state until the link is up. 1910 */ 1911 if (link_changed && !Adapter->link_complete) { 1912 if (Adapter->link_state == LINK_STATE_UP) { 1913 mutex_enter(&Adapter->link_lock); 1914 Adapter->link_complete = B_TRUE; 1915 tid = Adapter->link_tid; 1916 Adapter->link_tid = 0; 1917 mutex_exit(&Adapter->link_lock); 1918 } else { 1919 link_changed = B_FALSE; 1920 } 1921 } 1922 rw_exit(&Adapter->chip_lock); 1923 1924 if (link_changed) { 1925 if (tid != 0) 1926 (void) untimeout(tid); 1927 1928 /* 1929 * Workaround for esb2. Data stuck in fifo on a link 1930 * down event. Reset the adapter to recover it. 
1931 */ 1932 if ((Adapter->link_state == LINK_STATE_DOWN) && 1933 (Adapter->shared.mac.type == e1000_80003es2lan)) 1934 (void) e1000g_reset(Adapter); 1935 1936 mac_link_update(Adapter->mh, Adapter->link_state); 1937 } 1938 1939 start_watchdog_timer(Adapter); 1940 } 1941 } 1942 1943 static uint32_t 1944 e1000g_get_itr(uint32_t rx_packet, uint32_t tx_packet, uint32_t cur_itr) 1945 { 1946 uint32_t new_itr; 1947 1948 /* 1949 * Determine a propper itr according to rx/tx packet count 1950 * per interrupt, the value of itr are based on document 1951 * and testing. 1952 */ 1953 if ((rx_packet < DEFAULT_INTR_PACKET_LOW) || 1954 (tx_packet < DEFAULT_INTR_PACKET_LOW)) { 1955 new_itr = DEFAULT_INTR_THROTTLING_LOW; 1956 goto itr_done; 1957 } 1958 if ((rx_packet > DEFAULT_INTR_PACKET_HIGH) || 1959 (tx_packet > DEFAULT_INTR_PACKET_HIGH)) { 1960 new_itr = DEFAULT_INTR_THROTTLING_LOW; 1961 goto itr_done; 1962 } 1963 if (cur_itr < DEFAULT_INTR_THROTTLING_HIGH) { 1964 new_itr = cur_itr + (DEFAULT_INTR_THROTTLING_HIGH >> 2); 1965 if (new_itr > DEFAULT_INTR_THROTTLING_HIGH) 1966 new_itr = DEFAULT_INTR_THROTTLING_HIGH; 1967 } else 1968 new_itr = DEFAULT_INTR_THROTTLING_HIGH; 1969 1970 itr_done: 1971 if (cur_itr == new_itr) 1972 return (0); 1973 else 1974 return (new_itr); 1975 } 1976 1977 static void 1978 e1000g_init_unicst(struct e1000g *Adapter) 1979 { 1980 struct e1000_hw *hw; 1981 int slot; 1982 1983 hw = &Adapter->shared; 1984 1985 if (!Adapter->unicst_init) { 1986 /* Initialize the multiple unicast addresses */ 1987 Adapter->unicst_total = MAX_NUM_UNICAST_ADDRESSES; 1988 1989 if ((hw->mac.type == e1000_82571) && 1990 (e1000_get_laa_state_82571(hw) == B_TRUE)) 1991 Adapter->unicst_total--; 1992 1993 Adapter->unicst_avail = Adapter->unicst_total - 1; 1994 1995 /* Store the default mac address */ 1996 e1000_rar_set(hw, hw->mac.addr, 0); 1997 if ((hw->mac.type == e1000_82571) && 1998 (e1000_get_laa_state_82571(hw) == B_TRUE)) 1999 e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY); 2000 2001 bcopy(hw->mac.addr, Adapter->unicst_addr[0].mac.addr, 2002 ETHERADDRL); 2003 Adapter->unicst_addr[0].mac.set = 1; 2004 2005 for (slot = 1; slot < Adapter->unicst_total; slot++) 2006 Adapter->unicst_addr[slot].mac.set = 0; 2007 2008 Adapter->unicst_init = B_TRUE; 2009 } else { 2010 /* Recover the default mac address */ 2011 bcopy(Adapter->unicst_addr[0].mac.addr, hw->mac.addr, 2012 ETHERADDRL); 2013 2014 /* Store the default mac address */ 2015 e1000_rar_set(hw, hw->mac.addr, 0); 2016 if ((hw->mac.type == e1000_82571) && 2017 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2018 e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY); 2019 2020 /* Re-configure the RAR registers */ 2021 for (slot = 1; slot < Adapter->unicst_total; slot++) 2022 e1000_rar_set(hw, 2023 Adapter->unicst_addr[slot].mac.addr, slot); 2024 } 2025 2026 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 2027 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2028 } 2029 2030 static int 2031 e1000g_m_unicst(void *arg, const uint8_t *mac_addr) 2032 { 2033 struct e1000g *Adapter; 2034 2035 Adapter = (struct e1000g *)arg; 2036 2037 /* Store the default MAC address */ 2038 bcopy(mac_addr, Adapter->shared.mac.addr, ETHERADDRL); 2039 2040 /* Set MAC address in address slot 0, which is the default address */ 2041 return (e1000g_unicst_set(Adapter, mac_addr, 0)); 2042 } 2043 2044 static int 2045 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr, 2046 mac_addr_slot_t slot) 2047 { 2048 struct e1000_hw *hw; 2049 2050 hw = 
&Adapter->shared; 2051 2052 rw_enter(&Adapter->chip_lock, RW_WRITER); 2053 2054 #ifndef NO_82542_SUPPORT 2055 /* 2056 * The first revision of Wiseman silicon (rev 2.0) has an errata 2057 * that requires the receiver to be in reset when any of the 2058 * receive address registers (RAR regs) are accessed. The first 2059 * rev of Wiseman silicon also requires MWI to be disabled when 2060 * a global reset or a receive reset is issued. So before we 2061 * initialize the RARs, we check the rev of the Wiseman controller 2062 * and work around any necessary HW errata. 2063 */ 2064 if ((hw->mac.type == e1000_82542) && 2065 (hw->revision_id == E1000_REVISION_2)) { 2066 e1000_pci_clear_mwi(hw); 2067 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); 2068 msec_delay(5); 2069 } 2070 #endif 2071 2072 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr, ETHERADDRL); 2073 e1000_rar_set(hw, (uint8_t *)mac_addr, slot); 2074 2075 if (slot == 0) { 2076 if ((hw->mac.type == e1000_82571) && 2077 (e1000_get_laa_state_82571(hw) == B_TRUE)) 2078 e1000_rar_set(hw, (uint8_t *)mac_addr, LAST_RAR_ENTRY); 2079 } 2080 2081 #ifndef NO_82542_SUPPORT 2082 /* 2083 * If we are using Wiseman rev 2.0 silicon, we will have previously 2084 * put the receive in reset, and disabled MWI, to work around some 2085 * HW errata. Now we should take the receiver out of reset, and 2086 * re-enabled if MWI if it was previously enabled by the PCI BIOS. 2087 */ 2088 if ((hw->mac.type == e1000_82542) && 2089 (hw->revision_id == E1000_REVISION_2)) { 2090 E1000_WRITE_REG(hw, E1000_RCTL, 0); 2091 msec_delay(1); 2092 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 2093 e1000_pci_set_mwi(hw); 2094 e1000g_rx_setup(Adapter); 2095 } 2096 #endif 2097 2098 rw_exit(&Adapter->chip_lock); 2099 2100 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2101 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2102 return (EIO); 2103 } 2104 2105 return (0); 2106 } 2107 2108 /* 2109 * e1000g_m_unicst_add() - will find an unused address slot, set the 2110 * address value to the one specified, reserve that slot and enable 2111 * the NIC to start filtering on the new MAC address. 2112 * Returns 0 on success. 2113 */ 2114 static int 2115 e1000g_m_unicst_add(void *arg, mac_multi_addr_t *maddr) 2116 { 2117 struct e1000g *Adapter = (struct e1000g *)arg; 2118 mac_addr_slot_t slot; 2119 int err; 2120 2121 if (mac_unicst_verify(Adapter->mh, 2122 maddr->mma_addr, maddr->mma_addrlen) == B_FALSE) 2123 return (EINVAL); 2124 2125 rw_enter(&Adapter->chip_lock, RW_WRITER); 2126 if (Adapter->unicst_avail == 0) { 2127 /* no slots available */ 2128 rw_exit(&Adapter->chip_lock); 2129 return (ENOSPC); 2130 } 2131 2132 /* 2133 * Primary/default address is in slot 0. The next addresses 2134 * are the multiple MAC addresses. So multiple MAC address 0 2135 * is in slot 1, 1 in slot 2, and so on. So the first multiple 2136 * MAC address resides in slot 1. 
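 * For example, if unicst_total were 16, slot 0 would hold the primary
 * address and multiple MAC addresses 0 through 14 would occupy slots
 * 1 through 15.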
2137 */ 2138 for (slot = 1; slot < Adapter->unicst_total; slot++) { 2139 if (Adapter->unicst_addr[slot].mac.set == 0) { 2140 Adapter->unicst_addr[slot].mac.set = 1; 2141 break; 2142 } 2143 } 2144 2145 ASSERT((slot > 0) && (slot < Adapter->unicst_total)); 2146 2147 Adapter->unicst_avail--; 2148 rw_exit(&Adapter->chip_lock); 2149 2150 maddr->mma_slot = slot; 2151 2152 if ((err = e1000g_unicst_set(Adapter, maddr->mma_addr, slot)) != 0) { 2153 rw_enter(&Adapter->chip_lock, RW_WRITER); 2154 Adapter->unicst_addr[slot].mac.set = 0; 2155 Adapter->unicst_avail++; 2156 rw_exit(&Adapter->chip_lock); 2157 } 2158 2159 return (err); 2160 } 2161 2162 /* 2163 * e1000g_m_unicst_remove() - removes a MAC address that was added by a 2164 * call to e1000g_m_unicst_add(). The slot number that was returned in 2165 * e1000g_m_unicst_add() is passed in the call to remove the address. 2166 * Returns 0 on success. 2167 */ 2168 static int 2169 e1000g_m_unicst_remove(void *arg, mac_addr_slot_t slot) 2170 { 2171 struct e1000g *Adapter = (struct e1000g *)arg; 2172 int err; 2173 2174 if ((slot <= 0) || (slot >= Adapter->unicst_total)) 2175 return (EINVAL); 2176 2177 rw_enter(&Adapter->chip_lock, RW_WRITER); 2178 if (Adapter->unicst_addr[slot].mac.set == 1) { 2179 Adapter->unicst_addr[slot].mac.set = 0; 2180 Adapter->unicst_avail++; 2181 rw_exit(&Adapter->chip_lock); 2182 2183 /* Copy the default address to the passed slot */ 2184 if ((err = e1000g_unicst_set(Adapter, 2185 Adapter->unicst_addr[0].mac.addr, slot)) != 0) { 2186 rw_enter(&Adapter->chip_lock, RW_WRITER); 2187 Adapter->unicst_addr[slot].mac.set = 1; 2188 Adapter->unicst_avail--; 2189 rw_exit(&Adapter->chip_lock); 2190 } 2191 return (err); 2192 } 2193 rw_exit(&Adapter->chip_lock); 2194 2195 return (EINVAL); 2196 } 2197 2198 /* 2199 * e1000g_m_unicst_modify() - modifies the value of an address that 2200 * has been added by e1000g_m_unicst_add(). The new address, address 2201 * length and the slot number that was returned in the call to add 2202 * should be passed to e1000g_m_unicst_modify(). mma_flags should be 2203 * set to 0. Returns 0 on success. 2204 */ 2205 static int 2206 e1000g_m_unicst_modify(void *arg, mac_multi_addr_t *maddr) 2207 { 2208 struct e1000g *Adapter = (struct e1000g *)arg; 2209 mac_addr_slot_t slot; 2210 2211 if (mac_unicst_verify(Adapter->mh, 2212 maddr->mma_addr, maddr->mma_addrlen) == B_FALSE) 2213 return (EINVAL); 2214 2215 slot = maddr->mma_slot; 2216 2217 if ((slot <= 0) || (slot >= Adapter->unicst_total)) 2218 return (EINVAL); 2219 2220 rw_enter(&Adapter->chip_lock, RW_WRITER); 2221 if (Adapter->unicst_addr[slot].mac.set == 1) { 2222 rw_exit(&Adapter->chip_lock); 2223 2224 return (e1000g_unicst_set(Adapter, maddr->mma_addr, slot)); 2225 } 2226 rw_exit(&Adapter->chip_lock); 2227 2228 return (EINVAL); 2229 } 2230 2231 /* 2232 * e1000g_m_unicst_get() - will get the MAC address and all other 2233 * information related to the address slot passed in mac_multi_addr_t. 2234 * mma_flags should be set to 0 in the call.
2235 * On return, mma_flags can take the following values: 2236 * 1) MMAC_SLOT_UNUSED 2237 * 2) MMAC_SLOT_USED | MMAC_VENDOR_ADDR 2238 * 3) MMAC_SLOT_UNUSED | MMAC_VENDOR_ADDR 2239 * 4) MMAC_SLOT_USED 2240 */ 2241 static int 2242 e1000g_m_unicst_get(void *arg, mac_multi_addr_t *maddr) 2243 { 2244 struct e1000g *Adapter = (struct e1000g *)arg; 2245 mac_addr_slot_t slot; 2246 2247 slot = maddr->mma_slot; 2248 2249 if ((slot <= 0) || (slot >= Adapter->unicst_total)) 2250 return (EINVAL); 2251 2252 rw_enter(&Adapter->chip_lock, RW_WRITER); 2253 if (Adapter->unicst_addr[slot].mac.set == 1) { 2254 bcopy(Adapter->unicst_addr[slot].mac.addr, 2255 maddr->mma_addr, ETHERADDRL); 2256 maddr->mma_flags = MMAC_SLOT_USED; 2257 } else { 2258 maddr->mma_flags = MMAC_SLOT_UNUSED; 2259 } 2260 rw_exit(&Adapter->chip_lock); 2261 2262 return (0); 2263 } 2264 2265 static int 2266 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr) 2267 { 2268 struct e1000_hw *hw = &Adapter->shared; 2269 unsigned i; 2270 int res = 0; 2271 2272 rw_enter(&Adapter->chip_lock, RW_WRITER); 2273 2274 if ((multiaddr[0] & 01) == 0) { 2275 res = EINVAL; 2276 goto done; 2277 } 2278 2279 if (Adapter->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) { 2280 res = ENOENT; 2281 goto done; 2282 } 2283 2284 bcopy(multiaddr, 2285 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL); 2286 Adapter->mcast_count++; 2287 2288 /* 2289 * Update the MC table in the hardware 2290 */ 2291 e1000g_clear_interrupt(Adapter); 2292 2293 e1000g_setup_multicast(Adapter); 2294 2295 #ifndef NO_82542_SUPPORT 2296 if ((hw->mac.type == e1000_82542) && 2297 (hw->revision_id == E1000_REVISION_2)) 2298 e1000g_rx_setup(Adapter); 2299 #endif 2300 2301 e1000g_mask_interrupt(Adapter); 2302 2303 done: 2304 rw_exit(&Adapter->chip_lock); 2305 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2306 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2307 res = EIO; 2308 } 2309 2310 return (res); 2311 } 2312 2313 static int 2314 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr) 2315 { 2316 struct e1000_hw *hw = &Adapter->shared; 2317 unsigned i; 2318 2319 rw_enter(&Adapter->chip_lock, RW_WRITER); 2320 2321 for (i = 0; i < Adapter->mcast_count; i++) { 2322 if (bcmp(multiaddr, &Adapter->mcast_table[i], 2323 ETHERADDRL) == 0) { 2324 for (i++; i < Adapter->mcast_count; i++) { 2325 Adapter->mcast_table[i - 1] = 2326 Adapter->mcast_table[i]; 2327 } 2328 Adapter->mcast_count--; 2329 break; 2330 } 2331 } 2332 2333 /* 2334 * Update the MC table in the hardware 2335 */ 2336 e1000g_clear_interrupt(Adapter); 2337 2338 e1000g_setup_multicast(Adapter); 2339 2340 #ifndef NO_82542_SUPPORT 2341 if ((hw->mac.type == e1000_82542) && 2342 (hw->revision_id == E1000_REVISION_2)) 2343 e1000g_rx_setup(Adapter); 2344 #endif 2345 2346 e1000g_mask_interrupt(Adapter); 2347 2348 done: 2349 rw_exit(&Adapter->chip_lock); 2350 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2351 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2352 return (EIO); 2353 } 2354 2355 return (0); 2356 } 2357 2358 /* 2359 * e1000g_setup_multicast - setup multicast data structures 2360 * 2361 * This routine initializes all of the multicast related structures. 
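 * multicst_add() and multicst_remove() invoke this routine with the
 * chip_lock held for writing and with interrupts cleared, re-masking
 * them once the table has been rewritten.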
2362 */ 2363 void 2364 e1000g_setup_multicast(struct e1000g *Adapter) 2365 { 2366 uint8_t *mc_addr_list; 2367 uint32_t mc_addr_count; 2368 uint32_t rctl; 2369 struct e1000_hw *hw; 2370 2371 hw = &Adapter->shared; 2372 2373 /* 2374 * The e1000g has the ability to do perfect filtering of 16 2375 * addresses. The driver uses one of the e1000g's 16 receive 2376 * address registers for its node/network/mac/individual address. 2377 * So, we have room for up to 15 multicast addresses in the CAM; 2378 * additional MC addresses are handled by the MTA (Multicast Table 2379 * Array). 2380 */ 2381 2382 rctl = E1000_READ_REG(hw, E1000_RCTL); 2383 2384 mc_addr_list = (uint8_t *)Adapter->mcast_table; 2385 2386 if (Adapter->mcast_count > MAX_NUM_MULTICAST_ADDRESSES) { 2387 E1000G_DEBUGLOG_1(Adapter, CE_WARN, 2388 "Adapter requested more than %d MC Addresses.\n", 2389 MAX_NUM_MULTICAST_ADDRESSES); 2390 mc_addr_count = MAX_NUM_MULTICAST_ADDRESSES; 2391 } else { 2392 /* 2393 * Set the number of MC addresses that we are being 2394 * requested to use 2395 */ 2396 mc_addr_count = Adapter->mcast_count; 2397 } 2398 #ifndef NO_82542_SUPPORT 2399 /* 2400 * The Wiseman 2.0 silicon has an erratum by which the receiver will 2401 * hang while writing to the receive address registers if the receiver 2402 * is not in reset before writing to the registers. Updating the RAR 2403 * is done during the setting up of the multicast table, hence the 2404 * receiver has to be put in reset before updating the multicast table 2405 * and then taken out of reset at the end 2406 */ 2407 /* 2408 * If MWI was enabled then disable it before issuing the global 2409 * reset to the hardware. 2410 */ 2411 /* 2412 * Only required for WISEMAN_2_0 2413 */ 2414 if ((hw->mac.type == e1000_82542) && 2415 (hw->revision_id == E1000_REVISION_2)) { 2416 e1000_pci_clear_mwi(hw); 2417 /* 2418 * The e1000g must be in reset before changing any RA 2419 * registers. Reset receive unit. The chip will remain in 2420 * the reset state until software explicitly restarts it. 2421 */ 2422 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST); 2423 /* Allow receiver time to go into reset */ 2424 msec_delay(5); 2425 } 2426 #endif 2427 2428 e1000_mc_addr_list_update(hw, mc_addr_list, mc_addr_count, 2429 Adapter->unicst_total, hw->mac.rar_entry_count); 2430 2431 #ifndef NO_82542_SUPPORT 2432 /* 2433 * Only for Wiseman_2_0 2434 * If MWI was enabled then re-enable it after issuing (as we 2435 * disabled it up there) the receive reset command. 2436 * Wainwright does not have a receive reset command and the only thing 2437 * close to it is a global reset, which would require tx setup also 2438 */ 2439 if ((hw->mac.type == e1000_82542) && 2440 (hw->revision_id == E1000_REVISION_2)) { 2441 /* 2442 * If MWI was enabled then re-enable it after issuing the 2443 * global or receive reset to the hardware. 2444 */ 2445 2446 /* 2447 * Take receiver out of reset 2448 * clear E1000_RCTL_RST bit (and all others) 2449 */ 2450 E1000_WRITE_REG(hw, E1000_RCTL, 0); 2451 msec_delay(5); 2452 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 2453 e1000_pci_set_mwi(hw); 2454 } 2455 #endif 2456 2457 /* 2458 * Restore original value 2459 */ 2460 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2461 } 2462 2463 int 2464 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr) 2465 { 2466 struct e1000g *Adapter = (struct e1000g *)arg; 2467 2468 return ((add) ?
multicst_add(Adapter, addr) 2469 : multicst_remove(Adapter, addr)); 2470 } 2471 2472 int 2473 e1000g_m_promisc(void *arg, boolean_t on) 2474 { 2475 struct e1000g *Adapter = (struct e1000g *)arg; 2476 uint32_t rctl; 2477 2478 rw_enter(&Adapter->chip_lock, RW_WRITER); 2479 2480 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL); 2481 2482 if (on) 2483 rctl |= 2484 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM); 2485 else 2486 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE)); 2487 2488 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl); 2489 2490 Adapter->e1000g_promisc = on; 2491 2492 rw_exit(&Adapter->chip_lock); 2493 2494 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 2495 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 2496 return (EIO); 2497 } 2498 2499 return (0); 2500 } 2501 2502 static boolean_t 2503 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data) 2504 { 2505 struct e1000g *Adapter = (struct e1000g *)arg; 2506 struct e1000_hw *hw = &Adapter->shared; 2507 2508 switch (cap) { 2509 case MAC_CAPAB_HCKSUM: { 2510 uint32_t *txflags = cap_data; 2511 /* 2512 * Checksum on/off selection via global parameters. 2513 * 2514 * If the chip is flagged as not capable of (correctly) 2515 * handling checksumming, we don't enable it on either 2516 * Rx or Tx side. Otherwise, we take this chip's settings 2517 * from the patchable global defaults. 2518 * 2519 * We advertise our capabilities only if TX offload is 2520 * enabled. On receive, the stack will accept checksummed 2521 * packets anyway, even if we haven't said we can deliver 2522 * them. 2523 */ 2524 switch (hw->mac.type) { 2525 case e1000_82540: 2526 case e1000_82544: 2527 case e1000_82545: 2528 case e1000_82545_rev_3: 2529 case e1000_82546: 2530 case e1000_82546_rev_3: 2531 case e1000_82571: 2532 case e1000_82572: 2533 case e1000_82573: 2534 case e1000_80003es2lan: 2535 if (Adapter->tx_hcksum_enabled) 2536 *txflags = HCKSUM_IPHDRCKSUM | 2537 HCKSUM_INET_PARTIAL; 2538 else 2539 return (B_FALSE); 2540 break; 2541 2542 /* 2543 * For the following Intel PRO/1000 chipsets, we have not 2544 * tested the hardware checksum offload capability, so we 2545 * disable the capability for them. 2546 * e1000_82542, 2547 * e1000_82543, 2548 * e1000_82541, 2549 * e1000_82541_rev_2, 2550 * e1000_82547, 2551 * e1000_82547_rev_2, 2552 */ 2553 default: 2554 return (B_FALSE); 2555 } 2556 2557 break; 2558 } 2559 case MAC_CAPAB_POLL: 2560 /* 2561 * There's nothing for us to fill in, simply returning 2562 * B_TRUE stating that we support polling is sufficient. 2563 */ 2564 break; 2565 2566 case MAC_CAPAB_MULTIADDRESS: { 2567 multiaddress_capab_t *mmacp = cap_data; 2568 2569 /* 2570 * The number of MAC addresses made available by 2571 * this capability is one less than the total as 2572 * the primary address in slot 0 is counted in 2573 * the total. 
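 * For instance, an adapter exposing 16 unicast slots would report
 * maddr_naddr == 15 here, while maddr_naddrfree mirrors unicst_avail.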
2574 */ 2575 mmacp->maddr_naddr = Adapter->unicst_total - 1; 2576 mmacp->maddr_naddrfree = Adapter->unicst_avail; 2577 /* No multiple factory addresses, set mma_flag to 0 */ 2578 mmacp->maddr_flag = 0; 2579 mmacp->maddr_handle = Adapter; 2580 mmacp->maddr_add = e1000g_m_unicst_add; 2581 mmacp->maddr_remove = e1000g_m_unicst_remove; 2582 mmacp->maddr_modify = e1000g_m_unicst_modify; 2583 mmacp->maddr_get = e1000g_m_unicst_get; 2584 mmacp->maddr_reserve = NULL; 2585 break; 2586 } 2587 default: 2588 return (B_FALSE); 2589 } 2590 return (B_TRUE); 2591 } 2592 2593 static boolean_t 2594 e1000g_param_locked(mac_prop_id_t pr_num) 2595 { 2596 /* 2597 * All en_* parameters are locked (read-only) while 2598 * the device is in any sort of loopback mode ... 2599 */ 2600 switch (pr_num) { 2601 case DLD_PROP_EN_1000FDX_CAP: 2602 case DLD_PROP_EN_1000HDX_CAP: 2603 case DLD_PROP_EN_100FDX_CAP: 2604 case DLD_PROP_EN_100HDX_CAP: 2605 case DLD_PROP_EN_10FDX_CAP: 2606 case DLD_PROP_EN_10HDX_CAP: 2607 case DLD_PROP_AUTONEG: 2608 case DLD_PROP_FLOWCTRL: 2609 return (B_TRUE); 2610 } 2611 return (B_FALSE); 2612 } 2613 2614 /* 2615 * callback function for set/get of properties 2616 */ 2617 static int 2618 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 2619 uint_t pr_valsize, const void *pr_val) 2620 { 2621 struct e1000g *Adapter = arg; 2622 struct e1000_mac_info *mac = &Adapter->shared.mac; 2623 struct e1000_phy_info *phy = &Adapter->shared.phy; 2624 e1000g_tx_ring_t *tx_ring; 2625 int err = 0; 2626 link_flowctrl_t fc; 2627 uint64_t cur_mtu, new_mtu; 2628 uint64_t tmp = 0; 2629 2630 rw_enter(&Adapter->chip_lock, RW_WRITER); 2631 if (Adapter->loopback_mode != E1000G_LB_NONE && 2632 e1000g_param_locked(pr_num)) { 2633 /* 2634 * All en_* parameters are locked (read-only) 2635 * while the device is in any sort of loopback mode. 
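 * (e1000g_param_locked() above enumerates exactly which properties
 * are held read-only in that case.)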
2636 */ 2637 rw_exit(&Adapter->chip_lock); 2638 return (EBUSY); 2639 } 2640 2641 switch (pr_num) { 2642 case DLD_PROP_EN_1000FDX_CAP: 2643 Adapter->param_en_1000fdx = *(uint8_t *)pr_val; 2644 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val; 2645 goto reset; 2646 case DLD_PROP_EN_1000HDX_CAP: 2647 Adapter->param_en_1000hdx = *(uint8_t *)pr_val; 2648 Adapter->param_adv_1000hdx = *(uint8_t *)pr_val; 2649 goto reset; 2650 case DLD_PROP_EN_100FDX_CAP: 2651 Adapter->param_en_100fdx = *(uint8_t *)pr_val; 2652 Adapter->param_adv_100fdx = *(uint8_t *)pr_val; 2653 goto reset; 2654 case DLD_PROP_EN_100HDX_CAP: 2655 Adapter->param_en_100hdx = *(uint8_t *)pr_val; 2656 Adapter->param_adv_100hdx = *(uint8_t *)pr_val; 2657 goto reset; 2658 case DLD_PROP_EN_10FDX_CAP: 2659 Adapter->param_en_10fdx = *(uint8_t *)pr_val; 2660 Adapter->param_adv_10fdx = *(uint8_t *)pr_val; 2661 goto reset; 2662 case DLD_PROP_EN_10HDX_CAP: 2663 Adapter->param_en_10hdx = *(uint8_t *)pr_val; 2664 Adapter->param_adv_10hdx = *(uint8_t *)pr_val; 2665 goto reset; 2666 case DLD_PROP_AUTONEG: 2667 Adapter->param_adv_autoneg = *(uint8_t *)pr_val; 2668 goto reset; 2669 case DLD_PROP_FLOWCTRL: 2670 mac->fc_send_xon = B_TRUE; 2671 bcopy(pr_val, &fc, sizeof (fc)); 2672 2673 switch (fc) { 2674 default: 2675 err = EINVAL; 2676 break; 2677 case LINK_FLOWCTRL_NONE: 2678 mac->fc = e1000_fc_none; 2679 break; 2680 case LINK_FLOWCTRL_RX: 2681 mac->fc = e1000_fc_rx_pause; 2682 break; 2683 case LINK_FLOWCTRL_TX: 2684 mac->fc = e1000_fc_tx_pause; 2685 break; 2686 case LINK_FLOWCTRL_BI: 2687 mac->fc = e1000_fc_full; 2688 break; 2689 } 2690 reset: 2691 if (err == 0) { 2692 if (e1000g_reset_link(Adapter) != DDI_SUCCESS) 2693 err = EINVAL; 2694 } 2695 break; 2696 case DLD_PROP_ADV_1000FDX_CAP: 2697 case DLD_PROP_ADV_1000HDX_CAP: 2698 case DLD_PROP_ADV_100FDX_CAP: 2699 case DLD_PROP_ADV_100HDX_CAP: 2700 case DLD_PROP_ADV_10FDX_CAP: 2701 case DLD_PROP_ADV_10HDX_CAP: 2702 case DLD_PROP_STATUS: 2703 case DLD_PROP_SPEED: 2704 case DLD_PROP_DUPLEX: 2705 err = ENOTSUP; /* read-only prop. Can't set this. 
*/ 2706 break; 2707 case DLD_PROP_DEFMTU: 2708 cur_mtu = Adapter->default_mtu; 2709 bcopy(pr_val, &new_mtu, sizeof (new_mtu)); 2710 if (new_mtu == cur_mtu) { 2711 err = 0; 2712 break; 2713 } 2714 2715 tmp = new_mtu + sizeof (struct ether_vlan_header) + 2716 ETHERFCSL; 2717 if ((tmp < DEFAULT_FRAME_SIZE) || 2718 (tmp > MAXIMUM_FRAME_SIZE)) { 2719 err = EINVAL; 2720 break; 2721 } 2722 2723 /* ich8 doed not support jumbo frames */ 2724 if ((mac->type == e1000_ich8lan) && 2725 (tmp > DEFAULT_FRAME_SIZE)) { 2726 err = EINVAL; 2727 break; 2728 } 2729 /* ich9 does not do jumbo frames on one phy type */ 2730 if ((mac->type == e1000_ich9lan) && 2731 (phy->type == e1000_phy_ife) && 2732 (tmp > DEFAULT_FRAME_SIZE)) { 2733 err = EINVAL; 2734 break; 2735 } 2736 if (Adapter->chip_state != E1000G_STOP) { 2737 err = EBUSY; 2738 break; 2739 } 2740 2741 err = mac_maxsdu_update(Adapter->mh, new_mtu); 2742 if (err == 0) { 2743 mac->max_frame_size = tmp; 2744 Adapter->default_mtu = new_mtu; 2745 e1000g_set_bufsize(Adapter); 2746 tx_ring = Adapter->tx_ring; 2747 tx_ring->frags_limit = (mac->max_frame_size / 2748 Adapter->tx_bcopy_thresh) + 2; 2749 if (tx_ring->frags_limit > 2750 (MAX_TX_DESC_PER_PACKET >> 1)) 2751 tx_ring->frags_limit = 2752 (MAX_TX_DESC_PER_PACKET >> 1); 2753 } 2754 break; 2755 case DLD_PROP_PRIVATE: 2756 err = e1000g_set_priv_prop(Adapter, pr_name, 2757 pr_valsize, pr_val); 2758 break; 2759 default: 2760 err = ENOTSUP; 2761 break; 2762 } 2763 rw_exit(&Adapter->chip_lock); 2764 return (err); 2765 } 2766 2767 static int 2768 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num, 2769 uint_t pr_valsize, void *pr_val) 2770 { 2771 struct e1000g *Adapter = arg; 2772 struct e1000_mac_info *mac = &Adapter->shared.mac; 2773 int err = EINVAL; 2774 link_flowctrl_t fc; 2775 uint64_t tmp = 0; 2776 2777 bzero(pr_val, pr_valsize); 2778 switch (pr_num) { 2779 case DLD_PROP_DUPLEX: 2780 if (pr_valsize >= sizeof (uint8_t)) { 2781 *(uint8_t *)pr_val = Adapter->link_duplex; 2782 err = 0; 2783 } 2784 break; 2785 case DLD_PROP_SPEED: 2786 if (pr_valsize >= sizeof (uint64_t)) { 2787 tmp = Adapter->link_speed * 1000000ull; 2788 bcopy(&tmp, pr_val, sizeof (tmp)); 2789 err = 0; 2790 } 2791 break; 2792 case DLD_PROP_STATUS: 2793 if (pr_valsize >= sizeof (uint8_t)) { 2794 *(uint8_t *)pr_val = Adapter->link_state; 2795 err = 0; 2796 } 2797 break; 2798 case DLD_PROP_AUTONEG: 2799 if (pr_valsize >= sizeof (uint8_t)) { 2800 *(uint8_t *)pr_val = Adapter->param_adv_autoneg; 2801 err = 0; 2802 } 2803 break; 2804 case DLD_PROP_DEFMTU: 2805 if (pr_valsize >= sizeof (uint64_t)) { 2806 tmp = Adapter->default_mtu; 2807 bcopy(&tmp, pr_val, sizeof (tmp)); 2808 err = 0; 2809 } 2810 break; 2811 case DLD_PROP_FLOWCTRL: 2812 if (pr_valsize >= sizeof (link_flowctrl_t)) { 2813 switch (mac->fc) { 2814 case e1000_fc_none: 2815 fc = LINK_FLOWCTRL_NONE; 2816 break; 2817 case e1000_fc_rx_pause: 2818 fc = LINK_FLOWCTRL_RX; 2819 break; 2820 case e1000_fc_tx_pause: 2821 fc = LINK_FLOWCTRL_TX; 2822 break; 2823 case e1000_fc_full: 2824 fc = LINK_FLOWCTRL_BI; 2825 break; 2826 } 2827 bcopy(&fc, pr_val, sizeof (fc)); 2828 err = 0; 2829 } 2830 break; 2831 case DLD_PROP_ADV_1000FDX_CAP: 2832 if (pr_valsize >= sizeof (uint8_t)) { 2833 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx; 2834 err = 0; 2835 } 2836 break; 2837 case DLD_PROP_EN_1000FDX_CAP: 2838 if (pr_valsize >= sizeof (uint8_t)) { 2839 *(uint8_t *)pr_val = Adapter->param_en_1000fdx; 2840 err = 0; 2841 } 2842 break; 2843 case DLD_PROP_ADV_1000HDX_CAP: 2844 if (pr_valsize >= 
sizeof (uint8_t)) { 2845 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx; 2846 err = 0; 2847 } 2848 break; 2849 case DLD_PROP_EN_1000HDX_CAP: 2850 if (pr_valsize >= sizeof (uint8_t)) { 2851 *(uint8_t *)pr_val = Adapter->param_en_1000hdx; 2852 err = 0; 2853 } 2854 break; 2855 case DLD_PROP_ADV_100FDX_CAP: 2856 if (pr_valsize >= sizeof (uint8_t)) { 2857 *(uint8_t *)pr_val = Adapter->param_adv_100fdx; 2858 err = 0; 2859 } 2860 break; 2861 case DLD_PROP_EN_100FDX_CAP: 2862 if (pr_valsize >= sizeof (uint8_t)) { 2863 *(uint8_t *)pr_val = Adapter->param_en_100fdx; 2864 err = 0; 2865 } 2866 break; 2867 case DLD_PROP_ADV_100HDX_CAP: 2868 if (pr_valsize >= sizeof (uint8_t)) { 2869 *(uint8_t *)pr_val = Adapter->param_adv_100hdx; 2870 err = 0; 2871 } 2872 break; 2873 case DLD_PROP_EN_100HDX_CAP: 2874 if (pr_valsize >= sizeof (uint8_t)) { 2875 *(uint8_t *)pr_val = Adapter->param_en_100hdx; 2876 err = 0; 2877 } 2878 break; 2879 case DLD_PROP_ADV_10FDX_CAP: 2880 if (pr_valsize >= sizeof (uint8_t)) { 2881 *(uint8_t *)pr_val = Adapter->param_adv_10fdx; 2882 err = 0; 2883 } 2884 break; 2885 case DLD_PROP_EN_10FDX_CAP: 2886 if (pr_valsize >= sizeof (uint8_t)) { 2887 *(uint8_t *)pr_val = Adapter->param_en_10fdx; 2888 err = 0; 2889 } 2890 break; 2891 case DLD_PROP_ADV_10HDX_CAP: 2892 if (pr_valsize >= sizeof (uint8_t)) { 2893 *(uint8_t *)pr_val = Adapter->param_adv_10hdx; 2894 err = 0; 2895 } 2896 break; 2897 case DLD_PROP_EN_10HDX_CAP: 2898 if (pr_valsize >= sizeof (uint8_t)) { 2899 *(uint8_t *)pr_val = Adapter->param_en_10hdx; 2900 err = 0; 2901 } 2902 break; 2903 case DLD_PROP_PRIVATE: 2904 err = e1000g_get_priv_prop(Adapter, pr_name, 2905 pr_valsize, pr_val); 2906 break; 2907 default: 2908 err = ENOTSUP; 2909 break; 2910 } 2911 return (err); 2912 } 2913 2914 /* ARGUSED */ 2915 static int 2916 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name, 2917 uint_t pr_valsize, const void *pr_val) 2918 { 2919 int err = 0; 2920 long result; 2921 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring; 2922 struct e1000_hw *hw = &Adapter->shared; 2923 2924 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 2925 if (pr_val == NULL) { 2926 err = EINVAL; 2927 return (err); 2928 } 2929 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 2930 if (result < MIN_TX_BCOPY_THRESHOLD || 2931 result > MAX_TX_BCOPY_THRESHOLD) 2932 err = EINVAL; 2933 else { 2934 Adapter->tx_bcopy_thresh = (uint32_t)result; 2935 tx_ring->frags_limit = (hw->mac.max_frame_size / 2936 Adapter->tx_bcopy_thresh) + 2; 2937 if (tx_ring->frags_limit > 2938 (MAX_TX_DESC_PER_PACKET >> 1)) 2939 tx_ring->frags_limit = 2940 (MAX_TX_DESC_PER_PACKET >> 1); 2941 } 2942 return (err); 2943 } 2944 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 2945 if (pr_val == NULL) { 2946 err = EINVAL; 2947 return (err); 2948 } 2949 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 2950 if (result < 0 || result > 1) 2951 err = EINVAL; 2952 else { 2953 Adapter->tx_intr_enable = (result == 1) ? 
2954 B_TRUE: B_FALSE; 2955 if (Adapter->tx_intr_enable) 2956 e1000g_mask_tx_interrupt(Adapter); 2957 else 2958 e1000g_clear_tx_interrupt(Adapter); 2959 if (e1000g_check_acc_handle( 2960 Adapter->osdep.reg_handle) != DDI_FM_OK) 2961 ddi_fm_service_impact(Adapter->dip, 2962 DDI_SERVICE_DEGRADED); 2963 } 2964 return (err); 2965 } 2966 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 2967 if (pr_val == NULL) { 2968 err = EINVAL; 2969 return (err); 2970 } 2971 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 2972 if (result < MIN_TX_INTR_DELAY || 2973 result > MAX_TX_INTR_DELAY) 2974 err = EINVAL; 2975 else { 2976 Adapter->tx_intr_delay = (uint32_t)result; 2977 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay); 2978 if (e1000g_check_acc_handle( 2979 Adapter->osdep.reg_handle) != DDI_FM_OK) 2980 ddi_fm_service_impact(Adapter->dip, 2981 DDI_SERVICE_DEGRADED); 2982 } 2983 return (err); 2984 } 2985 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 2986 if (pr_val == NULL) { 2987 err = EINVAL; 2988 return (err); 2989 } 2990 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 2991 if (result < MIN_TX_INTR_ABS_DELAY || 2992 result > MAX_TX_INTR_ABS_DELAY) 2993 err = EINVAL; 2994 else { 2995 Adapter->tx_intr_abs_delay = (uint32_t)result; 2996 E1000_WRITE_REG(hw, E1000_TADV, 2997 Adapter->tx_intr_abs_delay); 2998 if (e1000g_check_acc_handle( 2999 Adapter->osdep.reg_handle) != DDI_FM_OK) 3000 ddi_fm_service_impact(Adapter->dip, 3001 DDI_SERVICE_DEGRADED); 3002 } 3003 return (err); 3004 } 3005 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3006 if (pr_val == NULL) { 3007 err = EINVAL; 3008 return (err); 3009 } 3010 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3011 if (result < MIN_RX_BCOPY_THRESHOLD || 3012 result > MAX_RX_BCOPY_THRESHOLD) 3013 err = EINVAL; 3014 else 3015 Adapter->rx_bcopy_thresh = (uint32_t)result; 3016 return (err); 3017 } 3018 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3019 if (pr_val == NULL) { 3020 err = EINVAL; 3021 return (err); 3022 } 3023 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3024 if (result < MIN_RX_LIMIT_ON_INTR || 3025 result > MAX_RX_LIMIT_ON_INTR) 3026 err = EINVAL; 3027 else 3028 Adapter->rx_limit_onintr = (uint32_t)result; 3029 return (err); 3030 } 3031 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3032 if (pr_val == NULL) { 3033 err = EINVAL; 3034 return (err); 3035 } 3036 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3037 if (result < MIN_RX_INTR_DELAY || 3038 result > MAX_RX_INTR_DELAY) 3039 err = EINVAL; 3040 else { 3041 Adapter->rx_intr_delay = (uint32_t)result; 3042 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay); 3043 if (e1000g_check_acc_handle( 3044 Adapter->osdep.reg_handle) != DDI_FM_OK) 3045 ddi_fm_service_impact(Adapter->dip, 3046 DDI_SERVICE_DEGRADED); 3047 } 3048 return (err); 3049 } 3050 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3051 if (pr_val == NULL) { 3052 err = EINVAL; 3053 return (err); 3054 } 3055 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3056 if (result < MIN_RX_INTR_ABS_DELAY || 3057 result > MAX_RX_INTR_ABS_DELAY) 3058 err = EINVAL; 3059 else { 3060 Adapter->rx_intr_abs_delay = (uint32_t)result; 3061 E1000_WRITE_REG(hw, E1000_RADV, 3062 Adapter->rx_intr_abs_delay); 3063 if (e1000g_check_acc_handle( 3064 Adapter->osdep.reg_handle) != DDI_FM_OK) 3065 ddi_fm_service_impact(Adapter->dip, 3066 DDI_SERVICE_DEGRADED); 3067 } 3068 return (err); 3069 } 3070 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3071 if (pr_val == NULL) { 3072 err = EINVAL; 3073 return (err); 
3074 } 3075 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3076 if (result < MIN_INTR_THROTTLING || 3077 result > MAX_INTR_THROTTLING) 3078 err = EINVAL; 3079 else { 3080 if (hw->mac.type >= e1000_82540) { 3081 Adapter->intr_throttling_rate = 3082 (uint32_t)result; 3083 E1000_WRITE_REG(hw, E1000_ITR, 3084 Adapter->intr_throttling_rate); 3085 if (e1000g_check_acc_handle( 3086 Adapter->osdep.reg_handle) != DDI_FM_OK) 3087 ddi_fm_service_impact(Adapter->dip, 3088 DDI_SERVICE_DEGRADED); 3089 } else 3090 err = EINVAL; 3091 } 3092 return (err); 3093 } 3094 if (strcmp(pr_name, "_intr_adaptive") == 0) { 3095 if (pr_val == NULL) { 3096 err = EINVAL; 3097 return (err); 3098 } 3099 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3100 if (result < 0 || result > 1) 3101 err = EINVAL; 3102 else { 3103 if (hw->mac.type >= e1000_82540) { 3104 Adapter->intr_adaptive = (result == 1) ? 3105 B_TRUE : B_FALSE; 3106 } else { 3107 err = EINVAL; 3108 } 3109 } 3110 return (err); 3111 } 3112 if (strcmp(pr_name, "_tx_recycle_thresh") == 0) { 3113 if (pr_val == NULL) { 3114 err = EINVAL; 3115 return (err); 3116 } 3117 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3118 if (result < MIN_TX_RECYCLE_THRESHOLD || 3119 result > MAX_TX_RECYCLE_THRESHOLD) 3120 err = EINVAL; 3121 else 3122 Adapter->tx_recycle_thresh = (uint32_t)result; 3123 return (err); 3124 } 3125 if (strcmp(pr_name, "_tx_recycle_num") == 0) { 3126 if (pr_val == NULL) { 3127 err = EINVAL; 3128 return (err); 3129 } 3130 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result); 3131 if (result < MIN_TX_RECYCLE_NUM || 3132 result > MAX_TX_RECYCLE_NUM) 3133 err = EINVAL; 3134 else 3135 Adapter->tx_recycle_num = (uint32_t)result; 3136 return (err); 3137 } 3138 return (ENOTSUP); 3139 } 3140 3141 static int 3142 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name, 3143 uint_t pr_valsize, void *pr_val) 3144 { 3145 char valstr[MAXNAMELEN]; 3146 int err = ENOTSUP; 3147 uint_t strsize; 3148 3149 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) { 3150 (void) sprintf(valstr, "%d", Adapter->tx_bcopy_thresh); 3151 err = 0; 3152 goto done; 3153 } 3154 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) { 3155 (void) sprintf(valstr, "%d", Adapter->tx_intr_enable); 3156 err = 0; 3157 goto done; 3158 } 3159 if (strcmp(pr_name, "_tx_intr_delay") == 0) { 3160 (void) sprintf(valstr, "%d", Adapter->tx_intr_delay); 3161 err = 0; 3162 goto done; 3163 } 3164 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) { 3165 (void) sprintf(valstr, "%d", Adapter->tx_intr_abs_delay); 3166 err = 0; 3167 goto done; 3168 } 3169 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) { 3170 (void) sprintf(valstr, "%d", Adapter->rx_bcopy_thresh); 3171 err = 0; 3172 goto done; 3173 } 3174 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) { 3175 (void) sprintf(valstr, "%d", Adapter->rx_limit_onintr); 3176 err = 0; 3177 goto done; 3178 } 3179 if (strcmp(pr_name, "_rx_intr_delay") == 0) { 3180 (void) sprintf(valstr, "%d", Adapter->rx_intr_delay); 3181 err = 0; 3182 goto done; 3183 } 3184 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) { 3185 (void) sprintf(valstr, "%d", Adapter->rx_intr_abs_delay); 3186 err = 0; 3187 goto done; 3188 } 3189 if (strcmp(pr_name, "_intr_throttling_rate") == 0) { 3190 (void) sprintf(valstr, "%d", Adapter->intr_throttling_rate); 3191 err = 0; 3192 goto done; 3193 } 3194 if (strcmp(pr_name, "_intr_adaptive") == 0) { 3195 (void) sprintf(valstr, "%d", Adapter->intr_adaptive); 3196 err = 0; 3197 goto done; 3198 } 3199 if (strcmp(pr_name, "_tx_recycle_thresh") == 
0) { 3200 (void) sprintf(valstr, "%d", Adapter->tx_recycle_thresh); 3201 err = 0; 3202 goto done; 3203 } 3204 if (strcmp(pr_name, "_tx_recycle_num") == 0) { 3205 (void) sprintf(valstr, "%d", Adapter->tx_recycle_num); 3206 err = 0; 3207 goto done; 3208 } 3209 done: 3210 if (err == 0) { 3211 strsize = (uint_t)strlen(valstr); 3212 if (pr_valsize < strsize) 3213 err = ENOBUFS; 3214 else 3215 (void) strlcpy(pr_val, valstr, pr_valsize); 3216 } 3217 return (err); 3218 } 3219 3220 /* 3221 * e1000g_get_conf - get configurations set in e1000g.conf 3222 * This routine gets user-configured values out of the configuration 3223 * file e1000g.conf. 3224 * 3225 * For each configurable value, there is a minimum, a maximum, and a 3226 * default. 3227 * If user does not configure a value, use the default. 3228 * If user configures below the minimum, use the minumum. 3229 * If user configures above the maximum, use the maxumum. 3230 */ 3231 static void 3232 e1000g_get_conf(struct e1000g *Adapter) 3233 { 3234 struct e1000_hw *hw = &Adapter->shared; 3235 boolean_t tbi_compatibility = B_FALSE; 3236 3237 /* 3238 * get each configurable property from e1000g.conf 3239 */ 3240 3241 /* 3242 * NumTxDescriptors 3243 */ 3244 Adapter->tx_desc_num = 3245 e1000g_get_prop(Adapter, "NumTxDescriptors", 3246 MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR, 3247 DEFAULT_NUM_TX_DESCRIPTOR); 3248 3249 /* 3250 * NumRxDescriptors 3251 */ 3252 Adapter->rx_desc_num = 3253 e1000g_get_prop(Adapter, "NumRxDescriptors", 3254 MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR, 3255 DEFAULT_NUM_RX_DESCRIPTOR); 3256 3257 /* 3258 * NumRxFreeList 3259 */ 3260 Adapter->rx_freelist_num = 3261 e1000g_get_prop(Adapter, "NumRxFreeList", 3262 MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST, 3263 DEFAULT_NUM_RX_FREELIST); 3264 3265 /* 3266 * NumTxPacketList 3267 */ 3268 Adapter->tx_freelist_num = 3269 e1000g_get_prop(Adapter, "NumTxPacketList", 3270 MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST, 3271 DEFAULT_NUM_TX_FREELIST); 3272 3273 /* 3274 * FlowControl 3275 */ 3276 hw->mac.fc_send_xon = B_TRUE; 3277 hw->mac.fc = 3278 e1000g_get_prop(Adapter, "FlowControl", 3279 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL); 3280 /* 4 is the setting that says "let the eeprom decide" */ 3281 if (hw->mac.fc == 4) 3282 hw->mac.fc = e1000_fc_default; 3283 3284 /* 3285 * Max Num Receive Packets on Interrupt 3286 */ 3287 Adapter->rx_limit_onintr = 3288 e1000g_get_prop(Adapter, "MaxNumReceivePackets", 3289 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR, 3290 DEFAULT_RX_LIMIT_ON_INTR); 3291 3292 /* 3293 * PHY master slave setting 3294 */ 3295 hw->phy.ms_type = 3296 e1000g_get_prop(Adapter, "SetMasterSlave", 3297 e1000_ms_hw_default, e1000_ms_auto, 3298 e1000_ms_hw_default); 3299 3300 /* 3301 * Parameter which controls TBI mode workaround, which is only 3302 * needed on certain switches such as Cisco 6500/Foundry 3303 */ 3304 tbi_compatibility = 3305 e1000g_get_prop(Adapter, "TbiCompatibilityEnable", 3306 0, 1, DEFAULT_TBI_COMPAT_ENABLE); 3307 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility); 3308 3309 /* 3310 * MSI Enable 3311 */ 3312 Adapter->msi_enabled = 3313 e1000g_get_prop(Adapter, "MSIEnable", 3314 0, 1, DEFAULT_MSI_ENABLE); 3315 3316 /* 3317 * Interrupt Throttling Rate 3318 */ 3319 Adapter->intr_throttling_rate = 3320 e1000g_get_prop(Adapter, "intr_throttling_rate", 3321 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING, 3322 DEFAULT_INTR_THROTTLING); 3323 3324 /* 3325 * Adaptive Interrupt Blanking Enable/Disable 3326 * It is enabled by default 3327 */ 3328 Adapter->intr_adaptive = 3329 
(e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1) == 1) ? 3330 B_TRUE : B_FALSE; 3331 3332 /* 3333 * Tx recycle threshold 3334 */ 3335 Adapter->tx_recycle_thresh = 3336 e1000g_get_prop(Adapter, "tx_recycle_thresh", 3337 MIN_TX_RECYCLE_THRESHOLD, MAX_TX_RECYCLE_THRESHOLD, 3338 DEFAULT_TX_RECYCLE_THRESHOLD); 3339 3340 /* 3341 * Tx recycle descriptor number 3342 */ 3343 Adapter->tx_recycle_num = 3344 e1000g_get_prop(Adapter, "tx_recycle_num", 3345 MIN_TX_RECYCLE_NUM, MAX_TX_RECYCLE_NUM, 3346 DEFAULT_TX_RECYCLE_NUM); 3347 3348 /* 3349 * Hardware checksum enable/disable parameter 3350 */ 3351 Adapter->tx_hcksum_enabled = 3352 e1000g_get_prop(Adapter, "tx_hcksum_enabled", 3353 0, 1, DEFAULT_TX_HCKSUM_ENABLE); 3354 3355 } 3356 3357 /* 3358 * e1000g_get_prop - routine to read properties 3359 * 3360 * Get a user-configure property value out of the configuration 3361 * file e1000g.conf. 3362 * 3363 * Caller provides name of the property, a default value, a minimum 3364 * value, and a maximum value. 3365 * 3366 * Return configured value of the property, with default, minimum and 3367 * maximum properly applied. 3368 */ 3369 static int 3370 e1000g_get_prop(struct e1000g *Adapter, /* point to per-adapter structure */ 3371 char *propname, /* name of the property */ 3372 int minval, /* minimum acceptable value */ 3373 int maxval, /* maximim acceptable value */ 3374 int defval) /* default value */ 3375 { 3376 int propval; /* value returned for requested property */ 3377 int *props; /* point to array of properties returned */ 3378 uint_t nprops; /* number of property value returned */ 3379 3380 /* 3381 * get the array of properties from the config file 3382 */ 3383 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip, 3384 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) { 3385 /* got some properties, test if we got enough */ 3386 if (Adapter->instance < nprops) { 3387 propval = props[Adapter->instance]; 3388 } else { 3389 /* not enough properties configured */ 3390 propval = defval; 3391 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 3392 "Not Enough %s values found in e1000g.conf" 3393 " - set to %d\n", 3394 propname, propval); 3395 } 3396 3397 /* free memory allocated for properties */ 3398 ddi_prop_free(props); 3399 3400 } else { 3401 propval = defval; 3402 } 3403 3404 /* 3405 * enforce limits 3406 */ 3407 if (propval > maxval) { 3408 propval = maxval; 3409 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 3410 "Too High %s value in e1000g.conf - set to %d\n", 3411 propname, propval); 3412 } 3413 3414 if (propval < minval) { 3415 propval = minval; 3416 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL, 3417 "Too Low %s value in e1000g.conf - set to %d\n", 3418 propname, propval); 3419 } 3420 3421 return (propval); 3422 } 3423 3424 static boolean_t 3425 e1000g_link_check(struct e1000g *Adapter) 3426 { 3427 uint16_t speed, duplex, phydata; 3428 boolean_t link_changed = B_FALSE; 3429 struct e1000_hw *hw; 3430 uint32_t reg_tarc; 3431 3432 hw = &Adapter->shared; 3433 3434 if (e1000g_link_up(Adapter)) { 3435 /* 3436 * The Link is up, check whether it was marked as down earlier 3437 */ 3438 if (Adapter->link_state != LINK_STATE_UP) { 3439 e1000_get_speed_and_duplex(hw, &speed, &duplex); 3440 Adapter->link_speed = speed; 3441 Adapter->link_duplex = duplex; 3442 Adapter->link_state = LINK_STATE_UP; 3443 link_changed = B_TRUE; 3444 3445 Adapter->tx_link_down_timeout = 0; 3446 3447 if ((hw->mac.type == e1000_82571) || 3448 (hw->mac.type == e1000_82572)) { 3449 reg_tarc = E1000_READ_REG(hw, 
E1000_TARC0); 3450 if (speed == SPEED_1000) 3451 reg_tarc |= (1 << 21); 3452 else 3453 reg_tarc &= ~(1 << 21); 3454 E1000_WRITE_REG(hw, E1000_TARC0, reg_tarc); 3455 } 3456 } 3457 Adapter->smartspeed = 0; 3458 } else { 3459 if (Adapter->link_state != LINK_STATE_DOWN) { 3460 Adapter->link_speed = 0; 3461 Adapter->link_duplex = 0; 3462 Adapter->link_state = LINK_STATE_DOWN; 3463 link_changed = B_TRUE; 3464 3465 /* 3466 * SmartSpeed workaround for Tabor/TanaX, When the 3467 * driver loses link disable auto master/slave 3468 * resolution. 3469 */ 3470 if (hw->phy.type == e1000_phy_igp) { 3471 e1000_read_phy_reg(hw, 3472 PHY_1000T_CTRL, &phydata); 3473 phydata |= CR_1000T_MS_ENABLE; 3474 e1000_write_phy_reg(hw, 3475 PHY_1000T_CTRL, phydata); 3476 } 3477 } else { 3478 e1000g_smartspeed(Adapter); 3479 } 3480 3481 if (Adapter->chip_state == E1000G_START) { 3482 if (Adapter->tx_link_down_timeout < 3483 MAX_TX_LINK_DOWN_TIMEOUT) { 3484 Adapter->tx_link_down_timeout++; 3485 } else if (Adapter->tx_link_down_timeout == 3486 MAX_TX_LINK_DOWN_TIMEOUT) { 3487 e1000g_tx_clean(Adapter); 3488 Adapter->tx_link_down_timeout++; 3489 } 3490 } 3491 } 3492 3493 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 3494 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 3495 3496 return (link_changed); 3497 } 3498 3499 /* 3500 * e1000g_reset_link - Using the link properties to setup the link 3501 */ 3502 int 3503 e1000g_reset_link(struct e1000g *Adapter) 3504 { 3505 struct e1000_mac_info *mac; 3506 struct e1000_phy_info *phy; 3507 boolean_t invalid; 3508 3509 mac = &Adapter->shared.mac; 3510 phy = &Adapter->shared.phy; 3511 invalid = B_FALSE; 3512 3513 if (Adapter->param_adv_autoneg == 1) { 3514 mac->autoneg = B_TRUE; 3515 phy->autoneg_advertised = 0; 3516 3517 /* 3518 * 1000hdx is not supported for autonegotiation 3519 */ 3520 if (Adapter->param_adv_1000fdx == 1) 3521 phy->autoneg_advertised |= ADVERTISE_1000_FULL; 3522 3523 if (Adapter->param_adv_100fdx == 1) 3524 phy->autoneg_advertised |= ADVERTISE_100_FULL; 3525 3526 if (Adapter->param_adv_100hdx == 1) 3527 phy->autoneg_advertised |= ADVERTISE_100_HALF; 3528 3529 if (Adapter->param_adv_10fdx == 1) 3530 phy->autoneg_advertised |= ADVERTISE_10_FULL; 3531 3532 if (Adapter->param_adv_10hdx == 1) 3533 phy->autoneg_advertised |= ADVERTISE_10_HALF; 3534 3535 if (phy->autoneg_advertised == 0) 3536 invalid = B_TRUE; 3537 } else { 3538 mac->autoneg = B_FALSE; 3539 3540 /* 3541 * 1000fdx and 1000hdx are not supported for forced link 3542 */ 3543 if (Adapter->param_adv_100fdx == 1) 3544 mac->forced_speed_duplex = ADVERTISE_100_FULL; 3545 else if (Adapter->param_adv_100hdx == 1) 3546 mac->forced_speed_duplex = ADVERTISE_100_HALF; 3547 else if (Adapter->param_adv_10fdx == 1) 3548 mac->forced_speed_duplex = ADVERTISE_10_FULL; 3549 else if (Adapter->param_adv_10hdx == 1) 3550 mac->forced_speed_duplex = ADVERTISE_10_HALF; 3551 else 3552 invalid = B_TRUE; 3553 3554 } 3555 3556 if (invalid) { 3557 e1000g_log(Adapter, CE_WARN, 3558 "Invalid link sets. 
Setup link to " 3559 "support autonegotiation with all link capabilities."); 3560 mac->autoneg = B_TRUE; 3561 phy->autoneg_advertised = ADVERTISE_1000_FULL | 3562 ADVERTISE_100_FULL | ADVERTISE_100_HALF | 3563 ADVERTISE_10_FULL | ADVERTISE_10_HALF; 3564 } 3565 3566 return (e1000_setup_link(&Adapter->shared)); 3567 } 3568 3569 static void 3570 e1000g_local_timer(void *ws) 3571 { 3572 struct e1000g *Adapter = (struct e1000g *)ws; 3573 struct e1000_hw *hw; 3574 e1000g_ether_addr_t ether_addr; 3575 boolean_t link_changed; 3576 3577 hw = &Adapter->shared; 3578 3579 if (Adapter->chip_state == E1000G_ERROR) { 3580 Adapter->reset_count++; 3581 if (e1000g_global_reset(Adapter)) 3582 ddi_fm_service_impact(Adapter->dip, 3583 DDI_SERVICE_RESTORED); 3584 else 3585 ddi_fm_service_impact(Adapter->dip, 3586 DDI_SERVICE_LOST); 3587 return; 3588 } 3589 3590 if (e1000g_stall_check(Adapter)) { 3591 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 3592 "Tx stall detected. Activating automatic recovery.\n"); 3593 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL); 3594 Adapter->reset_count++; 3595 if (e1000g_reset(Adapter)) 3596 ddi_fm_service_impact(Adapter->dip, 3597 DDI_SERVICE_RESTORED); 3598 else 3599 ddi_fm_service_impact(Adapter->dip, 3600 DDI_SERVICE_LOST); 3601 return; 3602 } 3603 3604 link_changed = B_FALSE; 3605 rw_enter(&Adapter->chip_lock, RW_READER); 3606 if (Adapter->link_complete) 3607 link_changed = e1000g_link_check(Adapter); 3608 rw_exit(&Adapter->chip_lock); 3609 3610 if (link_changed) { 3611 /* 3612 * Workaround for esb2. Data stuck in fifo on a link 3613 * down event. Reset the adapter to recover it. 3614 */ 3615 if ((Adapter->link_state == LINK_STATE_DOWN) && 3616 (hw->mac.type == e1000_80003es2lan)) 3617 (void) e1000g_reset(Adapter); 3618 3619 mac_link_update(Adapter->mh, Adapter->link_state); 3620 } 3621 3622 /* 3623 * With 82571 controllers, any locally administered address will 3624 * be overwritten when there is a reset on the other port. 3625 * Detect this circumstance and correct it. 3626 */ 3627 if ((hw->mac.type == e1000_82571) && 3628 (e1000_get_laa_state_82571(hw) == B_TRUE)) { 3629 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0); 3630 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1); 3631 3632 ether_addr.reg.low = ntohl(ether_addr.reg.low); 3633 ether_addr.reg.high = ntohl(ether_addr.reg.high); 3634 3635 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) || 3636 (ether_addr.mac.addr[4] != hw->mac.addr[1]) || 3637 (ether_addr.mac.addr[3] != hw->mac.addr[2]) || 3638 (ether_addr.mac.addr[2] != hw->mac.addr[3]) || 3639 (ether_addr.mac.addr[1] != hw->mac.addr[4]) || 3640 (ether_addr.mac.addr[0] != hw->mac.addr[5])) { 3641 e1000_rar_set(hw, hw->mac.addr, 0); 3642 } 3643 } 3644 3645 /* 3646 * Long TTL workaround for 82541/82547 3647 */ 3648 e1000_igp_ttl_workaround_82547(hw); 3649 3650 /* 3651 * Check the Adaptive IFS settings. If there are lots of collisions, 3652 * change the value in steps...
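 * (e1000_update_adaptive() in the shared code performs the actual
 * IFS adjustment.)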
3653 * These properties should only be set for 10/100 3654 */ 3655 if ((hw->media_type == e1000_media_type_copper) && 3656 ((Adapter->link_speed == SPEED_100) || 3657 (Adapter->link_speed == SPEED_10))) { 3658 e1000_update_adaptive(hw); 3659 } 3660 /* 3661 * Set Timer Interrupts 3662 */ 3663 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0); 3664 3665 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) 3666 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 3667 3668 restart_watchdog_timer(Adapter); 3669 } 3670 3671 /* 3672 * The function e1000g_link_timer() is called when the timer for link setup 3673 * expires, which indicates the completion of the link setup. The link 3674 * state will not be updated until the link setup is completed. And the 3675 * link state will not be sent to the upper layer through mac_link_update() 3676 * in this function. It will be updated in the local timer routine or the 3677 * interrupt service routine after the interface is started (plumbed). 3678 */ 3679 static void 3680 e1000g_link_timer(void *arg) 3681 { 3682 struct e1000g *Adapter = (struct e1000g *)arg; 3683 3684 mutex_enter(&Adapter->link_lock); 3685 Adapter->link_complete = B_TRUE; 3686 Adapter->link_tid = 0; 3687 mutex_exit(&Adapter->link_lock); 3688 } 3689 3690 /* 3691 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf 3692 * 3693 * This function reads the forced speed and duplex for 10/100 Mbps 3694 * and 1000 Mbps speeds from the e1000g.conf file 3695 */ 3696 static void 3697 e1000g_force_speed_duplex(struct e1000g *Adapter) 3698 { 3699 int forced; 3700 struct e1000_mac_info *mac = &Adapter->shared.mac; 3701 struct e1000_phy_info *phy = &Adapter->shared.phy; 3702 3703 /* 3704 * get value out of config file 3705 */ 3706 forced = e1000g_get_prop(Adapter, "ForceSpeedDuplex", 3707 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY); 3708 3709 switch (forced) { 3710 case GDIAG_10_HALF: 3711 /* 3712 * Disable Auto Negotiation 3713 */ 3714 mac->autoneg = B_FALSE; 3715 mac->forced_speed_duplex = ADVERTISE_10_HALF; 3716 break; 3717 case GDIAG_10_FULL: 3718 /* 3719 * Disable Auto Negotiation 3720 */ 3721 mac->autoneg = B_FALSE; 3722 mac->forced_speed_duplex = ADVERTISE_10_FULL; 3723 break; 3724 case GDIAG_100_HALF: 3725 /* 3726 * Disable Auto Negotiation 3727 */ 3728 mac->autoneg = B_FALSE; 3729 mac->forced_speed_duplex = ADVERTISE_100_HALF; 3730 break; 3731 case GDIAG_100_FULL: 3732 /* 3733 * Disable Auto Negotiation 3734 */ 3735 mac->autoneg = B_FALSE; 3736 mac->forced_speed_duplex = ADVERTISE_100_FULL; 3737 break; 3738 case GDIAG_1000_FULL: 3739 /* 3740 * The gigabit spec requires autonegotiation. Therefore, 3741 * when the user wants to force the speed to 1000Mbps, we 3742 * enable AutoNeg, but only allow the hardware to advertise 3743 * 1000Mbps. This is different from 10/100 operation, where 3744 * we are allowed to link without any negotiation.
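 * (1000BASE-T needs autonegotiation for master/slave resolution,
 * which is why only the advertisement is restricted here instead of
 * forcing the speed outright.)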
3745 */ 3746 mac->autoneg = B_TRUE; 3747 phy->autoneg_advertised = ADVERTISE_1000_FULL; 3748 break; 3749 default: /* obey the setting of AutoNegAdvertised */ 3750 mac->autoneg = B_TRUE; 3751 phy->autoneg_advertised = 3752 (uint16_t)e1000g_get_prop(Adapter, "AutoNegAdvertised", 3753 0, AUTONEG_ADVERTISE_SPEED_DEFAULT, 3754 AUTONEG_ADVERTISE_SPEED_DEFAULT); 3755 break; 3756 } /* switch */ 3757 } 3758 3759 /* 3760 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf 3761 * 3762 * This function reads MaxFrameSize from e1000g.conf 3763 */ 3764 static void 3765 e1000g_get_max_frame_size(struct e1000g *Adapter) 3766 { 3767 int max_frame; 3768 struct e1000_mac_info *mac = &Adapter->shared.mac; 3769 struct e1000_phy_info *phy = &Adapter->shared.phy; 3770 3771 /* 3772 * get value out of config file 3773 */ 3774 max_frame = e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0); 3775 3776 switch (max_frame) { 3777 case 0: 3778 Adapter->default_mtu = ETHERMTU; 3779 break; 3780 /* 3781 * To avoid excessive memory allocation for rx buffers, 3782 * the bytes of E1000G_IPALIGNPRESERVEROOM are reserved. 3783 */ 3784 case 1: 3785 Adapter->default_mtu = FRAME_SIZE_UPTO_4K - 3786 sizeof (struct ether_vlan_header) - ETHERFCSL - 3787 E1000G_IPALIGNPRESERVEROOM; 3788 break; 3789 case 2: 3790 Adapter->default_mtu = FRAME_SIZE_UPTO_8K - 3791 sizeof (struct ether_vlan_header) - ETHERFCSL - 3792 E1000G_IPALIGNPRESERVEROOM; 3793 break; 3794 case 3: 3795 if (mac->type >= e1000_82571) 3796 Adapter->default_mtu = MAXIMUM_MTU; 3797 else 3798 Adapter->default_mtu = FRAME_SIZE_UPTO_16K - 3799 sizeof (struct ether_vlan_header) - ETHERFCSL - 3800 E1000G_IPALIGNPRESERVEROOM; 3801 break; 3802 default: 3803 Adapter->default_mtu = ETHERMTU; 3804 break; 3805 } /* switch */ 3806 3807 mac->max_frame_size = Adapter->default_mtu + 3808 sizeof (struct ether_vlan_header) + ETHERFCSL; 3809 3810 /* ich8 does not do jumbo frames */ 3811 if (mac->type == e1000_ich8lan) { 3812 mac->max_frame_size = ETHERMAX; 3813 } 3814 3815 /* ich9 does not do jumbo frames on one phy type */ 3816 if ((mac->type == e1000_ich9lan) && 3817 (phy->type == e1000_phy_ife)) { 3818 mac->max_frame_size = ETHERMAX; 3819 } 3820 } 3821 3822 static void 3823 arm_watchdog_timer(struct e1000g *Adapter) 3824 { 3825 Adapter->watchdog_tid = 3826 timeout(e1000g_local_timer, 3827 (void *)Adapter, 1 * drv_usectohz(1000000)); 3828 } 3829 #pragma inline(arm_watchdog_timer) 3830 3831 static void 3832 enable_watchdog_timer(struct e1000g *Adapter) 3833 { 3834 mutex_enter(&Adapter->watchdog_lock); 3835 3836 if (!Adapter->watchdog_timer_enabled) { 3837 Adapter->watchdog_timer_enabled = B_TRUE; 3838 Adapter->watchdog_timer_started = B_TRUE; 3839 arm_watchdog_timer(Adapter); 3840 } 3841 3842 mutex_exit(&Adapter->watchdog_lock); 3843 } 3844 3845 static void 3846 disable_watchdog_timer(struct e1000g *Adapter) 3847 { 3848 timeout_id_t tid; 3849 3850 mutex_enter(&Adapter->watchdog_lock); 3851 3852 Adapter->watchdog_timer_enabled = B_FALSE; 3853 Adapter->watchdog_timer_started = B_FALSE; 3854 tid = Adapter->watchdog_tid; 3855 Adapter->watchdog_tid = 0; 3856 3857 mutex_exit(&Adapter->watchdog_lock); 3858 3859 if (tid != 0) 3860 (void) untimeout(tid); 3861 } 3862 3863 static void 3864 start_watchdog_timer(struct e1000g *Adapter) 3865 { 3866 mutex_enter(&Adapter->watchdog_lock); 3867 3868 if (Adapter->watchdog_timer_enabled) { 3869 if (!Adapter->watchdog_timer_started) { 3870 Adapter->watchdog_timer_started = B_TRUE; 3871 arm_watchdog_timer(Adapter); 3872 } 3873 } 3874 3875 
mutex_exit(&Adapter->watchdog_lock); 3876 } 3877 3878 static void 3879 restart_watchdog_timer(struct e1000g *Adapter) 3880 { 3881 mutex_enter(&Adapter->watchdog_lock); 3882 3883 if (Adapter->watchdog_timer_started) 3884 arm_watchdog_timer(Adapter); 3885 3886 mutex_exit(&Adapter->watchdog_lock); 3887 } 3888 3889 static void 3890 stop_watchdog_timer(struct e1000g *Adapter) 3891 { 3892 timeout_id_t tid; 3893 3894 mutex_enter(&Adapter->watchdog_lock); 3895 3896 Adapter->watchdog_timer_started = B_FALSE; 3897 tid = Adapter->watchdog_tid; 3898 Adapter->watchdog_tid = 0; 3899 3900 mutex_exit(&Adapter->watchdog_lock); 3901 3902 if (tid != 0) 3903 (void) untimeout(tid); 3904 } 3905 3906 static void 3907 stop_link_timer(struct e1000g *Adapter) 3908 { 3909 timeout_id_t tid; 3910 3911 /* Disable the link timer */ 3912 mutex_enter(&Adapter->link_lock); 3913 3914 tid = Adapter->link_tid; 3915 Adapter->link_tid = 0; 3916 3917 mutex_exit(&Adapter->link_lock); 3918 3919 if (tid != 0) 3920 (void) untimeout(tid); 3921 } 3922 3923 static void 3924 stop_82547_timer(e1000g_tx_ring_t *tx_ring) 3925 { 3926 timeout_id_t tid; 3927 3928 /* Disable the tx timer for 82547 chipset */ 3929 mutex_enter(&tx_ring->tx_lock); 3930 3931 tx_ring->timer_enable_82547 = B_FALSE; 3932 tid = tx_ring->timer_id_82547; 3933 tx_ring->timer_id_82547 = 0; 3934 3935 mutex_exit(&tx_ring->tx_lock); 3936 3937 if (tid != 0) 3938 (void) untimeout(tid); 3939 } 3940 3941 void 3942 e1000g_clear_interrupt(struct e1000g *Adapter) 3943 { 3944 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 3945 0xffffffff & ~E1000_IMS_RXSEQ); 3946 } 3947 3948 void 3949 e1000g_mask_interrupt(struct e1000g *Adapter) 3950 { 3951 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, 3952 IMS_ENABLE_MASK & ~E1000_IMS_TXDW); 3953 3954 if (Adapter->tx_intr_enable) 3955 e1000g_mask_tx_interrupt(Adapter); 3956 } 3957 3958 void 3959 e1000g_clear_all_interrupts(struct e1000g *Adapter) 3960 { 3961 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff); 3962 } 3963 3964 void 3965 e1000g_mask_tx_interrupt(struct e1000g *Adapter) 3966 { 3967 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW); 3968 } 3969 3970 void 3971 e1000g_clear_tx_interrupt(struct e1000g *Adapter) 3972 { 3973 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW); 3974 } 3975 3976 static void 3977 e1000g_smartspeed(struct e1000g *Adapter) 3978 { 3979 struct e1000_hw *hw = &Adapter->shared; 3980 uint16_t phy_status; 3981 uint16_t phy_ctrl; 3982 3983 /* 3984 * If we're not T-or-T, or we're not autoneg'ing, or we're not 3985 * advertising 1000Full, we don't even use the workaround 3986 */ 3987 if ((hw->phy.type != e1000_phy_igp) || 3988 !hw->mac.autoneg || 3989 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)) 3990 return; 3991 3992 /* 3993 * True if this is the first call of this function or after every 3994 * 30 seconds of not having link 3995 */ 3996 if (Adapter->smartspeed == 0) { 3997 /* 3998 * If Master/Slave config fault is asserted twice, we 3999 * assume back-to-back 4000 */ 4001 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4002 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4003 return; 4004 4005 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); 4006 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) 4007 return; 4008 /* 4009 * We're assuming back-2-back because our status register 4010 * insists! 
there's a fault in the master/slave 4011 * relationship that was "negotiated" 4012 */ 4013 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4014 /* 4015 * Is the phy configured for manual configuration of 4016 * master/slave? 4017 */ 4018 if (phy_ctrl & CR_1000T_MS_ENABLE) { 4019 /* 4020 * Yes. Then disable manual configuration (enable 4021 * auto configuration) of master/slave 4022 */ 4023 phy_ctrl &= ~CR_1000T_MS_ENABLE; 4024 e1000_write_phy_reg(hw, 4025 PHY_1000T_CTRL, phy_ctrl); 4026 /* 4027 * Effectively starting the clock 4028 */ 4029 Adapter->smartspeed++; 4030 /* 4031 * Restart autonegotiation 4032 */ 4033 if (!e1000_phy_setup_autoneg(hw) && 4034 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 4035 phy_ctrl |= (MII_CR_AUTO_NEG_EN | 4036 MII_CR_RESTART_AUTO_NEG); 4037 e1000_write_phy_reg(hw, 4038 PHY_CONTROL, phy_ctrl); 4039 } 4040 } 4041 return; 4042 /* 4043 * Has 6 seconds transpired still without link? Remember, 4044 * you should reset the smartspeed counter once you obtain 4045 * link 4046 */ 4047 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { 4048 /* 4049 * Yes. Remember, we did at the start determine that 4050 * there's a master/slave configuration fault, so we're 4051 * still assuming there's someone on the other end, but we 4052 * just haven't yet been able to talk to it. We then 4053 * re-enable auto configuration of master/slave to see if 4054 * we're running 2/3 pair cables. 4055 */ 4056 /* 4057 * If still no link, perhaps using 2/3 pair cable 4058 */ 4059 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); 4060 phy_ctrl |= CR_1000T_MS_ENABLE; 4061 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); 4062 /* 4063 * Restart autoneg with phy enabled for manual 4064 * configuration of master/slave 4065 */ 4066 if (!e1000_phy_setup_autoneg(hw) && 4067 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) { 4068 phy_ctrl |= 4069 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); 4070 e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl); 4071 } 4072 /* 4073 * Hopefully, there are no more faults and we've obtained 4074 * link as a result. 4075 */ 4076 } 4077 /* 4078 * Restart process after E1000_SMARTSPEED_MAX iterations (30 4079 * seconds) 4080 */ 4081 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX) 4082 Adapter->smartspeed = 0; 4083 } 4084 4085 static boolean_t 4086 is_valid_mac_addr(uint8_t *mac_addr) 4087 { 4088 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 4089 const uint8_t addr_test2[6] = 4090 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4091 4092 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 4093 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 4094 return (B_FALSE); 4095 4096 return (B_TRUE); 4097 } 4098 4099 /* 4100 * e1000g_stall_check - check for tx stall 4101 * 4102 * This function checks if the adapter is stalled (in transmit). 4103 * 4104 * It is called each time the watchdog timeout is invoked. 4105 * If the transmit descriptor reclaim continuously fails, 4106 * the watchdog value will increment by 1. If the watchdog 4107 * value exceeds the threshold, the adapter is assumed to 4108 * have stalled and need to be reset. 
4109 */ 4110 static boolean_t 4111 e1000g_stall_check(struct e1000g *Adapter) 4112 { 4113 e1000g_tx_ring_t *tx_ring; 4114 4115 tx_ring = Adapter->tx_ring; 4116 4117 if (Adapter->link_state != LINK_STATE_UP) 4118 return (B_FALSE); 4119 4120 if (tx_ring->recycle_fail > 0) 4121 tx_ring->stall_watchdog++; 4122 else 4123 tx_ring->stall_watchdog = 0; 4124 4125 if (tx_ring->stall_watchdog < E1000G_STALL_WATCHDOG_COUNT) 4126 return (B_FALSE); 4127 4128 tx_ring->stall_watchdog = 0; 4129 tx_ring->recycle_fail = 0; 4130 4131 return (B_TRUE); 4132 } 4133 4134 #ifdef E1000G_DEBUG 4135 static enum ioc_reply 4136 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp) 4137 { 4138 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd); 4139 e1000g_peekpoke_t *ppd; 4140 uint64_t mem_va; 4141 uint64_t maxoff; 4142 boolean_t peek; 4143 4144 switch (iocp->ioc_cmd) { 4145 4146 case E1000G_IOC_REG_PEEK: 4147 peek = B_TRUE; 4148 break; 4149 4150 case E1000G_IOC_REG_POKE: 4151 peek = B_FALSE; 4152 break; 4153 4154 default: 4155 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL, 4156 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n", 4157 iocp->ioc_cmd); 4158 return (IOC_INVAL); 4159 } 4160 4161 /* 4162 * Validate format of ioctl 4163 */ 4164 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t)) 4165 return (IOC_INVAL); 4166 if (mp->b_cont == NULL) 4167 return (IOC_INVAL); 4168 4169 ppd = (e1000g_peekpoke_t *)mp->b_cont->b_rptr; 4170 4171 /* 4172 * Validate request parameters 4173 */ 4174 switch (ppd->pp_acc_space) { 4175 4176 default: 4177 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL, 4178 "e1000g_diag_ioctl: invalid access space 0x%X\n", 4179 ppd->pp_acc_space); 4180 return (IOC_INVAL); 4181 4182 case E1000G_PP_SPACE_REG: 4183 /* 4184 * Memory-mapped I/O space 4185 */ 4186 ASSERT(ppd->pp_acc_size == 4); 4187 if (ppd->pp_acc_size != 4) 4188 return (IOC_INVAL); 4189 4190 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0) 4191 return (IOC_INVAL); 4192 4193 mem_va = 0; 4194 maxoff = 0x10000; 4195 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg; 4196 break; 4197 4198 case E1000G_PP_SPACE_E1000G: 4199 /* 4200 * E1000g data structure! 4201 */ 4202 mem_va = (uintptr_t)e1000gp; 4203 maxoff = sizeof (struct e1000g); 4204 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem; 4205 break; 4206 4207 } 4208 4209 if (ppd->pp_acc_offset >= maxoff) 4210 return (IOC_INVAL); 4211 4212 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff) 4213 return (IOC_INVAL); 4214 4215 /* 4216 * All OK - go! 4217 */ 4218 ppd->pp_acc_offset += mem_va; 4219 (*ppfn)(e1000gp, ppd); 4220 return (peek ?
IOC_REPLY : IOC_ACK); 4221 } 4222 4223 static void 4224 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 4225 { 4226 ddi_acc_handle_t handle; 4227 uint32_t *regaddr; 4228 4229 handle = e1000gp->osdep.reg_handle; 4230 regaddr = 4231 (uint32_t *)(e1000gp->shared.hw_addr + ppd->pp_acc_offset); 4232 4233 ppd->pp_acc_data = ddi_get32(handle, regaddr); 4234 } 4235 4236 static void 4237 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 4238 { 4239 ddi_acc_handle_t handle; 4240 uint32_t *regaddr; 4241 uint32_t value; 4242 4243 handle = e1000gp->osdep.reg_handle; 4244 regaddr = 4245 (uint32_t *)(e1000gp->shared.hw_addr + ppd->pp_acc_offset); 4246 value = (uint32_t)ppd->pp_acc_data; 4247 4248 ddi_put32(handle, regaddr, value); 4249 } 4250 4251 static void 4252 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 4253 { 4254 uint64_t value; 4255 void *vaddr; 4256 4257 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 4258 4259 switch (ppd->pp_acc_size) { 4260 case 1: 4261 value = *(uint8_t *)vaddr; 4262 break; 4263 4264 case 2: 4265 value = *(uint16_t *)vaddr; 4266 break; 4267 4268 case 4: 4269 value = *(uint32_t *)vaddr; 4270 break; 4271 4272 case 8: 4273 value = *(uint64_t *)vaddr; 4274 break; 4275 } 4276 4277 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 4278 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n", 4279 (void *)e1000gp, (void *)ppd, value, vaddr); 4280 4281 ppd->pp_acc_data = value; 4282 } 4283 4284 static void 4285 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd) 4286 { 4287 uint64_t value; 4288 void *vaddr; 4289 4290 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset; 4291 value = ppd->pp_acc_data; 4292 4293 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL, 4294 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n", 4295 (void *)e1000gp, (void *)ppd, value, vaddr); 4296 4297 switch (ppd->pp_acc_size) { 4298 case 1: 4299 *(uint8_t *)vaddr = (uint8_t)value; 4300 break; 4301 4302 case 2: 4303 *(uint16_t *)vaddr = (uint16_t)value; 4304 break; 4305 4306 case 4: 4307 *(uint32_t *)vaddr = (uint32_t)value; 4308 break; 4309 4310 case 8: 4311 *(uint64_t *)vaddr = (uint64_t)value; 4312 break; 4313 } 4314 } 4315 #endif 4316 4317 /* 4318 * Loopback Support 4319 */ 4320 static lb_property_t lb_normal = 4321 { normal, "normal", E1000G_LB_NONE }; 4322 static lb_property_t lb_external1000 = 4323 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 }; 4324 static lb_property_t lb_external100 = 4325 { external, "100Mbps", E1000G_LB_EXTERNAL_100 }; 4326 static lb_property_t lb_external10 = 4327 { external, "10Mbps", E1000G_LB_EXTERNAL_10 }; 4328 static lb_property_t lb_phy = 4329 { internal, "PHY", E1000G_LB_INTERNAL_PHY }; 4330 4331 static enum ioc_reply 4332 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp) 4333 { 4334 lb_info_sz_t *lbsp; 4335 lb_property_t *lbpp; 4336 struct e1000_hw *hw; 4337 uint32_t *lbmp; 4338 uint32_t size; 4339 uint32_t value; 4340 4341 hw = &Adapter->shared; 4342 4343 if (mp->b_cont == NULL) 4344 return (IOC_INVAL); 4345 4346 switch (iocp->ioc_cmd) { 4347 default: 4348 return (IOC_INVAL); 4349 4350 case LB_GET_INFO_SIZE: 4351 size = sizeof (lb_info_sz_t); 4352 if (iocp->ioc_count != size) 4353 return (IOC_INVAL); 4354 4355 rw_enter(&Adapter->chip_lock, RW_WRITER); 4356 e1000g_get_phy_state(Adapter); 4357 4358 /* 4359 * Workaround for hardware faults. In order to get a stable 4360 * state of phy, we will wait for a specific interval and 4361 * try again. 
The time delay is an experiential value based 4362 * on our testing. 4363 */ 4364 msec_delay(100); 4365 e1000g_get_phy_state(Adapter); 4366 rw_exit(&Adapter->chip_lock); 4367 4368 value = sizeof (lb_normal); 4369 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 4370 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 4371 (hw->media_type == e1000_media_type_fiber) || 4372 (hw->media_type == e1000_media_type_internal_serdes)) { 4373 value += sizeof (lb_phy); 4374 switch (hw->mac.type) { 4375 case e1000_82571: 4376 case e1000_82572: 4377 value += sizeof (lb_external1000); 4378 break; 4379 } 4380 } 4381 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 4382 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 4383 value += sizeof (lb_external100); 4384 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 4385 value += sizeof (lb_external10); 4386 4387 lbsp = (lb_info_sz_t *)mp->b_cont->b_rptr; 4388 *lbsp = value; 4389 break; 4390 4391 case LB_GET_INFO: 4392 value = sizeof (lb_normal); 4393 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 4394 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 4395 (hw->media_type == e1000_media_type_fiber) || 4396 (hw->media_type == e1000_media_type_internal_serdes)) { 4397 value += sizeof (lb_phy); 4398 switch (hw->mac.type) { 4399 case e1000_82571: 4400 case e1000_82572: 4401 value += sizeof (lb_external1000); 4402 break; 4403 } 4404 } 4405 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 4406 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 4407 value += sizeof (lb_external100); 4408 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 4409 value += sizeof (lb_external10); 4410 4411 size = value; 4412 if (iocp->ioc_count != size) 4413 return (IOC_INVAL); 4414 4415 value = 0; 4416 lbpp = (lb_property_t *)mp->b_cont->b_rptr; 4417 lbpp[value++] = lb_normal; 4418 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 4419 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) || 4420 (hw->media_type == e1000_media_type_fiber) || 4421 (hw->media_type == e1000_media_type_internal_serdes)) { 4422 lbpp[value++] = lb_phy; 4423 switch (hw->mac.type) { 4424 case e1000_82571: 4425 case e1000_82572: 4426 lbpp[value++] = lb_external1000; 4427 break; 4428 } 4429 } 4430 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 4431 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) 4432 lbpp[value++] = lb_external100; 4433 if (Adapter->phy_status & MII_SR_10T_FD_CAPS) 4434 lbpp[value++] = lb_external10; 4435 break; 4436 4437 case LB_GET_MODE: 4438 size = sizeof (uint32_t); 4439 if (iocp->ioc_count != size) 4440 return (IOC_INVAL); 4441 4442 lbmp = (uint32_t *)mp->b_cont->b_rptr; 4443 *lbmp = Adapter->loopback_mode; 4444 break; 4445 4446 case LB_SET_MODE: 4447 size = 0; 4448 if (iocp->ioc_count != sizeof (uint32_t)) 4449 return (IOC_INVAL); 4450 4451 lbmp = (uint32_t *)mp->b_cont->b_rptr; 4452 if (!e1000g_set_loopback_mode(Adapter, *lbmp)) 4453 return (IOC_INVAL); 4454 break; 4455 } 4456 4457 iocp->ioc_count = size; 4458 iocp->ioc_error = 0; 4459 4460 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) { 4461 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED); 4462 return (IOC_INVAL); 4463 } 4464 4465 return (IOC_REPLY); 4466 } 4467 4468 static boolean_t 4469 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode) 4470 { 4471 struct e1000_hw *hw; 4472 int i, times; 4473 boolean_t link_up; 4474 4475 if (mode == Adapter->loopback_mode) 4476 return (B_TRUE); 4477 4478 hw = &Adapter->shared; 4479 times = 0; 4480 4481 Adapter->loopback_mode = mode; 4482 
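	/*
	 * E1000G_LB_NONE simply resets the chip back to normal operation.
	 * Any other mode programs the PHY/MAC for the requested loopback
	 * below, then polls for link, retrying once with a chip reset if
	 * link does not come up.
	 */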
4483 if (mode == E1000G_LB_NONE) { 4484 /* Reset the chip */ 4485 hw->phy.wait_for_link = B_TRUE; 4486 (void) e1000g_reset(Adapter); 4487 hw->phy.wait_for_link = B_FALSE; 4488 return (B_TRUE); 4489 } 4490 4491 again: 4492 4493 rw_enter(&Adapter->chip_lock, RW_WRITER); 4494 4495 switch (mode) { 4496 default: 4497 rw_exit(&Adapter->chip_lock); 4498 return (B_FALSE); 4499 4500 case E1000G_LB_EXTERNAL_1000: 4501 e1000g_set_external_loopback_1000(Adapter); 4502 break; 4503 4504 case E1000G_LB_EXTERNAL_100: 4505 e1000g_set_external_loopback_100(Adapter); 4506 break; 4507 4508 case E1000G_LB_EXTERNAL_10: 4509 e1000g_set_external_loopback_10(Adapter); 4510 break; 4511 4512 case E1000G_LB_INTERNAL_PHY: 4513 e1000g_set_internal_loopback(Adapter); 4514 break; 4515 } 4516 4517 times++; 4518 4519 rw_exit(&Adapter->chip_lock); 4520 4521 /* Wait for link up */ 4522 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--) 4523 msec_delay(100); 4524 4525 rw_enter(&Adapter->chip_lock, RW_WRITER); 4526 4527 link_up = e1000g_link_up(Adapter); 4528 4529 rw_exit(&Adapter->chip_lock); 4530 4531 if (!link_up) { 4532 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 4533 "Failed to get the link up"); 4534 if (times < 2) { 4535 /* Reset the link */ 4536 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL, 4537 "Reset the link ..."); 4538 (void) e1000g_reset(Adapter); 4539 goto again; 4540 } 4541 } 4542 4543 return (B_TRUE); 4544 } 4545 4546 /* 4547 * The following loopback settings are from Intel's technical 4548 * document - "How To Loopback". All the register settings and 4549 * time delay values are directly inherited from the document 4550 * without more explanations available. 4551 */ 4552 static void 4553 e1000g_set_internal_loopback(struct e1000g *Adapter) 4554 { 4555 struct e1000_hw *hw; 4556 uint32_t ctrl; 4557 uint32_t status; 4558 uint16_t phy_ctrl; 4559 uint32_t txcw; 4560 4561 hw = &Adapter->shared; 4562 4563 /* Disable Smart Power Down */ 4564 phy_spd_state(hw, B_FALSE); 4565 4566 e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl); 4567 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10); 4568 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000; 4569 4570 switch (hw->mac.type) { 4571 case e1000_82540: 4572 case e1000_82545: 4573 case e1000_82545_rev_3: 4574 case e1000_82546: 4575 case e1000_82546_rev_3: 4576 case e1000_82573: 4577 /* Auto-MDI/MDIX off */ 4578 e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 4579 /* Reset PHY to update Auto-MDI/MDIX */ 4580 e1000_write_phy_reg(hw, PHY_CONTROL, 4581 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN); 4582 /* Reset PHY to auto-neg off and force 1000 */ 4583 e1000_write_phy_reg(hw, PHY_CONTROL, 4584 phy_ctrl | MII_CR_RESET); 4585 /* 4586 * Disable PHY receiver for 82540/545/546 and 82573 Family. 4587 * See comments above e1000g_set_internal_loopback() for the 4588 * background. 4589 */ 4590 e1000_write_phy_reg(hw, 29, 0x001F); 4591 e1000_write_phy_reg(hw, 30, 0x8FFC); 4592 e1000_write_phy_reg(hw, 29, 0x001A); 4593 e1000_write_phy_reg(hw, 30, 0x8FF0); 4594 break; 4595 } 4596 4597 /* Set loopback */ 4598 e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK); 4599 4600 msec_delay(250); 4601 4602 /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ 4603 ctrl = E1000_READ_REG(hw, E1000_CTRL); 4604 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 4605 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 4606 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 4607 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ 4608 E1000_CTRL_FD); /* Force Duplex to FULL */ 4609 4610 switch (hw->mac.type) { 4611 case e1000_82540: 4612 case e1000_82545: 4613 case e1000_82545_rev_3: 4614 case e1000_82546: 4615 case e1000_82546_rev_3: 4616 /* 4617 * For some serdes we'll need to commit the writes now 4618 * so that the status is updated on link 4619 */ 4620 if (hw->media_type == e1000_media_type_internal_serdes) { 4621 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 4622 msec_delay(100); 4623 ctrl = E1000_READ_REG(hw, E1000_CTRL); 4624 } 4625 4626 if (hw->media_type == e1000_media_type_copper) { 4627 /* Invert Loss of Signal */ 4628 ctrl |= E1000_CTRL_ILOS; 4629 } else { 4630 /* Set ILOS on fiber nic if half duplex is detected */ 4631 status = E1000_READ_REG(hw, E1000_STATUS); 4632 if ((status & E1000_STATUS_FD) == 0) 4633 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 4634 } 4635 break; 4636 4637 case e1000_82571: 4638 case e1000_82572: 4639 /* 4640 * The fiber/SerDes versions of this adapter do not contain an 4641 * accessible PHY. Therefore, loopback beyond MAC must be done 4642 * using SerDes analog loopback. 4643 */ 4644 if (hw->media_type != e1000_media_type_copper) { 4645 status = E1000_READ_REG(hw, E1000_STATUS); 4646 /* Set ILOS on fiber nic if half duplex is detected */ 4647 if (((status & E1000_STATUS_LU) == 0) || 4648 ((status & E1000_STATUS_FD) == 0) || 4649 (hw->media_type == 4650 e1000_media_type_internal_serdes)) 4651 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 4652 4653 /* Disable autoneg by setting bit 31 of TXCW to zero */ 4654 txcw = E1000_READ_REG(hw, E1000_TXCW); 4655 txcw &= ~((uint32_t)1 << 31); 4656 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 4657 4658 /* 4659 * Write 0x410 to Serdes Control register 4660 * to enable Serdes analog loopback 4661 */ 4662 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 4663 msec_delay(10); 4664 } 4665 break; 4666 4667 case e1000_82573: 4668 ctrl |= E1000_CTRL_ILOS; 4669 break; 4670 } 4671 4672 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 4673 4674 } 4675 4676 static void 4677 e1000g_set_external_loopback_1000(struct e1000g *Adapter) 4678 { 4679 struct e1000_hw *hw; 4680 uint32_t rctl; 4681 uint32_t ctrl_ext; 4682 uint32_t ctrl; 4683 uint32_t status; 4684 uint32_t txcw; 4685 4686 hw = &Adapter->shared; 4687 4688 /* Disable Smart Power Down */ 4689 phy_spd_state(hw, B_FALSE); 4690 4691 switch (hw->media_type) { 4692 case e1000_media_type_copper: 4693 /* Force link up (Must be done before the PHY writes) */ 4694 ctrl = E1000_READ_REG(hw, E1000_CTRL); 4695 ctrl |= E1000_CTRL_SLU; /* Force Link Up */ 4696 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 4697 4698 rctl = E1000_READ_REG(hw, E1000_RCTL); 4699 rctl |= (E1000_RCTL_EN | 4700 E1000_RCTL_SBP | 4701 E1000_RCTL_UPE | 4702 E1000_RCTL_MPE | 4703 E1000_RCTL_LPE | 4704 E1000_RCTL_BAM); /* 0x803E */ 4705 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 4706 4707 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); 4708 ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA | 4709 E1000_CTRL_EXT_SDP6_DATA | 4710 E1000_CTRL_EXT_SDP7_DATA | 4711 E1000_CTRL_EXT_SDP4_DIR | 4712 E1000_CTRL_EXT_SDP6_DIR | 4713 E1000_CTRL_EXT_SDP7_DIR); /* 0x0DD0 */ 4714 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); 4715 4716 /* 4717 * This sequence tunes the PHY's SDP and no customer 4718 * settable values. 
For background, see comments above 4719 * e1000g_set_internal_loopback(). 4720 */ 4721 e1000_write_phy_reg(hw, 0x0, 0x140); 4722 msec_delay(10); 4723 e1000_write_phy_reg(hw, 0x9, 0x1A00); 4724 e1000_write_phy_reg(hw, 0x12, 0xC10); 4725 e1000_write_phy_reg(hw, 0x12, 0x1C10); 4726 e1000_write_phy_reg(hw, 0x1F37, 0x76); 4727 e1000_write_phy_reg(hw, 0x1F33, 0x1); 4728 e1000_write_phy_reg(hw, 0x1F33, 0x0); 4729 4730 e1000_write_phy_reg(hw, 0x1F35, 0x65); 4731 e1000_write_phy_reg(hw, 0x1837, 0x3F7C); 4732 e1000_write_phy_reg(hw, 0x1437, 0x3FDC); 4733 e1000_write_phy_reg(hw, 0x1237, 0x3F7C); 4734 e1000_write_phy_reg(hw, 0x1137, 0x3FDC); 4735 4736 msec_delay(50); 4737 break; 4738 case e1000_media_type_fiber: 4739 case e1000_media_type_internal_serdes: 4740 status = E1000_READ_REG(hw, E1000_STATUS); 4741 if (((status & E1000_STATUS_LU) == 0) || 4742 (hw->media_type == e1000_media_type_internal_serdes)) { 4743 ctrl = E1000_READ_REG(hw, E1000_CTRL); 4744 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU; 4745 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 4746 } 4747 4748 /* Disable autoneg by setting bit 31 of TXCW to zero */ 4749 txcw = E1000_READ_REG(hw, E1000_TXCW); 4750 txcw &= ~((uint32_t)1 << 31); 4751 E1000_WRITE_REG(hw, E1000_TXCW, txcw); 4752 4753 /* 4754 * Write 0x410 to Serdes Control register 4755 * to enable Serdes analog loopback 4756 */ 4757 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410); 4758 msec_delay(10); 4759 break; 4760 default: 4761 break; 4762 } 4763 } 4764 4765 static void 4766 e1000g_set_external_loopback_100(struct e1000g *Adapter) 4767 { 4768 struct e1000_hw *hw; 4769 uint32_t ctrl; 4770 uint16_t phy_ctrl; 4771 4772 hw = &Adapter->shared; 4773 4774 /* Disable Smart Power Down */ 4775 phy_spd_state(hw, B_FALSE); 4776 4777 phy_ctrl = (MII_CR_FULL_DUPLEX | 4778 MII_CR_SPEED_100); 4779 4780 /* Force 100/FD, reset PHY */ 4781 e1000_write_phy_reg(hw, PHY_CONTROL, 4782 phy_ctrl | MII_CR_RESET); /* 0xA100 */ 4783 msec_delay(10); 4784 4785 /* Force 100/FD */ 4786 e1000_write_phy_reg(hw, PHY_CONTROL, 4787 phy_ctrl); /* 0x2100 */ 4788 msec_delay(10); 4789 4790 /* Now setup the MAC to the same speed/duplex as the PHY. */ 4791 ctrl = E1000_READ_REG(hw, E1000_CTRL); 4792 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 4793 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */ 4794 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 4795 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 4796 E1000_CTRL_SPD_100 | /* Force Speed to 100 */ 4797 E1000_CTRL_FD); /* Force Duplex to FULL */ 4798 4799 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 4800 } 4801 4802 static void 4803 e1000g_set_external_loopback_10(struct e1000g *Adapter) 4804 { 4805 struct e1000_hw *hw; 4806 uint32_t ctrl; 4807 uint16_t phy_ctrl; 4808 4809 hw = &Adapter->shared; 4810 4811 /* Disable Smart Power Down */ 4812 phy_spd_state(hw, B_FALSE); 4813 4814 phy_ctrl = (MII_CR_FULL_DUPLEX | 4815 MII_CR_SPEED_10); 4816 4817 /* Force 10/FD, reset PHY */ 4818 e1000_write_phy_reg(hw, PHY_CONTROL, 4819 phy_ctrl | MII_CR_RESET); /* 0x8100 */ 4820 msec_delay(10); 4821 4822 /* Force 10/FD */ 4823 e1000_write_phy_reg(hw, PHY_CONTROL, 4824 phy_ctrl); /* 0x0100 */ 4825 msec_delay(10); 4826 4827 /* Now setup the MAC to the same speed/duplex as the PHY. 
 */ 4828 ctrl = E1000_READ_REG(hw, E1000_CTRL); 4829 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ 4830 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */ 4831 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ 4832 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 4833 E1000_CTRL_SPD_10 | /* Force Speed to 10 */ 4834 E1000_CTRL_FD); /* Force Duplex to FULL */ 4835 4836 E1000_WRITE_REG(hw, E1000_CTRL, ctrl); 4837 } 4838 4839 #ifdef __sparc 4840 static boolean_t 4841 e1000g_find_mac_address(struct e1000g *Adapter) 4842 { 4843 struct e1000_hw *hw = &Adapter->shared; 4844 uchar_t *bytes; 4845 struct ether_addr sysaddr; 4846 uint_t nelts; 4847 int err; 4848 boolean_t found = B_FALSE; 4849 4850 /* 4851 * The "vendor's factory-set address" may already have 4852 * been extracted from the chip, but if the property 4853 * "local-mac-address" is set we use that instead. 4854 * 4855 * We check whether it looks like an array of 6 4856 * bytes (which it should, if OBP set it). If we can't 4857 * make sense of it this way, we'll ignore it. 4858 */ 4859 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 4860 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 4861 if (err == DDI_PROP_SUCCESS) { 4862 if (nelts == ETHERADDRL) { 4863 while (nelts--) 4864 hw->mac.addr[nelts] = bytes[nelts]; 4865 found = B_TRUE; 4866 } 4867 ddi_prop_free(bytes); 4868 } 4869 4870 /* 4871 * Look up the OBP property "local-mac-address?". If the user has set 4872 * 'local-mac-address? = false', use "the system address" instead. 4873 */ 4874 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0, 4875 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 4876 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 4877 if (localetheraddr(NULL, &sysaddr) != 0) { 4878 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 4879 found = B_TRUE; 4880 } 4881 } 4882 ddi_prop_free(bytes); 4883 } 4884 4885 /* 4886 * Finally(!), if there's a valid "mac-address" property (created 4887 * if we netbooted from this interface), we must use this instead 4888 * of any of the above to ensure that the NFS/install server doesn't 4889 * get confused by the address changing as Solaris takes over! 4890 */ 4891 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 4892 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 4893 if (err == DDI_PROP_SUCCESS) { 4894 if (nelts == ETHERADDRL) { 4895 while (nelts--) 4896 hw->mac.addr[nelts] = bytes[nelts]; 4897 found = B_TRUE; 4898 } 4899 ddi_prop_free(bytes); 4900 } 4901 4902 if (found) { 4903 bcopy(hw->mac.addr, hw->mac.perm_addr, 4904 ETHERADDRL); 4905 } 4906 4907 return (found); 4908 } 4909 #endif 4910 4911 static int 4912 e1000g_add_intrs(struct e1000g *Adapter) 4913 { 4914 dev_info_t *devinfo; 4915 int intr_types; 4916 int rc; 4917 4918 devinfo = Adapter->dip; 4919 4920 /* Get supported interrupt types */ 4921 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 4922 4923 if (rc != DDI_SUCCESS) { 4924 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 4925 "Get supported interrupt types failed: %d\n", rc); 4926 return (DDI_FAILURE); 4927 } 4928 4929 /* 4930 * Based on Intel Technical Advisory document (TA-160), there are some 4931 * cases where some older Intel PCI-X NICs may "advertise" to the OS 4932 * that they support MSI, but in fact have problems. 4933 * So we should only enable MSI for PCI-E NICs and disable MSI for old 4934 * PCI/PCI-X NICs.
4935 */ 4936 if (Adapter->shared.mac.type < e1000_82571) 4937 Adapter->msi_enabled = B_FALSE; 4938 4939 if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enabled) { 4940 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI); 4941 4942 if (rc != DDI_SUCCESS) { 4943 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 4944 "Add MSI failed, trying Legacy interrupts\n"); 4945 } else { 4946 Adapter->intr_type = DDI_INTR_TYPE_MSI; 4947 } 4948 } 4949 4950 if ((Adapter->intr_type == 0) && 4951 (intr_types & DDI_INTR_TYPE_FIXED)) { 4952 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED); 4953 4954 if (rc != DDI_SUCCESS) { 4955 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 4956 "Add Legacy interrupts failed\n"); 4957 return (DDI_FAILURE); 4958 } 4959 4960 Adapter->intr_type = DDI_INTR_TYPE_FIXED; 4961 } 4962 4963 if (Adapter->intr_type == 0) { 4964 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL, 4965 "No interrupts registered\n"); 4966 return (DDI_FAILURE); 4967 } 4968 4969 return (DDI_SUCCESS); 4970 } 4971 4972 /* 4973 * e1000g_intr_add() handles MSI/Legacy interrupts 4974 */ 4975 static int 4976 e1000g_intr_add(struct e1000g *Adapter, int intr_type) 4977 { 4978 dev_info_t *devinfo; 4979 int count, avail, actual; 4980 int x, y, rc, inum = 0; 4981 int flag; 4982 ddi_intr_handler_t *intr_handler; 4983 4984 devinfo = Adapter->dip; 4985 4986 /* get number of interrupts */ 4987 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 4988 if ((rc != DDI_SUCCESS) || (count == 0)) { 4989 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 4990 "Get interrupt number failed. Return: %d, count: %d\n", 4991 rc, count); 4992 return (DDI_FAILURE); 4993 } 4994 4995 /* get number of available interrupts */ 4996 rc = ddi_intr_get_navail(devinfo, intr_type, &avail); 4997 if ((rc != DDI_SUCCESS) || (avail == 0)) { 4998 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 4999 "Get interrupt available number failed. 
" 5000 "Return: %d, available: %d\n", rc, avail); 5001 return (DDI_FAILURE); 5002 } 5003 5004 if (avail < count) { 5005 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 5006 "Interrupts count: %d, available: %d\n", 5007 count, avail); 5008 } 5009 5010 /* Allocate an array of interrupt handles */ 5011 Adapter->intr_size = count * sizeof (ddi_intr_handle_t); 5012 Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP); 5013 5014 /* Set NORMAL behavior for both MSI and FIXED interrupt */ 5015 flag = DDI_INTR_ALLOC_NORMAL; 5016 5017 /* call ddi_intr_alloc() */ 5018 rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum, 5019 count, &actual, flag); 5020 5021 if ((rc != DDI_SUCCESS) || (actual == 0)) { 5022 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5023 "Allocate interrupts failed: %d\n", rc); 5024 5025 kmem_free(Adapter->htable, Adapter->intr_size); 5026 return (DDI_FAILURE); 5027 } 5028 5029 if (actual < count) { 5030 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL, 5031 "Interrupts requested: %d, received: %d\n", 5032 count, actual); 5033 } 5034 5035 Adapter->intr_cnt = actual; 5036 5037 /* Get priority for first msi, assume remaining are all the same */ 5038 rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri); 5039 5040 if (rc != DDI_SUCCESS) { 5041 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5042 "Get interrupt priority failed: %d\n", rc); 5043 5044 /* Free already allocated intr */ 5045 for (y = 0; y < actual; y++) 5046 (void) ddi_intr_free(Adapter->htable[y]); 5047 5048 kmem_free(Adapter->htable, Adapter->intr_size); 5049 return (DDI_FAILURE); 5050 } 5051 5052 /* 5053 * In Legacy Interrupt mode, for PCI-Express adapters, we should 5054 * use the interrupt service routine e1000g_intr_pciexpress() 5055 * to avoid interrupt stealing when sharing interrupt with other 5056 * devices. 
5057 */ 5058 if (Adapter->shared.mac.type < e1000_82571) 5059 intr_handler = (ddi_intr_handler_t *)e1000g_intr; 5060 else 5061 intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress; 5062 5063 /* Call ddi_intr_add_handler() */ 5064 for (x = 0; x < actual; x++) { 5065 rc = ddi_intr_add_handler(Adapter->htable[x], 5066 intr_handler, (caddr_t)Adapter, NULL); 5067 5068 if (rc != DDI_SUCCESS) { 5069 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5070 "Add interrupt handler failed: %d\n", rc); 5071 5072 /* Remove already added handler */ 5073 for (y = 0; y < x; y++) 5074 (void) ddi_intr_remove_handler( 5075 Adapter->htable[y]); 5076 5077 /* Free already allocated intr */ 5078 for (y = 0; y < actual; y++) 5079 (void) ddi_intr_free(Adapter->htable[y]); 5080 5081 kmem_free(Adapter->htable, Adapter->intr_size); 5082 return (DDI_FAILURE); 5083 } 5084 } 5085 5086 rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap); 5087 5088 if (rc != DDI_SUCCESS) { 5089 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5090 "Get interrupt cap failed: %d\n", rc); 5091 5092 /* Free already allocated intr */ 5093 for (y = 0; y < actual; y++) { 5094 (void) ddi_intr_remove_handler(Adapter->htable[y]); 5095 (void) ddi_intr_free(Adapter->htable[y]); 5096 } 5097 5098 kmem_free(Adapter->htable, Adapter->intr_size); 5099 return (DDI_FAILURE); 5100 } 5101 5102 return (DDI_SUCCESS); 5103 } 5104 5105 static int 5106 e1000g_rem_intrs(struct e1000g *Adapter) 5107 { 5108 int x; 5109 int rc; 5110 5111 for (x = 0; x < Adapter->intr_cnt; x++) { 5112 rc = ddi_intr_remove_handler(Adapter->htable[x]); 5113 if (rc != DDI_SUCCESS) { 5114 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5115 "Remove intr handler failed: %d\n", rc); 5116 return (DDI_FAILURE); 5117 } 5118 5119 rc = ddi_intr_free(Adapter->htable[x]); 5120 if (rc != DDI_SUCCESS) { 5121 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5122 "Free intr failed: %d\n", rc); 5123 return (DDI_FAILURE); 5124 } 5125 } 5126 5127 kmem_free(Adapter->htable, Adapter->intr_size); 5128 5129 return (DDI_SUCCESS); 5130 } 5131 5132 static int 5133 e1000g_enable_intrs(struct e1000g *Adapter) 5134 { 5135 int x; 5136 int rc; 5137 5138 /* Enable interrupts */ 5139 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) { 5140 /* Call ddi_intr_block_enable() for MSI */ 5141 rc = ddi_intr_block_enable(Adapter->htable, 5142 Adapter->intr_cnt); 5143 if (rc != DDI_SUCCESS) { 5144 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5145 "Enable block intr failed: %d\n", rc); 5146 return (DDI_FAILURE); 5147 } 5148 } else { 5149 /* Call ddi_intr_enable() for Legacy/MSI non block enable */ 5150 for (x = 0; x < Adapter->intr_cnt; x++) { 5151 rc = ddi_intr_enable(Adapter->htable[x]); 5152 if (rc != DDI_SUCCESS) { 5153 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5154 "Enable intr failed: %d\n", rc); 5155 return (DDI_FAILURE); 5156 } 5157 } 5158 } 5159 5160 return (DDI_SUCCESS); 5161 } 5162 5163 static int 5164 e1000g_disable_intrs(struct e1000g *Adapter) 5165 { 5166 int x; 5167 int rc; 5168 5169 /* Disable all interrupts */ 5170 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) { 5171 rc = ddi_intr_block_disable(Adapter->htable, 5172 Adapter->intr_cnt); 5173 if (rc != DDI_SUCCESS) { 5174 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5175 "Disable block intr failed: %d\n", rc); 5176 return (DDI_FAILURE); 5177 } 5178 } else { 5179 for (x = 0; x < Adapter->intr_cnt; x++) { 5180 rc = ddi_intr_disable(Adapter->htable[x]); 5181 if (rc != DDI_SUCCESS) { 5182 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL, 5183 "Disable intr failed: 
%d\n", rc); 5184 return (DDI_FAILURE); 5185 } 5186 } 5187 } 5188 5189 return (DDI_SUCCESS); 5190 } 5191 5192 /* 5193 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter 5194 */ 5195 static void 5196 e1000g_get_phy_state(struct e1000g *Adapter) 5197 { 5198 struct e1000_hw *hw = &Adapter->shared; 5199 5200 e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl); 5201 e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status); 5202 e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &Adapter->phy_an_adv); 5203 e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &Adapter->phy_an_exp); 5204 e1000_read_phy_reg(hw, PHY_EXT_STATUS, &Adapter->phy_ext_status); 5205 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &Adapter->phy_1000t_ctrl); 5206 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &Adapter->phy_1000t_status); 5207 e1000_read_phy_reg(hw, PHY_LP_ABILITY, &Adapter->phy_lp_able); 5208 5209 Adapter->param_autoneg_cap = 5210 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0; 5211 Adapter->param_pause_cap = 5212 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; 5213 Adapter->param_asym_pause_cap = 5214 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; 5215 Adapter->param_1000fdx_cap = 5216 ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) || 5217 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0; 5218 Adapter->param_1000hdx_cap = 5219 ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) || 5220 (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0; 5221 Adapter->param_100t4_cap = 5222 (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0; 5223 Adapter->param_100fdx_cap = 5224 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) || 5225 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0; 5226 Adapter->param_100hdx_cap = 5227 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) || 5228 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0; 5229 Adapter->param_10fdx_cap = 5230 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0; 5231 Adapter->param_10hdx_cap = 5232 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0; 5233 5234 Adapter->param_adv_autoneg = hw->mac.autoneg; 5235 Adapter->param_adv_pause = 5236 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0; 5237 Adapter->param_adv_asym_pause = 5238 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0; 5239 Adapter->param_adv_1000hdx = 5240 (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0; 5241 Adapter->param_adv_100t4 = 5242 (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0; 5243 if (Adapter->param_adv_autoneg == 1) { 5244 Adapter->param_adv_1000fdx = 5245 (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS) ? 1 : 0; 5246 Adapter->param_adv_100fdx = 5247 (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS) ? 1 : 0; 5248 Adapter->param_adv_100hdx = 5249 (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS) ? 1 : 0; 5250 Adapter->param_adv_10fdx = 5251 (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0; 5252 Adapter->param_adv_10hdx = 5253 (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0; 5254 } 5255 5256 Adapter->param_lp_autoneg = 5257 (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0; 5258 Adapter->param_lp_pause = 5259 (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0; 5260 Adapter->param_lp_asym_pause = 5261 (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0; 5262 Adapter->param_lp_1000fdx = 5263 (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0; 5264 Adapter->param_lp_1000hdx = 5265 (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0; 5266 Adapter->param_lp_100t4 = 5267 (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 
1 : 0; 5268 Adapter->param_lp_100fdx = 5269 (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0; 5270 Adapter->param_lp_100hdx = 5271 (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0; 5272 Adapter->param_lp_10fdx = 5273 (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0; 5274 Adapter->param_lp_10hdx = 5275 (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0; 5276 } 5277 5278 /* 5279 * FMA support 5280 */ 5281 5282 int 5283 e1000g_check_acc_handle(ddi_acc_handle_t handle) 5284 { 5285 ddi_fm_error_t de; 5286 5287 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 5288 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 5289 return (de.fme_status); 5290 } 5291 5292 int 5293 e1000g_check_dma_handle(ddi_dma_handle_t handle) 5294 { 5295 ddi_fm_error_t de; 5296 5297 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 5298 return (de.fme_status); 5299 } 5300 5301 /* 5302 * The IO fault service error handling callback function 5303 */ 5304 static int 5305 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 5306 { 5307 /* 5308 * as the driver can always deal with an error in any dma or 5309 * access handle, we can just return the fme_status value. 5310 */ 5311 pci_ereport_post(dip, err, NULL); 5312 return (err->fme_status); 5313 } 5314 5315 static void 5316 e1000g_fm_init(struct e1000g *Adapter) 5317 { 5318 ddi_iblock_cookie_t iblk; 5319 int fma_acc_flag, fma_dma_flag; 5320 5321 /* Only register with IO Fault Services if we have some capability */ 5322 if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 5323 e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 5324 fma_acc_flag = 1; 5325 } else { 5326 e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 5327 fma_acc_flag = 0; 5328 } 5329 5330 if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 5331 fma_dma_flag = 1; 5332 } else { 5333 fma_dma_flag = 0; 5334 } 5335 5336 (void) e1000g_set_fma_flags(Adapter, fma_acc_flag, fma_dma_flag); 5337 5338 if (Adapter->fm_capabilities) { 5339 5340 /* Register capabilities with IO Fault Services */ 5341 ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk); 5342 5343 /* 5344 * Initialize pci ereport capabilities if ereport capable 5345 */ 5346 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) || 5347 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 5348 pci_ereport_setup(Adapter->dip); 5349 5350 /* 5351 * Register error callback if error callback capable 5352 */ 5353 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 5354 ddi_fm_handler_register(Adapter->dip, 5355 e1000g_fm_error_cb, (void*) Adapter); 5356 } 5357 } 5358 5359 static void 5360 e1000g_fm_fini(struct e1000g *Adapter) 5361 { 5362 /* Only unregister FMA capabilities if we registered some */ 5363 if (Adapter->fm_capabilities) { 5364 5365 /* 5366 * Release any resources allocated by pci_ereport_setup() 5367 */ 5368 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) || 5369 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 5370 pci_ereport_teardown(Adapter->dip); 5371 5372 /* 5373 * Un-register error callback if error callback capable 5374 */ 5375 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities)) 5376 ddi_fm_handler_unregister(Adapter->dip); 5377 5378 /* Unregister from IO Fault Services */ 5379 ddi_fm_fini(Adapter->dip); 5380 } 5381 } 5382 5383 void 5384 e1000g_fm_ereport(struct e1000g *Adapter, char *detail) 5385 { 5386 uint64_t ena; 5387 char buf[FM_MAX_CLASS]; 5388 5389 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 5390 ena = fm_ena_generate(0, FM_ENA_FMT1); 5391 if 
(DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) { 5392 ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP, 5393 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 5394 } 5395 } 5396
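
/*
 * Usage sketch for the FMA helpers above. This block is illustrative only
 * and is compiled out; the function name e1000g_fm_check_sketch is
 * hypothetical, and DDI_FM_DEVICE_INVAL_STATE is assumed to be the ereport
 * detail string used by callers elsewhere in the driver. A caller checks a
 * register access handle after I/O and, if the handle has faulted, posts an
 * ereport and reports the service impact to the FMA framework.
 */
#if 0
static void
e1000g_fm_check_sketch(struct e1000g *Adapter)
{
	/* Check the handle used for the MMIO accesses above */
	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		/* Post a device ereport ... */
		e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
		/* ... and mark the service as degraded */
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
	}
}
#endif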