/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms of the CDDL.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "ixgbe_sw.h"

static char ident[] = "Intel 10Gb Ethernet 1.0.1";

/*
 * Local function prototypes
 */
static int ixgbe_register_mac(ixgbe_t *);
static int ixgbe_identify_hardware(ixgbe_t *);
static int ixgbe_regs_map(ixgbe_t *);
static void ixgbe_init_properties(ixgbe_t *);
static int ixgbe_init_driver_settings(ixgbe_t *);
static void ixgbe_init_locks(ixgbe_t *);
static void ixgbe_destroy_locks(ixgbe_t *);
static int ixgbe_init(ixgbe_t *);
static int ixgbe_chip_start(ixgbe_t *);
static void ixgbe_chip_stop(ixgbe_t *);
static int ixgbe_reset(ixgbe_t *);
static void ixgbe_tx_clean(ixgbe_t *);
static boolean_t ixgbe_tx_drain(ixgbe_t *);
static boolean_t ixgbe_rx_drain(ixgbe_t *);
static int ixgbe_alloc_rings(ixgbe_t *);
static int ixgbe_init_rings(ixgbe_t *);
static void ixgbe_free_rings(ixgbe_t *);
static void ixgbe_fini_rings(ixgbe_t *);
static void ixgbe_setup_rings(ixgbe_t *);
static void ixgbe_setup_rx(ixgbe_t *);
static void ixgbe_setup_tx(ixgbe_t *);
static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
static void ixgbe_setup_rss(ixgbe_t *);
static void ixgbe_init_unicst(ixgbe_t *);
static void ixgbe_setup_multicst(ixgbe_t *);
static void ixgbe_get_hw_state(ixgbe_t *);
static void ixgbe_get_conf(ixgbe_t *);
static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
static boolean_t ixgbe_driver_link_check(ixgbe_t *);
static void ixgbe_local_timer(void *);
static void ixgbe_arm_watchdog_timer(ixgbe_t *);
static void ixgbe_start_watchdog_timer(ixgbe_t *);
static void ixgbe_restart_watchdog_timer(ixgbe_t *);
static void ixgbe_stop_watchdog_timer(ixgbe_t *);
static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
static boolean_t is_valid_mac_addr(uint8_t *);
static boolean_t ixgbe_stall_check(ixgbe_t *);
static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
static boolean_t ixgbe_find_mac_address(ixgbe_t *);
static int ixgbe_alloc_intrs(ixgbe_t *);
static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
static int ixgbe_add_intr_handlers(ixgbe_t *);
static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
static void ixgbe_set_ivar(ixgbe_t *, uint16_t, uint8_t);
static int ixgbe_map_rings_to_vectors(ixgbe_t *);
static void ixgbe_setup_adapter_vector(ixgbe_t *);
static void ixgbe_rem_intr_handlers(ixgbe_t *);
static void ixgbe_rem_intrs(ixgbe_t *);
static int ixgbe_enable_intrs(ixgbe_t *);
static int ixgbe_disable_intrs(ixgbe_t *);
static uint_t ixgbe_intr_legacy(void *, void *);
static uint_t ixgbe_intr_msi(void *, void *);
static uint_t ixgbe_intr_rx(void *, void *);
static uint_t ixgbe_intr_tx_other(void *, void *);
static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
static void ixgbe_intr_other_work(ixgbe_t *);
static void ixgbe_get_driver_control(struct ixgbe_hw *);
static void ixgbe_release_driver_control(struct ixgbe_hw *);

static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
static int ixgbe_resume(dev_info_t *);
static int ixgbe_suspend(dev_info_t *);
static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);

static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void ixgbe_fm_init(ixgbe_t *);
static void ixgbe_fm_fini(ixgbe_t *);

static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power		/* devo_power */
};

static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ident,			/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};

/*
 * Access attributes for register mapping
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Loopback property
 */
static lb_property_t lb_normal = {
	normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

#define	IXGBE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	ixgbe_m_unicst,
	ixgbe_m_tx,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab
};

/*
 * Module Initialization Functions.
 */

int
_init(void)
{
	int status;

	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);

	status = mod_install(&ixgbe_modlinkage);

	if (status != DDI_SUCCESS) {
		mac_fini_ops(&ixgbe_dev_ops);
	}

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&ixgbe_modlinkage);

	if (status == DDI_SUCCESS) {
		mac_fini_ops(&ixgbe_dev_ops);
	}

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	int status;

	status = mod_info(&ixgbe_modlinkage, modinfop);

	return (status);
}

/*
 * ixgbe_attach - Driver attach.
 *
 * This function is the device specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	ixgbe_t *ixgbe;
	struct ixgbe_osdep *osdep;
	struct ixgbe_hw *hw;
	int instance;

	/*
	 * Check the command and perform corresponding operations
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (ixgbe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/* Get the device instance */
	instance = ddi_get_instance(devinfo);

	/* Allocate memory for the instance data structure */
	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

	ixgbe->dip = devinfo;
	ixgbe->instance = instance;

	hw = &ixgbe->hw;
	osdep = &ixgbe->osdep;
	hw->back = osdep;
	osdep->ixgbe = ixgbe;

	/* Attach the instance pointer to the dev_info data structure */
	ddi_set_driver_private(devinfo, ixgbe);

	/*
	 * Initialize for fma support
	 */
	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	ixgbe_fm_init(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;

	/*
	 * Map PCI config space registers
	 */
	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map PCI configurations");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

	/*
	 * Identify the chipset family
	 */
	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to identify hardware");
		goto attach_fail;
	}

	/*
	 * Map device registers
	 */
	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map device registers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

	/*
	 * Initialize driver parameters
	 */
	ixgbe_init_properties(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

	/*
	 * Allocate interrupts
	 */
	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

	/*
	 * Allocate rx/tx rings based on the ring numbers.
	 * The actual numbers of rx/tx rings are decided by the number of
	 * allocated interrupt vectors, so we should allocate the rings after
	 * interrupts are allocated.
	 */
	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

	/*
	 * Map rings to interrupt vectors
	 */
	if (ixgbe_map_rings_to_vectors(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map rings to vectors");
		goto attach_fail;
	}

	/*
	 * Add interrupt handlers
	 */
	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Initialize driver parameters
	 */
	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize driver settings");
		goto attach_fail;
	}

	/*
	 * Initialize mutexes for this device.
	 * Do this before enabling the interrupt handlers and registering
	 * the softint, to avoid the condition where an interrupt handler
	 * can try to use an uninitialized mutex.
	 */
	ixgbe_init_locks(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

	/*
	 * Initialize chipset hardware
	 */
	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize adapter");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	/*
	 * Initialize DMA and hardware settings for rx/tx rings
	 */
	if (ixgbe_init_rings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize rings");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT_RINGS;

	/*
	 * Initialize statistics
	 */
	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize statistics");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

	/*
	 * Initialize NDD parameters
	 */
	if (ixgbe_nd_init(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize ndd");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_NDD;

	/*
	 * Register the driver to the MAC
	 */
	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register MAC");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

	/*
	 * Now that mutex locks are initialized, and the chip is also
	 * initialized, enable interrupts.
	 */
	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

	ixgbe->ixgbe_state |= IXGBE_INITIALIZED;

	return (DDI_SUCCESS);

attach_fail:
	ixgbe_unconfigure(devinfo, ixgbe);
	return (DDI_FAILURE);
}
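
/*
 * Teardown note: each attach stage that completes sets one bit in
 * ixgbe->attach_progress (ATTACH_PROGRESS_FM_INIT, ..._PCI_CONFIG,
 * ..._ENABLE_INTR, and so on), and ixgbe_unconfigure() below tests
 * those bits to undo exactly the stages that completed. The attach_fail
 * path is therefore safe to take at any point: stages that never ran
 * have no bit set and are simply skipped during teardown.
 */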

/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by
 * this driver is freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ixgbe_t *ixgbe;

	/*
	 * Check detach command
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (ixgbe_suspend(devinfo));

	case DDI_DETACH:
		break;
	}

	/*
	 * Get the pointer to the driver private data structure
	 */
	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	/*
	 * Unregister MAC. If failed, we have to fail the detach
	 */
	if (mac_unregister(ixgbe->mac_hdl) != 0) {
		ixgbe_error(ixgbe, "Failed to unregister MAC");
		return (DDI_FAILURE);
	}
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_MAC;

	/*
	 * If the device is still running, it needs to be stopped first.
	 * This check is necessary because under some specific circumstances,
	 * the detach routine can be called without stopping the interface
	 * first.
	 */
	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		ixgbe->ixgbe_state &= ~IXGBE_STARTED;
		ixgbe_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
		/* Disable and stop the watchdog timer */
		ixgbe_disable_watchdog_timer(ixgbe);
	} else
		mutex_exit(&ixgbe->gen_lock);

	/*
	 * Check if there are still rx buffers held by the upper layer.
	 * If so, fail the detach.
	 */
	if (!ixgbe_rx_drain(ixgbe))
		return (DDI_FAILURE);

	/*
	 * Do the remaining unconfigure routines
	 */
	ixgbe_unconfigure(devinfo, ixgbe);

	return (DDI_SUCCESS);
}

static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * Unregister MAC
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free ndd parameters
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_NDD) {
		ixgbe_nd_cleanup(ixgbe);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Release the DMA resources of rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT_RINGS) {
		ixgbe_fini_rings(ixgbe);
	}

	/*
	 * Stop the chipset
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}

/*
 * ixgbe_register_mac - Register the driver and its function pointers with
 * the GLD interface.
 */
static int
ixgbe_register_mac(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	mac_register_t *mac;
	int status;

	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
		return (IXGBE_FAILURE);

	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	mac->m_driver = ixgbe;
	mac->m_dip = ixgbe->dip;
	mac->m_src_addr = hw->mac.addr;
	mac->m_callbacks = &ixgbe_m_callbacks;
	mac->m_min_sdu = 0;
	mac->m_max_sdu = ixgbe->default_mtu;
	mac->m_margin = VLAN_TAGSZ;

	status = mac_register(mac, &ixgbe->mac_hdl);

	mac_free(mac);

	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
}
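
/*
 * Note on the registration values above (descriptive, not normative):
 * m_max_sdu is the payload MTU only; link-level headers are not
 * included. m_margin = VLAN_TAGSZ tells the MAC layer that the driver
 * can accept frames that exceed the SDU by one VLAN tag, so tagged
 * traffic does not require raising the MTU.
 */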

/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_regs_map - Map the device registers.
 */
static int
ixgbe_regs_map(ixgbe_t *ixgbe)
{
	dev_info_t *devinfo = ixgbe->dip;
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;
	off_t mem_size;

	/*
	 * First get the size of device registers to be mapped.
	 */
	if (ddi_dev_regsize(devinfo, 1, &mem_size) != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Call ddi_regs_map_setup() to map registers
	 */
	if ((ddi_regs_map_setup(devinfo, 1,
	    (caddr_t *)&hw->hw_addr, 0,
	    mem_size, &ixgbe_regs_acc_attr,
	    &osdep->reg_handle)) != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_properties - Initialize driver properties.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings,
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);
}

/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be setup during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t rx_size;
	uint32_t tx_size;
	int i;

	/*
	 * Initialize chipset specific hardware function pointers
	 */
	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Set rx buffer size
	 *
	 * The IP header alignment room is counted in the calculation.
	 * The rx buffer size is in units of 1K, as required by the
	 * chipset hardware.
	 */
	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
	ixgbe->rx_buf_size = ((rx_size >> 10) +
	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	/*
	 * Set tx buffer size
	 */
	tx_size = ixgbe->max_frame_size;
	ixgbe->tx_buf_size = ((tx_size >> 10) +
	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
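
	/*
	 * Worked example of the 1K round-up above (illustrative): the
	 * expression rounds a byte count up to the next multiple of 1K.
	 * With the default 1500-byte MTU, max_frame_size is 1522 bytes
	 * (MTU + VLAN ethernet header + FCS); adding IPHDR_ALIGN_ROOM
	 * leaves rx_size between 1K and 2K, so (rx_size >> 10) is 1,
	 * the remainder test contributes 1, and rx_buf_size becomes
	 * 2 << 10 = 2048 bytes.
	 */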

	/*
	 * Initialize rx/tx rings parameters
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		rx_ring->index = i;
		rx_ring->ixgbe = ixgbe;

		rx_ring->ring_size = ixgbe->rx_ring_size;
		rx_ring->free_list_size = ixgbe->rx_ring_size;
		rx_ring->copy_thresh = ixgbe->rx_copy_thresh;
		rx_ring->limit_per_intr = ixgbe->rx_limit_per_intr;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		tx_ring->index = i;
		tx_ring->ixgbe = ixgbe;
		if (ixgbe->tx_head_wb_enable)
			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
		else
			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

		tx_ring->ring_size = ixgbe->tx_ring_size;
		tx_ring->free_list_size = ixgbe->tx_ring_size +
		    (ixgbe->tx_ring_size >> 1);
		tx_ring->copy_thresh = ixgbe->tx_copy_thresh;
		tx_ring->recycle_thresh = ixgbe->tx_recycle_thresh;
		tx_ring->overload_thresh = ixgbe->tx_overload_thresh;
		tx_ring->resched_thresh = ixgbe->tx_resched_thresh;
	}

	/*
	 * Initialize values of interrupt throttling rate
	 */
	for (i = 1; i < IXGBE_MAX_RING_VECTOR; i++)
		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

	/*
	 * The initial link state should be "unknown"
	 */
	ixgbe->link_state = LINK_STATE_UNKNOWN;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_locks - Initialize locks.
 */
static void
ixgbe_init_locks(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		mutex_init(&rx_ring->rx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&rx_ring->recycle_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		mutex_init(&tx_ring->tx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->recycle_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->tcb_head_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->tcb_tail_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
	}

	mutex_init(&ixgbe->gen_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

	mutex_init(&ixgbe->watchdog_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
}
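
/*
 * All mutexes above are created with DDI_INTR_PRI(ixgbe->intr_pri) so
 * they can be acquired safely from the driver's interrupt handlers as
 * well as from base level. The locking convention used elsewhere in
 * this file is to take gen_lock first, then the per-ring rx/tx locks,
 * releasing in the reverse order (see ixgbe_init_rings() and
 * ixgbe_reset() below).
 */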

/*
 * ixgbe_destroy_locks - Destroy locks.
 */
static void
ixgbe_destroy_locks(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		mutex_destroy(&rx_ring->rx_lock);
		mutex_destroy(&rx_ring->recycle_lock);
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		mutex_destroy(&tx_ring->tx_lock);
		mutex_destroy(&tx_ring->recycle_lock);
		mutex_destroy(&tx_ring->tcb_head_lock);
		mutex_destroy(&tx_ring->tcb_tail_lock);
	}

	mutex_destroy(&ixgbe->gen_lock);
	mutex_destroy(&ixgbe->watchdog_lock);
}

static int
ixgbe_resume(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		if (ixgbe_start(ixgbe) != IXGBE_SUCCESS) {
			mutex_exit(&ixgbe->gen_lock);
			return (DDI_FAILURE);
		}

		/*
		 * Enable and start the watchdog timer
		 */
		ixgbe_enable_watchdog_timer(ixgbe);
	}

	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;

	mutex_exit(&ixgbe->gen_lock);

	return (DDI_SUCCESS);
}

static int
ixgbe_suspend(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;

	ixgbe_stop(ixgbe);

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	return (DDI_SUCCESS);
}
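
/*
 * Suspend/resume contract: DDI_SUSPEND sets IXGBE_SUSPENDED and stops
 * the hardware, but deliberately leaves IXGBE_STARTED alone, so that
 * ixgbe_resume() can tell whether the interface was running before the
 * suspend and restart it only in that case.
 */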

/*
 * ixgbe_init - Initialize the device.
 */
static int
ixgbe_init(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	mutex_enter(&ixgbe->gen_lock);

	/*
	 * Reset chipset to put the hardware in a known state
	 * before we try to do anything with the eeprom.
	 */
	if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * Need to init eeprom before validating the checksum.
	 */
	if (ixgbe_init_eeprom_params(hw) < 0) {
		ixgbe_error(ixgbe,
		    "Unable to initialize the eeprom interface.");
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * NVM validation
	 */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state. Call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
			ixgbe_error(ixgbe,
			    "Invalid NVM checksum. Please contact "
			    "the vendor to update the NVM.");
			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}

	/*
	 * Setup default flow control thresholds - enable/disable
	 * & flow control type is controlled by ixgbe.conf
	 */
	hw->fc.high_water = DEFAULT_FCRTH;
	hw->fc.low_water = DEFAULT_FCRTL;
	hw->fc.pause_time = DEFAULT_FCPAUSE;
	hw->fc.send_xon = B_TRUE;

	/*
	 * Don't wait for auto-negotiation to complete
	 */
	hw->phy.autoneg_wait_to_complete = B_FALSE;

	/*
	 * Initialize link settings
	 */
	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);

	/*
	 * Initialize the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
		goto init_fail;
	}
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto init_fail;
	}

	mutex_exit(&ixgbe->gen_lock);
	return (IXGBE_SUCCESS);

init_fail:
	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	mutex_exit(&ixgbe->gen_lock);
	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	return (IXGBE_FAILURE);
}

/*
 * ixgbe_init_rings - Allocate DMA resources for all rx/tx rings and
 * initialize relevant hardware settings.
 */
static int
ixgbe_init_rings(ixgbe_t *ixgbe)
{
	int i;

	/*
	 * Allocate buffers for all the rx/tx rings
	 */
	if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS)
		return (IXGBE_FAILURE);

	/*
	 * Setup the rx/tx rings
	 */
	mutex_enter(&ixgbe->gen_lock);

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	ixgbe_setup_rings(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	mutex_exit(&ixgbe->gen_lock);

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_fini_rings - Release DMA resources of all rx/tx rings.
 */
static void
ixgbe_fini_rings(ixgbe_t *ixgbe)
{
	/*
	 * Release the DMA/memory resources of rx/tx rings
	 */
	ixgbe_free_dma(ixgbe);
}
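
/*
 * Ring lock discipline, as used in ixgbe_init_rings() above and in
 * ixgbe_start()/ixgbe_stop()/ixgbe_reset() below: all rx locks are
 * taken in ascending ring order, then all tx locks, and the locks are
 * dropped in the exact reverse order. Keeping a single global order
 * prevents deadlock between threads that need multiple ring locks.
 */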

/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Get the mac address.
	 * This function should handle SPARC case correctly.
	 */
	if (!ixgbe_find_mac_address(ixgbe)) {
		ixgbe_error(ixgbe, "Failed to get the mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Validate the mac address
	 */
	(void) ixgbe_init_rx_addrs(hw);
	if (!is_valid_mac_addr(hw->mac.addr)) {
		ixgbe_error(ixgbe, "Invalid mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Configure/Initialize hardware
	 */
	if (ixgbe_init_hw(hw) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize hardware");
		return (IXGBE_FAILURE);
	}

	/*
	 * Setup adapter interrupt vectors
	 */
	ixgbe_setup_adapter_vector(ixgbe);

	/*
	 * Initialize unicast addresses.
	 */
	ixgbe_init_unicst(ixgbe);

	/*
	 * Setup and initialize the mctable structures.
	 */
	ixgbe_setup_multicst(ixgbe);

	/*
	 * Set interrupt throttling rate
	 */
	for (i = 0; i < ixgbe->intr_cnt; i++)
		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);

	/*
	 * Save the state of the phy
	 */
	ixgbe_get_hw_state(ixgbe);

	/*
	 * Make sure driver has control
	 */
	ixgbe_get_driver_control(hw);

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_chip_stop - Stop the chipset hardware
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Tell firmware driver is no longer in control
	 */
	ixgbe_release_driver_control(hw);

	/*
	 * Reset the chipset
	 */
	(void) ixgbe_reset_hw(hw);

	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);
}

/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
	int i;

	mutex_enter(&ixgbe->gen_lock);

	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
	ixgbe->ixgbe_state &= ~IXGBE_STARTED;

	/*
	 * Disable the adapter interrupts to stop any rx/tx activities
	 * before draining pending data and resetting hardware.
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending transmit packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto reset_failure;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto reset_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ixgbe->ixgbe_state |= IXGBE_STARTED;
	mutex_exit(&ixgbe->gen_lock);

	return (IXGBE_SUCCESS);

reset_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	mutex_exit(&ixgbe->gen_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	uint32_t desc_num;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i, j;

	LINK_LIST_INIT(&pending_list);

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];

		mutex_enter(&tx_ring->recycle_lock);

		/*
		 * Clean the pending tx data - the pending packets in the
		 * work_list that have no chances to be transmitted again.
		 *
		 * We must ensure the chipset is stopped or the link is down
		 * before cleaning the transmit packets.
		 */
		desc_num = 0;
		for (j = 0; j < tx_ring->ring_size; j++) {
			tcb = tx_ring->work_list[j];
			if (tcb != NULL) {
				desc_num += tcb->desc_num;

				tx_ring->work_list[j] = NULL;

				ixgbe_free_tcb(tcb);

				LIST_PUSH_TAIL(&pending_list, &tcb->link);
			}
		}

		if (desc_num > 0) {
			atomic_add_32(&tx_ring->tbd_free, desc_num);
			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

			/*
			 * Reset the head and tail pointers of the tbd ring;
			 * reset the writeback head if it's enabled.
			 */
			tx_ring->tbd_head = 0;
			tx_ring->tbd_tail = 0;
			if (ixgbe->tx_head_wb_enable)
				*tx_ring->tbd_head_wb = 0;

			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDH(tx_ring->index), 0);
			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDT(tx_ring->index), 0);
		}

		mutex_exit(&tx_ring->recycle_lock);

		/*
		 * Add the tx control blocks in the pending list to
		 * the free list.
		 */
		ixgbe_put_free_list(tx_ring, &pending_list);
	}
}
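
/*
 * Note on the pending_list pattern above: reclaimed tx control blocks
 * are first collected on a local (stack) list while recycle_lock is
 * held, and only then handed back in one batch via
 * ixgbe_put_free_list(). Batching the return keeps the time spent
 * under the free-list lock short rather than taking it once per tcb.
 */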

/*
 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
 * transmitted.
 */
static boolean_t
ixgbe_tx_drain(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	boolean_t done;
	int i, j;

	/*
	 * Wait for a specific time to allow pending tx packets
	 * to be transmitted.
	 *
	 * Check the counter tbd_free to see if transmission is done.
	 * No lock protection is needed here.
	 *
	 * Return B_TRUE if all pending packets have been transmitted;
	 * Otherwise return B_FALSE;
	 */
	for (i = 0; i < TX_DRAIN_TIME; i++) {

		done = B_TRUE;
		for (j = 0; j < ixgbe->num_tx_rings; j++) {
			tx_ring = &ixgbe->tx_rings[j];
			done = done &&
			    (tx_ring->tbd_free == tx_ring->ring_size);
		}

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}

/*
 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
 */
static boolean_t
ixgbe_rx_drain(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	boolean_t done;
	int i, j;

	/*
	 * Poll the rx free list to check if those rx buffers held by
	 * the upper layer have been released.
	 *
	 * Check the counter rcb_free to see if all pending buffers are
	 * released. No lock protection is needed here.
	 *
	 * Return B_TRUE if all pending buffers have been released;
	 * Otherwise return B_FALSE;
	 */
	for (i = 0; i < RX_DRAIN_TIME; i++) {

		done = B_TRUE;
		for (j = 0; j < ixgbe->num_rx_rings; j++) {
			rx_ring = &ixgbe->rx_rings[j];
			done = done &&
			    (rx_ring->rcb_free == rx_ring->free_list_size);
		}

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}

/*
 * ixgbe_start - Start the driver/chipset.
 */
int
ixgbe_start(ixgbe_t *ixgbe)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}
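
/*
 * ixgbe_start() and ixgbe_stop() are deliberately symmetric: both
 * require the caller to hold gen_lock (see the ASSERTs), and both
 * bracket their hardware work with the same take-all/release-all ring
 * lock sequence. Callers such as ixgbe_resume(), ixgbe_suspend() and
 * the detach path rely on this pairing.
 */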

/*
 * ixgbe_stop - Stop the driver/chipset.
 */
void
ixgbe_stop(ixgbe_t *ixgbe)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
}

/*
 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
 */
static int
ixgbe_alloc_rings(ixgbe_t *ixgbe)
{
	/*
	 * Allocate memory space for rx rings
	 */
	ixgbe->rx_rings = kmem_zalloc(
	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
	    KM_NOSLEEP);

	if (ixgbe->rx_rings == NULL) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory space for tx rings
	 */
	ixgbe->tx_rings = kmem_zalloc(
	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
	    KM_NOSLEEP);

	if (ixgbe->tx_rings == NULL) {
		kmem_free(ixgbe->rx_rings,
		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
		ixgbe->rx_rings = NULL;
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_rings - Free the memory space of rx/tx rings.
 */
static void
ixgbe_free_rings(ixgbe_t *ixgbe)
{
	if (ixgbe->rx_rings != NULL) {
		kmem_free(ixgbe->rx_rings,
		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
		ixgbe->rx_rings = NULL;
	}

	if (ixgbe->tx_rings != NULL) {
		kmem_free(ixgbe->tx_rings,
		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
		ixgbe->tx_rings = NULL;
	}
}

/*
 * ixgbe_setup_rings - Setup rx/tx rings.
 */
static void
ixgbe_setup_rings(ixgbe_t *ixgbe)
{
	/*
	 * Setup the rx/tx rings, including the following:
	 *
	 * 1. Setup the descriptor ring and the control block buffers;
	 * 2. Initialize necessary registers for receive/transmit;
	 * 3. Initialize software pointers/parameters for receive/transmit;
	 */
	ixgbe_setup_rx(ixgbe);

	ixgbe_setup_tx(ixgbe);
}

static void
ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
{
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	rx_control_block_t *rcb;
	union ixgbe_adv_rx_desc *rbd;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;
	int i;

	ASSERT(mutex_owned(&rx_ring->rx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	for (i = 0; i < ixgbe->rx_ring_size; i++) {
		rcb = rx_ring->work_list[i];
		rbd = &rx_ring->rbd_ring[i];

		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
		rbd->read.hdr_addr = NULL;
	}

	/*
	 * Initialize the length register
	 */
	size = rx_ring->ring_size * sizeof (union ixgbe_adv_rx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)rx_ring->rbd_area.dma_address;
	buf_high = (uint32_t)(rx_ring->rbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_ring->ring_size - 1);
	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);

	rx_ring->rbd_next = 0;

	/*
	 * Note: Considering the case that the chipset is being reset
	 * and there are still some buffers held by the upper layer,
	 * we should not reset the values of rcb_head, rcb_tail and
	 * rcb_free if the state is not IXGBE_UNKNOWN.
	 */
	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
		rx_ring->rcb_head = 0;
		rx_ring->rcb_tail = 0;
		rx_ring->rcb_free = rx_ring->free_list_size;
	}

	/*
	 * Setup the Receive Descriptor Control Register (RXDCTL)
	 * PTHRESH=32 descriptors (half the internal cache)
	 * HTHRESH=0 descriptors (to minimize latency on fetch)
	 * WTHRESH defaults to 1 (writeback each descriptor)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
	reg_val |= 0x0020;		/* pthresh */
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);

	/*
	 * Setup the Split and Replication Receive Control Register.
	 * Set the rx buffer size and the advanced descriptor type.
	 */
	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
}
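
/*
 * SRRCTL sizing note (illustrative): the BSIZEPKT field is expressed
 * in 1 KB units, so shifting rx_buf_size right by
 * IXGBE_SRRCTL_BSIZEPKT_SHIFT (10 in the shared-code headers) converts
 * the byte count computed in ixgbe_init_driver_settings() into the
 * register encoding. E.g. a 2048-byte rx buffer is programmed as the
 * value 2.
 */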

static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t reg_val;
	int i;

	/*
	 * Set filter control in FCTRL to accept broadcast packets and do
	 * not pass pause frames to host. Flow control settings are already
	 * in this register, so preserve them.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

	/*
	 * Enable the receive unit. This must be done after filter
	 * control is set in FCTRL.
	 */
	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);

	/*
	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_setup_rx_ring(rx_ring);
	}

	/*
	 * The Max Frame Size in MHADD will be internally increased by four
	 * bytes if the packet has a VLAN field, so it includes the MTU,
	 * ethernet header and frame check sequence.
	 */
	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);

	/*
	 * Setup Jumbo Frame enable bit
	 */
	if (ixgbe->default_mtu > ETHERMTU) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg_val |= IXGBE_HLREG0_JUMBOEN;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
	}

	/*
	 * Hardware checksum settings
	 */
	if (ixgbe->rx_hcksum_enable) {
		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
	}

	/*
	 * Setup RSS for multiple receive queues
	 */
	if (ixgbe->num_rx_rings > 1)
		ixgbe_setup_rss(ixgbe);
}
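
/*
 * MHADD arithmetic, worked through for the defaults (illustrative):
 * with default_mtu = 1500, sizeof (struct ether_header) = 14 and
 * ETHERFCSL = 4, the max frame size programmed into MHADD is 1518
 * bytes; the hardware itself allows 4 more bytes when a VLAN tag is
 * present, so tagged frames up to 1522 bytes are accepted.
 */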

static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;

	ASSERT(mutex_owned(&tx_ring->tx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Initialize the length register
	 */
	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

	/*
	 * Setup TXDCTL(tx_ring->index)
	 */
	reg_val = IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

	/*
	 * Setup head write-back
	 */
	if (ixgbe->tx_head_wb_enable) {
		/*
		 * The memory of the head write-back is allocated using
		 * the extra tbd beyond the tail of the tbd ring.
		 */
		tx_ring->tbd_head_wb = (uint32_t *)
		    ((uintptr_t)tx_ring->tbd_area.address + size);
		*tx_ring->tbd_head_wb = 0;

		buf_low = (uint32_t)
		    (tx_ring->tbd_area.dma_address + size);
		buf_high = (uint32_t)
		    ((tx_ring->tbd_area.dma_address + size) >> 32);

		/* Set the head write-back enable bit */
		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

		/*
		 * Turn off relaxed ordering for head write back or it will
		 * cause problems with the tx recycling
		 */
		reg_val = IXGBE_READ_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index));
		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
	} else {
		tx_ring->tbd_head_wb = NULL;
	}

	tx_ring->tbd_head = 0;
	tx_ring->tbd_tail = 0;
	tx_ring->tbd_free = tx_ring->ring_size;

	/*
	 * Note: Considering the case that the chipset is being reset,
	 * and there are still some tcb in the pending list,
	 * we should not reset the values of tcb_head, tcb_tail and
	 * tcb_free if the state is not IXGBE_UNKNOWN.
	 */
	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
		tx_ring->tcb_head = 0;
		tx_ring->tcb_tail = 0;
		tx_ring->tcb_free = tx_ring->free_list_size;
	}

	/*
	 * Initialize hardware checksum offload settings
	 */
	tx_ring->tx_context.hcksum_flags = 0;
	tx_ring->tx_context.ip_hdr_len = 0;
	tx_ring->tx_context.mac_hdr_len = 0;
	tx_ring->tx_context.l4_proto = 0;
}

static void
ixgbe_setup_tx(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t reg_val;
	int i;

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_setup_tx_ring(tx_ring);
	}

	/*
	 * Enable CRC appending and TX padding (for short tx frames)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
}
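
/*
 * Head write-back, in brief: instead of the driver reading the TDH
 * register to learn how far the hardware has consumed descriptors,
 * the hardware DMAs the head index into the extra descriptor slot set
 * up above, and ixgbe_tx_recycle_head_wb() (selected in
 * ixgbe_init_driver_settings()) reads *tbd_head_wb from host memory.
 * This trades one extra DMA write by the device for the removal of a
 * slow register read from the tx recycle path.
 */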

/*
 * ixgbe_setup_rss - Setup receive-side scaling feature.
 */
static void
ixgbe_setup_rss(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t i, mrqc, rxcsum;
	uint32_t random;
	uint32_t reta;

	/*
	 * Fill out redirection table
	 */
	reta = 0;
	for (i = 0; i < 128; i++) {
		reta = (reta << 8) | (i % ixgbe->num_rx_rings);
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}

	/*
	 * Fill out hash function seeds with a random constant
	 */
	for (i = 0; i < 10; i++) {
		(void) random_get_pseudo_bytes((uint8_t *)&random,
		    sizeof (uint32_t));
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
	}

	/*
	 * Enable RSS & perform hash on these packet types
	 */
	mrqc = IXGBE_MRQC_RSSEN |
	    IXGBE_MRQC_RSS_FIELD_IPV4 |
	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
	    IXGBE_MRQC_RSS_FIELD_IPV6 |
	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	/*
	 * Disable Packet Checksum to enable RSS for multiple receive queues.
	 * It is an adapter hardware limitation that Packet Checksum is
	 * mutually exclusive with RSS.
	 */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}

/*
 * ixgbe_init_unicst - Initialize the unicast addresses.
 */
static void
ixgbe_init_unicst(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot;

	/*
	 * Here we should consider two situations:
	 *
	 * 1. Chipset is initialized the first time
	 *    Initialize the multiple unicast addresses, and
	 *    save the default mac address.
	 *
	 * 2. Chipset is reset
	 *    Recover the multiple unicast addresses from the
	 *    software data structure to the RAR registers.
	 */
	if (!ixgbe->unicst_init) {
		/*
		 * Initialize the multiple unicast addresses
		 */
		ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;

		ixgbe->unicst_avail = ixgbe->unicst_total - 1;

		bcopy(hw->mac.addr, ixgbe->unicst_addr[0].mac.addr,
		    ETHERADDRL);
		ixgbe->unicst_addr[0].mac.set = 1;

		for (slot = 1; slot < ixgbe->unicst_total; slot++)
			ixgbe->unicst_addr[slot].mac.set = 0;

		ixgbe->unicst_init = B_TRUE;
	} else {
		/*
		 * Recover the default mac address
		 */
		bcopy(ixgbe->unicst_addr[0].mac.addr, hw->mac.addr,
		    ETHERADDRL);

		/* Re-configure the RAR registers */
		for (slot = 1; slot < ixgbe->unicst_total; slot++)
			(void) ixgbe_set_rar(hw, slot,
			    ixgbe->unicst_addr[slot].mac.addr, NULL, NULL);
	}
}

/*
 * ixgbe_unicst_set - Set the unicast address to the specified slot.
 */
int
ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
    mac_addr_slot_t slot)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Save the unicast address in the software data structure
	 */
	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);

	/*
	 * Set the unicast address to the RAR register
	 */
	(void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, NULL, NULL);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}

/*
 * ixgbe_multicst_add - Add a multicst address.
 */
int
ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
{
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	if ((multiaddr[0] & 01) == 0) {
		return (EINVAL);
	}

	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
		return (ENOENT);
	}

	bcopy(multiaddr,
	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
	ixgbe->mcast_count++;

	/*
	 * Update the multicast table in the hardware
	 */
	ixgbe_setup_multicst(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
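
/*
 * The (multiaddr[0] & 01) test above checks the Ethernet group bit:
 * the least significant bit of the first address octet is 1 for
 * multicast/broadcast addresses and 0 for unicast, so a unicast
 * address is rejected with EINVAL before it can be added to the
 * multicast table.
 */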

/*
 * ixgbe_multicst_remove - Remove a multicst address.
 */
int
ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	for (i = 0; i < ixgbe->mcast_count; i++) {
		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
		    ETHERADDRL) == 0) {
			for (i++; i < ixgbe->mcast_count; i++) {
				ixgbe->mcast_table[i - 1] =
				    ixgbe->mcast_table[i];
			}
			ixgbe->mcast_count--;
			break;
		}
	}

	/*
	 * Update the multicast table in the hardware
	 */
	ixgbe_setup_multicst(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}

/*
 * ixgbe_setup_multicst - Setup multicast data structures.
 *
 * This routine initializes all of the multicast related structures
 * and saves them in the hardware registers.
 */
static void
ixgbe_setup_multicst(ixgbe_t *ixgbe)
{
	uint8_t *mc_addr_list;
	uint32_t mc_addr_count;
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);

	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
	mc_addr_count = ixgbe->mcast_count;

	/*
	 * Update the multicast addresses to the MTA registers
	 */
	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
	    ixgbe_mc_table_itr);
}
2146 */
2147 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
2148 MIN_MTU, MAX_MTU, DEFAULT_MTU);
2149
2150 ixgbe->max_frame_size = ixgbe->default_mtu +
2151 sizeof (struct ether_vlan_header) + ETHERFCSL;
2152
2153 /*
2154 * Ethernet flow control configuration
2155 */
2156 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
2157 ixgbe_fc_none, 3, ixgbe_fc_full);
2158 if (flow_control == 3)
2159 flow_control = ixgbe_fc_default;
2160
2161 hw->fc.type = flow_control;
2162
2163 /*
2164 * Multiple ring configurations
2165 */
2166 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
2167 MIN_TX_QUEUE_NUM, MAX_TX_QUEUE_NUM, DEFAULT_TX_QUEUE_NUM);
2168 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
2169 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
2170
2171 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
2172 MIN_RX_QUEUE_NUM, MAX_RX_QUEUE_NUM, DEFAULT_RX_QUEUE_NUM);
2173 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
2174 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
2175
2176 /*
2177 * Tunable used to force an interrupt type. Its only use is
2178 * for testing the lesser interrupt types.
2179 * 0 = don't force interrupt type
2180 * 1 = force interrupt type MSI-X
2181 * 2 = force interrupt type MSI
2182 * 3 = force interrupt type Legacy
2183 */
2184 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
2185 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
2186 ixgbe_log(ixgbe, "interrupt force: %d\n", ixgbe->intr_force);
2187
2188 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
2189 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
2190 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
2191 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
2192 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
2193 0, 1, DEFAULT_LSO_ENABLE);
2194 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
2195 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
2196
2197 /*
2198 * ixgbe LSO needs tx h/w checksum support.
2199 * LSO will be disabled if tx h/w checksum is not
2200 * enabled.
2201 */
2202 if (ixgbe->tx_hcksum_enable == B_FALSE) {
2203 ixgbe->lso_enable = B_FALSE;
2204 }
2205
2206 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
2207 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
2208 DEFAULT_TX_COPY_THRESHOLD);
2209 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
2210 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
2211 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
2212 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
2213 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
2214 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
2215 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
2216 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
2217 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
2218
2219 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
2220 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
2221 DEFAULT_RX_COPY_THRESHOLD);
2222 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
2223 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
2224 DEFAULT_RX_LIMIT_PER_INTR);
2225
2226 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
2227 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
2228 DEFAULT_INTR_THROTTLING);
2229 }
2230
2231 /*
2232 * ixgbe_get_prop - Get a property value out of the configuration file
2233 * ixgbe.conf.
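 *
 * For example, a driver.conf entry such as (hypothetical value):
 *
 *	default_mtu = 9000;
 *
 * is fetched here by property name and then clamped to the
 * [minval, maxval] range below.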
2234 *
2235 * Caller provides the name of the property, a minimum value, a
2236 * maximum value, and a default value.
2237 *
2238 * Returns the configured value of the property, with the default,
2239 * minimum and maximum properly applied.
2240 */
2241 static int
2242 ixgbe_get_prop(ixgbe_t *ixgbe,
2243 char *propname, /* name of the property */
2244 int minval, /* minimum acceptable value */
2245 int maxval, /* maximum acceptable value */
2246 int defval) /* default value */
2247 {
2248 int value;
2249
2250 /*
2251 * Call ddi_prop_get_int() to read the conf settings
2252 */
2253 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
2254 DDI_PROP_DONTPASS, propname, defval);
2255 if (value > maxval)
2256 value = maxval;
2257
2258 if (value < minval)
2259 value = minval;
2260
2261 return (value);
2262 }
2263
2264 /*
2265 * ixgbe_driver_setup_link - Use the link properties to set up the link.
2266 */
2267 int
2268 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
2269 {
2270 struct ixgbe_mac_info *mac;
2271 struct ixgbe_phy_info *phy;
2272 boolean_t invalid;
2273
2274 mac = &ixgbe->hw.mac;
2275 phy = &ixgbe->hw.phy;
2276 invalid = B_FALSE;
2277
2278 if (ixgbe->param_adv_autoneg_cap == 1) {
2279 mac->autoneg = B_TRUE;
2280 phy->autoneg_advertised = 0;
2281
2282 /*
2283 * No half duplex support with 10Gb parts
2284 */
2285 if (ixgbe->param_adv_10000fdx_cap == 1)
2286 phy->autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
2287
2288 if (ixgbe->param_adv_1000fdx_cap == 1)
2289 phy->autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
2290
2291 if (ixgbe->param_adv_100fdx_cap == 1)
2292 phy->autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
2293
2294 if (phy->autoneg_advertised == 0)
2295 invalid = B_TRUE;
2296 } else {
2297 mac->autoneg = B_FALSE;
2298 }
2299
2300 if (invalid) {
2301 ixgbe_notice(ixgbe, "Invalid link settings. Setting link to "
2302 "autonegotiation with full link capabilities.");
2303 mac->autoneg = B_TRUE;
2304 }
2305
2306 if (setup_hw) {
2307 if (ixgbe_setup_link(&ixgbe->hw) != IXGBE_SUCCESS)
2308 return (IXGBE_FAILURE);
2309 }
2310
2311 return (IXGBE_SUCCESS);
2312 }
2313
2314 /*
2315 * ixgbe_driver_link_check - Link status processing.
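 *
 * A minimal calling sketch (illustrative; this mirrors how
 * ixgbe_intr_other_work() below uses it, with gen_lock held):
 *
 *	mutex_enter(&ixgbe->gen_lock);
 *	link_changed = ixgbe_driver_link_check(ixgbe);
 *	mutex_exit(&ixgbe->gen_lock);
 *	if (link_changed)
 *		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);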
2316 */
2317 static boolean_t
2318 ixgbe_driver_link_check(ixgbe_t *ixgbe)
2319 {
2320 struct ixgbe_hw *hw = &ixgbe->hw;
2321 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
2322 boolean_t link_up = B_FALSE;
2323 boolean_t link_changed = B_FALSE;
2324
2325 ASSERT(mutex_owned(&ixgbe->gen_lock));
2326
2327 (void) ixgbe_check_link(hw, &speed, &link_up);
2328 if (link_up) {
2329 /*
2330 * The link is up; check whether it was marked as down earlier
2331 */
2332 if (ixgbe->link_state != LINK_STATE_UP) {
2333 switch (speed) {
2334 case IXGBE_LINK_SPEED_10GB_FULL:
2335 ixgbe->link_speed = SPEED_10GB;
2336 break;
2337 case IXGBE_LINK_SPEED_1GB_FULL:
2338 ixgbe->link_speed = SPEED_1GB;
2339 break;
2340 case IXGBE_LINK_SPEED_100_FULL:
2341 ixgbe->link_speed = SPEED_100; break;
2342 }
2343 ixgbe->link_duplex = LINK_DUPLEX_FULL;
2344 ixgbe->link_state = LINK_STATE_UP;
2345 ixgbe->link_down_timeout = 0;
2346 link_changed = B_TRUE;
2347 }
2348 } else {
2349 if (ixgbe->link_state != LINK_STATE_DOWN) {
2350 ixgbe->link_speed = 0;
2351 ixgbe->link_duplex = 0;
2352 ixgbe->link_state = LINK_STATE_DOWN;
2353 link_changed = B_TRUE;
2354 }
2355
2356 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
2357 if (ixgbe->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) {
2358 ixgbe->link_down_timeout++;
2359 } else if (ixgbe->link_down_timeout ==
2360 MAX_LINK_DOWN_TIMEOUT) {
2361 ixgbe_tx_clean(ixgbe);
2362 ixgbe->link_down_timeout++;
2363 }
2364 }
2365 }
2366
2367 return (link_changed);
2368 }
2369
2370 /*
2371 * ixgbe_local_timer - Driver watchdog function.
2372 *
2373 * This function handles the transmit stall check, link status check and
2374 * other routines.
2375 */
2376 static void
2377 ixgbe_local_timer(void *arg)
2378 {
2379 ixgbe_t *ixgbe = (ixgbe_t *)arg;
2380
2381 if (ixgbe_stall_check(ixgbe)) {
2382 ixgbe->reset_count++;
2383 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
2384 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
2385 }
2386
2387 ixgbe_restart_watchdog_timer(ixgbe);
2388 }
2389
2390 /*
2391 * ixgbe_stall_check - Check for transmit stall.
2392 *
2393 * This function checks if the adapter is stalled (in transmit).
2394 *
2395 * It is called each time the watchdog timeout is invoked.
2396 * If the transmit descriptor reclaim continuously fails,
2397 * the watchdog value will increment by 1. If the watchdog
2398 * value exceeds the threshold, the ixgbe is assumed to
2399 * have stalled and needs to be reset.
2400 */
2401 static boolean_t
2402 ixgbe_stall_check(ixgbe_t *ixgbe)
2403 {
2404 ixgbe_tx_ring_t *tx_ring;
2405 boolean_t result;
2406 int i;
2407
2408 if (ixgbe->link_state != LINK_STATE_UP)
2409 return (B_FALSE);
2410
2411 /*
2412 * If any tx ring is stalled, we'll reset the chipset
2413 */
2414 result = B_FALSE;
2415 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2416 tx_ring = &ixgbe->tx_rings[i];
2417
2418 if (tx_ring->recycle_fail > 0)
2419 tx_ring->stall_watchdog++;
2420 else
2421 tx_ring->stall_watchdog = 0;
2422
2423 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
2424 result = B_TRUE;
2425 break;
2426 }
2427 }
2428
2429 if (result) {
2430 tx_ring->stall_watchdog = 0;
2431 tx_ring->recycle_fail = 0;
2432 }
2433
2434 return (result);
2435 }
2436
2437
2438 /*
2439 * is_valid_mac_addr - Check if the MAC address is valid.
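 *
 * Only the all-zero and broadcast addresses are rejected. A tiny
 * illustrative check:
 *
 *	uint8_t zero[ETHERADDRL] = { 0, 0, 0, 0, 0, 0 };
 *	ASSERT(is_valid_mac_addr(zero) == B_FALSE);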
2440 */ 2441 static boolean_t 2442 is_valid_mac_addr(uint8_t *mac_addr) 2443 { 2444 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 2445 const uint8_t addr_test2[6] = 2446 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 2447 2448 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 2449 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 2450 return (B_FALSE); 2451 2452 return (B_TRUE); 2453 } 2454 2455 static boolean_t 2456 ixgbe_find_mac_address(ixgbe_t *ixgbe) 2457 { 2458 #ifdef __sparc 2459 struct ixgbe_hw *hw = &ixgbe->hw; 2460 uchar_t *bytes; 2461 struct ether_addr sysaddr; 2462 uint_t nelts; 2463 int err; 2464 boolean_t found = B_FALSE; 2465 2466 /* 2467 * The "vendor's factory-set address" may already have 2468 * been extracted from the chip, but if the property 2469 * "local-mac-address" is set we use that instead. 2470 * 2471 * We check whether it looks like an array of 6 2472 * bytes (which it should, if OBP set it). If we can't 2473 * make sense of it this way, we'll ignore it. 2474 */ 2475 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 2476 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 2477 if (err == DDI_PROP_SUCCESS) { 2478 if (nelts == ETHERADDRL) { 2479 while (nelts--) 2480 hw->mac.addr[nelts] = bytes[nelts]; 2481 found = B_TRUE; 2482 } 2483 ddi_prop_free(bytes); 2484 } 2485 2486 /* 2487 * Look up the OBP property "local-mac-address?". If the user has set 2488 * 'local-mac-address? = false', use "the system address" instead. 2489 */ 2490 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0, 2491 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 2492 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 2493 if (localetheraddr(NULL, &sysaddr) != 0) { 2494 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 2495 found = B_TRUE; 2496 } 2497 } 2498 ddi_prop_free(bytes); 2499 } 2500 2501 /* 2502 * Finally(!), if there's a valid "mac-address" property (created 2503 * if we netbooted from this interface), we must use this instead 2504 * of any of the above to ensure that the NFS/install server doesn't 2505 * get confused by the address changing as Solaris takes over! 2506 */ 2507 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 2508 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 2509 if (err == DDI_PROP_SUCCESS) { 2510 if (nelts == ETHERADDRL) { 2511 while (nelts--) 2512 hw->mac.addr[nelts] = bytes[nelts]; 2513 found = B_TRUE; 2514 } 2515 ddi_prop_free(bytes); 2516 } 2517 2518 if (found) { 2519 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 2520 return (B_TRUE); 2521 } 2522 #else 2523 _NOTE(ARGUNUSED(ixgbe)); 2524 #endif 2525 2526 return (B_TRUE); 2527 } 2528 2529 #pragma inline(ixgbe_arm_watchdog_timer) 2530 static void 2531 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe) 2532 { 2533 /* 2534 * Fire a watchdog timer 2535 */ 2536 ixgbe->watchdog_tid = 2537 timeout(ixgbe_local_timer, 2538 (void *)ixgbe, 1 * drv_usectohz(1000000)); 2539 2540 } 2541 2542 /* 2543 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer. 2544 */ 2545 void 2546 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe) 2547 { 2548 mutex_enter(&ixgbe->watchdog_lock); 2549 2550 if (!ixgbe->watchdog_enable) { 2551 ixgbe->watchdog_enable = B_TRUE; 2552 ixgbe->watchdog_start = B_TRUE; 2553 ixgbe_arm_watchdog_timer(ixgbe); 2554 } 2555 2556 mutex_exit(&ixgbe->watchdog_lock); 2557 } 2558 2559 /* 2560 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer. 
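 *
 * A typical pairing sketch (illustrative): the timer is enabled
 * once when the device is started and disabled when it is stopped:
 *
 *	ixgbe_enable_watchdog_timer(ixgbe);
 *	...
 *	ixgbe_disable_watchdog_timer(ixgbe);
 *
 * Any timeout that has already been dispatched is cancelled here
 * via untimeout().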
2561 */
2562 void
2563 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
2564 {
2565 timeout_id_t tid;
2566
2567 mutex_enter(&ixgbe->watchdog_lock);
2568
2569 ixgbe->watchdog_enable = B_FALSE;
2570 ixgbe->watchdog_start = B_FALSE;
2571 tid = ixgbe->watchdog_tid;
2572 ixgbe->watchdog_tid = 0;
2573
2574 mutex_exit(&ixgbe->watchdog_lock);
2575
2576 if (tid != 0)
2577 (void) untimeout(tid);
2578 }
2579
2580 /*
2581 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
2582 */
2583 static void
2584 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
2585 {
2586 mutex_enter(&ixgbe->watchdog_lock);
2587
2588 if (ixgbe->watchdog_enable) {
2589 if (!ixgbe->watchdog_start) {
2590 ixgbe->watchdog_start = B_TRUE;
2591 ixgbe_arm_watchdog_timer(ixgbe);
2592 }
2593 }
2594
2595 mutex_exit(&ixgbe->watchdog_lock);
2596 }
2597
2598 /*
2599 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
2600 */
2601 static void
2602 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
2603 {
2604 mutex_enter(&ixgbe->watchdog_lock);
2605
2606 if (ixgbe->watchdog_start)
2607 ixgbe_arm_watchdog_timer(ixgbe);
2608
2609 mutex_exit(&ixgbe->watchdog_lock);
2610 }
2611
2612 /*
2613 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
2614 */
2615 static void
2616 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
2617 {
2618 timeout_id_t tid;
2619
2620 mutex_enter(&ixgbe->watchdog_lock);
2621
2622 ixgbe->watchdog_start = B_FALSE;
2623 tid = ixgbe->watchdog_tid;
2624 ixgbe->watchdog_tid = 0;
2625
2626 mutex_exit(&ixgbe->watchdog_lock);
2627
2628 if (tid != 0)
2629 (void) untimeout(tid);
2630 }
2631
2632 /*
2633 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
2634 */
2635 static void
2636 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
2637 {
2638 struct ixgbe_hw *hw = &ixgbe->hw;
2639
2640 /*
2641 * mask all interrupts off
2642 */
2643 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
2644
2645 /*
2646 * for MSI-X, also disable autoclear
2647 */
2648 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
2649 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
2650 }
2651
2652 IXGBE_WRITE_FLUSH(hw);
2653 }
2654
2655 /*
2656 * ixgbe_enable_adapter_interrupts - Enable all adapter interrupts.
2657 */
2658 static void
2659 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
2660 {
2661 struct ixgbe_hw *hw = &ixgbe->hw;
2662 uint32_t eims, eiac, gpie;
2663
2664 gpie = 0;
2665 eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */
2666 eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */
2667
2668 /*
2669 * msi-x mode
2670 */
2671 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
2672 /* enable autoclear but not on bits 29:20 */
2673 eiac = (eims & ~0x3ff00000);
2674
2675 /* general purpose interrupt enable */
2676 gpie |= (IXGBE_GPIE_MSIX_MODE |
2677 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
2678 /*
2679 * non-msi-x mode
2680 */
2681 } else {
2682
2683 /* disable autoclear, leave gpie at default */
2684 eiac = 0;
2685 }
2686
2687 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims);
2688 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
2689 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2690 IXGBE_WRITE_FLUSH(hw);
2691 }
2692
2693 /*
2694 * ixgbe_loopback_ioctl - Loopback support.
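 *
 * A typical caller sequence (illustrative): query the property
 * list size with LB_GET_INFO_SIZE, fetch the lb_property_t array
 * with LB_GET_INFO, then select one of the advertised modes with
 * LB_SET_MODE (e.g. the value IXGBE_LB_INTERNAL_MAC).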
2695 */ 2696 enum ioc_reply 2697 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp) 2698 { 2699 lb_info_sz_t *lbsp; 2700 lb_property_t *lbpp; 2701 uint32_t *lbmp; 2702 uint32_t size; 2703 uint32_t value; 2704 2705 if (mp->b_cont == NULL) 2706 return (IOC_INVAL); 2707 2708 switch (iocp->ioc_cmd) { 2709 default: 2710 return (IOC_INVAL); 2711 2712 case LB_GET_INFO_SIZE: 2713 size = sizeof (lb_info_sz_t); 2714 if (iocp->ioc_count != size) 2715 return (IOC_INVAL); 2716 2717 value = sizeof (lb_normal); 2718 value += sizeof (lb_mac); 2719 2720 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 2721 *lbsp = value; 2722 break; 2723 2724 case LB_GET_INFO: 2725 value = sizeof (lb_normal); 2726 value += sizeof (lb_mac); 2727 2728 size = value; 2729 if (iocp->ioc_count != size) 2730 return (IOC_INVAL); 2731 2732 value = 0; 2733 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 2734 2735 lbpp[value++] = lb_normal; 2736 lbpp[value++] = lb_mac; 2737 break; 2738 2739 case LB_GET_MODE: 2740 size = sizeof (uint32_t); 2741 if (iocp->ioc_count != size) 2742 return (IOC_INVAL); 2743 2744 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 2745 *lbmp = ixgbe->loopback_mode; 2746 break; 2747 2748 case LB_SET_MODE: 2749 size = 0; 2750 if (iocp->ioc_count != sizeof (uint32_t)) 2751 return (IOC_INVAL); 2752 2753 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 2754 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp)) 2755 return (IOC_INVAL); 2756 break; 2757 } 2758 2759 iocp->ioc_count = size; 2760 iocp->ioc_error = 0; 2761 2762 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 2763 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 2764 return (IOC_INVAL); 2765 } 2766 2767 return (IOC_REPLY); 2768 } 2769 2770 /* 2771 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode. 2772 */ 2773 static boolean_t 2774 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode) 2775 { 2776 struct ixgbe_hw *hw; 2777 2778 if (mode == ixgbe->loopback_mode) 2779 return (B_TRUE); 2780 2781 hw = &ixgbe->hw; 2782 2783 ixgbe->loopback_mode = mode; 2784 2785 if (mode == IXGBE_LB_NONE) { 2786 /* 2787 * Reset the chip 2788 */ 2789 hw->phy.autoneg_wait_to_complete = B_TRUE; 2790 (void) ixgbe_reset(ixgbe); 2791 hw->phy.autoneg_wait_to_complete = B_FALSE; 2792 return (B_TRUE); 2793 } 2794 2795 mutex_enter(&ixgbe->gen_lock); 2796 2797 switch (mode) { 2798 default: 2799 mutex_exit(&ixgbe->gen_lock); 2800 return (B_FALSE); 2801 2802 case IXGBE_LB_INTERNAL_MAC: 2803 ixgbe_set_internal_mac_loopback(ixgbe); 2804 break; 2805 } 2806 2807 mutex_exit(&ixgbe->gen_lock); 2808 2809 return (B_TRUE); 2810 } 2811 2812 /* 2813 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode. 
2814 */
2815 static void
2816 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
2817 {
2818 struct ixgbe_hw *hw;
2819 uint32_t reg;
2820 uint8_t atlas;
2821
2822 hw = &ixgbe->hw;
2823
2824 /*
2825 * Setup MAC loopback
2826 */
2827 reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2828 reg |= IXGBE_HLREG0_LPBK;
2829 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
2830
2831 reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2832 reg &= ~IXGBE_AUTOC_LMS_MASK;
2833 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg);
2834
2835 /*
2836 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
2837 */
2838 if (hw->mac.type == ixgbe_mac_82598EB) {
2839 (void) ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
2840 &atlas);
2841 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
2842 (void) ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
2843 atlas);
2844
2845 (void) ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
2846 &atlas);
2847 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
2848 (void) ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
2849 atlas);
2850
2851 (void) ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
2852 &atlas);
2853 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
2854 (void) ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
2855 atlas);
2856
2857 (void) ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
2858 &atlas);
2859 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
2860 (void) ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
2861 atlas);
2862 }
2863 }
2864
2865 #pragma inline(ixgbe_intr_rx_work)
2866 /*
2867 * ixgbe_intr_rx_work - RX processing of ISR.
2868 */
2869 static void
2870 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
2871 {
2872 mblk_t *mp;
2873
2874 mutex_enter(&rx_ring->rx_lock);
2875
2876 mp = ixgbe_rx(rx_ring);
2877 mutex_exit(&rx_ring->rx_lock);
2878
2879 if (mp != NULL)
2880 mac_rx(rx_ring->ixgbe->mac_hdl, NULL, mp);
2881 }
2882
2883 #pragma inline(ixgbe_intr_tx_work)
2884 /*
2885 * ixgbe_intr_tx_work - TX processing of ISR.
2886 */
2887 static void
2888 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
2889 {
2890 /*
2891 * Recycle the tx descriptors
2892 */
2893 tx_ring->tx_recycle(tx_ring);
2894
2895 /*
2896 * Schedule the re-transmit
2897 */
2898 if (tx_ring->reschedule &&
2899 (tx_ring->tbd_free >= tx_ring->resched_thresh)) {
2900 tx_ring->reschedule = B_FALSE;
2901 mac_tx_update(tx_ring->ixgbe->mac_hdl);
2902 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
2903 }
2904 }
2905
2906 #pragma inline(ixgbe_intr_other_work)
2907 /*
2908 * ixgbe_intr_other_work - Other processing of ISR.
2909 */
2910 static void
2911 ixgbe_intr_other_work(ixgbe_t *ixgbe)
2912 {
2913 boolean_t link_changed;
2914
2915 ixgbe_stop_watchdog_timer(ixgbe);
2916
2917 mutex_enter(&ixgbe->gen_lock);
2918
2919 /*
2920 * Take care of link status change
2921 */
2922 link_changed = ixgbe_driver_link_check(ixgbe);
2923
2924 /*
2925 * Get new phy state
2926 */
2927 ixgbe_get_hw_state(ixgbe);
2928
2929 mutex_exit(&ixgbe->gen_lock);
2930
2931 if (link_changed)
2932 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
2933
2934 ixgbe_start_watchdog_timer(ixgbe);
2935 }
2936
2937 /*
2938 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
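 *
 * Note that rx delivery (mac_rx()), tx rescheduling
 * (mac_tx_update()) and link updates (mac_link_update()) are
 * deliberately performed only after gen_lock has been dropped,
 * since those MAC-layer upcalls may re-enter the driver.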
2939 */ 2940 static uint_t 2941 ixgbe_intr_legacy(void *arg1, void *arg2) 2942 { 2943 _NOTE(ARGUNUSED(arg2)); 2944 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 2945 struct ixgbe_hw *hw = &ixgbe->hw; 2946 ixgbe_tx_ring_t *tx_ring; 2947 uint32_t eicr; 2948 mblk_t *mp; 2949 boolean_t tx_reschedule; 2950 boolean_t link_changed; 2951 uint_t result; 2952 2953 2954 mutex_enter(&ixgbe->gen_lock); 2955 2956 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 2957 mutex_exit(&ixgbe->gen_lock); 2958 return (DDI_INTR_UNCLAIMED); 2959 } 2960 2961 mp = NULL; 2962 tx_reschedule = B_FALSE; 2963 link_changed = B_FALSE; 2964 2965 /* 2966 * Any bit set in eicr: claim this interrupt 2967 */ 2968 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 2969 if (eicr) { 2970 /* 2971 * For legacy interrupt, we have only one interrupt, 2972 * so we have only one rx ring and one tx ring enabled. 2973 */ 2974 ASSERT(ixgbe->num_rx_rings == 1); 2975 ASSERT(ixgbe->num_tx_rings == 1); 2976 2977 /* 2978 * For legacy interrupt, we can't differentiate 2979 * between tx and rx, so always clean both 2980 */ 2981 if (eicr & IXGBE_EICR_RTX_QUEUE) { 2982 2983 /* 2984 * Clean the rx descriptors 2985 */ 2986 mp = ixgbe_rx(&ixgbe->rx_rings[0]); 2987 2988 /* 2989 * Recycle the tx descriptors 2990 */ 2991 tx_ring = &ixgbe->tx_rings[0]; 2992 tx_ring->tx_recycle(tx_ring); 2993 2994 /* 2995 * Schedule the re-transmit 2996 */ 2997 tx_reschedule = (tx_ring->reschedule && 2998 (tx_ring->tbd_free >= tx_ring->resched_thresh)); 2999 } 3000 3001 if (eicr & IXGBE_EICR_LSC) { 3002 3003 /* take care of link status change */ 3004 link_changed = ixgbe_driver_link_check(ixgbe); 3005 3006 /* Get new phy state */ 3007 ixgbe_get_hw_state(ixgbe); 3008 } 3009 3010 result = DDI_INTR_CLAIMED; 3011 } else { 3012 /* 3013 * No interrupt cause bits set: don't claim this interrupt. 3014 */ 3015 result = DDI_INTR_UNCLAIMED; 3016 } 3017 3018 mutex_exit(&ixgbe->gen_lock); 3019 3020 /* 3021 * Do the following work outside of the gen_lock 3022 */ 3023 if (mp != NULL) 3024 mac_rx(ixgbe->mac_hdl, NULL, mp); 3025 3026 if (tx_reschedule) { 3027 tx_ring->reschedule = B_FALSE; 3028 mac_tx_update(ixgbe->mac_hdl); 3029 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 3030 } 3031 3032 if (link_changed) 3033 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3034 3035 return (result); 3036 } 3037 3038 /* 3039 * ixgbe_intr_msi - Interrupt handler for MSI. 3040 */ 3041 static uint_t 3042 ixgbe_intr_msi(void *arg1, void *arg2) 3043 { 3044 _NOTE(ARGUNUSED(arg2)); 3045 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 3046 struct ixgbe_hw *hw = &ixgbe->hw; 3047 uint32_t eicr; 3048 3049 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3050 3051 /* 3052 * For MSI interrupt, we have only one vector, 3053 * so we have only one rx ring and one tx ring enabled. 3054 */ 3055 ASSERT(ixgbe->num_rx_rings == 1); 3056 ASSERT(ixgbe->num_tx_rings == 1); 3057 3058 /* 3059 * For MSI interrupt, we can't differentiate 3060 * between tx and rx, so always clean both. 3061 */ 3062 if (eicr & IXGBE_EICR_RTX_QUEUE) { 3063 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]); 3064 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 3065 } 3066 3067 if (eicr & IXGBE_EICR_LSC) { 3068 ixgbe_intr_other_work(ixgbe); 3069 } 3070 3071 return (DDI_INTR_CLAIMED); 3072 } 3073 3074 /* 3075 * ixgbe_intr_rx - Interrupt handler for rx. 
3076 */
3077 static uint_t
3078 ixgbe_intr_rx(void *arg1, void *arg2)
3079 {
3080 _NOTE(ARGUNUSED(arg2));
3081 ixgbe_ring_vector_t *vect = (ixgbe_ring_vector_t *)arg1;
3082 ixgbe_t *ixgbe = vect->ixgbe;
3083 int r_idx;
3084
3085 /*
3086 * Clean each rx ring that has its bit set in the map
3087 */
3088 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
3089
3090 while (r_idx >= 0) {
3091 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
3092 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
3093 (ixgbe->num_rx_rings - 1));
3094 }
3095
3096 return (DDI_INTR_CLAIMED);
3097 }
3098
3099 /*
3100 * ixgbe_intr_tx_other - Interrupt handler for both tx and other.
3101 *
3102 * Always look for Tx cleanup work. Only look for other work if the right
3103 * bits are set in the Interrupt Cause Register.
3104 */
3105 static uint_t
3106 ixgbe_intr_tx_other(void *arg1, void *arg2)
3107 {
3108 _NOTE(ARGUNUSED(arg2));
3109 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
3110 struct ixgbe_hw *hw = &ixgbe->hw;
3111 uint32_t eicr;
3112
3113 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
3114
3115 /*
3116 * Always look for Tx cleanup work. We don't have separate
3117 * transmit vectors, so we have only one tx ring enabled.
3118 */
3119 ASSERT(ixgbe->num_tx_rings == 1);
3120 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
3121
3122 /*
3123 * Check for "other" causes.
3124 */
3125 if (eicr & IXGBE_EICR_LSC) {
3126 ixgbe_intr_other_work(ixgbe);
3127 }
3128
3129 return (DDI_INTR_CLAIMED);
3130 }
3131
3132 /*
3133 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
3134 *
3135 * Normal sequence is to try MSI-X; if not successful, try MSI;
3136 * if not successful, try Legacy.
3137 * ixgbe->intr_force can be used to force the sequence to start with
3138 * any of the 3 types.
3139 * If MSI-X is not used, the number of tx/rx rings is forced to 1.
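 *
 * For example, setting intr_force to 2 (IXGBE_INTR_MSI) makes the
 * MSI-X branch below ineligible, so allocation starts with MSI and
 * may still fall back to Legacy.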
3140 */ 3141 static int 3142 ixgbe_alloc_intrs(ixgbe_t *ixgbe) 3143 { 3144 dev_info_t *devinfo; 3145 int intr_types; 3146 int rc; 3147 3148 devinfo = ixgbe->dip; 3149 3150 /* 3151 * Get supported interrupt types 3152 */ 3153 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 3154 3155 if (rc != DDI_SUCCESS) { 3156 ixgbe_log(ixgbe, 3157 "Get supported interrupt types failed: %d", rc); 3158 return (IXGBE_FAILURE); 3159 } 3160 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types); 3161 3162 ixgbe->intr_type = 0; 3163 3164 /* 3165 * Install MSI-X interrupts 3166 */ 3167 if ((intr_types & DDI_INTR_TYPE_MSIX) && 3168 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) { 3169 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX); 3170 if (rc == IXGBE_SUCCESS) 3171 return (IXGBE_SUCCESS); 3172 3173 ixgbe_log(ixgbe, 3174 "Allocate MSI-X failed, trying MSI interrupts..."); 3175 } 3176 3177 /* 3178 * MSI-X not used, force rings to 1 3179 */ 3180 ixgbe->num_rx_rings = 1; 3181 ixgbe->num_tx_rings = 1; 3182 ixgbe_log(ixgbe, 3183 "MSI-X not used, force rx and tx queue number to 1"); 3184 3185 /* 3186 * Install MSI interrupts 3187 */ 3188 if ((intr_types & DDI_INTR_TYPE_MSI) && 3189 (ixgbe->intr_force <= IXGBE_INTR_MSI)) { 3190 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI); 3191 if (rc == IXGBE_SUCCESS) 3192 return (IXGBE_SUCCESS); 3193 3194 ixgbe_log(ixgbe, 3195 "Allocate MSI failed, trying Legacy interrupts..."); 3196 } 3197 3198 /* 3199 * Install legacy interrupts 3200 */ 3201 if (intr_types & DDI_INTR_TYPE_FIXED) { 3202 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED); 3203 if (rc == IXGBE_SUCCESS) 3204 return (IXGBE_SUCCESS); 3205 3206 ixgbe_log(ixgbe, 3207 "Allocate Legacy interrupts failed"); 3208 } 3209 3210 /* 3211 * If none of the 3 types succeeded, return failure 3212 */ 3213 return (IXGBE_FAILURE); 3214 } 3215 3216 /* 3217 * ixgbe_alloc_intr_handles - Allocate interrupt handles. 3218 * 3219 * For legacy and MSI, only 1 handle is needed. For MSI-X, 3220 * if fewer than 2 handles are available, return failure. 3221 * Upon success, this sets the number of Rx rings to a number that 3222 * matches the handles available for Rx interrupts. 3223 */ 3224 static int 3225 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type) 3226 { 3227 dev_info_t *devinfo; 3228 int request, count, avail, actual; 3229 int rx_rings, minimum; 3230 int rc; 3231 3232 devinfo = ixgbe->dip; 3233 3234 /* 3235 * Currently only 1 tx ring is supported. More tx rings 3236 * will be supported with future enhancement. 
3237 */ 3238 if (ixgbe->num_tx_rings > 1) { 3239 ixgbe->num_tx_rings = 1; 3240 ixgbe_log(ixgbe, 3241 "Use only 1 MSI-X vector for tx, " 3242 "force tx queue number to 1"); 3243 } 3244 3245 switch (intr_type) { 3246 case DDI_INTR_TYPE_FIXED: 3247 request = 1; /* Request 1 legacy interrupt handle */ 3248 minimum = 1; 3249 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy"); 3250 break; 3251 3252 case DDI_INTR_TYPE_MSI: 3253 request = 1; /* Request 1 MSI interrupt handle */ 3254 minimum = 1; 3255 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI"); 3256 break; 3257 3258 case DDI_INTR_TYPE_MSIX: 3259 /* 3260 * Best number of vectors for the adapter is 3261 * # rx rings + # tx rings + 1 for other 3262 * But currently we only support number of vectors of 3263 * # rx rings + 1 for tx & other 3264 */ 3265 request = ixgbe->num_rx_rings + 1; 3266 minimum = 2; 3267 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X"); 3268 break; 3269 3270 default: 3271 ixgbe_log(ixgbe, 3272 "invalid call to ixgbe_alloc_intr_handles(): %d\n", 3273 intr_type); 3274 return (IXGBE_FAILURE); 3275 } 3276 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d", 3277 request, minimum); 3278 3279 /* 3280 * Get number of supported interrupts 3281 */ 3282 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 3283 if ((rc != DDI_SUCCESS) || (count < minimum)) { 3284 ixgbe_log(ixgbe, 3285 "Get interrupt number failed. Return: %d, count: %d", 3286 rc, count); 3287 return (IXGBE_FAILURE); 3288 } 3289 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count); 3290 3291 /* 3292 * Get number of available interrupts 3293 */ 3294 rc = ddi_intr_get_navail(devinfo, intr_type, &avail); 3295 if ((rc != DDI_SUCCESS) || (avail < minimum)) { 3296 ixgbe_log(ixgbe, 3297 "Get interrupt available number failed. " 3298 "Return: %d, available: %d", rc, avail); 3299 return (IXGBE_FAILURE); 3300 } 3301 IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail); 3302 3303 if (avail < request) { 3304 ixgbe_log(ixgbe, "Request %d handles, %d available", 3305 request, avail); 3306 request = avail; 3307 } 3308 3309 actual = 0; 3310 ixgbe->intr_cnt = 0; 3311 3312 /* 3313 * Allocate an array of interrupt handles 3314 */ 3315 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t); 3316 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP); 3317 3318 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0, 3319 request, &actual, DDI_INTR_ALLOC_NORMAL); 3320 if (rc != DDI_SUCCESS) { 3321 ixgbe_log(ixgbe, "Allocate interrupts failed. " 3322 "return: %d, request: %d, actual: %d", 3323 rc, request, actual); 3324 goto alloc_handle_fail; 3325 } 3326 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual); 3327 3328 ixgbe->intr_cnt = actual; 3329 3330 /* 3331 * Now we know the actual number of vectors. Here we assume that 3332 * tx and other will share 1 vector and all remaining (must be at 3333 * least 1 remaining) will be used for rx. 
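 *
 * A worked example (illustrative): with 4 rx rings configured, the
 * MSI-X request above is 4 + 1 = 5 vectors; if ddi_intr_alloc()
 * comes back with actual == 3, then rx gets 3 - 1 = 2 vectors and
 * num_rx_rings is reduced to 2 below.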
3334 */ 3335 if (actual < minimum) { 3336 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d", 3337 actual); 3338 goto alloc_handle_fail; 3339 } 3340 3341 /* 3342 * For MSI-X, actual might force us to reduce number of rx rings 3343 */ 3344 if (intr_type == DDI_INTR_TYPE_MSIX) { 3345 rx_rings = actual - 1; 3346 if (rx_rings < ixgbe->num_rx_rings) { 3347 ixgbe_log(ixgbe, 3348 "MSI-X vectors force Rx queue number to %d", 3349 rx_rings); 3350 ixgbe->num_rx_rings = rx_rings; 3351 } 3352 } 3353 3354 /* 3355 * Get priority for first vector, assume remaining are all the same 3356 */ 3357 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 3358 if (rc != DDI_SUCCESS) { 3359 ixgbe_log(ixgbe, 3360 "Get interrupt priority failed: %d", rc); 3361 goto alloc_handle_fail; 3362 } 3363 3364 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 3365 if (rc != DDI_SUCCESS) { 3366 ixgbe_log(ixgbe, 3367 "Get interrupt cap failed: %d", rc); 3368 goto alloc_handle_fail; 3369 } 3370 3371 ixgbe->intr_type = intr_type; 3372 3373 return (IXGBE_SUCCESS); 3374 3375 alloc_handle_fail: 3376 ixgbe_rem_intrs(ixgbe); 3377 3378 return (IXGBE_FAILURE); 3379 } 3380 3381 /* 3382 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type. 3383 * 3384 * Before adding the interrupt handlers, the interrupt vectors have 3385 * been allocated, and the rx/tx rings have also been allocated. 3386 */ 3387 static int 3388 ixgbe_add_intr_handlers(ixgbe_t *ixgbe) 3389 { 3390 ixgbe_rx_ring_t *rx_ring; 3391 int vector; 3392 int rc; 3393 int i; 3394 3395 vector = 0; 3396 3397 switch (ixgbe->intr_type) { 3398 case DDI_INTR_TYPE_MSIX: 3399 /* 3400 * Add interrupt handler for tx + other 3401 */ 3402 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3403 (ddi_intr_handler_t *)ixgbe_intr_tx_other, 3404 (void *)ixgbe, NULL); 3405 if (rc != DDI_SUCCESS) { 3406 ixgbe_log(ixgbe, 3407 "Add tx/other interrupt handler failed: %d", rc); 3408 return (IXGBE_FAILURE); 3409 } 3410 vector++; 3411 3412 /* 3413 * Add interrupt handler for each rx ring 3414 */ 3415 for (i = 0; i < ixgbe->num_rx_rings; i++) { 3416 rx_ring = &ixgbe->rx_rings[i]; 3417 3418 /* 3419 * install pointer to vect_map[vector] 3420 */ 3421 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3422 (ddi_intr_handler_t *)ixgbe_intr_rx, 3423 (void *)&ixgbe->vect_map[vector], NULL); 3424 3425 if (rc != DDI_SUCCESS) { 3426 ixgbe_log(ixgbe, 3427 "Add rx interrupt handler failed. 
" 3428 "return: %d, rx ring: %d", rc, i); 3429 for (vector--; vector >= 0; vector--) { 3430 (void) ddi_intr_remove_handler( 3431 ixgbe->htable[vector]); 3432 } 3433 return (IXGBE_FAILURE); 3434 } 3435 3436 rx_ring->intr_vector = vector; 3437 3438 vector++; 3439 } 3440 break; 3441 3442 case DDI_INTR_TYPE_MSI: 3443 /* 3444 * Add interrupt handlers for the only vector 3445 */ 3446 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3447 (ddi_intr_handler_t *)ixgbe_intr_msi, 3448 (void *)ixgbe, NULL); 3449 3450 if (rc != DDI_SUCCESS) { 3451 ixgbe_log(ixgbe, 3452 "Add MSI interrupt handler failed: %d", rc); 3453 return (IXGBE_FAILURE); 3454 } 3455 3456 rx_ring = &ixgbe->rx_rings[0]; 3457 rx_ring->intr_vector = vector; 3458 3459 vector++; 3460 break; 3461 3462 case DDI_INTR_TYPE_FIXED: 3463 /* 3464 * Add interrupt handlers for the only vector 3465 */ 3466 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3467 (ddi_intr_handler_t *)ixgbe_intr_legacy, 3468 (void *)ixgbe, NULL); 3469 3470 if (rc != DDI_SUCCESS) { 3471 ixgbe_log(ixgbe, 3472 "Add legacy interrupt handler failed: %d", rc); 3473 return (IXGBE_FAILURE); 3474 } 3475 3476 rx_ring = &ixgbe->rx_rings[0]; 3477 rx_ring->intr_vector = vector; 3478 3479 vector++; 3480 break; 3481 3482 default: 3483 return (IXGBE_FAILURE); 3484 } 3485 3486 ASSERT(vector == ixgbe->intr_cnt); 3487 3488 return (IXGBE_SUCCESS); 3489 } 3490 3491 #pragma inline(ixgbe_map_rxring_to_vector) 3492 /* 3493 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector. 3494 */ 3495 static void 3496 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx) 3497 { 3498 ixgbe->vect_map[v_idx].ixgbe = ixgbe; 3499 3500 /* 3501 * Set bit in map 3502 */ 3503 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 3504 3505 /* 3506 * Count bits set 3507 */ 3508 ixgbe->vect_map[v_idx].rxr_cnt++; 3509 3510 /* 3511 * Remember bit position 3512 */ 3513 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx; 3514 } 3515 3516 #pragma inline(ixgbe_map_txring_to_vector) 3517 /* 3518 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector. 3519 */ 3520 static void 3521 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx) 3522 { 3523 ixgbe->vect_map[v_idx].ixgbe = ixgbe; 3524 3525 /* 3526 * Set bit in map 3527 */ 3528 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx); 3529 3530 /* 3531 * Count bits set 3532 */ 3533 ixgbe->vect_map[v_idx].txr_cnt++; 3534 3535 /* 3536 * Remember bit position 3537 */ 3538 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx; 3539 } 3540 3541 /* 3542 * ixgbe_set_ivar - Set the given entry in the given interrupt vector 3543 * allocation register (IVAR). 3544 */ 3545 static void 3546 ixgbe_set_ivar(ixgbe_t *ixgbe, uint16_t int_alloc_entry, uint8_t msix_vector) 3547 { 3548 struct ixgbe_hw *hw = &ixgbe->hw; 3549 u32 ivar, index; 3550 3551 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 3552 index = (int_alloc_entry >> 2) & 0x1F; 3553 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3554 ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3))); 3555 ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3))); 3556 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 3557 } 3558 3559 /* 3560 * ixgbe_map_rings_to_vectors - Map descriptor rings to interrupt vectors. 3561 * 3562 * For msi-x, this currently implements only the scheme which is 3563 * 1 vector for tx + other, 1 vector for each rx ring. 
3564 */ 3565 static int 3566 ixgbe_map_rings_to_vectors(ixgbe_t *ixgbe) 3567 { 3568 int i, vector = 0; 3569 int vect_remain = ixgbe->intr_cnt; 3570 3571 /* initialize vector map */ 3572 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 3573 3574 /* 3575 * non-MSI-X case is very simple: all interrupts on vector 0 3576 */ 3577 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 3578 ixgbe_map_rxring_to_vector(ixgbe, 0, 0); 3579 ixgbe_map_txring_to_vector(ixgbe, 0, 0); 3580 return (IXGBE_SUCCESS); 3581 } 3582 3583 /* 3584 * Ring/vector mapping for MSI-X 3585 */ 3586 3587 /* 3588 * Map vector 0 to tx 3589 */ 3590 ixgbe_map_txring_to_vector(ixgbe, 0, vector++); 3591 vect_remain--; 3592 3593 /* 3594 * Map remaining vectors to rx rings 3595 */ 3596 for (i = 0; i < vect_remain; i++) { 3597 ixgbe_map_rxring_to_vector(ixgbe, i, vector++); 3598 } 3599 3600 return (IXGBE_SUCCESS); 3601 } 3602 3603 /* 3604 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s). 3605 * 3606 * This relies on queue/vector mapping already set up in the 3607 * vect_map[] structures 3608 */ 3609 static void 3610 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe) 3611 { 3612 struct ixgbe_hw *hw = &ixgbe->hw; 3613 ixgbe_ring_vector_t *vect; /* vector bitmap */ 3614 int r_idx; /* ring index */ 3615 int v_idx; /* vector index */ 3616 3617 /* 3618 * Clear any previous entries 3619 */ 3620 for (v_idx = 0; v_idx < IXGBE_IVAR_REG_NUM; v_idx++) 3621 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 3622 3623 /* 3624 * "Other" is always on vector 0 3625 */ 3626 ixgbe_set_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0); 3627 3628 /* 3629 * For each interrupt vector, populate the IVAR table 3630 */ 3631 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) { 3632 vect = &ixgbe->vect_map[v_idx]; 3633 3634 /* 3635 * For each rx ring bit set 3636 */ 3637 r_idx = bt_getlowbit(vect->rx_map, 0, 3638 (ixgbe->num_rx_rings - 1)); 3639 3640 while (r_idx >= 0) { 3641 ixgbe_set_ivar(ixgbe, IXGBE_IVAR_RX_QUEUE(r_idx), 3642 v_idx); 3643 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 3644 (ixgbe->num_rx_rings - 1)); 3645 } 3646 3647 /* 3648 * For each tx ring bit set 3649 */ 3650 r_idx = bt_getlowbit(vect->tx_map, 0, 3651 (ixgbe->num_tx_rings - 1)); 3652 3653 while (r_idx >= 0) { 3654 ixgbe_set_ivar(ixgbe, IXGBE_IVAR_TX_QUEUE(r_idx), 3655 v_idx); 3656 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 3657 (ixgbe->num_tx_rings - 1)); 3658 } 3659 } 3660 } 3661 3662 /* 3663 * ixgbe_rem_intr_handlers - Remove the interrupt handlers. 3664 */ 3665 static void 3666 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe) 3667 { 3668 int i; 3669 int rc; 3670 3671 for (i = 0; i < ixgbe->intr_cnt; i++) { 3672 rc = ddi_intr_remove_handler(ixgbe->htable[i]); 3673 if (rc != DDI_SUCCESS) { 3674 IXGBE_DEBUGLOG_1(ixgbe, 3675 "Remove intr handler failed: %d", rc); 3676 } 3677 } 3678 } 3679 3680 /* 3681 * ixgbe_rem_intrs - Remove the allocated interrupts. 3682 */ 3683 static void 3684 ixgbe_rem_intrs(ixgbe_t *ixgbe) 3685 { 3686 int i; 3687 int rc; 3688 3689 for (i = 0; i < ixgbe->intr_cnt; i++) { 3690 rc = ddi_intr_free(ixgbe->htable[i]); 3691 if (rc != DDI_SUCCESS) { 3692 IXGBE_DEBUGLOG_1(ixgbe, 3693 "Free intr failed: %d", rc); 3694 } 3695 } 3696 3697 kmem_free(ixgbe->htable, ixgbe->intr_size); 3698 ixgbe->htable = NULL; 3699 } 3700 3701 /* 3702 * ixgbe_enable_intrs - Enable all the ddi interrupts. 
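 *
 * If the interrupt capability includes DDI_INTR_FLAG_BLOCK
 * (typically the case for MSI/MSI-X), the whole block of handles
 * is enabled with a single ddi_intr_block_enable() call; otherwise
 * each handle is enabled individually with ddi_intr_enable().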
3703 */ 3704 static int 3705 ixgbe_enable_intrs(ixgbe_t *ixgbe) 3706 { 3707 int i; 3708 int rc; 3709 3710 /* 3711 * Enable interrupts 3712 */ 3713 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 3714 /* 3715 * Call ddi_intr_block_enable() for MSI 3716 */ 3717 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt); 3718 if (rc != DDI_SUCCESS) { 3719 ixgbe_log(ixgbe, 3720 "Enable block intr failed: %d", rc); 3721 return (IXGBE_FAILURE); 3722 } 3723 } else { 3724 /* 3725 * Call ddi_intr_enable() for Legacy/MSI non block enable 3726 */ 3727 for (i = 0; i < ixgbe->intr_cnt; i++) { 3728 rc = ddi_intr_enable(ixgbe->htable[i]); 3729 if (rc != DDI_SUCCESS) { 3730 ixgbe_log(ixgbe, 3731 "Enable intr failed: %d", rc); 3732 return (IXGBE_FAILURE); 3733 } 3734 } 3735 } 3736 3737 return (IXGBE_SUCCESS); 3738 } 3739 3740 /* 3741 * ixgbe_disable_intrs - Disable all the interrupts. 3742 */ 3743 static int 3744 ixgbe_disable_intrs(ixgbe_t *ixgbe) 3745 { 3746 int i; 3747 int rc; 3748 3749 /* 3750 * Disable all interrupts 3751 */ 3752 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 3753 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt); 3754 if (rc != DDI_SUCCESS) { 3755 ixgbe_log(ixgbe, 3756 "Disable block intr failed: %d", rc); 3757 return (IXGBE_FAILURE); 3758 } 3759 } else { 3760 for (i = 0; i < ixgbe->intr_cnt; i++) { 3761 rc = ddi_intr_disable(ixgbe->htable[i]); 3762 if (rc != DDI_SUCCESS) { 3763 ixgbe_log(ixgbe, 3764 "Disable intr failed: %d", rc); 3765 return (IXGBE_FAILURE); 3766 } 3767 } 3768 } 3769 3770 return (IXGBE_SUCCESS); 3771 } 3772 3773 /* 3774 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware. 3775 */ 3776 static void 3777 ixgbe_get_hw_state(ixgbe_t *ixgbe) 3778 { 3779 struct ixgbe_hw *hw = &ixgbe->hw; 3780 uint32_t links; 3781 uint32_t pcs1g_anlp = 0; 3782 uint32_t pcs1g_ana = 0; 3783 3784 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3785 ixgbe->param_lp_1000fdx_cap = 0; 3786 ixgbe->param_lp_100fdx_cap = 0; 3787 3788 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 3789 if (links & IXGBE_LINKS_PCS_1G_EN) { 3790 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 3791 pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 3792 3793 ixgbe->param_lp_1000fdx_cap = 3794 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 3795 ixgbe->param_lp_100fdx_cap = 3796 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 3797 } 3798 3799 ixgbe->param_1000fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0; 3800 ixgbe->param_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0; 3801 } 3802 3803 /* 3804 * ixgbe_get_driver_control - Notify that driver is in control of device. 3805 */ 3806 static void 3807 ixgbe_get_driver_control(struct ixgbe_hw *hw) 3808 { 3809 uint32_t ctrl_ext; 3810 3811 /* 3812 * Notify firmware that driver is in control of device 3813 */ 3814 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3815 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 3816 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3817 } 3818 3819 /* 3820 * ixgbe_release_driver_control - Notify that driver is no longer in control 3821 * of device. 3822 */ 3823 static void 3824 ixgbe_release_driver_control(struct ixgbe_hw *hw) 3825 { 3826 uint32_t ctrl_ext; 3827 3828 /* 3829 * Notify firmware that driver is no longer in control of device 3830 */ 3831 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3832 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 3833 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3834 } 3835 3836 /* 3837 * ixgbe_atomic_reserve - Atomic decrease operation. 
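 *
 * Returns the new count on success, or -1 (leaving *count_p
 * unchanged) if fewer than n are available. A minimal usage
 * sketch (illustrative; using tbd_free as the counter here is
 * hypothetical):
 *
 *	if (ixgbe_atomic_reserve(&tx_ring->tbd_free, n) < 0)
 *		return (IXGBE_FAILURE);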
3838 */ 3839 int 3840 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n) 3841 { 3842 uint32_t oldval; 3843 uint32_t newval; 3844 3845 /* 3846 * ATOMICALLY 3847 */ 3848 do { 3849 oldval = *count_p; 3850 if (oldval < n) 3851 return (-1); 3852 newval = oldval - n; 3853 } while (atomic_cas_32(count_p, oldval, newval) != oldval); 3854 3855 return (newval); 3856 } 3857 3858 /* 3859 * ixgbe_mc_table_itr - Traverse the entries in the multicast table. 3860 */ 3861 static uint8_t * 3862 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq) 3863 { 3864 _NOTE(ARGUNUSED(hw)); 3865 _NOTE(ARGUNUSED(vmdq)); 3866 uint8_t *addr = *upd_ptr; 3867 uint8_t *new_ptr; 3868 3869 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 3870 *upd_ptr = new_ptr; 3871 return (addr); 3872 } 3873 3874 /* 3875 * FMA support 3876 */ 3877 int 3878 ixgbe_check_acc_handle(ddi_acc_handle_t handle) 3879 { 3880 ddi_fm_error_t de; 3881 3882 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 3883 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 3884 return (de.fme_status); 3885 } 3886 3887 int 3888 ixgbe_check_dma_handle(ddi_dma_handle_t handle) 3889 { 3890 ddi_fm_error_t de; 3891 3892 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 3893 return (de.fme_status); 3894 } 3895 3896 /* 3897 * ixgbe_fm_error_cb - The IO fault service error handling callback function. 3898 */ 3899 static int 3900 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 3901 { 3902 _NOTE(ARGUNUSED(impl_data)); 3903 /* 3904 * as the driver can always deal with an error in any dma or 3905 * access handle, we can just return the fme_status value. 3906 */ 3907 pci_ereport_post(dip, err, NULL); 3908 return (err->fme_status); 3909 } 3910 3911 static void 3912 ixgbe_fm_init(ixgbe_t *ixgbe) 3913 { 3914 ddi_iblock_cookie_t iblk; 3915 int fma_acc_flag, fma_dma_flag; 3916 3917 /* 3918 * Only register with IO Fault Services if we have some capability 3919 */ 3920 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 3921 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 3922 fma_acc_flag = 1; 3923 } else { 3924 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 3925 fma_acc_flag = 0; 3926 } 3927 3928 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 3929 fma_dma_flag = 1; 3930 } else { 3931 fma_dma_flag = 0; 3932 } 3933 3934 ixgbe_set_fma_flags(fma_acc_flag, fma_dma_flag); 3935 3936 if (ixgbe->fm_capabilities) { 3937 3938 /* 3939 * Register capabilities with IO Fault Services 3940 */ 3941 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk); 3942 3943 /* 3944 * Initialize pci ereport capabilities if ereport capable 3945 */ 3946 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 3947 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 3948 pci_ereport_setup(ixgbe->dip); 3949 3950 /* 3951 * Register error callback if error callback capable 3952 */ 3953 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 3954 ddi_fm_handler_register(ixgbe->dip, 3955 ixgbe_fm_error_cb, (void*) ixgbe); 3956 } 3957 } 3958 3959 static void 3960 ixgbe_fm_fini(ixgbe_t *ixgbe) 3961 { 3962 /* 3963 * Only unregister FMA capabilities if they are registered 3964 */ 3965 if (ixgbe->fm_capabilities) { 3966 3967 /* 3968 * Release any resources allocated by pci_ereport_setup() 3969 */ 3970 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 3971 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 3972 pci_ereport_teardown(ixgbe->dip); 3973 3974 /* 3975 * Un-register error callback if error callback capable 3976 */ 3977 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 3978 
ddi_fm_handler_unregister(ixgbe->dip); 3979 3980 /* 3981 * Unregister from IO Fault Service 3982 */ 3983 ddi_fm_fini(ixgbe->dip); 3984 } 3985 } 3986 3987 void 3988 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail) 3989 { 3990 uint64_t ena; 3991 char buf[FM_MAX_CLASS]; 3992 3993 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 3994 ena = fm_ena_generate(0, FM_ENA_FMT1); 3995 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) { 3996 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP, 3997 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 3998 } 3999 } 4000
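/*
 * A typical (illustrative) use of ixgbe_fm_ereport() by callers is to
 * post a device ereport and then report the service impact:
 *
 *	ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
 *	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
 */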