/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms of the CDDL.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "ixgbe_sw.h"

static char ident[] = "Intel 10Gb Ethernet 1.0.2";

/*
 * Local function prototypes
 */
static int ixgbe_register_mac(ixgbe_t *);
static int ixgbe_identify_hardware(ixgbe_t *);
static int ixgbe_regs_map(ixgbe_t *);
static void ixgbe_init_properties(ixgbe_t *);
static int ixgbe_init_driver_settings(ixgbe_t *);
static void ixgbe_init_locks(ixgbe_t *);
static void ixgbe_destroy_locks(ixgbe_t *);
static int ixgbe_init(ixgbe_t *);
static int ixgbe_chip_start(ixgbe_t *);
static void ixgbe_chip_stop(ixgbe_t *);
static int ixgbe_reset(ixgbe_t *);
static void ixgbe_tx_clean(ixgbe_t *);
static boolean_t ixgbe_tx_drain(ixgbe_t *);
static boolean_t ixgbe_rx_drain(ixgbe_t *);
static int ixgbe_alloc_rings(ixgbe_t *);
static int ixgbe_init_rings(ixgbe_t *);
static void ixgbe_free_rings(ixgbe_t *);
static void ixgbe_fini_rings(ixgbe_t *);
static void ixgbe_setup_rings(ixgbe_t *);
static void ixgbe_setup_rx(ixgbe_t *);
static void ixgbe_setup_tx(ixgbe_t *);
static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
static void ixgbe_setup_rss(ixgbe_t *);
static void ixgbe_init_unicst(ixgbe_t *);
static void ixgbe_setup_multicst(ixgbe_t *);
static void ixgbe_get_hw_state(ixgbe_t *);
static void ixgbe_get_conf(ixgbe_t *);
static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
static boolean_t ixgbe_driver_link_check(ixgbe_t *);
static void ixgbe_local_timer(void *);
static void ixgbe_arm_watchdog_timer(ixgbe_t *);
static void ixgbe_start_watchdog_timer(ixgbe_t *);
static void ixgbe_restart_watchdog_timer(ixgbe_t *);
static void ixgbe_stop_watchdog_timer(ixgbe_t *);
static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
static boolean_t is_valid_mac_addr(uint8_t *);
static boolean_t ixgbe_stall_check(ixgbe_t *);
static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
static boolean_t ixgbe_find_mac_address(ixgbe_t *);
static int ixgbe_alloc_intrs(ixgbe_t *);
static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
static int ixgbe_add_intr_handlers(ixgbe_t *);
static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
static void ixgbe_set_ivar(ixgbe_t *, uint16_t, uint8_t);
static int ixgbe_map_rings_to_vectors(ixgbe_t *);
static void ixgbe_setup_adapter_vector(ixgbe_t *);
static void ixgbe_rem_intr_handlers(ixgbe_t *);
static void ixgbe_rem_intrs(ixgbe_t *);
static int ixgbe_enable_intrs(ixgbe_t *);
static int ixgbe_disable_intrs(ixgbe_t *);
static uint_t ixgbe_intr_legacy(void *, void *);
static uint_t ixgbe_intr_msi(void *, void *);
static uint_t ixgbe_intr_rx(void *, void *);
static uint_t ixgbe_intr_tx_other(void *, void *);
static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
static void ixgbe_intr_other_work(ixgbe_t *);
static void ixgbe_get_driver_control(struct ixgbe_hw *);
static void ixgbe_release_driver_control(struct ixgbe_hw *);

static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
static int ixgbe_resume(dev_info_t *);
static int ixgbe_suspend(dev_info_t *);
static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);

static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void ixgbe_fm_init(ixgbe_t *);
static void ixgbe_fm_fini(ixgbe_t *);

static struct cb_ops ixgbe_cb_ops = {
    nulldev,		/* cb_open */
    nulldev,		/* cb_close */
    nodev,		/* cb_strategy */
    nodev,		/* cb_print */
    nodev,		/* cb_dump */
    nodev,		/* cb_read */
    nodev,		/* cb_write */
    nodev,		/* cb_ioctl */
    nodev,		/* cb_devmap */
    nodev,		/* cb_mmap */
    nodev,		/* cb_segmap */
    nochpoll,		/* cb_chpoll */
    ddi_prop_op,	/* cb_prop_op */
    NULL,		/* cb_stream */
    D_MP | D_HOTPLUG,	/* cb_flag */
    CB_REV,		/* cb_rev */
    nodev,		/* cb_aread */
    nodev		/* cb_awrite */
};

static struct dev_ops ixgbe_dev_ops = {
    DEVO_REV,		/* devo_rev */
    0,			/* devo_refcnt */
    NULL,		/* devo_getinfo */
    nulldev,		/* devo_identify */
    nulldev,		/* devo_probe */
    ixgbe_attach,	/* devo_attach */
    ixgbe_detach,	/* devo_detach */
    nodev,		/* devo_reset */
    &ixgbe_cb_ops,	/* devo_cb_ops */
    NULL,		/* devo_bus_ops */
    ddi_power		/* devo_power */
};

static struct modldrv ixgbe_modldrv = {
    &mod_driverops,	/* Type of module.  This one is a driver */
    ident,		/* Description string */
    &ixgbe_dev_ops	/* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
    MODREV_1, &ixgbe_modldrv, NULL
};

/*
 * Access attributes for register mapping
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
    DDI_DEVICE_ATTR_V0,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

/*
 * Loopback property
 */
static lb_property_t lb_normal = {
    normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
    internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

#define	IXGBE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t ixgbe_m_callbacks = {
    IXGBE_M_CALLBACK_FLAGS,
    ixgbe_m_stat,
    ixgbe_m_start,
    ixgbe_m_stop,
    ixgbe_m_promisc,
    ixgbe_m_multicst,
    ixgbe_m_unicst,
    ixgbe_m_tx,
    NULL,
    ixgbe_m_ioctl,
    ixgbe_m_getcapab
};
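
/*
 * Note on the callback table above: IXGBE_M_CALLBACK_FLAGS advertises to
 * the GLD framework which of the optional entry points are populated
 * (here the ioctl and capability callbacks).  The NULL slot between the
 * tx and ioctl entries corresponds to the optional resources callback in
 * the mac_callbacks_t layout of this GLDv3 vintage, which this driver
 * does not provide.  (Field naming is an assumption based on the mac(9E)
 * interface of that era.)
 */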
/*
 * Module Initialization Functions.
 */

int
_init(void)
{
    int status;

    mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);

    status = mod_install(&ixgbe_modlinkage);

    if (status != DDI_SUCCESS) {
        mac_fini_ops(&ixgbe_dev_ops);
    }

    return (status);
}

int
_fini(void)
{
    int status;

    status = mod_remove(&ixgbe_modlinkage);

    if (status == DDI_SUCCESS) {
        mac_fini_ops(&ixgbe_dev_ops);
    }

    return (status);
}

int
_info(struct modinfo *modinfop)
{
    int status;

    status = mod_info(&ixgbe_modlinkage, modinfop);

    return (status);
}

/*
 * ixgbe_attach - Driver attach.
 *
 * This function is the device specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
    ixgbe_t *ixgbe;
    struct ixgbe_osdep *osdep;
    struct ixgbe_hw *hw;
    int instance;

    /*
     * Check the command and perform corresponding operations
     */
    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_RESUME:
        return (ixgbe_resume(devinfo));

    case DDI_ATTACH:
        break;
    }

    /* Get the device instance */
    instance = ddi_get_instance(devinfo);

    /* Allocate memory for the instance data structure */
    ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

    ixgbe->dip = devinfo;
    ixgbe->instance = instance;

    hw = &ixgbe->hw;
    osdep = &ixgbe->osdep;
    hw->back = osdep;
    osdep->ixgbe = ixgbe;

    /* Attach the instance pointer to the dev_info data structure */
    ddi_set_driver_private(devinfo, ixgbe);

    /*
     * Initialize for FMA support
     */
    ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
        0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
        DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
    ixgbe_fm_init(ixgbe);
    ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;

    /*
     * Map PCI config space registers
     */
    if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to map PCI configuration space");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

    /*
     * Identify the chipset family
     */
    if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to identify hardware");
        goto attach_fail;
    }

    /*
     * Map device registers
     */
    if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to map device registers");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

    /*
     * Initialize driver properties
     */
    ixgbe_init_properties(ixgbe);
    ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

    /*
     * Allocate interrupts
     */
    if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to allocate interrupts");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
    /*
     * Allocate rx/tx rings based on the ring numbers.
     * The actual numbers of rx/tx rings are decided by the number of
     * allocated interrupt vectors, so we should allocate the rings after
     * interrupts are allocated.
     */
    if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

    /*
     * Map rings to interrupt vectors
     */
    if (ixgbe_map_rings_to_vectors(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to map rings to vectors");
        goto attach_fail;
    }

    /*
     * Add interrupt handlers
     */
    if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to add interrupt handlers");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

    /*
     * Initialize driver settings
     */
    if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize driver settings");
        goto attach_fail;
    }

    /*
     * Initialize mutexes for this device.
     * Do this before enabling the interrupt handler and registering
     * the softint, to avoid a condition where the interrupt handler
     * could try to use an uninitialized mutex.
     */
    ixgbe_init_locks(ixgbe);
    ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

    /*
     * Initialize chipset hardware
     */
    if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize adapter");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

    if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
        goto attach_fail;
    }

    /*
     * Initialize DMA and hardware settings for rx/tx rings
     */
    if (ixgbe_init_rings(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize rings");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_INIT_RINGS;

    /*
     * Initialize statistics
     */
    if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize statistics");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

    /*
     * Initialize NDD parameters
     */
    if (ixgbe_nd_init(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize ndd");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_NDD;

    /*
     * Register the driver to the MAC
     */
    if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to register MAC");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

    /*
     * Now that mutex locks are initialized, and the chip is also
     * initialized, enable interrupts.
     */
    if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

    ixgbe->ixgbe_state |= IXGBE_INITIALIZED;

    return (DDI_SUCCESS);

attach_fail:
    ixgbe_unconfigure(devinfo, ixgbe);
    return (DDI_FAILURE);
}
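
/*
 * A note on the attach_progress bits set above: each ATTACH_PROGRESS_*
 * flag records one successfully completed attach step.  On any failure,
 * attach_fail funnels into ixgbe_unconfigure(), which tests the flags and
 * tears down only the steps that actually completed, so a partially
 * attached instance is unwound safely.
 */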
/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by
 * this driver is freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
    ixgbe_t *ixgbe;

    /*
     * Check detach command
     */
    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_SUSPEND:
        return (ixgbe_suspend(devinfo));

    case DDI_DETACH:
        break;
    }

    /*
     * Get the pointer to the driver private data structure
     */
    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
    if (ixgbe == NULL)
        return (DDI_FAILURE);

    /*
     * Unregister the MAC. If this fails, we have to fail the detach.
     */
    if (mac_unregister(ixgbe->mac_hdl) != 0) {
        ixgbe_error(ixgbe, "Failed to unregister MAC");
        return (DDI_FAILURE);
    }
    ixgbe->attach_progress &= ~ATTACH_PROGRESS_MAC;
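
    /*
     * With the MAC unregistered, the GLD framework can no longer call
     * into the driver's tx/rx entry points, so it is now safe to stop
     * the hardware and release resources below.
     */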
    /*
     * If the device is still running, it needs to be stopped first.
     * This check is necessary because under some specific circumstances,
     * the detach routine can be called without stopping the interface
     * first.
     */
    mutex_enter(&ixgbe->gen_lock);
    if (ixgbe->ixgbe_state & IXGBE_STARTED) {
        ixgbe->ixgbe_state &= ~IXGBE_STARTED;
        ixgbe_stop(ixgbe);
        mutex_exit(&ixgbe->gen_lock);
        /* Disable and stop the watchdog timer */
        ixgbe_disable_watchdog_timer(ixgbe);
    } else
        mutex_exit(&ixgbe->gen_lock);

    /*
     * Check if there are still rx buffers held by the upper layer.
     * If so, fail the detach.
     */
    if (!ixgbe_rx_drain(ixgbe))
        return (DDI_FAILURE);

    /*
     * Do the remaining unconfigure routines
     */
    ixgbe_unconfigure(devinfo, ixgbe);

    return (DDI_SUCCESS);
}

static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
    /*
     * Disable interrupts
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
        (void) ixgbe_disable_intrs(ixgbe);
    }

    /*
     * Unregister MAC
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
        (void) mac_unregister(ixgbe->mac_hdl);
    }

    /*
     * Free ndd parameters
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_NDD) {
        ixgbe_nd_cleanup(ixgbe);
    }

    /*
     * Free statistics
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
        kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
    }

    /*
     * Remove interrupt handlers
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
        ixgbe_rem_intr_handlers(ixgbe);
    }

    /*
     * Remove interrupts
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
        ixgbe_rem_intrs(ixgbe);
    }

    /*
     * Remove driver properties
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
        (void) ddi_prop_remove_all(devinfo);
    }

    /*
     * Release the DMA resources of rx/tx rings
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT_RINGS) {
        ixgbe_fini_rings(ixgbe);
    }

    /*
     * Stop the chipset
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
        mutex_enter(&ixgbe->gen_lock);
        ixgbe_chip_stop(ixgbe);
        mutex_exit(&ixgbe->gen_lock);
    }

    /*
     * Free register handle
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
        if (ixgbe->osdep.reg_handle != NULL)
            ddi_regs_map_free(&ixgbe->osdep.reg_handle);
    }

    /*
     * Free PCI config handle
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
        if (ixgbe->osdep.cfg_handle != NULL)
            pci_config_teardown(&ixgbe->osdep.cfg_handle);
    }

    /*
     * Free locks
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
        ixgbe_destroy_locks(ixgbe);
    }

    /*
     * Free the rx/tx rings
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
        ixgbe_free_rings(ixgbe);
    }

    /*
     * Unregister FMA capabilities
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
        ixgbe_fm_fini(ixgbe);
    }

    /*
     * Free the driver data structure
     */
    kmem_free(ixgbe, sizeof (ixgbe_t));

    ddi_set_driver_private(devinfo, NULL);
}

/*
 * ixgbe_register_mac - Register the driver and its function pointers with
 * the GLD interface.
 */
static int
ixgbe_register_mac(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    mac_register_t *mac;
    int status;

    if ((mac = mac_alloc(MAC_VERSION)) == NULL)
        return (IXGBE_FAILURE);

    mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    mac->m_driver = ixgbe;
    mac->m_dip = ixgbe->dip;
    mac->m_src_addr = hw->mac.addr;
    mac->m_callbacks = &ixgbe_m_callbacks;
    mac->m_min_sdu = 0;
    mac->m_max_sdu = ixgbe->default_mtu;
    mac->m_margin = VLAN_TAGSZ;

    status = mac_register(mac, &ixgbe->mac_hdl);

    mac_free(mac);

    return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
}
/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    struct ixgbe_osdep *osdep = &ixgbe->osdep;

    /*
     * Get the device id
     */
    hw->vendor_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
    hw->device_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
    hw->revision_id =
        pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
    hw->subsystem_device_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
    hw->subsystem_vendor_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_regs_map - Map the device registers.
 */
static int
ixgbe_regs_map(ixgbe_t *ixgbe)
{
    dev_info_t *devinfo = ixgbe->dip;
    struct ixgbe_hw *hw = &ixgbe->hw;
    struct ixgbe_osdep *osdep = &ixgbe->osdep;
    off_t mem_size;

    /*
     * First get the size of device registers to be mapped.
     */
    if (ddi_dev_regsize(devinfo, 1, &mem_size) != DDI_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    /*
     * Call ddi_regs_map_setup() to map registers
     */
    if ((ddi_regs_map_setup(devinfo, 1,
        (caddr_t *)&hw->hw_addr, 0,
        mem_size, &ixgbe_regs_acc_attr,
        &osdep->reg_handle)) != DDI_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_properties - Initialize driver properties.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
    /*
     * Get conf file properties, including link settings,
     * jumbo frames, ring number, descriptor number, etc.
     */
    ixgbe_get_conf(ixgbe);
}

/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be setup during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_tx_ring_t *tx_ring;
    uint32_t rx_size;
    uint32_t tx_size;
    int i;

    /*
     * Initialize chipset specific hardware function pointers
     */
    if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    /*
     * Set rx buffer size
     *
     * The IP header alignment room is counted in the calculation.
     * The rx buffer size is in units of 1K, as required by the
     * chipset hardware.
     */
    rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
    ixgbe->rx_buf_size = ((rx_size >> 10) +
        ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

    /*
     * Set tx buffer size
     */
    tx_size = ixgbe->max_frame_size;
    ixgbe->tx_buf_size = ((tx_size >> 10) +
        ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
    /*
     * Initialize rx/tx rings parameters
     */
    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        rx_ring->index = i;
        rx_ring->ixgbe = ixgbe;

        rx_ring->ring_size = ixgbe->rx_ring_size;
        rx_ring->free_list_size = ixgbe->rx_ring_size;
        rx_ring->copy_thresh = ixgbe->rx_copy_thresh;
        rx_ring->limit_per_intr = ixgbe->rx_limit_per_intr;
    }

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        tx_ring->index = i;
        tx_ring->ixgbe = ixgbe;
        if (ixgbe->tx_head_wb_enable)
            tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
        else
            tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

        tx_ring->ring_size = ixgbe->tx_ring_size;
        tx_ring->free_list_size = ixgbe->tx_ring_size +
            (ixgbe->tx_ring_size >> 1);
        tx_ring->copy_thresh = ixgbe->tx_copy_thresh;
        tx_ring->recycle_thresh = ixgbe->tx_recycle_thresh;
        tx_ring->overload_thresh = ixgbe->tx_overload_thresh;
        tx_ring->resched_thresh = ixgbe->tx_resched_thresh;
    }

    /*
     * Initialize values of interrupt throttling rate
     */
    for (i = 1; i < IXGBE_MAX_RING_VECTOR; i++)
        ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

    /*
     * The initial link state should be "unknown"
     */
    ixgbe->link_state = LINK_STATE_UNKNOWN;

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_locks - Initialize locks.
 */
static void
ixgbe_init_locks(ixgbe_t *ixgbe)
{
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_tx_ring_t *tx_ring;
    int i;

    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        mutex_init(&rx_ring->rx_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&rx_ring->recycle_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
    }

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        mutex_init(&tx_ring->tx_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&tx_ring->recycle_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&tx_ring->tcb_head_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&tx_ring->tcb_tail_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
    }

    mutex_init(&ixgbe->gen_lock, NULL,
        MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

    mutex_init(&ixgbe->watchdog_lock, NULL,
        MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
}
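
/*
 * Lock ordering note: the code paths in this file that take multiple
 * locks (ixgbe_start(), ixgbe_stop(), ixgbe_reset(), ixgbe_init_rings())
 * always acquire gen_lock first, then the rx ring locks in ascending
 * ring order, then the tx ring locks in ascending ring order, and
 * release them in the reverse order.  New code should follow the same
 * hierarchy to avoid deadlocks.
 */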
/*
 * ixgbe_destroy_locks - Destroy locks.
 */
static void
ixgbe_destroy_locks(ixgbe_t *ixgbe)
{
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_tx_ring_t *tx_ring;
    int i;

    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        mutex_destroy(&rx_ring->rx_lock);
        mutex_destroy(&rx_ring->recycle_lock);
    }

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        mutex_destroy(&tx_ring->tx_lock);
        mutex_destroy(&tx_ring->recycle_lock);
        mutex_destroy(&tx_ring->tcb_head_lock);
        mutex_destroy(&tx_ring->tcb_tail_lock);
    }

    mutex_destroy(&ixgbe->gen_lock);
    mutex_destroy(&ixgbe->watchdog_lock);
}

static int
ixgbe_resume(dev_info_t *devinfo)
{
    ixgbe_t *ixgbe;

    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
    if (ixgbe == NULL)
        return (DDI_FAILURE);

    mutex_enter(&ixgbe->gen_lock);

    if (ixgbe->ixgbe_state & IXGBE_STARTED) {
        if (ixgbe_start(ixgbe) != IXGBE_SUCCESS) {
            mutex_exit(&ixgbe->gen_lock);
            return (DDI_FAILURE);
        }

        /*
         * Enable and start the watchdog timer
         */
        ixgbe_enable_watchdog_timer(ixgbe);
    }

    ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;

    mutex_exit(&ixgbe->gen_lock);

    return (DDI_SUCCESS);
}

static int
ixgbe_suspend(dev_info_t *devinfo)
{
    ixgbe_t *ixgbe;

    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
    if (ixgbe == NULL)
        return (DDI_FAILURE);

    mutex_enter(&ixgbe->gen_lock);

    ixgbe->ixgbe_state |= IXGBE_SUSPENDED;

    ixgbe_stop(ixgbe);

    mutex_exit(&ixgbe->gen_lock);

    /*
     * Disable and stop the watchdog timer
     */
    ixgbe_disable_watchdog_timer(ixgbe);

    return (DDI_SUCCESS);
}
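
/*
 * Note that ixgbe_suspend() stops the hardware but deliberately leaves
 * IXGBE_STARTED set in ixgbe_state; ixgbe_resume() keys off that flag to
 * decide whether the interface should be restarted transparently after a
 * DDI_RESUME.
 */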
/*
 * ixgbe_init - Initialize the device.
 */
static int
ixgbe_init(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;

    mutex_enter(&ixgbe->gen_lock);

    /*
     * Reset chipset to put the hardware in a known state
     * before we try to do anything with the eeprom.
     */
    if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto init_fail;
    }

    /*
     * Need to init eeprom before validating the checksum.
     */
    if (ixgbe_init_eeprom_params(hw) < 0) {
        ixgbe_error(ixgbe,
            "Unable to initialize the eeprom interface.");
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto init_fail;
    }

    /*
     * NVM validation
     */
    if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
        /*
         * Some PCI-E parts fail the first check due to
         * the link being in sleep state.  Call it again,
         * if it fails a second time it's a real issue.
         */
        if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
            ixgbe_error(ixgbe,
                "Invalid NVM checksum. Please contact "
                "the vendor to update the NVM.");
            ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
            goto init_fail;
        }
    }

    /*
     * Set up default flow control thresholds - the enable/disable
     * and flow control type are controlled by ixgbe.conf
     */
    hw->fc.high_water = DEFAULT_FCRTH;
    hw->fc.low_water = DEFAULT_FCRTL;
    hw->fc.pause_time = DEFAULT_FCPAUSE;
    hw->fc.send_xon = B_TRUE;

    /*
     * Don't wait for auto-negotiation to complete
     */
    hw->phy.autoneg_wait_to_complete = B_FALSE;

    /*
     * Initialize link settings
     */
    (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);

    /*
     * Initialize the chipset hardware
     */
    if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto init_fail;
    }

    if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
        goto init_fail;
    }
    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        goto init_fail;
    }

    mutex_exit(&ixgbe->gen_lock);
    return (IXGBE_SUCCESS);

init_fail:
    /*
     * Reset PHY
     */
    (void) ixgbe_reset_phy(hw);

    mutex_exit(&ixgbe->gen_lock);
    ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
    return (IXGBE_FAILURE);
}

/*
 * ixgbe_init_rings - Allocate DMA resources for all rx/tx rings and
 * initialize relevant hardware settings.
 */
static int
ixgbe_init_rings(ixgbe_t *ixgbe)
{
    int i;

    /*
     * Allocate buffers for all the rx/tx rings
     */
    if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS)
        return (IXGBE_FAILURE);

    /*
     * Setup the rx/tx rings
     */
    mutex_enter(&ixgbe->gen_lock);

    for (i = 0; i < ixgbe->num_rx_rings; i++)
        mutex_enter(&ixgbe->rx_rings[i].rx_lock);
    for (i = 0; i < ixgbe->num_tx_rings; i++)
        mutex_enter(&ixgbe->tx_rings[i].tx_lock);

    ixgbe_setup_rings(ixgbe);

    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    mutex_exit(&ixgbe->gen_lock);

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_fini_rings - Release DMA resources of all rx/tx rings.
 */
static void
ixgbe_fini_rings(ixgbe_t *ixgbe)
{
    /*
     * Release the DMA/memory resources of rx/tx rings
     */
    ixgbe_free_dma(ixgbe);
}
/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Get the mac address.
     * This function should handle the SPARC case correctly.
     */
    if (!ixgbe_find_mac_address(ixgbe)) {
        ixgbe_error(ixgbe, "Failed to get the mac address");
        return (IXGBE_FAILURE);
    }

    /*
     * Validate the mac address
     */
    (void) ixgbe_init_rx_addrs(hw);
    if (!is_valid_mac_addr(hw->mac.addr)) {
        ixgbe_error(ixgbe, "Invalid mac address");
        return (IXGBE_FAILURE);
    }

    /*
     * Configure/Initialize hardware
     */
    if (ixgbe_init_hw(hw) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize hardware");
        return (IXGBE_FAILURE);
    }

    /*
     * Setup adapter interrupt vectors
     */
    ixgbe_setup_adapter_vector(ixgbe);

    /*
     * Initialize unicast addresses.
     */
    ixgbe_init_unicst(ixgbe);

    /*
     * Setup and initialize the mctable structures.
     */
    ixgbe_setup_multicst(ixgbe);

    /*
     * Set interrupt throttling rate
     */
    for (i = 0; i < ixgbe->intr_cnt; i++)
        IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);

    /*
     * Save the state of the phy
     */
    ixgbe_get_hw_state(ixgbe);

    /*
     * Make sure the driver has control
     */
    ixgbe_get_driver_control(hw);

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_chip_stop - Stop the chipset hardware
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Tell the firmware the driver is no longer in control
     */
    ixgbe_release_driver_control(hw);

    /*
     * Reset the chipset
     */
    (void) ixgbe_reset_hw(hw);

    /*
     * Reset PHY
     */
    (void) ixgbe_reset_phy(hw);
}
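
/*
 * The get/release driver control calls above bracket the period during
 * which the driver owns the adapter: they toggle the DRV_LOAD handshake
 * bit in the CTRL_EXT register (see the implementations of
 * ixgbe_get_driver_control() and ixgbe_release_driver_control()
 * elsewhere in this driver), which tells the firmware whether a host
 * driver is loaded.
 */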
/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
    int i;

    mutex_enter(&ixgbe->gen_lock);

    ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
    ixgbe->ixgbe_state &= ~IXGBE_STARTED;

    /*
     * Disable the adapter interrupts to stop any rx/tx activities
     * before draining pending data and resetting the hardware.
     */
    ixgbe_disable_adapter_interrupts(ixgbe);

    /*
     * Drain the pending transmit packets
     */
    (void) ixgbe_tx_drain(ixgbe);

    for (i = 0; i < ixgbe->num_rx_rings; i++)
        mutex_enter(&ixgbe->rx_rings[i].rx_lock);
    for (i = 0; i < ixgbe->num_tx_rings; i++)
        mutex_enter(&ixgbe->tx_rings[i].tx_lock);

    /*
     * Stop the chipset hardware
     */
    ixgbe_chip_stop(ixgbe);

    /*
     * Clean the pending tx data/resources
     */
    ixgbe_tx_clean(ixgbe);

    /*
     * Start the chipset hardware
     */
    if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto reset_failure;
    }

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        goto reset_failure;
    }

    /*
     * Setup the rx/tx rings
     */
    ixgbe_setup_rings(ixgbe);

    /*
     * Enable adapter interrupts
     * The interrupts must be enabled after the driver state is STARTED
     */
    ixgbe_enable_adapter_interrupts(ixgbe);

    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    ixgbe->ixgbe_state |= IXGBE_STARTED;
    mutex_exit(&ixgbe->gen_lock);

    return (IXGBE_SUCCESS);

reset_failure:
    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    mutex_exit(&ixgbe->gen_lock);

    ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

    return (IXGBE_FAILURE);
}

/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
    ixgbe_tx_ring_t *tx_ring;
    tx_control_block_t *tcb;
    link_list_t pending_list;
    uint32_t desc_num;
    struct ixgbe_hw *hw = &ixgbe->hw;
    int i, j;

    LINK_LIST_INIT(&pending_list);

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];

        mutex_enter(&tx_ring->recycle_lock);

        /*
         * Clean the pending tx data - the pending packets in the
         * work_list that have no chance to be transmitted again.
         *
         * We must ensure the chipset is stopped or the link is down
         * before cleaning the transmit packets.
         */
        desc_num = 0;
        for (j = 0; j < tx_ring->ring_size; j++) {
            tcb = tx_ring->work_list[j];
            if (tcb != NULL) {
                desc_num += tcb->desc_num;

                tx_ring->work_list[j] = NULL;

                ixgbe_free_tcb(tcb);

                LIST_PUSH_TAIL(&pending_list, &tcb->link);
            }
        }
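
        /*
         * ixgbe_free_tcb() above releases the DMA bindings and any
         * mblks held by the tx control block; the tcb itself is not
         * freed but queued on pending_list, and is returned to the
         * ring's free list once the ring state has been reset below.
         */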
        if (desc_num > 0) {
            atomic_add_32(&tx_ring->tbd_free, desc_num);
            ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

            /*
             * Reset the head and tail pointers of the tbd ring;
             * Reset the head write-back if it's enabled.
             */
            tx_ring->tbd_head = 0;
            tx_ring->tbd_tail = 0;
            if (ixgbe->tx_head_wb_enable)
                *tx_ring->tbd_head_wb = 0;

            IXGBE_WRITE_REG(&ixgbe->hw,
                IXGBE_TDH(tx_ring->index), 0);
            IXGBE_WRITE_REG(&ixgbe->hw,
                IXGBE_TDT(tx_ring->index), 0);
        }

        mutex_exit(&tx_ring->recycle_lock);

        /*
         * Add the tx control blocks in the pending list to
         * the free list.
         */
        ixgbe_put_free_list(tx_ring, &pending_list);
    }
}

/*
 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
 * transmitted.
 */
static boolean_t
ixgbe_tx_drain(ixgbe_t *ixgbe)
{
    ixgbe_tx_ring_t *tx_ring;
    boolean_t done;
    int i, j;

    /*
     * Wait for a specific time to allow pending tx packets
     * to be transmitted.
     *
     * Check the counter tbd_free to see if transmission is done.
     * No lock protection is needed here.
     *
     * Return B_TRUE if all pending packets have been transmitted;
     * otherwise return B_FALSE.
     */
    for (i = 0; i < TX_DRAIN_TIME; i++) {

        done = B_TRUE;
        for (j = 0; j < ixgbe->num_tx_rings; j++) {
            tx_ring = &ixgbe->tx_rings[j];
            done = done &&
                (tx_ring->tbd_free == tx_ring->ring_size);
        }

        if (done)
            break;

        msec_delay(1);
    }

    return (done);
}

/*
 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
 */
static boolean_t
ixgbe_rx_drain(ixgbe_t *ixgbe)
{
    ixgbe_rx_ring_t *rx_ring;
    boolean_t done;
    int i, j;

    /*
     * Poll the rx free list to check whether the rx buffers held by
     * the upper layer have been released.
     *
     * Check the counter rcb_free to see if all pending buffers are
     * released. No lock protection is needed here.
     *
     * Return B_TRUE if all pending buffers have been released;
     * otherwise return B_FALSE.
     */
    for (i = 0; i < RX_DRAIN_TIME; i++) {

        done = B_TRUE;
        for (j = 0; j < ixgbe->num_rx_rings; j++) {
            rx_ring = &ixgbe->rx_rings[j];
            done = done &&
                (rx_ring->rcb_free == rx_ring->free_list_size);
        }

        if (done)
            break;

        msec_delay(1);
    }

    return (done);
}
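
/*
 * Both drain routines above are bounded polling loops: each iteration
 * sleeps one millisecond (msec_delay(1)), so the worst-case wait is
 * TX_DRAIN_TIME or RX_DRAIN_TIME milliseconds respectively.  The rx case
 * matters because receive buffers may still be held by the upper layer;
 * until they are returned to the ring's free list, the driver cannot
 * safely release the underlying DMA memory, which is why ixgbe_detach()
 * fails when ixgbe_rx_drain() returns B_FALSE.
 */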
/*
 * ixgbe_start - Start the driver/chipset.
 */
int
ixgbe_start(ixgbe_t *ixgbe)
{
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    for (i = 0; i < ixgbe->num_rx_rings; i++)
        mutex_enter(&ixgbe->rx_rings[i].rx_lock);
    for (i = 0; i < ixgbe->num_tx_rings; i++)
        mutex_enter(&ixgbe->tx_rings[i].tx_lock);

    /*
     * Start the chipset hardware
     */
    if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto start_failure;
    }

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        goto start_failure;
    }

    /*
     * Setup the rx/tx rings
     */
    ixgbe_setup_rings(ixgbe);

    /*
     * Enable adapter interrupts
     * The interrupts must be enabled after the driver state is STARTED
     */
    ixgbe_enable_adapter_interrupts(ixgbe);

    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    return (IXGBE_SUCCESS);

start_failure:
    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

    return (IXGBE_FAILURE);
}

/*
 * ixgbe_stop - Stop the driver/chipset.
 */
void
ixgbe_stop(ixgbe_t *ixgbe)
{
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Disable the adapter interrupts
     */
    ixgbe_disable_adapter_interrupts(ixgbe);

    /*
     * Drain the pending tx packets
     */
    (void) ixgbe_tx_drain(ixgbe);

    for (i = 0; i < ixgbe->num_rx_rings; i++)
        mutex_enter(&ixgbe->rx_rings[i].rx_lock);
    for (i = 0; i < ixgbe->num_tx_rings; i++)
        mutex_enter(&ixgbe->tx_rings[i].tx_lock);

    /*
     * Stop the chipset hardware
     */
    ixgbe_chip_stop(ixgbe);

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
    }

    /*
     * Clean the pending tx data/resources
     */
    ixgbe_tx_clean(ixgbe);

    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);
}

/*
 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
 */
static int
ixgbe_alloc_rings(ixgbe_t *ixgbe)
{
    /*
     * Allocate memory space for rx rings
     */
    ixgbe->rx_rings = kmem_zalloc(
        sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
        KM_NOSLEEP);

    if (ixgbe->rx_rings == NULL) {
        return (IXGBE_FAILURE);
    }

    /*
     * Allocate memory space for tx rings
     */
    ixgbe->tx_rings = kmem_zalloc(
        sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
        KM_NOSLEEP);

    if (ixgbe->tx_rings == NULL) {
        kmem_free(ixgbe->rx_rings,
            sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
        ixgbe->rx_rings = NULL;
        return (IXGBE_FAILURE);
    }

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_rings - Free the memory space of rx/tx rings.
 */
static void
ixgbe_free_rings(ixgbe_t *ixgbe)
{
    if (ixgbe->rx_rings != NULL) {
        kmem_free(ixgbe->rx_rings,
            sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
        ixgbe->rx_rings = NULL;
    }

    if (ixgbe->tx_rings != NULL) {
        kmem_free(ixgbe->tx_rings,
            sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
        ixgbe->tx_rings = NULL;
    }
}
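
/*
 * Note that the ring arrays above are allocated KM_NOSLEEP with explicit
 * NULL checks: an allocation failure is reported to the caller rather
 * than blocking in the allocator, which lets attach fail cleanly under
 * memory pressure.
 */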
/*
 * ixgbe_setup_rings - Setup rx/tx rings.
 */
static void
ixgbe_setup_rings(ixgbe_t *ixgbe)
{
    /*
     * Setup the rx/tx rings, including the following:
     *
     * 1. Setup the descriptor ring and the control block buffers;
     * 2. Initialize necessary registers for receive/transmit;
     * 3. Initialize software pointers/parameters for receive/transmit;
     */
    ixgbe_setup_rx(ixgbe);

    ixgbe_setup_tx(ixgbe);
}

static void
ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
{
    ixgbe_t *ixgbe = rx_ring->ixgbe;
    struct ixgbe_hw *hw = &ixgbe->hw;
    rx_control_block_t *rcb;
    union ixgbe_adv_rx_desc *rbd;
    uint32_t size;
    uint32_t buf_low;
    uint32_t buf_high;
    uint32_t reg_val;
    int i;

    ASSERT(mutex_owned(&rx_ring->rx_lock));
    ASSERT(mutex_owned(&ixgbe->gen_lock));

    for (i = 0; i < ixgbe->rx_ring_size; i++) {
        rcb = rx_ring->work_list[i];
        rbd = &rx_ring->rbd_ring[i];

        rbd->read.pkt_addr = rcb->rx_buf.dma_address;
        rbd->read.hdr_addr = NULL;
    }

    /*
     * Initialize the length register
     */
    size = rx_ring->ring_size * sizeof (union ixgbe_adv_rx_desc);
    IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);

    /*
     * Initialize the base address registers
     */
    buf_low = (uint32_t)rx_ring->rbd_area.dma_address;
    buf_high = (uint32_t)(rx_ring->rbd_area.dma_address >> 32);
    IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
    IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);

    /*
     * Setup head & tail pointers
     */
    IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_ring->ring_size - 1);
    IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);

    rx_ring->rbd_next = 0;

    /*
     * Note: Considering the case that the chipset is being reset
     * and there are still some buffers held by the upper layer,
     * we should not reset the values of rcb_head, rcb_tail and
     * rcb_free if the state is not IXGBE_UNKNOWN.
     */
    if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
        rx_ring->rcb_head = 0;
        rx_ring->rcb_tail = 0;
        rx_ring->rcb_free = rx_ring->free_list_size;
    }

    /*
     * Setup the Receive Descriptor Control Register (RXDCTL)
     * PTHRESH=32 descriptors (half the internal cache)
     * HTHRESH=0 descriptors (to minimize latency on fetch)
     * WTHRESH defaults to 1 (writeback each descriptor)
     */
    reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
    reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
    reg_val |= 0x0020;			/* pthresh */
    IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);

    /*
     * Setup the Split and Replication Receive Control Register.
     * Set the rx buffer size and the advanced descriptor type.
     */
    reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
        IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

    IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
}
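
/*
 * For reference, each union ixgbe_adv_rx_desc is 16 bytes, so the RDLEN
 * value programmed above is ring_size * 16; a 1024-descriptor ring, for
 * example, occupies 16 KB of descriptor memory.  The SRRCTL BSIZEPKT
 * field expresses the receive buffer size in 1 KB units, which is why
 * rx_buf_size (already rounded up to a 1 KB multiple in
 * ixgbe_init_driver_settings()) is shifted right before being written.
 */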
static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
    ixgbe_rx_ring_t *rx_ring;
    struct ixgbe_hw *hw = &ixgbe->hw;
    uint32_t reg_val;
    int i;

    /*
     * Set filter control in FCTRL to accept broadcast packets and do
     * not pass pause frames to host. Flow control settings are already
     * in this register, so preserve them.
     */
    reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
    reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
    reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
    IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

    /*
     * Enable the receive unit. This must be done after the filter
     * control is set in FCTRL.
     */
    reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
        | IXGBE_RXCTRL_DMBYPS);		/* descriptor monitor bypass */
    IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);

    /*
     * ixgbe_setup_rx_ring must be called after configuring RXCTRL
     */
    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        ixgbe_setup_rx_ring(rx_ring);
    }

    /*
     * The Max Frame Size in MHADD will be internally increased by four
     * bytes if the packet has a VLAN field, so it includes the MTU,
     * Ethernet header and frame check sequence.
     */
    reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
        + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
    IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
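
    /*
     * Worked example: with the default 1500-byte MTU, the value written
     * is 1500 + 14 (Ethernet header) + 4 (FCS) = 1518 bytes, shifted
     * into the MFS field in the upper half of MHADD.  A VLAN-tagged
     * frame of 1522 bytes still passes, because the hardware adds four
     * bytes internally as noted above.
     */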
    /*
     * Setup Jumbo Frame enable bit
     */
    if (ixgbe->default_mtu > ETHERMTU) {
        reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        reg_val |= IXGBE_HLREG0_JUMBOEN;
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
    }

    /*
     * Hardware checksum settings
     */
    if (ixgbe->rx_hcksum_enable) {
        reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
    }

    /*
     * Setup RSS for multiple receive queues
     */
    if (ixgbe->num_rx_rings > 1)
        ixgbe_setup_rss(ixgbe);
}

static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
    ixgbe_t *ixgbe = tx_ring->ixgbe;
    struct ixgbe_hw *hw = &ixgbe->hw;
    uint32_t size;
    uint32_t buf_low;
    uint32_t buf_high;
    uint32_t reg_val;

    ASSERT(mutex_owned(&tx_ring->tx_lock));
    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Initialize the length register
     */
    size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
    IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

    /*
     * Initialize the base address registers
     */
    buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
    buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
    IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
    IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

    /*
     * Setup TXDCTL(tx_ring->index)
     */
    reg_val = IXGBE_TXDCTL_ENABLE;
    IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);

    /*
     * Setup head & tail pointers
     */
    IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
    IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

    /*
     * Setup head write-back
     */
    if (ixgbe->tx_head_wb_enable) {
        /*
         * The memory of the head write-back is allocated using
         * the extra tbd beyond the tail of the tbd ring.
         */
        tx_ring->tbd_head_wb = (uint32_t *)
            ((uintptr_t)tx_ring->tbd_area.address + size);
        *tx_ring->tbd_head_wb = 0;

        buf_low = (uint32_t)
            (tx_ring->tbd_area.dma_address + size);
        buf_high = (uint32_t)
            ((tx_ring->tbd_area.dma_address + size) >> 32);

        /* Set the head write-back enable bit */
        buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

        IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
        IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

        /*
         * Turn off relaxed ordering for head write back or it will
         * cause problems with the tx recycling
         */
        reg_val = IXGBE_READ_REG(hw,
            IXGBE_DCA_TXCTRL(tx_ring->index));
        reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
        IXGBE_WRITE_REG(hw,
            IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
    } else {
        tx_ring->tbd_head_wb = NULL;
    }

    tx_ring->tbd_head = 0;
    tx_ring->tbd_tail = 0;
    tx_ring->tbd_free = tx_ring->ring_size;

    /*
     * Note: Considering the case that the chipset is being reset,
     * and there are still some tcb in the pending list,
     * we should not reset the values of tcb_head, tcb_tail and
     * tcb_free if the state is not IXGBE_UNKNOWN.
     */
    if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
        tx_ring->tcb_head = 0;
        tx_ring->tcb_tail = 0;
        tx_ring->tcb_free = tx_ring->free_list_size;
    }

    /*
     * Initialize the s/w context structure
     */
    bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
}

static void
ixgbe_setup_tx(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    ixgbe_tx_ring_t *tx_ring;
    uint32_t reg_val;
    int i;

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        ixgbe_setup_tx_ring(tx_ring);
    }

    /*
     * Enable CRC appending and TX padding (for short tx frames)
     */
    reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
    reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
    IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
}

/*
 * ixgbe_setup_rss - Setup receive-side scaling feature.
 */
static void
ixgbe_setup_rss(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    uint32_t i, mrqc, rxcsum;
    uint32_t random;
    uint32_t reta;

    /*
     * Fill out redirection table
     */
    reta = 0;
    for (i = 0; i < 128; i++) {
        reta = (reta << 8) | (i % ixgbe->num_rx_rings);
        if ((i & 3) == 3)
            IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
    }
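
    /*
     * The redirection table has 128 one-byte entries, packed four per
     * 32-bit RETA register; the loop above accumulates four queue
     * indices and flushes the register on every fourth entry.  With
     * two rx rings, for instance, the indices alternate 0,1,0,1,...
     * and every RETA register is written as 0x00010001.
     */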
    /*
     * Fill out hash function seeds with a random constant
     */
    for (i = 0; i < 10; i++) {
        (void) random_get_pseudo_bytes((uint8_t *)&random,
            sizeof (uint32_t));
        IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
    }

    /*
     * Enable RSS & perform hash on these packet types
     */
    mrqc = IXGBE_MRQC_RSSEN |
        IXGBE_MRQC_RSS_FIELD_IPV4 |
        IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
        IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
        IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
        IXGBE_MRQC_RSS_FIELD_IPV6_EX |
        IXGBE_MRQC_RSS_FIELD_IPV6 |
        IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
        IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
        IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
    IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

    /*
     * Disable Packet Checksum to enable RSS for multiple receive queues.
     * It is an adapter hardware limitation that Packet Checksum is
     * mutually exclusive with RSS.
     */
    rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
    rxcsum |= IXGBE_RXCSUM_PCSD;
    rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
    IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}

/*
 * ixgbe_init_unicst - Initialize the unicast addresses.
 */
static void
ixgbe_init_unicst(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    int slot;

    /*
     * Here we should consider two situations:
     *
     * 1. Chipset is initialized the first time:
     *    Initialize the multiple unicast addresses, and
     *    save the default mac address.
     *
     * 2. Chipset is reset:
     *    Recover the multiple unicast addresses from the
     *    software data structure to the RAR registers.
     */
    if (!ixgbe->unicst_init) {
        /*
         * Initialize the multiple unicast addresses
         */
        ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;

        ixgbe->unicst_avail = ixgbe->unicst_total - 1;

        bcopy(hw->mac.addr, ixgbe->unicst_addr[0].mac.addr,
            ETHERADDRL);
        ixgbe->unicst_addr[0].mac.set = 1;

        for (slot = 1; slot < ixgbe->unicst_total; slot++)
            ixgbe->unicst_addr[slot].mac.set = 0;

        ixgbe->unicst_init = B_TRUE;
    } else {
        /*
         * Recover the default mac address
         */
        bcopy(ixgbe->unicst_addr[0].mac.addr, hw->mac.addr,
            ETHERADDRL);

        /* Re-configure the RAR registers */
        for (slot = 1; slot < ixgbe->unicst_total; slot++)
            (void) ixgbe_set_rar(hw, slot,
                ixgbe->unicst_addr[slot].mac.addr, NULL, NULL);
    }
}

/*
 * ixgbe_unicst_set - Set the unicast address to the specified slot.
 */
int
ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
    mac_addr_slot_t slot)
{
    struct ixgbe_hw *hw = &ixgbe->hw;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Save the unicast address in the software data structure
     */
    bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);

    /*
     * Set the unicast address to the RAR register
     */
    (void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, NULL, NULL);

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
        return (EIO);
    }

    return (0);
}

/*
 * ixgbe_multicst_add - Add a multicst address.
 */
int
ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
{
    ASSERT(mutex_owned(&ixgbe->gen_lock));

    if ((multiaddr[0] & 01) == 0) {
        return (EINVAL);
    }

    if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
        return (ENOENT);
    }

    bcopy(multiaddr,
        &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
    ixgbe->mcast_count++;

    /*
     * Update the multicast table in the hardware
     */
    ixgbe_setup_multicst(ixgbe);

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
        return (EIO);
    }

    return (0);
}
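
/*
 * The (multiaddr[0] & 01) test above checks the Ethernet group-address
 * bit: the least significant bit of the first byte is 1 for every valid
 * multicast address (e.g. 01:00:5e:xx:xx:xx for IPv4 multicast) and 0
 * for unicast, so unicast addresses are rejected with EINVAL.  Both the
 * add and remove paths simply edit the software mcast_table and then
 * rewrite the hardware filter in full via ixgbe_setup_multicst().
 */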
/*
 * ixgbe_multicst_remove - Remove a multicst address.
 */
int
ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
{
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    for (i = 0; i < ixgbe->mcast_count; i++) {
        if (bcmp(multiaddr, &ixgbe->mcast_table[i],
            ETHERADDRL) == 0) {
            for (i++; i < ixgbe->mcast_count; i++) {
                ixgbe->mcast_table[i - 1] =
                    ixgbe->mcast_table[i];
            }
            ixgbe->mcast_count--;
            break;
        }
    }

    /*
     * Update the multicast table in the hardware
     */
    ixgbe_setup_multicst(ixgbe);

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
        return (EIO);
    }

    return (0);
}

/*
 * ixgbe_setup_multicst - Setup multicast data structures.
 *
 * This routine initializes all of the multicast related structures
 * and saves them in the hardware registers.
 */
static void
ixgbe_setup_multicst(ixgbe_t *ixgbe)
{
    uint8_t *mc_addr_list;
    uint32_t mc_addr_count;
    struct ixgbe_hw *hw = &ixgbe->hw;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);

    mc_addr_list = (uint8_t *)ixgbe->mcast_table;
    mc_addr_count = ixgbe->mcast_count;

    /*
     * Update the multicast addresses to the MTA registers
     */
    (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
        ixgbe_mc_table_itr);
}
2143 */ 2144 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU, 2145 MIN_MTU, MAX_MTU, DEFAULT_MTU); 2146 2147 ixgbe->max_frame_size = ixgbe->default_mtu + 2148 sizeof (struct ether_vlan_header) + ETHERFCSL; 2149 2150 /* 2151 * Ethernet flow control configuration 2152 */ 2153 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL, 2154 ixgbe_fc_none, 3, ixgbe_fc_full); 2155 if (flow_control == 3) 2156 flow_control = ixgbe_fc_default; 2157 2158 hw->fc.type = flow_control; 2159 2160 /* 2161 * Multiple rings configurations 2162 */ 2163 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM, 2164 MIN_TX_QUEUE_NUM, MAX_TX_QUEUE_NUM, DEFAULT_TX_QUEUE_NUM); 2165 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE, 2166 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE); 2167 2168 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM, 2169 MIN_RX_QUEUE_NUM, MAX_RX_QUEUE_NUM, DEFAULT_RX_QUEUE_NUM); 2170 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE, 2171 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE); 2172 2173 /* 2174 * Tunable used to force an interrupt type. The only use is 2175 * for testing of the lesser interrupt types. 2176 * 0 = don't force interrupt type 2177 * 1 = force interrupt type MSIX 2178 * 2 = force interrupt type MSI 2179 * 3 = force interrupt type Legacy 2180 */ 2181 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE, 2182 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE); 2183 ixgbe_log(ixgbe, "interrupt force: %d\n", ixgbe->intr_force); 2184 2185 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE, 2186 0, 1, DEFAULT_TX_HCKSUM_ENABLE); 2187 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE, 2188 0, 1, DEFAULT_RX_HCKSUM_ENABLE); 2189 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE, 2190 0, 1, DEFAULT_LSO_ENABLE); 2191 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE, 2192 0, 1, DEFAULT_TX_HEAD_WB_ENABLE); 2193 2194 /* 2195 * ixgbe LSO needs the tx h/w checksum support. 2196 * LSO will be disabled if tx h/w checksum is not 2197 * enabled. 2198 */ 2199 if (ixgbe->tx_hcksum_enable == B_FALSE) { 2200 ixgbe->lso_enable = B_FALSE; 2201 } 2202 2203 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD, 2204 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD, 2205 DEFAULT_TX_COPY_THRESHOLD); 2206 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe, 2207 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD, 2208 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD); 2209 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe, 2210 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD, 2211 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD); 2212 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe, 2213 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD, 2214 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD); 2215 2216 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD, 2217 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD, 2218 DEFAULT_RX_COPY_THRESHOLD); 2219 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR, 2220 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR, 2221 DEFAULT_RX_LIMIT_PER_INTR); 2222 2223 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING, 2224 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING, 2225 DEFAULT_INTR_THROTTLING); 2226 } 2227 2228 /* 2229 * ixgbe_get_prop - Get a property value out of the configuration file 2230 * ixgbe.conf. 
2231 * 2232 * Caller provides the name of the property, a minimum value, a 2233 * maximum value, and a default value. 2234 * 2235 * Returns the configured value of the property, with the default, 2236 * minimum and maximum properly applied. 2237 */ 2238 static int 2239 ixgbe_get_prop(ixgbe_t *ixgbe, 2240 char *propname, /* name of the property */ 2241 int minval, /* minimum acceptable value */ 2242 int maxval, /* maximum acceptable value */ 2243 int defval) /* default value */ 2244 { 2245 int value; 2246 2247 /* 2248 * Call ddi_prop_get_int() to read the conf settings 2249 */ 2250 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip, 2251 DDI_PROP_DONTPASS, propname, defval); 2252 if (value > maxval) 2253 value = maxval; 2254 2255 if (value < minval) 2256 value = minval; 2257 2258 return (value); 2259 } 2260 2261 /* 2262 * ixgbe_driver_setup_link - Use the link properties to set up the link. 2263 */ 2264 int 2265 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw) 2266 { 2267 struct ixgbe_mac_info *mac; 2268 struct ixgbe_phy_info *phy; 2269 boolean_t invalid; 2270 2271 mac = &ixgbe->hw.mac; 2272 phy = &ixgbe->hw.phy; 2273 invalid = B_FALSE; 2274 2275 if (ixgbe->param_adv_autoneg_cap == 1) { 2276 mac->autoneg = B_TRUE; 2277 phy->autoneg_advertised = 0; 2278 2279 /* 2280 * No half duplex support with 10Gb parts 2281 */ 2282 if (ixgbe->param_adv_10000fdx_cap == 1) 2283 phy->autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 2284 2285 if (ixgbe->param_adv_1000fdx_cap == 1) 2286 phy->autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 2287 2288 if (ixgbe->param_adv_100fdx_cap == 1) 2289 phy->autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; 2290 2291 if (phy->autoneg_advertised == 0) 2292 invalid = B_TRUE; 2293 } else { 2294 ixgbe->hw.mac.autoneg = B_FALSE; 2295 } 2296 2297 if (invalid) { 2298 ixgbe_notice(ixgbe, "Invalid link settings. Setting link to " 2299 "autonegotiation with full link capabilities."); 2300 ixgbe->hw.mac.autoneg = B_TRUE; 2301 } 2302 2303 if (setup_hw) { 2304 if (ixgbe_setup_link(&ixgbe->hw) != IXGBE_SUCCESS) 2305 return (IXGBE_FAILURE); 2306 } 2307 2308 return (IXGBE_SUCCESS); 2309 } 2310 2311 /* 2312 * ixgbe_driver_link_check - Link status processing.
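 *
 * Returns B_TRUE when the link state has changed since the last check,
 * so the caller knows to report the new state via mac_link_update().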
2313 */ 2314 static boolean_t 2315 ixgbe_driver_link_check(ixgbe_t *ixgbe) 2316 { 2317 struct ixgbe_hw *hw = &ixgbe->hw; 2318 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 2319 boolean_t link_up = B_FALSE; 2320 boolean_t link_changed = B_FALSE; 2321 2322 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2323 2324 (void) ixgbe_check_link(hw, &speed, &link_up); 2325 if (link_up) { 2326 /* 2327 * The link is up, check whether it was marked as down earlier 2328 */ 2329 if (ixgbe->link_state != LINK_STATE_UP) { 2330 switch (speed) { 2331 case IXGBE_LINK_SPEED_10GB_FULL: 2332 ixgbe->link_speed = SPEED_10GB; 2333 break; 2334 case IXGBE_LINK_SPEED_1GB_FULL: 2335 ixgbe->link_speed = SPEED_1GB; 2336 break; 2337 case IXGBE_LINK_SPEED_100_FULL: 2338 ixgbe->link_speed = SPEED_100; 2339 } 2340 ixgbe->link_duplex = LINK_DUPLEX_FULL; 2341 ixgbe->link_state = LINK_STATE_UP; 2342 ixgbe->link_down_timeout = 0; 2343 link_changed = B_TRUE; 2344 } 2345 } else { 2346 if (ixgbe->link_state != LINK_STATE_DOWN) { 2347 ixgbe->link_speed = 0; 2348 ixgbe->link_duplex = 0; 2349 ixgbe->link_state = LINK_STATE_DOWN; 2350 link_changed = B_TRUE; 2351 } 2352 2353 if (ixgbe->ixgbe_state & IXGBE_STARTED) { 2354 if (ixgbe->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) { 2355 ixgbe->link_down_timeout++; 2356 } else if (ixgbe->link_down_timeout == 2357 MAX_LINK_DOWN_TIMEOUT) { 2358 ixgbe_tx_clean(ixgbe); 2359 ixgbe->link_down_timeout++; 2360 } 2361 } 2362 } 2363 2364 return (link_changed); 2365 } 2366 2367 /* 2368 * ixgbe_local_timer - Driver watchdog function. 2369 * 2370 * This function handles the transmit stall check; if a stall is 2371 * detected, the chip is reset. The watchdog timer is then restarted. 2372 */ 2373 static void 2374 ixgbe_local_timer(void *arg) 2375 { 2376 ixgbe_t *ixgbe = (ixgbe_t *)arg; 2377 2378 if (ixgbe_stall_check(ixgbe)) { 2379 ixgbe->reset_count++; 2380 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 2381 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 2382 } 2383 2384 ixgbe_restart_watchdog_timer(ixgbe); 2385 } 2386 2387 /* 2388 * ixgbe_stall_check - Check for transmit stall. 2389 * 2390 * This function checks if the adapter is stalled (in transmit). 2391 * 2392 * It is called each time the watchdog timeout is invoked. 2393 * If the transmit descriptor reclaim continuously fails, 2394 * the watchdog value will increment by 1. If the watchdog 2395 * value reaches the threshold, the adapter is assumed to 2396 * have stalled and needs to be reset. 2397 */ 2398 static boolean_t 2399 ixgbe_stall_check(ixgbe_t *ixgbe) 2400 { 2401 ixgbe_tx_ring_t *tx_ring; 2402 boolean_t result; 2403 int i; 2404 2405 if (ixgbe->link_state != LINK_STATE_UP) 2406 return (B_FALSE); 2407 2408 /* 2409 * If any tx ring is stalled, we'll reset the chipset 2410 */ 2411 result = B_FALSE; 2412 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2413 tx_ring = &ixgbe->tx_rings[i]; 2414 2415 if (tx_ring->recycle_fail > 0) 2416 tx_ring->stall_watchdog++; 2417 else 2418 tx_ring->stall_watchdog = 0; 2419 2420 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { 2421 result = B_TRUE; 2422 break; 2423 } 2424 } 2425 2426 if (result) { 2427 tx_ring->stall_watchdog = 0; 2428 tx_ring->recycle_fail = 0; 2429 } 2430 2431 return (result); 2432 } 2433 2434 2435 /* 2436 * is_valid_mac_addr - Check if the mac address is valid.
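 *
 * Rejects the all-zeros address and the broadcast (all-ones) address;
 * anything else is accepted.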
2437 */ 2438 static boolean_t 2439 is_valid_mac_addr(uint8_t *mac_addr) 2440 { 2441 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 2442 const uint8_t addr_test2[6] = 2443 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 2444 2445 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 2446 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 2447 return (B_FALSE); 2448 2449 return (B_TRUE); 2450 } 2451 2452 static boolean_t 2453 ixgbe_find_mac_address(ixgbe_t *ixgbe) 2454 { 2455 #ifdef __sparc 2456 struct ixgbe_hw *hw = &ixgbe->hw; 2457 uchar_t *bytes; 2458 struct ether_addr sysaddr; 2459 uint_t nelts; 2460 int err; 2461 boolean_t found = B_FALSE; 2462 2463 /* 2464 * The "vendor's factory-set address" may already have 2465 * been extracted from the chip, but if the property 2466 * "local-mac-address" is set we use that instead. 2467 * 2468 * We check whether it looks like an array of 6 2469 * bytes (which it should, if OBP set it). If we can't 2470 * make sense of it this way, we'll ignore it. 2471 */ 2472 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 2473 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 2474 if (err == DDI_PROP_SUCCESS) { 2475 if (nelts == ETHERADDRL) { 2476 while (nelts--) 2477 hw->mac.addr[nelts] = bytes[nelts]; 2478 found = B_TRUE; 2479 } 2480 ddi_prop_free(bytes); 2481 } 2482 2483 /* 2484 * Look up the OBP property "local-mac-address?". If the user has set 2485 * 'local-mac-address? = false', use "the system address" instead. 2486 */ 2487 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0, 2488 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 2489 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 2490 if (localetheraddr(NULL, &sysaddr) != 0) { 2491 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 2492 found = B_TRUE; 2493 } 2494 } 2495 ddi_prop_free(bytes); 2496 } 2497 2498 /* 2499 * Finally(!), if there's a valid "mac-address" property (created 2500 * if we netbooted from this interface), we must use this instead 2501 * of any of the above to ensure that the NFS/install server doesn't 2502 * get confused by the address changing as Solaris takes over! 2503 */ 2504 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 2505 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 2506 if (err == DDI_PROP_SUCCESS) { 2507 if (nelts == ETHERADDRL) { 2508 while (nelts--) 2509 hw->mac.addr[nelts] = bytes[nelts]; 2510 found = B_TRUE; 2511 } 2512 ddi_prop_free(bytes); 2513 } 2514 2515 if (found) { 2516 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 2517 return (B_TRUE); 2518 } 2519 #else 2520 _NOTE(ARGUNUSED(ixgbe)); 2521 #endif 2522 2523 return (B_TRUE); 2524 } 2525 2526 #pragma inline(ixgbe_arm_watchdog_timer) 2527 static void 2528 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe) 2529 { 2530 /* 2531 * Fire a watchdog timer 2532 */ 2533 ixgbe->watchdog_tid = 2534 timeout(ixgbe_local_timer, 2535 (void *)ixgbe, 1 * drv_usectohz(1000000)); 2536 2537 } 2538 2539 /* 2540 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer. 2541 */ 2542 void 2543 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe) 2544 { 2545 mutex_enter(&ixgbe->watchdog_lock); 2546 2547 if (!ixgbe->watchdog_enable) { 2548 ixgbe->watchdog_enable = B_TRUE; 2549 ixgbe->watchdog_start = B_TRUE; 2550 ixgbe_arm_watchdog_timer(ixgbe); 2551 } 2552 2553 mutex_exit(&ixgbe->watchdog_lock); 2554 } 2555 2556 /* 2557 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer. 
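 *
 * The timeout id is captured and cleared under watchdog_lock, but
 * untimeout() is only called after the lock is dropped, because the
 * timer handler may itself be blocked waiting for watchdog_lock.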
2558 */ 2559 void 2560 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe) 2561 { 2562 timeout_id_t tid; 2563 2564 mutex_enter(&ixgbe->watchdog_lock); 2565 2566 ixgbe->watchdog_enable = B_FALSE; 2567 ixgbe->watchdog_start = B_FALSE; 2568 tid = ixgbe->watchdog_tid; 2569 ixgbe->watchdog_tid = 0; 2570 2571 mutex_exit(&ixgbe->watchdog_lock); 2572 2573 if (tid != 0) 2574 (void) untimeout(tid); 2575 } 2576 2577 /* 2578 * ixgbe_start_watchdog_timer - Start the driver watchdog timer. 2579 */ 2580 static void 2581 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe) 2582 { 2583 mutex_enter(&ixgbe->watchdog_lock); 2584 2585 if (ixgbe->watchdog_enable) { 2586 if (!ixgbe->watchdog_start) { 2587 ixgbe->watchdog_start = B_TRUE; 2588 ixgbe_arm_watchdog_timer(ixgbe); 2589 } 2590 } 2591 2592 mutex_exit(&ixgbe->watchdog_lock); 2593 } 2594 2595 /* 2596 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer. 2597 */ 2598 static void 2599 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe) 2600 { 2601 mutex_enter(&ixgbe->watchdog_lock); 2602 2603 if (ixgbe->watchdog_start) 2604 ixgbe_arm_watchdog_timer(ixgbe); 2605 2606 mutex_exit(&ixgbe->watchdog_lock); 2607 } 2608 2609 /* 2610 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer. 2611 */ 2612 static void 2613 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe) 2614 { 2615 timeout_id_t tid; 2616 2617 mutex_enter(&ixgbe->watchdog_lock); 2618 2619 ixgbe->watchdog_start = B_FALSE; 2620 tid = ixgbe->watchdog_tid; 2621 ixgbe->watchdog_tid = 0; 2622 2623 mutex_exit(&ixgbe->watchdog_lock); 2624 2625 if (tid != 0) 2626 (void) untimeout(tid); 2627 } 2628 2629 /* 2630 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts. 2631 */ 2632 static void 2633 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe) 2634 { 2635 struct ixgbe_hw *hw = &ixgbe->hw; 2636 2637 /* 2638 * mask all interrupts off 2639 */ 2640 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff); 2641 2642 /* 2643 * for MSI-X, also disable autoclear 2644 */ 2645 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 2646 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0); 2647 } 2648 2649 IXGBE_WRITE_FLUSH(hw); 2650 } 2651 2652 /* 2653 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts. 2654 */ 2655 static void 2656 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe) 2657 { 2658 struct ixgbe_hw *hw = &ixgbe->hw; 2659 uint32_t eims, eiac, gpie; 2660 2661 gpie = 0; 2662 eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */ 2663 eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */ 2664 2665 /* 2666 * msi-x mode 2667 */ 2668 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 2669 /* enable autoclear but not on bits 29:20 */ 2670 eiac = (eims & ~0x3ff00000); 2671 2672 /* general purpose interrupt enable */ 2673 gpie |= (IXGBE_GPIE_MSIX_MODE | 2674 IXGBE_GPIE_PBA_SUPPORT |IXGBE_GPIE_OCD); 2675 /* 2676 * non-msi-x mode 2677 */ 2678 } else { 2679 2680 /* disable autoclear, leave gpie at default */ 2681 eiac = 0; 2682 } 2683 2684 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims); 2685 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac); 2686 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2687 IXGBE_WRITE_FLUSH(hw); 2688 } 2689 2690 /* 2691 * ixgbe_loopback_ioctl - Loopback support. 
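 *
 * Handles the LB_GET_INFO_SIZE, LB_GET_INFO, LB_GET_MODE and
 * LB_SET_MODE ioctls. Only two modes are advertised: normal
 * (no loopback) and internal MAC loopback.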
2692 */ 2693 enum ioc_reply 2694 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp) 2695 { 2696 lb_info_sz_t *lbsp; 2697 lb_property_t *lbpp; 2698 uint32_t *lbmp; 2699 uint32_t size; 2700 uint32_t value; 2701 2702 if (mp->b_cont == NULL) 2703 return (IOC_INVAL); 2704 2705 switch (iocp->ioc_cmd) { 2706 default: 2707 return (IOC_INVAL); 2708 2709 case LB_GET_INFO_SIZE: 2710 size = sizeof (lb_info_sz_t); 2711 if (iocp->ioc_count != size) 2712 return (IOC_INVAL); 2713 2714 value = sizeof (lb_normal); 2715 value += sizeof (lb_mac); 2716 2717 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 2718 *lbsp = value; 2719 break; 2720 2721 case LB_GET_INFO: 2722 value = sizeof (lb_normal); 2723 value += sizeof (lb_mac); 2724 2725 size = value; 2726 if (iocp->ioc_count != size) 2727 return (IOC_INVAL); 2728 2729 value = 0; 2730 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 2731 2732 lbpp[value++] = lb_normal; 2733 lbpp[value++] = lb_mac; 2734 break; 2735 2736 case LB_GET_MODE: 2737 size = sizeof (uint32_t); 2738 if (iocp->ioc_count != size) 2739 return (IOC_INVAL); 2740 2741 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 2742 *lbmp = ixgbe->loopback_mode; 2743 break; 2744 2745 case LB_SET_MODE: 2746 size = 0; 2747 if (iocp->ioc_count != sizeof (uint32_t)) 2748 return (IOC_INVAL); 2749 2750 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 2751 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp)) 2752 return (IOC_INVAL); 2753 break; 2754 } 2755 2756 iocp->ioc_count = size; 2757 iocp->ioc_error = 0; 2758 2759 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 2760 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 2761 return (IOC_INVAL); 2762 } 2763 2764 return (IOC_REPLY); 2765 } 2766 2767 /* 2768 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode. 2769 */ 2770 static boolean_t 2771 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode) 2772 { 2773 struct ixgbe_hw *hw; 2774 2775 if (mode == ixgbe->loopback_mode) 2776 return (B_TRUE); 2777 2778 hw = &ixgbe->hw; 2779 2780 ixgbe->loopback_mode = mode; 2781 2782 if (mode == IXGBE_LB_NONE) { 2783 /* 2784 * Reset the chip 2785 */ 2786 hw->phy.autoneg_wait_to_complete = B_TRUE; 2787 (void) ixgbe_reset(ixgbe); 2788 hw->phy.autoneg_wait_to_complete = B_FALSE; 2789 return (B_TRUE); 2790 } 2791 2792 mutex_enter(&ixgbe->gen_lock); 2793 2794 switch (mode) { 2795 default: 2796 mutex_exit(&ixgbe->gen_lock); 2797 return (B_FALSE); 2798 2799 case IXGBE_LB_INTERNAL_MAC: 2800 ixgbe_set_internal_mac_loopback(ixgbe); 2801 break; 2802 } 2803 2804 mutex_exit(&ixgbe->gen_lock); 2805 2806 return (B_TRUE); 2807 } 2808 2809 /* 2810 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode. 
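 *
 * Sets the LPBK bit in HLREG0 and clears the link mode select field
 * in AUTOC; on 82598 parts, the Atlas analog Tx lanes are also powered
 * down so that looped-back frames stay off the wire.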
2811 */ 2812 static void 2813 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe) 2814 { 2815 struct ixgbe_hw *hw; 2816 uint32_t reg; 2817 uint8_t atlas; 2818 2819 hw = &ixgbe->hw; 2820 2821 /* 2822 * Setup MAC loopback 2823 */ 2824 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0); 2825 reg |= IXGBE_HLREG0_LPBK; 2826 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg); 2827 2828 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 2829 reg &= ~IXGBE_AUTOC_LMS_MASK; 2830 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 2831 2832 /* 2833 * Disable Atlas Tx lanes to keep packets in loopback and not on wire 2834 */ 2835 if (hw->mac.type == ixgbe_mac_82598EB) { 2836 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 2837 &atlas); 2838 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 2839 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 2840 atlas); 2841 2842 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 2843 &atlas); 2844 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 2845 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 2846 atlas); 2847 2848 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 2849 &atlas); 2850 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 2851 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 2852 atlas); 2853 2854 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 2855 &atlas); 2856 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 2857 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 2858 atlas); 2859 } 2860 } 2861 2862 #pragma inline(ixgbe_intr_rx_work) 2863 /* 2864 * ixgbe_intr_rx_work - RX processing of ISR. 2865 */ 2866 static void 2867 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring) 2868 { 2869 mblk_t *mp; 2870 2871 mutex_enter(&rx_ring->rx_lock); 2872 2873 mp = ixgbe_rx(rx_ring); 2874 mutex_exit(&rx_ring->rx_lock); 2875 2876 if (mp != NULL) 2877 mac_rx(rx_ring->ixgbe->mac_hdl, NULL, mp); 2878 } 2879 2880 #pragma inline(ixgbe_intr_tx_work) 2881 /* 2882 * ixgbe_intr_tx_work - TX processing of ISR. 2883 */ 2884 static void 2885 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring) 2886 { 2887 /* 2888 * Recycle the tx descriptors 2889 */ 2890 tx_ring->tx_recycle(tx_ring); 2891 2892 /* 2893 * Schedule the re-transmit 2894 */ 2895 if (tx_ring->reschedule && 2896 (tx_ring->tbd_free >= tx_ring->resched_thresh)) { 2897 tx_ring->reschedule = B_FALSE; 2898 mac_tx_update(tx_ring->ixgbe->mac_hdl); 2899 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 2900 } 2901 } 2902 2903 #pragma inline(ixgbe_intr_other_work) 2904 /* 2905 * ixgbe_intr_other_work - Other processing of ISR. 2906 */ 2907 static void 2908 ixgbe_intr_other_work(ixgbe_t *ixgbe) 2909 { 2910 boolean_t link_changed; 2911 2912 ixgbe_stop_watchdog_timer(ixgbe); 2913 2914 mutex_enter(&ixgbe->gen_lock); 2915 2916 /* 2917 * Take care of link status change 2918 */ 2919 link_changed = ixgbe_driver_link_check(ixgbe); 2920 2921 /* 2922 * Get new phy state 2923 */ 2924 ixgbe_get_hw_state(ixgbe); 2925 2926 mutex_exit(&ixgbe->gen_lock); 2927 2928 if (link_changed) 2929 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 2930 2931 ixgbe_start_watchdog_timer(ixgbe); 2932 } 2933 2934 /* 2935 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts. 
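 *
 * The EICR read decides whether to claim the interrupt: a zero value
 * means this device did not raise it, so DDI_INTR_UNCLAIMED is
 * returned and other handlers sharing the line can run.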
2936 */ 2937 static uint_t 2938 ixgbe_intr_legacy(void *arg1, void *arg2) 2939 { 2940 _NOTE(ARGUNUSED(arg2)); 2941 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 2942 struct ixgbe_hw *hw = &ixgbe->hw; 2943 ixgbe_tx_ring_t *tx_ring; 2944 uint32_t eicr; 2945 mblk_t *mp; 2946 boolean_t tx_reschedule; 2947 boolean_t link_changed; 2948 uint_t result; 2949 2950 2951 mutex_enter(&ixgbe->gen_lock); 2952 2953 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 2954 mutex_exit(&ixgbe->gen_lock); 2955 return (DDI_INTR_UNCLAIMED); 2956 } 2957 2958 mp = NULL; 2959 tx_reschedule = B_FALSE; 2960 link_changed = B_FALSE; 2961 2962 /* 2963 * Any bit set in eicr: claim this interrupt 2964 */ 2965 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 2966 if (eicr) { 2967 /* 2968 * For legacy interrupt, we have only one interrupt, 2969 * so we have only one rx ring and one tx ring enabled. 2970 */ 2971 ASSERT(ixgbe->num_rx_rings == 1); 2972 ASSERT(ixgbe->num_tx_rings == 1); 2973 2974 /* 2975 * For legacy interrupt, we can't differentiate 2976 * between tx and rx, so always clean both 2977 */ 2978 if (eicr & IXGBE_EICR_RTX_QUEUE) { 2979 2980 /* 2981 * Clean the rx descriptors 2982 */ 2983 mp = ixgbe_rx(&ixgbe->rx_rings[0]); 2984 2985 /* 2986 * Recycle the tx descriptors 2987 */ 2988 tx_ring = &ixgbe->tx_rings[0]; 2989 tx_ring->tx_recycle(tx_ring); 2990 2991 /* 2992 * Schedule the re-transmit 2993 */ 2994 tx_reschedule = (tx_ring->reschedule && 2995 (tx_ring->tbd_free >= tx_ring->resched_thresh)); 2996 } 2997 2998 if (eicr & IXGBE_EICR_LSC) { 2999 3000 /* take care of link status change */ 3001 link_changed = ixgbe_driver_link_check(ixgbe); 3002 3003 /* Get new phy state */ 3004 ixgbe_get_hw_state(ixgbe); 3005 } 3006 3007 result = DDI_INTR_CLAIMED; 3008 } else { 3009 /* 3010 * No interrupt cause bits set: don't claim this interrupt. 3011 */ 3012 result = DDI_INTR_UNCLAIMED; 3013 } 3014 3015 mutex_exit(&ixgbe->gen_lock); 3016 3017 /* 3018 * Do the following work outside of the gen_lock 3019 */ 3020 if (mp != NULL) 3021 mac_rx(ixgbe->mac_hdl, NULL, mp); 3022 3023 if (tx_reschedule) { 3024 tx_ring->reschedule = B_FALSE; 3025 mac_tx_update(ixgbe->mac_hdl); 3026 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 3027 } 3028 3029 if (link_changed) 3030 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3031 3032 return (result); 3033 } 3034 3035 /* 3036 * ixgbe_intr_msi - Interrupt handler for MSI. 3037 */ 3038 static uint_t 3039 ixgbe_intr_msi(void *arg1, void *arg2) 3040 { 3041 _NOTE(ARGUNUSED(arg2)); 3042 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 3043 struct ixgbe_hw *hw = &ixgbe->hw; 3044 uint32_t eicr; 3045 3046 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3047 3048 /* 3049 * For MSI interrupt, we have only one vector, 3050 * so we have only one rx ring and one tx ring enabled. 3051 */ 3052 ASSERT(ixgbe->num_rx_rings == 1); 3053 ASSERT(ixgbe->num_tx_rings == 1); 3054 3055 /* 3056 * For MSI interrupt, we can't differentiate 3057 * between tx and rx, so always clean both. 3058 */ 3059 if (eicr & IXGBE_EICR_RTX_QUEUE) { 3060 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]); 3061 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 3062 } 3063 3064 if (eicr & IXGBE_EICR_LSC) { 3065 ixgbe_intr_other_work(ixgbe); 3066 } 3067 3068 return (DDI_INTR_CLAIMED); 3069 } 3070 3071 /* 3072 * ixgbe_intr_rx - Interrupt handler for rx. 
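 *
 * Walks the rx_map bitmap of the vector that fired and cleans every
 * rx ring whose bit is set, so a single MSI-X vector can service
 * more than one rx ring.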
3073 */ 3074 static uint_t 3075 ixgbe_intr_rx(void *arg1, void *arg2) 3076 { 3077 _NOTE(ARGUNUSED(arg2)); 3078 ixgbe_ring_vector_t *vect = (ixgbe_ring_vector_t *)arg1; 3079 ixgbe_t *ixgbe = vect->ixgbe; 3080 int r_idx; 3081 3082 /* 3083 * clean each rx ring that has its bit set in the map 3084 */ 3085 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1)); 3086 3087 while (r_idx >= 0) { 3088 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]); 3089 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 3090 (ixgbe->num_rx_rings - 1)); 3091 } 3092 3093 return (DDI_INTR_CLAIMED); 3094 } 3095 3096 /* 3097 * ixgbe_intr_tx_other - Interrupt handler for both tx and other. 3098 * 3099 * Always look for Tx cleanup work. Only look for other work if the right 3100 * bits are set in the Interrupt Cause Register. 3101 */ 3102 static uint_t 3103 ixgbe_intr_tx_other(void *arg1, void *arg2) 3104 { 3105 _NOTE(ARGUNUSED(arg2)); 3106 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 3107 struct ixgbe_hw *hw = &ixgbe->hw; 3108 uint32_t eicr; 3109 3110 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3111 3112 /* 3113 * Always look for Tx cleanup work. We don't have separate 3114 * transmit vectors, so we have only one tx ring enabled. 3115 */ 3116 ASSERT(ixgbe->num_tx_rings == 1); 3117 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 3118 3119 /* 3120 * Check for "other" causes. 3121 */ 3122 if (eicr & IXGBE_EICR_LSC) { 3123 ixgbe_intr_other_work(ixgbe); 3124 } 3125 3126 return (DDI_INTR_CLAIMED); 3127 } 3128 3129 /* 3130 * ixgbe_alloc_intrs - Allocate interrupts for the driver. 3131 * 3132 * Normal sequence is to try MSI-X; if not successful, try MSI; 3133 * if not successful, try Legacy. 3134 * ixgbe->intr_force can be used to force the sequence to start with 3135 * any of the 3 types. 3136 * If MSI-X is not used, number of tx/rx rings is forced to 1.
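 *
 * For example, an illustrative driver.conf setting of intr_force = 2
 * (MSI) makes the sequence skip MSI-X and start with MSI.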
3137 */ 3138 static int 3139 ixgbe_alloc_intrs(ixgbe_t *ixgbe) 3140 { 3141 dev_info_t *devinfo; 3142 int intr_types; 3143 int rc; 3144 3145 devinfo = ixgbe->dip; 3146 3147 /* 3148 * Get supported interrupt types 3149 */ 3150 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 3151 3152 if (rc != DDI_SUCCESS) { 3153 ixgbe_log(ixgbe, 3154 "Get supported interrupt types failed: %d", rc); 3155 return (IXGBE_FAILURE); 3156 } 3157 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types); 3158 3159 ixgbe->intr_type = 0; 3160 3161 /* 3162 * Install MSI-X interrupts 3163 */ 3164 if ((intr_types & DDI_INTR_TYPE_MSIX) && 3165 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) { 3166 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX); 3167 if (rc == IXGBE_SUCCESS) 3168 return (IXGBE_SUCCESS); 3169 3170 ixgbe_log(ixgbe, 3171 "Allocate MSI-X failed, trying MSI interrupts..."); 3172 } 3173 3174 /* 3175 * MSI-X not used, force rings to 1 3176 */ 3177 ixgbe->num_rx_rings = 1; 3178 ixgbe->num_tx_rings = 1; 3179 ixgbe_log(ixgbe, 3180 "MSI-X not used, force rx and tx queue number to 1"); 3181 3182 /* 3183 * Install MSI interrupts 3184 */ 3185 if ((intr_types & DDI_INTR_TYPE_MSI) && 3186 (ixgbe->intr_force <= IXGBE_INTR_MSI)) { 3187 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI); 3188 if (rc == IXGBE_SUCCESS) 3189 return (IXGBE_SUCCESS); 3190 3191 ixgbe_log(ixgbe, 3192 "Allocate MSI failed, trying Legacy interrupts..."); 3193 } 3194 3195 /* 3196 * Install legacy interrupts 3197 */ 3198 if (intr_types & DDI_INTR_TYPE_FIXED) { 3199 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED); 3200 if (rc == IXGBE_SUCCESS) 3201 return (IXGBE_SUCCESS); 3202 3203 ixgbe_log(ixgbe, 3204 "Allocate Legacy interrupts failed"); 3205 } 3206 3207 /* 3208 * If none of the 3 types succeeded, return failure 3209 */ 3210 return (IXGBE_FAILURE); 3211 } 3212 3213 /* 3214 * ixgbe_alloc_intr_handles - Allocate interrupt handles. 3215 * 3216 * For legacy and MSI, only 1 handle is needed. For MSI-X, 3217 * if fewer than 2 handles are available, return failure. 3218 * Upon success, this sets the number of Rx rings to a number that 3219 * matches the handles available for Rx interrupts. 3220 */ 3221 static int 3222 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type) 3223 { 3224 dev_info_t *devinfo; 3225 int request, count, avail, actual; 3226 int rx_rings, minimum; 3227 int rc; 3228 3229 devinfo = ixgbe->dip; 3230 3231 /* 3232 * Currently only 1 tx ring is supported. More tx rings 3233 * will be supported with future enhancement. 
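 *
 * The MSI-X request below is therefore num_rx_rings + 1: one vector
 * per rx ring, plus a single vector shared by tx and "other" causes.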
3234 */ 3235 if (ixgbe->num_tx_rings > 1) { 3236 ixgbe->num_tx_rings = 1; 3237 ixgbe_log(ixgbe, 3238 "Use only 1 MSI-X vector for tx, " 3239 "force tx queue number to 1"); 3240 } 3241 3242 switch (intr_type) { 3243 case DDI_INTR_TYPE_FIXED: 3244 request = 1; /* Request 1 legacy interrupt handle */ 3245 minimum = 1; 3246 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy"); 3247 break; 3248 3249 case DDI_INTR_TYPE_MSI: 3250 request = 1; /* Request 1 MSI interrupt handle */ 3251 minimum = 1; 3252 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI"); 3253 break; 3254 3255 case DDI_INTR_TYPE_MSIX: 3256 /* 3257 * Best number of vectors for the adapter is 3258 * # rx rings + # tx rings + 1 for other 3259 * But currently we only support number of vectors of 3260 * # rx rings + 1 for tx & other 3261 */ 3262 request = ixgbe->num_rx_rings + 1; 3263 minimum = 2; 3264 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X"); 3265 break; 3266 3267 default: 3268 ixgbe_log(ixgbe, 3269 "invalid call to ixgbe_alloc_intr_handles(): %d\n", 3270 intr_type); 3271 return (IXGBE_FAILURE); 3272 } 3273 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d", 3274 request, minimum); 3275 3276 /* 3277 * Get number of supported interrupts 3278 */ 3279 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 3280 if ((rc != DDI_SUCCESS) || (count < minimum)) { 3281 ixgbe_log(ixgbe, 3282 "Get interrupt number failed. Return: %d, count: %d", 3283 rc, count); 3284 return (IXGBE_FAILURE); 3285 } 3286 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count); 3287 3288 /* 3289 * Get number of available interrupts 3290 */ 3291 rc = ddi_intr_get_navail(devinfo, intr_type, &avail); 3292 if ((rc != DDI_SUCCESS) || (avail < minimum)) { 3293 ixgbe_log(ixgbe, 3294 "Get interrupt available number failed. " 3295 "Return: %d, available: %d", rc, avail); 3296 return (IXGBE_FAILURE); 3297 } 3298 IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail); 3299 3300 if (avail < request) { 3301 ixgbe_log(ixgbe, "Request %d handles, %d available", 3302 request, avail); 3303 request = avail; 3304 } 3305 3306 actual = 0; 3307 ixgbe->intr_cnt = 0; 3308 3309 /* 3310 * Allocate an array of interrupt handles 3311 */ 3312 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t); 3313 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP); 3314 3315 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0, 3316 request, &actual, DDI_INTR_ALLOC_NORMAL); 3317 if (rc != DDI_SUCCESS) { 3318 ixgbe_log(ixgbe, "Allocate interrupts failed. " 3319 "return: %d, request: %d, actual: %d", 3320 rc, request, actual); 3321 goto alloc_handle_fail; 3322 } 3323 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual); 3324 3325 ixgbe->intr_cnt = actual; 3326 3327 /* 3328 * Now we know the actual number of vectors. Here we assume that 3329 * tx and other will share 1 vector and all remaining (must be at 3330 * least 1 remaining) will be used for rx. 
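 *
 * For example (illustrative), if 5 handles were requested but only 3
 * were allocated, one vector is kept for tx/other and num_rx_rings is
 * reduced to 2.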
3331 */ 3332 if (actual < minimum) { 3333 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d", 3334 actual); 3335 goto alloc_handle_fail; 3336 } 3337 3338 /* 3339 * For MSI-X, actual might force us to reduce number of rx rings 3340 */ 3341 if (intr_type == DDI_INTR_TYPE_MSIX) { 3342 rx_rings = actual - 1; 3343 if (rx_rings < ixgbe->num_rx_rings) { 3344 ixgbe_log(ixgbe, 3345 "MSI-X vectors force Rx queue number to %d", 3346 rx_rings); 3347 ixgbe->num_rx_rings = rx_rings; 3348 } 3349 } 3350 3351 /* 3352 * Get priority for first vector, assume remaining are all the same 3353 */ 3354 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 3355 if (rc != DDI_SUCCESS) { 3356 ixgbe_log(ixgbe, 3357 "Get interrupt priority failed: %d", rc); 3358 goto alloc_handle_fail; 3359 } 3360 3361 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 3362 if (rc != DDI_SUCCESS) { 3363 ixgbe_log(ixgbe, 3364 "Get interrupt cap failed: %d", rc); 3365 goto alloc_handle_fail; 3366 } 3367 3368 ixgbe->intr_type = intr_type; 3369 3370 return (IXGBE_SUCCESS); 3371 3372 alloc_handle_fail: 3373 ixgbe_rem_intrs(ixgbe); 3374 3375 return (IXGBE_FAILURE); 3376 } 3377 3378 /* 3379 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type. 3380 * 3381 * Before adding the interrupt handlers, the interrupt vectors have 3382 * been allocated, and the rx/tx rings have also been allocated. 3383 */ 3384 static int 3385 ixgbe_add_intr_handlers(ixgbe_t *ixgbe) 3386 { 3387 ixgbe_rx_ring_t *rx_ring; 3388 int vector; 3389 int rc; 3390 int i; 3391 3392 vector = 0; 3393 3394 switch (ixgbe->intr_type) { 3395 case DDI_INTR_TYPE_MSIX: 3396 /* 3397 * Add interrupt handler for tx + other 3398 */ 3399 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3400 (ddi_intr_handler_t *)ixgbe_intr_tx_other, 3401 (void *)ixgbe, NULL); 3402 if (rc != DDI_SUCCESS) { 3403 ixgbe_log(ixgbe, 3404 "Add tx/other interrupt handler failed: %d", rc); 3405 return (IXGBE_FAILURE); 3406 } 3407 vector++; 3408 3409 /* 3410 * Add interrupt handler for each rx ring 3411 */ 3412 for (i = 0; i < ixgbe->num_rx_rings; i++) { 3413 rx_ring = &ixgbe->rx_rings[i]; 3414 3415 /* 3416 * install pointer to vect_map[vector] 3417 */ 3418 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3419 (ddi_intr_handler_t *)ixgbe_intr_rx, 3420 (void *)&ixgbe->vect_map[vector], NULL); 3421 3422 if (rc != DDI_SUCCESS) { 3423 ixgbe_log(ixgbe, 3424 "Add rx interrupt handler failed. 
" 3425 "return: %d, rx ring: %d", rc, i); 3426 for (vector--; vector >= 0; vector--) { 3427 (void) ddi_intr_remove_handler( 3428 ixgbe->htable[vector]); 3429 } 3430 return (IXGBE_FAILURE); 3431 } 3432 3433 rx_ring->intr_vector = vector; 3434 3435 vector++; 3436 } 3437 break; 3438 3439 case DDI_INTR_TYPE_MSI: 3440 /* 3441 * Add interrupt handlers for the only vector 3442 */ 3443 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3444 (ddi_intr_handler_t *)ixgbe_intr_msi, 3445 (void *)ixgbe, NULL); 3446 3447 if (rc != DDI_SUCCESS) { 3448 ixgbe_log(ixgbe, 3449 "Add MSI interrupt handler failed: %d", rc); 3450 return (IXGBE_FAILURE); 3451 } 3452 3453 rx_ring = &ixgbe->rx_rings[0]; 3454 rx_ring->intr_vector = vector; 3455 3456 vector++; 3457 break; 3458 3459 case DDI_INTR_TYPE_FIXED: 3460 /* 3461 * Add interrupt handlers for the only vector 3462 */ 3463 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3464 (ddi_intr_handler_t *)ixgbe_intr_legacy, 3465 (void *)ixgbe, NULL); 3466 3467 if (rc != DDI_SUCCESS) { 3468 ixgbe_log(ixgbe, 3469 "Add legacy interrupt handler failed: %d", rc); 3470 return (IXGBE_FAILURE); 3471 } 3472 3473 rx_ring = &ixgbe->rx_rings[0]; 3474 rx_ring->intr_vector = vector; 3475 3476 vector++; 3477 break; 3478 3479 default: 3480 return (IXGBE_FAILURE); 3481 } 3482 3483 ASSERT(vector == ixgbe->intr_cnt); 3484 3485 return (IXGBE_SUCCESS); 3486 } 3487 3488 #pragma inline(ixgbe_map_rxring_to_vector) 3489 /* 3490 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector. 3491 */ 3492 static void 3493 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx) 3494 { 3495 ixgbe->vect_map[v_idx].ixgbe = ixgbe; 3496 3497 /* 3498 * Set bit in map 3499 */ 3500 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 3501 3502 /* 3503 * Count bits set 3504 */ 3505 ixgbe->vect_map[v_idx].rxr_cnt++; 3506 3507 /* 3508 * Remember bit position 3509 */ 3510 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx; 3511 } 3512 3513 #pragma inline(ixgbe_map_txring_to_vector) 3514 /* 3515 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector. 3516 */ 3517 static void 3518 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx) 3519 { 3520 ixgbe->vect_map[v_idx].ixgbe = ixgbe; 3521 3522 /* 3523 * Set bit in map 3524 */ 3525 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx); 3526 3527 /* 3528 * Count bits set 3529 */ 3530 ixgbe->vect_map[v_idx].txr_cnt++; 3531 3532 /* 3533 * Remember bit position 3534 */ 3535 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx; 3536 } 3537 3538 /* 3539 * ixgbe_set_ivar - Set the given entry in the given interrupt vector 3540 * allocation register (IVAR). 3541 */ 3542 static void 3543 ixgbe_set_ivar(ixgbe_t *ixgbe, uint16_t int_alloc_entry, uint8_t msix_vector) 3544 { 3545 struct ixgbe_hw *hw = &ixgbe->hw; 3546 u32 ivar, index; 3547 3548 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 3549 index = (int_alloc_entry >> 2) & 0x1F; 3550 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3551 ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3))); 3552 ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3))); 3553 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 3554 } 3555 3556 /* 3557 * ixgbe_map_rings_to_vectors - Map descriptor rings to interrupt vectors. 3558 * 3559 * For msi-x, this currently implements only the scheme which is 3560 * 1 vector for tx + other, 1 vector for each rx ring. 
3561 */ 3562 static int 3563 ixgbe_map_rings_to_vectors(ixgbe_t *ixgbe) 3564 { 3565 int i, vector = 0; 3566 int vect_remain = ixgbe->intr_cnt; 3567 3568 /* initialize vector map */ 3569 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 3570 3571 /* 3572 * non-MSI-X case is very simple: all interrupts on vector 0 3573 */ 3574 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 3575 ixgbe_map_rxring_to_vector(ixgbe, 0, 0); 3576 ixgbe_map_txring_to_vector(ixgbe, 0, 0); 3577 return (IXGBE_SUCCESS); 3578 } 3579 3580 /* 3581 * Ring/vector mapping for MSI-X 3582 */ 3583 3584 /* 3585 * Map vector 0 to tx 3586 */ 3587 ixgbe_map_txring_to_vector(ixgbe, 0, vector++); 3588 vect_remain--; 3589 3590 /* 3591 * Map remaining vectors to rx rings 3592 */ 3593 for (i = 0; i < vect_remain; i++) { 3594 ixgbe_map_rxring_to_vector(ixgbe, i, vector++); 3595 } 3596 3597 return (IXGBE_SUCCESS); 3598 } 3599 3600 /* 3601 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s). 3602 * 3603 * This relies on queue/vector mapping already set up in the 3604 * vect_map[] structures 3605 */ 3606 static void 3607 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe) 3608 { 3609 struct ixgbe_hw *hw = &ixgbe->hw; 3610 ixgbe_ring_vector_t *vect; /* vector bitmap */ 3611 int r_idx; /* ring index */ 3612 int v_idx; /* vector index */ 3613 3614 /* 3615 * Clear any previous entries 3616 */ 3617 for (v_idx = 0; v_idx < IXGBE_IVAR_REG_NUM; v_idx++) 3618 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 3619 3620 /* 3621 * "Other" is always on vector 0 3622 */ 3623 ixgbe_set_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0); 3624 3625 /* 3626 * For each interrupt vector, populate the IVAR table 3627 */ 3628 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) { 3629 vect = &ixgbe->vect_map[v_idx]; 3630 3631 /* 3632 * For each rx ring bit set 3633 */ 3634 r_idx = bt_getlowbit(vect->rx_map, 0, 3635 (ixgbe->num_rx_rings - 1)); 3636 3637 while (r_idx >= 0) { 3638 ixgbe_set_ivar(ixgbe, IXGBE_IVAR_RX_QUEUE(r_idx), 3639 v_idx); 3640 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 3641 (ixgbe->num_rx_rings - 1)); 3642 } 3643 3644 /* 3645 * For each tx ring bit set 3646 */ 3647 r_idx = bt_getlowbit(vect->tx_map, 0, 3648 (ixgbe->num_tx_rings - 1)); 3649 3650 while (r_idx >= 0) { 3651 ixgbe_set_ivar(ixgbe, IXGBE_IVAR_TX_QUEUE(r_idx), 3652 v_idx); 3653 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 3654 (ixgbe->num_tx_rings - 1)); 3655 } 3656 } 3657 } 3658 3659 /* 3660 * ixgbe_rem_intr_handlers - Remove the interrupt handlers. 3661 */ 3662 static void 3663 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe) 3664 { 3665 int i; 3666 int rc; 3667 3668 for (i = 0; i < ixgbe->intr_cnt; i++) { 3669 rc = ddi_intr_remove_handler(ixgbe->htable[i]); 3670 if (rc != DDI_SUCCESS) { 3671 IXGBE_DEBUGLOG_1(ixgbe, 3672 "Remove intr handler failed: %d", rc); 3673 } 3674 } 3675 } 3676 3677 /* 3678 * ixgbe_rem_intrs - Remove the allocated interrupts. 3679 */ 3680 static void 3681 ixgbe_rem_intrs(ixgbe_t *ixgbe) 3682 { 3683 int i; 3684 int rc; 3685 3686 for (i = 0; i < ixgbe->intr_cnt; i++) { 3687 rc = ddi_intr_free(ixgbe->htable[i]); 3688 if (rc != DDI_SUCCESS) { 3689 IXGBE_DEBUGLOG_1(ixgbe, 3690 "Free intr failed: %d", rc); 3691 } 3692 } 3693 3694 kmem_free(ixgbe->htable, ixgbe->intr_size); 3695 ixgbe->htable = NULL; 3696 } 3697 3698 /* 3699 * ixgbe_enable_intrs - Enable all the ddi interrupts. 
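 *
 * Uses ddi_intr_block_enable() when the DDI_INTR_FLAG_BLOCK capability
 * is present; otherwise each handle is enabled individually with
 * ddi_intr_enable().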
3700 */ 3701 static int 3702 ixgbe_enable_intrs(ixgbe_t *ixgbe) 3703 { 3704 int i; 3705 int rc; 3706 3707 /* 3708 * Enable interrupts 3709 */ 3710 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 3711 /* 3712 * Call ddi_intr_block_enable() for MSI 3713 */ 3714 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt); 3715 if (rc != DDI_SUCCESS) { 3716 ixgbe_log(ixgbe, 3717 "Enable block intr failed: %d", rc); 3718 return (IXGBE_FAILURE); 3719 } 3720 } else { 3721 /* 3722 * Call ddi_intr_enable() for Legacy/MSI non block enable 3723 */ 3724 for (i = 0; i < ixgbe->intr_cnt; i++) { 3725 rc = ddi_intr_enable(ixgbe->htable[i]); 3726 if (rc != DDI_SUCCESS) { 3727 ixgbe_log(ixgbe, 3728 "Enable intr failed: %d", rc); 3729 return (IXGBE_FAILURE); 3730 } 3731 } 3732 } 3733 3734 return (IXGBE_SUCCESS); 3735 } 3736 3737 /* 3738 * ixgbe_disable_intrs - Disable all the interrupts. 3739 */ 3740 static int 3741 ixgbe_disable_intrs(ixgbe_t *ixgbe) 3742 { 3743 int i; 3744 int rc; 3745 3746 /* 3747 * Disable all interrupts 3748 */ 3749 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 3750 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt); 3751 if (rc != DDI_SUCCESS) { 3752 ixgbe_log(ixgbe, 3753 "Disable block intr failed: %d", rc); 3754 return (IXGBE_FAILURE); 3755 } 3756 } else { 3757 for (i = 0; i < ixgbe->intr_cnt; i++) { 3758 rc = ddi_intr_disable(ixgbe->htable[i]); 3759 if (rc != DDI_SUCCESS) { 3760 ixgbe_log(ixgbe, 3761 "Disable intr failed: %d", rc); 3762 return (IXGBE_FAILURE); 3763 } 3764 } 3765 } 3766 3767 return (IXGBE_SUCCESS); 3768 } 3769 3770 /* 3771 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware. 3772 */ 3773 static void 3774 ixgbe_get_hw_state(ixgbe_t *ixgbe) 3775 { 3776 struct ixgbe_hw *hw = &ixgbe->hw; 3777 uint32_t links; 3778 uint32_t pcs1g_anlp = 0; 3779 uint32_t pcs1g_ana = 0; 3780 3781 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3782 ixgbe->param_lp_1000fdx_cap = 0; 3783 ixgbe->param_lp_100fdx_cap = 0; 3784 3785 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 3786 if (links & IXGBE_LINKS_PCS_1G_EN) { 3787 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 3788 pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 3789 3790 ixgbe->param_lp_1000fdx_cap = 3791 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 3792 ixgbe->param_lp_100fdx_cap = 3793 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 3794 } 3795 3796 ixgbe->param_1000fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0; 3797 ixgbe->param_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0; 3798 } 3799 3800 /* 3801 * ixgbe_get_driver_control - Notify that driver is in control of device. 3802 */ 3803 static void 3804 ixgbe_get_driver_control(struct ixgbe_hw *hw) 3805 { 3806 uint32_t ctrl_ext; 3807 3808 /* 3809 * Notify firmware that driver is in control of device 3810 */ 3811 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3812 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 3813 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3814 } 3815 3816 /* 3817 * ixgbe_release_driver_control - Notify that driver is no longer in control 3818 * of device. 3819 */ 3820 static void 3821 ixgbe_release_driver_control(struct ixgbe_hw *hw) 3822 { 3823 uint32_t ctrl_ext; 3824 3825 /* 3826 * Notify firmware that driver is no longer in control of device 3827 */ 3828 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3829 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 3830 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3831 } 3832 3833 /* 3834 * ixgbe_atomic_reserve - Atomic decrease operation. 
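 *
 * Atomically subtracts n from *count_p and returns the new value, or
 * -1 without modifying the count if fewer than n are available. A
 * typical (illustrative) use is reserving free tx descriptors:
 *	if (ixgbe_atomic_reserve(&tx_ring->tbd_free, num) < 0)
 *		return (B_FALSE);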
3835 */ 3836 int 3837 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n) 3838 { 3839 uint32_t oldval; 3840 uint32_t newval; 3841 3842 /* 3843 * ATOMICALLY 3844 */ 3845 do { 3846 oldval = *count_p; 3847 if (oldval < n) 3848 return (-1); 3849 newval = oldval - n; 3850 } while (atomic_cas_32(count_p, oldval, newval) != oldval); 3851 3852 return (newval); 3853 } 3854 3855 /* 3856 * ixgbe_mc_table_itr - Traverse the entries in the multicast table. 3857 */ 3858 static uint8_t * 3859 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq) 3860 { 3861 _NOTE(ARGUNUSED(hw)); 3862 _NOTE(ARGUNUSED(vmdq)); 3863 uint8_t *addr = *upd_ptr; 3864 uint8_t *new_ptr; 3865 3866 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 3867 *upd_ptr = new_ptr; 3868 return (addr); 3869 } 3870 3871 /* 3872 * FMA support 3873 */ 3874 int 3875 ixgbe_check_acc_handle(ddi_acc_handle_t handle) 3876 { 3877 ddi_fm_error_t de; 3878 3879 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 3880 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 3881 return (de.fme_status); 3882 } 3883 3884 int 3885 ixgbe_check_dma_handle(ddi_dma_handle_t handle) 3886 { 3887 ddi_fm_error_t de; 3888 3889 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 3890 return (de.fme_status); 3891 } 3892 3893 /* 3894 * ixgbe_fm_error_cb - The IO fault service error handling callback function. 3895 */ 3896 static int 3897 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 3898 { 3899 _NOTE(ARGUNUSED(impl_data)); 3900 /* 3901 * as the driver can always deal with an error in any dma or 3902 * access handle, we can just return the fme_status value. 3903 */ 3904 pci_ereport_post(dip, err, NULL); 3905 return (err->fme_status); 3906 } 3907 3908 static void 3909 ixgbe_fm_init(ixgbe_t *ixgbe) 3910 { 3911 ddi_iblock_cookie_t iblk; 3912 int fma_acc_flag, fma_dma_flag; 3913 3914 /* 3915 * Only register with IO Fault Services if we have some capability 3916 */ 3917 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 3918 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 3919 fma_acc_flag = 1; 3920 } else { 3921 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 3922 fma_acc_flag = 0; 3923 } 3924 3925 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 3926 fma_dma_flag = 1; 3927 } else { 3928 fma_dma_flag = 0; 3929 } 3930 3931 ixgbe_set_fma_flags(fma_acc_flag, fma_dma_flag); 3932 3933 if (ixgbe->fm_capabilities) { 3934 3935 /* 3936 * Register capabilities with IO Fault Services 3937 */ 3938 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk); 3939 3940 /* 3941 * Initialize pci ereport capabilities if ereport capable 3942 */ 3943 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 3944 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 3945 pci_ereport_setup(ixgbe->dip); 3946 3947 /* 3948 * Register error callback if error callback capable 3949 */ 3950 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 3951 ddi_fm_handler_register(ixgbe->dip, 3952 ixgbe_fm_error_cb, (void*) ixgbe); 3953 } 3954 } 3955 3956 static void 3957 ixgbe_fm_fini(ixgbe_t *ixgbe) 3958 { 3959 /* 3960 * Only unregister FMA capabilities if they are registered 3961 */ 3962 if (ixgbe->fm_capabilities) { 3963 3964 /* 3965 * Release any resources allocated by pci_ereport_setup() 3966 */ 3967 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 3968 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 3969 pci_ereport_teardown(ixgbe->dip); 3970 3971 /* 3972 * Un-register error callback if error callback capable 3973 */ 3974 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 3975 
ddi_fm_handler_unregister(ixgbe->dip); 3976 3977 /* 3978 * Unregister from IO Fault Service 3979 */ 3980 ddi_fm_fini(ixgbe->dip); 3981 } 3982 } 3983 3984 void 3985 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail) 3986 { 3987 uint64_t ena; 3988 char buf[FM_MAX_CLASS]; 3989 3990 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 3991 ena = fm_ena_generate(0, FM_ENA_FMT1); 3992 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) { 3993 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP, 3994 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 3995 } 3996 } 3997