/*
 * CDDL HEADER START
 *
 * Copyright(c) 2007-2008 Intel Corporation. All rights reserved.
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 *	http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms of the CDDL.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "ixgbe_sw.h"

static char ident[] = "Intel 10Gb Ethernet 1.0.0";

/*
 * Local function prototypes
 */
static int ixgbe_register_mac(ixgbe_t *);
static int ixgbe_identify_hardware(ixgbe_t *);
static int ixgbe_regs_map(ixgbe_t *);
static void ixgbe_init_properties(ixgbe_t *);
static int ixgbe_init_driver_settings(ixgbe_t *);
static void ixgbe_init_locks(ixgbe_t *);
static void ixgbe_destroy_locks(ixgbe_t *);
static int ixgbe_init(ixgbe_t *);
static int ixgbe_chip_start(ixgbe_t *);
static void ixgbe_chip_stop(ixgbe_t *);
static int ixgbe_reset(ixgbe_t *);
static void ixgbe_tx_clean(ixgbe_t *);
static boolean_t ixgbe_tx_drain(ixgbe_t *);
static boolean_t ixgbe_rx_drain(ixgbe_t *);
static int ixgbe_alloc_rings(ixgbe_t *);
static int ixgbe_init_rings(ixgbe_t *);
static void ixgbe_free_rings(ixgbe_t *);
static void ixgbe_fini_rings(ixgbe_t *);
static void ixgbe_setup_rings(ixgbe_t *);
static void ixgbe_setup_rx(ixgbe_t *);
static void ixgbe_setup_tx(ixgbe_t *);
static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
static void ixgbe_setup_rss(ixgbe_t *);
static void ixgbe_init_unicst(ixgbe_t *);
static void ixgbe_setup_multicst(ixgbe_t *);
static void ixgbe_get_hw_state(ixgbe_t *);
static void ixgbe_get_conf(ixgbe_t *);
static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
static boolean_t ixgbe_driver_link_check(ixgbe_t *);
static void ixgbe_local_timer(void *);
static void ixgbe_arm_watchdog_timer(ixgbe_t *);
static void ixgbe_start_watchdog_timer(ixgbe_t *);
static void ixgbe_restart_watchdog_timer(ixgbe_t *);
static void ixgbe_stop_watchdog_timer(ixgbe_t *);
static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
static boolean_t is_valid_mac_addr(uint8_t *);
static boolean_t ixgbe_stall_check(ixgbe_t *);
static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
static boolean_t ixgbe_find_mac_address(ixgbe_t *);
static int ixgbe_alloc_intrs(ixgbe_t *);
static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
static int ixgbe_add_intr_handlers(ixgbe_t *);
static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
static void ixgbe_set_ivar(ixgbe_t *, uint16_t, uint8_t);
static int ixgbe_map_rings_to_vectors(ixgbe_t *);
static void ixgbe_setup_adapter_vector(ixgbe_t *);
static void ixgbe_rem_intr_handlers(ixgbe_t *);
static void ixgbe_rem_intrs(ixgbe_t *);
static int ixgbe_enable_intrs(ixgbe_t *);
static int ixgbe_disable_intrs(ixgbe_t *);
static uint_t ixgbe_intr_legacy(void *, void *);
static uint_t ixgbe_intr_msi(void *, void *);
static uint_t ixgbe_intr_rx(void *, void *);
static uint_t ixgbe_intr_tx_other(void *, void *);
static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
static void ixgbe_intr_other_work(ixgbe_t *);
static void ixgbe_get_driver_control(struct ixgbe_hw *);
static void ixgbe_release_driver_control(struct ixgbe_hw *);

static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
static int ixgbe_resume(dev_info_t *);
static int ixgbe_suspend(dev_info_t *);
static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);

static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void ixgbe_fm_init(ixgbe_t *);
static void ixgbe_fm_fini(ixgbe_t *);

static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power		/* devo_power */
};

static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	ident,			/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};

/*
 * Access attributes for register mapping
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Loopback properties
 */
static lb_property_t lb_normal = {
	normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

#define	IXGBE_M_CALLBACK_FLAGS	(MC_IOCTL | MC_GETCAPAB)

static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	ixgbe_m_unicst,
	ixgbe_m_tx,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab
};
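/*
 * Note: the flags word above advertises to the MAC layer which optional
 * callbacks this driver implements; with MC_IOCTL and MC_GETCAPAB set,
 * only mc_ioctl and mc_getcapab (in addition to the mandatory entry
 * points) are expected to be invoked.
 */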
/*
 * Module Initialization Functions.
 */

int
_init(void)
{
	int status;

	mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);

	status = mod_install(&ixgbe_modlinkage);

	if (status != DDI_SUCCESS) {
		mac_fini_ops(&ixgbe_dev_ops);
	}

	return (status);
}

int
_fini(void)
{
	int status;

	status = mod_remove(&ixgbe_modlinkage);

	if (status == DDI_SUCCESS) {
		mac_fini_ops(&ixgbe_dev_ops);
	}

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	int status;

	status = mod_info(&ixgbe_modlinkage, modinfop);

	return (status);
}
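/*
 * Note on ordering: mac_init_ops() must bracket mod_install()/mod_remove().
 * It prepares ixgbe_dev_ops for use by the MAC layer before the module is
 * installed, so a failed mod_install() in _init() (and a successful
 * mod_remove() in _fini()) is followed by mac_fini_ops() to undo it.
 */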
/*
 * ixgbe_attach - Driver attach.
 *
 * This function is the device specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	ixgbe_t *ixgbe;
	struct ixgbe_osdep *osdep;
	struct ixgbe_hw *hw;
	int instance;

	/*
	 * Check the command and perform corresponding operations
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		return (ixgbe_resume(devinfo));

	case DDI_ATTACH:
		break;
	}

	/* Get the device instance */
	instance = ddi_get_instance(devinfo);

	/* Allocate memory for the instance data structure */
	ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

	ixgbe->dip = devinfo;
	ixgbe->instance = instance;

	hw = &ixgbe->hw;
	osdep = &ixgbe->osdep;
	hw->back = osdep;
	osdep->ixgbe = ixgbe;

	/* Attach the instance pointer to the dev_info data structure */
	ddi_set_driver_private(devinfo, ixgbe);

	/*
	 * Initialize for FMA support
	 */
	ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
	    0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
	ixgbe_fm_init(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;

	/*
	 * Map PCI config space registers
	 */
	if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map PCI configurations");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

	/*
	 * Identify the chipset family
	 */
	if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to identify hardware");
		goto attach_fail;
	}

	/*
	 * Map device registers
	 */
	if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map device registers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

	/*
	 * Initialize driver parameters
	 */
	ixgbe_init_properties(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

	/*
	 * Allocate interrupts
	 */
	if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

	/*
	 * Allocate rx/tx rings based on the ring numbers.
	 * The actual numbers of rx/tx rings are decided by the number of
	 * allocated interrupt vectors, so we should allocate the rings after
	 * interrupts are allocated.
	 */
	if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

	/*
	 * Map rings to interrupt vectors
	 */
	if (ixgbe_map_rings_to_vectors(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to map rings to vectors");
		goto attach_fail;
	}

	/*
	 * Add interrupt handlers
	 */
	if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to add interrupt handlers");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

	/*
	 * Initialize driver settings
	 */
	if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize driver settings");
		goto attach_fail;
	}

	/*
	 * Initialize mutexes for this device.
	 * Do this before enabling the interrupt handler and
	 * register the softint to avoid the condition where
	 * interrupt handler can try using uninitialized mutex.
	 */
	ixgbe_init_locks(ixgbe);
	ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

	/*
	 * Initialize chipset hardware
	 */
	if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize adapter");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

	if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	/*
	 * Initialize DMA and hardware settings for rx/tx rings
	 */
	if (ixgbe_init_rings(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize rings");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_INIT_RINGS;

	/*
	 * Initialize statistics
	 */
	if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize statistics");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

	/*
	 * Initialize NDD parameters
	 */
	if (ixgbe_nd_init(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize ndd");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_NDD;

	/*
	 * Register the driver to the MAC
	 */
	if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to register MAC");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

	/*
	 * Now that mutex locks are initialized, and the chip is also
	 * initialized, enable interrupts.
	 */
	if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

	ixgbe->ixgbe_state |= IXGBE_INITIALIZED;

	return (DDI_SUCCESS);

attach_fail:
	ixgbe_unconfigure(devinfo, ixgbe);
	return (DDI_FAILURE);
}
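/*
 * Note: each ATTACH_PROGRESS_* bit set above records one completed
 * initialization step. On any failure, attach_fail reaches
 * ixgbe_unconfigure(), which tests these bits and tears down only the
 * steps that actually completed, so a partial attach unwinds cleanly.
 */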
/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by
 * this driver is freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ixgbe_t *ixgbe;

	/*
	 * Check detach command
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (ixgbe_suspend(devinfo));

	case DDI_DETACH:
		break;
	}

	/*
	 * Get the pointer to the driver private data structure
	 */
	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	/*
	 * Unregister MAC. If failed, we have to fail the detach
	 */
	if (mac_unregister(ixgbe->mac_hdl) != 0) {
		ixgbe_error(ixgbe, "Failed to unregister MAC");
		return (DDI_FAILURE);
	}
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_MAC;

	/*
	 * If the device is still running, it needs to be stopped first.
	 * This check is necessary because under some specific circumstances,
	 * the detach routine can be called without stopping the interface
	 * first.
	 */
	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		ixgbe->ixgbe_state &= ~IXGBE_STARTED;
		ixgbe_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
		/* Disable and stop the watchdog timer */
		ixgbe_disable_watchdog_timer(ixgbe);
	} else
		mutex_exit(&ixgbe->gen_lock);
	/*
	 * Check if there are still rx buffers held by the upper layer.
	 * If so, fail the detach.
	 */
	if (!ixgbe_rx_drain(ixgbe))
		return (DDI_FAILURE);

	/*
	 * Do the remaining unconfigure routines
	 */
	ixgbe_unconfigure(devinfo, ixgbe);

	return (DDI_SUCCESS);
}

static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * Unregister MAC
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free ndd parameters
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_NDD) {
		ixgbe_nd_cleanup(ixgbe);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Release the DMA resources of rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT_RINGS) {
		ixgbe_fini_rings(ixgbe);
	}

	/*
	 * Stop the chipset
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}

/*
 * ixgbe_register_mac - Register the driver and its function pointers with
 * the GLD interface.
 */
static int
ixgbe_register_mac(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	mac_register_t *mac;
	int status;

	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
		return (IXGBE_FAILURE);

	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	mac->m_driver = ixgbe;
	mac->m_dip = ixgbe->dip;
	mac->m_src_addr = hw->mac.addr;
	mac->m_callbacks = &ixgbe_m_callbacks;
	mac->m_min_sdu = 0;
	mac->m_max_sdu = ixgbe->default_mtu;
	mac->m_margin = VLAN_TAGSZ;

	status = mac_register(mac, &ixgbe->mac_hdl);

	mac_free(mac);

	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
}
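/*
 * Note: m_max_sdu exports the current MTU to the MAC layer, and
 * m_margin = VLAN_TAGSZ tells it that the driver can accept frames
 * exceeding the SDU by a VLAN tag's worth of bytes.
 */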
/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_regs_map - Map the device registers.
 */
static int
ixgbe_regs_map(ixgbe_t *ixgbe)
{
	dev_info_t *devinfo = ixgbe->dip;
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;
	off_t mem_size;

	/*
	 * First get the size of device registers to be mapped.
	 */
	if (ddi_dev_regsize(devinfo, 1, &mem_size) != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Call ddi_regs_map_setup() to map registers
	 */
	if ((ddi_regs_map_setup(devinfo, 1,
	    (caddr_t *)&hw->hw_addr, 0,
	    mem_size, &ixgbe_regs_acc_attr,
	    &osdep->reg_handle)) != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_properties - Initialize driver properties.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings,
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);
}

/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be setup during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t rx_size;
	uint32_t tx_size;
	int i;

	/*
	 * Initialize chipset specific hardware function pointers
	 */
	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Set rx buffer size
	 *
	 * The IP header alignment room is counted in the calculation.
	 * The rx buffer size is in unit of 1K that is required by the
	 * chipset hardware.
	 */
	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
	ixgbe->rx_buf_size = ((rx_size >> 10) +
	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	/*
	 * Set tx buffer size
	 */
	tx_size = ixgbe->max_frame_size;
	ixgbe->tx_buf_size = ((tx_size >> 10) +
	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
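	/*
	 * A worked example of the 1 KB round-up above (value is
	 * illustrative): for rx_size = 1520, (1520 >> 10) = 1 whole 1 KB
	 * unit and the remainder (1520 & 1023) = 496 is non-zero, so
	 * (1 + 1) << 10 = 2048, i.e. the buffer is rounded up to 2 KB.
	 */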
	/*
	 * Initialize rx/tx rings parameters
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		rx_ring->index = i;
		rx_ring->ixgbe = ixgbe;

		rx_ring->ring_size = ixgbe->rx_ring_size;
		rx_ring->free_list_size = ixgbe->rx_ring_size;
		rx_ring->copy_thresh = ixgbe->rx_copy_thresh;
		rx_ring->limit_per_intr = ixgbe->rx_limit_per_intr;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		tx_ring->index = i;
		tx_ring->ixgbe = ixgbe;
		if (ixgbe->tx_head_wb_enable)
			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
		else
			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

		tx_ring->ring_size = ixgbe->tx_ring_size;
		tx_ring->free_list_size = ixgbe->tx_ring_size +
		    (ixgbe->tx_ring_size >> 1);
		tx_ring->copy_thresh = ixgbe->tx_copy_thresh;
		tx_ring->recycle_thresh = ixgbe->tx_recycle_thresh;
		tx_ring->overload_thresh = ixgbe->tx_overload_thresh;
		tx_ring->resched_thresh = ixgbe->tx_resched_thresh;
	}

	/*
	 * Initialize values of interrupt throttling rate
	 */
	for (i = 1; i < IXGBE_MAX_RING_VECTOR; i++)
		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

	/*
	 * The initial link state should be "unknown"
	 */
	ixgbe->link_state = LINK_STATE_UNKNOWN;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_locks - Initialize locks.
 */
static void
ixgbe_init_locks(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		mutex_init(&rx_ring->rx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&rx_ring->recycle_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		mutex_init(&tx_ring->tx_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->recycle_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->tcb_head_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
		mutex_init(&tx_ring->tcb_tail_lock, NULL,
		    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
	}

	mutex_init(&ixgbe->gen_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

	mutex_init(&ixgbe->watchdog_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
}
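/*
 * Note: every mutex above is initialized with DDI_INTR_PRI(intr_pri) so
 * that it can be acquired safely both by the interrupt handlers registered
 * at that priority and by base-level code.
 */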
/*
 * ixgbe_destroy_locks - Destroy locks.
 */
static void
ixgbe_destroy_locks(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		mutex_destroy(&rx_ring->rx_lock);
		mutex_destroy(&rx_ring->recycle_lock);
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		mutex_destroy(&tx_ring->tx_lock);
		mutex_destroy(&tx_ring->recycle_lock);
		mutex_destroy(&tx_ring->tcb_head_lock);
		mutex_destroy(&tx_ring->tcb_tail_lock);
	}

	mutex_destroy(&ixgbe->gen_lock);
	mutex_destroy(&ixgbe->watchdog_lock);
}

static int
ixgbe_resume(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		if (ixgbe_start(ixgbe) != IXGBE_SUCCESS) {
			mutex_exit(&ixgbe->gen_lock);
			return (DDI_FAILURE);
		}

		/*
		 * Enable and start the watchdog timer
		 */
		ixgbe_enable_watchdog_timer(ixgbe);
	}

	ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;

	mutex_exit(&ixgbe->gen_lock);

	return (DDI_SUCCESS);
}

static int
ixgbe_suspend(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	ixgbe->ixgbe_state |= IXGBE_SUSPENDED;

	ixgbe_stop(ixgbe);

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	return (DDI_SUCCESS);
}
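/*
 * Note: ixgbe_suspend() leaves IXGBE_STARTED untouched and only adds
 * IXGBE_SUSPENDED, so ixgbe_resume() can use IXGBE_STARTED to decide
 * whether the interface was running and must be restarted.
 */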
Please contact " 1003 "the vendor to update the NVM."); 1004 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1005 goto init_fail; 1006 } 1007 } 1008 1009 /* 1010 * Setup default flow control thresholds - enable/disable 1011 * & flow control type is controlled by ixgbe.conf 1012 */ 1013 hw->fc.high_water = DEFAULT_FCRTH; 1014 hw->fc.low_water = DEFAULT_FCRTL; 1015 hw->fc.pause_time = DEFAULT_FCPAUSE; 1016 hw->fc.send_xon = B_TRUE; 1017 1018 /* 1019 * Don't wait for auto-negotiation to complete 1020 */ 1021 hw->phy.autoneg_wait_to_complete = B_FALSE; 1022 1023 /* 1024 * Initialize link settings 1025 */ 1026 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE); 1027 1028 /* 1029 * Initialize the chipset hardware 1030 */ 1031 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) { 1032 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1033 goto init_fail; 1034 } 1035 1036 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) { 1037 goto init_fail; 1038 } 1039 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 1040 goto init_fail; 1041 } 1042 1043 mutex_exit(&ixgbe->gen_lock); 1044 return (IXGBE_SUCCESS); 1045 1046 init_fail: 1047 /* 1048 * Reset PHY 1049 */ 1050 (void) ixgbe_reset_phy(hw); 1051 1052 mutex_exit(&ixgbe->gen_lock); 1053 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 1054 return (IXGBE_FAILURE); 1055 } 1056 1057 /* 1058 * ixgbe_init_rings - Allocate DMA resources for all rx/tx rings and 1059 * initialize relevant hardware settings. 1060 */ 1061 static int 1062 ixgbe_init_rings(ixgbe_t *ixgbe) 1063 { 1064 int i; 1065 1066 /* 1067 * Allocate buffers for all the rx/tx rings 1068 */ 1069 if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) 1070 return (IXGBE_FAILURE); 1071 1072 /* 1073 * Setup the rx/tx rings 1074 */ 1075 mutex_enter(&ixgbe->gen_lock); 1076 1077 for (i = 0; i < ixgbe->num_rx_rings; i++) 1078 mutex_enter(&ixgbe->rx_rings[i].rx_lock); 1079 for (i = 0; i < ixgbe->num_tx_rings; i++) 1080 mutex_enter(&ixgbe->tx_rings[i].tx_lock); 1081 1082 ixgbe_setup_rings(ixgbe); 1083 1084 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) 1085 mutex_exit(&ixgbe->tx_rings[i].tx_lock); 1086 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) 1087 mutex_exit(&ixgbe->rx_rings[i].rx_lock); 1088 1089 mutex_exit(&ixgbe->gen_lock); 1090 1091 return (IXGBE_SUCCESS); 1092 } 1093 1094 /* 1095 * ixgbe_fini_rings - Release DMA resources of all rx/tx rings. 1096 */ 1097 static void 1098 ixgbe_fini_rings(ixgbe_t *ixgbe) 1099 { 1100 /* 1101 * Release the DMA/memory resources of rx/tx rings 1102 */ 1103 ixgbe_free_dma(ixgbe); 1104 } 1105 1106 /* 1107 * ixgbe_chip_start - Initialize and start the chipset hardware. 1108 */ 1109 static int 1110 ixgbe_chip_start(ixgbe_t *ixgbe) 1111 { 1112 struct ixgbe_hw *hw = &ixgbe->hw; 1113 int i; 1114 1115 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1116 1117 /* 1118 * Get the mac address 1119 * This function should handle SPARC case correctly. 
/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Get the mac address
	 * This function should handle SPARC case correctly.
	 */
	if (!ixgbe_find_mac_address(ixgbe)) {
		ixgbe_error(ixgbe, "Failed to get the mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Validate the mac address
	 */
	(void) ixgbe_init_rx_addrs(hw);
	if (!is_valid_mac_addr(hw->mac.addr)) {
		ixgbe_error(ixgbe, "Invalid mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Configure/Initialize hardware
	 */
	if (ixgbe_init_hw(hw) != IXGBE_SUCCESS) {
		ixgbe_error(ixgbe, "Failed to initialize hardware");
		return (IXGBE_FAILURE);
	}

	/*
	 * Setup adapter interrupt vectors
	 */
	ixgbe_setup_adapter_vector(ixgbe);

	/*
	 * Initialize unicast addresses.
	 */
	ixgbe_init_unicst(ixgbe);

	/*
	 * Setup and initialize the mctable structures.
	 */
	ixgbe_setup_multicst(ixgbe);

	/*
	 * Set interrupt throttling rate
	 */
	for (i = 0; i < ixgbe->intr_cnt; i++)
		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);

	/*
	 * Save the state of the phy
	 */
	ixgbe_get_hw_state(ixgbe);

	/*
	 * Make sure driver has control
	 */
	ixgbe_get_driver_control(hw);

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_chip_stop - Stop the chipset hardware
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Tell firmware driver is no longer in control
	 */
	ixgbe_release_driver_control(hw);

	/*
	 * Reset the chipset
	 */
	(void) ixgbe_reset_hw(hw);

	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);
}
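/*
 * Note: ixgbe_get_driver_control()/ixgbe_release_driver_control() form a
 * handshake with the adapter firmware (typically by setting and clearing a
 * driver-load indication such as the DRV_LOAD bit in CTRL_EXT) so firmware
 * knows whether a host driver owns the device. Chip start takes control as
 * its last step and chip stop releases it first, bracketing all hardware use.
 */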
/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
	int i;

	mutex_enter(&ixgbe->gen_lock);

	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
	ixgbe->ixgbe_state &= ~IXGBE_STARTED;

	/*
	 * Disable the adapter interrupts to stop any rx/tx activities
	 * before draining pending data and resetting hardware.
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending transmit packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto reset_failure;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto reset_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ixgbe->ixgbe_state |= IXGBE_STARTED;
	mutex_exit(&ixgbe->gen_lock);

	return (IXGBE_SUCCESS);

reset_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	mutex_exit(&ixgbe->gen_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}
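/*
 * Note on lock ordering: ixgbe_reset(), ixgbe_start() and ixgbe_stop() all
 * acquire gen_lock first, then every rx_lock in index order, then every
 * tx_lock in index order, and release them in the reverse order. Keeping
 * this single global order is what prevents deadlock between these paths.
 */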
/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	uint32_t desc_num;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i, j;

	LINK_LIST_INIT(&pending_list);

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];

		mutex_enter(&tx_ring->recycle_lock);

		/*
		 * Clean the pending tx data - the pending packets in the
		 * work_list that have no chances to be transmitted again.
		 *
		 * We must ensure the chipset is stopped or the link is down
		 * before cleaning the transmit packets.
		 */
		desc_num = 0;
		for (j = 0; j < tx_ring->ring_size; j++) {
			tcb = tx_ring->work_list[j];
			if (tcb != NULL) {
				desc_num += tcb->desc_num;

				tx_ring->work_list[j] = NULL;

				ixgbe_free_tcb(tcb);

				LIST_PUSH_TAIL(&pending_list, &tcb->link);
			}
		}

		if (desc_num > 0) {
			atomic_add_32(&tx_ring->tbd_free, desc_num);
			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

			/*
			 * Reset the head and tail pointers of the tbd ring;
			 * Reset the writeback head if it's enabled.
			 */
			tx_ring->tbd_head = 0;
			tx_ring->tbd_tail = 0;
			if (ixgbe->tx_head_wb_enable)
				*tx_ring->tbd_head_wb = 0;

			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDH(tx_ring->index), 0);
			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDT(tx_ring->index), 0);
		}

		mutex_exit(&tx_ring->recycle_lock);

		/*
		 * Add the tx control blocks in the pending list to
		 * the free list.
		 */
		ixgbe_put_free_list(tx_ring, &pending_list);
	}
}

/*
 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
 * transmitted.
 */
static boolean_t
ixgbe_tx_drain(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	boolean_t done;
	int i, j;

	/*
	 * Wait for a specific time to allow pending tx packets
	 * to be transmitted.
	 *
	 * Check the counter tbd_free to see if transmission is done.
	 * No lock protection is needed here.
	 *
	 * Return B_TRUE if all pending packets have been transmitted;
	 * Otherwise return B_FALSE;
	 */
	for (i = 0; i < TX_DRAIN_TIME; i++) {

		done = B_TRUE;
		for (j = 0; j < ixgbe->num_tx_rings; j++) {
			tx_ring = &ixgbe->tx_rings[j];
			done = done &&
			    (tx_ring->tbd_free == tx_ring->ring_size);
		}

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}

/*
 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
 */
static boolean_t
ixgbe_rx_drain(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	boolean_t done;
	int i, j;

	/*
	 * Polling the rx free list to check if those rx buffers held by
	 * the upper layer are released.
	 *
	 * Check the counter rcb_free to see if all pending buffers are
	 * released. No lock protection is needed here.
	 *
	 * Return B_TRUE if all pending buffers have been released;
	 * Otherwise return B_FALSE;
	 */
	for (i = 0; i < RX_DRAIN_TIME; i++) {

		done = B_TRUE;
		for (j = 0; j < ixgbe->num_rx_rings; j++) {
			rx_ring = &ixgbe->rx_rings[j];
			done = done &&
			    (rx_ring->rcb_free == rx_ring->free_list_size);
		}

		if (done)
			break;

		msec_delay(1);
	}

	return (done);
}

/*
 * ixgbe_start - Start the driver/chipset.
 */
int
ixgbe_start(ixgbe_t *ixgbe)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}
/*
 * ixgbe_stop - Stop the driver/chipset.
 */
void
ixgbe_stop(ixgbe_t *ixgbe)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);
}

/*
 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
 */
static int
ixgbe_alloc_rings(ixgbe_t *ixgbe)
{
	/*
	 * Allocate memory space for rx rings
	 */
	ixgbe->rx_rings = kmem_zalloc(
	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
	    KM_NOSLEEP);

	if (ixgbe->rx_rings == NULL) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory space for tx rings
	 */
	ixgbe->tx_rings = kmem_zalloc(
	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
	    KM_NOSLEEP);

	if (ixgbe->tx_rings == NULL) {
		kmem_free(ixgbe->rx_rings,
		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
		ixgbe->rx_rings = NULL;
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_rings - Free the memory space of rx/tx rings.
 */
static void
ixgbe_free_rings(ixgbe_t *ixgbe)
{
	if (ixgbe->rx_rings != NULL) {
		kmem_free(ixgbe->rx_rings,
		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
		ixgbe->rx_rings = NULL;
	}

	if (ixgbe->tx_rings != NULL) {
		kmem_free(ixgbe->tx_rings,
		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
		ixgbe->tx_rings = NULL;
	}
}
/*
 * ixgbe_setup_rings - Setup rx/tx rings.
 */
static void
ixgbe_setup_rings(ixgbe_t *ixgbe)
{
	/*
	 * Setup the rx/tx rings, including the following:
	 *
	 * 1. Setup the descriptor ring and the control block buffers;
	 * 2. Initialize necessary registers for receive/transmit;
	 * 3. Initialize software pointers/parameters for receive/transmit;
	 */
	ixgbe_setup_rx(ixgbe);

	ixgbe_setup_tx(ixgbe);
}

static void
ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
{
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	rx_control_block_t *rcb;
	union ixgbe_adv_rx_desc *rbd;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;
	int i;

	ASSERT(mutex_owned(&rx_ring->rx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	for (i = 0; i < ixgbe->rx_ring_size; i++) {
		rcb = rx_ring->work_list[i];
		rbd = &rx_ring->rbd_ring[i];

		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
		rbd->read.hdr_addr = NULL;
	}

	/*
	 * Initialize the length register
	 */
	size = rx_ring->ring_size * sizeof (union ixgbe_adv_rx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)rx_ring->rbd_area.dma_address;
	buf_high = (uint32_t)(rx_ring->rbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_ring->ring_size - 1);
	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);

	rx_ring->rbd_next = 0;

	/*
	 * Note: Considering the case that the chipset is being reset
	 * and there are still some buffers held by the upper layer,
	 * we should not reset the values of rcb_head, rcb_tail and
	 * rcb_free if the state is not IXGBE_UNKNOWN.
	 */
	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
		rx_ring->rcb_head = 0;
		rx_ring->rcb_tail = 0;
		rx_ring->rcb_free = rx_ring->free_list_size;
	}

	/*
	 * Setup the Receive Descriptor Control Register (RXDCTL)
	 * PTHRESH=32 descriptors (half the internal cache)
	 * HTHRESH=0 descriptors (to minimize latency on fetch)
	 * WTHRESH defaults to 1 (writeback each descriptor)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */
	reg_val |= 0x0020;		/* pthresh */
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);

	/*
	 * Setup the Split and Replication Receive Control Register.
	 * Set the rx buffer size and the advanced descriptor type.
	 */
	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
}
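/*
 * A worked SRRCTL example (assuming IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10,
 * i.e. the packet buffer size field is expressed in 1 KB units): with
 * rx_buf_size = 2048, the value written above contains (2048 >> 10) = 2,
 * meaning a 2 KB receive buffer per descriptor.
 */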
static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t reg_val;
	int i;

	/*
	 * Set filter control in FCTRL to accept broadcast packets and do
	 * not pass pause frames to host. Flow control settings are already
	 * in this register, so preserve them.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

	/*
	 * Enable the receive unit. This must be done after filter
	 * control is set in FCTRL.
	 */
	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);

	/*
	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_setup_rx_ring(rx_ring);
	}

	/*
	 * The Max Frame Size in MHADD will be internally increased by four
	 * bytes if the packet has a VLAN field, so includes MTU, ethernet
	 * header and frame check sequence.
	 */
	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);

	/*
	 * Setup Jumbo Frame enable bit
	 */
	if (ixgbe->default_mtu > ETHERMTU) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg_val |= IXGBE_HLREG0_JUMBOEN;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
	}

	/*
	 * Hardware checksum settings
	 */
	if (ixgbe->rx_hcksum_enable) {
		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
	}

	/*
	 * Setup RSS for multiple receive queues
	 */
	if (ixgbe->num_rx_rings > 1)
		ixgbe_setup_rss(ixgbe);
}
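/*
 * A worked MHADD example: with the default MTU of 1500,
 * 1500 + sizeof (struct ether_header) (14) + ETHERFCSL (4) = 1518 is
 * written into the MFS field above; the hardware itself adds four bytes
 * when a VLAN tag is present, so tagged frames need no extra allowance.
 */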
static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;

	ASSERT(mutex_owned(&tx_ring->tx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Initialize the length register
	 */
	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

	/*
	 * Setup TXDCTL(tx_ring->index)
	 */
	reg_val = IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

	/*
	 * Setup head write-back
	 */
	if (ixgbe->tx_head_wb_enable) {
		/*
		 * The memory of the head write-back is allocated using
		 * the extra tbd beyond the tail of the tbd ring.
		 */
		tx_ring->tbd_head_wb = (uint32_t *)
		    ((uintptr_t)tx_ring->tbd_area.address + size);
		*tx_ring->tbd_head_wb = 0;

		buf_low = (uint32_t)
		    (tx_ring->tbd_area.dma_address + size);
		buf_high = (uint32_t)
		    ((tx_ring->tbd_area.dma_address + size) >> 32);

		/* Set the head write-back enable bit */
		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

		/*
		 * Turn off relaxed ordering for head write back or it will
		 * cause problems with the tx recycling
		 */
		reg_val = IXGBE_READ_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index));
		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
	} else {
		tx_ring->tbd_head_wb = NULL;
	}

	tx_ring->tbd_head = 0;
	tx_ring->tbd_tail = 0;
	tx_ring->tbd_free = tx_ring->ring_size;

	/*
	 * Note: Considering the case that the chipset is being reset,
	 * and there are still some tcb in the pending list,
	 * we should not reset the values of tcb_head, tcb_tail and
	 * tcb_free if the state is not IXGBE_UNKNOWN.
	 */
	if (ixgbe->ixgbe_state == IXGBE_UNKNOWN) {
		tx_ring->tcb_head = 0;
		tx_ring->tcb_tail = 0;
		tx_ring->tcb_free = tx_ring->free_list_size;
	}

	/*
	 * Initialize hardware checksum offload settings
	 */
	tx_ring->hcksum_context.hcksum_flags = 0;
	tx_ring->hcksum_context.ip_hdr_len = 0;
	tx_ring->hcksum_context.mac_hdr_len = 0;
	tx_ring->hcksum_context.l4_proto = 0;
}

static void
ixgbe_setup_tx(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	int i;

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_setup_tx_ring(tx_ring);
	}
}
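/*
 * Note on the redirection table written below: each 32-bit RETA register
 * holds four one-byte entries. With j cycling 0..3, j * 0x11 yields the
 * bytes 0x00, 0x11, 0x22, 0x33, and a fully assembled register (e.g.
 * 0x00112233) is written on every fourth iteration, so RSS hash buckets
 * are spread evenly across four rx queues.
 */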
/*
 * ixgbe_setup_rss - Setup receive-side scaling feature.
 */
static void
ixgbe_setup_rss(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t i, j, mrqc, rxcsum;
	uint32_t random;
	uint32_t reta;

	/*
	 * Fill out redirection table
	 */
	j = 0;
	reta = 0;
	for (i = 0; i < 128; i++) {
		reta = (reta << 8) | (j * 0x11);
		if (j == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
		j = ((j + 1) % 4);
	}

	/*
	 * Fill out hash function seeds with a random constant
	 */
	for (i = 0; i < 10; i++) {
		(void) random_get_pseudo_bytes((uint8_t *)&random,
		    sizeof (uint32_t));
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
	}

	/*
	 * Enable RSS & perform hash on these packet types
	 */
	mrqc = IXGBE_MRQC_RSSEN |
	    IXGBE_MRQC_RSS_FIELD_IPV4 |
	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
	    IXGBE_MRQC_RSS_FIELD_IPV6 |
	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	/*
	 * Disable Packet Checksum to enable RSS for multiple receive queues.
	 *
	 * It is an adapter hardware limitation that Packet Checksum is
	 * mutually exclusive with RSS.
	 */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}

/*
 * ixgbe_init_unicst - Initialize the unicast addresses.
 */
static void
ixgbe_init_unicst(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot;

	/*
	 * Here we should consider two situations:
	 *
	 * 1. Chipset is initialized the first time
	 *    Initialize the multiple unicast addresses, and
	 *    save the default mac address.
	 *
	 * 2. Chipset is reset
	 *    Recover the multiple unicast addresses from the
	 *    software data structure to the RAR registers.
	 */
	if (!ixgbe->unicst_init) {
		/*
		 * Initialize the multiple unicast addresses
		 */
		ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;

		ixgbe->unicst_avail = ixgbe->unicst_total - 1;

		bcopy(hw->mac.addr, ixgbe->unicst_addr[0].mac.addr,
		    ETHERADDRL);
		ixgbe->unicst_addr[0].mac.set = 1;

		for (slot = 1; slot < ixgbe->unicst_total; slot++)
			ixgbe->unicst_addr[slot].mac.set = 0;

		ixgbe->unicst_init = B_TRUE;
	} else {
		/*
		 * Recover the default mac address
		 */
		bcopy(ixgbe->unicst_addr[0].mac.addr, hw->mac.addr,
		    ETHERADDRL);

		/* Re-configure the RAR registers */
		for (slot = 1; slot < ixgbe->unicst_total; slot++)
			(void) ixgbe_set_rar(hw, slot,
			    ixgbe->unicst_addr[slot].mac.addr, NULL, NULL);
	}
}

/*
 * ixgbe_unicst_set - Set the unicast address to the specified slot.
 */
int
ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr,
    mac_addr_slot_t slot)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Save the unicast address in the software data structure
	 */
	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);

	/*
	 * Set the unicast address to the RAR register
	 */
	(void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, NULL, NULL);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}

/*
 * ixgbe_multicst_add - Add a multicst address.
 */
int
ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
{
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	if ((multiaddr[0] & 01) == 0) {
		return (EINVAL);
	}

	if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
		return (ENOENT);
	}

	bcopy(multiaddr,
	    &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
	ixgbe->mcast_count++;

	/*
	 * Update the multicast table in the hardware
	 */
	ixgbe_setup_multicst(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
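/*
 * Note: the (multiaddr[0] & 01) test above checks the group address bit,
 * the least significant bit of the first octet, which is set in every
 * valid ethernet multicast address.
 */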
/*
 * ixgbe_multicst_remove - Remove a multicst address.
 */
int
ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	for (i = 0; i < ixgbe->mcast_count; i++) {
		if (bcmp(multiaddr, &ixgbe->mcast_table[i],
		    ETHERADDRL) == 0) {
			for (i++; i < ixgbe->mcast_count; i++) {
				ixgbe->mcast_table[i - 1] =
				    ixgbe->mcast_table[i];
			}
			ixgbe->mcast_count--;
			break;
		}
	}

	/*
	 * Update the multicast table in the hardware
	 */
	ixgbe_setup_multicst(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}

/*
 * ixgbe_setup_multicst - Setup multicast data structures.
 *
 * This routine initializes all of the multicast related structures
 * and saves them in the hardware registers.
 */
static void
ixgbe_setup_multicst(ixgbe_t *ixgbe)
{
	uint8_t *mc_addr_list;
	uint32_t mc_addr_count;
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);

	mc_addr_list = (uint8_t *)ixgbe->mcast_table;
	mc_addr_count = ixgbe->mcast_count;

	/*
	 * Update the multicast addresses to the MTA registers
	 */
	(void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
	    ixgbe_mc_table_itr);
}
2140 */ 2141 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU, 2142 MIN_MTU, MAX_MTU, DEFAULT_MTU); 2143 2144 ixgbe->max_frame_size = ixgbe->default_mtu + 2145 sizeof (struct ether_vlan_header) + ETHERFCSL; 2146 2147 /* 2148 * Ethernet flow control configuration 2149 */ 2150 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL, 2151 ixgbe_fc_none, 3, ixgbe_fc_full); 2152 if (flow_control == 3) 2153 flow_control = ixgbe_fc_default; 2154 2155 hw->fc.type = flow_control; 2156 2157 /* 2158 * Multiple rings configurations 2159 */ 2160 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM, 2161 MIN_TX_QUEUE_NUM, MAX_TX_QUEUE_NUM, DEFAULT_TX_QUEUE_NUM); 2162 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE, 2163 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE); 2164 2165 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM, 2166 MIN_RX_QUEUE_NUM, MAX_RX_QUEUE_NUM, DEFAULT_RX_QUEUE_NUM); 2167 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE, 2168 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE); 2169 2170 /* 2171 * Tunable used to force an interrupt type. Its only use is 2172 * for testing the lesser interrupt types. 2173 * 0 = don't force interrupt type 2174 * 1 = force interrupt type MSI-X 2175 * 2 = force interrupt type MSI 2176 * 3 = force interrupt type Legacy 2177 */ 2178 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE, 2179 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE); 2180 ixgbe_log(ixgbe, "interrupt force: %d\n", ixgbe->intr_force); 2181 2182 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE, 2183 0, 1, 1); 2184 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE, 2185 0, 1, 1); 2186 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE, 2187 0, 1, 0); 2188 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE, 2189 0, 1, 1); 2190 2191 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD, 2192 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD, 2193 DEFAULT_TX_COPY_THRESHOLD); 2194 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe, 2195 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD, 2196 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD); 2197 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe, 2198 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD, 2199 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD); 2200 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe, 2201 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD, 2202 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD); 2203 2204 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD, 2205 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD, 2206 DEFAULT_RX_COPY_THRESHOLD); 2207 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR, 2208 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR, 2209 DEFAULT_RX_LIMIT_PER_INTR); 2210 2211 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING, 2212 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING, 2213 DEFAULT_INTR_THROTTLING); 2214 } 2215 2216 /* 2217 * ixgbe_get_prop - Get a property value out of the configuration file 2218 * ixgbe.conf. 2219 * 2220 * Caller provides the name of the property, a minimum value, a 2221 * maximum value, and a default value. 2222 * 2223 * Returns the configured value of the property, with default, minimum and 2224 * maximum properly applied. 
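 *
 * For example, a hypothetical ixgbe.conf entry such as
 *
 *	default_mtu = 9000;
 *
 * is read below via ddi_prop_get_int(9F); a value above the maximum is
 * silently clamped to the maximum, and a value below the minimum is
 * clamped to the minimum.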
2225 */ 2226 static int 2227 ixgbe_get_prop(ixgbe_t *ixgbe, 2228 char *propname, /* name of the property */ 2229 int minval, /* minimum acceptable value */ 2230 int maxval, /* maximum acceptable value */ 2231 int defval) /* default value */ 2232 { 2233 int value; 2234 2235 /* 2236 * Call ddi_prop_get_int() to read the conf settings 2237 */ 2238 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip, 2239 DDI_PROP_DONTPASS, propname, defval); 2240 if (value > maxval) 2241 value = maxval; 2242 2243 if (value < minval) 2244 value = minval; 2245 2246 return (value); 2247 } 2248 2249 /* 2250 * ixgbe_driver_setup_link - Use the link properties to set up the link. 2251 */ 2252 int 2253 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw) 2254 { 2255 struct ixgbe_mac_info *mac; 2256 struct ixgbe_phy_info *phy; 2257 boolean_t invalid; 2258 2259 mac = &ixgbe->hw.mac; 2260 phy = &ixgbe->hw.phy; 2261 invalid = B_FALSE; 2262 2263 if (ixgbe->param_adv_autoneg_cap == 1) { 2264 mac->autoneg = B_TRUE; 2265 phy->autoneg_advertised = 0; 2266 2267 /* 2268 * No half duplex support with 10Gb parts 2269 */ 2270 if (ixgbe->param_adv_10000fdx_cap == 1) 2271 phy->autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 2272 2273 if (ixgbe->param_adv_1000fdx_cap == 1) 2274 phy->autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 2275 2276 if (ixgbe->param_adv_100fdx_cap == 1) 2277 phy->autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; 2278 2279 if (phy->autoneg_advertised == 0) 2280 invalid = B_TRUE; 2281 } else { 2282 ixgbe->hw.mac.autoneg = B_FALSE; 2283 } 2284 2285 if (invalid) { 2286 ixgbe_notice(ixgbe, "Invalid link settings. Setup link to " 2287 "autonegotiation with full link capabilities."); 2288 ixgbe->hw.mac.autoneg = B_TRUE; 2289 } 2290 2291 if (setup_hw) { 2292 if (ixgbe_setup_link(&ixgbe->hw) != IXGBE_SUCCESS) 2293 return (IXGBE_FAILURE); 2294 } 2295 2296 return (IXGBE_SUCCESS); 2297 } 2298 2299 /* 2300 * ixgbe_driver_link_check - Link status processing. 2301 */ 2302 static boolean_t 2303 ixgbe_driver_link_check(ixgbe_t *ixgbe) 2304 { 2305 struct ixgbe_hw *hw = &ixgbe->hw; 2306 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 2307 boolean_t link_up = B_FALSE; 2308 boolean_t link_changed = B_FALSE; 2309 2310 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2311 2312 (void) ixgbe_check_link(hw, &speed, &link_up); 2313 if (link_up) { 2314 /* 2315 * The link is up; check whether it was marked as down earlier 2316 */ 2317 if (ixgbe->link_state != LINK_STATE_UP) { 2318 switch (speed) { 2319 case IXGBE_LINK_SPEED_10GB_FULL: 2320 ixgbe->link_speed = SPEED_10GB; 2321 break; 2322 case IXGBE_LINK_SPEED_1GB_FULL: 2323 ixgbe->link_speed = SPEED_1GB; 2324 break; 2325 case IXGBE_LINK_SPEED_100_FULL: 2326 ixgbe->link_speed = SPEED_100; 2327 } 2328 ixgbe->link_duplex = LINK_DUPLEX_FULL; 2329 ixgbe->link_state = LINK_STATE_UP; 2330 ixgbe->link_down_timeout = 0; 2331 link_changed = B_TRUE; 2332 } 2333 } else { 2334 if (ixgbe->link_state != LINK_STATE_DOWN) { 2335 ixgbe->link_speed = 0; 2336 ixgbe->link_duplex = 0; 2337 ixgbe->link_state = LINK_STATE_DOWN; 2338 link_changed = B_TRUE; 2339 } 2340 2341 if (ixgbe->ixgbe_state & IXGBE_STARTED) { 2342 if (ixgbe->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) { 2343 ixgbe->link_down_timeout++; 2344 } else if (ixgbe->link_down_timeout == 2345 MAX_LINK_DOWN_TIMEOUT) { 2346 ixgbe_tx_clean(ixgbe); 2347 ixgbe->link_down_timeout++; 2348 } 2349 } 2350 } 2351 2352 return (link_changed); 2353 } 2354 2355 /* 2356 * ixgbe_local_timer - Driver watchdog function. 
2357 * 2358 * This function will handle the transmit stall check, link status check and 2359 * other routines. 2360 */ 2361 static void 2362 ixgbe_local_timer(void *arg) 2363 { 2364 ixgbe_t *ixgbe = (ixgbe_t *)arg; 2365 2366 if (ixgbe_stall_check(ixgbe)) { 2367 ixgbe->reset_count++; 2368 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 2369 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 2370 } 2371 2372 ixgbe_restart_watchdog_timer(ixgbe); 2373 } 2374 2375 /* 2376 * ixgbe_stall_check - Check for transmit stall. 2377 * 2378 * This function checks if the adapter is stalled (in transmit). 2379 * 2380 * It is called each time the watchdog timeout is invoked. 2381 * If the transmit descriptor reclaim continuously fails, 2382 * the watchdog value will increment by 1. If the watchdog 2383 * value exceeds the threshold, the ixgbe is assumed to 2384 * have stalled and needs to be reset. 2385 */ 2386 static boolean_t 2387 ixgbe_stall_check(ixgbe_t *ixgbe) 2388 { 2389 ixgbe_tx_ring_t *tx_ring; 2390 boolean_t result; 2391 int i; 2392 2393 if (ixgbe->link_state != LINK_STATE_UP) 2394 return (B_FALSE); 2395 2396 /* 2397 * If any tx ring is stalled, we'll reset the chipset 2398 */ 2399 result = B_FALSE; 2400 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2401 tx_ring = &ixgbe->tx_rings[i]; 2402 2403 if (tx_ring->recycle_fail > 0) 2404 tx_ring->stall_watchdog++; 2405 else 2406 tx_ring->stall_watchdog = 0; 2407 2408 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { 2409 result = B_TRUE; 2410 break; 2411 } 2412 } 2413 2414 if (result) { 2415 tx_ring->stall_watchdog = 0; 2416 tx_ring->recycle_fail = 0; 2417 } 2418 2419 return (result); 2420 } 2421 2422 2423 /* 2424 * is_valid_mac_addr - Check if the mac address is valid. 2425 */ 2426 static boolean_t 2427 is_valid_mac_addr(uint8_t *mac_addr) 2428 { 2429 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 2430 const uint8_t addr_test2[6] = 2431 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 2432 2433 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 2434 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 2435 return (B_FALSE); 2436 2437 return (B_TRUE); 2438 } 2439 2440 static boolean_t 2441 ixgbe_find_mac_address(ixgbe_t *ixgbe) 2442 { 2443 #ifdef __sparc 2444 struct ixgbe_hw *hw = &ixgbe->hw; 2445 uchar_t *bytes; 2446 struct ether_addr sysaddr; 2447 uint_t nelts; 2448 int err; 2449 boolean_t found = B_FALSE; 2450 2451 /* 2452 * The "vendor's factory-set address" may already have 2453 * been extracted from the chip, but if the property 2454 * "local-mac-address" is set we use that instead. 2455 * 2456 * We check whether it looks like an array of 6 2457 * bytes (which it should, if OBP set it). If we can't 2458 * make sense of it this way, we'll ignore it. 2459 */ 2460 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 2461 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 2462 if (err == DDI_PROP_SUCCESS) { 2463 if (nelts == ETHERADDRL) { 2464 while (nelts--) 2465 hw->mac.addr[nelts] = bytes[nelts]; 2466 found = B_TRUE; 2467 } 2468 ddi_prop_free(bytes); 2469 } 2470 2471 /* 2472 * Look up the OBP property "local-mac-address?". If the user has set 2473 * 'local-mac-address? = false', use "the system address" instead. 
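 *
 * For example, a user may have set, at the OBP prompt:
 *
 *	ok setenv local-mac-address? false
 *
 * in which case the per-port factory address is ignored in favor of
 * the single system-wide address returned by localetheraddr() below.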
2474 */ 2475 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0, 2476 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 2477 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 2478 if (localetheraddr(NULL, &sysaddr) != 0) { 2479 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 2480 found = B_TRUE; 2481 } 2482 } 2483 ddi_prop_free(bytes); 2484 } 2485 2486 /* 2487 * Finally(!), if there's a valid "mac-address" property (created 2488 * if we netbooted from this interface), we must use this instead 2489 * of any of the above to ensure that the NFS/install server doesn't 2490 * get confused by the address changing as Solaris takes over! 2491 */ 2492 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 2493 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 2494 if (err == DDI_PROP_SUCCESS) { 2495 if (nelts == ETHERADDRL) { 2496 while (nelts--) 2497 hw->mac.addr[nelts] = bytes[nelts]; 2498 found = B_TRUE; 2499 } 2500 ddi_prop_free(bytes); 2501 } 2502 2503 if (found) { 2504 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 2505 return (B_TRUE); 2506 } 2507 #else 2508 _NOTE(ARGUNUSED(ixgbe)); 2509 #endif 2510 2511 return (B_TRUE); 2512 } 2513 2514 #pragma inline(ixgbe_arm_watchdog_timer) 2515 static void 2516 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe) 2517 { 2518 /* 2519 * Fire a watchdog timer 2520 */ 2521 ixgbe->watchdog_tid = 2522 timeout(ixgbe_local_timer, 2523 (void *)ixgbe, 1 * drv_usectohz(1000000)); 2524 2525 } 2526 2527 /* 2528 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer. 2529 */ 2530 void 2531 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe) 2532 { 2533 mutex_enter(&ixgbe->watchdog_lock); 2534 2535 if (!ixgbe->watchdog_enable) { 2536 ixgbe->watchdog_enable = B_TRUE; 2537 ixgbe->watchdog_start = B_TRUE; 2538 ixgbe_arm_watchdog_timer(ixgbe); 2539 } 2540 2541 mutex_exit(&ixgbe->watchdog_lock); 2542 } 2543 2544 /* 2545 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer. 2546 */ 2547 void 2548 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe) 2549 { 2550 timeout_id_t tid; 2551 2552 mutex_enter(&ixgbe->watchdog_lock); 2553 2554 ixgbe->watchdog_enable = B_FALSE; 2555 ixgbe->watchdog_start = B_FALSE; 2556 tid = ixgbe->watchdog_tid; 2557 ixgbe->watchdog_tid = 0; 2558 2559 mutex_exit(&ixgbe->watchdog_lock); 2560 2561 if (tid != 0) 2562 (void) untimeout(tid); 2563 } 2564 2565 /* 2566 * ixgbe_start_watchdog_timer - Start the driver watchdog timer. 2567 */ 2568 static void 2569 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe) 2570 { 2571 mutex_enter(&ixgbe->watchdog_lock); 2572 2573 if (ixgbe->watchdog_enable) { 2574 if (!ixgbe->watchdog_start) { 2575 ixgbe->watchdog_start = B_TRUE; 2576 ixgbe_arm_watchdog_timer(ixgbe); 2577 } 2578 } 2579 2580 mutex_exit(&ixgbe->watchdog_lock); 2581 } 2582 2583 /* 2584 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer. 2585 */ 2586 static void 2587 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe) 2588 { 2589 mutex_enter(&ixgbe->watchdog_lock); 2590 2591 if (ixgbe->watchdog_start) 2592 ixgbe_arm_watchdog_timer(ixgbe); 2593 2594 mutex_exit(&ixgbe->watchdog_lock); 2595 } 2596 2597 /* 2598 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer. 
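 *
 * As in ixgbe_disable_watchdog_timer() above, the timeout id is
 * snapshotted and cleared while holding watchdog_lock, but
 * untimeout(9F) itself is only called after the lock is dropped:
 * untimeout() waits for a concurrently running ixgbe_local_timer(),
 * which itself takes watchdog_lock to rearm, so calling it with the
 * lock held could deadlock.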
2599 */ 2600 static void 2601 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe) 2602 { 2603 timeout_id_t tid; 2604 2605 mutex_enter(&ixgbe->watchdog_lock); 2606 2607 ixgbe->watchdog_start = B_FALSE; 2608 tid = ixgbe->watchdog_tid; 2609 ixgbe->watchdog_tid = 0; 2610 2611 mutex_exit(&ixgbe->watchdog_lock); 2612 2613 if (tid != 0) 2614 (void) untimeout(tid); 2615 } 2616 2617 /* 2618 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts. 2619 */ 2620 static void 2621 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe) 2622 { 2623 struct ixgbe_hw *hw = &ixgbe->hw; 2624 2625 /* 2626 * mask all interrupts off 2627 */ 2628 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff); 2629 2630 /* 2631 * for MSI-X, also disable autoclear 2632 */ 2633 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 2634 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0); 2635 } 2636 2637 IXGBE_WRITE_FLUSH(hw); 2638 } 2639 2640 /* 2641 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts. 2642 */ 2643 static void 2644 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe) 2645 { 2646 struct ixgbe_hw *hw = &ixgbe->hw; 2647 uint32_t eims, eiac, gpie; 2648 2649 gpie = 0; 2650 eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */ 2651 eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */ 2652 2653 /* 2654 * msi-x mode 2655 */ 2656 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 2657 /* enable autoclear but not on bits 29:20 */ 2658 eiac = (eims & ~0x3ff00000); 2659 2660 /* general purpose interrupt enable */ 2661 gpie |= (IXGBE_GPIE_MSIX_MODE | 2662 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); 2663 /* 2664 * non-msi-x mode 2665 */ 2666 } else { 2667 2668 /* disable autoclear, leave gpie at default */ 2669 eiac = 0; 2670 } 2671 2672 IXGBE_WRITE_REG(hw, IXGBE_EIMS, eims); 2673 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac); 2674 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 2675 IXGBE_WRITE_FLUSH(hw); 2676 } 2677 2678 /* 2679 * ixgbe_loopback_ioctl - Loopback support. 
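 *
 * A sketch of the expected ioctl flow (illustrative, not a new API):
 * LB_GET_INFO_SIZE and LB_GET_INFO report the two supported modes
 * (lb_normal, lb_mac); LB_SET_MODE carries one uint32_t in the mblk,
 * e.g. IXGBE_LB_INTERNAL_MAC, which is handed to
 * ixgbe_set_loopback_mode() below, while IXGBE_LB_NONE restores
 * normal operation via a chip reset.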
2680 */ 2681 enum ioc_reply 2682 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp) 2683 { 2684 lb_info_sz_t *lbsp; 2685 lb_property_t *lbpp; 2686 uint32_t *lbmp; 2687 uint32_t size; 2688 uint32_t value; 2689 2690 if (mp->b_cont == NULL) 2691 return (IOC_INVAL); 2692 2693 switch (iocp->ioc_cmd) { 2694 default: 2695 return (IOC_INVAL); 2696 2697 case LB_GET_INFO_SIZE: 2698 size = sizeof (lb_info_sz_t); 2699 if (iocp->ioc_count != size) 2700 return (IOC_INVAL); 2701 2702 value = sizeof (lb_normal); 2703 value += sizeof (lb_mac); 2704 2705 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 2706 *lbsp = value; 2707 break; 2708 2709 case LB_GET_INFO: 2710 value = sizeof (lb_normal); 2711 value += sizeof (lb_mac); 2712 2713 size = value; 2714 if (iocp->ioc_count != size) 2715 return (IOC_INVAL); 2716 2717 value = 0; 2718 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 2719 2720 lbpp[value++] = lb_normal; 2721 lbpp[value++] = lb_mac; 2722 break; 2723 2724 case LB_GET_MODE: 2725 size = sizeof (uint32_t); 2726 if (iocp->ioc_count != size) 2727 return (IOC_INVAL); 2728 2729 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 2730 *lbmp = ixgbe->loopback_mode; 2731 break; 2732 2733 case LB_SET_MODE: 2734 size = 0; 2735 if (iocp->ioc_count != sizeof (uint32_t)) 2736 return (IOC_INVAL); 2737 2738 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 2739 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp)) 2740 return (IOC_INVAL); 2741 break; 2742 } 2743 2744 iocp->ioc_count = size; 2745 iocp->ioc_error = 0; 2746 2747 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 2748 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 2749 return (IOC_INVAL); 2750 } 2751 2752 return (IOC_REPLY); 2753 } 2754 2755 /* 2756 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode. 2757 */ 2758 static boolean_t 2759 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode) 2760 { 2761 struct ixgbe_hw *hw; 2762 2763 if (mode == ixgbe->loopback_mode) 2764 return (B_TRUE); 2765 2766 hw = &ixgbe->hw; 2767 2768 ixgbe->loopback_mode = mode; 2769 2770 if (mode == IXGBE_LB_NONE) { 2771 /* 2772 * Reset the chip 2773 */ 2774 hw->phy.autoneg_wait_to_complete = B_TRUE; 2775 (void) ixgbe_reset(ixgbe); 2776 hw->phy.autoneg_wait_to_complete = B_FALSE; 2777 return (B_TRUE); 2778 } 2779 2780 mutex_enter(&ixgbe->gen_lock); 2781 2782 switch (mode) { 2783 default: 2784 mutex_exit(&ixgbe->gen_lock); 2785 return (B_FALSE); 2786 2787 case IXGBE_LB_INTERNAL_MAC: 2788 ixgbe_set_internal_mac_loopback(ixgbe); 2789 break; 2790 } 2791 2792 mutex_exit(&ixgbe->gen_lock); 2793 2794 return (B_TRUE); 2795 } 2796 2797 /* 2798 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode. 
2799 */ 2800 static void 2801 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe) 2802 { 2803 struct ixgbe_hw *hw; 2804 uint32_t reg; 2805 uint8_t atlas; 2806 2807 hw = &ixgbe->hw; 2808 2809 /* 2810 * Setup MAC loopback 2811 */ 2812 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0); 2813 reg |= IXGBE_HLREG0_LPBK; 2814 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg); 2815 2816 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 2817 reg &= ~IXGBE_AUTOC_LMS_MASK; 2818 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 2819 2820 /* 2821 * Disable Atlas Tx lanes to keep packets in loopback and not on wire 2822 */ 2823 if (hw->mac.type == ixgbe_mac_82598EB) { 2824 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 2825 &atlas); 2826 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 2827 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 2828 atlas); 2829 2830 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 2831 &atlas); 2832 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 2833 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 2834 atlas); 2835 2836 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 2837 &atlas); 2838 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 2839 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 2840 atlas); 2841 2842 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 2843 &atlas); 2844 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 2845 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 2846 atlas); 2847 } 2848 } 2849 2850 #pragma inline(ixgbe_intr_rx_work) 2851 /* 2852 * ixgbe_intr_rx_work - RX processing of ISR. 2853 */ 2854 static void 2855 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring) 2856 { 2857 mblk_t *mp; 2858 2859 mutex_enter(&rx_ring->rx_lock); 2860 2861 mp = ixgbe_rx(rx_ring); 2862 mutex_exit(&rx_ring->rx_lock); 2863 2864 if (mp != NULL) 2865 mac_rx(rx_ring->ixgbe->mac_hdl, NULL, mp); 2866 } 2867 2868 #pragma inline(ixgbe_intr_tx_work) 2869 /* 2870 * ixgbe_intr_tx_work - TX processing of ISR. 2871 */ 2872 static void 2873 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring) 2874 { 2875 /* 2876 * Recycle the tx descriptors 2877 */ 2878 tx_ring->tx_recycle(tx_ring); 2879 2880 /* 2881 * Schedule the re-transmit 2882 */ 2883 if (tx_ring->reschedule && 2884 (tx_ring->tbd_free >= tx_ring->resched_thresh)) { 2885 tx_ring->reschedule = B_FALSE; 2886 mac_tx_update(tx_ring->ixgbe->mac_hdl); 2887 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 2888 } 2889 } 2890 2891 #pragma inline(ixgbe_intr_other_work) 2892 /* 2893 * ixgbe_intr_other_work - Other processing of ISR. 2894 */ 2895 static void 2896 ixgbe_intr_other_work(ixgbe_t *ixgbe) 2897 { 2898 boolean_t link_changed; 2899 2900 ixgbe_stop_watchdog_timer(ixgbe); 2901 2902 mutex_enter(&ixgbe->gen_lock); 2903 2904 /* 2905 * Take care of link status change 2906 */ 2907 link_changed = ixgbe_driver_link_check(ixgbe); 2908 2909 /* 2910 * Get new phy state 2911 */ 2912 ixgbe_get_hw_state(ixgbe); 2913 2914 mutex_exit(&ixgbe->gen_lock); 2915 2916 if (link_changed) 2917 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 2918 2919 ixgbe_start_watchdog_timer(ixgbe); 2920 } 2921 2922 /* 2923 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts. 
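 *
 * Because a fixed (INTx) interrupt line may be shared with other
 * devices, the handler must determine whether this device actually
 * raised the interrupt: any cause bit set in EICR claims it
 * (DDI_INTR_CLAIMED); a zero EICR returns DDI_INTR_UNCLAIMED so the
 * framework can offer the interrupt to the other handlers sharing
 * the line.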
2924 */ 2925 static uint_t 2926 ixgbe_intr_legacy(void *arg1, void *arg2) 2927 { 2928 _NOTE(ARGUNUSED(arg2)); 2929 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 2930 struct ixgbe_hw *hw = &ixgbe->hw; 2931 ixgbe_tx_ring_t *tx_ring; 2932 uint32_t eicr; 2933 mblk_t *mp; 2934 boolean_t tx_reschedule; 2935 boolean_t link_changed; 2936 uint_t result; 2937 2938 2939 mutex_enter(&ixgbe->gen_lock); 2940 2941 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 2942 mutex_exit(&ixgbe->gen_lock); 2943 return (DDI_INTR_UNCLAIMED); 2944 } 2945 2946 mp = NULL; 2947 tx_reschedule = B_FALSE; 2948 link_changed = B_FALSE; 2949 2950 /* 2951 * Any bit set in eicr: claim this interrupt 2952 */ 2953 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 2954 if (eicr) { 2955 /* 2956 * For legacy interrupt, we have only one interrupt, 2957 * so we have only one rx ring and one tx ring enabled. 2958 */ 2959 ASSERT(ixgbe->num_rx_rings == 1); 2960 ASSERT(ixgbe->num_tx_rings == 1); 2961 2962 /* 2963 * For legacy interrupt, we can't differentiate 2964 * between tx and rx, so always clean both 2965 */ 2966 if (eicr & IXGBE_EICR_RTX_QUEUE) { 2967 2968 /* 2969 * Clean the rx descriptors 2970 */ 2971 mp = ixgbe_rx(&ixgbe->rx_rings[0]); 2972 2973 /* 2974 * Recycle the tx descriptors 2975 */ 2976 tx_ring = &ixgbe->tx_rings[0]; 2977 tx_ring->tx_recycle(tx_ring); 2978 2979 /* 2980 * Schedule the re-transmit 2981 */ 2982 tx_reschedule = (tx_ring->reschedule && 2983 (tx_ring->tbd_free >= tx_ring->resched_thresh)); 2984 } 2985 2986 if (eicr & IXGBE_EICR_LSC) { 2987 2988 /* take care of link status change */ 2989 link_changed = ixgbe_driver_link_check(ixgbe); 2990 2991 /* Get new phy state */ 2992 ixgbe_get_hw_state(ixgbe); 2993 } 2994 2995 result = DDI_INTR_CLAIMED; 2996 } else { 2997 /* 2998 * No interrupt cause bits set: don't claim this interrupt. 2999 */ 3000 result = DDI_INTR_UNCLAIMED; 3001 } 3002 3003 mutex_exit(&ixgbe->gen_lock); 3004 3005 /* 3006 * Do the following work outside of the gen_lock 3007 */ 3008 if (mp != NULL) 3009 mac_rx(ixgbe->mac_hdl, NULL, mp); 3010 3011 if (tx_reschedule) { 3012 tx_ring->reschedule = B_FALSE; 3013 mac_tx_update(ixgbe->mac_hdl); 3014 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 3015 } 3016 3017 if (link_changed) 3018 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3019 3020 return (result); 3021 } 3022 3023 /* 3024 * ixgbe_intr_msi - Interrupt handler for MSI. 3025 */ 3026 static uint_t 3027 ixgbe_intr_msi(void *arg1, void *arg2) 3028 { 3029 _NOTE(ARGUNUSED(arg2)); 3030 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 3031 struct ixgbe_hw *hw = &ixgbe->hw; 3032 uint32_t eicr; 3033 3034 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3035 3036 /* 3037 * For MSI interrupt, we have only one vector, 3038 * so we have only one rx ring and one tx ring enabled. 3039 */ 3040 ASSERT(ixgbe->num_rx_rings == 1); 3041 ASSERT(ixgbe->num_tx_rings == 1); 3042 3043 /* 3044 * For MSI interrupt, we can't differentiate 3045 * between tx and rx, so always clean both. 3046 */ 3047 if (eicr & IXGBE_EICR_RTX_QUEUE) { 3048 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]); 3049 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 3050 } 3051 3052 if (eicr & IXGBE_EICR_LSC) { 3053 ixgbe_intr_other_work(ixgbe); 3054 } 3055 3056 return (DDI_INTR_CLAIMED); 3057 } 3058 3059 /* 3060 * ixgbe_intr_rx - Interrupt handler for rx. 
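 *
 * As used here, bt_getlowbit(map, low, high) returns the lowest bit
 * set in map within [low, high], or -1 when none remain.  For example
 * (an assumed mapping, for illustration), if this vector's rx_map has
 * bits 0 and 2 set, the loop below visits r_idx = 0, then 2, then
 * stops, cleaning exactly rx_rings[0] and rx_rings[2].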
3061 */ 3062 static uint_t 3063 ixgbe_intr_rx(void *arg1, void *arg2) 3064 { 3065 _NOTE(ARGUNUSED(arg2)); 3066 ixgbe_ring_vector_t *vect = (ixgbe_ring_vector_t *)arg1; 3067 ixgbe_t *ixgbe = vect->ixgbe; 3068 int r_idx; 3069 3070 /* 3071 * clean each rx ring that has its bit set in the map 3072 */ 3073 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1)); 3074 3075 while (r_idx >= 0) { 3076 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]); 3077 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 3078 (ixgbe->num_rx_rings - 1)); 3079 } 3080 3081 return (DDI_INTR_CLAIMED); 3082 } 3083 3084 /* 3085 * ixgbe_intr_tx_other - Interrupt handler for both tx and other. 3086 * 3087 * Always look for Tx cleanup work. Only look for other work if the right 3088 * bits are set in the Interrupt Cause Register. 3089 */ 3090 static uint_t 3091 ixgbe_intr_tx_other(void *arg1, void *arg2) 3092 { 3093 _NOTE(ARGUNUSED(arg2)); 3094 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 3095 struct ixgbe_hw *hw = &ixgbe->hw; 3096 uint32_t eicr; 3097 3098 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3099 3100 /* 3101 * Always look for Tx cleanup work. We don't have separate 3102 * transmit vectors, so we have only one tx ring enabled. 3103 */ 3104 ASSERT(ixgbe->num_tx_rings == 1); 3105 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 3106 3107 /* 3108 * Check for "other" causes. 3109 */ 3110 if (eicr & IXGBE_EICR_LSC) { 3111 ixgbe_intr_other_work(ixgbe); 3112 } 3113 3114 return (DDI_INTR_CLAIMED); 3115 } 3116 3117 /* 3118 * ixgbe_alloc_intrs - Allocate interrupts for the driver. 3119 * 3120 * Normal sequence is to try MSI-X; if not successful, try MSI; 3121 * if not successful, try Legacy. 3122 * ixgbe->intr_force can be used to force sequence to start with 3123 * any of the 3 types. 3124 * If MSI-X is not used, number of tx/rx rings is forced to 1. 
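 *
 * For example, a driver.conf entry such as (property name assumed to
 * match PROP_INTR_FORCE; see ixgbe_get_conf()):
 *
 *	intr_force = 2;
 *
 * forces the sequence to skip MSI-X and start at MSI.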
3125 */ 3126 static int 3127 ixgbe_alloc_intrs(ixgbe_t *ixgbe) 3128 { 3129 dev_info_t *devinfo; 3130 int intr_types; 3131 int rc; 3132 3133 devinfo = ixgbe->dip; 3134 3135 /* 3136 * Get supported interrupt types 3137 */ 3138 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 3139 3140 if (rc != DDI_SUCCESS) { 3141 ixgbe_log(ixgbe, 3142 "Get supported interrupt types failed: %d", rc); 3143 return (IXGBE_FAILURE); 3144 } 3145 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types); 3146 3147 ixgbe->intr_type = 0; 3148 3149 /* 3150 * Install MSI-X interrupts 3151 */ 3152 if ((intr_types & DDI_INTR_TYPE_MSIX) && 3153 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) { 3154 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX); 3155 if (rc == IXGBE_SUCCESS) 3156 return (IXGBE_SUCCESS); 3157 3158 ixgbe_log(ixgbe, 3159 "Allocate MSI-X failed, trying MSI interrupts..."); 3160 } 3161 3162 /* 3163 * MSI-X not used, force rings to 1 3164 */ 3165 ixgbe->num_rx_rings = 1; 3166 ixgbe->num_tx_rings = 1; 3167 ixgbe_log(ixgbe, 3168 "MSI-X not used, force rx and tx queue number to 1"); 3169 3170 /* 3171 * Install MSI interrupts 3172 */ 3173 if ((intr_types & DDI_INTR_TYPE_MSI) && 3174 (ixgbe->intr_force <= IXGBE_INTR_MSI)) { 3175 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI); 3176 if (rc == IXGBE_SUCCESS) 3177 return (IXGBE_SUCCESS); 3178 3179 ixgbe_log(ixgbe, 3180 "Allocate MSI failed, trying Legacy interrupts..."); 3181 } 3182 3183 /* 3184 * Install legacy interrupts 3185 */ 3186 if (intr_types & DDI_INTR_TYPE_FIXED) { 3187 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED); 3188 if (rc == IXGBE_SUCCESS) 3189 return (IXGBE_SUCCESS); 3190 3191 ixgbe_log(ixgbe, 3192 "Allocate Legacy interrupts failed"); 3193 } 3194 3195 /* 3196 * If none of the 3 types succeeded, return failure 3197 */ 3198 return (IXGBE_FAILURE); 3199 } 3200 3201 /* 3202 * ixgbe_alloc_intr_handles - Allocate interrupt handles. 3203 * 3204 * For legacy and MSI, only 1 handle is needed. For MSI-X, 3205 * if fewer than 2 handles are available, return failure. 3206 * Upon success, this sets the number of Rx rings to a number that 3207 * matches the handles available for Rx interrupts. 3208 */ 3209 static int 3210 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type) 3211 { 3212 dev_info_t *devinfo; 3213 int request, count, avail, actual; 3214 int rx_rings, minimum; 3215 int rc; 3216 3217 devinfo = ixgbe->dip; 3218 3219 /* 3220 * Currently only 1 tx ring is supported. More tx rings 3221 * will be supported with future enhancement. 
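 *
 * Worked example of the request arithmetic below: with MSI-X and 4 rx
 * rings configured, request = num_rx_rings + 1 = 5 handles (one shared
 * by tx and "other", four for rx), with a minimum of 2.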
3222 */ 3223 if (ixgbe->num_tx_rings > 1) { 3224 ixgbe->num_tx_rings = 1; 3225 ixgbe_log(ixgbe, 3226 "Use only 1 MSI-X vector for tx, " 3227 "force tx queue number to 1"); 3228 } 3229 3230 switch (intr_type) { 3231 case DDI_INTR_TYPE_FIXED: 3232 request = 1; /* Request 1 legacy interrupt handle */ 3233 minimum = 1; 3234 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy"); 3235 break; 3236 3237 case DDI_INTR_TYPE_MSI: 3238 request = 1; /* Request 1 MSI interrupt handle */ 3239 minimum = 1; 3240 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI"); 3241 break; 3242 3243 case DDI_INTR_TYPE_MSIX: 3244 /* 3245 * Best number of vectors for the adapter is 3246 * # rx rings + # tx rings + 1 for other 3247 * But currently we only support number of vectors of 3248 * # rx rings + 1 for tx & other 3249 */ 3250 request = ixgbe->num_rx_rings + 1; 3251 minimum = 2; 3252 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X"); 3253 break; 3254 3255 default: 3256 ixgbe_log(ixgbe, 3257 "invalid call to ixgbe_alloc_intr_handles(): %d\n", 3258 intr_type); 3259 return (IXGBE_FAILURE); 3260 } 3261 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d", 3262 request, minimum); 3263 3264 /* 3265 * Get number of supported interrupts 3266 */ 3267 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 3268 if ((rc != DDI_SUCCESS) || (count < minimum)) { 3269 ixgbe_log(ixgbe, 3270 "Get interrupt number failed. Return: %d, count: %d", 3271 rc, count); 3272 return (IXGBE_FAILURE); 3273 } 3274 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count); 3275 3276 /* 3277 * Get number of available interrupts 3278 */ 3279 rc = ddi_intr_get_navail(devinfo, intr_type, &avail); 3280 if ((rc != DDI_SUCCESS) || (avail < minimum)) { 3281 ixgbe_log(ixgbe, 3282 "Get interrupt available number failed. " 3283 "Return: %d, available: %d", rc, avail); 3284 return (IXGBE_FAILURE); 3285 } 3286 IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail); 3287 3288 if (avail < request) { 3289 ixgbe_log(ixgbe, "Request %d handles, %d available", 3290 request, avail); 3291 request = avail; 3292 } 3293 3294 actual = 0; 3295 ixgbe->intr_cnt = 0; 3296 3297 /* 3298 * Allocate an array of interrupt handles 3299 */ 3300 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t); 3301 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP); 3302 3303 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0, 3304 request, &actual, DDI_INTR_ALLOC_NORMAL); 3305 if (rc != DDI_SUCCESS) { 3306 ixgbe_log(ixgbe, "Allocate interrupts failed. " 3307 "return: %d, request: %d, actual: %d", 3308 rc, request, actual); 3309 goto alloc_handle_fail; 3310 } 3311 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual); 3312 3313 ixgbe->intr_cnt = actual; 3314 3315 /* 3316 * Now we know the actual number of vectors. Here we assume that 3317 * tx and other will share 1 vector and all remaining (must be at 3318 * least 1 remaining) will be used for rx. 
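 *
 * For example, if 5 handles were requested but ddi_intr_alloc()
 * returned actual = 3, then rx_rings = actual - 1 = 2 below, and
 * num_rx_rings is trimmed from 4 to 2 to match.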
3319 */ 3320 if (actual < minimum) { 3321 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d", 3322 actual); 3323 goto alloc_handle_fail; 3324 } 3325 3326 /* 3327 * For MSI-X, actual might force us to reduce number of rx rings 3328 */ 3329 if (intr_type == DDI_INTR_TYPE_MSIX) { 3330 rx_rings = actual - 1; 3331 if (rx_rings < ixgbe->num_rx_rings) { 3332 ixgbe_log(ixgbe, 3333 "MSI-X vectors force Rx queue number to %d", 3334 rx_rings); 3335 ixgbe->num_rx_rings = rx_rings; 3336 } 3337 } 3338 3339 /* 3340 * Get priority for first vector, assume remaining are all the same 3341 */ 3342 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 3343 if (rc != DDI_SUCCESS) { 3344 ixgbe_log(ixgbe, 3345 "Get interrupt priority failed: %d", rc); 3346 goto alloc_handle_fail; 3347 } 3348 3349 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 3350 if (rc != DDI_SUCCESS) { 3351 ixgbe_log(ixgbe, 3352 "Get interrupt cap failed: %d", rc); 3353 goto alloc_handle_fail; 3354 } 3355 3356 ixgbe->intr_type = intr_type; 3357 3358 return (IXGBE_SUCCESS); 3359 3360 alloc_handle_fail: 3361 ixgbe_rem_intrs(ixgbe); 3362 3363 return (IXGBE_FAILURE); 3364 } 3365 3366 /* 3367 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type. 3368 * 3369 * Before adding the interrupt handlers, the interrupt vectors have 3370 * been allocated, and the rx/tx rings have also been allocated. 3371 */ 3372 static int 3373 ixgbe_add_intr_handlers(ixgbe_t *ixgbe) 3374 { 3375 ixgbe_rx_ring_t *rx_ring; 3376 int vector; 3377 int rc; 3378 int i; 3379 3380 vector = 0; 3381 3382 switch (ixgbe->intr_type) { 3383 case DDI_INTR_TYPE_MSIX: 3384 /* 3385 * Add interrupt handler for tx + other 3386 */ 3387 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3388 (ddi_intr_handler_t *)ixgbe_intr_tx_other, 3389 (void *)ixgbe, NULL); 3390 if (rc != DDI_SUCCESS) { 3391 ixgbe_log(ixgbe, 3392 "Add tx/other interrupt handler failed: %d", rc); 3393 return (IXGBE_FAILURE); 3394 } 3395 vector++; 3396 3397 /* 3398 * Add interrupt handler for each rx ring 3399 */ 3400 for (i = 0; i < ixgbe->num_rx_rings; i++) { 3401 rx_ring = &ixgbe->rx_rings[i]; 3402 3403 /* 3404 * install pointer to vect_map[vector] 3405 */ 3406 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3407 (ddi_intr_handler_t *)ixgbe_intr_rx, 3408 (void *)&ixgbe->vect_map[vector], NULL); 3409 3410 if (rc != DDI_SUCCESS) { 3411 ixgbe_log(ixgbe, 3412 "Add rx interrupt handler failed. 
" 3413 "return: %d, rx ring: %d", rc, i); 3414 for (vector--; vector >= 0; vector--) { 3415 (void) ddi_intr_remove_handler( 3416 ixgbe->htable[vector]); 3417 } 3418 return (IXGBE_FAILURE); 3419 } 3420 3421 rx_ring->intr_vector = vector; 3422 3423 vector++; 3424 } 3425 break; 3426 3427 case DDI_INTR_TYPE_MSI: 3428 /* 3429 * Add interrupt handlers for the only vector 3430 */ 3431 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3432 (ddi_intr_handler_t *)ixgbe_intr_msi, 3433 (void *)ixgbe, NULL); 3434 3435 if (rc != DDI_SUCCESS) { 3436 ixgbe_log(ixgbe, 3437 "Add MSI interrupt handler failed: %d", rc); 3438 return (IXGBE_FAILURE); 3439 } 3440 3441 rx_ring = &ixgbe->rx_rings[0]; 3442 rx_ring->intr_vector = vector; 3443 3444 vector++; 3445 break; 3446 3447 case DDI_INTR_TYPE_FIXED: 3448 /* 3449 * Add interrupt handlers for the only vector 3450 */ 3451 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3452 (ddi_intr_handler_t *)ixgbe_intr_legacy, 3453 (void *)ixgbe, NULL); 3454 3455 if (rc != DDI_SUCCESS) { 3456 ixgbe_log(ixgbe, 3457 "Add legacy interrupt handler failed: %d", rc); 3458 return (IXGBE_FAILURE); 3459 } 3460 3461 rx_ring = &ixgbe->rx_rings[0]; 3462 rx_ring->intr_vector = vector; 3463 3464 vector++; 3465 break; 3466 3467 default: 3468 return (IXGBE_FAILURE); 3469 } 3470 3471 ASSERT(vector == ixgbe->intr_cnt); 3472 3473 return (IXGBE_SUCCESS); 3474 } 3475 3476 #pragma inline(ixgbe_map_rxring_to_vector) 3477 /* 3478 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector. 3479 */ 3480 static void 3481 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx) 3482 { 3483 ixgbe->vect_map[v_idx].ixgbe = ixgbe; 3484 3485 /* 3486 * Set bit in map 3487 */ 3488 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 3489 3490 /* 3491 * Count bits set 3492 */ 3493 ixgbe->vect_map[v_idx].rxr_cnt++; 3494 3495 /* 3496 * Remember bit position 3497 */ 3498 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx; 3499 } 3500 3501 #pragma inline(ixgbe_map_txring_to_vector) 3502 /* 3503 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector. 3504 */ 3505 static void 3506 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx) 3507 { 3508 ixgbe->vect_map[v_idx].ixgbe = ixgbe; 3509 3510 /* 3511 * Set bit in map 3512 */ 3513 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx); 3514 3515 /* 3516 * Count bits set 3517 */ 3518 ixgbe->vect_map[v_idx].txr_cnt++; 3519 3520 /* 3521 * Remember bit position 3522 */ 3523 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx; 3524 } 3525 3526 /* 3527 * ixgbe_set_ivar - Set the given entry in the given interrupt vector 3528 * allocation register (IVAR). 3529 */ 3530 static void 3531 ixgbe_set_ivar(ixgbe_t *ixgbe, uint16_t int_alloc_entry, uint8_t msix_vector) 3532 { 3533 struct ixgbe_hw *hw = &ixgbe->hw; 3534 u32 ivar, index; 3535 3536 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 3537 index = (int_alloc_entry >> 2) & 0x1F; 3538 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 3539 ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3))); 3540 ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3))); 3541 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 3542 } 3543 3544 /* 3545 * ixgbe_map_rings_to_vectors - Map descriptor rings to interrupt vectors. 3546 * 3547 * For msi-x, this currently implements only the scheme which is 3548 * 1 vector for tx + other, 1 vector for each rx ring. 
3549 */ 3550 static int 3551 ixgbe_map_rings_to_vectors(ixgbe_t *ixgbe) 3552 { 3553 int i, vector = 0; 3554 int vect_remain = ixgbe->intr_cnt; 3555 3556 /* initialize vector map */ 3557 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 3558 3559 /* 3560 * non-MSI-X case is very simple: all interrupts on vector 0 3561 */ 3562 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 3563 ixgbe_map_rxring_to_vector(ixgbe, 0, 0); 3564 ixgbe_map_txring_to_vector(ixgbe, 0, 0); 3565 return (IXGBE_SUCCESS); 3566 } 3567 3568 /* 3569 * Ring/vector mapping for MSI-X 3570 */ 3571 3572 /* 3573 * Map vector 0 to tx 3574 */ 3575 ixgbe_map_txring_to_vector(ixgbe, 0, vector++); 3576 vect_remain--; 3577 3578 /* 3579 * Map remaining vectors to rx rings 3580 */ 3581 for (i = 0; i < vect_remain; i++) { 3582 ixgbe_map_rxring_to_vector(ixgbe, i, vector++); 3583 } 3584 3585 return (IXGBE_SUCCESS); 3586 } 3587 3588 /* 3589 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s). 3590 * 3591 * This relies on queue/vector mapping already set up in the 3592 * vect_map[] structures 3593 */ 3594 static void 3595 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe) 3596 { 3597 struct ixgbe_hw *hw = &ixgbe->hw; 3598 ixgbe_ring_vector_t *vect; /* vector bitmap */ 3599 int r_idx; /* ring index */ 3600 int v_idx; /* vector index */ 3601 3602 /* 3603 * Clear any previous entries 3604 */ 3605 for (v_idx = 0; v_idx < IXGBE_IVAR_REG_NUM; v_idx++) 3606 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 3607 3608 /* 3609 * "Other" is always on vector 0 3610 */ 3611 ixgbe_set_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0); 3612 3613 /* 3614 * For each interrupt vector, populate the IVAR table 3615 */ 3616 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) { 3617 vect = &ixgbe->vect_map[v_idx]; 3618 3619 /* 3620 * For each rx ring bit set 3621 */ 3622 r_idx = bt_getlowbit(vect->rx_map, 0, 3623 (ixgbe->num_rx_rings - 1)); 3624 3625 while (r_idx >= 0) { 3626 ixgbe_set_ivar(ixgbe, IXGBE_IVAR_RX_QUEUE(r_idx), 3627 v_idx); 3628 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 3629 (ixgbe->num_rx_rings - 1)); 3630 } 3631 3632 /* 3633 * For each tx ring bit set 3634 */ 3635 r_idx = bt_getlowbit(vect->tx_map, 0, 3636 (ixgbe->num_tx_rings - 1)); 3637 3638 while (r_idx >= 0) { 3639 ixgbe_set_ivar(ixgbe, IXGBE_IVAR_TX_QUEUE(r_idx), 3640 v_idx); 3641 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 3642 (ixgbe->num_tx_rings - 1)); 3643 } 3644 } 3645 } 3646 3647 /* 3648 * ixgbe_rem_intr_handlers - Remove the interrupt handlers. 3649 */ 3650 static void 3651 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe) 3652 { 3653 int i; 3654 int rc; 3655 3656 for (i = 0; i < ixgbe->intr_cnt; i++) { 3657 rc = ddi_intr_remove_handler(ixgbe->htable[i]); 3658 if (rc != DDI_SUCCESS) { 3659 IXGBE_DEBUGLOG_1(ixgbe, 3660 "Remove intr handler failed: %d", rc); 3661 } 3662 } 3663 } 3664 3665 /* 3666 * ixgbe_rem_intrs - Remove the allocated interrupts. 3667 */ 3668 static void 3669 ixgbe_rem_intrs(ixgbe_t *ixgbe) 3670 { 3671 int i; 3672 int rc; 3673 3674 for (i = 0; i < ixgbe->intr_cnt; i++) { 3675 rc = ddi_intr_free(ixgbe->htable[i]); 3676 if (rc != DDI_SUCCESS) { 3677 IXGBE_DEBUGLOG_1(ixgbe, 3678 "Free intr failed: %d", rc); 3679 } 3680 } 3681 3682 kmem_free(ixgbe->htable, ixgbe->intr_size); 3683 ixgbe->htable = NULL; 3684 } 3685 3686 /* 3687 * ixgbe_enable_intrs - Enable all the ddi interrupts. 
3688 */ 3689 static int 3690 ixgbe_enable_intrs(ixgbe_t *ixgbe) 3691 { 3692 int i; 3693 int rc; 3694 3695 /* 3696 * Enable interrupts 3697 */ 3698 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 3699 /* 3700 * Call ddi_intr_block_enable() for MSI 3701 */ 3702 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt); 3703 if (rc != DDI_SUCCESS) { 3704 ixgbe_log(ixgbe, 3705 "Enable block intr failed: %d", rc); 3706 return (IXGBE_FAILURE); 3707 } 3708 } else { 3709 /* 3710 * Call ddi_intr_enable() for Legacy/MSI non block enable 3711 */ 3712 for (i = 0; i < ixgbe->intr_cnt; i++) { 3713 rc = ddi_intr_enable(ixgbe->htable[i]); 3714 if (rc != DDI_SUCCESS) { 3715 ixgbe_log(ixgbe, 3716 "Enable intr failed: %d", rc); 3717 return (IXGBE_FAILURE); 3718 } 3719 } 3720 } 3721 3722 return (IXGBE_SUCCESS); 3723 } 3724 3725 /* 3726 * ixgbe_disable_intrs - Disable all the interrupts. 3727 */ 3728 static int 3729 ixgbe_disable_intrs(ixgbe_t *ixgbe) 3730 { 3731 int i; 3732 int rc; 3733 3734 /* 3735 * Disable all interrupts 3736 */ 3737 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 3738 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt); 3739 if (rc != DDI_SUCCESS) { 3740 ixgbe_log(ixgbe, 3741 "Disable block intr failed: %d", rc); 3742 return (IXGBE_FAILURE); 3743 } 3744 } else { 3745 for (i = 0; i < ixgbe->intr_cnt; i++) { 3746 rc = ddi_intr_disable(ixgbe->htable[i]); 3747 if (rc != DDI_SUCCESS) { 3748 ixgbe_log(ixgbe, 3749 "Disable intr failed: %d", rc); 3750 return (IXGBE_FAILURE); 3751 } 3752 } 3753 } 3754 3755 return (IXGBE_SUCCESS); 3756 } 3757 3758 /* 3759 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware. 3760 */ 3761 static void 3762 ixgbe_get_hw_state(ixgbe_t *ixgbe) 3763 { 3764 struct ixgbe_hw *hw = &ixgbe->hw; 3765 uint32_t links; 3766 uint32_t pcs1g_anlp = 0; 3767 uint32_t pcs1g_ana = 0; 3768 3769 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3770 ixgbe->param_lp_1000fdx_cap = 0; 3771 ixgbe->param_lp_100fdx_cap = 0; 3772 3773 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 3774 if (links & IXGBE_LINKS_PCS_1G_EN) { 3775 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 3776 pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 3777 3778 ixgbe->param_lp_1000fdx_cap = 3779 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 3780 ixgbe->param_lp_100fdx_cap = 3781 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 3782 } 3783 3784 ixgbe->param_1000fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0; 3785 ixgbe->param_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0; 3786 } 3787 3788 /* 3789 * ixgbe_get_driver_control - Notify that driver is in control of device. 3790 */ 3791 static void 3792 ixgbe_get_driver_control(struct ixgbe_hw *hw) 3793 { 3794 uint32_t ctrl_ext; 3795 3796 /* 3797 * Notify firmware that driver is in control of device 3798 */ 3799 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3800 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 3801 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3802 } 3803 3804 /* 3805 * ixgbe_release_driver_control - Notify that driver is no longer in control 3806 * of device. 3807 */ 3808 static void 3809 ixgbe_release_driver_control(struct ixgbe_hw *hw) 3810 { 3811 uint32_t ctrl_ext; 3812 3813 /* 3814 * Notify firmware that driver is no longer in control of device 3815 */ 3816 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 3817 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 3818 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 3819 } 3820 3821 /* 3822 * ixgbe_atomic_reserve - Atomic decrease operation. 
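 *
 * Atomically subtracts n from *count_p, failing (returning -1 and
 * leaving the count untouched) if fewer than n are available.  The
 * compare-and-swap loop retries whenever another thread has changed
 * the count between the read and the update.  A hypothetical usage
 * sketch (the tbd_free field is shown only for illustration):
 *
 *	if (ixgbe_atomic_reserve(&tx_ring->tbd_free, desc_needed) < 0)
 *		... back off: not enough free tx descriptors ...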
3823 */ 3824 int 3825 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n) 3826 { 3827 uint32_t oldval; 3828 uint32_t newval; 3829 3830 /* 3831 * ATOMICALLY 3832 */ 3833 do { 3834 oldval = *count_p; 3835 if (oldval < n) 3836 return (-1); 3837 newval = oldval - n; 3838 } while (atomic_cas_32(count_p, oldval, newval) != oldval); 3839 3840 return (newval); 3841 } 3842 3843 /* 3844 * ixgbe_mc_table_itr - Traverse the entries in the multicast table. 3845 */ 3846 static uint8_t * 3847 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq) 3848 { 3849 _NOTE(ARGUNUSED(hw)); 3850 _NOTE(ARGUNUSED(vmdq)); 3851 uint8_t *addr = *upd_ptr; 3852 uint8_t *new_ptr; 3853 3854 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 3855 *upd_ptr = new_ptr; 3856 return (addr); 3857 } 3858 3859 /* 3860 * FMA support 3861 */ 3862 int 3863 ixgbe_check_acc_handle(ddi_acc_handle_t handle) 3864 { 3865 ddi_fm_error_t de; 3866 3867 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 3868 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 3869 return (de.fme_status); 3870 } 3871 3872 int 3873 ixgbe_check_dma_handle(ddi_dma_handle_t handle) 3874 { 3875 ddi_fm_error_t de; 3876 3877 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 3878 return (de.fme_status); 3879 } 3880 3881 /* 3882 * ixgbe_fm_error_cb - The IO fault service error handling callback function. 3883 */ 3884 static int 3885 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 3886 { 3887 _NOTE(ARGUNUSED(impl_data)); 3888 /* 3889 * as the driver can always deal with an error in any dma or 3890 * access handle, we can just return the fme_status value. 3891 */ 3892 pci_ereport_post(dip, err, NULL); 3893 return (err->fme_status); 3894 } 3895 3896 static void 3897 ixgbe_fm_init(ixgbe_t *ixgbe) 3898 { 3899 ddi_iblock_cookie_t iblk; 3900 int fma_acc_flag, fma_dma_flag; 3901 3902 /* 3903 * Only register with IO Fault Services if we have some capability 3904 */ 3905 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 3906 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 3907 fma_acc_flag = 1; 3908 } else { 3909 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 3910 fma_acc_flag = 0; 3911 } 3912 3913 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 3914 fma_dma_flag = 1; 3915 } else { 3916 fma_dma_flag = 0; 3917 } 3918 3919 ixgbe_set_fma_flags(fma_acc_flag, fma_dma_flag); 3920 3921 if (ixgbe->fm_capabilities) { 3922 3923 /* 3924 * Register capabilities with IO Fault Services 3925 */ 3926 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk); 3927 3928 /* 3929 * Initialize pci ereport capabilities if ereport capable 3930 */ 3931 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 3932 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 3933 pci_ereport_setup(ixgbe->dip); 3934 3935 /* 3936 * Register error callback if error callback capable 3937 */ 3938 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 3939 ddi_fm_handler_register(ixgbe->dip, 3940 ixgbe_fm_error_cb, (void*) ixgbe); 3941 } 3942 } 3943 3944 static void 3945 ixgbe_fm_fini(ixgbe_t *ixgbe) 3946 { 3947 /* 3948 * Only unregister FMA capabilities if they are registered 3949 */ 3950 if (ixgbe->fm_capabilities) { 3951 3952 /* 3953 * Release any resources allocated by pci_ereport_setup() 3954 */ 3955 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 3956 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 3957 pci_ereport_teardown(ixgbe->dip); 3958 3959 /* 3960 * Un-register error callback if error callback capable 3961 */ 3962 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 3963 
ddi_fm_handler_unregister(ixgbe->dip); 3964 3965 /* 3966 * Unregister from IO Fault Service 3967 */ 3968 ddi_fm_fini(ixgbe->dip); 3969 } 3970 } 3971 3972 void 3973 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail) 3974 { 3975 uint64_t ena; 3976 char buf[FM_MAX_CLASS]; 3977 3978 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 3979 ena = fm_ena_generate(0, FM_ENA_FMT1); 3980 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) { 3981 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP, 3982 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 3983 } 3984 } 3985
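/*
 * A hypothetical usage sketch of ixgbe_fm_ereport(): callers pass only
 * the class suffix, e.g.
 *
 *	ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
 *	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
 *
 * and the snprintf() above prepends DDI_FM_DEVICE to form the full
 * ereport class (here, "device.inval_state").
 */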