1 /* 2 * CDDL HEADER START 3 * 4 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved. 5 * The contents of this file are subject to the terms of the 6 * Common Development and Distribution License (the "License"). 7 * You may not use this file except in compliance with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 23 /* 24 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 25 * Use is subject to license terms. 
 */

#include "ixgbe_sw.h"

/* Identification strings reported via modinfo(1M) and the system log. */
static char ident[] = "Intel 10Gb Ethernet";
static char ixgbe_version[] = "ixgbe 1.1.2";

/*
 * Local function prototypes
 */
static int ixgbe_register_mac(ixgbe_t *);
static int ixgbe_identify_hardware(ixgbe_t *);
static int ixgbe_regs_map(ixgbe_t *);
static void ixgbe_init_properties(ixgbe_t *);
static int ixgbe_init_driver_settings(ixgbe_t *);
static void ixgbe_init_locks(ixgbe_t *);
static void ixgbe_destroy_locks(ixgbe_t *);
static int ixgbe_init(ixgbe_t *);
static int ixgbe_chip_start(ixgbe_t *);
static void ixgbe_chip_stop(ixgbe_t *);
static int ixgbe_reset(ixgbe_t *);
static void ixgbe_tx_clean(ixgbe_t *);
static boolean_t ixgbe_tx_drain(ixgbe_t *);
static boolean_t ixgbe_rx_drain(ixgbe_t *);
static int ixgbe_alloc_rings(ixgbe_t *);
static void ixgbe_free_rings(ixgbe_t *);
static int ixgbe_alloc_rx_data(ixgbe_t *);
static void ixgbe_free_rx_data(ixgbe_t *);
static void ixgbe_setup_rings(ixgbe_t *);
static void ixgbe_setup_rx(ixgbe_t *);
static void ixgbe_setup_tx(ixgbe_t *);
static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
static void ixgbe_setup_rss(ixgbe_t *);
static void ixgbe_init_unicst(ixgbe_t *);
static int ixgbe_unicst_set(ixgbe_t *, const uint8_t *, int);
static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
static void ixgbe_setup_multicst(ixgbe_t *);
static void ixgbe_get_hw_state(ixgbe_t *);
static void ixgbe_get_conf(ixgbe_t *);
static void ixgbe_init_params(ixgbe_t *);
static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
static void ixgbe_driver_link_check(void *);
static void ixgbe_sfp_check(void *);
static void ixgbe_local_timer(void *);
static void ixgbe_arm_watchdog_timer(ixgbe_t *);
static void ixgbe_restart_watchdog_timer(ixgbe_t *);
static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
static boolean_t is_valid_mac_addr(uint8_t *);
static boolean_t ixgbe_stall_check(ixgbe_t *);
static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
static boolean_t ixgbe_find_mac_address(ixgbe_t *);
static int ixgbe_alloc_intrs(ixgbe_t *);
static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
static int ixgbe_add_intr_handlers(ixgbe_t *);
static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
static void ixgbe_setup_adapter_vector(ixgbe_t *);
static void ixgbe_rem_intr_handlers(ixgbe_t *);
static void ixgbe_rem_intrs(ixgbe_t *);
static int ixgbe_enable_intrs(ixgbe_t *);
static int ixgbe_disable_intrs(ixgbe_t *);
static uint_t ixgbe_intr_legacy(void *, void *);
static uint_t ixgbe_intr_msi(void *, void *);
static uint_t ixgbe_intr_msix(void *, void *);
static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
static void ixgbe_get_driver_control(struct ixgbe_hw *);
static int ixgbe_addmac(void *, const uint8_t *);
static int ixgbe_remmac(void *, const uint8_t *);
static void ixgbe_release_driver_control(struct ixgbe_hw *);

/*
 * DDI entry points and helpers.
 */
static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
static int ixgbe_resume(dev_info_t *);
static int ixgbe_suspend(dev_info_t *);
static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
/*
 * FMA (Fault Management Architecture) support.
 */
static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void ixgbe_fm_init(ixgbe_t *);
static void ixgbe_fm_fini(ixgbe_t *);

/*
 * Driver-private MAC properties exposed through the GLDv3 framework.
 */
mac_priv_prop_t ixgbe_priv_props[] = {
	{"_tx_copy_thresh", MAC_PROP_PERM_RW},
	{"_tx_recycle_thresh", MAC_PROP_PERM_RW},
	{"_tx_overload_thresh", MAC_PROP_PERM_RW},
	{"_tx_resched_thresh", MAC_PROP_PERM_RW},
	{"_rx_copy_thresh", MAC_PROP_PERM_RW},
	{"_rx_limit_per_intr", MAC_PROP_PERM_RW},
	{"_intr_throttling", MAC_PROP_PERM_RW},
	{"_adv_pause_cap", MAC_PROP_PERM_READ},
	{"_adv_asym_pause_cap", MAC_PROP_PERM_READ}
};

#define	IXGBE_MAX_PRIV_PROPS \
	(sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))

/*
 * Character/block entry points.  The driver is managed entirely through
 * the MAC layer, so all of these are stubs.
 */
static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ident,			/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};

/*
 * Access attributes for register mapping
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Loopback property
 */
static lb_property_t lb_normal = {
	normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
	external, "External", IXGBE_LB_EXTERNAL
};

#define	IXGBE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP)

/*
 * GLDv3 MAC callbacks.
 */
static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	NULL,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab,
	NULL,
	NULL,
	ixgbe_m_setprop,
	ixgbe_m_getprop
};

/*
 * Initialize capabilities of each supported adapter type
 */
static adapter_info_t ixgbe_82598eb_cap = {
	64,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	8,		/* default number of rx queues */
	32,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	16366,		/* maximum MTU size */
	0xFFFF,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	18,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};

static adapter_info_t ixgbe_82599eb_cap = { 247 128, /* maximum number of rx queues */ 248 1, /* minimum number of rx queues */ 249 8, /* default number of rx queues */ 250 128, /* maximum number of tx queues */ 251 1, /* minimum number of tx queues */ 252 8, /* default number of tx queues */ 253 15500, /* maximum MTU size */ 254 0xFF8, /* maximum interrupt throttle rate */ 255 0, /* minimum interrupt throttle rate */ 256 200, /* default interrupt throttle rate */ 257 64, /* maximum total msix vectors */ 258 16, /* maximum number of ring vectors */ 259 2, /* maximum number of other vectors */ 260 IXGBE_EICR_LSC, /* "other" interrupt types handled */ 261 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */ 262 | IXGBE_FLAG_RSS_CAPABLE 263 | IXGBE_FLAG_VMDQ_CAPABLE) 264 }; 265 266 /* 267 * Module Initialization Functions. 268 */ 269 270 int 271 _init(void) 272 { 273 int status; 274 275 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME); 276 277 status = mod_install(&ixgbe_modlinkage); 278 279 if (status != DDI_SUCCESS) { 280 mac_fini_ops(&ixgbe_dev_ops); 281 } 282 283 return (status); 284 } 285 286 int 287 _fini(void) 288 { 289 int status; 290 291 status = mod_remove(&ixgbe_modlinkage); 292 293 if (status == DDI_SUCCESS) { 294 mac_fini_ops(&ixgbe_dev_ops); 295 } 296 297 return (status); 298 } 299 300 int 301 _info(struct modinfo *modinfop) 302 { 303 int status; 304 305 status = mod_info(&ixgbe_modlinkage, modinfop); 306 307 return (status); 308 } 309 310 /* 311 * ixgbe_attach - Driver attach. 312 * 313 * This function is the device specific initialization entry 314 * point. This entry point is required and must be written. 315 * The DDI_ATTACH command must be provided in the attach entry 316 * point. When attach() is called with cmd set to DDI_ATTACH, 317 * all normal kernel services (such as kmem_alloc(9F)) are 318 * available for use by the driver. 319 * 320 * The attach() function will be called once for each instance 321 * of the device on the system with cmd set to DDI_ATTACH. 
322 * Until attach() succeeds, the only driver entry points which 323 * may be called are open(9E) and getinfo(9E). 324 */ 325 static int 326 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 327 { 328 ixgbe_t *ixgbe; 329 struct ixgbe_osdep *osdep; 330 struct ixgbe_hw *hw; 331 int instance; 332 char taskqname[32]; 333 334 /* 335 * Check the command and perform corresponding operations 336 */ 337 switch (cmd) { 338 default: 339 return (DDI_FAILURE); 340 341 case DDI_RESUME: 342 return (ixgbe_resume(devinfo)); 343 344 case DDI_ATTACH: 345 break; 346 } 347 348 /* Get the device instance */ 349 instance = ddi_get_instance(devinfo); 350 351 /* Allocate memory for the instance data structure */ 352 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP); 353 354 ixgbe->dip = devinfo; 355 ixgbe->instance = instance; 356 357 hw = &ixgbe->hw; 358 osdep = &ixgbe->osdep; 359 hw->back = osdep; 360 osdep->ixgbe = ixgbe; 361 362 /* Attach the instance pointer to the dev_info data structure */ 363 ddi_set_driver_private(devinfo, ixgbe); 364 365 /* 366 * Initialize for fma support 367 */ 368 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE, 369 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 370 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 371 ixgbe_fm_init(ixgbe); 372 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT; 373 374 /* 375 * Map PCI config space registers 376 */ 377 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { 378 ixgbe_error(ixgbe, "Failed to map PCI configurations"); 379 goto attach_fail; 380 } 381 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; 382 383 /* 384 * Identify the chipset family 385 */ 386 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) { 387 ixgbe_error(ixgbe, "Failed to identify hardware"); 388 goto attach_fail; 389 } 390 391 /* 392 * Map device registers 393 */ 394 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) { 395 ixgbe_error(ixgbe, "Failed to map device registers"); 396 goto attach_fail; 397 } 
398 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP; 399 400 /* 401 * Initialize driver parameters 402 */ 403 ixgbe_init_properties(ixgbe); 404 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS; 405 406 /* 407 * Allocate interrupts 408 */ 409 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) { 410 ixgbe_error(ixgbe, "Failed to allocate interrupts"); 411 goto attach_fail; 412 } 413 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; 414 415 /* 416 * Allocate rx/tx rings based on the ring numbers. 417 * The actual numbers of rx/tx rings are decided by the number of 418 * allocated interrupt vectors, so we should allocate the rings after 419 * interrupts are allocated. 420 */ 421 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) { 422 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings"); 423 goto attach_fail; 424 } 425 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS; 426 427 /* 428 * Map rings to interrupt vectors 429 */ 430 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { 431 ixgbe_error(ixgbe, "Failed to map interrupts to vectors"); 432 goto attach_fail; 433 } 434 435 /* 436 * Add interrupt handlers 437 */ 438 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { 439 ixgbe_error(ixgbe, "Failed to add interrupt handlers"); 440 goto attach_fail; 441 } 442 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 443 444 /* 445 * Create a taskq for link-status-change 446 */ 447 (void) sprintf(taskqname, "ixgbe%d_taskq", instance); 448 if ((ixgbe->lsc_taskq = ddi_taskq_create(devinfo, taskqname, 449 1, TASKQ_DEFAULTPRI, 0)) == NULL) { 450 ixgbe_error(ixgbe, "taskq_create failed"); 451 goto attach_fail; 452 } 453 ixgbe->attach_progress |= ATTACH_PROGRESS_LSC_TASKQ; 454 455 /* 456 * Initialize driver parameters 457 */ 458 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) { 459 ixgbe_error(ixgbe, "Failed to initialize driver settings"); 460 goto attach_fail; 461 } 462 463 /* 464 * Initialize mutexes for this device. 
465 * Do this before enabling the interrupt handler and 466 * register the softint to avoid the condition where 467 * interrupt handler can try using uninitialized mutex. 468 */ 469 ixgbe_init_locks(ixgbe); 470 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS; 471 472 /* 473 * Initialize chipset hardware 474 */ 475 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) { 476 ixgbe_error(ixgbe, "Failed to initialize adapter"); 477 goto attach_fail; 478 } 479 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT; 480 481 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) { 482 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 483 goto attach_fail; 484 } 485 486 /* 487 * Initialize statistics 488 */ 489 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) { 490 ixgbe_error(ixgbe, "Failed to initialize statistics"); 491 goto attach_fail; 492 } 493 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS; 494 495 /* 496 * Register the driver to the MAC 497 */ 498 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) { 499 ixgbe_error(ixgbe, "Failed to register MAC"); 500 goto attach_fail; 501 } 502 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN); 503 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC; 504 505 /* 506 * Now that mutex locks are initialized, and the chip is also 507 * initialized, enable interrupts. 508 */ 509 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { 510 ixgbe_error(ixgbe, "Failed to enable DDI interrupts"); 511 goto attach_fail; 512 } 513 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 514 515 ixgbe_log(ixgbe, "%s", ixgbe_version); 516 ixgbe->ixgbe_state |= IXGBE_INITIALIZED; 517 518 return (DDI_SUCCESS); 519 520 attach_fail: 521 ixgbe_unconfigure(devinfo, ixgbe); 522 return (DDI_FAILURE); 523 } 524 525 /* 526 * ixgbe_detach - Driver detach. 527 * 528 * The detach() function is the complement of the attach routine. 
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled and all memory allocated by this
 * driver is freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ixgbe_t *ixgbe;

	/*
	 * Check detach command
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (ixgbe_suspend(devinfo));

	case DDI_DETACH:
		break;
	}


	/*
	 * Get the pointer to the driver private data structure
	 */
	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	/*
	 * Unregister MAC. If failed, we have to fail the detach
	 */
	if (mac_unregister(ixgbe->mac_hdl) != 0) {
		ixgbe_error(ixgbe, "Failed to unregister MAC");
		return (DDI_FAILURE);
	}
	ixgbe->attach_progress &= ~ATTACH_PROGRESS_MAC;

	/*
	 * If the device is still running, it needs to be stopped first.
	 * This check is necessary because under some specific circumstances,
	 * the detach routine can be called without stopping the interface
	 * first.
	 */
	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		ixgbe->ixgbe_state &= ~IXGBE_STARTED;
		ixgbe_stop(ixgbe, B_TRUE);
		mutex_exit(&ixgbe->gen_lock);
		/* Disable and stop the watchdog timer */
		ixgbe_disable_watchdog_timer(ixgbe);
	} else
		mutex_exit(&ixgbe->gen_lock);

	/*
	 * Check if there are still rx buffers held by the upper layer.
	 * If so, fail the detach.
	 */
	if (!ixgbe_rx_drain(ixgbe))
		return (DDI_FAILURE);

	/*
	 * Do the remaining unconfigure routines
	 */
	ixgbe_unconfigure(devinfo, ixgbe);

	return (DDI_SUCCESS);
}

/*
 * ixgbe_unconfigure - Tear down the attach steps recorded in
 * attach_progress, in reverse dependency order, and free the soft state.
 * Safe to call with a partially-attached instance.
 */
static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
	/*
	 * Disable interrupt
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
		(void) ixgbe_disable_intrs(ixgbe);
	}

	/*
	 * Unregister MAC
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
		(void) mac_unregister(ixgbe->mac_hdl);
	}

	/*
	 * Free statistics
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
		kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
	}

	/*
	 * Remove interrupt handlers
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
		ixgbe_rem_intr_handlers(ixgbe);
	}

	/*
	 * Remove taskq for link-status-change
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LSC_TASKQ) {
		ddi_taskq_destroy(ixgbe->lsc_taskq);
	}

	/*
	 * Remove interrupts
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
		ixgbe_rem_intrs(ixgbe);
	}

	/*
	 * Remove driver properties
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
		(void) ddi_prop_remove_all(devinfo);
	}

	/*
	 * Stop the chipset
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_chip_stop(ixgbe);
		mutex_exit(&ixgbe->gen_lock);
	}

	/*
	 * Free register handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
		if (ixgbe->osdep.reg_handle != NULL)
			ddi_regs_map_free(&ixgbe->osdep.reg_handle);
	}

	/*
	 * Free PCI config handle
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
		if (ixgbe->osdep.cfg_handle != NULL)
			pci_config_teardown(&ixgbe->osdep.cfg_handle);
	}

	/*
	 * Free locks
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
		ixgbe_destroy_locks(ixgbe);
	}

	/*
	 * Free the rx/tx rings
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
		ixgbe_free_rings(ixgbe);
	}

	/*
	 * Unregister FMA capabilities
	 */
	if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
		ixgbe_fm_fini(ixgbe);
	}

	/*
	 * Free the driver data structure
	 */
	kmem_free(ixgbe, sizeof (ixgbe_t));

	ddi_set_driver_private(devinfo, NULL);
}

/*
 * ixgbe_register_mac - Register the driver and its function pointers with
 * the GLD interface.
 *
 * Returns IXGBE_SUCCESS on success, IXGBE_FAILURE if the mac_register_t
 * cannot be allocated or mac_register(9F) fails.
 */
static int
ixgbe_register_mac(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	mac_register_t *mac;
	int status;

	if ((mac = mac_alloc(MAC_VERSION)) == NULL)
		return (IXGBE_FAILURE);

	mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	mac->m_driver = ixgbe;
	mac->m_dip = ixgbe->dip;
	mac->m_src_addr = hw->mac.addr;
	mac->m_callbacks = &ixgbe_m_callbacks;
	mac->m_min_sdu = 0;
	mac->m_max_sdu = ixgbe->default_mtu;
	mac->m_margin = VLAN_TAGSZ;
	mac->m_priv_props = ixgbe_priv_props;
	mac->m_priv_prop_count = IXGBE_MAX_PRIV_PROPS;
	mac->m_v12n = MAC_VIRT_LEVEL1;

	status = mac_register(mac, &ixgbe->mac_hdl);

	/* mac_register() copies what it needs; the template can be freed. */
	mac_free(mac);

	return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
}

/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Set the mac type of the adapter based on the device id
	 */
	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Install adapter capabilities.
	 *
	 * NOTE(review): capab points at a file-scope static table that is
	 * shared by all instances, and the |= below mutates it in place —
	 * looks intentional for homogeneous systems, but verify for mixed
	 * copper/fiber 82598 configurations.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		ixgbe_log(ixgbe, "identify 82598 adapter\n");
		ixgbe->capab = &ixgbe_82598eb_cap;

		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
			/* Copper parts can report fan failure via GPI SDP1 */
			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
		}
		ixgbe->capab->other_intr |= IXGBE_EICR_LSC;

		break;
	case ixgbe_mac_82599EB:
		ixgbe_log(ixgbe, "identify 82599 adapter\n");
		ixgbe->capab = &ixgbe_82599eb_cap;

		ixgbe->capab->other_intr = (IXGBE_EICR_GPI_SDP1 |
		    IXGBE_EICR_GPI_SDP2 | IXGBE_EICR_LSC);

		break;
	default:
		ixgbe_log(ixgbe,
		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
		    hw->mac.type);
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_regs_map - Map the device registers.
 *
 */
static int
ixgbe_regs_map(ixgbe_t *ixgbe)
{
	dev_info_t *devinfo = ixgbe->dip;
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;
	off_t mem_size;

	/*
	 * First get the size of device registers to be mapped.
	 */
	if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
	    != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Call ddi_regs_map_setup() to map registers
	 */
	if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
	    (caddr_t *)&hw->hw_addr, 0,
	    mem_size, &ixgbe_regs_acc_attr,
	    &osdep->reg_handle)) != DDI_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_properties - Initialize driver properties.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);

	ixgbe_init_params(ixgbe);
}

/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be setup during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	dev_info_t *devinfo = ixgbe->dip;
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t rx_size;
	uint32_t tx_size;
	int i;

	/*
	 * Initialize chipset specific hardware function pointers
	 */
	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Get the system page size
	 */
	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);

	/*
	 * Set rx buffer size
	 *
	 * The IP header alignment room is counted in the calculation.
	 * The rx buffer size is in unit of 1K that is required by the
	 * chipset hardware.  The expression rounds rx_size up to the
	 * next 1K boundary.
	 */
	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
	ixgbe->rx_buf_size = ((rx_size >> 10) +
	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	/*
	 * Set tx buffer size (also rounded up to a 1K multiple)
	 */
	tx_size = ixgbe->max_frame_size;
	ixgbe->tx_buf_size = ((tx_size >> 10) +
	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	/*
	 * Initialize rx/tx rings parameters
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		rx_ring->index = i;
		rx_ring->ixgbe = ixgbe;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		tx_ring->index = i;
		tx_ring->ixgbe = ixgbe;
		if (ixgbe->tx_head_wb_enable)
			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
		else
			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

		tx_ring->ring_size = ixgbe->tx_ring_size;
		/* free list is sized at 1.5x the descriptor ring */
		tx_ring->free_list_size = ixgbe->tx_ring_size +
		    (ixgbe->tx_ring_size >> 1);
	}

	/*
	 * Initialize values of interrupt throttling rate
	 */
	for (i = 1; i < MAX_INTR_VECTOR; i++)
		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

	/*
	 * The initial link state should be "unknown"
	 */
	ixgbe->link_state = LINK_STATE_UNKNOWN;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_init_locks - Initialize locks.
944 */ 945 static void 946 ixgbe_init_locks(ixgbe_t *ixgbe) 947 { 948 ixgbe_rx_ring_t *rx_ring; 949 ixgbe_tx_ring_t *tx_ring; 950 int i; 951 952 for (i = 0; i < ixgbe->num_rx_rings; i++) { 953 rx_ring = &ixgbe->rx_rings[i]; 954 mutex_init(&rx_ring->rx_lock, NULL, 955 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 956 } 957 958 for (i = 0; i < ixgbe->num_tx_rings; i++) { 959 tx_ring = &ixgbe->tx_rings[i]; 960 mutex_init(&tx_ring->tx_lock, NULL, 961 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 962 mutex_init(&tx_ring->recycle_lock, NULL, 963 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 964 mutex_init(&tx_ring->tcb_head_lock, NULL, 965 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 966 mutex_init(&tx_ring->tcb_tail_lock, NULL, 967 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 968 } 969 970 mutex_init(&ixgbe->gen_lock, NULL, 971 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 972 973 mutex_init(&ixgbe->watchdog_lock, NULL, 974 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 975 } 976 977 /* 978 * ixgbe_destroy_locks - Destroy locks. 
979 */ 980 static void 981 ixgbe_destroy_locks(ixgbe_t *ixgbe) 982 { 983 ixgbe_rx_ring_t *rx_ring; 984 ixgbe_tx_ring_t *tx_ring; 985 int i; 986 987 for (i = 0; i < ixgbe->num_rx_rings; i++) { 988 rx_ring = &ixgbe->rx_rings[i]; 989 mutex_destroy(&rx_ring->rx_lock); 990 } 991 992 for (i = 0; i < ixgbe->num_tx_rings; i++) { 993 tx_ring = &ixgbe->tx_rings[i]; 994 mutex_destroy(&tx_ring->tx_lock); 995 mutex_destroy(&tx_ring->recycle_lock); 996 mutex_destroy(&tx_ring->tcb_head_lock); 997 mutex_destroy(&tx_ring->tcb_tail_lock); 998 } 999 1000 mutex_destroy(&ixgbe->gen_lock); 1001 mutex_destroy(&ixgbe->watchdog_lock); 1002 } 1003 1004 static int 1005 ixgbe_resume(dev_info_t *devinfo) 1006 { 1007 ixgbe_t *ixgbe; 1008 1009 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); 1010 if (ixgbe == NULL) 1011 return (DDI_FAILURE); 1012 1013 mutex_enter(&ixgbe->gen_lock); 1014 1015 if (ixgbe->ixgbe_state & IXGBE_STARTED) { 1016 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 1017 mutex_exit(&ixgbe->gen_lock); 1018 return (DDI_FAILURE); 1019 } 1020 1021 /* 1022 * Enable and start the watchdog timer 1023 */ 1024 ixgbe_enable_watchdog_timer(ixgbe); 1025 } 1026 1027 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED; 1028 1029 mutex_exit(&ixgbe->gen_lock); 1030 1031 return (DDI_SUCCESS); 1032 } 1033 1034 static int 1035 ixgbe_suspend(dev_info_t *devinfo) 1036 { 1037 ixgbe_t *ixgbe; 1038 1039 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); 1040 if (ixgbe == NULL) 1041 return (DDI_FAILURE); 1042 1043 mutex_enter(&ixgbe->gen_lock); 1044 1045 ixgbe->ixgbe_state |= IXGBE_SUSPENDED; 1046 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) { 1047 mutex_exit(&ixgbe->gen_lock); 1048 return (DDI_SUCCESS); 1049 } 1050 ixgbe_stop(ixgbe, B_FALSE); 1051 1052 mutex_exit(&ixgbe->gen_lock); 1053 1054 /* 1055 * Disable and stop the watchdog timer 1056 */ 1057 ixgbe_disable_watchdog_timer(ixgbe); 1058 1059 return (DDI_SUCCESS); 1060 } 1061 1062 /* 1063 * ixgbe_init - Initialize the device. 
1064 */ 1065 static int 1066 ixgbe_init(ixgbe_t *ixgbe) 1067 { 1068 struct ixgbe_hw *hw = &ixgbe->hw; 1069 1070 mutex_enter(&ixgbe->gen_lock); 1071 1072 /* 1073 * Reset chipset to put the hardware in a known state 1074 * before we try to do anything with the eeprom. 1075 */ 1076 if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) { 1077 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1078 goto init_fail; 1079 } 1080 1081 /* 1082 * Need to init eeprom before validating the checksum. 1083 */ 1084 if (ixgbe_init_eeprom_params(hw) < 0) { 1085 ixgbe_error(ixgbe, 1086 "Unable to intitialize the eeprom interface."); 1087 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1088 goto init_fail; 1089 } 1090 1091 /* 1092 * NVM validation 1093 */ 1094 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 1095 /* 1096 * Some PCI-E parts fail the first check due to 1097 * the link being in sleep state. Call it again, 1098 * if it fails a second time it's a real issue. 1099 */ 1100 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 1101 ixgbe_error(ixgbe, 1102 "Invalid NVM checksum. 
Please contact " 1103 "the vendor to update the NVM."); 1104 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1105 goto init_fail; 1106 } 1107 } 1108 1109 /* 1110 * Setup default flow control thresholds - enable/disable 1111 * & flow control type is controlled by ixgbe.conf 1112 */ 1113 hw->fc.high_water = DEFAULT_FCRTH; 1114 hw->fc.low_water = DEFAULT_FCRTL; 1115 hw->fc.pause_time = DEFAULT_FCPAUSE; 1116 hw->fc.send_xon = B_TRUE; 1117 1118 /* 1119 * Initialize link settings 1120 */ 1121 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE); 1122 1123 /* 1124 * Initialize the chipset hardware 1125 */ 1126 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) { 1127 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1128 goto init_fail; 1129 } 1130 1131 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) { 1132 goto init_fail; 1133 } 1134 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 1135 goto init_fail; 1136 } 1137 1138 mutex_exit(&ixgbe->gen_lock); 1139 return (IXGBE_SUCCESS); 1140 1141 init_fail: 1142 /* 1143 * Reset PHY 1144 */ 1145 (void) ixgbe_reset_phy(hw); 1146 1147 mutex_exit(&ixgbe->gen_lock); 1148 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 1149 return (IXGBE_FAILURE); 1150 } 1151 1152 /* 1153 * ixgbe_chip_start - Initialize and start the chipset hardware. 1154 */ 1155 static int 1156 ixgbe_chip_start(ixgbe_t *ixgbe) 1157 { 1158 struct ixgbe_hw *hw = &ixgbe->hw; 1159 int ret_val, i; 1160 1161 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1162 1163 /* 1164 * Get the mac address 1165 * This function should handle SPARC case correctly. 
1166 */ 1167 if (!ixgbe_find_mac_address(ixgbe)) { 1168 ixgbe_error(ixgbe, "Failed to get the mac address"); 1169 return (IXGBE_FAILURE); 1170 } 1171 1172 /* 1173 * Validate the mac address 1174 */ 1175 (void) ixgbe_init_rx_addrs(hw); 1176 if (!is_valid_mac_addr(hw->mac.addr)) { 1177 ixgbe_error(ixgbe, "Invalid mac address"); 1178 return (IXGBE_FAILURE); 1179 } 1180 1181 /* 1182 * Configure/Initialize hardware 1183 */ 1184 ret_val = ixgbe_init_hw(hw); 1185 if (ret_val != IXGBE_SUCCESS) { 1186 if (ret_val == IXGBE_ERR_EEPROM_VERSION) { 1187 ixgbe_error(ixgbe, 1188 "This 82599 device is pre-release and contains" 1189 " outdated firmware, please contact your hardware" 1190 " vendor for a replacement."); 1191 } else { 1192 ixgbe_error(ixgbe, "Failed to initialize hardware"); 1193 return (IXGBE_FAILURE); 1194 } 1195 } 1196 1197 /* 1198 * Setup adapter interrupt vectors 1199 */ 1200 ixgbe_setup_adapter_vector(ixgbe); 1201 1202 /* 1203 * Initialize unicast addresses. 1204 */ 1205 ixgbe_init_unicst(ixgbe); 1206 1207 /* 1208 * Setup and initialize the mctable structures. 
1209 */ 1210 ixgbe_setup_multicst(ixgbe); 1211 1212 /* 1213 * Set interrupt throttling rate 1214 */ 1215 for (i = 0; i < ixgbe->intr_cnt; i++) { 1216 IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]); 1217 } 1218 1219 /* 1220 * Save the state of the phy 1221 */ 1222 ixgbe_get_hw_state(ixgbe); 1223 1224 /* 1225 * Make sure driver has control 1226 */ 1227 ixgbe_get_driver_control(hw); 1228 1229 return (IXGBE_SUCCESS); 1230 } 1231 1232 /* 1233 * ixgbe_chip_stop - Stop the chipset hardware 1234 */ 1235 static void 1236 ixgbe_chip_stop(ixgbe_t *ixgbe) 1237 { 1238 struct ixgbe_hw *hw = &ixgbe->hw; 1239 1240 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1241 1242 /* 1243 * Tell firmware driver is no longer in control 1244 */ 1245 ixgbe_release_driver_control(hw); 1246 1247 /* 1248 * Reset the chipset 1249 */ 1250 (void) ixgbe_reset_hw(hw); 1251 1252 /* 1253 * Reset PHY 1254 */ 1255 (void) ixgbe_reset_phy(hw); 1256 } 1257 1258 /* 1259 * ixgbe_reset - Reset the chipset and re-start the driver. 1260 * 1261 * It involves stopping and re-starting the chipset, 1262 * and re-configuring the rx/tx rings. 1263 */ 1264 static int 1265 ixgbe_reset(ixgbe_t *ixgbe) 1266 { 1267 /* 1268 * Disable and stop the watchdog timer 1269 */ 1270 ixgbe_disable_watchdog_timer(ixgbe); 1271 1272 mutex_enter(&ixgbe->gen_lock); 1273 1274 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED); 1275 ixgbe->ixgbe_state &= ~IXGBE_STARTED; 1276 1277 ixgbe_stop(ixgbe, B_FALSE); 1278 1279 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 1280 mutex_exit(&ixgbe->gen_lock); 1281 return (IXGBE_FAILURE); 1282 } 1283 1284 ixgbe->ixgbe_state |= IXGBE_STARTED; 1285 mutex_exit(&ixgbe->gen_lock); 1286 1287 /* 1288 * Enable and start the watchdog timer 1289 */ 1290 ixgbe_enable_watchdog_timer(ixgbe); 1291 1292 return (IXGBE_SUCCESS); 1293 } 1294 1295 /* 1296 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources. 
1297 */ 1298 static void 1299 ixgbe_tx_clean(ixgbe_t *ixgbe) 1300 { 1301 ixgbe_tx_ring_t *tx_ring; 1302 tx_control_block_t *tcb; 1303 link_list_t pending_list; 1304 uint32_t desc_num; 1305 int i, j; 1306 1307 LINK_LIST_INIT(&pending_list); 1308 1309 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1310 tx_ring = &ixgbe->tx_rings[i]; 1311 1312 mutex_enter(&tx_ring->recycle_lock); 1313 1314 /* 1315 * Clean the pending tx data - the pending packets in the 1316 * work_list that have no chances to be transmitted again. 1317 * 1318 * We must ensure the chipset is stopped or the link is down 1319 * before cleaning the transmit packets. 1320 */ 1321 desc_num = 0; 1322 for (j = 0; j < tx_ring->ring_size; j++) { 1323 tcb = tx_ring->work_list[j]; 1324 if (tcb != NULL) { 1325 desc_num += tcb->desc_num; 1326 1327 tx_ring->work_list[j] = NULL; 1328 1329 ixgbe_free_tcb(tcb); 1330 1331 LIST_PUSH_TAIL(&pending_list, &tcb->link); 1332 } 1333 } 1334 1335 if (desc_num > 0) { 1336 atomic_add_32(&tx_ring->tbd_free, desc_num); 1337 ASSERT(tx_ring->tbd_free == tx_ring->ring_size); 1338 1339 /* 1340 * Reset the head and tail pointers of the tbd ring; 1341 * Reset the writeback head if it's enable. 1342 */ 1343 tx_ring->tbd_head = 0; 1344 tx_ring->tbd_tail = 0; 1345 if (ixgbe->tx_head_wb_enable) 1346 *tx_ring->tbd_head_wb = 0; 1347 1348 IXGBE_WRITE_REG(&ixgbe->hw, 1349 IXGBE_TDH(tx_ring->index), 0); 1350 IXGBE_WRITE_REG(&ixgbe->hw, 1351 IXGBE_TDT(tx_ring->index), 0); 1352 } 1353 1354 mutex_exit(&tx_ring->recycle_lock); 1355 1356 /* 1357 * Add the tx control blocks in the pending list to 1358 * the free list. 1359 */ 1360 ixgbe_put_free_list(tx_ring, &pending_list); 1361 } 1362 } 1363 1364 /* 1365 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be 1366 * transmitted. 
1367 */ 1368 static boolean_t 1369 ixgbe_tx_drain(ixgbe_t *ixgbe) 1370 { 1371 ixgbe_tx_ring_t *tx_ring; 1372 boolean_t done; 1373 int i, j; 1374 1375 /* 1376 * Wait for a specific time to allow pending tx packets 1377 * to be transmitted. 1378 * 1379 * Check the counter tbd_free to see if transmission is done. 1380 * No lock protection is needed here. 1381 * 1382 * Return B_TRUE if all pending packets have been transmitted; 1383 * Otherwise return B_FALSE; 1384 */ 1385 for (i = 0; i < TX_DRAIN_TIME; i++) { 1386 1387 done = B_TRUE; 1388 for (j = 0; j < ixgbe->num_tx_rings; j++) { 1389 tx_ring = &ixgbe->tx_rings[j]; 1390 done = done && 1391 (tx_ring->tbd_free == tx_ring->ring_size); 1392 } 1393 1394 if (done) 1395 break; 1396 1397 msec_delay(1); 1398 } 1399 1400 return (done); 1401 } 1402 1403 /* 1404 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer. 1405 */ 1406 static boolean_t 1407 ixgbe_rx_drain(ixgbe_t *ixgbe) 1408 { 1409 boolean_t done = B_TRUE; 1410 int i; 1411 1412 /* 1413 * Polling the rx free list to check if those rx buffers held by 1414 * the upper layer are released. 1415 * 1416 * Check the counter rcb_free to see if all pending buffers are 1417 * released. No lock protection is needed here. 1418 * 1419 * Return B_TRUE if all pending buffers have been released; 1420 * Otherwise return B_FALSE; 1421 */ 1422 for (i = 0; i < RX_DRAIN_TIME; i++) { 1423 done = (ixgbe->rcb_pending == 0); 1424 1425 if (done) 1426 break; 1427 1428 msec_delay(1); 1429 } 1430 1431 return (done); 1432 } 1433 1434 /* 1435 * ixgbe_start - Start the driver/chipset. 
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
	int i;

	/* Caller must hold gen_lock for the whole start sequence */
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * alloc_buffer is B_TRUE on a cold start (allocate fresh software
	 * rx rings and DMA buffers) and B_FALSE on a restart/reset, where
	 * the existing buffers are reused.
	 */
	if (alloc_buffer) {
		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe,
			    "Failed to allocate software receive rings");
			return (IXGBE_FAILURE);
		}

		/* Allocate buffers for all the rx/tx rings */
		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
			return (IXGBE_FAILURE);
		}

		ixgbe->tx_ring_init = B_TRUE;
	} else {
		ixgbe->tx_ring_init = B_FALSE;
	}

	/*
	 * Lock order: all rx ring locks first, then all tx ring locks;
	 * released in the exact reverse order below.
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}

/*
 * ixgbe_stop - Stop the driver/chipset.
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
	int i;

	/* Caller must hold gen_lock for the whole stop sequence */
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets (best effort - the return value
	 * is deliberately ignored; any stragglers are reclaimed by
	 * ixgbe_tx_clean() below)
	 */
	(void) ixgbe_tx_drain(ixgbe);

	/* Same lock order as ixgbe_start(): rx locks, then tx locks */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	/* Report the link as down/unknown to the MAC layer */
	if (ixgbe->link_state == LINK_STATE_UP) {
		ixgbe->link_state = LINK_STATE_UNKNOWN;
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}

	if (free_buffer) {
		/*
		 * Release the DMA/memory resources of rx/tx rings
		 */
		ixgbe_free_dma(ixgbe);
		ixgbe_free_rx_data(ixgbe);
	}
}

/*
 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
 */
static int
ixgbe_alloc_rings(ixgbe_t *ixgbe)
{
	/*
	 * Allocate memory space for rx rings
	 */
	ixgbe->rx_rings = kmem_zalloc(
	    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
	    KM_NOSLEEP);

	if (ixgbe->rx_rings == NULL) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory space for tx rings
	 */
	ixgbe->tx_rings = kmem_zalloc(
	    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
	    KM_NOSLEEP);

	/* On failure, roll back the earlier rx ring allocation */
	if (ixgbe->tx_rings == NULL) {
		kmem_free(ixgbe->rx_rings,
		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
		ixgbe->rx_rings = NULL;
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory space for rx ring groups
	 */
	ixgbe->rx_groups = kmem_zalloc(
	    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
	    KM_NOSLEEP);

	/* On failure, roll back both earlier allocations */
	if (ixgbe->rx_groups == NULL) {
		kmem_free(ixgbe->rx_rings,
		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
		kmem_free(ixgbe->tx_rings,
		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
		ixgbe->rx_rings = NULL;
		ixgbe->tx_rings = NULL;
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_free_rings - Free the memory space of rx/tx rings.
 */
static void
ixgbe_free_rings(ixgbe_t *ixgbe)
{
	if (ixgbe->rx_rings != NULL) {
		kmem_free(ixgbe->rx_rings,
		    sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
		ixgbe->rx_rings = NULL;
	}

	if (ixgbe->tx_rings != NULL) {
		kmem_free(ixgbe->tx_rings,
		    sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
		ixgbe->tx_rings = NULL;
	}

	if (ixgbe->rx_groups != NULL) {
		kmem_free(ixgbe->rx_groups,
		    sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
		ixgbe->rx_groups = NULL;
	}
}

/*
 * ixgbe_alloc_rx_data - Allocate software receive ring data for all
 * rx rings; on failure, everything allocated so far is freed.
 */
static int
ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
			goto alloc_rx_rings_failure;
	}
	return (IXGBE_SUCCESS);

alloc_rx_rings_failure:
	ixgbe_free_rx_data(ixgbe);
	return (IXGBE_FAILURE);
}

/*
 * ixgbe_free_rx_data - Free (or mark for deferred freeing) the software
 * receive ring data of all rx rings.
 */
static void
ixgbe_free_rx_data(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t *rx_data;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];

		mutex_enter(&ixgbe->rx_pending_lock);
		rx_data = rx_ring->rx_data;

		if (rx_data != NULL) {
			rx_data->flag |= IXGBE_RX_STOPPED;

			/*
			 * Only free immediately when the upper layer holds
			 * no loaned buffers; otherwise the IXGBE_RX_STOPPED
			 * flag defers the free - presumably to the path
			 * that returns the last pending rcb (TODO confirm
			 * against the rx buffer recycle code).
			 */
			if (rx_data->rcb_pending == 0) {
				ixgbe_free_rx_ring_data(rx_data);
				rx_ring->rx_data = NULL;
			}
		}

		mutex_exit(&ixgbe->rx_pending_lock);
	}
}

/*
 * ixgbe_setup_rings - Setup rx/tx rings.
 */
static void
ixgbe_setup_rings(ixgbe_t *ixgbe)
{
	/*
	 * Setup the rx/tx rings, including the following:
	 *
	 * 1. Setup the descriptor ring and the control block buffers;
	 * 2. Initialize necessary registers for receive/transmit;
	 * 3. Initialize software pointers/parameters for receive/transmit;
	 */
	ixgbe_setup_rx(ixgbe);

	ixgbe_setup_tx(ixgbe);
}

/*
 * ixgbe_setup_rx_ring - Program one rx ring: descriptor buffer addresses,
 * ring base/length/head/tail registers, RXDCTL and SRRCTL.
 */
static void
ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
{
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
	struct ixgbe_hw *hw = &ixgbe->hw;
	rx_control_block_t *rcb;
	union ixgbe_adv_rx_desc *rbd;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;
	int i;

	ASSERT(mutex_owned(&rx_ring->rx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/* Point every rx descriptor at its control block's DMA buffer */
	for (i = 0; i < ixgbe->rx_ring_size; i++) {
		rcb = rx_data->work_list[i];
		rbd = &rx_data->rbd_ring[i];

		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
		rbd->read.hdr_addr = NULL;
	}

	/*
	 * Initialize the length register
	 */
	size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)rx_data->rbd_area.dma_address;
	buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->index), buf_high);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->index), buf_low);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->index), rx_data->ring_size - 1);
	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->index), 0);

	rx_data->rbd_next = 0;

	/*
	 * Setup the Receive Descriptor Control Register (RXDCTL)
	 * PTHRESH=32 descriptors (half the internal cache)
	 * HTHRESH=0 descriptors (to minimize latency on fetch)
	 * WTHRESH defaults to 1 (writeback each descriptor)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->index));
	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */

	/* Not a valid value for 82599 */
	if (hw->mac.type < ixgbe_mac_82599EB) {
		reg_val |= 0x0020;	/* pthresh */
	}
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->index), reg_val);

	/* 82599 only: strip CRC and disable descriptor aggregation */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
	}

	/*
	 * Setup the Split and Replication Receive Control Register.
	 * Set the rx buffer size and the advanced descriptor type.
	 */
	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
	reg_val |= IXGBE_SRRCTL_DROP_EN;
	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->index), reg_val);
}

/*
 * ixgbe_setup_rx - Global receive-side initialization: packet split type,
 * filter control, receive unit enable, per-ring setup, rx groups,
 * statistics mapping, max frame size, jumbo frames, checksum and RSS.
 */
static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_rx_group_t *rx_group;
	uint32_t reg_val;
	uint32_t ring_mapping;
	int i;

	/* PSRTYPE must be configured for 82599 */
	reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
	    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
#define	IXGBE_PSRTYPE_L2_PKT	0x00001000
	reg_val |= IXGBE_PSRTYPE_L2_PKT;
	/* NOTE(review): upper-bit meaning not shown here - see datasheet */
	reg_val |= 0xE0000000;
	IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);

	/*
	 * Set filter control in FCTRL to accept broadcast packets and do
	 * not pass pause frames to host.  Flow control settings are already
	 * in this register, so preserve them.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_val |= IXGBE_FCTRL_BAM;	/* broadcast accept mode */
	reg_val |= IXGBE_FCTRL_DPF;	/* discard pause frames */
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

	/*
	 * Enable the receive unit.  This must be done after filter
	 * control is set in FCTRL.
	 */
	reg_val = (IXGBE_RXCTRL_RXEN	/* Enable Receive Unit */
	    | IXGBE_RXCTRL_DMBYPS);	/* descriptor monitor bypass */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);

	/*
	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_setup_rx_ring(rx_ring);
	}

	/*
	 * Setup rx groups.
	 */
	for (i = 0; i < ixgbe->num_rx_groups; i++) {
		rx_group = &ixgbe->rx_groups[i];
		rx_group->index = i;
		rx_group->ixgbe = ixgbe;
	}

	/*
	 * Setup the per-ring statistics mapping.
	 * Each RQSMR register packs the mapping for four rings, one byte
	 * per ring; flush the accumulated word every fourth ring and once
	 * more after the loop for any partial group.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
		if ((i & 0x3) == 0x3) {
			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);
			ring_mapping = 0;
		}
	}
	if ((i & 0x3) != 0x3)
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i >> 2), ring_mapping);

	/*
	 * The Max Frame Size in MHADD/MAXFRS will be internally increased
	 * by four bytes if the packet has a VLAN field, so includes MTU,
	 * ethernet header and frame check sequence.
	 * Register is MAXFRS in 82599.
	 */
	reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);

	/*
	 * Setup Jumbo Frame enable bit
	 */
	if (ixgbe->default_mtu > ETHERMTU) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg_val |= IXGBE_HLREG0_JUMBOEN;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
	}

	/*
	 * Hardware checksum settings
	 */
	if (ixgbe->rx_hcksum_enable) {
		reg_val = IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
	}

	/*
	 * Setup RSS for multiple receive queues
	 */
	if (ixgbe->num_rx_rings > 1)
		ixgbe_setup_rss(ixgbe);
}

/*
 * ixgbe_setup_tx_ring - Program one tx ring: base/length/head/tail
 * registers, optional head write-back, and the software ring state.
 */
static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;

	ASSERT(mutex_owned(&tx_ring->tx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Initialize the length register
	 */
	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

	/*
	 * Setup head write-back
	 */
	if (ixgbe->tx_head_wb_enable) {
		/*
		 * The memory of the head write-back is allocated using
		 * the extra tbd beyond the tail of the tbd ring.
		 */
		tx_ring->tbd_head_wb = (uint32_t *)
		    ((uintptr_t)tx_ring->tbd_area.address + size);
		*tx_ring->tbd_head_wb = 0;

		buf_low = (uint32_t)
		    (tx_ring->tbd_area.dma_address + size);
		buf_high = (uint32_t)
		    ((tx_ring->tbd_area.dma_address + size) >> 32);

		/* Set the head write-back enable bit */
		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

		/*
		 * Turn off relaxed ordering for head write back or it will
		 * cause problems with the tx recycling
		 */
		reg_val = IXGBE_READ_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index));
		reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw,
		    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
	} else {
		tx_ring->tbd_head_wb = NULL;
	}

	tx_ring->tbd_head = 0;
	tx_ring->tbd_tail = 0;
	tx_ring->tbd_free = tx_ring->ring_size;

	/*
	 * tx_ring_init is only B_TRUE on a cold start (fresh buffer
	 * allocation); on restart the tcb state is left untouched.
	 */
	if (ixgbe->tx_ring_init == B_TRUE) {
		tx_ring->tcb_head = 0;
		tx_ring->tcb_tail = 0;
		tx_ring->tcb_free = tx_ring->free_list_size;
	}

	/*
	 * Initialize the s/w context structure
	 */
	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
}

/*
 * ixgbe_setup_tx - Global transmit-side initialization: per-ring setup,
 * statistics mapping, CRC/padding, DMA enable (82599) and queue enable.
 */
static void
ixgbe_setup_tx(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t reg_val;
	uint32_t ring_mapping;
	int i;

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_setup_tx_ring(tx_ring);
	}

	/*
	 * Setup the per-ring statistics mapping.
	 * Four rings per register (one byte each); the register is
	 * TQSM on 82599 and later, TQSMR on earlier parts.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
		if ((i & 0x3) == 0x3) {
			if (hw->mac.type >= ixgbe_mac_82599EB) {
				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
				    ring_mapping);
			} else {
				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
				    ring_mapping);
			}
			ring_mapping = 0;
		}
	}
	/* Flush any partial group left over after the loop */
	if ((i & 0x3) != 0x3)
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
		}

	/*
	 * Enable CRC appending and TX padding (for short tx frames)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);

	/*
	 * enable DMA for 82599 parts
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		/* DMATXCTL.TE must be set after all Tx config is complete */
		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_val |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
	}

	/*
	 * Enabling tx queues ..
	 * For 82599 must be done after DMATXCTL.TE is set
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
		reg_val |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
	}
}

/*
 * ixgbe_setup_rss - Setup receive-side scaling feature.
 */
static void
ixgbe_setup_rss(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t i, mrqc, rxcsum;
	uint32_t random;
	uint32_t reta;

	/*
	 * Fill out redirection table.
	 * 128 one-byte entries, distributed round-robin over the rx
	 * rings; each RETA register holds four entries, written once
	 * every fourth iteration.
	 */
	reta = 0;
	for (i = 0; i < 128; i++) {
		reta = (reta << 8) | (i % ixgbe->num_rx_rings);
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}

	/*
	 * Fill out hash function seeds with a random constant
	 */
	for (i = 0; i < 10; i++) {
		(void) random_get_pseudo_bytes((uint8_t *)&random,
		    sizeof (uint32_t));
		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
	}

	/*
	 * Enable RSS & perform hash on these packet types
	 */
	mrqc = IXGBE_MRQC_RSSEN |
	    IXGBE_MRQC_RSS_FIELD_IPV4 |
	    IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX |
	    IXGBE_MRQC_RSS_FIELD_IPV6 |
	    IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
	    IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	/*
	 * Disable Packet Checksum to enable RSS for multiple receive queues.
	 * It is an adapter hardware limitation that Packet Checksum is
	 * mutually exclusive with RSS.
	 */
	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	rxcsum |= IXGBE_RXCSUM_PCSD;
	rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
}

/*
 * ixgbe_init_unicst - Initialize the unicast addresses.
 */
static void
ixgbe_init_unicst(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint8_t *mac_addr;
	int slot;
	/*
	 * Here we should consider two situations:
	 *
	 * 1. Chipset is initialized at the first time,
	 *    Clear all the multiple unicast addresses.
	 *
	 * 2. Chipset is reset
	 *    Recover the multiple unicast addresses from the
	 *    software data structure to the RAR registers.
	 */
	if (!ixgbe->unicst_init) {
		/*
		 * Initialize the multiple unicast addresses
		 */
		ixgbe->unicst_total = MAX_NUM_UNICAST_ADDRESSES;
		ixgbe->unicst_avail = ixgbe->unicst_total;
		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
			bzero(mac_addr, ETHERADDRL);
			(void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
			ixgbe->unicst_addr[slot].mac.set = 0;
		}
		ixgbe->unicst_init = B_TRUE;
	} else {
		/* Re-configure the RAR registers */
		for (slot = 0; slot < ixgbe->unicst_total; slot++) {
			mac_addr = ixgbe->unicst_addr[slot].mac.addr;
			if (ixgbe->unicst_addr[slot].mac.set == 1) {
				/* Slot in use: program and validate it */
				(void) ixgbe_set_rar(hw, slot, mac_addr,
				    NULL, IXGBE_RAH_AV);
			} else {
				/* Unused slot: clear the RAR entry */
				bzero(mac_addr, ETHERADDRL);
				(void) ixgbe_set_rar(hw, slot, mac_addr,
				    NULL, NULL);
			}
		}
	}
}

/*
 * ixgbe_unicst_set - Set the unicast address to the specified slot.
2141 */ 2142 int 2143 ixgbe_unicst_set(ixgbe_t *ixgbe, const uint8_t *mac_addr, 2144 int slot) 2145 { 2146 struct ixgbe_hw *hw = &ixgbe->hw; 2147 2148 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2149 2150 /* 2151 * Save the unicast address in the software data structure 2152 */ 2153 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 2154 2155 /* 2156 * Set the unicast address to the RAR register 2157 */ 2158 (void) ixgbe_set_rar(hw, slot, (uint8_t *)mac_addr, NULL, IXGBE_RAH_AV); 2159 2160 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 2161 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 2162 return (EIO); 2163 } 2164 2165 return (0); 2166 } 2167 2168 /* 2169 * ixgbe_unicst_find - Find the slot for the specified unicast address 2170 */ 2171 int 2172 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr) 2173 { 2174 int slot; 2175 2176 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2177 2178 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 2179 if (bcmp(ixgbe->unicst_addr[slot].mac.addr, 2180 mac_addr, ETHERADDRL) == 0) 2181 return (slot); 2182 } 2183 2184 return (-1); 2185 } 2186 2187 /* 2188 * ixgbe_multicst_add - Add a multicst address. 2189 */ 2190 int 2191 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr) 2192 { 2193 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2194 2195 if ((multiaddr[0] & 01) == 0) { 2196 return (EINVAL); 2197 } 2198 2199 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) { 2200 return (ENOENT); 2201 } 2202 2203 bcopy(multiaddr, 2204 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL); 2205 ixgbe->mcast_count++; 2206 2207 /* 2208 * Update the multicast table in the hardware 2209 */ 2210 ixgbe_setup_multicst(ixgbe); 2211 2212 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 2213 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 2214 return (EIO); 2215 } 2216 2217 return (0); 2218 } 2219 2220 /* 2221 * ixgbe_multicst_remove - Remove a multicst address. 
2222 */ 2223 int 2224 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr) 2225 { 2226 int i; 2227 2228 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2229 2230 for (i = 0; i < ixgbe->mcast_count; i++) { 2231 if (bcmp(multiaddr, &ixgbe->mcast_table[i], 2232 ETHERADDRL) == 0) { 2233 for (i++; i < ixgbe->mcast_count; i++) { 2234 ixgbe->mcast_table[i - 1] = 2235 ixgbe->mcast_table[i]; 2236 } 2237 ixgbe->mcast_count--; 2238 break; 2239 } 2240 } 2241 2242 /* 2243 * Update the multicast table in the hardware 2244 */ 2245 ixgbe_setup_multicst(ixgbe); 2246 2247 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 2248 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 2249 return (EIO); 2250 } 2251 2252 return (0); 2253 } 2254 2255 /* 2256 * ixgbe_setup_multicast - Setup multicast data structures. 2257 * 2258 * This routine initializes all of the multicast related structures 2259 * and save them in the hardware registers. 2260 */ 2261 static void 2262 ixgbe_setup_multicst(ixgbe_t *ixgbe) 2263 { 2264 uint8_t *mc_addr_list; 2265 uint32_t mc_addr_count; 2266 struct ixgbe_hw *hw = &ixgbe->hw; 2267 2268 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2269 2270 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES); 2271 2272 mc_addr_list = (uint8_t *)ixgbe->mcast_table; 2273 mc_addr_count = ixgbe->mcast_count; 2274 2275 /* 2276 * Update the multicast addresses to the MTA registers 2277 */ 2278 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count, 2279 ixgbe_mc_table_itr); 2280 } 2281 2282 /* 2283 * ixgbe_get_conf - Get driver configurations set in driver.conf. 2284 * 2285 * This routine gets user-configured values out of the configuration 2286 * file ixgbe.conf. 2287 * 2288 * For each configurable value, there is a minimum, a maximum, and a 2289 * default. 2290 * If user does not configure a value, use the default. 2291 * If user configures below the minimum, use the minumum. 2292 * If user configures above the maximum, use the maxumum. 
 */
static void
ixgbe_get_conf(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t flow_control;

	/*
	 * ixgbe driver supports the following user configurations:
	 *
	 * Jumbo frame configuration:
	 *    default_mtu
	 *
	 * Ethernet flow control configuration:
	 *    flow_control
	 *
	 * Multiple rings configurations:
	 *    tx_queue_number
	 *    tx_ring_size
	 *    rx_queue_number
	 *    rx_ring_size
	 *
	 * Call ixgbe_get_prop() to get the value for a specific
	 * configuration parameter.
	 */

	/*
	 * Jumbo frame configuration - max_frame_size controls host buffer
	 * allocation, so includes MTU, ethernet header, vlan tag and
	 * frame check sequence.
	 */
	ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
	    MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);

	ixgbe->max_frame_size = ixgbe->default_mtu +
	    sizeof (struct ether_vlan_header) + ETHERFCSL;

	/*
	 * Ethernet flow control configuration.
	 * A property value of 3 (one past the largest ixgbe_fc_* mode
	 * accepted here) selects the shared-code default mode.
	 */
	flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
	    ixgbe_fc_none, 3, ixgbe_fc_none);
	if (flow_control == 3)
		flow_control = ixgbe_fc_default;

	/*
	 * fc.requested_mode is what the user requests.  After autoneg,
	 * fc.current_mode will be the flow_control mode that was negotiated.
	 */
	hw->fc.requested_mode = flow_control;

	/*
	 * Multiple rings configurations
	 */
	ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
	    ixgbe->capab->min_tx_que_num,
	    ixgbe->capab->max_tx_que_num,
	    ixgbe->capab->def_tx_que_num);
	ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
	    MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);

	ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
	    ixgbe->capab->min_rx_que_num,
	    ixgbe->capab->max_rx_que_num,
	    ixgbe->capab->def_rx_que_num);
	ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
	    MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);

	/*
	 * Multiple groups configuration
	 */
	ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
	    MIN_RX_GROUP_NUM, MAX_RX_GROUP_NUM, DEFAULT_RX_GROUP_NUM);

	ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
	    0, 1, DEFAULT_MR_ENABLE);

	/* with multiple rings disabled, fall back to one ring and group */
	if (ixgbe->mr_enable == B_FALSE) {
		ixgbe->num_tx_rings = 1;
		ixgbe->num_rx_rings = 1;
		ixgbe->num_rx_groups = 1;
	}

	/*
	 * Tunable used to force an interrupt type. The only use is
	 * for testing of the lesser interrupt types.
	 * 0 = don't force interrupt type
	 * 1 = force interrupt type MSI-X
	 * 2 = force interrupt type MSI
	 * 3 = force interrupt type Legacy
	 */
	ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
	    IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);

	ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
	    0, 1, DEFAULT_TX_HCKSUM_ENABLE);
	ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
	    0, 1, DEFAULT_RX_HCKSUM_ENABLE);
	ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
	    0, 1, DEFAULT_LSO_ENABLE);
	ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
	    0, 1, DEFAULT_TX_HEAD_WB_ENABLE);

	/* Head Write Back not recommended for 82599 */
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		ixgbe->tx_head_wb_enable = B_FALSE;
	}

	/*
	 * ixgbe LSO needs the tx h/w checksum support.
	 * LSO will be disabled if tx h/w checksum is not
	 * enabled.
	 */
	if (ixgbe->tx_hcksum_enable == B_FALSE) {
		ixgbe->lso_enable = B_FALSE;
	}

	ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
	    MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
	    DEFAULT_TX_COPY_THRESHOLD);
	ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
	    MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
	ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
	    MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
	ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
	    PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
	    MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);

	ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
	    MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
	    DEFAULT_RX_COPY_THRESHOLD);
	ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
	    MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
	    DEFAULT_RX_LIMIT_PER_INTR);

	ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
	    ixgbe->capab->min_intr_throttle,
	    ixgbe->capab->max_intr_throttle,
	    ixgbe->capab->def_intr_throttle);
	/*
	 * 82599 requires the interrupt throttling rate to be
	 * a multiple of 8. This is enforced by the register
	 * definition.
	 */
	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
}

/*
 * ixgbe_init_params - Initialize the default link parameters.
 *
 * Local capabilities (enabled and advertised) default to on; link partner
 * (lp) capabilities start at 0 since nothing has been negotiated yet.
 */
static void
ixgbe_init_params(ixgbe_t *ixgbe)
{
	ixgbe->param_en_10000fdx_cap = 1;
	ixgbe->param_en_1000fdx_cap = 1;
	ixgbe->param_en_100fdx_cap = 1;
	ixgbe->param_adv_10000fdx_cap = 1;
	ixgbe->param_adv_1000fdx_cap = 1;
	ixgbe->param_adv_100fdx_cap = 1;

	ixgbe->param_pause_cap = 1;
	ixgbe->param_asym_pause_cap = 1;
	ixgbe->param_rem_fault = 0;

	ixgbe->param_adv_autoneg_cap = 1;
	ixgbe->param_adv_pause_cap = 1;
	ixgbe->param_adv_asym_pause_cap = 1;
	ixgbe->param_adv_rem_fault = 0;

	ixgbe->param_lp_10000fdx_cap = 0;
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap = 0;
	ixgbe->param_lp_autoneg_cap = 0;
	ixgbe->param_lp_pause_cap = 0;
	ixgbe->param_lp_asym_pause_cap = 0;
	ixgbe->param_lp_rem_fault = 0;
}

/*
 * ixgbe_get_prop - Get a property value out of the configuration file
 * ixgbe.conf.
 *
 * Caller provides the name of the property, a default value, a minimum
 * value, and a maximum value.
 *
 * Return configured value of the property, with default, minimum and
 * maximum properly applied.
 */
static int
ixgbe_get_prop(ixgbe_t *ixgbe,
    char *propname,	/* name of the property */
    int minval,		/* minimum acceptable value */
    int maxval,		/* maximum acceptable value */
    int defval)		/* default value */
{
	int value;

	/*
	 * Call ddi_prop_get_int() to read the conf settings
	 */
	value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, propname, defval);
	/* clamp the configured value into [minval, maxval] */
	if (value > maxval)
		value = maxval;

	if (value < minval)
		value = minval;

	return (value);
}

/*
 * ixgbe_driver_setup_link - Using the link properties to setup the link.
2506 */ 2507 int 2508 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw) 2509 { 2510 u32 autoneg_advertised = 0; 2511 2512 /* 2513 * No half duplex support with 10Gb parts 2514 */ 2515 if (ixgbe->param_adv_10000fdx_cap == 1) 2516 autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 2517 2518 if (ixgbe->param_adv_1000fdx_cap == 1) 2519 autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 2520 2521 if (ixgbe->param_adv_100fdx_cap == 1) 2522 autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; 2523 2524 if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) { 2525 ixgbe_notice(ixgbe, "Invalid link settings. Setup link " 2526 "to autonegotiation with full link capabilities."); 2527 2528 autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL | 2529 IXGBE_LINK_SPEED_1GB_FULL | 2530 IXGBE_LINK_SPEED_100_FULL; 2531 } 2532 2533 if (setup_hw) { 2534 if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised, 2535 ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) { 2536 ixgbe_notice(ixgbe, "Setup link failed on this " 2537 "device."); 2538 return (IXGBE_FAILURE); 2539 } 2540 } 2541 2542 return (IXGBE_SUCCESS); 2543 } 2544 2545 /* 2546 * ixgbe_driver_link_check - Link status processing done in taskq. 
 */
static void
ixgbe_driver_link_check(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
	boolean_t link_up = B_FALSE;
	boolean_t link_changed = B_FALSE;

	mutex_enter(&ixgbe->gen_lock);

	/* check for link, wait the full time */
	(void) ixgbe_check_link(hw, &speed, &link_up, true);
	if (link_up) {
		/* Link is up, enable flow control settings */
		(void) ixgbe_fc_enable(hw, 0);

		/*
		 * The Link is up, check whether it was marked as down earlier
		 */
		if (ixgbe->link_state != LINK_STATE_UP) {
			switch (speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				ixgbe->link_speed = SPEED_10GB;
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				ixgbe->link_speed = SPEED_1GB;
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				ixgbe->link_speed = SPEED_100;
			}
			ixgbe->link_duplex = LINK_DUPLEX_FULL;
			ixgbe->link_state = LINK_STATE_UP;
			ixgbe->link_down_timeout = 0;
			link_changed = B_TRUE;
		}
	} else {
		if (ixgbe->link_state != LINK_STATE_DOWN) {
			ixgbe->link_speed = 0;
			ixgbe->link_duplex = 0;
			ixgbe->link_state = LINK_STATE_DOWN;
			link_changed = B_TRUE;
		}

		/*
		 * Once the link has been down for MAX_LINK_DOWN_TIMEOUT
		 * ticks, clean out any pending tx packets exactly once
		 * (the counter is bumped past the threshold afterwards).
		 */
		if (ixgbe->ixgbe_state & IXGBE_STARTED) {
			if (ixgbe->link_down_timeout < MAX_LINK_DOWN_TIMEOUT) {
				ixgbe->link_down_timeout++;
			} else if (ixgbe->link_down_timeout ==
			    MAX_LINK_DOWN_TIMEOUT) {
				ixgbe_tx_clean(ixgbe);
				ixgbe->link_down_timeout++;
			}
		}
	}

	/*
	 * this is only reached after a link-status-change interrupt
	 * so always get new phy state
	 */
	ixgbe_get_hw_state(ixgbe);

	/* re-enable the interrupt, which was automasked */
	ixgbe->eims |= IXGBE_EICR_LSC;
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	mutex_exit(&ixgbe->gen_lock);

	/* report the link change to MAC outside the gen_lock */
	if (link_changed) {
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}
}

/*
 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
 */
static void
ixgbe_sfp_check(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	uint32_t eicr = ixgbe->eicr;
	struct ixgbe_hw *hw = &ixgbe->hw;

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);

		/* if link up, do multispeed fiber setup */
		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
		    B_TRUE, B_TRUE);
		ixgbe_driver_link_check(ixgbe);
	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);

		/* if link up, do sfp module setup */
		(void) hw->mac.ops.setup_sfp(hw);

		/* do multispeed fiber setup */
		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
		    B_TRUE, B_TRUE);
		ixgbe_driver_link_check(ixgbe);
	}
}

/*
 * ixgbe_local_timer - Driver watchdog function.
 *
 * This function will handle the transmit stall check, link status check and
 * other routines.
 */
static void
ixgbe_local_timer(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;

	/* a stalled chip is reset; FMA is notified either way */
	if (ixgbe_stall_check(ixgbe)) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		ixgbe->reset_count++;
		if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
	}

	ixgbe_restart_watchdog_timer(ixgbe);
}

/*
 * ixgbe_stall_check - Check for transmit stall.
 *
 * This function checks if the adapter is stalled (in transmit).
 *
 * It is called each time the watchdog timeout is invoked.
 * If the transmit descriptor reclaim continuously fails,
 * the watchdog value will increment by 1.
* If the watchdog value exceeds the threshold, the ixgbe is assumed to
 * have stalled and needs to be reset.
 */
static boolean_t
ixgbe_stall_check(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	boolean_t result;
	int i;

	/* no stall detection while the link is down */
	if (ixgbe->link_state != LINK_STATE_UP)
		return (B_FALSE);

	/*
	 * If any tx ring is stalled, we'll reset the chipset
	 */
	result = B_FALSE;
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
			tx_ring->tx_recycle(tx_ring);
		}

		if (tx_ring->recycle_fail > 0)
			tx_ring->stall_watchdog++;
		else
			tx_ring->stall_watchdog = 0;

		if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
			result = B_TRUE;
			break;
		}
	}

	/* reset the counters of the ring that tripped the check */
	if (result) {
		tx_ring->stall_watchdog = 0;
		tx_ring->recycle_fail = 0;
	}

	return (result);
}


/*
 * is_valid_mac_addr - Check if the mac address is valid.
 */
static boolean_t
is_valid_mac_addr(uint8_t *mac_addr)
{
	/* reject the all-zeros and the broadcast address */
	const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
	const uint8_t addr_test2[6] =
	    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

	if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
	    !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
		return (B_FALSE);

	return (B_TRUE);
}

/*
 * ixgbe_find_mac_address - Look for an OBP-provided MAC address override.
 *
 * On SPARC, the OBP properties "local-mac-address", "local-mac-address?"
 * and "mac-address" may override the address already read from the chip;
 * on other platforms this is a no-op.  Always returns B_TRUE.
 */
static boolean_t
ixgbe_find_mac_address(ixgbe_t *ixgbe)
{
#ifdef __sparc
	struct ixgbe_hw *hw = &ixgbe->hw;
	uchar_t *bytes;
	struct ether_addr sysaddr;
	uint_t nelts;
	int err;
	boolean_t found = B_FALSE;

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead.
	 *
	 * We check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it).  If we can't
	 * make sense of it this way, we'll ignore it.
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Look up the OBP property "local-mac-address?". If the user has set
	 * 'local-mac-address? = false', use "the system address" instead.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
			if (localetheraddr(NULL, &sysaddr) != 0) {
				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
				found = B_TRUE;
			}
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as Solaris takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	if (found) {
		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
		return (B_TRUE);
	}
#else
	_NOTE(ARGUNUSED(ixgbe));
#endif

	return (B_TRUE);
}

#pragma inline(ixgbe_arm_watchdog_timer)
static void
ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
{
	/*
	 * Fire a watchdog timer (one second tick)
	 */
	ixgbe->watchdog_tid =
	    timeout(ixgbe_local_timer,
	    (void *)ixgbe, 1 * drv_usectohz(1000000));

}

/*
 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
 */
void
ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
{
	mutex_enter(&ixgbe->watchdog_lock);

	if (!ixgbe->watchdog_enable) {
		ixgbe->watchdog_enable = B_TRUE;
		ixgbe->watchdog_start = B_TRUE;
		ixgbe_arm_watchdog_timer(ixgbe);
	}

	mutex_exit(&ixgbe->watchdog_lock);
}

/*
 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
 */
void
ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
{
	timeout_id_t tid;

	mutex_enter(&ixgbe->watchdog_lock);

	ixgbe->watchdog_enable = B_FALSE;
	ixgbe->watchdog_start = B_FALSE;
	tid = ixgbe->watchdog_tid;
	ixgbe->watchdog_tid = 0;

	mutex_exit(&ixgbe->watchdog_lock);

	/* cancel any pending timeout outside of watchdog_lock */
	if (tid != 0)
		(void) untimeout(tid);
}

/*
 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
 */
void
ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
{
	mutex_enter(&ixgbe->watchdog_lock);

	/* only arm the timer if the watchdog has been enabled */
	if (ixgbe->watchdog_enable) {
		if (!ixgbe->watchdog_start) {
			ixgbe->watchdog_start = B_TRUE;
			ixgbe_arm_watchdog_timer(ixgbe);
		}
	}

	mutex_exit(&ixgbe->watchdog_lock);
}

/*
 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
 */
static void
ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
{
	mutex_enter(&ixgbe->watchdog_lock);

	if (ixgbe->watchdog_start)
		ixgbe_arm_watchdog_timer(ixgbe);

	mutex_exit(&ixgbe->watchdog_lock);
}

/*
 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
 */
void
ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
{
	timeout_id_t tid;

	mutex_enter(&ixgbe->watchdog_lock);

	ixgbe->watchdog_start = B_FALSE;
	tid = ixgbe->watchdog_tid;
	ixgbe->watchdog_tid = 0;

	mutex_exit(&ixgbe->watchdog_lock);

	/* cancel any pending timeout outside of watchdog_lock */
	if (tid != 0)
		(void) untimeout(tid);
}

/*
 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
 */
static void
ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	/*
	 * mask all interrupts off
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);

	/*
	 * for MSI-X, also disable autoclear
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
	}

	IXGBE_WRITE_FLUSH(hw);
}

/*
 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
 */
static void
ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eiac, eiam;
	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* interrupt types to enable */
	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK;	/* shared code default */
	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER;	/* minus tcp timer */
	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */

	/* enable automask on "other" causes that this adapter can generate */
	eiam = ixgbe->capab->other_intr;

	/*
	 * msi-x mode
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		/* enable autoclear but not on bits 29:20 */
		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);

		/* general purpose interrupt enable */
		gpie |= (IXGBE_GPIE_MSIX_MODE
		    | IXGBE_GPIE_PBA_SUPPORT
		    | IXGBE_GPIE_OCD
		    | IXGBE_GPIE_EIAME);
	/*
	 * non-msi-x mode
	 */
	} else {

		/* disable autoclear, leave gpie at default */
		eiac = 0;

		/*
		 * General purpose interrupt enable.
		 * For 82599, extended interrupt automask enable
		 * only in MSI or MSI-X mode
		 */
		if ((hw->mac.type < ixgbe_mac_82599EB) ||
		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
			gpie |= IXGBE_GPIE_EIAME;
		}
	}
	/* Enable specific interrupts for 82599 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		gpie |= IXGBE_SDP2_GPIEN;	/* pluggable optics intr */
		gpie |= IXGBE_SDP1_GPIEN;	/* LSC interrupt */
	}

	/* write to interrupt control registers */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	IXGBE_WRITE_FLUSH(hw);
}

/*
 * ixgbe_loopback_ioctl - Loopback support.
 */
enum ioc_reply
ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	uint32_t size;
	uint32_t value;

	/* the ioctl payload is carried in the continuation mblk */
	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	switch (iocp->ioc_cmd) {
	default:
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		size = sizeof (lb_info_sz_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		/* total size of the mode list returned by LB_GET_INFO */
		value = sizeof (lb_normal);
		value += sizeof (lb_mac);
		value += sizeof (lb_external);

		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbsp = value;
		break;

	case LB_GET_INFO:
		value = sizeof (lb_normal);
		value += sizeof (lb_mac);
		value += sizeof (lb_external);

		size = value;
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = 0;
		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;

		lbpp[value++] = lb_normal;
		lbpp[value++] = lb_mac;
		lbpp[value++] = lb_external;
		break;

	case LB_GET_MODE:
		size = sizeof (uint32_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbmp = ixgbe->loopback_mode;
		break;

	case LB_SET_MODE:
		size = 0;
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
			return (IOC_INVAL);
		break;
	}

	iocp->ioc_count = size;
	iocp->ioc_error = 0;

	/* verify that the hardware access handle is still OK (FMA) */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (IOC_INVAL);
	}

	return (IOC_REPLY);
}

/*
 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
 */
static boolean_t
ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
{
	/* nothing to do if the mode is unchanged */
	if (mode == ixgbe->loopback_mode)
		return (B_TRUE);

	ixgbe->loopback_mode = mode;

	if (mode == IXGBE_LB_NONE) {
		/*
		 * Reset the chip
		 */
		(void) ixgbe_reset(ixgbe);
		return (B_TRUE);
	}

	mutex_enter(&ixgbe->gen_lock);

	switch (mode) {
	default:
		mutex_exit(&ixgbe->gen_lock);
		return (B_FALSE);

	case IXGBE_LB_EXTERNAL:
		break;

	case IXGBE_LB_INTERNAL_MAC:
		ixgbe_set_internal_mac_loopback(ixgbe);
		break;
	}

	mutex_exit(&ixgbe->gen_lock);

	return (B_TRUE);
}

/*
 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
 */
static void
ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw;
	uint32_t reg;
	uint8_t atlas;

	hw = &ixgbe->hw;

	/*
	 * Setup MAC loopback
	 */
	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
	reg |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);

	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
	reg &= ~IXGBE_AUTOC_LMS_MASK;
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);

	/*
	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
	 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
		    atlas);
	}
}

#pragma inline(ixgbe_intr_rx_work)
/*
 * ixgbe_intr_rx_work - RX processing of ISR.
 */
static void
ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
{
	mblk_t *mp;

	mutex_enter(&rx_ring->rx_lock);

	mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
	mutex_exit(&rx_ring->rx_lock);

	/* pass received packets up to MAC outside of rx_lock */
	if (mp != NULL)
		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
		    rx_ring->ring_gen_num);
}

#pragma inline(ixgbe_intr_tx_work)
/*
 * ixgbe_intr_tx_work - TX processing of ISR.
 */
static void
ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;

	/*
	 * Recycle the tx descriptors
	 */
	tx_ring->tx_recycle(tx_ring);

	/*
	 * Schedule the re-transmit once enough descriptors are free again
	 */
	if (tx_ring->reschedule &&
	    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
		    tx_ring->ring_handle);
		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
	}
}

#pragma inline(ixgbe_intr_other_work)
/*
 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
 */
static void
ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	/*
	 * dispatch taskq to handle link status change
	 */
	if (eicr & IXGBE_EICR_LSC) {
		if ((ddi_taskq_dispatch(ixgbe->lsc_taskq,
		    ixgbe_driver_link_check, (void *)ixgbe, DDI_NOSLEEP))
		    != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "Fail to dispatch taskq");
		}
	}

	/*
	 * check for fan failure on adapters with
* fans
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		if (hw->mac.type < ixgbe_mac_82599EB) {
			ixgbe_log(ixgbe,
			    "Fan has stopped, replace the adapter\n");

			/* re-enable the interrupt, which was automasked */
			ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
		}
	}

	/*
	 * Do SFP check for 82599
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		if ((ddi_taskq_dispatch(ixgbe->lsc_taskq,
		    ixgbe_sfp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for SFP check");
		}
	}
}

/*
 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
 */
static uint_t
ixgbe_intr_legacy(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	ixgbe_rx_ring_t *rx_ring;
	uint32_t eicr;
	mblk_t *mp;
	boolean_t tx_reschedule;
	uint_t result;

	_NOTE(ARGUNUSED(arg2));

	mutex_enter(&ixgbe->gen_lock);
	/* a suspended device does not own this interrupt */
	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	mp = NULL;
	tx_reschedule = B_FALSE;

	/*
	 * Any bit set in eicr: claim this interrupt
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (eicr) {
		/*
		 * For legacy interrupt, we have only one interrupt,
		 * so we have only one rx ring and one tx ring enabled.
		 */
		ASSERT(ixgbe->num_rx_rings == 1);
		ASSERT(ixgbe->num_tx_rings == 1);

		/*
		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
		 */
		if (eicr & 0x1) {
			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
			/*
			 * Clean the rx descriptors
			 */
			rx_ring = &ixgbe->rx_rings[0];
			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
		}

		/*
		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
		 */
		if (eicr & 0x2) {
			/*
			 * Recycle the tx descriptors
			 */
			tx_ring = &ixgbe->tx_rings[0];
			tx_ring->tx_recycle(tx_ring);

			/*
			 * Schedule the re-transmit
			 */
			tx_reschedule = (tx_ring->reschedule &&
			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
		}

		/* any interrupt type other than tx/rx */
		if (eicr & ixgbe->capab->other_intr) {
			if (hw->mac.type < ixgbe_mac_82599EB) {
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
			}
			if (hw->mac.type == ixgbe_mac_82599EB) {
				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			}
			ixgbe_intr_other_work(ixgbe, eicr);
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		}

		mutex_exit(&ixgbe->gen_lock);

		result = DDI_INTR_CLAIMED;
	} else {
		mutex_exit(&ixgbe->gen_lock);

		/*
		 * No interrupt cause bits set: don't claim this interrupt.
		 */
		result = DDI_INTR_UNCLAIMED;
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	/*
	 * Do the following work outside of the gen_lock
	 */
	if (mp != NULL) {
		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
		    rx_ring->ring_gen_num);
	}

	if (tx_reschedule) {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
	}

	return (result);
}

/*
 * ixgbe_intr_msi - Interrupt handler for MSI.
 */
static uint_t
ixgbe_intr_msi(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;

	_NOTE(ARGUNUSED(arg2));

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	/*
	 * For MSI interrupt, we have only one vector,
	 * so we have only one rx ring and one tx ring enabled.
	 */
	ASSERT(ixgbe->num_rx_rings == 1);
	ASSERT(ixgbe->num_tx_rings == 1);

	/*
	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
	 */
	if (eicr & 0x1) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
	}

	/*
	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
	 */
	if (eicr & 0x2) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
	}

	/* any interrupt type other than tx/rx */
	if (eicr & ixgbe->capab->other_intr) {
		mutex_enter(&ixgbe->gen_lock);
		if (hw->mac.type < ixgbe_mac_82599EB) {
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		}
		if (hw->mac.type == ixgbe_mac_82599EB) {
			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
		}
		ixgbe_intr_other_work(ixgbe, eicr);
		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		mutex_exit(&ixgbe->gen_lock);
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	return (DDI_INTR_CLAIMED);
}

/*
 * ixgbe_intr_msix - Interrupt handler for MSI-X.
 */
static uint_t
ixgbe_intr_msix(void *arg1, void *arg2)
{
	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
	ixgbe_t *ixgbe = vect->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;
	int r_idx = 0;

	_NOTE(ARGUNUSED(arg2));

	/*
	 * Clean each rx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
		    (ixgbe->num_rx_rings - 1));
	}

	/*
	 * Clean each tx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
		    (ixgbe->num_tx_rings - 1));
	}


	/*
	 * Clean other interrupt (link change) that has its bit set in the map
	 */
	if (BT_TEST(vect->other_map, 0) == 1) {
		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

		/*
		 * Need check cause bits and only other causes will
		 * be processed
		 */
		/* any interrupt type other than tx/rx */
		if (eicr & ixgbe->capab->other_intr) {
			if (hw->mac.type < ixgbe_mac_82599EB) {
				mutex_enter(&ixgbe->gen_lock);
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				ixgbe_intr_other_work(ixgbe, eicr);
				mutex_exit(&ixgbe->gen_lock);
			} else {
				if (hw->mac.type == ixgbe_mac_82599EB) {
					mutex_enter(&ixgbe->gen_lock);
					ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
					ixgbe_intr_other_work(ixgbe, eicr);
					mutex_exit(&ixgbe->gen_lock);
				}
			}
		}

		/* re-enable the interrupts which were automasked */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
*
 * Normal sequence is to try MSI-X; if not successful, try MSI;
 * if not successful, try Legacy.
 * ixgbe->intr_force can be used to force sequence to start with
 * any of the 3 types.
 * If MSI-X is not used, number of tx/rx rings is forced to 1.
 */
static int
ixgbe_alloc_intrs(ixgbe_t *ixgbe)
{
	dev_info_t *devinfo;
	int intr_types;
	int rc;

	devinfo = ixgbe->dip;

	/*
	 * Get supported interrupt types
	 */
	rc = ddi_intr_get_supported_types(devinfo, &intr_types);

	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get supported interrupt types failed: %d", rc);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);

	ixgbe->intr_type = 0;

	/*
	 * Install MSI-X interrupts
	 */
	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
	    (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
		if (rc == IXGBE_SUCCESS)
			return (IXGBE_SUCCESS);

		ixgbe_log(ixgbe,
		    "Allocate MSI-X failed, trying MSI interrupts...");
	}

	/*
	 * MSI-X not used, force rings and groups to 1
	 */
	ixgbe->num_rx_rings = 1;
	ixgbe->num_rx_groups = 1;
	ixgbe->num_tx_rings = 1;
	ixgbe_log(ixgbe,
	    "MSI-X not used, force rings and groups number to 1");

	/*
	 * Install MSI interrupts
	 */
	if ((intr_types & DDI_INTR_TYPE_MSI) &&
	    (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
		if (rc == IXGBE_SUCCESS)
			return (IXGBE_SUCCESS);

		ixgbe_log(ixgbe,
		    "Allocate MSI failed, trying Legacy interrupts...");
	}

	/*
	 * Install legacy interrupts
	 */
	if (intr_types & DDI_INTR_TYPE_FIXED) {
		rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
		if (rc == IXGBE_SUCCESS)
			return (IXGBE_SUCCESS);
3581 ixgbe_log(ixgbe, 3582 "Allocate Legacy interrupts failed"); 3583 } 3584 3585 /* 3586 * If none of the 3 types succeeded, return failure 3587 */ 3588 return (IXGBE_FAILURE); 3589 } 3590 3591 /* 3592 * ixgbe_alloc_intr_handles - Allocate interrupt handles. 3593 * 3594 * For legacy and MSI, only 1 handle is needed. For MSI-X, 3595 * if fewer than 2 handles are available, return failure. 3596 * Upon success, this maps the vectors to rx and tx rings for 3597 * interrupts. 3598 */ 3599 static int 3600 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type) 3601 { 3602 dev_info_t *devinfo; 3603 int request, count, avail, actual; 3604 int minimum; 3605 int rc; 3606 3607 devinfo = ixgbe->dip; 3608 3609 switch (intr_type) { 3610 case DDI_INTR_TYPE_FIXED: 3611 request = 1; /* Request 1 legacy interrupt handle */ 3612 minimum = 1; 3613 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy"); 3614 break; 3615 3616 case DDI_INTR_TYPE_MSI: 3617 request = 1; /* Request 1 MSI interrupt handle */ 3618 minimum = 1; 3619 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI"); 3620 break; 3621 3622 case DDI_INTR_TYPE_MSIX: 3623 /* 3624 * Best number of vectors for the adapter is 3625 * # rx rings + # tx rings. 3626 */ 3627 request = ixgbe->num_rx_rings + ixgbe->num_tx_rings; 3628 if (request > ixgbe->capab->max_ring_vect) 3629 request = ixgbe->capab->max_ring_vect; 3630 minimum = 2; 3631 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X"); 3632 break; 3633 3634 default: 3635 ixgbe_log(ixgbe, 3636 "invalid call to ixgbe_alloc_intr_handles(): %d\n", 3637 intr_type); 3638 return (IXGBE_FAILURE); 3639 } 3640 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d", 3641 request, minimum); 3642 3643 /* 3644 * Get number of supported interrupts 3645 */ 3646 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 3647 if ((rc != DDI_SUCCESS) || (count < minimum)) { 3648 ixgbe_log(ixgbe, 3649 "Get interrupt number failed. 
Return: %d, count: %d", 3650 rc, count); 3651 return (IXGBE_FAILURE); 3652 } 3653 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count); 3654 3655 /* 3656 * Get number of available interrupts 3657 */ 3658 rc = ddi_intr_get_navail(devinfo, intr_type, &avail); 3659 if ((rc != DDI_SUCCESS) || (avail < minimum)) { 3660 ixgbe_log(ixgbe, 3661 "Get interrupt available number failed. " 3662 "Return: %d, available: %d", rc, avail); 3663 return (IXGBE_FAILURE); 3664 } 3665 IXGBE_DEBUGLOG_1(ixgbe, "interrupts available: %d", avail); 3666 3667 if (avail < request) { 3668 ixgbe_log(ixgbe, "Request %d handles, %d available", 3669 request, avail); 3670 request = avail; 3671 } 3672 3673 actual = 0; 3674 ixgbe->intr_cnt = 0; 3675 3676 /* 3677 * Allocate an array of interrupt handles 3678 */ 3679 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t); 3680 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP); 3681 3682 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0, 3683 request, &actual, DDI_INTR_ALLOC_NORMAL); 3684 if (rc != DDI_SUCCESS) { 3685 ixgbe_log(ixgbe, "Allocate interrupts failed. " 3686 "return: %d, request: %d, actual: %d", 3687 rc, request, actual); 3688 goto alloc_handle_fail; 3689 } 3690 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual); 3691 3692 ixgbe->intr_cnt = actual; 3693 3694 /* 3695 * Now we know the actual number of vectors. Here we map the vector 3696 * to other, rx rings and tx ring. 
3697 */ 3698 if (actual < minimum) { 3699 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d", 3700 actual); 3701 goto alloc_handle_fail; 3702 } 3703 3704 /* 3705 * Get priority for first vector, assume remaining are all the same 3706 */ 3707 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 3708 if (rc != DDI_SUCCESS) { 3709 ixgbe_log(ixgbe, 3710 "Get interrupt priority failed: %d", rc); 3711 goto alloc_handle_fail; 3712 } 3713 3714 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 3715 if (rc != DDI_SUCCESS) { 3716 ixgbe_log(ixgbe, 3717 "Get interrupt cap failed: %d", rc); 3718 goto alloc_handle_fail; 3719 } 3720 3721 ixgbe->intr_type = intr_type; 3722 3723 return (IXGBE_SUCCESS); 3724 3725 alloc_handle_fail: 3726 ixgbe_rem_intrs(ixgbe); 3727 3728 return (IXGBE_FAILURE); 3729 } 3730 3731 /* 3732 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type. 3733 * 3734 * Before adding the interrupt handlers, the interrupt vectors have 3735 * been allocated, and the rx/tx rings have also been allocated. 3736 */ 3737 static int 3738 ixgbe_add_intr_handlers(ixgbe_t *ixgbe) 3739 { 3740 int vector = 0; 3741 int rc; 3742 3743 switch (ixgbe->intr_type) { 3744 case DDI_INTR_TYPE_MSIX: 3745 /* 3746 * Add interrupt handler for all vectors 3747 */ 3748 for (vector = 0; vector < ixgbe->intr_cnt; vector++) { 3749 /* 3750 * install pointer to vect_map[vector] 3751 */ 3752 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3753 (ddi_intr_handler_t *)ixgbe_intr_msix, 3754 (void *)&ixgbe->vect_map[vector], NULL); 3755 3756 if (rc != DDI_SUCCESS) { 3757 ixgbe_log(ixgbe, 3758 "Add rx interrupt handler failed. 
" 3759 "return: %d, vector: %d", rc, vector); 3760 for (vector--; vector >= 0; vector--) { 3761 (void) ddi_intr_remove_handler( 3762 ixgbe->htable[vector]); 3763 } 3764 return (IXGBE_FAILURE); 3765 } 3766 } 3767 3768 break; 3769 3770 case DDI_INTR_TYPE_MSI: 3771 /* 3772 * Add interrupt handlers for the only vector 3773 */ 3774 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3775 (ddi_intr_handler_t *)ixgbe_intr_msi, 3776 (void *)ixgbe, NULL); 3777 3778 if (rc != DDI_SUCCESS) { 3779 ixgbe_log(ixgbe, 3780 "Add MSI interrupt handler failed: %d", rc); 3781 return (IXGBE_FAILURE); 3782 } 3783 3784 break; 3785 3786 case DDI_INTR_TYPE_FIXED: 3787 /* 3788 * Add interrupt handlers for the only vector 3789 */ 3790 rc = ddi_intr_add_handler(ixgbe->htable[vector], 3791 (ddi_intr_handler_t *)ixgbe_intr_legacy, 3792 (void *)ixgbe, NULL); 3793 3794 if (rc != DDI_SUCCESS) { 3795 ixgbe_log(ixgbe, 3796 "Add legacy interrupt handler failed: %d", rc); 3797 return (IXGBE_FAILURE); 3798 } 3799 3800 break; 3801 3802 default: 3803 return (IXGBE_FAILURE); 3804 } 3805 3806 return (IXGBE_SUCCESS); 3807 } 3808 3809 #pragma inline(ixgbe_map_rxring_to_vector) 3810 /* 3811 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector. 3812 */ 3813 static void 3814 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx) 3815 { 3816 /* 3817 * Set bit in map 3818 */ 3819 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 3820 3821 /* 3822 * Count bits set 3823 */ 3824 ixgbe->vect_map[v_idx].rxr_cnt++; 3825 3826 /* 3827 * Remember bit position 3828 */ 3829 ixgbe->rx_rings[r_idx].intr_vector = v_idx; 3830 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx; 3831 } 3832 3833 #pragma inline(ixgbe_map_txring_to_vector) 3834 /* 3835 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector. 
 */
static void
ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
{
	/*
	 * Set bit in map
	 */
	BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);

	/*
	 * Count bits set
	 */
	ixgbe->vect_map[v_idx].txr_cnt++;

	/*
	 * Remember bit position
	 */
	ixgbe->tx_rings[t_idx].intr_vector = v_idx;
	ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
}

/*
 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
 * allocation register (IVAR).
 * cause:
 * -1 : other cause
 *  0 : rx
 *  1 : tx
 */
static void
ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
    int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (cause == -1) {
			cause = 0;
		}
		/*
		 * 82598 packs 4 one-byte IVAR entries per 32-bit register;
		 * tx entries start 64 entries after the rx block.
		 */
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (cause == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			/* 82599: 2 queues per IVAR reg, rx byte 0/2, tx 1/3 */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;
	default:
		break;
	}
}

/*
 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
 * given interrupt vector allocation register (IVAR).
 * cause:
 * -1 : other cause
 *  0 : rx
 *  1 : tx
 */
static void
ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
		    (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (cause == -1) {
			/* other causes */
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;
	default:
		break;
	}
}

/*
 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
 * given interrupt vector allocation register (IVAR).
 * cause:
 * -1 : other cause
 *  0 : rx
 *  1 : tx
 */
static void
ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
		    (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (cause == -1) {
			/* other causes */
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;
	default:
		break;
	}
}

/*
 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
 *
 * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt
 * to vector[0 - (intr_cnt -1)].
 */
static int
ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
{
	int i, vector = 0;

	/* initialize vector map */
	bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		ixgbe->vect_map[i].ixgbe = ixgbe;
	}

	/*
	 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
	 * tx rings[0] on RTxQ[1].
	 */
	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
		ixgbe_map_txring_to_vector(ixgbe, 0, 1);
		return (IXGBE_SUCCESS);
	}

	/*
	 * Interrupts/vectors mapping for MSI-X
	 */

	/*
	 * Map other interrupt to vector 0,
	 * Set bit in map and count the bits set.
	 */
	BT_SET(ixgbe->vect_map[vector].other_map, 0);
	ixgbe->vect_map[vector].other_cnt++;
	vector++;

	/*
	 * Map rx ring interrupts to vectors
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		ixgbe_map_rxring_to_vector(ixgbe, i, vector);
		/* round-robin over all vectors, including vector 0 */
		vector = (vector +1) % ixgbe->intr_cnt;
	}

	/*
	 * Map tx ring interrupts to vectors
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		ixgbe_map_txring_to_vector(ixgbe, i, vector);
		vector = (vector +1) % ixgbe->intr_cnt;
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
 *
 * This relies on ring/vector mapping already set up in the
 * vect_map[] structures
 */
static void
ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_intr_vector_t *vect;	/* vector bitmap */
	int r_idx;	/* ring index */
	int v_idx;	/* vector index */

	/*
	 * Clear any previous entries
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 25 IVAR registers on 82598 -- TODO confirm vs datasheet */
		for (v_idx = 0; v_idx < 25; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);

		break;
	case ixgbe_mac_82599EB:
		for (v_idx = 0; v_idx < 64; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);

		break;
	default:
		break;
	}

	/*
	 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
	 * tx rings[0] will use RTxQ[1].
	 */
	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
		return;
	}

	/*
	 * For MSI-X interrupt, "Other" is always on vector[0].
	 */
	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);

	/*
	 * For each interrupt vector, populate the IVAR table
	 */
	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
		vect = &ixgbe->vect_map[v_idx];

		/*
		 * For each rx ring bit set
		 */
		r_idx = bt_getlowbit(vect->rx_map, 0,
		    (ixgbe->num_rx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 0);
			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
			    (ixgbe->num_rx_rings - 1));
		}

		/*
		 * For each tx ring bit set
		 */
		r_idx = bt_getlowbit(vect->tx_map, 0,
		    (ixgbe->num_tx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
			    (ixgbe->num_tx_rings - 1));
		}
	}
}

/*
 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
 */
static void
ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	for (i = 0; i < ixgbe->intr_cnt; i++) {
		rc = ddi_intr_remove_handler(ixgbe->htable[i]);
		if (rc != DDI_SUCCESS) {
			/* log and continue; removal is best-effort */
			IXGBE_DEBUGLOG_1(ixgbe,
			    "Remove intr handler failed: %d", rc);
		}
	}
}

/*
 * ixgbe_rem_intrs - Remove the allocated interrupts.
4162 */ 4163 static void 4164 ixgbe_rem_intrs(ixgbe_t *ixgbe) 4165 { 4166 int i; 4167 int rc; 4168 4169 for (i = 0; i < ixgbe->intr_cnt; i++) { 4170 rc = ddi_intr_free(ixgbe->htable[i]); 4171 if (rc != DDI_SUCCESS) { 4172 IXGBE_DEBUGLOG_1(ixgbe, 4173 "Free intr failed: %d", rc); 4174 } 4175 } 4176 4177 kmem_free(ixgbe->htable, ixgbe->intr_size); 4178 ixgbe->htable = NULL; 4179 } 4180 4181 /* 4182 * ixgbe_enable_intrs - Enable all the ddi interrupts. 4183 */ 4184 static int 4185 ixgbe_enable_intrs(ixgbe_t *ixgbe) 4186 { 4187 int i; 4188 int rc; 4189 4190 /* 4191 * Enable interrupts 4192 */ 4193 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 4194 /* 4195 * Call ddi_intr_block_enable() for MSI 4196 */ 4197 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt); 4198 if (rc != DDI_SUCCESS) { 4199 ixgbe_log(ixgbe, 4200 "Enable block intr failed: %d", rc); 4201 return (IXGBE_FAILURE); 4202 } 4203 } else { 4204 /* 4205 * Call ddi_intr_enable() for Legacy/MSI non block enable 4206 */ 4207 for (i = 0; i < ixgbe->intr_cnt; i++) { 4208 rc = ddi_intr_enable(ixgbe->htable[i]); 4209 if (rc != DDI_SUCCESS) { 4210 ixgbe_log(ixgbe, 4211 "Enable intr failed: %d", rc); 4212 return (IXGBE_FAILURE); 4213 } 4214 } 4215 } 4216 4217 return (IXGBE_SUCCESS); 4218 } 4219 4220 /* 4221 * ixgbe_disable_intrs - Disable all the interrupts. 
 */
static int
ixgbe_disable_intrs(ixgbe_t *ixgbe)
{
	int i;
	int rc;

	/*
	 * Disable all interrupts
	 */
	if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
		rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
		if (rc != DDI_SUCCESS) {
			ixgbe_log(ixgbe,
			    "Disable block intr failed: %d", rc);
			return (IXGBE_FAILURE);
		}
	} else {
		/* per-handle disable for types without block support */
		for (i = 0; i < ixgbe->intr_cnt; i++) {
			rc = ddi_intr_disable(ixgbe->htable[i]);
			if (rc != DDI_SUCCESS) {
				ixgbe_log(ixgbe,
				    "Disable intr failed: %d", rc);
				return (IXGBE_FAILURE);
			}
		}
	}

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
 */
static void
ixgbe_get_hw_state(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
	boolean_t link_up = B_FALSE;
	uint32_t pcs1g_anlp = 0;
	uint32_t pcs1g_ana = 0;

	ASSERT(mutex_owned(&ixgbe->gen_lock));
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap = 0;

	/* check for link, don't wait */
	(void) ixgbe_check_link(hw, &speed, &link_up, false);
	if (link_up) {
		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
		pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

		/*
		 * NOTE(review): both the 1000fdx and 100fdx link-partner
		 * capabilities are derived from the same LPFD bit -- confirm
		 * the 100fdx case should not use a different PCS bit.
		 */
		ixgbe->param_lp_1000fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
		ixgbe->param_lp_100fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
	}

	/* if link is down, pcs1g_ana is still 0 and these report 0 */
	ixgbe->param_adv_1000fdx_cap =
	    (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0;
	ixgbe->param_adv_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0;
}

/*
 * ixgbe_get_driver_control - Notify that driver is in control of device.
4288 */ 4289 static void 4290 ixgbe_get_driver_control(struct ixgbe_hw *hw) 4291 { 4292 uint32_t ctrl_ext; 4293 4294 /* 4295 * Notify firmware that driver is in control of device 4296 */ 4297 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 4298 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 4299 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 4300 } 4301 4302 /* 4303 * ixgbe_release_driver_control - Notify that driver is no longer in control 4304 * of device. 4305 */ 4306 static void 4307 ixgbe_release_driver_control(struct ixgbe_hw *hw) 4308 { 4309 uint32_t ctrl_ext; 4310 4311 /* 4312 * Notify firmware that driver is no longer in control of device 4313 */ 4314 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 4315 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 4316 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 4317 } 4318 4319 /* 4320 * ixgbe_atomic_reserve - Atomic decrease operation. 4321 */ 4322 int 4323 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n) 4324 { 4325 uint32_t oldval; 4326 uint32_t newval; 4327 4328 /* 4329 * ATOMICALLY 4330 */ 4331 do { 4332 oldval = *count_p; 4333 if (oldval < n) 4334 return (-1); 4335 newval = oldval - n; 4336 } while (atomic_cas_32(count_p, oldval, newval) != oldval); 4337 4338 return (newval); 4339 } 4340 4341 /* 4342 * ixgbe_mc_table_itr - Traverse the entries in the multicast table. 
 */
static uint8_t *
ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
{
	uint8_t *addr = *upd_ptr;
	uint8_t *new_ptr;

	_NOTE(ARGUNUSED(hw));
	_NOTE(ARGUNUSED(vmdq));

	/* advance the caller's cursor by one ethernet address */
	new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
	*upd_ptr = new_ptr;
	return (addr);
}

/*
 * FMA support
 */
int
ixgbe_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	/* clear the sticky error state so the next check starts clean */
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}

int
ixgbe_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}

/*
 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
 */
static int
ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	_NOTE(ARGUNUSED(impl_data));
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

/*
 * ixgbe_fm_init - Register the driver's FMA capabilities with the
 * IO Fault Services framework.
 */
static void
ixgbe_fm_init(ixgbe_t *ixgbe)
{
	ddi_iblock_cookie_t iblk;
	int fma_acc_flag, fma_dma_flag;

	/*
	 * Only register with IO Fault Services if we have some capability
	 */
	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		fma_acc_flag = 1;
	} else {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		fma_acc_flag = 0;
	}

	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		fma_dma_flag = 1;
	} else {
		fma_dma_flag = 0;
	}

	ixgbe_set_fma_flags(fma_acc_flag, fma_dma_flag);

	if (ixgbe->fm_capabilities) {

		/*
		 * Register capabilities with IO Fault Services
		 */
		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_setup(ixgbe->dip);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_register(ixgbe->dip,
			    ixgbe_fm_error_cb, (void*) ixgbe);
	}
}

/*
 * ixgbe_fm_fini - Tear down whatever ixgbe_fm_init() registered.
 */
static void
ixgbe_fm_fini(ixgbe_t *ixgbe)
{
	/*
	 * Only unregister FMA capabilities if they are registered
	 */
	if (ixgbe->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_teardown(ixgbe->dip);

		/*
		 * Un-register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_unregister(ixgbe->dip);

		/*
		 * Unregister from IO Fault Service
		 */
		ddi_fm_fini(ixgbe->dip);
	}
}

/*
 * ixgbe_fm_ereport - Post an FMA ereport with the given detail class.
 */
void
ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
		ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}

/*
 * ixgbe_ring_start - mri_start callback; records the MAC generation number
 * for the rx ring.
 */
static int
ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;

	mutex_enter(&rx_ring->rx_lock);
	rx_ring->ring_gen_num = mr_gen_num;
	mutex_exit(&rx_ring->rx_lock);
	return (0);
}

/*
 * Callback function for MAC layer to register all rings.
 */
/* ARGSUSED */
void
ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		/* single rx group in this driver */
		ASSERT(rg_index == 0);
		ASSERT(ring_index < ixgbe->num_rx_rings);

		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[ring_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = ixgbe_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = ixgbe_ring_rx_poll;

		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
		mintr->mi_disable = ixgbe_rx_ring_intr_disable;

		break;
	}
	case MAC_RING_TYPE_TX: {
		/* tx rings are not grouped */
		ASSERT(rg_index == -1);
		ASSERT(ring_index < ixgbe->num_tx_rings);

		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
		tx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = ixgbe_ring_tx;

		break;
	}
	default:
		break;
	}
}

/*
 * Callback function for MAC layer to register all groups.
 */
void
ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		ixgbe_rx_group_t *rx_group;

		rx_group = &ixgbe->rx_groups[index];
		rx_group->group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = ixgbe_addmac;
		infop->mgi_remmac = ixgbe_remmac;
		/* rings are divided evenly among groups */
		infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);

		break;
	}
	case MAC_RING_TYPE_TX:
		break;
	default:
		break;
	}
}

/*
 * Enable interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	/* the ring must currently be in polling mode (bit cleared) */
	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 0);

	/*
	 * To enable interrupt by setting the VAL bit of given interrupt
	 * vector allocation register (IVAR).
	 */
	ixgbe_enable_ivar(ixgbe, r_idx, 0);

	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);

	/*
	 * To trigger a Rx interrupt to on this ring
	 */
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
	IXGBE_WRITE_FLUSH(&ixgbe->hw);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}

/*
 * Disable interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	/* the ring must currently be in interrupt mode (bit set) */
	ASSERT(BT_TEST(ixgbe->vect_map[v_idx].rx_map, r_idx) == 1);

	/*
	 * To disable interrupt by clearing the VAL bit of given interrupt
	 * vector allocation register (IVAR).
	 */
	ixgbe_disable_ivar(ixgbe, r_idx, 0);

	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}

/*
 * Add a mac address.
 */
static int
ixgbe_addmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	int slot;
	int err;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	if (ixgbe->unicst_avail == 0) {
		/* no slots available */
		mutex_exit(&ixgbe->gen_lock);
		return (ENOSPC);
	}

	/* find the first unused unicast-address slot */
	for (slot = 0; slot < ixgbe->unicst_total; slot++) {
		if (ixgbe->unicst_addr[slot].mac.set == 0)
			break;
	}

	/* unicst_avail > 0 guarantees a free slot was found */
	ASSERT((slot >= 0) && (slot < ixgbe->unicst_total));

	if ((err = ixgbe_unicst_set(ixgbe, mac_addr, slot)) == 0) {
		ixgbe->unicst_addr[slot].mac.set = 1;
		ixgbe->unicst_avail--;
	}

	mutex_exit(&ixgbe->gen_lock);

	return (err);
}

/*
 * Remove a mac address.
4680 */ 4681 static int 4682 ixgbe_remmac(void *arg, const uint8_t *mac_addr) 4683 { 4684 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 4685 ixgbe_t *ixgbe = rx_group->ixgbe; 4686 int slot; 4687 int err; 4688 4689 mutex_enter(&ixgbe->gen_lock); 4690 4691 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 4692 mutex_exit(&ixgbe->gen_lock); 4693 return (ECANCELED); 4694 } 4695 4696 slot = ixgbe_unicst_find(ixgbe, mac_addr); 4697 if (slot == -1) { 4698 mutex_exit(&ixgbe->gen_lock); 4699 return (EINVAL); 4700 } 4701 4702 if (ixgbe->unicst_addr[slot].mac.set == 0) { 4703 mutex_exit(&ixgbe->gen_lock); 4704 return (EINVAL); 4705 } 4706 4707 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 4708 if ((err = ixgbe_unicst_set(ixgbe, 4709 ixgbe->unicst_addr[slot].mac.addr, slot)) == 0) { 4710 ixgbe->unicst_addr[slot].mac.set = 0; 4711 ixgbe->unicst_avail++; 4712 } 4713 4714 mutex_exit(&ixgbe->gen_lock); 4715 4716 return (err); 4717 } 4718