1 /* 2 * CDDL HEADER START 3 * 4 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved. 5 * The contents of this file are subject to the terms of the 6 * Common Development and Distribution License (the "License"). 7 * You may not use this file except in compliance with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 23 /* 24 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 25 * Use is subject to license terms. 26 */ 27 28 #include "ixgbe_sw.h" 29 30 static char ixgbe_ident[] = "Intel 10Gb Ethernet"; 31 static char ixgbe_version[] = "ixgbe 1.1.5"; 32 33 /* 34 * Local function protoypes 35 */ 36 static int ixgbe_register_mac(ixgbe_t *); 37 static int ixgbe_identify_hardware(ixgbe_t *); 38 static int ixgbe_regs_map(ixgbe_t *); 39 static void ixgbe_init_properties(ixgbe_t *); 40 static int ixgbe_init_driver_settings(ixgbe_t *); 41 static void ixgbe_init_locks(ixgbe_t *); 42 static void ixgbe_destroy_locks(ixgbe_t *); 43 static int ixgbe_init(ixgbe_t *); 44 static int ixgbe_chip_start(ixgbe_t *); 45 static void ixgbe_chip_stop(ixgbe_t *); 46 static int ixgbe_reset(ixgbe_t *); 47 static void ixgbe_tx_clean(ixgbe_t *); 48 static boolean_t ixgbe_tx_drain(ixgbe_t *); 49 static boolean_t ixgbe_rx_drain(ixgbe_t *); 50 static int ixgbe_alloc_rings(ixgbe_t *); 51 static void ixgbe_free_rings(ixgbe_t *); 52 static int ixgbe_alloc_rx_data(ixgbe_t *); 53 static void ixgbe_free_rx_data(ixgbe_t *); 54 static void ixgbe_setup_rings(ixgbe_t *); 55 static void ixgbe_setup_rx(ixgbe_t *); 56 static void ixgbe_setup_tx(ixgbe_t *); 57 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *); 58 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *); 59 static void ixgbe_setup_rss(ixgbe_t *); 60 static void ixgbe_setup_vmdq(ixgbe_t *); 61 static void ixgbe_setup_vmdq_rss(ixgbe_t *); 62 static void ixgbe_init_unicst(ixgbe_t *); 63 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *); 64 static void ixgbe_setup_multicst(ixgbe_t *); 65 static void ixgbe_get_hw_state(ixgbe_t *); 66 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe); 67 static void ixgbe_get_conf(ixgbe_t *); 68 static void ixgbe_init_params(ixgbe_t *); 69 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int); 70 static void ixgbe_driver_link_check(ixgbe_t *); 71 static void ixgbe_sfp_check(void *); 72 static void ixgbe_link_timer(void *); 73 static void ixgbe_local_timer(void *); 74 static void ixgbe_arm_watchdog_timer(ixgbe_t *); 75 static void ixgbe_restart_watchdog_timer(ixgbe_t *); 76 static void ixgbe_disable_adapter_interrupts(ixgbe_t *); 77 static void ixgbe_enable_adapter_interrupts(ixgbe_t *); 78 static boolean_t is_valid_mac_addr(uint8_t *); 79 static boolean_t ixgbe_stall_check(ixgbe_t *); 80 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t); 81 static void ixgbe_set_internal_mac_loopback(ixgbe_t *); 82 static boolean_t ixgbe_find_mac_address(ixgbe_t *); 83 static int ixgbe_alloc_intrs(ixgbe_t *); 84 static int ixgbe_alloc_intr_handles(ixgbe_t *, int); 85 static int ixgbe_add_intr_handlers(ixgbe_t *); 86 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int); 87 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int); 88 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t); 89 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t); 90 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t); 91 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index); 92 static int ixgbe_map_intrs_to_vectors(ixgbe_t *); 93 static void ixgbe_setup_adapter_vector(ixgbe_t *); 94 static void ixgbe_rem_intr_handlers(ixgbe_t *); 95 static void ixgbe_rem_intrs(ixgbe_t *); 96 static int ixgbe_enable_intrs(ixgbe_t *); 97 static int ixgbe_disable_intrs(ixgbe_t *); 98 static uint_t ixgbe_intr_legacy(void *, void *); 99 static uint_t ixgbe_intr_msi(void *, void *); 100 static uint_t ixgbe_intr_msix(void *, void *); 101 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *); 102 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *); 103 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t); 104 static void ixgbe_get_driver_control(struct ixgbe_hw *); 105 static int ixgbe_addmac(void *, const uint8_t *); 106 static int ixgbe_remmac(void *, const uint8_t *); 107 static void ixgbe_release_driver_control(struct ixgbe_hw *); 108 109 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t); 110 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t); 111 static int ixgbe_resume(dev_info_t *); 112 static int ixgbe_suspend(dev_info_t *); 113 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *); 114 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *); 115 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *); 116 static int ixgbe_intr_cb_register(ixgbe_t *); 117 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int); 118 119 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 120 const void *impl_data); 121 static void ixgbe_fm_init(ixgbe_t *); 122 static void ixgbe_fm_fini(ixgbe_t *); 123 124 char *ixgbe_priv_props[] = { 125 "_tx_copy_thresh", 126 "_tx_recycle_thresh", 127 "_tx_overload_thresh", 128 "_tx_resched_thresh", 129 "_rx_copy_thresh", 130 "_rx_limit_per_intr", 131 "_intr_throttling", 132 "_adv_pause_cap", 133 "_adv_asym_pause_cap", 134 NULL 135 }; 136 137 #define IXGBE_MAX_PRIV_PROPS \ 138 (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t)) 139 140 static struct cb_ops ixgbe_cb_ops = { 141 nulldev, /* cb_open */ 142 nulldev, /* cb_close */ 143 nodev, /* cb_strategy */ 144 nodev, /* cb_print */ 145 nodev, /* cb_dump */ 146 nodev, /* cb_read */ 147 nodev, /* cb_write */ 148 nodev, /* cb_ioctl */ 149 nodev, /* cb_devmap */ 150 nodev, /* cb_mmap */ 151 nodev, /* cb_segmap */ 152 nochpoll, /* cb_chpoll */ 153 ddi_prop_op, /* cb_prop_op */ 154 NULL, /* cb_stream */ 155 D_MP | D_HOTPLUG, /* cb_flag */ 156 CB_REV, /* cb_rev */ 157 nodev, /* cb_aread */ 158 nodev /* cb_awrite */ 159 }; 160 161 static struct dev_ops ixgbe_dev_ops = { 162 DEVO_REV, /* devo_rev */ 163 0, /* devo_refcnt */ 164 NULL, /* devo_getinfo */ 165 nulldev, /* devo_identify */ 166 nulldev, /* devo_probe */ 167 ixgbe_attach, /* devo_attach */ 168 ixgbe_detach, /* devo_detach */ 169 nodev, /* devo_reset */ 170 &ixgbe_cb_ops, /* devo_cb_ops */ 171 NULL, /* devo_bus_ops */ 172 ddi_power, /* devo_power */ 173 ddi_quiesce_not_supported, /* devo_quiesce */ 174 }; 175 176 static struct modldrv ixgbe_modldrv = { 177 &mod_driverops, /* Type of module. This one is a driver */ 178 ixgbe_ident, /* Discription string */ 179 &ixgbe_dev_ops /* driver ops */ 180 }; 181 182 static struct modlinkage ixgbe_modlinkage = { 183 MODREV_1, &ixgbe_modldrv, NULL 184 }; 185 186 /* 187 * Access attributes for register mapping 188 */ 189 ddi_device_acc_attr_t ixgbe_regs_acc_attr = { 190 DDI_DEVICE_ATTR_V1, 191 DDI_STRUCTURE_LE_ACC, 192 DDI_STRICTORDER_ACC, 193 DDI_FLAGERR_ACC 194 }; 195 196 /* 197 * Loopback property 198 */ 199 static lb_property_t lb_normal = { 200 normal, "normal", IXGBE_LB_NONE 201 }; 202 203 static lb_property_t lb_mac = { 204 internal, "MAC", IXGBE_LB_INTERNAL_MAC 205 }; 206 207 static lb_property_t lb_external = { 208 external, "External", IXGBE_LB_EXTERNAL 209 }; 210 211 #define IXGBE_M_CALLBACK_FLAGS \ 212 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO) 213 214 static mac_callbacks_t ixgbe_m_callbacks = { 215 IXGBE_M_CALLBACK_FLAGS, 216 ixgbe_m_stat, 217 ixgbe_m_start, 218 ixgbe_m_stop, 219 ixgbe_m_promisc, 220 ixgbe_m_multicst, 221 NULL, 222 NULL, 223 NULL, 224 ixgbe_m_ioctl, 225 ixgbe_m_getcapab, 226 NULL, 227 NULL, 228 ixgbe_m_setprop, 229 ixgbe_m_getprop, 230 ixgbe_m_propinfo 231 }; 232 233 /* 234 * Initialize capabilities of each supported adapter type 235 */ 236 static adapter_info_t ixgbe_82598eb_cap = { 237 64, /* maximum number of rx queues */ 238 1, /* minimum number of rx queues */ 239 64, /* default number of rx queues */ 240 16, /* maximum number of rx groups */ 241 1, /* minimum number of rx groups */ 242 1, /* default number of rx groups */ 243 32, /* maximum number of tx queues */ 244 1, /* minimum number of tx queues */ 245 8, /* default number of tx queues */ 246 16366, /* maximum MTU size */ 247 0xFFFF, /* maximum interrupt throttle rate */ 248 0, /* minimum interrupt throttle rate */ 249 200, /* default interrupt throttle rate */ 250 18, /* maximum total msix vectors */ 251 16, /* maximum number of ring vectors */ 252 2, /* maximum number of other vectors */ 253 IXGBE_EICR_LSC, /* "other" interrupt types handled */ 254 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */ 255 | IXGBE_FLAG_RSS_CAPABLE 256 | IXGBE_FLAG_VMDQ_CAPABLE) 257 }; 258 259 static adapter_info_t ixgbe_82599eb_cap = { 260 128, /* maximum number of rx queues */ 261 1, /* minimum number of rx queues */ 262 128, /* default number of rx queues */ 263 64, /* maximum number of rx groups */ 264 1, /* minimum number of rx groups */ 265 1, /* default number of rx groups */ 266 128, /* maximum number of tx queues */ 267 1, /* minimum number of tx queues */ 268 8, /* default number of tx queues */ 269 15500, /* maximum MTU size */ 270 0xFF8, /* maximum interrupt throttle rate */ 271 0, /* minimum interrupt throttle rate */ 272 200, /* default interrupt throttle rate */ 273 64, /* maximum total msix vectors */ 274 16, /* maximum number of ring vectors */ 275 2, /* maximum number of other vectors */ 276 IXGBE_EICR_LSC, /* "other" interrupt types handled */ 277 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */ 278 | IXGBE_FLAG_RSS_CAPABLE 279 | IXGBE_FLAG_VMDQ_CAPABLE 280 | IXGBE_FLAG_RSC_CAPABLE) 281 }; 282 283 /* 284 * Module Initialization Functions. 285 */ 286 287 int 288 _init(void) 289 { 290 int status; 291 292 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME); 293 294 status = mod_install(&ixgbe_modlinkage); 295 296 if (status != DDI_SUCCESS) { 297 mac_fini_ops(&ixgbe_dev_ops); 298 } 299 300 return (status); 301 } 302 303 int 304 _fini(void) 305 { 306 int status; 307 308 status = mod_remove(&ixgbe_modlinkage); 309 310 if (status == DDI_SUCCESS) { 311 mac_fini_ops(&ixgbe_dev_ops); 312 } 313 314 return (status); 315 } 316 317 int 318 _info(struct modinfo *modinfop) 319 { 320 int status; 321 322 status = mod_info(&ixgbe_modlinkage, modinfop); 323 324 return (status); 325 } 326 327 /* 328 * ixgbe_attach - Driver attach. 329 * 330 * This function is the device specific initialization entry 331 * point. This entry point is required and must be written. 332 * The DDI_ATTACH command must be provided in the attach entry 333 * point. When attach() is called with cmd set to DDI_ATTACH, 334 * all normal kernel services (such as kmem_alloc(9F)) are 335 * available for use by the driver. 336 * 337 * The attach() function will be called once for each instance 338 * of the device on the system with cmd set to DDI_ATTACH. 339 * Until attach() succeeds, the only driver entry points which 340 * may be called are open(9E) and getinfo(9E). 341 */ 342 static int 343 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 344 { 345 ixgbe_t *ixgbe; 346 struct ixgbe_osdep *osdep; 347 struct ixgbe_hw *hw; 348 int instance; 349 char taskqname[32]; 350 351 /* 352 * Check the command and perform corresponding operations 353 */ 354 switch (cmd) { 355 default: 356 return (DDI_FAILURE); 357 358 case DDI_RESUME: 359 return (ixgbe_resume(devinfo)); 360 361 case DDI_ATTACH: 362 break; 363 } 364 365 /* Get the device instance */ 366 instance = ddi_get_instance(devinfo); 367 368 /* Allocate memory for the instance data structure */ 369 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP); 370 371 ixgbe->dip = devinfo; 372 ixgbe->instance = instance; 373 374 hw = &ixgbe->hw; 375 osdep = &ixgbe->osdep; 376 hw->back = osdep; 377 osdep->ixgbe = ixgbe; 378 379 /* Attach the instance pointer to the dev_info data structure */ 380 ddi_set_driver_private(devinfo, ixgbe); 381 382 /* 383 * Initialize for fma support 384 */ 385 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE, 386 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 387 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 388 ixgbe_fm_init(ixgbe); 389 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT; 390 391 /* 392 * Map PCI config space registers 393 */ 394 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { 395 ixgbe_error(ixgbe, "Failed to map PCI configurations"); 396 goto attach_fail; 397 } 398 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; 399 400 /* 401 * Identify the chipset family 402 */ 403 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) { 404 ixgbe_error(ixgbe, "Failed to identify hardware"); 405 goto attach_fail; 406 } 407 408 /* 409 * Map device registers 410 */ 411 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) { 412 ixgbe_error(ixgbe, "Failed to map device registers"); 413 goto attach_fail; 414 } 415 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP; 416 417 /* 418 * Initialize driver parameters 419 */ 420 ixgbe_init_properties(ixgbe); 421 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS; 422 423 /* 424 * Register interrupt callback 425 */ 426 if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) { 427 ixgbe_error(ixgbe, "Failed to register interrupt callback"); 428 goto attach_fail; 429 } 430 431 /* 432 * Allocate interrupts 433 */ 434 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) { 435 ixgbe_error(ixgbe, "Failed to allocate interrupts"); 436 goto attach_fail; 437 } 438 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; 439 440 /* 441 * Allocate rx/tx rings based on the ring numbers. 442 * The actual numbers of rx/tx rings are decided by the number of 443 * allocated interrupt vectors, so we should allocate the rings after 444 * interrupts are allocated. 445 */ 446 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) { 447 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings"); 448 goto attach_fail; 449 } 450 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS; 451 452 /* 453 * Map rings to interrupt vectors 454 */ 455 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { 456 ixgbe_error(ixgbe, "Failed to map interrupts to vectors"); 457 goto attach_fail; 458 } 459 460 /* 461 * Add interrupt handlers 462 */ 463 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { 464 ixgbe_error(ixgbe, "Failed to add interrupt handlers"); 465 goto attach_fail; 466 } 467 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 468 469 /* 470 * Create a taskq for sfp-change 471 */ 472 (void) sprintf(taskqname, "ixgbe%d_taskq", instance); 473 if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname, 474 1, TASKQ_DEFAULTPRI, 0)) == NULL) { 475 ixgbe_error(ixgbe, "taskq_create failed"); 476 goto attach_fail; 477 } 478 ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ; 479 480 /* 481 * Initialize driver parameters 482 */ 483 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) { 484 ixgbe_error(ixgbe, "Failed to initialize driver settings"); 485 goto attach_fail; 486 } 487 488 /* 489 * Initialize mutexes for this device. 490 * Do this before enabling the interrupt handler and 491 * register the softint to avoid the condition where 492 * interrupt handler can try using uninitialized mutex. 493 */ 494 ixgbe_init_locks(ixgbe); 495 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS; 496 497 /* 498 * Initialize chipset hardware 499 */ 500 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) { 501 ixgbe_error(ixgbe, "Failed to initialize adapter"); 502 goto attach_fail; 503 } 504 ixgbe->link_check_complete = B_FALSE; 505 ixgbe->link_check_hrtime = gethrtime() + 506 (IXGBE_LINK_UP_TIME * 100000000ULL); 507 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT; 508 509 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) { 510 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 511 goto attach_fail; 512 } 513 514 /* 515 * Initialize statistics 516 */ 517 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) { 518 ixgbe_error(ixgbe, "Failed to initialize statistics"); 519 goto attach_fail; 520 } 521 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS; 522 523 /* 524 * Register the driver to the MAC 525 */ 526 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) { 527 ixgbe_error(ixgbe, "Failed to register MAC"); 528 goto attach_fail; 529 } 530 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN); 531 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC; 532 533 ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe, 534 IXGBE_CYCLIC_PERIOD, DDI_IPL_0); 535 if (ixgbe->periodic_id == 0) { 536 ixgbe_error(ixgbe, "Failed to add the link check timer"); 537 goto attach_fail; 538 } 539 ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER; 540 541 /* 542 * Now that mutex locks are initialized, and the chip is also 543 * initialized, enable interrupts. 544 */ 545 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { 546 ixgbe_error(ixgbe, "Failed to enable DDI interrupts"); 547 goto attach_fail; 548 } 549 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 550 551 ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version); 552 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED); 553 554 return (DDI_SUCCESS); 555 556 attach_fail: 557 ixgbe_unconfigure(devinfo, ixgbe); 558 return (DDI_FAILURE); 559 } 560 561 /* 562 * ixgbe_detach - Driver detach. 563 * 564 * The detach() function is the complement of the attach routine. 565 * If cmd is set to DDI_DETACH, detach() is used to remove the 566 * state associated with a given instance of a device node 567 * prior to the removal of that instance from the system. 568 * 569 * The detach() function will be called once for each instance 570 * of the device for which there has been a successful attach() 571 * once there are no longer any opens on the device. 572 * 573 * Interrupts routine are disabled, All memory allocated by this 574 * driver are freed. 575 */ 576 static int 577 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 578 { 579 ixgbe_t *ixgbe; 580 581 /* 582 * Check detach command 583 */ 584 switch (cmd) { 585 default: 586 return (DDI_FAILURE); 587 588 case DDI_SUSPEND: 589 return (ixgbe_suspend(devinfo)); 590 591 case DDI_DETACH: 592 break; 593 } 594 595 /* 596 * Get the pointer to the driver private data structure 597 */ 598 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); 599 if (ixgbe == NULL) 600 return (DDI_FAILURE); 601 602 /* 603 * If the device is still running, it needs to be stopped first. 604 * This check is necessary because under some specific circumstances, 605 * the detach routine can be called without stopping the interface 606 * first. 607 */ 608 if (ixgbe->ixgbe_state & IXGBE_STARTED) { 609 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED); 610 mutex_enter(&ixgbe->gen_lock); 611 ixgbe_stop(ixgbe, B_TRUE); 612 mutex_exit(&ixgbe->gen_lock); 613 /* Disable and stop the watchdog timer */ 614 ixgbe_disable_watchdog_timer(ixgbe); 615 } 616 617 /* 618 * Check if there are still rx buffers held by the upper layer. 619 * If so, fail the detach. 620 */ 621 if (!ixgbe_rx_drain(ixgbe)) 622 return (DDI_FAILURE); 623 624 /* 625 * Do the remaining unconfigure routines 626 */ 627 ixgbe_unconfigure(devinfo, ixgbe); 628 629 return (DDI_SUCCESS); 630 } 631 632 static void 633 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe) 634 { 635 /* 636 * Disable interrupt 637 */ 638 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 639 (void) ixgbe_disable_intrs(ixgbe); 640 } 641 642 /* 643 * remove the link check timer 644 */ 645 if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) { 646 if (ixgbe->periodic_id != NULL) { 647 ddi_periodic_delete(ixgbe->periodic_id); 648 ixgbe->periodic_id = NULL; 649 } 650 } 651 652 /* 653 * Unregister MAC 654 */ 655 if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) { 656 (void) mac_unregister(ixgbe->mac_hdl); 657 } 658 659 /* 660 * Free statistics 661 */ 662 if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) { 663 kstat_delete((kstat_t *)ixgbe->ixgbe_ks); 664 } 665 666 /* 667 * Remove interrupt handlers 668 */ 669 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 670 ixgbe_rem_intr_handlers(ixgbe); 671 } 672 673 /* 674 * Remove taskq for sfp-status-change 675 */ 676 if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) { 677 ddi_taskq_destroy(ixgbe->sfp_taskq); 678 } 679 680 /* 681 * Remove interrupts 682 */ 683 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) { 684 ixgbe_rem_intrs(ixgbe); 685 } 686 687 /* 688 * Unregister interrupt callback handler 689 */ 690 (void) ddi_cb_unregister(ixgbe->cb_hdl); 691 692 /* 693 * Remove driver properties 694 */ 695 if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) { 696 (void) ddi_prop_remove_all(devinfo); 697 } 698 699 /* 700 * Stop the chipset 701 */ 702 if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) { 703 mutex_enter(&ixgbe->gen_lock); 704 ixgbe_chip_stop(ixgbe); 705 mutex_exit(&ixgbe->gen_lock); 706 } 707 708 /* 709 * Free register handle 710 */ 711 if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) { 712 if (ixgbe->osdep.reg_handle != NULL) 713 ddi_regs_map_free(&ixgbe->osdep.reg_handle); 714 } 715 716 /* 717 * Free PCI config handle 718 */ 719 if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) { 720 if (ixgbe->osdep.cfg_handle != NULL) 721 pci_config_teardown(&ixgbe->osdep.cfg_handle); 722 } 723 724 /* 725 * Free locks 726 */ 727 if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) { 728 ixgbe_destroy_locks(ixgbe); 729 } 730 731 /* 732 * Free the rx/tx rings 733 */ 734 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) { 735 ixgbe_free_rings(ixgbe); 736 } 737 738 /* 739 * Unregister FMA capabilities 740 */ 741 if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) { 742 ixgbe_fm_fini(ixgbe); 743 } 744 745 /* 746 * Free the driver data structure 747 */ 748 kmem_free(ixgbe, sizeof (ixgbe_t)); 749 750 ddi_set_driver_private(devinfo, NULL); 751 } 752 753 /* 754 * ixgbe_register_mac - Register the driver and its function pointers with 755 * the GLD interface. 756 */ 757 static int 758 ixgbe_register_mac(ixgbe_t *ixgbe) 759 { 760 struct ixgbe_hw *hw = &ixgbe->hw; 761 mac_register_t *mac; 762 int status; 763 764 if ((mac = mac_alloc(MAC_VERSION)) == NULL) 765 return (IXGBE_FAILURE); 766 767 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 768 mac->m_driver = ixgbe; 769 mac->m_dip = ixgbe->dip; 770 mac->m_src_addr = hw->mac.addr; 771 mac->m_callbacks = &ixgbe_m_callbacks; 772 mac->m_min_sdu = 0; 773 mac->m_max_sdu = ixgbe->default_mtu; 774 mac->m_margin = VLAN_TAGSZ; 775 mac->m_priv_props = ixgbe_priv_props; 776 mac->m_v12n = MAC_VIRT_LEVEL1; 777 778 status = mac_register(mac, &ixgbe->mac_hdl); 779 780 mac_free(mac); 781 782 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE); 783 } 784 785 /* 786 * ixgbe_identify_hardware - Identify the type of the chipset. 787 */ 788 static int 789 ixgbe_identify_hardware(ixgbe_t *ixgbe) 790 { 791 struct ixgbe_hw *hw = &ixgbe->hw; 792 struct ixgbe_osdep *osdep = &ixgbe->osdep; 793 794 /* 795 * Get the device id 796 */ 797 hw->vendor_id = 798 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID); 799 hw->device_id = 800 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID); 801 hw->revision_id = 802 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID); 803 hw->subsystem_device_id = 804 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID); 805 hw->subsystem_vendor_id = 806 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID); 807 808 /* 809 * Set the mac type of the adapter based on the device id 810 */ 811 if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) { 812 return (IXGBE_FAILURE); 813 } 814 815 /* 816 * Install adapter capabilities 817 */ 818 switch (hw->mac.type) { 819 case ixgbe_mac_82598EB: 820 IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n"); 821 ixgbe->capab = &ixgbe_82598eb_cap; 822 823 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) { 824 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 825 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1; 826 } 827 ixgbe->capab->other_intr |= IXGBE_EICR_LSC; 828 829 break; 830 case ixgbe_mac_82599EB: 831 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n"); 832 ixgbe->capab = &ixgbe_82599eb_cap; 833 834 ixgbe->capab->other_intr = (IXGBE_EICR_GPI_SDP1 | 835 IXGBE_EICR_GPI_SDP2 | IXGBE_EICR_LSC); 836 837 break; 838 default: 839 IXGBE_DEBUGLOG_1(ixgbe, 840 "adapter not supported in ixgbe_identify_hardware(): %d\n", 841 hw->mac.type); 842 return (IXGBE_FAILURE); 843 } 844 845 return (IXGBE_SUCCESS); 846 } 847 848 /* 849 * ixgbe_regs_map - Map the device registers. 850 * 851 */ 852 static int 853 ixgbe_regs_map(ixgbe_t *ixgbe) 854 { 855 dev_info_t *devinfo = ixgbe->dip; 856 struct ixgbe_hw *hw = &ixgbe->hw; 857 struct ixgbe_osdep *osdep = &ixgbe->osdep; 858 off_t mem_size; 859 860 /* 861 * First get the size of device registers to be mapped. 862 */ 863 if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size) 864 != DDI_SUCCESS) { 865 return (IXGBE_FAILURE); 866 } 867 868 /* 869 * Call ddi_regs_map_setup() to map registers 870 */ 871 if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET, 872 (caddr_t *)&hw->hw_addr, 0, 873 mem_size, &ixgbe_regs_acc_attr, 874 &osdep->reg_handle)) != DDI_SUCCESS) { 875 return (IXGBE_FAILURE); 876 } 877 878 return (IXGBE_SUCCESS); 879 } 880 881 /* 882 * ixgbe_init_properties - Initialize driver properties. 883 */ 884 static void 885 ixgbe_init_properties(ixgbe_t *ixgbe) 886 { 887 /* 888 * Get conf file properties, including link settings 889 * jumbo frames, ring number, descriptor number, etc. 890 */ 891 ixgbe_get_conf(ixgbe); 892 893 ixgbe_init_params(ixgbe); 894 } 895 896 /* 897 * ixgbe_init_driver_settings - Initialize driver settings. 898 * 899 * The settings include hardware function pointers, bus information, 900 * rx/tx rings settings, link state, and any other parameters that 901 * need to be setup during driver initialization. 902 */ 903 static int 904 ixgbe_init_driver_settings(ixgbe_t *ixgbe) 905 { 906 struct ixgbe_hw *hw = &ixgbe->hw; 907 dev_info_t *devinfo = ixgbe->dip; 908 ixgbe_rx_ring_t *rx_ring; 909 ixgbe_rx_group_t *rx_group; 910 ixgbe_tx_ring_t *tx_ring; 911 uint32_t rx_size; 912 uint32_t tx_size; 913 uint32_t ring_per_group; 914 int i; 915 916 /* 917 * Initialize chipset specific hardware function pointers 918 */ 919 if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) { 920 return (IXGBE_FAILURE); 921 } 922 923 /* 924 * Get the system page size 925 */ 926 ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1); 927 928 /* 929 * Set rx buffer size 930 * 931 * The IP header alignment room is counted in the calculation. 932 * The rx buffer size is in unit of 1K that is required by the 933 * chipset hardware. 934 */ 935 rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM; 936 ixgbe->rx_buf_size = ((rx_size >> 10) + 937 ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10; 938 939 /* 940 * Set tx buffer size 941 */ 942 tx_size = ixgbe->max_frame_size; 943 ixgbe->tx_buf_size = ((tx_size >> 10) + 944 ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10; 945 946 /* 947 * Initialize rx/tx rings/groups parameters 948 */ 949 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 950 for (i = 0; i < ixgbe->num_rx_rings; i++) { 951 rx_ring = &ixgbe->rx_rings[i]; 952 rx_ring->index = i; 953 rx_ring->ixgbe = ixgbe; 954 rx_ring->group_index = i / ring_per_group; 955 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i); 956 } 957 958 for (i = 0; i < ixgbe->num_rx_groups; i++) { 959 rx_group = &ixgbe->rx_groups[i]; 960 rx_group->index = i; 961 rx_group->ixgbe = ixgbe; 962 } 963 964 for (i = 0; i < ixgbe->num_tx_rings; i++) { 965 tx_ring = &ixgbe->tx_rings[i]; 966 tx_ring->index = i; 967 tx_ring->ixgbe = ixgbe; 968 if (ixgbe->tx_head_wb_enable) 969 tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb; 970 else 971 tx_ring->tx_recycle = ixgbe_tx_recycle_legacy; 972 973 tx_ring->ring_size = ixgbe->tx_ring_size; 974 tx_ring->free_list_size = ixgbe->tx_ring_size + 975 (ixgbe->tx_ring_size >> 1); 976 } 977 978 /* 979 * Initialize values of interrupt throttling rate 980 */ 981 for (i = 1; i < MAX_INTR_VECTOR; i++) 982 ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0]; 983 984 /* 985 * The initial link state should be "unknown" 986 */ 987 ixgbe->link_state = LINK_STATE_UNKNOWN; 988 989 return (IXGBE_SUCCESS); 990 } 991 992 /* 993 * ixgbe_init_locks - Initialize locks. 994 */ 995 static void 996 ixgbe_init_locks(ixgbe_t *ixgbe) 997 { 998 ixgbe_rx_ring_t *rx_ring; 999 ixgbe_tx_ring_t *tx_ring; 1000 int i; 1001 1002 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1003 rx_ring = &ixgbe->rx_rings[i]; 1004 mutex_init(&rx_ring->rx_lock, NULL, 1005 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1006 } 1007 1008 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1009 tx_ring = &ixgbe->tx_rings[i]; 1010 mutex_init(&tx_ring->tx_lock, NULL, 1011 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1012 mutex_init(&tx_ring->recycle_lock, NULL, 1013 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1014 mutex_init(&tx_ring->tcb_head_lock, NULL, 1015 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1016 mutex_init(&tx_ring->tcb_tail_lock, NULL, 1017 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1018 } 1019 1020 mutex_init(&ixgbe->gen_lock, NULL, 1021 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1022 1023 mutex_init(&ixgbe->watchdog_lock, NULL, 1024 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1025 } 1026 1027 /* 1028 * ixgbe_destroy_locks - Destroy locks. 1029 */ 1030 static void 1031 ixgbe_destroy_locks(ixgbe_t *ixgbe) 1032 { 1033 ixgbe_rx_ring_t *rx_ring; 1034 ixgbe_tx_ring_t *tx_ring; 1035 int i; 1036 1037 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1038 rx_ring = &ixgbe->rx_rings[i]; 1039 mutex_destroy(&rx_ring->rx_lock); 1040 } 1041 1042 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1043 tx_ring = &ixgbe->tx_rings[i]; 1044 mutex_destroy(&tx_ring->tx_lock); 1045 mutex_destroy(&tx_ring->recycle_lock); 1046 mutex_destroy(&tx_ring->tcb_head_lock); 1047 mutex_destroy(&tx_ring->tcb_tail_lock); 1048 } 1049 1050 mutex_destroy(&ixgbe->gen_lock); 1051 mutex_destroy(&ixgbe->watchdog_lock); 1052 } 1053 1054 static int 1055 ixgbe_resume(dev_info_t *devinfo) 1056 { 1057 ixgbe_t *ixgbe; 1058 int i; 1059 1060 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); 1061 if (ixgbe == NULL) 1062 return (DDI_FAILURE); 1063 1064 mutex_enter(&ixgbe->gen_lock); 1065 1066 if (ixgbe->ixgbe_state & IXGBE_STARTED) { 1067 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 1068 mutex_exit(&ixgbe->gen_lock); 1069 return (DDI_FAILURE); 1070 } 1071 1072 /* 1073 * Enable and start the watchdog timer 1074 */ 1075 ixgbe_enable_watchdog_timer(ixgbe); 1076 } 1077 1078 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED); 1079 1080 if (ixgbe->ixgbe_state & IXGBE_STARTED) { 1081 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1082 mac_tx_ring_update(ixgbe->mac_hdl, 1083 ixgbe->tx_rings[i].ring_handle); 1084 } 1085 } 1086 1087 mutex_exit(&ixgbe->gen_lock); 1088 1089 return (DDI_SUCCESS); 1090 } 1091 1092 static int 1093 ixgbe_suspend(dev_info_t *devinfo) 1094 { 1095 ixgbe_t *ixgbe; 1096 1097 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); 1098 if (ixgbe == NULL) 1099 return (DDI_FAILURE); 1100 1101 mutex_enter(&ixgbe->gen_lock); 1102 1103 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED); 1104 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) { 1105 mutex_exit(&ixgbe->gen_lock); 1106 return (DDI_SUCCESS); 1107 } 1108 ixgbe_stop(ixgbe, B_FALSE); 1109 1110 mutex_exit(&ixgbe->gen_lock); 1111 1112 /* 1113 * Disable and stop the watchdog timer 1114 */ 1115 ixgbe_disable_watchdog_timer(ixgbe); 1116 1117 return (DDI_SUCCESS); 1118 } 1119 1120 /* 1121 * ixgbe_init - Initialize the device. 1122 */ 1123 static int 1124 ixgbe_init(ixgbe_t *ixgbe) 1125 { 1126 struct ixgbe_hw *hw = &ixgbe->hw; 1127 1128 mutex_enter(&ixgbe->gen_lock); 1129 1130 /* 1131 * Reset chipset to put the hardware in a known state 1132 * before we try to do anything with the eeprom. 1133 */ 1134 if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) { 1135 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1136 goto init_fail; 1137 } 1138 1139 /* 1140 * Need to init eeprom before validating the checksum. 1141 */ 1142 if (ixgbe_init_eeprom_params(hw) < 0) { 1143 ixgbe_error(ixgbe, 1144 "Unable to intitialize the eeprom interface."); 1145 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1146 goto init_fail; 1147 } 1148 1149 /* 1150 * NVM validation 1151 */ 1152 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 1153 /* 1154 * Some PCI-E parts fail the first check due to 1155 * the link being in sleep state. Call it again, 1156 * if it fails a second time it's a real issue. 1157 */ 1158 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 1159 ixgbe_error(ixgbe, 1160 "Invalid NVM checksum. Please contact " 1161 "the vendor to update the NVM."); 1162 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1163 goto init_fail; 1164 } 1165 } 1166 1167 /* 1168 * Setup default flow control thresholds - enable/disable 1169 * & flow control type is controlled by ixgbe.conf 1170 */ 1171 hw->fc.high_water = DEFAULT_FCRTH; 1172 hw->fc.low_water = DEFAULT_FCRTL; 1173 hw->fc.pause_time = DEFAULT_FCPAUSE; 1174 hw->fc.send_xon = B_TRUE; 1175 1176 /* 1177 * Initialize link settings 1178 */ 1179 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE); 1180 1181 /* 1182 * Initialize the chipset hardware 1183 */ 1184 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) { 1185 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1186 goto init_fail; 1187 } 1188 1189 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 1190 goto init_fail; 1191 } 1192 1193 mutex_exit(&ixgbe->gen_lock); 1194 return (IXGBE_SUCCESS); 1195 1196 init_fail: 1197 /* 1198 * Reset PHY 1199 */ 1200 (void) ixgbe_reset_phy(hw); 1201 1202 mutex_exit(&ixgbe->gen_lock); 1203 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 1204 return (IXGBE_FAILURE); 1205 } 1206 1207 /* 1208 * ixgbe_chip_start - Initialize and start the chipset hardware. 1209 */ 1210 static int 1211 ixgbe_chip_start(ixgbe_t *ixgbe) 1212 { 1213 struct ixgbe_hw *hw = &ixgbe->hw; 1214 int ret_val, i; 1215 1216 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1217 1218 /* 1219 * Get the mac address 1220 * This function should handle SPARC case correctly. 1221 */ 1222 if (!ixgbe_find_mac_address(ixgbe)) { 1223 ixgbe_error(ixgbe, "Failed to get the mac address"); 1224 return (IXGBE_FAILURE); 1225 } 1226 1227 /* 1228 * Validate the mac address 1229 */ 1230 (void) ixgbe_init_rx_addrs(hw); 1231 if (!is_valid_mac_addr(hw->mac.addr)) { 1232 ixgbe_error(ixgbe, "Invalid mac address"); 1233 return (IXGBE_FAILURE); 1234 } 1235 1236 /* 1237 * Configure/Initialize hardware 1238 */ 1239 ret_val = ixgbe_init_hw(hw); 1240 if (ret_val != IXGBE_SUCCESS) { 1241 if (ret_val == IXGBE_ERR_EEPROM_VERSION) { 1242 ixgbe_error(ixgbe, 1243 "This 82599 device is pre-release and contains" 1244 " outdated firmware, please contact your hardware" 1245 " vendor for a replacement."); 1246 } else { 1247 ixgbe_error(ixgbe, "Failed to initialize hardware"); 1248 return (IXGBE_FAILURE); 1249 } 1250 } 1251 1252 /* 1253 * Re-enable relaxed ordering for performance. It is disabled 1254 * by default in the hardware init. 1255 */ 1256 ixgbe_enable_relaxed_ordering(hw); 1257 1258 /* 1259 * Setup adapter interrupt vectors 1260 */ 1261 ixgbe_setup_adapter_vector(ixgbe); 1262 1263 /* 1264 * Initialize unicast addresses. 1265 */ 1266 ixgbe_init_unicst(ixgbe); 1267 1268 /* 1269 * Setup and initialize the mctable structures. 1270 */ 1271 ixgbe_setup_multicst(ixgbe); 1272 1273 /* 1274 * Set interrupt throttling rate 1275 */ 1276 for (i = 0; i < ixgbe->intr_cnt; i++) { 1277 IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]); 1278 } 1279 1280 /* 1281 * Save the state of the phy 1282 */ 1283 ixgbe_get_hw_state(ixgbe); 1284 1285 /* 1286 * Make sure driver has control 1287 */ 1288 ixgbe_get_driver_control(hw); 1289 1290 return (IXGBE_SUCCESS); 1291 } 1292 1293 /* 1294 * ixgbe_chip_stop - Stop the chipset hardware 1295 */ 1296 static void 1297 ixgbe_chip_stop(ixgbe_t *ixgbe) 1298 { 1299 struct ixgbe_hw *hw = &ixgbe->hw; 1300 1301 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1302 1303 /* 1304 * Tell firmware driver is no longer in control 1305 */ 1306 ixgbe_release_driver_control(hw); 1307 1308 /* 1309 * Reset the chipset 1310 */ 1311 (void) ixgbe_reset_hw(hw); 1312 1313 /* 1314 * Reset PHY 1315 */ 1316 (void) ixgbe_reset_phy(hw); 1317 } 1318 1319 /* 1320 * ixgbe_reset - Reset the chipset and re-start the driver. 1321 * 1322 * It involves stopping and re-starting the chipset, 1323 * and re-configuring the rx/tx rings. 1324 */ 1325 static int 1326 ixgbe_reset(ixgbe_t *ixgbe) 1327 { 1328 int i; 1329 1330 /* 1331 * Disable and stop the watchdog timer 1332 */ 1333 ixgbe_disable_watchdog_timer(ixgbe); 1334 1335 mutex_enter(&ixgbe->gen_lock); 1336 1337 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED); 1338 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED); 1339 1340 ixgbe_stop(ixgbe, B_FALSE); 1341 1342 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 1343 mutex_exit(&ixgbe->gen_lock); 1344 return (IXGBE_FAILURE); 1345 } 1346 1347 /* 1348 * After resetting, need to recheck the link status. 1349 */ 1350 ixgbe->link_check_complete = B_FALSE; 1351 ixgbe->link_check_hrtime = gethrtime() + 1352 (IXGBE_LINK_UP_TIME * 100000000ULL); 1353 1354 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED); 1355 1356 if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) { 1357 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1358 mac_tx_ring_update(ixgbe->mac_hdl, 1359 ixgbe->tx_rings[i].ring_handle); 1360 } 1361 } 1362 1363 mutex_exit(&ixgbe->gen_lock); 1364 1365 /* 1366 * Enable and start the watchdog timer 1367 */ 1368 ixgbe_enable_watchdog_timer(ixgbe); 1369 1370 return (IXGBE_SUCCESS); 1371 } 1372 1373 /* 1374 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources. 1375 */ 1376 static void 1377 ixgbe_tx_clean(ixgbe_t *ixgbe) 1378 { 1379 ixgbe_tx_ring_t *tx_ring; 1380 tx_control_block_t *tcb; 1381 link_list_t pending_list; 1382 uint32_t desc_num; 1383 int i, j; 1384 1385 LINK_LIST_INIT(&pending_list); 1386 1387 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1388 tx_ring = &ixgbe->tx_rings[i]; 1389 1390 mutex_enter(&tx_ring->recycle_lock); 1391 1392 /* 1393 * Clean the pending tx data - the pending packets in the 1394 * work_list that have no chances to be transmitted again. 1395 * 1396 * We must ensure the chipset is stopped or the link is down 1397 * before cleaning the transmit packets. 1398 */ 1399 desc_num = 0; 1400 for (j = 0; j < tx_ring->ring_size; j++) { 1401 tcb = tx_ring->work_list[j]; 1402 if (tcb != NULL) { 1403 desc_num += tcb->desc_num; 1404 1405 tx_ring->work_list[j] = NULL; 1406 1407 ixgbe_free_tcb(tcb); 1408 1409 LIST_PUSH_TAIL(&pending_list, &tcb->link); 1410 } 1411 } 1412 1413 if (desc_num > 0) { 1414 atomic_add_32(&tx_ring->tbd_free, desc_num); 1415 ASSERT(tx_ring->tbd_free == tx_ring->ring_size); 1416 1417 /* 1418 * Reset the head and tail pointers of the tbd ring; 1419 * Reset the writeback head if it's enable. 1420 */ 1421 tx_ring->tbd_head = 0; 1422 tx_ring->tbd_tail = 0; 1423 if (ixgbe->tx_head_wb_enable) 1424 *tx_ring->tbd_head_wb = 0; 1425 1426 IXGBE_WRITE_REG(&ixgbe->hw, 1427 IXGBE_TDH(tx_ring->index), 0); 1428 IXGBE_WRITE_REG(&ixgbe->hw, 1429 IXGBE_TDT(tx_ring->index), 0); 1430 } 1431 1432 mutex_exit(&tx_ring->recycle_lock); 1433 1434 /* 1435 * Add the tx control blocks in the pending list to 1436 * the free list. 1437 */ 1438 ixgbe_put_free_list(tx_ring, &pending_list); 1439 } 1440 } 1441 1442 /* 1443 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be 1444 * transmitted. 1445 */ 1446 static boolean_t 1447 ixgbe_tx_drain(ixgbe_t *ixgbe) 1448 { 1449 ixgbe_tx_ring_t *tx_ring; 1450 boolean_t done; 1451 int i, j; 1452 1453 /* 1454 * Wait for a specific time to allow pending tx packets 1455 * to be transmitted. 1456 * 1457 * Check the counter tbd_free to see if transmission is done. 1458 * No lock protection is needed here. 1459 * 1460 * Return B_TRUE if all pending packets have been transmitted; 1461 * Otherwise return B_FALSE; 1462 */ 1463 for (i = 0; i < TX_DRAIN_TIME; i++) { 1464 1465 done = B_TRUE; 1466 for (j = 0; j < ixgbe->num_tx_rings; j++) { 1467 tx_ring = &ixgbe->tx_rings[j]; 1468 done = done && 1469 (tx_ring->tbd_free == tx_ring->ring_size); 1470 } 1471 1472 if (done) 1473 break; 1474 1475 msec_delay(1); 1476 } 1477 1478 return (done); 1479 } 1480 1481 /* 1482 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer. 1483 */ 1484 static boolean_t 1485 ixgbe_rx_drain(ixgbe_t *ixgbe) 1486 { 1487 boolean_t done = B_TRUE; 1488 int i; 1489 1490 /* 1491 * Polling the rx free list to check if those rx buffers held by 1492 * the upper layer are released. 1493 * 1494 * Check the counter rcb_free to see if all pending buffers are 1495 * released. No lock protection is needed here. 1496 * 1497 * Return B_TRUE if all pending buffers have been released; 1498 * Otherwise return B_FALSE; 1499 */ 1500 for (i = 0; i < RX_DRAIN_TIME; i++) { 1501 done = (ixgbe->rcb_pending == 0); 1502 1503 if (done) 1504 break; 1505 1506 msec_delay(1); 1507 } 1508 1509 return (done); 1510 } 1511 1512 /* 1513 * ixgbe_start - Start the driver/chipset. 1514 */ 1515 int 1516 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer) 1517 { 1518 int i; 1519 1520 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1521 1522 if (alloc_buffer) { 1523 if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) { 1524 ixgbe_error(ixgbe, 1525 "Failed to allocate software receive rings"); 1526 return (IXGBE_FAILURE); 1527 } 1528 1529 /* Allocate buffers for all the rx/tx rings */ 1530 if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) { 1531 ixgbe_error(ixgbe, "Failed to allocate DMA resource"); 1532 return (IXGBE_FAILURE); 1533 } 1534 1535 ixgbe->tx_ring_init = B_TRUE; 1536 } else { 1537 ixgbe->tx_ring_init = B_FALSE; 1538 } 1539 1540 for (i = 0; i < ixgbe->num_rx_rings; i++) 1541 mutex_enter(&ixgbe->rx_rings[i].rx_lock); 1542 for (i = 0; i < ixgbe->num_tx_rings; i++) 1543 mutex_enter(&ixgbe->tx_rings[i].tx_lock); 1544 1545 /* 1546 * Start the chipset hardware 1547 */ 1548 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) { 1549 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1550 goto start_failure; 1551 } 1552 1553 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 1554 goto start_failure; 1555 } 1556 1557 /* 1558 * Setup the rx/tx rings 1559 */ 1560 ixgbe_setup_rings(ixgbe); 1561 1562 /* 1563 * ixgbe_start() will be called when resetting, however if reset 1564 * happens, we need to clear the ERROR and STALL flags before 1565 * enabling the interrupts. 1566 */ 1567 atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR | IXGBE_STALL)); 1568 1569 /* 1570 * Enable adapter interrupts 1571 * The interrupts must be enabled after the driver state is START 1572 */ 1573 ixgbe_enable_adapter_interrupts(ixgbe); 1574 1575 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) 1576 mutex_exit(&ixgbe->tx_rings[i].tx_lock); 1577 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) 1578 mutex_exit(&ixgbe->rx_rings[i].rx_lock); 1579 1580 return (IXGBE_SUCCESS); 1581 1582 start_failure: 1583 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) 1584 mutex_exit(&ixgbe->tx_rings[i].tx_lock); 1585 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) 1586 mutex_exit(&ixgbe->rx_rings[i].rx_lock); 1587 1588 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 1589 1590 return (IXGBE_FAILURE); 1591 } 1592 1593 /* 1594 * ixgbe_stop - Stop the driver/chipset. 1595 */ 1596 void 1597 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer) 1598 { 1599 int i; 1600 1601 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1602 1603 /* 1604 * Disable the adapter interrupts 1605 */ 1606 ixgbe_disable_adapter_interrupts(ixgbe); 1607 1608 /* 1609 * Drain the pending tx packets 1610 */ 1611 (void) ixgbe_tx_drain(ixgbe); 1612 1613 for (i = 0; i < ixgbe->num_rx_rings; i++) 1614 mutex_enter(&ixgbe->rx_rings[i].rx_lock); 1615 for (i = 0; i < ixgbe->num_tx_rings; i++) 1616 mutex_enter(&ixgbe->tx_rings[i].tx_lock); 1617 1618 /* 1619 * Stop the chipset hardware 1620 */ 1621 ixgbe_chip_stop(ixgbe); 1622 1623 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 1624 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 1625 } 1626 1627 /* 1628 * Clean the pending tx data/resources 1629 */ 1630 ixgbe_tx_clean(ixgbe); 1631 1632 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) 1633 mutex_exit(&ixgbe->tx_rings[i].tx_lock); 1634 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) 1635 mutex_exit(&ixgbe->rx_rings[i].rx_lock); 1636 1637 if (ixgbe->link_state == LINK_STATE_UP) { 1638 ixgbe->link_state = LINK_STATE_UNKNOWN; 1639 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 1640 } 1641 1642 if (free_buffer) { 1643 /* 1644 * Release the DMA/memory resources of rx/tx rings 1645 */ 1646 ixgbe_free_dma(ixgbe); 1647 ixgbe_free_rx_data(ixgbe); 1648 } 1649 } 1650 1651 /* 1652 * ixgbe_cbfunc - Driver interface for generic DDI callbacks 1653 */ 1654 /* ARGSUSED */ 1655 static int 1656 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg, 1657 void *arg1, void *arg2) 1658 { 1659 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 1660 1661 switch (cbaction) { 1662 /* IRM callback */ 1663 int count; 1664 case DDI_CB_INTR_ADD: 1665 case DDI_CB_INTR_REMOVE: 1666 count = (int)(uintptr_t)cbarg; 1667 ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX); 1668 DTRACE_PROBE2(ixgbe__irm__callback, int, count, 1669 int, ixgbe->intr_cnt); 1670 if (ixgbe_intr_adjust(ixgbe, cbaction, count) != 1671 DDI_SUCCESS) { 1672 ixgbe_error(ixgbe, 1673 "IRM CB: Failed to adjust interrupts"); 1674 goto cb_fail; 1675 } 1676 break; 1677 default: 1678 IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported", 1679 cbaction); 1680 return (DDI_ENOTSUP); 1681 } 1682 return (DDI_SUCCESS); 1683 cb_fail: 1684 return (DDI_FAILURE); 1685 } 1686 1687 /* 1688 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request. 1689 */ 1690 static int 1691 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count) 1692 { 1693 int i, rc, actual; 1694 1695 if (count == 0) 1696 return (DDI_SUCCESS); 1697 1698 if ((cbaction == DDI_CB_INTR_ADD && 1699 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) || 1700 (cbaction == DDI_CB_INTR_REMOVE && 1701 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min)) 1702 return (DDI_FAILURE); 1703 1704 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) { 1705 return (DDI_FAILURE); 1706 } 1707 1708 for (i = 0; i < ixgbe->num_rx_rings; i++) 1709 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL); 1710 for (i = 0; i < ixgbe->num_tx_rings; i++) 1711 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL); 1712 1713 mutex_enter(&ixgbe->gen_lock); 1714 ixgbe->ixgbe_state &= ~IXGBE_STARTED; 1715 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST; 1716 ixgbe->ixgbe_state |= IXGBE_SUSPENDED; 1717 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN); 1718 1719 ixgbe_stop(ixgbe, B_FALSE); 1720 /* 1721 * Disable interrupts 1722 */ 1723 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 1724 rc = ixgbe_disable_intrs(ixgbe); 1725 ASSERT(rc == IXGBE_SUCCESS); 1726 } 1727 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR; 1728 1729 /* 1730 * Remove interrupt handlers 1731 */ 1732 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 1733 ixgbe_rem_intr_handlers(ixgbe); 1734 } 1735 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR; 1736 1737 /* 1738 * Clear vect_map 1739 */ 1740 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 1741 switch (cbaction) { 1742 case DDI_CB_INTR_ADD: 1743 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable, 1744 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual, 1745 DDI_INTR_ALLOC_NORMAL); 1746 if (rc != DDI_SUCCESS || actual != count) { 1747 ixgbe_log(ixgbe, "Adjust interrupts failed." 1748 "return: %d, irm cb size: %d, actual: %d", 1749 rc, count, actual); 1750 goto intr_adjust_fail; 1751 } 1752 ixgbe->intr_cnt += count; 1753 break; 1754 1755 case DDI_CB_INTR_REMOVE: 1756 for (i = ixgbe->intr_cnt - count; 1757 i < ixgbe->intr_cnt; i ++) { 1758 rc = ddi_intr_free(ixgbe->htable[i]); 1759 ixgbe->htable[i] = NULL; 1760 if (rc != DDI_SUCCESS) { 1761 ixgbe_log(ixgbe, "Adjust interrupts failed." 1762 "return: %d, irm cb size: %d, actual: %d", 1763 rc, count, actual); 1764 goto intr_adjust_fail; 1765 } 1766 } 1767 ixgbe->intr_cnt -= count; 1768 break; 1769 } 1770 1771 /* 1772 * Get priority for first vector, assume remaining are all the same 1773 */ 1774 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 1775 if (rc != DDI_SUCCESS) { 1776 ixgbe_log(ixgbe, 1777 "Get interrupt priority failed: %d", rc); 1778 goto intr_adjust_fail; 1779 } 1780 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 1781 if (rc != DDI_SUCCESS) { 1782 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc); 1783 goto intr_adjust_fail; 1784 } 1785 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; 1786 1787 /* 1788 * Map rings to interrupt vectors 1789 */ 1790 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { 1791 ixgbe_error(ixgbe, 1792 "IRM CB: Failed to map interrupts to vectors"); 1793 goto intr_adjust_fail; 1794 } 1795 1796 /* 1797 * Add interrupt handlers 1798 */ 1799 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { 1800 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers"); 1801 goto intr_adjust_fail; 1802 } 1803 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 1804 1805 /* 1806 * Now that mutex locks are initialized, and the chip is also 1807 * initialized, enable interrupts. 1808 */ 1809 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { 1810 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts"); 1811 goto intr_adjust_fail; 1812 } 1813 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 1814 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 1815 ixgbe_error(ixgbe, "IRM CB: Failed to start"); 1816 goto intr_adjust_fail; 1817 } 1818 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST; 1819 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED; 1820 ixgbe->ixgbe_state |= IXGBE_STARTED; 1821 mutex_exit(&ixgbe->gen_lock); 1822 1823 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1824 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, 1825 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]); 1826 } 1827 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1828 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, 1829 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]); 1830 } 1831 1832 /* Wakeup all Tx rings */ 1833 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1834 mac_tx_ring_update(ixgbe->mac_hdl, 1835 ixgbe->tx_rings[i].ring_handle); 1836 } 1837 1838 IXGBE_DEBUGLOG_3(ixgbe, 1839 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).", 1840 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max); 1841 return (DDI_SUCCESS); 1842 1843 intr_adjust_fail: 1844 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 1845 mutex_exit(&ixgbe->gen_lock); 1846 return (DDI_FAILURE); 1847 } 1848 1849 /* 1850 * ixgbe_intr_cb_register - Register interrupt callback function. 1851 */ 1852 static int 1853 ixgbe_intr_cb_register(ixgbe_t *ixgbe) 1854 { 1855 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc, 1856 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) { 1857 return (IXGBE_FAILURE); 1858 } 1859 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered."); 1860 return (IXGBE_SUCCESS); 1861 } 1862 1863 /* 1864 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings. 1865 */ 1866 static int 1867 ixgbe_alloc_rings(ixgbe_t *ixgbe) 1868 { 1869 /* 1870 * Allocate memory space for rx rings 1871 */ 1872 ixgbe->rx_rings = kmem_zalloc( 1873 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings, 1874 KM_NOSLEEP); 1875 1876 if (ixgbe->rx_rings == NULL) { 1877 return (IXGBE_FAILURE); 1878 } 1879 1880 /* 1881 * Allocate memory space for tx rings 1882 */ 1883 ixgbe->tx_rings = kmem_zalloc( 1884 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings, 1885 KM_NOSLEEP); 1886 1887 if (ixgbe->tx_rings == NULL) { 1888 kmem_free(ixgbe->rx_rings, 1889 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 1890 ixgbe->rx_rings = NULL; 1891 return (IXGBE_FAILURE); 1892 } 1893 1894 /* 1895 * Allocate memory space for rx ring groups 1896 */ 1897 ixgbe->rx_groups = kmem_zalloc( 1898 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups, 1899 KM_NOSLEEP); 1900 1901 if (ixgbe->rx_groups == NULL) { 1902 kmem_free(ixgbe->rx_rings, 1903 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 1904 kmem_free(ixgbe->tx_rings, 1905 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 1906 ixgbe->rx_rings = NULL; 1907 ixgbe->tx_rings = NULL; 1908 return (IXGBE_FAILURE); 1909 } 1910 1911 return (IXGBE_SUCCESS); 1912 } 1913 1914 /* 1915 * ixgbe_free_rings - Free the memory space of rx/tx rings. 1916 */ 1917 static void 1918 ixgbe_free_rings(ixgbe_t *ixgbe) 1919 { 1920 if (ixgbe->rx_rings != NULL) { 1921 kmem_free(ixgbe->rx_rings, 1922 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 1923 ixgbe->rx_rings = NULL; 1924 } 1925 1926 if (ixgbe->tx_rings != NULL) { 1927 kmem_free(ixgbe->tx_rings, 1928 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 1929 ixgbe->tx_rings = NULL; 1930 } 1931 1932 if (ixgbe->rx_groups != NULL) { 1933 kmem_free(ixgbe->rx_groups, 1934 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups); 1935 ixgbe->rx_groups = NULL; 1936 } 1937 } 1938 1939 static int 1940 ixgbe_alloc_rx_data(ixgbe_t *ixgbe) 1941 { 1942 ixgbe_rx_ring_t *rx_ring; 1943 int i; 1944 1945 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1946 rx_ring = &ixgbe->rx_rings[i]; 1947 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS) 1948 goto alloc_rx_rings_failure; 1949 } 1950 return (IXGBE_SUCCESS); 1951 1952 alloc_rx_rings_failure: 1953 ixgbe_free_rx_data(ixgbe); 1954 return (IXGBE_FAILURE); 1955 } 1956 1957 static void 1958 ixgbe_free_rx_data(ixgbe_t *ixgbe) 1959 { 1960 ixgbe_rx_ring_t *rx_ring; 1961 ixgbe_rx_data_t *rx_data; 1962 int i; 1963 1964 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1965 rx_ring = &ixgbe->rx_rings[i]; 1966 1967 mutex_enter(&ixgbe->rx_pending_lock); 1968 rx_data = rx_ring->rx_data; 1969 1970 if (rx_data != NULL) { 1971 rx_data->flag |= IXGBE_RX_STOPPED; 1972 1973 if (rx_data->rcb_pending == 0) { 1974 ixgbe_free_rx_ring_data(rx_data); 1975 rx_ring->rx_data = NULL; 1976 } 1977 } 1978 1979 mutex_exit(&ixgbe->rx_pending_lock); 1980 } 1981 } 1982 1983 /* 1984 * ixgbe_setup_rings - Setup rx/tx rings. 1985 */ 1986 static void 1987 ixgbe_setup_rings(ixgbe_t *ixgbe) 1988 { 1989 /* 1990 * Setup the rx/tx rings, including the following: 1991 * 1992 * 1. Setup the descriptor ring and the control block buffers; 1993 * 2. Initialize necessary registers for receive/transmit; 1994 * 3. Initialize software pointers/parameters for receive/transmit; 1995 */ 1996 ixgbe_setup_rx(ixgbe); 1997 1998 ixgbe_setup_tx(ixgbe); 1999 } 2000 2001 static void 2002 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring) 2003 { 2004 ixgbe_t *ixgbe = rx_ring->ixgbe; 2005 ixgbe_rx_data_t *rx_data = rx_ring->rx_data; 2006 struct ixgbe_hw *hw = &ixgbe->hw; 2007 rx_control_block_t *rcb; 2008 union ixgbe_adv_rx_desc *rbd; 2009 uint32_t size; 2010 uint32_t buf_low; 2011 uint32_t buf_high; 2012 uint32_t reg_val; 2013 int i; 2014 2015 ASSERT(mutex_owned(&rx_ring->rx_lock)); 2016 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2017 2018 for (i = 0; i < ixgbe->rx_ring_size; i++) { 2019 rcb = rx_data->work_list[i]; 2020 rbd = &rx_data->rbd_ring[i]; 2021 2022 rbd->read.pkt_addr = rcb->rx_buf.dma_address; 2023 rbd->read.hdr_addr = NULL; 2024 } 2025 2026 /* 2027 * Initialize the length register 2028 */ 2029 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc); 2030 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size); 2031 2032 /* 2033 * Initialize the base address registers 2034 */ 2035 buf_low = (uint32_t)rx_data->rbd_area.dma_address; 2036 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32); 2037 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high); 2038 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low); 2039 2040 /* 2041 * Setup head & tail pointers 2042 */ 2043 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index), 2044 rx_data->ring_size - 1); 2045 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0); 2046 2047 rx_data->rbd_next = 0; 2048 rx_data->lro_first = 0; 2049 2050 /* 2051 * Setup the Receive Descriptor Control Register (RXDCTL) 2052 * PTHRESH=32 descriptors (half the internal cache) 2053 * HTHRESH=0 descriptors (to minimize latency on fetch) 2054 * WTHRESH defaults to 1 (writeback each descriptor) 2055 */ 2056 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index)); 2057 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */ 2058 2059 /* Not a valid value for 82599 */ 2060 if (hw->mac.type < ixgbe_mac_82599EB) { 2061 reg_val |= 0x0020; /* pthresh */ 2062 } 2063 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val); 2064 2065 if (hw->mac.type == ixgbe_mac_82599EB) { 2066 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2067 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS); 2068 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2069 } 2070 2071 /* 2072 * Setup the Split and Replication Receive Control Register. 2073 * Set the rx buffer size and the advanced descriptor type. 2074 */ 2075 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) | 2076 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2077 reg_val |= IXGBE_SRRCTL_DROP_EN; 2078 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val); 2079 } 2080 2081 static void 2082 ixgbe_setup_rx(ixgbe_t *ixgbe) 2083 { 2084 ixgbe_rx_ring_t *rx_ring; 2085 struct ixgbe_hw *hw = &ixgbe->hw; 2086 uint32_t reg_val; 2087 uint32_t ring_mapping; 2088 uint32_t i, index; 2089 uint32_t psrtype_rss_bit; 2090 2091 /* PSRTYPE must be configured for 82599 */ 2092 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ && 2093 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) { 2094 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2095 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2096 reg_val |= IXGBE_PSRTYPE_L2HDR; 2097 reg_val |= 0x80000000; 2098 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val); 2099 } else { 2100 if (ixgbe->num_rx_groups > 32) { 2101 psrtype_rss_bit = 0x20000000; 2102 } else { 2103 psrtype_rss_bit = 0x40000000; 2104 } 2105 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) { 2106 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2107 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2108 reg_val |= IXGBE_PSRTYPE_L2HDR; 2109 reg_val |= psrtype_rss_bit; 2110 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val); 2111 } 2112 } 2113 2114 /* 2115 * Set filter control in FCTRL to accept broadcast packets and do 2116 * not pass pause frames to host. Flow control settings are already 2117 * in this register, so preserve them. 2118 */ 2119 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2120 reg_val |= IXGBE_FCTRL_BAM; /* broadcast accept mode */ 2121 reg_val |= IXGBE_FCTRL_DPF; /* discard pause frames */ 2122 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val); 2123 2124 /* 2125 * Hardware checksum settings 2126 */ 2127 if (ixgbe->rx_hcksum_enable) { 2128 reg_val = IXGBE_RXCSUM_IPPCSE; /* IP checksum */ 2129 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val); 2130 } 2131 2132 /* 2133 * Setup VMDq and RSS for multiple receive queues 2134 */ 2135 switch (ixgbe->classify_mode) { 2136 case IXGBE_CLASSIFY_RSS: 2137 /* 2138 * One group, only RSS is needed when more than 2139 * one ring enabled. 2140 */ 2141 ixgbe_setup_rss(ixgbe); 2142 break; 2143 2144 case IXGBE_CLASSIFY_VMDQ: 2145 /* 2146 * Multiple groups, each group has one ring, 2147 * only VMDq is needed. 2148 */ 2149 ixgbe_setup_vmdq(ixgbe); 2150 break; 2151 2152 case IXGBE_CLASSIFY_VMDQ_RSS: 2153 /* 2154 * Multiple groups and multiple rings, both 2155 * VMDq and RSS are needed. 2156 */ 2157 ixgbe_setup_vmdq_rss(ixgbe); 2158 break; 2159 2160 default: 2161 break; 2162 } 2163 2164 /* 2165 * Enable the receive unit. This must be done after filter 2166 * control is set in FCTRL. 2167 */ 2168 reg_val = (IXGBE_RXCTRL_RXEN /* Enable Receive Unit */ 2169 | IXGBE_RXCTRL_DMBYPS); /* descriptor monitor bypass */ 2170 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); 2171 2172 /* 2173 * ixgbe_setup_rx_ring must be called after configuring RXCTRL 2174 */ 2175 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2176 rx_ring = &ixgbe->rx_rings[i]; 2177 ixgbe_setup_rx_ring(rx_ring); 2178 } 2179 2180 /* 2181 * Setup the per-ring statistics mapping. 2182 */ 2183 ring_mapping = 0; 2184 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2185 index = ixgbe->rx_rings[i].hw_index; 2186 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2)); 2187 ring_mapping |= (i & 0xF) << (8 * (index & 0x3)); 2188 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping); 2189 } 2190 2191 /* 2192 * The Max Frame Size in MHADD/MAXFRS will be internally increased 2193 * by four bytes if the packet has a VLAN field, so includes MTU, 2194 * ethernet header and frame check sequence. 2195 * Register is MAXFRS in 82599. 2196 */ 2197 reg_val = (ixgbe->default_mtu + sizeof (struct ether_header) 2198 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT; 2199 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val); 2200 2201 /* 2202 * Setup Jumbo Frame enable bit 2203 */ 2204 if (ixgbe->default_mtu > ETHERMTU) { 2205 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2206 reg_val |= IXGBE_HLREG0_JUMBOEN; 2207 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2208 } 2209 2210 /* 2211 * Setup RSC for multiple receive queues. 2212 */ 2213 if (ixgbe->lro_enable) { 2214 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2215 /* 2216 * Make sure rx_buf_size * MAXDESC not greater 2217 * than 65535. 2218 * Intel recommends 4 for MAXDESC field value. 2219 */ 2220 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i)); 2221 reg_val |= IXGBE_RSCCTL_RSCEN; 2222 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k) 2223 reg_val |= IXGBE_RSCCTL_MAXDESC_1; 2224 else 2225 reg_val |= IXGBE_RSCCTL_MAXDESC_4; 2226 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val); 2227 } 2228 2229 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU); 2230 reg_val |= IXGBE_RSCDBU_RSCACKDIS; 2231 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val); 2232 2233 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2234 reg_val |= IXGBE_RDRXCTL_RSCACKC; 2235 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX; 2236 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2237 2238 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2239 } 2240 } 2241 2242 static void 2243 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring) 2244 { 2245 ixgbe_t *ixgbe = tx_ring->ixgbe; 2246 struct ixgbe_hw *hw = &ixgbe->hw; 2247 uint32_t size; 2248 uint32_t buf_low; 2249 uint32_t buf_high; 2250 uint32_t reg_val; 2251 2252 ASSERT(mutex_owned(&tx_ring->tx_lock)); 2253 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2254 2255 /* 2256 * Initialize the length register 2257 */ 2258 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc); 2259 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size); 2260 2261 /* 2262 * Initialize the base address registers 2263 */ 2264 buf_low = (uint32_t)tx_ring->tbd_area.dma_address; 2265 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32); 2266 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low); 2267 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high); 2268 2269 /* 2270 * Setup head & tail pointers 2271 */ 2272 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0); 2273 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0); 2274 2275 /* 2276 * Setup head write-back 2277 */ 2278 if (ixgbe->tx_head_wb_enable) { 2279 /* 2280 * The memory of the head write-back is allocated using 2281 * the extra tbd beyond the tail of the tbd ring. 2282 */ 2283 tx_ring->tbd_head_wb = (uint32_t *) 2284 ((uintptr_t)tx_ring->tbd_area.address + size); 2285 *tx_ring->tbd_head_wb = 0; 2286 2287 buf_low = (uint32_t) 2288 (tx_ring->tbd_area.dma_address + size); 2289 buf_high = (uint32_t) 2290 ((tx_ring->tbd_area.dma_address + size) >> 32); 2291 2292 /* Set the head write-back enable bit */ 2293 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE; 2294 2295 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low); 2296 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high); 2297 2298 /* 2299 * Turn off relaxed ordering for head write back or it will 2300 * cause problems with the tx recycling 2301 */ 2302 reg_val = IXGBE_READ_REG(hw, 2303 IXGBE_DCA_TXCTRL(tx_ring->index)); 2304 reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 2305 IXGBE_WRITE_REG(hw, 2306 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val); 2307 } else { 2308 tx_ring->tbd_head_wb = NULL; 2309 } 2310 2311 tx_ring->tbd_head = 0; 2312 tx_ring->tbd_tail = 0; 2313 tx_ring->tbd_free = tx_ring->ring_size; 2314 2315 if (ixgbe->tx_ring_init == B_TRUE) { 2316 tx_ring->tcb_head = 0; 2317 tx_ring->tcb_tail = 0; 2318 tx_ring->tcb_free = tx_ring->free_list_size; 2319 } 2320 2321 /* 2322 * Initialize the s/w context structure 2323 */ 2324 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t)); 2325 } 2326 2327 static void 2328 ixgbe_setup_tx(ixgbe_t *ixgbe) 2329 { 2330 struct ixgbe_hw *hw = &ixgbe->hw; 2331 ixgbe_tx_ring_t *tx_ring; 2332 uint32_t reg_val; 2333 uint32_t ring_mapping; 2334 int i; 2335 2336 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2337 tx_ring = &ixgbe->tx_rings[i]; 2338 ixgbe_setup_tx_ring(tx_ring); 2339 } 2340 2341 /* 2342 * Setup the per-ring statistics mapping. 2343 */ 2344 ring_mapping = 0; 2345 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2346 ring_mapping |= (i & 0xF) << (8 * (i & 0x3)); 2347 if ((i & 0x3) == 0x3) { 2348 if (hw->mac.type >= ixgbe_mac_82599EB) { 2349 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), 2350 ring_mapping); 2351 } else { 2352 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), 2353 ring_mapping); 2354 } 2355 ring_mapping = 0; 2356 } 2357 } 2358 if ((i & 0x3) != 0x3) 2359 if (hw->mac.type >= ixgbe_mac_82599EB) { 2360 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping); 2361 } else { 2362 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping); 2363 } 2364 2365 /* 2366 * Enable CRC appending and TX padding (for short tx frames) 2367 */ 2368 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2369 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN; 2370 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2371 2372 /* 2373 * enable DMA for 82599 parts 2374 */ 2375 if (hw->mac.type == ixgbe_mac_82599EB) { 2376 /* DMATXCTL.TE must be set after all Tx config is complete */ 2377 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2378 reg_val |= IXGBE_DMATXCTL_TE; 2379 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val); 2380 } 2381 2382 /* 2383 * Enabling tx queues .. 2384 * For 82599 must be done after DMATXCTL.TE is set 2385 */ 2386 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2387 tx_ring = &ixgbe->tx_rings[i]; 2388 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index)); 2389 reg_val |= IXGBE_TXDCTL_ENABLE; 2390 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val); 2391 } 2392 } 2393 2394 /* 2395 * ixgbe_setup_rss - Setup receive-side scaling feature. 2396 */ 2397 static void 2398 ixgbe_setup_rss(ixgbe_t *ixgbe) 2399 { 2400 struct ixgbe_hw *hw = &ixgbe->hw; 2401 uint32_t i, mrqc, rxcsum; 2402 uint32_t random; 2403 uint32_t reta; 2404 uint32_t ring_per_group; 2405 2406 /* 2407 * Fill out redirection table 2408 */ 2409 reta = 0; 2410 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2411 2412 for (i = 0; i < 128; i++) { 2413 reta = (reta << 8) | (i % ring_per_group) | 2414 ((i % ring_per_group) << 4); 2415 if ((i & 3) == 3) 2416 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 2417 } 2418 2419 /* 2420 * Fill out hash function seeds with a random constant 2421 */ 2422 for (i = 0; i < 10; i++) { 2423 (void) random_get_pseudo_bytes((uint8_t *)&random, 2424 sizeof (uint32_t)); 2425 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random); 2426 } 2427 2428 /* 2429 * Enable RSS & perform hash on these packet types 2430 */ 2431 mrqc = IXGBE_MRQC_RSSEN | 2432 IXGBE_MRQC_RSS_FIELD_IPV4 | 2433 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2434 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2435 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2436 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2437 IXGBE_MRQC_RSS_FIELD_IPV6 | 2438 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2439 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2440 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2441 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2442 2443 /* 2444 * Disable Packet Checksum to enable RSS for multiple receive queues. 2445 * It is an adapter hardware limitation that Packet Checksum is 2446 * mutually exclusive with RSS. 2447 */ 2448 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2449 rxcsum |= IXGBE_RXCSUM_PCSD; 2450 rxcsum &= ~IXGBE_RXCSUM_IPPCSE; 2451 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 2452 } 2453 2454 /* 2455 * ixgbe_setup_vmdq - Setup MAC classification feature 2456 */ 2457 static void 2458 ixgbe_setup_vmdq(ixgbe_t *ixgbe) 2459 { 2460 struct ixgbe_hw *hw = &ixgbe->hw; 2461 uint32_t vmdctl, i, vtctl; 2462 2463 /* 2464 * Setup the VMDq Control register, enable VMDq based on 2465 * packet destination MAC address: 2466 */ 2467 switch (hw->mac.type) { 2468 case ixgbe_mac_82598EB: 2469 /* 2470 * VMDq Enable = 1; 2471 * VMDq Filter = 0; MAC filtering 2472 * Default VMDq output index = 0; 2473 */ 2474 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2475 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2476 break; 2477 2478 case ixgbe_mac_82599EB: 2479 /* 2480 * Enable VMDq-only. 2481 */ 2482 vmdctl = IXGBE_MRQC_VMDQEN; 2483 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl); 2484 2485 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2486 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2487 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2488 } 2489 2490 /* 2491 * Enable Virtualization and Replication. 2492 */ 2493 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2494 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2495 2496 /* 2497 * Enable receiving packets to all VFs 2498 */ 2499 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2500 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2501 2502 break; 2503 2504 default: 2505 break; 2506 } 2507 } 2508 2509 /* 2510 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature. 2511 */ 2512 static void 2513 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe) 2514 { 2515 struct ixgbe_hw *hw = &ixgbe->hw; 2516 uint32_t i, mrqc, rxcsum; 2517 uint32_t random; 2518 uint32_t reta; 2519 uint32_t ring_per_group; 2520 uint32_t vmdctl, vtctl; 2521 2522 /* 2523 * Fill out redirection table 2524 */ 2525 reta = 0; 2526 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2527 for (i = 0; i < 128; i++) { 2528 reta = (reta << 8) | (i % ring_per_group) | 2529 ((i % ring_per_group) << 4); 2530 if ((i & 3) == 3) 2531 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 2532 } 2533 2534 /* 2535 * Fill out hash function seeds with a random constant 2536 */ 2537 for (i = 0; i < 10; i++) { 2538 (void) random_get_pseudo_bytes((uint8_t *)&random, 2539 sizeof (uint32_t)); 2540 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random); 2541 } 2542 2543 /* 2544 * Enable and setup RSS and VMDq 2545 */ 2546 switch (hw->mac.type) { 2547 case ixgbe_mac_82598EB: 2548 /* 2549 * Enable RSS & Setup RSS Hash functions 2550 */ 2551 mrqc = IXGBE_MRQC_RSSEN | 2552 IXGBE_MRQC_RSS_FIELD_IPV4 | 2553 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2554 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2555 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2556 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2557 IXGBE_MRQC_RSS_FIELD_IPV6 | 2558 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2559 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2560 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2561 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2562 2563 /* 2564 * Enable and Setup VMDq 2565 * VMDq Filter = 0; MAC filtering 2566 * Default VMDq output index = 0; 2567 */ 2568 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2569 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2570 break; 2571 2572 case ixgbe_mac_82599EB: 2573 /* 2574 * Enable RSS & Setup RSS Hash functions 2575 */ 2576 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 | 2577 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2578 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2579 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2580 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2581 IXGBE_MRQC_RSS_FIELD_IPV6 | 2582 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2583 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2584 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2585 2586 /* 2587 * Enable VMDq+RSS. 2588 */ 2589 if (ixgbe->num_rx_groups > 32) { 2590 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN; 2591 } else { 2592 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN; 2593 } 2594 2595 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2596 2597 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2598 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2599 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2600 } 2601 break; 2602 2603 default: 2604 break; 2605 2606 } 2607 2608 /* 2609 * Disable Packet Checksum to enable RSS for multiple receive queues. 2610 * It is an adapter hardware limitation that Packet Checksum is 2611 * mutually exclusive with RSS. 2612 */ 2613 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2614 rxcsum |= IXGBE_RXCSUM_PCSD; 2615 rxcsum &= ~IXGBE_RXCSUM_IPPCSE; 2616 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 2617 2618 if (hw->mac.type == ixgbe_mac_82599EB) { 2619 /* 2620 * Enable Virtualization and Replication. 2621 */ 2622 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2623 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2624 2625 /* 2626 * Enable receiving packets to all VFs 2627 */ 2628 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2629 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2630 } 2631 } 2632 2633 /* 2634 * ixgbe_init_unicst - Initialize the unicast addresses. 2635 */ 2636 static void 2637 ixgbe_init_unicst(ixgbe_t *ixgbe) 2638 { 2639 struct ixgbe_hw *hw = &ixgbe->hw; 2640 uint8_t *mac_addr; 2641 int slot; 2642 /* 2643 * Here we should consider two situations: 2644 * 2645 * 1. Chipset is initialized at the first time, 2646 * Clear all the multiple unicast addresses. 2647 * 2648 * 2. Chipset is reset 2649 * Recover the multiple unicast addresses from the 2650 * software data structure to the RAR registers. 2651 */ 2652 if (!ixgbe->unicst_init) { 2653 /* 2654 * Initialize the multiple unicast addresses 2655 */ 2656 ixgbe->unicst_total = hw->mac.num_rar_entries; 2657 ixgbe->unicst_avail = ixgbe->unicst_total; 2658 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 2659 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 2660 bzero(mac_addr, ETHERADDRL); 2661 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL); 2662 ixgbe->unicst_addr[slot].mac.set = 0; 2663 } 2664 ixgbe->unicst_init = B_TRUE; 2665 } else { 2666 /* Re-configure the RAR registers */ 2667 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 2668 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 2669 if (ixgbe->unicst_addr[slot].mac.set == 1) { 2670 (void) ixgbe_set_rar(hw, slot, mac_addr, 2671 ixgbe->unicst_addr[slot].mac.group_index, 2672 IXGBE_RAH_AV); 2673 } else { 2674 bzero(mac_addr, ETHERADDRL); 2675 (void) ixgbe_set_rar(hw, slot, mac_addr, 2676 NULL, NULL); 2677 } 2678 } 2679 } 2680 } 2681 2682 /* 2683 * ixgbe_unicst_find - Find the slot for the specified unicast address 2684 */ 2685 int 2686 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr) 2687 { 2688 int slot; 2689 2690 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2691 2692 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 2693 if (bcmp(ixgbe->unicst_addr[slot].mac.addr, 2694 mac_addr, ETHERADDRL) == 0) 2695 return (slot); 2696 } 2697 2698 return (-1); 2699 } 2700 2701 /* 2702 * ixgbe_multicst_add - Add a multicst address. 2703 */ 2704 int 2705 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr) 2706 { 2707 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2708 2709 if ((multiaddr[0] & 01) == 0) { 2710 return (EINVAL); 2711 } 2712 2713 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) { 2714 return (ENOENT); 2715 } 2716 2717 bcopy(multiaddr, 2718 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL); 2719 ixgbe->mcast_count++; 2720 2721 /* 2722 * Update the multicast table in the hardware 2723 */ 2724 ixgbe_setup_multicst(ixgbe); 2725 2726 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 2727 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 2728 return (EIO); 2729 } 2730 2731 return (0); 2732 } 2733 2734 /* 2735 * ixgbe_multicst_remove - Remove a multicst address. 2736 */ 2737 int 2738 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr) 2739 { 2740 int i; 2741 2742 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2743 2744 for (i = 0; i < ixgbe->mcast_count; i++) { 2745 if (bcmp(multiaddr, &ixgbe->mcast_table[i], 2746 ETHERADDRL) == 0) { 2747 for (i++; i < ixgbe->mcast_count; i++) { 2748 ixgbe->mcast_table[i - 1] = 2749 ixgbe->mcast_table[i]; 2750 } 2751 ixgbe->mcast_count--; 2752 break; 2753 } 2754 } 2755 2756 /* 2757 * Update the multicast table in the hardware 2758 */ 2759 ixgbe_setup_multicst(ixgbe); 2760 2761 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 2762 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 2763 return (EIO); 2764 } 2765 2766 return (0); 2767 } 2768 2769 /* 2770 * ixgbe_setup_multicast - Setup multicast data structures. 2771 * 2772 * This routine initializes all of the multicast related structures 2773 * and save them in the hardware registers. 2774 */ 2775 static void 2776 ixgbe_setup_multicst(ixgbe_t *ixgbe) 2777 { 2778 uint8_t *mc_addr_list; 2779 uint32_t mc_addr_count; 2780 struct ixgbe_hw *hw = &ixgbe->hw; 2781 2782 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2783 2784 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES); 2785 2786 mc_addr_list = (uint8_t *)ixgbe->mcast_table; 2787 mc_addr_count = ixgbe->mcast_count; 2788 2789 /* 2790 * Update the multicast addresses to the MTA registers 2791 */ 2792 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count, 2793 ixgbe_mc_table_itr); 2794 } 2795 2796 /* 2797 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode). 2798 * 2799 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers. 2800 * Different chipsets may have different allowed configuration of vmdq and rss. 2801 */ 2802 static void 2803 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe) 2804 { 2805 struct ixgbe_hw *hw = &ixgbe->hw; 2806 uint32_t ring_per_group; 2807 2808 switch (hw->mac.type) { 2809 case ixgbe_mac_82598EB: 2810 /* 2811 * 82598 supports the following combination: 2812 * vmdq no. x rss no. 2813 * [5..16] x 1 2814 * [1..4] x [1..16] 2815 * However 8 rss queue per pool (vmdq) is sufficient for 2816 * most cases. 2817 */ 2818 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2819 if (ixgbe->num_rx_groups > 4) { 2820 ixgbe->num_rx_rings = ixgbe->num_rx_groups; 2821 } else { 2822 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 2823 min(8, ring_per_group); 2824 } 2825 2826 break; 2827 2828 case ixgbe_mac_82599EB: 2829 /* 2830 * 82599 supports the following combination: 2831 * vmdq no. x rss no. 2832 * [33..64] x [1..2] 2833 * [2..32] x [1..4] 2834 * 1 x [1..16] 2835 * However 8 rss queue per pool (vmdq) is sufficient for 2836 * most cases. 2837 */ 2838 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2839 if (ixgbe->num_rx_groups == 1) { 2840 ixgbe->num_rx_rings = min(8, ring_per_group); 2841 } else if (ixgbe->num_rx_groups <= 32) { 2842 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 2843 min(4, ring_per_group); 2844 } else if (ixgbe->num_rx_groups <= 64) { 2845 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 2846 min(2, ring_per_group); 2847 } 2848 2849 break; 2850 2851 default: 2852 break; 2853 } 2854 2855 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2856 2857 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) { 2858 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 2859 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) { 2860 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ; 2861 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) { 2862 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS; 2863 } else { 2864 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS; 2865 } 2866 2867 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d", 2868 ixgbe->num_rx_groups, ixgbe->num_rx_rings); 2869 } 2870 2871 /* 2872 * ixgbe_get_conf - Get driver configurations set in driver.conf. 2873 * 2874 * This routine gets user-configured values out of the configuration 2875 * file ixgbe.conf. 2876 * 2877 * For each configurable value, there is a minimum, a maximum, and a 2878 * default. 2879 * If user does not configure a value, use the default. 2880 * If user configures below the minimum, use the minumum. 2881 * If user configures above the maximum, use the maxumum. 2882 */ 2883 static void 2884 ixgbe_get_conf(ixgbe_t *ixgbe) 2885 { 2886 struct ixgbe_hw *hw = &ixgbe->hw; 2887 uint32_t flow_control; 2888 2889 /* 2890 * ixgbe driver supports the following user configurations: 2891 * 2892 * Jumbo frame configuration: 2893 * default_mtu 2894 * 2895 * Ethernet flow control configuration: 2896 * flow_control 2897 * 2898 * Multiple rings configurations: 2899 * tx_queue_number 2900 * tx_ring_size 2901 * rx_queue_number 2902 * rx_ring_size 2903 * 2904 * Call ixgbe_get_prop() to get the value for a specific 2905 * configuration parameter. 2906 */ 2907 2908 /* 2909 * Jumbo frame configuration - max_frame_size controls host buffer 2910 * allocation, so includes MTU, ethernet header, vlan tag and 2911 * frame check sequence. 2912 */ 2913 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU, 2914 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU); 2915 2916 ixgbe->max_frame_size = ixgbe->default_mtu + 2917 sizeof (struct ether_vlan_header) + ETHERFCSL; 2918 2919 /* 2920 * Ethernet flow control configuration 2921 */ 2922 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL, 2923 ixgbe_fc_none, 3, ixgbe_fc_none); 2924 if (flow_control == 3) 2925 flow_control = ixgbe_fc_default; 2926 2927 /* 2928 * fc.requested mode is what the user requests. After autoneg, 2929 * fc.current_mode will be the flow_control mode that was negotiated. 2930 */ 2931 hw->fc.requested_mode = flow_control; 2932 2933 /* 2934 * Multiple rings configurations 2935 */ 2936 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM, 2937 ixgbe->capab->min_tx_que_num, 2938 ixgbe->capab->max_tx_que_num, 2939 ixgbe->capab->def_tx_que_num); 2940 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE, 2941 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE); 2942 2943 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM, 2944 ixgbe->capab->min_rx_que_num, 2945 ixgbe->capab->max_rx_que_num, 2946 ixgbe->capab->def_rx_que_num); 2947 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE, 2948 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE); 2949 2950 /* 2951 * Multiple groups configuration 2952 */ 2953 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM, 2954 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num, 2955 ixgbe->capab->def_rx_grp_num); 2956 2957 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE, 2958 0, 1, DEFAULT_MR_ENABLE); 2959 2960 if (ixgbe->mr_enable == B_FALSE) { 2961 ixgbe->num_tx_rings = 1; 2962 ixgbe->num_rx_rings = 1; 2963 ixgbe->num_rx_groups = 1; 2964 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 2965 } else { 2966 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 2967 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1); 2968 /* 2969 * The combination of num_rx_rings and num_rx_groups 2970 * may be not supported by h/w. We need to adjust 2971 * them to appropriate values. 2972 */ 2973 ixgbe_setup_vmdq_rss_conf(ixgbe); 2974 } 2975 2976 /* 2977 * Tunable used to force an interrupt type. The only use is 2978 * for testing of the lesser interrupt types. 2979 * 0 = don't force interrupt type 2980 * 1 = force interrupt type MSI-X 2981 * 2 = force interrupt type MSI 2982 * 3 = force interrupt type Legacy 2983 */ 2984 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE, 2985 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE); 2986 2987 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE, 2988 0, 1, DEFAULT_TX_HCKSUM_ENABLE); 2989 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE, 2990 0, 1, DEFAULT_RX_HCKSUM_ENABLE); 2991 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE, 2992 0, 1, DEFAULT_LSO_ENABLE); 2993 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE, 2994 0, 1, DEFAULT_LRO_ENABLE); 2995 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE, 2996 0, 1, DEFAULT_TX_HEAD_WB_ENABLE); 2997 2998 /* Head Write Back not recommended for 82599 */ 2999 if (hw->mac.type >= ixgbe_mac_82599EB) { 3000 ixgbe->tx_head_wb_enable = B_FALSE; 3001 } 3002 3003 /* 3004 * ixgbe LSO needs the tx h/w checksum support. 3005 * LSO will be disabled if tx h/w checksum is not 3006 * enabled. 3007 */ 3008 if (ixgbe->tx_hcksum_enable == B_FALSE) { 3009 ixgbe->lso_enable = B_FALSE; 3010 } 3011 3012 /* 3013 * ixgbe LRO needs the rx h/w checksum support. 3014 * LRO will be disabled if rx h/w checksum is not 3015 * enabled. 3016 */ 3017 if (ixgbe->rx_hcksum_enable == B_FALSE) { 3018 ixgbe->lro_enable = B_FALSE; 3019 } 3020 3021 /* 3022 * ixgbe LRO only been supported by 82599 now 3023 */ 3024 if (hw->mac.type != ixgbe_mac_82599EB) { 3025 ixgbe->lro_enable = B_FALSE; 3026 } 3027 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD, 3028 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD, 3029 DEFAULT_TX_COPY_THRESHOLD); 3030 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe, 3031 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD, 3032 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD); 3033 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe, 3034 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD, 3035 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD); 3036 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe, 3037 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD, 3038 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD); 3039 3040 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD, 3041 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD, 3042 DEFAULT_RX_COPY_THRESHOLD); 3043 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR, 3044 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR, 3045 DEFAULT_RX_LIMIT_PER_INTR); 3046 3047 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING, 3048 ixgbe->capab->min_intr_throttle, 3049 ixgbe->capab->max_intr_throttle, 3050 ixgbe->capab->def_intr_throttle); 3051 /* 3052 * 82599 requires the interupt throttling rate is 3053 * a multiple of 8. This is enforced by the register 3054 * definiton. 3055 */ 3056 if (hw->mac.type == ixgbe_mac_82599EB) 3057 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8; 3058 } 3059 3060 static void 3061 ixgbe_init_params(ixgbe_t *ixgbe) 3062 { 3063 ixgbe->param_en_10000fdx_cap = 1; 3064 ixgbe->param_en_1000fdx_cap = 1; 3065 ixgbe->param_en_100fdx_cap = 1; 3066 ixgbe->param_adv_10000fdx_cap = 1; 3067 ixgbe->param_adv_1000fdx_cap = 1; 3068 ixgbe->param_adv_100fdx_cap = 1; 3069 3070 ixgbe->param_pause_cap = 1; 3071 ixgbe->param_asym_pause_cap = 1; 3072 ixgbe->param_rem_fault = 0; 3073 3074 ixgbe->param_adv_autoneg_cap = 1; 3075 ixgbe->param_adv_pause_cap = 1; 3076 ixgbe->param_adv_asym_pause_cap = 1; 3077 ixgbe->param_adv_rem_fault = 0; 3078 3079 ixgbe->param_lp_10000fdx_cap = 0; 3080 ixgbe->param_lp_1000fdx_cap = 0; 3081 ixgbe->param_lp_100fdx_cap = 0; 3082 ixgbe->param_lp_autoneg_cap = 0; 3083 ixgbe->param_lp_pause_cap = 0; 3084 ixgbe->param_lp_asym_pause_cap = 0; 3085 ixgbe->param_lp_rem_fault = 0; 3086 } 3087 3088 /* 3089 * ixgbe_get_prop - Get a property value out of the configuration file 3090 * ixgbe.conf. 3091 * 3092 * Caller provides the name of the property, a default value, a minimum 3093 * value, and a maximum value. 3094 * 3095 * Return configured value of the property, with default, minimum and 3096 * maximum properly applied. 3097 */ 3098 static int 3099 ixgbe_get_prop(ixgbe_t *ixgbe, 3100 char *propname, /* name of the property */ 3101 int minval, /* minimum acceptable value */ 3102 int maxval, /* maximim acceptable value */ 3103 int defval) /* default value */ 3104 { 3105 int value; 3106 3107 /* 3108 * Call ddi_prop_get_int() to read the conf settings 3109 */ 3110 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip, 3111 DDI_PROP_DONTPASS, propname, defval); 3112 if (value > maxval) 3113 value = maxval; 3114 3115 if (value < minval) 3116 value = minval; 3117 3118 return (value); 3119 } 3120 3121 /* 3122 * ixgbe_driver_setup_link - Using the link properties to setup the link. 3123 */ 3124 int 3125 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw) 3126 { 3127 u32 autoneg_advertised = 0; 3128 3129 /* 3130 * No half duplex support with 10Gb parts 3131 */ 3132 if (ixgbe->param_adv_10000fdx_cap == 1) 3133 autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 3134 3135 if (ixgbe->param_adv_1000fdx_cap == 1) 3136 autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 3137 3138 if (ixgbe->param_adv_100fdx_cap == 1) 3139 autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; 3140 3141 if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) { 3142 ixgbe_notice(ixgbe, "Invalid link settings. Setup link " 3143 "to autonegotiation with full link capabilities."); 3144 3145 autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL | 3146 IXGBE_LINK_SPEED_1GB_FULL | 3147 IXGBE_LINK_SPEED_100_FULL; 3148 } 3149 3150 if (setup_hw) { 3151 if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised, 3152 ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) { 3153 ixgbe_notice(ixgbe, "Setup link failed on this " 3154 "device."); 3155 return (IXGBE_FAILURE); 3156 } 3157 } 3158 3159 return (IXGBE_SUCCESS); 3160 } 3161 3162 /* 3163 * ixgbe_driver_link_check - Link status processing. 3164 * 3165 * This function can be called in both kernel context and interrupt context 3166 */ 3167 static void 3168 ixgbe_driver_link_check(ixgbe_t *ixgbe) 3169 { 3170 struct ixgbe_hw *hw = &ixgbe->hw; 3171 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 3172 boolean_t link_up = B_FALSE; 3173 boolean_t link_changed = B_FALSE; 3174 3175 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3176 3177 (void) ixgbe_check_link(hw, &speed, &link_up, false); 3178 if (link_up) { 3179 ixgbe->link_check_complete = B_TRUE; 3180 3181 /* Link is up, enable flow control settings */ 3182 (void) ixgbe_fc_enable(hw, 0); 3183 3184 /* 3185 * The Link is up, check whether it was marked as down earlier 3186 */ 3187 if (ixgbe->link_state != LINK_STATE_UP) { 3188 switch (speed) { 3189 case IXGBE_LINK_SPEED_10GB_FULL: 3190 ixgbe->link_speed = SPEED_10GB; 3191 break; 3192 case IXGBE_LINK_SPEED_1GB_FULL: 3193 ixgbe->link_speed = SPEED_1GB; 3194 break; 3195 case IXGBE_LINK_SPEED_100_FULL: 3196 ixgbe->link_speed = SPEED_100; 3197 } 3198 ixgbe->link_duplex = LINK_DUPLEX_FULL; 3199 ixgbe->link_state = LINK_STATE_UP; 3200 link_changed = B_TRUE; 3201 } 3202 } else { 3203 if (ixgbe->link_check_complete == B_TRUE || 3204 (ixgbe->link_check_complete == B_FALSE && 3205 gethrtime() >= ixgbe->link_check_hrtime)) { 3206 /* 3207 * The link is really down 3208 */ 3209 ixgbe->link_check_complete = B_TRUE; 3210 3211 if (ixgbe->link_state != LINK_STATE_DOWN) { 3212 ixgbe->link_speed = 0; 3213 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN; 3214 ixgbe->link_state = LINK_STATE_DOWN; 3215 link_changed = B_TRUE; 3216 } 3217 } 3218 } 3219 3220 /* 3221 * this is only reached after a link-status-change interrupt 3222 * so always get new phy state 3223 */ 3224 ixgbe_get_hw_state(ixgbe); 3225 3226 /* 3227 * If we are in an interrupt context, need to re-enable the 3228 * interrupt, which was automasked 3229 */ 3230 if (servicing_interrupt() != 0) { 3231 ixgbe->eims |= IXGBE_EICR_LSC; 3232 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3233 } 3234 3235 if (link_changed) { 3236 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3237 } 3238 } 3239 3240 /* 3241 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599. 3242 */ 3243 static void 3244 ixgbe_sfp_check(void *arg) 3245 { 3246 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3247 uint32_t eicr = ixgbe->eicr; 3248 struct ixgbe_hw *hw = &ixgbe->hw; 3249 3250 mutex_enter(&ixgbe->gen_lock); 3251 if (eicr & IXGBE_EICR_GPI_SDP1) { 3252 /* clear the interrupt */ 3253 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 3254 3255 /* if link up, do multispeed fiber setup */ 3256 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3257 B_TRUE, B_TRUE); 3258 ixgbe_driver_link_check(ixgbe); 3259 } else if (eicr & IXGBE_EICR_GPI_SDP2) { 3260 /* clear the interrupt */ 3261 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); 3262 3263 /* if link up, do sfp module setup */ 3264 (void) hw->mac.ops.setup_sfp(hw); 3265 3266 /* do multispeed fiber setup */ 3267 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3268 B_TRUE, B_TRUE); 3269 ixgbe_driver_link_check(ixgbe); 3270 } 3271 mutex_exit(&ixgbe->gen_lock); 3272 } 3273 3274 /* 3275 * ixgbe_link_timer - timer for link status detection 3276 */ 3277 static void 3278 ixgbe_link_timer(void *arg) 3279 { 3280 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3281 3282 mutex_enter(&ixgbe->gen_lock); 3283 ixgbe_driver_link_check(ixgbe); 3284 mutex_exit(&ixgbe->gen_lock); 3285 } 3286 3287 /* 3288 * ixgbe_local_timer - Driver watchdog function. 3289 * 3290 * This function will handle the transmit stall check and other routines. 3291 */ 3292 static void 3293 ixgbe_local_timer(void *arg) 3294 { 3295 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3296 3297 if (ixgbe->ixgbe_state & IXGBE_ERROR) { 3298 ixgbe->reset_count++; 3299 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 3300 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 3301 ixgbe_restart_watchdog_timer(ixgbe); 3302 return; 3303 } 3304 3305 if (ixgbe_stall_check(ixgbe)) { 3306 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL); 3307 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3308 3309 ixgbe->reset_count++; 3310 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 3311 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 3312 } 3313 3314 ixgbe_restart_watchdog_timer(ixgbe); 3315 } 3316 3317 /* 3318 * ixgbe_stall_check - Check for transmit stall. 3319 * 3320 * This function checks if the adapter is stalled (in transmit). 3321 * 3322 * It is called each time the watchdog timeout is invoked. 3323 * If the transmit descriptor reclaim continuously fails, 3324 * the watchdog value will increment by 1. If the watchdog 3325 * value exceeds the threshold, the ixgbe is assumed to 3326 * have stalled and need to be reset. 3327 */ 3328 static boolean_t 3329 ixgbe_stall_check(ixgbe_t *ixgbe) 3330 { 3331 ixgbe_tx_ring_t *tx_ring; 3332 boolean_t result; 3333 int i; 3334 3335 if (ixgbe->link_state != LINK_STATE_UP) 3336 return (B_FALSE); 3337 3338 /* 3339 * If any tx ring is stalled, we'll reset the chipset 3340 */ 3341 result = B_FALSE; 3342 for (i = 0; i < ixgbe->num_tx_rings; i++) { 3343 tx_ring = &ixgbe->tx_rings[i]; 3344 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) { 3345 tx_ring->tx_recycle(tx_ring); 3346 } 3347 3348 if (tx_ring->recycle_fail > 0) 3349 tx_ring->stall_watchdog++; 3350 else 3351 tx_ring->stall_watchdog = 0; 3352 3353 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { 3354 result = B_TRUE; 3355 break; 3356 } 3357 } 3358 3359 if (result) { 3360 tx_ring->stall_watchdog = 0; 3361 tx_ring->recycle_fail = 0; 3362 } 3363 3364 return (result); 3365 } 3366 3367 3368 /* 3369 * is_valid_mac_addr - Check if the mac address is valid. 3370 */ 3371 static boolean_t 3372 is_valid_mac_addr(uint8_t *mac_addr) 3373 { 3374 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 3375 const uint8_t addr_test2[6] = 3376 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3377 3378 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 3379 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 3380 return (B_FALSE); 3381 3382 return (B_TRUE); 3383 } 3384 3385 static boolean_t 3386 ixgbe_find_mac_address(ixgbe_t *ixgbe) 3387 { 3388 #ifdef __sparc 3389 struct ixgbe_hw *hw = &ixgbe->hw; 3390 uchar_t *bytes; 3391 struct ether_addr sysaddr; 3392 uint_t nelts; 3393 int err; 3394 boolean_t found = B_FALSE; 3395 3396 /* 3397 * The "vendor's factory-set address" may already have 3398 * been extracted from the chip, but if the property 3399 * "local-mac-address" is set we use that instead. 3400 * 3401 * We check whether it looks like an array of 6 3402 * bytes (which it should, if OBP set it). If we can't 3403 * make sense of it this way, we'll ignore it. 3404 */ 3405 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 3406 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 3407 if (err == DDI_PROP_SUCCESS) { 3408 if (nelts == ETHERADDRL) { 3409 while (nelts--) 3410 hw->mac.addr[nelts] = bytes[nelts]; 3411 found = B_TRUE; 3412 } 3413 ddi_prop_free(bytes); 3414 } 3415 3416 /* 3417 * Look up the OBP property "local-mac-address?". If the user has set 3418 * 'local-mac-address? = false', use "the system address" instead. 3419 */ 3420 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0, 3421 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 3422 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 3423 if (localetheraddr(NULL, &sysaddr) != 0) { 3424 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 3425 found = B_TRUE; 3426 } 3427 } 3428 ddi_prop_free(bytes); 3429 } 3430 3431 /* 3432 * Finally(!), if there's a valid "mac-address" property (created 3433 * if we netbooted from this interface), we must use this instead 3434 * of any of the above to ensure that the NFS/install server doesn't 3435 * get confused by the address changing as Solaris takes over! 3436 */ 3437 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 3438 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 3439 if (err == DDI_PROP_SUCCESS) { 3440 if (nelts == ETHERADDRL) { 3441 while (nelts--) 3442 hw->mac.addr[nelts] = bytes[nelts]; 3443 found = B_TRUE; 3444 } 3445 ddi_prop_free(bytes); 3446 } 3447 3448 if (found) { 3449 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 3450 return (B_TRUE); 3451 } 3452 #else 3453 _NOTE(ARGUNUSED(ixgbe)); 3454 #endif 3455 3456 return (B_TRUE); 3457 } 3458 3459 #pragma inline(ixgbe_arm_watchdog_timer) 3460 static void 3461 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe) 3462 { 3463 /* 3464 * Fire a watchdog timer 3465 */ 3466 ixgbe->watchdog_tid = 3467 timeout(ixgbe_local_timer, 3468 (void *)ixgbe, 1 * drv_usectohz(1000000)); 3469 3470 } 3471 3472 /* 3473 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer. 3474 */ 3475 void 3476 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe) 3477 { 3478 mutex_enter(&ixgbe->watchdog_lock); 3479 3480 if (!ixgbe->watchdog_enable) { 3481 ixgbe->watchdog_enable = B_TRUE; 3482 ixgbe->watchdog_start = B_TRUE; 3483 ixgbe_arm_watchdog_timer(ixgbe); 3484 } 3485 3486 mutex_exit(&ixgbe->watchdog_lock); 3487 } 3488 3489 /* 3490 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer. 3491 */ 3492 void 3493 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe) 3494 { 3495 timeout_id_t tid; 3496 3497 mutex_enter(&ixgbe->watchdog_lock); 3498 3499 ixgbe->watchdog_enable = B_FALSE; 3500 ixgbe->watchdog_start = B_FALSE; 3501 tid = ixgbe->watchdog_tid; 3502 ixgbe->watchdog_tid = 0; 3503 3504 mutex_exit(&ixgbe->watchdog_lock); 3505 3506 if (tid != 0) 3507 (void) untimeout(tid); 3508 } 3509 3510 /* 3511 * ixgbe_start_watchdog_timer - Start the driver watchdog timer. 3512 */ 3513 void 3514 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe) 3515 { 3516 mutex_enter(&ixgbe->watchdog_lock); 3517 3518 if (ixgbe->watchdog_enable) { 3519 if (!ixgbe->watchdog_start) { 3520 ixgbe->watchdog_start = B_TRUE; 3521 ixgbe_arm_watchdog_timer(ixgbe); 3522 } 3523 } 3524 3525 mutex_exit(&ixgbe->watchdog_lock); 3526 } 3527 3528 /* 3529 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer. 3530 */ 3531 static void 3532 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe) 3533 { 3534 mutex_enter(&ixgbe->watchdog_lock); 3535 3536 if (ixgbe->watchdog_start) 3537 ixgbe_arm_watchdog_timer(ixgbe); 3538 3539 mutex_exit(&ixgbe->watchdog_lock); 3540 } 3541 3542 /* 3543 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer. 3544 */ 3545 void 3546 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe) 3547 { 3548 timeout_id_t tid; 3549 3550 mutex_enter(&ixgbe->watchdog_lock); 3551 3552 ixgbe->watchdog_start = B_FALSE; 3553 tid = ixgbe->watchdog_tid; 3554 ixgbe->watchdog_tid = 0; 3555 3556 mutex_exit(&ixgbe->watchdog_lock); 3557 3558 if (tid != 0) 3559 (void) untimeout(tid); 3560 } 3561 3562 /* 3563 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts. 3564 */ 3565 static void 3566 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe) 3567 { 3568 struct ixgbe_hw *hw = &ixgbe->hw; 3569 3570 /* 3571 * mask all interrupts off 3572 */ 3573 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff); 3574 3575 /* 3576 * for MSI-X, also disable autoclear 3577 */ 3578 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 3579 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0); 3580 } 3581 3582 IXGBE_WRITE_FLUSH(hw); 3583 } 3584 3585 /* 3586 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts. 3587 */ 3588 static void 3589 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe) 3590 { 3591 struct ixgbe_hw *hw = &ixgbe->hw; 3592 uint32_t eiac, eiam; 3593 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3594 3595 /* interrupt types to enable */ 3596 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */ 3597 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */ 3598 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */ 3599 3600 /* enable automask on "other" causes that this adapter can generate */ 3601 eiam = ixgbe->capab->other_intr; 3602 3603 /* 3604 * msi-x mode 3605 */ 3606 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 3607 /* enable autoclear but not on bits 29:20 */ 3608 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR); 3609 3610 /* general purpose interrupt enable */ 3611 gpie |= (IXGBE_GPIE_MSIX_MODE 3612 | IXGBE_GPIE_PBA_SUPPORT 3613 | IXGBE_GPIE_OCD 3614 | IXGBE_GPIE_EIAME); 3615 /* 3616 * non-msi-x mode 3617 */ 3618 } else { 3619 3620 /* disable autoclear, leave gpie at default */ 3621 eiac = 0; 3622 3623 /* 3624 * General purpose interrupt enable. 3625 * For 82599, extended interrupt automask enable 3626 * only in MSI or MSI-X mode 3627 */ 3628 if ((hw->mac.type < ixgbe_mac_82599EB) || 3629 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) { 3630 gpie |= IXGBE_GPIE_EIAME; 3631 } 3632 } 3633 /* Enable specific interrupts for 82599 */ 3634 if (hw->mac.type == ixgbe_mac_82599EB) { 3635 gpie |= IXGBE_SDP2_GPIEN; /* pluggable optics intr */ 3636 gpie |= IXGBE_SDP1_GPIEN; /* LSC interrupt */ 3637 } 3638 /* Enable RSC Dealy 8us for 82599 */ 3639 if (ixgbe->lro_enable) { 3640 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT); 3641 } 3642 /* write to interrupt control registers */ 3643 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3644 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac); 3645 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam); 3646 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3647 IXGBE_WRITE_FLUSH(hw); 3648 } 3649 3650 /* 3651 * ixgbe_loopback_ioctl - Loopback support. 3652 */ 3653 enum ioc_reply 3654 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp) 3655 { 3656 lb_info_sz_t *lbsp; 3657 lb_property_t *lbpp; 3658 uint32_t *lbmp; 3659 uint32_t size; 3660 uint32_t value; 3661 3662 if (mp->b_cont == NULL) 3663 return (IOC_INVAL); 3664 3665 switch (iocp->ioc_cmd) { 3666 default: 3667 return (IOC_INVAL); 3668 3669 case LB_GET_INFO_SIZE: 3670 size = sizeof (lb_info_sz_t); 3671 if (iocp->ioc_count != size) 3672 return (IOC_INVAL); 3673 3674 value = sizeof (lb_normal); 3675 value += sizeof (lb_mac); 3676 value += sizeof (lb_external); 3677 3678 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 3679 *lbsp = value; 3680 break; 3681 3682 case LB_GET_INFO: 3683 value = sizeof (lb_normal); 3684 value += sizeof (lb_mac); 3685 value += sizeof (lb_external); 3686 3687 size = value; 3688 if (iocp->ioc_count != size) 3689 return (IOC_INVAL); 3690 3691 value = 0; 3692 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 3693 3694 lbpp[value++] = lb_normal; 3695 lbpp[value++] = lb_mac; 3696 lbpp[value++] = lb_external; 3697 break; 3698 3699 case LB_GET_MODE: 3700 size = sizeof (uint32_t); 3701 if (iocp->ioc_count != size) 3702 return (IOC_INVAL); 3703 3704 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 3705 *lbmp = ixgbe->loopback_mode; 3706 break; 3707 3708 case LB_SET_MODE: 3709 size = 0; 3710 if (iocp->ioc_count != sizeof (uint32_t)) 3711 return (IOC_INVAL); 3712 3713 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 3714 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp)) 3715 return (IOC_INVAL); 3716 break; 3717 } 3718 3719 iocp->ioc_count = size; 3720 iocp->ioc_error = 0; 3721 3722 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3723 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3724 return (IOC_INVAL); 3725 } 3726 3727 return (IOC_REPLY); 3728 } 3729 3730 /* 3731 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode. 3732 */ 3733 static boolean_t 3734 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode) 3735 { 3736 if (mode == ixgbe->loopback_mode) 3737 return (B_TRUE); 3738 3739 ixgbe->loopback_mode = mode; 3740 3741 if (mode == IXGBE_LB_NONE) { 3742 /* 3743 * Reset the chip 3744 */ 3745 (void) ixgbe_reset(ixgbe); 3746 return (B_TRUE); 3747 } 3748 3749 mutex_enter(&ixgbe->gen_lock); 3750 3751 switch (mode) { 3752 default: 3753 mutex_exit(&ixgbe->gen_lock); 3754 return (B_FALSE); 3755 3756 case IXGBE_LB_EXTERNAL: 3757 break; 3758 3759 case IXGBE_LB_INTERNAL_MAC: 3760 ixgbe_set_internal_mac_loopback(ixgbe); 3761 break; 3762 } 3763 3764 mutex_exit(&ixgbe->gen_lock); 3765 3766 return (B_TRUE); 3767 } 3768 3769 /* 3770 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode. 3771 */ 3772 static void 3773 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe) 3774 { 3775 struct ixgbe_hw *hw; 3776 uint32_t reg; 3777 uint8_t atlas; 3778 3779 hw = &ixgbe->hw; 3780 3781 /* 3782 * Setup MAC loopback 3783 */ 3784 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0); 3785 reg |= IXGBE_HLREG0_LPBK; 3786 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg); 3787 3788 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 3789 reg &= ~IXGBE_AUTOC_LMS_MASK; 3790 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 3791 3792 /* 3793 * Disable Atlas Tx lanes to keep packets in loopback and not on wire 3794 */ 3795 if (hw->mac.type == ixgbe_mac_82598EB) { 3796 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 3797 &atlas); 3798 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 3799 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 3800 atlas); 3801 3802 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 3803 &atlas); 3804 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 3805 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 3806 atlas); 3807 3808 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 3809 &atlas); 3810 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 3811 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 3812 atlas); 3813 3814 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 3815 &atlas); 3816 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 3817 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 3818 atlas); 3819 } 3820 } 3821 3822 #pragma inline(ixgbe_intr_rx_work) 3823 /* 3824 * ixgbe_intr_rx_work - RX processing of ISR. 3825 */ 3826 static void 3827 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring) 3828 { 3829 mblk_t *mp; 3830 3831 mutex_enter(&rx_ring->rx_lock); 3832 3833 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 3834 mutex_exit(&rx_ring->rx_lock); 3835 3836 if (mp != NULL) 3837 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 3838 rx_ring->ring_gen_num); 3839 } 3840 3841 #pragma inline(ixgbe_intr_tx_work) 3842 /* 3843 * ixgbe_intr_tx_work - TX processing of ISR. 3844 */ 3845 static void 3846 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring) 3847 { 3848 ixgbe_t *ixgbe = tx_ring->ixgbe; 3849 3850 /* 3851 * Recycle the tx descriptors 3852 */ 3853 tx_ring->tx_recycle(tx_ring); 3854 3855 /* 3856 * Schedule the re-transmit 3857 */ 3858 if (tx_ring->reschedule && 3859 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) { 3860 tx_ring->reschedule = B_FALSE; 3861 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl, 3862 tx_ring->ring_handle); 3863 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 3864 } 3865 } 3866 3867 #pragma inline(ixgbe_intr_other_work) 3868 /* 3869 * ixgbe_intr_other_work - Process interrupt types other than tx/rx 3870 */ 3871 static void 3872 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr) 3873 { 3874 struct ixgbe_hw *hw = &ixgbe->hw; 3875 3876 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3877 3878 /* 3879 * handle link status change 3880 */ 3881 if (eicr & IXGBE_EICR_LSC) { 3882 ixgbe_driver_link_check(ixgbe); 3883 } 3884 3885 /* 3886 * check for fan failure on adapters with fans 3887 */ 3888 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 3889 (eicr & IXGBE_EICR_GPI_SDP1)) { 3890 if (hw->mac.type < ixgbe_mac_82599EB) { 3891 ixgbe_log(ixgbe, 3892 "Fan has stopped, replace the adapter\n"); 3893 3894 /* re-enable the interrupt, which was automasked */ 3895 ixgbe->eims |= IXGBE_EICR_GPI_SDP1; 3896 } 3897 } 3898 3899 /* 3900 * Do SFP check for 82599 3901 */ 3902 if (hw->mac.type == ixgbe_mac_82599EB) { 3903 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq, 3904 ixgbe_sfp_check, (void *)ixgbe, 3905 DDI_NOSLEEP)) != DDI_SUCCESS) { 3906 ixgbe_log(ixgbe, "No memory available to dispatch " 3907 "taskq for SFP check"); 3908 } 3909 3910 /* 3911 * We need to fully re-check the link later. 3912 */ 3913 ixgbe->link_check_complete = B_FALSE; 3914 ixgbe->link_check_hrtime = gethrtime() + 3915 (IXGBE_LINK_UP_TIME * 100000000ULL); 3916 } 3917 } 3918 3919 /* 3920 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts. 3921 */ 3922 static uint_t 3923 ixgbe_intr_legacy(void *arg1, void *arg2) 3924 { 3925 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 3926 struct ixgbe_hw *hw = &ixgbe->hw; 3927 ixgbe_tx_ring_t *tx_ring; 3928 ixgbe_rx_ring_t *rx_ring; 3929 uint32_t eicr; 3930 mblk_t *mp; 3931 boolean_t tx_reschedule; 3932 uint_t result; 3933 3934 _NOTE(ARGUNUSED(arg2)); 3935 3936 mutex_enter(&ixgbe->gen_lock); 3937 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 3938 mutex_exit(&ixgbe->gen_lock); 3939 return (DDI_INTR_UNCLAIMED); 3940 } 3941 3942 mp = NULL; 3943 tx_reschedule = B_FALSE; 3944 3945 /* 3946 * Any bit set in eicr: claim this interrupt 3947 */ 3948 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3949 3950 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3951 mutex_exit(&ixgbe->gen_lock); 3952 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3953 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 3954 return (DDI_INTR_CLAIMED); 3955 } 3956 3957 if (eicr) { 3958 /* 3959 * For legacy interrupt, we have only one interrupt, 3960 * so we have only one rx ring and one tx ring enabled. 3961 */ 3962 ASSERT(ixgbe->num_rx_rings == 1); 3963 ASSERT(ixgbe->num_tx_rings == 1); 3964 3965 /* 3966 * For legacy interrupt, rx rings[0] will use RTxQ[0]. 3967 */ 3968 if (eicr & 0x1) { 3969 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE; 3970 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 3971 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 3972 /* 3973 * Clean the rx descriptors 3974 */ 3975 rx_ring = &ixgbe->rx_rings[0]; 3976 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 3977 } 3978 3979 /* 3980 * For legacy interrupt, tx rings[0] will use RTxQ[1]. 3981 */ 3982 if (eicr & 0x2) { 3983 /* 3984 * Recycle the tx descriptors 3985 */ 3986 tx_ring = &ixgbe->tx_rings[0]; 3987 tx_ring->tx_recycle(tx_ring); 3988 3989 /* 3990 * Schedule the re-transmit 3991 */ 3992 tx_reschedule = (tx_ring->reschedule && 3993 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)); 3994 } 3995 3996 /* any interrupt type other than tx/rx */ 3997 if (eicr & ixgbe->capab->other_intr) { 3998 if (hw->mac.type < ixgbe_mac_82599EB) { 3999 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4000 } 4001 if (hw->mac.type == ixgbe_mac_82599EB) { 4002 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4003 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4004 } 4005 ixgbe_intr_other_work(ixgbe, eicr); 4006 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4007 } 4008 4009 mutex_exit(&ixgbe->gen_lock); 4010 4011 result = DDI_INTR_CLAIMED; 4012 } else { 4013 mutex_exit(&ixgbe->gen_lock); 4014 4015 /* 4016 * No interrupt cause bits set: don't claim this interrupt. 4017 */ 4018 result = DDI_INTR_UNCLAIMED; 4019 } 4020 4021 /* re-enable the interrupts which were automasked */ 4022 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4023 4024 /* 4025 * Do the following work outside of the gen_lock 4026 */ 4027 if (mp != NULL) { 4028 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4029 rx_ring->ring_gen_num); 4030 } 4031 4032 if (tx_reschedule) { 4033 tx_ring->reschedule = B_FALSE; 4034 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle); 4035 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 4036 } 4037 4038 return (result); 4039 } 4040 4041 /* 4042 * ixgbe_intr_msi - Interrupt handler for MSI. 4043 */ 4044 static uint_t 4045 ixgbe_intr_msi(void *arg1, void *arg2) 4046 { 4047 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4048 struct ixgbe_hw *hw = &ixgbe->hw; 4049 uint32_t eicr; 4050 4051 _NOTE(ARGUNUSED(arg2)); 4052 4053 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4054 4055 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4056 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4057 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4058 return (DDI_INTR_CLAIMED); 4059 } 4060 4061 /* 4062 * For MSI interrupt, we have only one vector, 4063 * so we have only one rx ring and one tx ring enabled. 4064 */ 4065 ASSERT(ixgbe->num_rx_rings == 1); 4066 ASSERT(ixgbe->num_tx_rings == 1); 4067 4068 /* 4069 * For MSI interrupt, rx rings[0] will use RTxQ[0]. 4070 */ 4071 if (eicr & 0x1) { 4072 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]); 4073 } 4074 4075 /* 4076 * For MSI interrupt, tx rings[0] will use RTxQ[1]. 4077 */ 4078 if (eicr & 0x2) { 4079 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 4080 } 4081 4082 /* any interrupt type other than tx/rx */ 4083 if (eicr & ixgbe->capab->other_intr) { 4084 mutex_enter(&ixgbe->gen_lock); 4085 if (hw->mac.type < ixgbe_mac_82599EB) { 4086 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4087 } 4088 if (hw->mac.type == ixgbe_mac_82599EB) { 4089 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4090 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4091 } 4092 ixgbe_intr_other_work(ixgbe, eicr); 4093 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4094 mutex_exit(&ixgbe->gen_lock); 4095 } 4096 4097 /* re-enable the interrupts which were automasked */ 4098 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4099 4100 return (DDI_INTR_CLAIMED); 4101 } 4102 4103 /* 4104 * ixgbe_intr_msix - Interrupt handler for MSI-X. 4105 */ 4106 static uint_t 4107 ixgbe_intr_msix(void *arg1, void *arg2) 4108 { 4109 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1; 4110 ixgbe_t *ixgbe = vect->ixgbe; 4111 struct ixgbe_hw *hw = &ixgbe->hw; 4112 uint32_t eicr; 4113 int r_idx = 0; 4114 4115 _NOTE(ARGUNUSED(arg2)); 4116 4117 /* 4118 * Clean each rx ring that has its bit set in the map 4119 */ 4120 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1)); 4121 while (r_idx >= 0) { 4122 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]); 4123 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 4124 (ixgbe->num_rx_rings - 1)); 4125 } 4126 4127 /* 4128 * Clean each tx ring that has its bit set in the map 4129 */ 4130 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1)); 4131 while (r_idx >= 0) { 4132 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]); 4133 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 4134 (ixgbe->num_tx_rings - 1)); 4135 } 4136 4137 4138 /* 4139 * Clean other interrupt (link change) that has its bit set in the map 4140 */ 4141 if (BT_TEST(vect->other_map, 0) == 1) { 4142 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4143 4144 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != 4145 DDI_FM_OK) { 4146 ddi_fm_service_impact(ixgbe->dip, 4147 DDI_SERVICE_DEGRADED); 4148 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4149 return (DDI_INTR_CLAIMED); 4150 } 4151 4152 /* 4153 * Need check cause bits and only other causes will 4154 * be processed 4155 */ 4156 /* any interrupt type other than tx/rx */ 4157 if (eicr & ixgbe->capab->other_intr) { 4158 if (hw->mac.type < ixgbe_mac_82599EB) { 4159 mutex_enter(&ixgbe->gen_lock); 4160 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4161 ixgbe_intr_other_work(ixgbe, eicr); 4162 mutex_exit(&ixgbe->gen_lock); 4163 } else { 4164 if (hw->mac.type == ixgbe_mac_82599EB) { 4165 mutex_enter(&ixgbe->gen_lock); 4166 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 4167 ixgbe_intr_other_work(ixgbe, eicr); 4168 mutex_exit(&ixgbe->gen_lock); 4169 } 4170 } 4171 } 4172 4173 /* re-enable the interrupts which were automasked */ 4174 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4175 } 4176 4177 return (DDI_INTR_CLAIMED); 4178 } 4179 4180 /* 4181 * ixgbe_alloc_intrs - Allocate interrupts for the driver. 4182 * 4183 * Normal sequence is to try MSI-X; if not sucessful, try MSI; 4184 * if not successful, try Legacy. 4185 * ixgbe->intr_force can be used to force sequence to start with 4186 * any of the 3 types. 4187 * If MSI-X is not used, number of tx/rx rings is forced to 1. 4188 */ 4189 static int 4190 ixgbe_alloc_intrs(ixgbe_t *ixgbe) 4191 { 4192 dev_info_t *devinfo; 4193 int intr_types; 4194 int rc; 4195 4196 devinfo = ixgbe->dip; 4197 4198 /* 4199 * Get supported interrupt types 4200 */ 4201 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 4202 4203 if (rc != DDI_SUCCESS) { 4204 ixgbe_log(ixgbe, 4205 "Get supported interrupt types failed: %d", rc); 4206 return (IXGBE_FAILURE); 4207 } 4208 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types); 4209 4210 ixgbe->intr_type = 0; 4211 4212 /* 4213 * Install MSI-X interrupts 4214 */ 4215 if ((intr_types & DDI_INTR_TYPE_MSIX) && 4216 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) { 4217 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX); 4218 if (rc == IXGBE_SUCCESS) 4219 return (IXGBE_SUCCESS); 4220 4221 ixgbe_log(ixgbe, 4222 "Allocate MSI-X failed, trying MSI interrupts..."); 4223 } 4224 4225 /* 4226 * MSI-X not used, force rings and groups to 1 4227 */ 4228 ixgbe->num_rx_rings = 1; 4229 ixgbe->num_rx_groups = 1; 4230 ixgbe->num_tx_rings = 1; 4231 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 4232 ixgbe_log(ixgbe, 4233 "MSI-X not used, force rings and groups number to 1"); 4234 4235 /* 4236 * Install MSI interrupts 4237 */ 4238 if ((intr_types & DDI_INTR_TYPE_MSI) && 4239 (ixgbe->intr_force <= IXGBE_INTR_MSI)) { 4240 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI); 4241 if (rc == IXGBE_SUCCESS) 4242 return (IXGBE_SUCCESS); 4243 4244 ixgbe_log(ixgbe, 4245 "Allocate MSI failed, trying Legacy interrupts..."); 4246 } 4247 4248 /* 4249 * Install legacy interrupts 4250 */ 4251 if (intr_types & DDI_INTR_TYPE_FIXED) { 4252 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED); 4253 if (rc == IXGBE_SUCCESS) 4254 return (IXGBE_SUCCESS); 4255 4256 ixgbe_log(ixgbe, 4257 "Allocate Legacy interrupts failed"); 4258 } 4259 4260 /* 4261 * If none of the 3 types succeeded, return failure 4262 */ 4263 return (IXGBE_FAILURE); 4264 } 4265 4266 /* 4267 * ixgbe_alloc_intr_handles - Allocate interrupt handles. 4268 * 4269 * For legacy and MSI, only 1 handle is needed. For MSI-X, 4270 * if fewer than 2 handles are available, return failure. 4271 * Upon success, this maps the vectors to rx and tx rings for 4272 * interrupts. 4273 */ 4274 static int 4275 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type) 4276 { 4277 dev_info_t *devinfo; 4278 int request, count, actual; 4279 int minimum; 4280 int rc; 4281 uint32_t ring_per_group; 4282 4283 devinfo = ixgbe->dip; 4284 4285 switch (intr_type) { 4286 case DDI_INTR_TYPE_FIXED: 4287 request = 1; /* Request 1 legacy interrupt handle */ 4288 minimum = 1; 4289 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy"); 4290 break; 4291 4292 case DDI_INTR_TYPE_MSI: 4293 request = 1; /* Request 1 MSI interrupt handle */ 4294 minimum = 1; 4295 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI"); 4296 break; 4297 4298 case DDI_INTR_TYPE_MSIX: 4299 /* 4300 * Best number of vectors for the adapter is 4301 * (# rx rings + # tx rings), however we will 4302 * limit the request number. 4303 */ 4304 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings); 4305 if (request > ixgbe->capab->max_ring_vect) 4306 request = ixgbe->capab->max_ring_vect; 4307 minimum = 1; 4308 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X"); 4309 break; 4310 4311 default: 4312 ixgbe_log(ixgbe, 4313 "invalid call to ixgbe_alloc_intr_handles(): %d\n", 4314 intr_type); 4315 return (IXGBE_FAILURE); 4316 } 4317 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d", 4318 request, minimum); 4319 4320 /* 4321 * Get number of supported interrupts 4322 */ 4323 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 4324 if ((rc != DDI_SUCCESS) || (count < minimum)) { 4325 ixgbe_log(ixgbe, 4326 "Get interrupt number failed. Return: %d, count: %d", 4327 rc, count); 4328 return (IXGBE_FAILURE); 4329 } 4330 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count); 4331 4332 actual = 0; 4333 ixgbe->intr_cnt = 0; 4334 ixgbe->intr_cnt_max = 0; 4335 ixgbe->intr_cnt_min = 0; 4336 4337 /* 4338 * Allocate an array of interrupt handles 4339 */ 4340 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t); 4341 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP); 4342 4343 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0, 4344 request, &actual, DDI_INTR_ALLOC_NORMAL); 4345 if (rc != DDI_SUCCESS) { 4346 ixgbe_log(ixgbe, "Allocate interrupts failed. " 4347 "return: %d, request: %d, actual: %d", 4348 rc, request, actual); 4349 goto alloc_handle_fail; 4350 } 4351 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual); 4352 4353 /* 4354 * upper/lower limit of interrupts 4355 */ 4356 ixgbe->intr_cnt = actual; 4357 ixgbe->intr_cnt_max = request; 4358 ixgbe->intr_cnt_min = minimum; 4359 4360 /* 4361 * rss number per group should not exceed the rx interrupt number, 4362 * else need to adjust rx ring number. 4363 */ 4364 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 4365 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0); 4366 if (min(actual, ixgbe->num_rx_rings) < ring_per_group) { 4367 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 4368 min(actual, ixgbe->num_rx_rings); 4369 ixgbe_setup_vmdq_rss_conf(ixgbe); 4370 } 4371 4372 /* 4373 * Now we know the actual number of vectors. Here we map the vector 4374 * to other, rx rings and tx ring. 4375 */ 4376 if (actual < minimum) { 4377 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d", 4378 actual); 4379 goto alloc_handle_fail; 4380 } 4381 4382 /* 4383 * Get priority for first vector, assume remaining are all the same 4384 */ 4385 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 4386 if (rc != DDI_SUCCESS) { 4387 ixgbe_log(ixgbe, 4388 "Get interrupt priority failed: %d", rc); 4389 goto alloc_handle_fail; 4390 } 4391 4392 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 4393 if (rc != DDI_SUCCESS) { 4394 ixgbe_log(ixgbe, 4395 "Get interrupt cap failed: %d", rc); 4396 goto alloc_handle_fail; 4397 } 4398 4399 ixgbe->intr_type = intr_type; 4400 4401 return (IXGBE_SUCCESS); 4402 4403 alloc_handle_fail: 4404 ixgbe_rem_intrs(ixgbe); 4405 4406 return (IXGBE_FAILURE); 4407 } 4408 4409 /* 4410 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type. 4411 * 4412 * Before adding the interrupt handlers, the interrupt vectors have 4413 * been allocated, and the rx/tx rings have also been allocated. 4414 */ 4415 static int 4416 ixgbe_add_intr_handlers(ixgbe_t *ixgbe) 4417 { 4418 int vector = 0; 4419 int rc; 4420 4421 switch (ixgbe->intr_type) { 4422 case DDI_INTR_TYPE_MSIX: 4423 /* 4424 * Add interrupt handler for all vectors 4425 */ 4426 for (vector = 0; vector < ixgbe->intr_cnt; vector++) { 4427 /* 4428 * install pointer to vect_map[vector] 4429 */ 4430 rc = ddi_intr_add_handler(ixgbe->htable[vector], 4431 (ddi_intr_handler_t *)ixgbe_intr_msix, 4432 (void *)&ixgbe->vect_map[vector], NULL); 4433 4434 if (rc != DDI_SUCCESS) { 4435 ixgbe_log(ixgbe, 4436 "Add interrupt handler failed. " 4437 "return: %d, vector: %d", rc, vector); 4438 for (vector--; vector >= 0; vector--) { 4439 (void) ddi_intr_remove_handler( 4440 ixgbe->htable[vector]); 4441 } 4442 return (IXGBE_FAILURE); 4443 } 4444 } 4445 4446 break; 4447 4448 case DDI_INTR_TYPE_MSI: 4449 /* 4450 * Add interrupt handlers for the only vector 4451 */ 4452 rc = ddi_intr_add_handler(ixgbe->htable[vector], 4453 (ddi_intr_handler_t *)ixgbe_intr_msi, 4454 (void *)ixgbe, NULL); 4455 4456 if (rc != DDI_SUCCESS) { 4457 ixgbe_log(ixgbe, 4458 "Add MSI interrupt handler failed: %d", rc); 4459 return (IXGBE_FAILURE); 4460 } 4461 4462 break; 4463 4464 case DDI_INTR_TYPE_FIXED: 4465 /* 4466 * Add interrupt handlers for the only vector 4467 */ 4468 rc = ddi_intr_add_handler(ixgbe->htable[vector], 4469 (ddi_intr_handler_t *)ixgbe_intr_legacy, 4470 (void *)ixgbe, NULL); 4471 4472 if (rc != DDI_SUCCESS) { 4473 ixgbe_log(ixgbe, 4474 "Add legacy interrupt handler failed: %d", rc); 4475 return (IXGBE_FAILURE); 4476 } 4477 4478 break; 4479 4480 default: 4481 return (IXGBE_FAILURE); 4482 } 4483 4484 return (IXGBE_SUCCESS); 4485 } 4486 4487 #pragma inline(ixgbe_map_rxring_to_vector) 4488 /* 4489 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector. 4490 */ 4491 static void 4492 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx) 4493 { 4494 /* 4495 * Set bit in map 4496 */ 4497 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 4498 4499 /* 4500 * Count bits set 4501 */ 4502 ixgbe->vect_map[v_idx].rxr_cnt++; 4503 4504 /* 4505 * Remember bit position 4506 */ 4507 ixgbe->rx_rings[r_idx].intr_vector = v_idx; 4508 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx; 4509 } 4510 4511 #pragma inline(ixgbe_map_txring_to_vector) 4512 /* 4513 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector. 4514 */ 4515 static void 4516 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx) 4517 { 4518 /* 4519 * Set bit in map 4520 */ 4521 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx); 4522 4523 /* 4524 * Count bits set 4525 */ 4526 ixgbe->vect_map[v_idx].txr_cnt++; 4527 4528 /* 4529 * Remember bit position 4530 */ 4531 ixgbe->tx_rings[t_idx].intr_vector = v_idx; 4532 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx; 4533 } 4534 4535 /* 4536 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector 4537 * allocation register (IVAR). 4538 * cause: 4539 * -1 : other cause 4540 * 0 : rx 4541 * 1 : tx 4542 */ 4543 static void 4544 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector, 4545 int8_t cause) 4546 { 4547 struct ixgbe_hw *hw = &ixgbe->hw; 4548 u32 ivar, index; 4549 4550 switch (hw->mac.type) { 4551 case ixgbe_mac_82598EB: 4552 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 4553 if (cause == -1) { 4554 cause = 0; 4555 } 4556 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 4557 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 4558 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3))); 4559 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3))); 4560 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 4561 break; 4562 case ixgbe_mac_82599EB: 4563 if (cause == -1) { 4564 /* other causes */ 4565 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 4566 index = (intr_alloc_entry & 1) * 8; 4567 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 4568 ivar &= ~(0xFF << index); 4569 ivar |= (msix_vector << index); 4570 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 4571 } else { 4572 /* tx or rx causes */ 4573 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 4574 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 4575 ivar = IXGBE_READ_REG(hw, 4576 IXGBE_IVAR(intr_alloc_entry >> 1)); 4577 ivar &= ~(0xFF << index); 4578 ivar |= (msix_vector << index); 4579 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 4580 ivar); 4581 } 4582 break; 4583 default: 4584 break; 4585 } 4586 } 4587 4588 /* 4589 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of 4590 * given interrupt vector allocation register (IVAR). 4591 * cause: 4592 * -1 : other cause 4593 * 0 : rx 4594 * 1 : tx 4595 */ 4596 static void 4597 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 4598 { 4599 struct ixgbe_hw *hw = &ixgbe->hw; 4600 u32 ivar, index; 4601 4602 switch (hw->mac.type) { 4603 case ixgbe_mac_82598EB: 4604 if (cause == -1) { 4605 cause = 0; 4606 } 4607 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 4608 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 4609 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 * 4610 (intr_alloc_entry & 0x3))); 4611 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 4612 break; 4613 case ixgbe_mac_82599EB: 4614 if (cause == -1) { 4615 /* other causes */ 4616 index = (intr_alloc_entry & 1) * 8; 4617 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 4618 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 4619 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 4620 } else { 4621 /* tx or rx causes */ 4622 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 4623 ivar = IXGBE_READ_REG(hw, 4624 IXGBE_IVAR(intr_alloc_entry >> 1)); 4625 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 4626 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 4627 ivar); 4628 } 4629 break; 4630 default: 4631 break; 4632 } 4633 } 4634 4635 /* 4636 * ixgbe_disable_ivar - Disble the given entry by clearing the VAL bit of 4637 * given interrupt vector allocation register (IVAR). 4638 * cause: 4639 * -1 : other cause 4640 * 0 : rx 4641 * 1 : tx 4642 */ 4643 static void 4644 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 4645 { 4646 struct ixgbe_hw *hw = &ixgbe->hw; 4647 u32 ivar, index; 4648 4649 switch (hw->mac.type) { 4650 case ixgbe_mac_82598EB: 4651 if (cause == -1) { 4652 cause = 0; 4653 } 4654 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 4655 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 4656 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 * 4657 (intr_alloc_entry & 0x3))); 4658 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 4659 break; 4660 case ixgbe_mac_82599EB: 4661 if (cause == -1) { 4662 /* other causes */ 4663 index = (intr_alloc_entry & 1) * 8; 4664 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 4665 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 4666 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 4667 } else { 4668 /* tx or rx causes */ 4669 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 4670 ivar = IXGBE_READ_REG(hw, 4671 IXGBE_IVAR(intr_alloc_entry >> 1)); 4672 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 4673 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 4674 ivar); 4675 } 4676 break; 4677 default: 4678 break; 4679 } 4680 } 4681 4682 /* 4683 * Convert the rx ring index driver maintained to the rx ring index 4684 * in h/w. 4685 */ 4686 static uint32_t 4687 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index) 4688 { 4689 4690 struct ixgbe_hw *hw = &ixgbe->hw; 4691 uint32_t rx_ring_per_group, hw_rx_index; 4692 4693 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS || 4694 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) { 4695 return (sw_rx_index); 4696 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) { 4697 if (hw->mac.type == ixgbe_mac_82598EB) { 4698 return (sw_rx_index); 4699 } else if (hw->mac.type == ixgbe_mac_82599EB) { 4700 return (sw_rx_index * 2); 4701 } 4702 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) { 4703 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 4704 4705 if (hw->mac.type == ixgbe_mac_82598EB) { 4706 hw_rx_index = (sw_rx_index / rx_ring_per_group) * 4707 16 + (sw_rx_index % rx_ring_per_group); 4708 return (hw_rx_index); 4709 } else if (hw->mac.type == ixgbe_mac_82599EB) { 4710 if (ixgbe->num_rx_groups > 32) { 4711 hw_rx_index = (sw_rx_index / 4712 rx_ring_per_group) * 2 + 4713 (sw_rx_index % rx_ring_per_group); 4714 } else { 4715 hw_rx_index = (sw_rx_index / 4716 rx_ring_per_group) * 4 + 4717 (sw_rx_index % rx_ring_per_group); 4718 } 4719 return (hw_rx_index); 4720 } 4721 } 4722 4723 /* 4724 * Should never reach. Just to make compiler happy. 4725 */ 4726 return (sw_rx_index); 4727 } 4728 4729 /* 4730 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors. 4731 * 4732 * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt 4733 * to vector[0 - (intr_cnt -1)]. 4734 */ 4735 static int 4736 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe) 4737 { 4738 int i, vector = 0; 4739 4740 /* initialize vector map */ 4741 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 4742 for (i = 0; i < ixgbe->intr_cnt; i++) { 4743 ixgbe->vect_map[i].ixgbe = ixgbe; 4744 } 4745 4746 /* 4747 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0], 4748 * tx rings[0] on RTxQ[1]. 4749 */ 4750 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 4751 ixgbe_map_rxring_to_vector(ixgbe, 0, 0); 4752 ixgbe_map_txring_to_vector(ixgbe, 0, 1); 4753 return (IXGBE_SUCCESS); 4754 } 4755 4756 /* 4757 * Interrupts/vectors mapping for MSI-X 4758 */ 4759 4760 /* 4761 * Map other interrupt to vector 0, 4762 * Set bit in map and count the bits set. 4763 */ 4764 BT_SET(ixgbe->vect_map[vector].other_map, 0); 4765 ixgbe->vect_map[vector].other_cnt++; 4766 4767 /* 4768 * Map rx ring interrupts to vectors 4769 */ 4770 for (i = 0; i < ixgbe->num_rx_rings; i++) { 4771 ixgbe_map_rxring_to_vector(ixgbe, i, vector); 4772 vector = (vector +1) % ixgbe->intr_cnt; 4773 } 4774 4775 /* 4776 * Map tx ring interrupts to vectors 4777 */ 4778 for (i = 0; i < ixgbe->num_tx_rings; i++) { 4779 ixgbe_map_txring_to_vector(ixgbe, i, vector); 4780 vector = (vector +1) % ixgbe->intr_cnt; 4781 } 4782 4783 return (IXGBE_SUCCESS); 4784 } 4785 4786 /* 4787 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s). 4788 * 4789 * This relies on ring/vector mapping already set up in the 4790 * vect_map[] structures 4791 */ 4792 static void 4793 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe) 4794 { 4795 struct ixgbe_hw *hw = &ixgbe->hw; 4796 ixgbe_intr_vector_t *vect; /* vector bitmap */ 4797 int r_idx; /* ring index */ 4798 int v_idx; /* vector index */ 4799 uint32_t hw_index; 4800 4801 /* 4802 * Clear any previous entries 4803 */ 4804 switch (hw->mac.type) { 4805 case ixgbe_mac_82598EB: 4806 for (v_idx = 0; v_idx < 25; v_idx++) 4807 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 4808 4809 break; 4810 case ixgbe_mac_82599EB: 4811 for (v_idx = 0; v_idx < 64; v_idx++) 4812 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 4813 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0); 4814 4815 break; 4816 default: 4817 break; 4818 } 4819 4820 /* 4821 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and 4822 * tx rings[0] will use RTxQ[1]. 4823 */ 4824 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 4825 ixgbe_setup_ivar(ixgbe, 0, 0, 0); 4826 ixgbe_setup_ivar(ixgbe, 0, 1, 1); 4827 return; 4828 } 4829 4830 /* 4831 * For MSI-X interrupt, "Other" is always on vector[0]. 4832 */ 4833 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1); 4834 4835 /* 4836 * For each interrupt vector, populate the IVAR table 4837 */ 4838 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) { 4839 vect = &ixgbe->vect_map[v_idx]; 4840 4841 /* 4842 * For each rx ring bit set 4843 */ 4844 r_idx = bt_getlowbit(vect->rx_map, 0, 4845 (ixgbe->num_rx_rings - 1)); 4846 4847 while (r_idx >= 0) { 4848 hw_index = ixgbe->rx_rings[r_idx].hw_index; 4849 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0); 4850 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 4851 (ixgbe->num_rx_rings - 1)); 4852 } 4853 4854 /* 4855 * For each tx ring bit set 4856 */ 4857 r_idx = bt_getlowbit(vect->tx_map, 0, 4858 (ixgbe->num_tx_rings - 1)); 4859 4860 while (r_idx >= 0) { 4861 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1); 4862 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 4863 (ixgbe->num_tx_rings - 1)); 4864 } 4865 } 4866 } 4867 4868 /* 4869 * ixgbe_rem_intr_handlers - Remove the interrupt handlers. 4870 */ 4871 static void 4872 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe) 4873 { 4874 int i; 4875 int rc; 4876 4877 for (i = 0; i < ixgbe->intr_cnt; i++) { 4878 rc = ddi_intr_remove_handler(ixgbe->htable[i]); 4879 if (rc != DDI_SUCCESS) { 4880 IXGBE_DEBUGLOG_1(ixgbe, 4881 "Remove intr handler failed: %d", rc); 4882 } 4883 } 4884 } 4885 4886 /* 4887 * ixgbe_rem_intrs - Remove the allocated interrupts. 4888 */ 4889 static void 4890 ixgbe_rem_intrs(ixgbe_t *ixgbe) 4891 { 4892 int i; 4893 int rc; 4894 4895 for (i = 0; i < ixgbe->intr_cnt; i++) { 4896 rc = ddi_intr_free(ixgbe->htable[i]); 4897 if (rc != DDI_SUCCESS) { 4898 IXGBE_DEBUGLOG_1(ixgbe, 4899 "Free intr failed: %d", rc); 4900 } 4901 } 4902 4903 kmem_free(ixgbe->htable, ixgbe->intr_size); 4904 ixgbe->htable = NULL; 4905 } 4906 4907 /* 4908 * ixgbe_enable_intrs - Enable all the ddi interrupts. 4909 */ 4910 static int 4911 ixgbe_enable_intrs(ixgbe_t *ixgbe) 4912 { 4913 int i; 4914 int rc; 4915 4916 /* 4917 * Enable interrupts 4918 */ 4919 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 4920 /* 4921 * Call ddi_intr_block_enable() for MSI 4922 */ 4923 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt); 4924 if (rc != DDI_SUCCESS) { 4925 ixgbe_log(ixgbe, 4926 "Enable block intr failed: %d", rc); 4927 return (IXGBE_FAILURE); 4928 } 4929 } else { 4930 /* 4931 * Call ddi_intr_enable() for Legacy/MSI non block enable 4932 */ 4933 for (i = 0; i < ixgbe->intr_cnt; i++) { 4934 rc = ddi_intr_enable(ixgbe->htable[i]); 4935 if (rc != DDI_SUCCESS) { 4936 ixgbe_log(ixgbe, 4937 "Enable intr failed: %d", rc); 4938 return (IXGBE_FAILURE); 4939 } 4940 } 4941 } 4942 4943 return (IXGBE_SUCCESS); 4944 } 4945 4946 /* 4947 * ixgbe_disable_intrs - Disable all the interrupts. 4948 */ 4949 static int 4950 ixgbe_disable_intrs(ixgbe_t *ixgbe) 4951 { 4952 int i; 4953 int rc; 4954 4955 /* 4956 * Disable all interrupts 4957 */ 4958 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 4959 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt); 4960 if (rc != DDI_SUCCESS) { 4961 ixgbe_log(ixgbe, 4962 "Disable block intr failed: %d", rc); 4963 return (IXGBE_FAILURE); 4964 } 4965 } else { 4966 for (i = 0; i < ixgbe->intr_cnt; i++) { 4967 rc = ddi_intr_disable(ixgbe->htable[i]); 4968 if (rc != DDI_SUCCESS) { 4969 ixgbe_log(ixgbe, 4970 "Disable intr failed: %d", rc); 4971 return (IXGBE_FAILURE); 4972 } 4973 } 4974 } 4975 4976 return (IXGBE_SUCCESS); 4977 } 4978 4979 /* 4980 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware. 4981 */ 4982 static void 4983 ixgbe_get_hw_state(ixgbe_t *ixgbe) 4984 { 4985 struct ixgbe_hw *hw = &ixgbe->hw; 4986 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 4987 boolean_t link_up = B_FALSE; 4988 uint32_t pcs1g_anlp = 0; 4989 uint32_t pcs1g_ana = 0; 4990 4991 ASSERT(mutex_owned(&ixgbe->gen_lock)); 4992 ixgbe->param_lp_1000fdx_cap = 0; 4993 ixgbe->param_lp_100fdx_cap = 0; 4994 4995 /* check for link, don't wait */ 4996 (void) ixgbe_check_link(hw, &speed, &link_up, false); 4997 if (link_up) { 4998 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 4999 pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 5000 5001 ixgbe->param_lp_1000fdx_cap = 5002 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5003 ixgbe->param_lp_100fdx_cap = 5004 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5005 } 5006 5007 ixgbe->param_adv_1000fdx_cap = 5008 (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0; 5009 ixgbe->param_adv_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0; 5010 } 5011 5012 /* 5013 * ixgbe_get_driver_control - Notify that driver is in control of device. 5014 */ 5015 static void 5016 ixgbe_get_driver_control(struct ixgbe_hw *hw) 5017 { 5018 uint32_t ctrl_ext; 5019 5020 /* 5021 * Notify firmware that driver is in control of device 5022 */ 5023 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5024 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 5025 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5026 } 5027 5028 /* 5029 * ixgbe_release_driver_control - Notify that driver is no longer in control 5030 * of device. 5031 */ 5032 static void 5033 ixgbe_release_driver_control(struct ixgbe_hw *hw) 5034 { 5035 uint32_t ctrl_ext; 5036 5037 /* 5038 * Notify firmware that driver is no longer in control of device 5039 */ 5040 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5041 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 5042 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5043 } 5044 5045 /* 5046 * ixgbe_atomic_reserve - Atomic decrease operation. 5047 */ 5048 int 5049 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n) 5050 { 5051 uint32_t oldval; 5052 uint32_t newval; 5053 5054 /* 5055 * ATOMICALLY 5056 */ 5057 do { 5058 oldval = *count_p; 5059 if (oldval < n) 5060 return (-1); 5061 newval = oldval - n; 5062 } while (atomic_cas_32(count_p, oldval, newval) != oldval); 5063 5064 return (newval); 5065 } 5066 5067 /* 5068 * ixgbe_mc_table_itr - Traverse the entries in the multicast table. 5069 */ 5070 static uint8_t * 5071 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq) 5072 { 5073 uint8_t *addr = *upd_ptr; 5074 uint8_t *new_ptr; 5075 5076 _NOTE(ARGUNUSED(hw)); 5077 _NOTE(ARGUNUSED(vmdq)); 5078 5079 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 5080 *upd_ptr = new_ptr; 5081 return (addr); 5082 } 5083 5084 /* 5085 * FMA support 5086 */ 5087 int 5088 ixgbe_check_acc_handle(ddi_acc_handle_t handle) 5089 { 5090 ddi_fm_error_t de; 5091 5092 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 5093 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 5094 return (de.fme_status); 5095 } 5096 5097 int 5098 ixgbe_check_dma_handle(ddi_dma_handle_t handle) 5099 { 5100 ddi_fm_error_t de; 5101 5102 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 5103 return (de.fme_status); 5104 } 5105 5106 /* 5107 * ixgbe_fm_error_cb - The IO fault service error handling callback function. 5108 */ 5109 static int 5110 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 5111 { 5112 _NOTE(ARGUNUSED(impl_data)); 5113 /* 5114 * as the driver can always deal with an error in any dma or 5115 * access handle, we can just return the fme_status value. 5116 */ 5117 pci_ereport_post(dip, err, NULL); 5118 return (err->fme_status); 5119 } 5120 5121 static void 5122 ixgbe_fm_init(ixgbe_t *ixgbe) 5123 { 5124 ddi_iblock_cookie_t iblk; 5125 int fma_dma_flag; 5126 5127 /* 5128 * Only register with IO Fault Services if we have some capability 5129 */ 5130 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 5131 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 5132 } else { 5133 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 5134 } 5135 5136 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 5137 fma_dma_flag = 1; 5138 } else { 5139 fma_dma_flag = 0; 5140 } 5141 5142 ixgbe_set_fma_flags(fma_dma_flag); 5143 5144 if (ixgbe->fm_capabilities) { 5145 5146 /* 5147 * Register capabilities with IO Fault Services 5148 */ 5149 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk); 5150 5151 /* 5152 * Initialize pci ereport capabilities if ereport capable 5153 */ 5154 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 5155 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5156 pci_ereport_setup(ixgbe->dip); 5157 5158 /* 5159 * Register error callback if error callback capable 5160 */ 5161 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5162 ddi_fm_handler_register(ixgbe->dip, 5163 ixgbe_fm_error_cb, (void*) ixgbe); 5164 } 5165 } 5166 5167 static void 5168 ixgbe_fm_fini(ixgbe_t *ixgbe) 5169 { 5170 /* 5171 * Only unregister FMA capabilities if they are registered 5172 */ 5173 if (ixgbe->fm_capabilities) { 5174 5175 /* 5176 * Release any resources allocated by pci_ereport_setup() 5177 */ 5178 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 5179 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5180 pci_ereport_teardown(ixgbe->dip); 5181 5182 /* 5183 * Un-register error callback if error callback capable 5184 */ 5185 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5186 ddi_fm_handler_unregister(ixgbe->dip); 5187 5188 /* 5189 * Unregister from IO Fault Service 5190 */ 5191 ddi_fm_fini(ixgbe->dip); 5192 } 5193 } 5194 5195 void 5196 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail) 5197 { 5198 uint64_t ena; 5199 char buf[FM_MAX_CLASS]; 5200 5201 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 5202 ena = fm_ena_generate(0, FM_ENA_FMT1); 5203 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) { 5204 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP, 5205 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 5206 } 5207 } 5208 5209 static int 5210 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 5211 { 5212 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh; 5213 5214 mutex_enter(&rx_ring->rx_lock); 5215 rx_ring->ring_gen_num = mr_gen_num; 5216 mutex_exit(&rx_ring->rx_lock); 5217 return (0); 5218 } 5219 5220 /* 5221 * Get the global ring index by a ring index within a group. 5222 */ 5223 static int 5224 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex) 5225 { 5226 ixgbe_rx_ring_t *rx_ring; 5227 int i; 5228 5229 for (i = 0; i < ixgbe->num_rx_rings; i++) { 5230 rx_ring = &ixgbe->rx_rings[i]; 5231 if (rx_ring->group_index == gindex) 5232 rindex--; 5233 if (rindex < 0) 5234 return (i); 5235 } 5236 5237 return (-1); 5238 } 5239 5240 /* 5241 * Callback funtion for MAC layer to register all rings. 5242 */ 5243 /* ARGSUSED */ 5244 void 5245 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index, 5246 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 5247 { 5248 ixgbe_t *ixgbe = (ixgbe_t *)arg; 5249 mac_intr_t *mintr = &infop->mri_intr; 5250 5251 switch (rtype) { 5252 case MAC_RING_TYPE_RX: { 5253 /* 5254 * 'index' is the ring index within the group. 5255 * Need to get the global ring index by searching in groups. 5256 */ 5257 int global_ring_index = ixgbe_get_rx_ring_index( 5258 ixgbe, group_index, ring_index); 5259 5260 ASSERT(global_ring_index >= 0); 5261 5262 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index]; 5263 rx_ring->ring_handle = rh; 5264 5265 infop->mri_driver = (mac_ring_driver_t)rx_ring; 5266 infop->mri_start = ixgbe_ring_start; 5267 infop->mri_stop = NULL; 5268 infop->mri_poll = ixgbe_ring_rx_poll; 5269 infop->mri_stat = ixgbe_rx_ring_stat; 5270 5271 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 5272 mintr->mi_enable = ixgbe_rx_ring_intr_enable; 5273 mintr->mi_disable = ixgbe_rx_ring_intr_disable; 5274 if (ixgbe->intr_type & 5275 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 5276 mintr->mi_ddi_handle = 5277 ixgbe->htable[rx_ring->intr_vector]; 5278 } 5279 5280 break; 5281 } 5282 case MAC_RING_TYPE_TX: { 5283 ASSERT(group_index == -1); 5284 ASSERT(ring_index < ixgbe->num_tx_rings); 5285 5286 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index]; 5287 tx_ring->ring_handle = rh; 5288 5289 infop->mri_driver = (mac_ring_driver_t)tx_ring; 5290 infop->mri_start = NULL; 5291 infop->mri_stop = NULL; 5292 infop->mri_tx = ixgbe_ring_tx; 5293 infop->mri_stat = ixgbe_tx_ring_stat; 5294 if (ixgbe->intr_type & 5295 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 5296 mintr->mi_ddi_handle = 5297 ixgbe->htable[tx_ring->intr_vector]; 5298 } 5299 break; 5300 } 5301 default: 5302 break; 5303 } 5304 } 5305 5306 /* 5307 * Callback funtion for MAC layer to register all groups. 5308 */ 5309 void 5310 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index, 5311 mac_group_info_t *infop, mac_group_handle_t gh) 5312 { 5313 ixgbe_t *ixgbe = (ixgbe_t *)arg; 5314 5315 switch (rtype) { 5316 case MAC_RING_TYPE_RX: { 5317 ixgbe_rx_group_t *rx_group; 5318 5319 rx_group = &ixgbe->rx_groups[index]; 5320 rx_group->group_handle = gh; 5321 5322 infop->mgi_driver = (mac_group_driver_t)rx_group; 5323 infop->mgi_start = NULL; 5324 infop->mgi_stop = NULL; 5325 infop->mgi_addmac = ixgbe_addmac; 5326 infop->mgi_remmac = ixgbe_remmac; 5327 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups); 5328 5329 break; 5330 } 5331 case MAC_RING_TYPE_TX: 5332 break; 5333 default: 5334 break; 5335 } 5336 } 5337 5338 /* 5339 * Enable interrupt on the specificed rx ring. 5340 */ 5341 int 5342 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh) 5343 { 5344 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 5345 ixgbe_t *ixgbe = rx_ring->ixgbe; 5346 int r_idx = rx_ring->index; 5347 int hw_r_idx = rx_ring->hw_index; 5348 int v_idx = rx_ring->intr_vector; 5349 5350 mutex_enter(&ixgbe->gen_lock); 5351 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 5352 mutex_exit(&ixgbe->gen_lock); 5353 /* 5354 * Simply return 0. 5355 * Interrupts are being adjusted. ixgbe_intr_adjust() 5356 * will eventually re-enable the interrupt when it's 5357 * done with the adjustment. 5358 */ 5359 return (0); 5360 } 5361 5362 /* 5363 * To enable interrupt by setting the VAL bit of given interrupt 5364 * vector allocation register (IVAR). 5365 */ 5366 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0); 5367 5368 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 5369 5370 /* 5371 * Trigger a Rx interrupt on this ring 5372 */ 5373 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx)); 5374 IXGBE_WRITE_FLUSH(&ixgbe->hw); 5375 5376 mutex_exit(&ixgbe->gen_lock); 5377 5378 return (0); 5379 } 5380 5381 /* 5382 * Disable interrupt on the specificed rx ring. 5383 */ 5384 int 5385 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh) 5386 { 5387 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 5388 ixgbe_t *ixgbe = rx_ring->ixgbe; 5389 int r_idx = rx_ring->index; 5390 int hw_r_idx = rx_ring->hw_index; 5391 int v_idx = rx_ring->intr_vector; 5392 5393 mutex_enter(&ixgbe->gen_lock); 5394 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 5395 mutex_exit(&ixgbe->gen_lock); 5396 /* 5397 * Simply return 0. 5398 * In the rare case where an interrupt is being 5399 * disabled while interrupts are being adjusted, 5400 * we don't fail the operation. No interrupts will 5401 * be generated while they are adjusted, and 5402 * ixgbe_intr_adjust() will cause the interrupts 5403 * to be re-enabled once it completes. Note that 5404 * in this case, packets may be delivered to the 5405 * stack via interrupts before xgbe_rx_ring_intr_enable() 5406 * is called again. This is acceptable since interrupt 5407 * adjustment is infrequent, and the stack will be 5408 * able to handle these packets. 5409 */ 5410 return (0); 5411 } 5412 5413 /* 5414 * To disable interrupt by clearing the VAL bit of given interrupt 5415 * vector allocation register (IVAR). 5416 */ 5417 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0); 5418 5419 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx); 5420 5421 mutex_exit(&ixgbe->gen_lock); 5422 5423 return (0); 5424 } 5425 5426 /* 5427 * Add a mac address. 5428 */ 5429 static int 5430 ixgbe_addmac(void *arg, const uint8_t *mac_addr) 5431 { 5432 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 5433 ixgbe_t *ixgbe = rx_group->ixgbe; 5434 struct ixgbe_hw *hw = &ixgbe->hw; 5435 int slot, i; 5436 5437 mutex_enter(&ixgbe->gen_lock); 5438 5439 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 5440 mutex_exit(&ixgbe->gen_lock); 5441 return (ECANCELED); 5442 } 5443 5444 if (ixgbe->unicst_avail == 0) { 5445 /* no slots available */ 5446 mutex_exit(&ixgbe->gen_lock); 5447 return (ENOSPC); 5448 } 5449 5450 /* 5451 * The first ixgbe->num_rx_groups slots are reserved for each respective 5452 * group. The rest slots are shared by all groups. While adding a 5453 * MAC address, reserved slots are firstly checked then the shared 5454 * slots are searched. 5455 */ 5456 slot = -1; 5457 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) { 5458 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) { 5459 if (ixgbe->unicst_addr[i].mac.set == 0) { 5460 slot = i; 5461 break; 5462 } 5463 } 5464 } else { 5465 slot = rx_group->index; 5466 } 5467 5468 if (slot == -1) { 5469 /* no slots available */ 5470 mutex_exit(&ixgbe->gen_lock); 5471 return (ENOSPC); 5472 } 5473 5474 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 5475 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr, 5476 rx_group->index, IXGBE_RAH_AV); 5477 ixgbe->unicst_addr[slot].mac.set = 1; 5478 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index; 5479 ixgbe->unicst_avail--; 5480 5481 mutex_exit(&ixgbe->gen_lock); 5482 5483 return (0); 5484 } 5485 5486 /* 5487 * Remove a mac address. 5488 */ 5489 static int 5490 ixgbe_remmac(void *arg, const uint8_t *mac_addr) 5491 { 5492 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 5493 ixgbe_t *ixgbe = rx_group->ixgbe; 5494 struct ixgbe_hw *hw = &ixgbe->hw; 5495 int slot; 5496 5497 mutex_enter(&ixgbe->gen_lock); 5498 5499 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 5500 mutex_exit(&ixgbe->gen_lock); 5501 return (ECANCELED); 5502 } 5503 5504 slot = ixgbe_unicst_find(ixgbe, mac_addr); 5505 if (slot == -1) { 5506 mutex_exit(&ixgbe->gen_lock); 5507 return (EINVAL); 5508 } 5509 5510 if (ixgbe->unicst_addr[slot].mac.set == 0) { 5511 mutex_exit(&ixgbe->gen_lock); 5512 return (EINVAL); 5513 } 5514 5515 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 5516 (void) ixgbe_clear_rar(hw, slot); 5517 ixgbe->unicst_addr[slot].mac.set = 0; 5518 ixgbe->unicst_avail++; 5519 5520 mutex_exit(&ixgbe->gen_lock); 5521 5522 return (0); 5523 } 5524