1 /* 2 * CDDL HEADER START 3 * 4 * Copyright(c) 2007-2009 Intel Corporation. All rights reserved. 5 * The contents of this file are subject to the terms of the 6 * Common Development and Distribution License (the "License"). 7 * You may not use this file except in compliance with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 23 /* 24 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 25 * Use is subject to license terms. 26 */ 27 28 #include "ixgbe_sw.h" 29 30 static char ixgbe_ident[] = "Intel 10Gb Ethernet"; 31 static char ixgbe_version[] = "ixgbe 1.1.4"; 32 33 /* 34 * Local function protoypes 35 */ 36 static int ixgbe_register_mac(ixgbe_t *); 37 static int ixgbe_identify_hardware(ixgbe_t *); 38 static int ixgbe_regs_map(ixgbe_t *); 39 static void ixgbe_init_properties(ixgbe_t *); 40 static int ixgbe_init_driver_settings(ixgbe_t *); 41 static void ixgbe_init_locks(ixgbe_t *); 42 static void ixgbe_destroy_locks(ixgbe_t *); 43 static int ixgbe_init(ixgbe_t *); 44 static int ixgbe_chip_start(ixgbe_t *); 45 static void ixgbe_chip_stop(ixgbe_t *); 46 static int ixgbe_reset(ixgbe_t *); 47 static void ixgbe_tx_clean(ixgbe_t *); 48 static boolean_t ixgbe_tx_drain(ixgbe_t *); 49 static boolean_t ixgbe_rx_drain(ixgbe_t *); 50 static int ixgbe_alloc_rings(ixgbe_t *); 51 static void ixgbe_free_rings(ixgbe_t *); 52 static int ixgbe_alloc_rx_data(ixgbe_t *); 53 static void ixgbe_free_rx_data(ixgbe_t *); 54 static void ixgbe_setup_rings(ixgbe_t *); 55 static void ixgbe_setup_rx(ixgbe_t *); 56 static void ixgbe_setup_tx(ixgbe_t *); 57 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *); 58 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *); 59 static void ixgbe_setup_rss(ixgbe_t *); 60 static void ixgbe_setup_vmdq(ixgbe_t *); 61 static void ixgbe_setup_vmdq_rss(ixgbe_t *); 62 static void ixgbe_init_unicst(ixgbe_t *); 63 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *); 64 static void ixgbe_setup_multicst(ixgbe_t *); 65 static void ixgbe_get_hw_state(ixgbe_t *); 66 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe); 67 static void ixgbe_get_conf(ixgbe_t *); 68 static void ixgbe_init_params(ixgbe_t *); 69 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int); 70 static void ixgbe_driver_link_check(ixgbe_t *); 71 static void ixgbe_sfp_check(void *); 72 static void ixgbe_link_timer(void *); 73 static void ixgbe_local_timer(void *); 74 static void ixgbe_arm_watchdog_timer(ixgbe_t *); 75 static void ixgbe_restart_watchdog_timer(ixgbe_t *); 76 static void ixgbe_disable_adapter_interrupts(ixgbe_t *); 77 static void ixgbe_enable_adapter_interrupts(ixgbe_t *); 78 static boolean_t is_valid_mac_addr(uint8_t *); 79 static boolean_t ixgbe_stall_check(ixgbe_t *); 80 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t); 81 static void ixgbe_set_internal_mac_loopback(ixgbe_t *); 82 static boolean_t ixgbe_find_mac_address(ixgbe_t *); 83 static int ixgbe_alloc_intrs(ixgbe_t *); 84 static int ixgbe_alloc_intr_handles(ixgbe_t *, int); 85 static int ixgbe_add_intr_handlers(ixgbe_t *); 86 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int); 87 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int); 88 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t); 89 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t); 90 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t); 91 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index); 92 static int ixgbe_map_intrs_to_vectors(ixgbe_t *); 93 static void ixgbe_setup_adapter_vector(ixgbe_t *); 94 static void ixgbe_rem_intr_handlers(ixgbe_t *); 95 static void ixgbe_rem_intrs(ixgbe_t *); 96 static int ixgbe_enable_intrs(ixgbe_t *); 97 static int ixgbe_disable_intrs(ixgbe_t *); 98 static uint_t ixgbe_intr_legacy(void *, void *); 99 static uint_t ixgbe_intr_msi(void *, void *); 100 static uint_t ixgbe_intr_msix(void *, void *); 101 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *); 102 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *); 103 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t); 104 static void ixgbe_get_driver_control(struct ixgbe_hw *); 105 static int ixgbe_addmac(void *, const uint8_t *); 106 static int ixgbe_remmac(void *, const uint8_t *); 107 static void ixgbe_release_driver_control(struct ixgbe_hw *); 108 109 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t); 110 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t); 111 static int ixgbe_resume(dev_info_t *); 112 static int ixgbe_suspend(dev_info_t *); 113 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *); 114 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *); 115 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *); 116 static int ixgbe_intr_cb_register(ixgbe_t *); 117 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int); 118 119 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, 120 const void *impl_data); 121 static void ixgbe_fm_init(ixgbe_t *); 122 static void ixgbe_fm_fini(ixgbe_t *); 123 124 char *ixgbe_priv_props[] = { 125 "_tx_copy_thresh", 126 "_tx_recycle_thresh", 127 "_tx_overload_thresh", 128 "_tx_resched_thresh", 129 "_rx_copy_thresh", 130 "_rx_limit_per_intr", 131 "_intr_throttling", 132 "_adv_pause_cap", 133 "_adv_asym_pause_cap", 134 NULL 135 }; 136 137 #define IXGBE_MAX_PRIV_PROPS \ 138 (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t)) 139 140 static struct cb_ops ixgbe_cb_ops = { 141 nulldev, /* cb_open */ 142 nulldev, /* cb_close */ 143 nodev, /* cb_strategy */ 144 nodev, /* cb_print */ 145 nodev, /* cb_dump */ 146 nodev, /* cb_read */ 147 nodev, /* cb_write */ 148 nodev, /* cb_ioctl */ 149 nodev, /* cb_devmap */ 150 nodev, /* cb_mmap */ 151 nodev, /* cb_segmap */ 152 nochpoll, /* cb_chpoll */ 153 ddi_prop_op, /* cb_prop_op */ 154 NULL, /* cb_stream */ 155 D_MP | D_HOTPLUG, /* cb_flag */ 156 CB_REV, /* cb_rev */ 157 nodev, /* cb_aread */ 158 nodev /* cb_awrite */ 159 }; 160 161 static struct dev_ops ixgbe_dev_ops = { 162 DEVO_REV, /* devo_rev */ 163 0, /* devo_refcnt */ 164 NULL, /* devo_getinfo */ 165 nulldev, /* devo_identify */ 166 nulldev, /* devo_probe */ 167 ixgbe_attach, /* devo_attach */ 168 ixgbe_detach, /* devo_detach */ 169 nodev, /* devo_reset */ 170 &ixgbe_cb_ops, /* devo_cb_ops */ 171 NULL, /* devo_bus_ops */ 172 ddi_power, /* devo_power */ 173 ddi_quiesce_not_supported, /* devo_quiesce */ 174 }; 175 176 static struct modldrv ixgbe_modldrv = { 177 &mod_driverops, /* Type of module. This one is a driver */ 178 ixgbe_ident, /* Discription string */ 179 &ixgbe_dev_ops /* driver ops */ 180 }; 181 182 static struct modlinkage ixgbe_modlinkage = { 183 MODREV_1, &ixgbe_modldrv, NULL 184 }; 185 186 /* 187 * Access attributes for register mapping 188 */ 189 ddi_device_acc_attr_t ixgbe_regs_acc_attr = { 190 DDI_DEVICE_ATTR_V1, 191 DDI_STRUCTURE_LE_ACC, 192 DDI_STRICTORDER_ACC, 193 DDI_FLAGERR_ACC 194 }; 195 196 /* 197 * Loopback property 198 */ 199 static lb_property_t lb_normal = { 200 normal, "normal", IXGBE_LB_NONE 201 }; 202 203 static lb_property_t lb_mac = { 204 internal, "MAC", IXGBE_LB_INTERNAL_MAC 205 }; 206 207 static lb_property_t lb_external = { 208 external, "External", IXGBE_LB_EXTERNAL 209 }; 210 211 #define IXGBE_M_CALLBACK_FLAGS \ 212 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO) 213 214 static mac_callbacks_t ixgbe_m_callbacks = { 215 IXGBE_M_CALLBACK_FLAGS, 216 ixgbe_m_stat, 217 ixgbe_m_start, 218 ixgbe_m_stop, 219 ixgbe_m_promisc, 220 ixgbe_m_multicst, 221 NULL, 222 NULL, 223 NULL, 224 ixgbe_m_ioctl, 225 ixgbe_m_getcapab, 226 NULL, 227 NULL, 228 ixgbe_m_setprop, 229 ixgbe_m_getprop, 230 ixgbe_m_propinfo 231 }; 232 233 /* 234 * Initialize capabilities of each supported adapter type 235 */ 236 static adapter_info_t ixgbe_82598eb_cap = { 237 64, /* maximum number of rx queues */ 238 1, /* minimum number of rx queues */ 239 64, /* default number of rx queues */ 240 16, /* maximum number of rx groups */ 241 1, /* minimum number of rx groups */ 242 1, /* default number of rx groups */ 243 32, /* maximum number of tx queues */ 244 1, /* minimum number of tx queues */ 245 8, /* default number of tx queues */ 246 16366, /* maximum MTU size */ 247 0xFFFF, /* maximum interrupt throttle rate */ 248 0, /* minimum interrupt throttle rate */ 249 200, /* default interrupt throttle rate */ 250 18, /* maximum total msix vectors */ 251 16, /* maximum number of ring vectors */ 252 2, /* maximum number of other vectors */ 253 IXGBE_EICR_LSC, /* "other" interrupt types handled */ 254 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */ 255 | IXGBE_FLAG_RSS_CAPABLE 256 | IXGBE_FLAG_VMDQ_CAPABLE) 257 }; 258 259 static adapter_info_t ixgbe_82599eb_cap = { 260 128, /* maximum number of rx queues */ 261 1, /* minimum number of rx queues */ 262 128, /* default number of rx queues */ 263 64, /* maximum number of rx groups */ 264 1, /* minimum number of rx groups */ 265 1, /* default number of rx groups */ 266 128, /* maximum number of tx queues */ 267 1, /* minimum number of tx queues */ 268 8, /* default number of tx queues */ 269 15500, /* maximum MTU size */ 270 0xFF8, /* maximum interrupt throttle rate */ 271 0, /* minimum interrupt throttle rate */ 272 200, /* default interrupt throttle rate */ 273 64, /* maximum total msix vectors */ 274 16, /* maximum number of ring vectors */ 275 2, /* maximum number of other vectors */ 276 IXGBE_EICR_LSC, /* "other" interrupt types handled */ 277 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */ 278 | IXGBE_FLAG_RSS_CAPABLE 279 | IXGBE_FLAG_VMDQ_CAPABLE 280 | IXGBE_FLAG_RSC_CAPABLE) 281 }; 282 283 /* 284 * Module Initialization Functions. 285 */ 286 287 int 288 _init(void) 289 { 290 int status; 291 292 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME); 293 294 status = mod_install(&ixgbe_modlinkage); 295 296 if (status != DDI_SUCCESS) { 297 mac_fini_ops(&ixgbe_dev_ops); 298 } 299 300 return (status); 301 } 302 303 int 304 _fini(void) 305 { 306 int status; 307 308 status = mod_remove(&ixgbe_modlinkage); 309 310 if (status == DDI_SUCCESS) { 311 mac_fini_ops(&ixgbe_dev_ops); 312 } 313 314 return (status); 315 } 316 317 int 318 _info(struct modinfo *modinfop) 319 { 320 int status; 321 322 status = mod_info(&ixgbe_modlinkage, modinfop); 323 324 return (status); 325 } 326 327 /* 328 * ixgbe_attach - Driver attach. 329 * 330 * This function is the device specific initialization entry 331 * point. This entry point is required and must be written. 332 * The DDI_ATTACH command must be provided in the attach entry 333 * point. When attach() is called with cmd set to DDI_ATTACH, 334 * all normal kernel services (such as kmem_alloc(9F)) are 335 * available for use by the driver. 336 * 337 * The attach() function will be called once for each instance 338 * of the device on the system with cmd set to DDI_ATTACH. 339 * Until attach() succeeds, the only driver entry points which 340 * may be called are open(9E) and getinfo(9E). 341 */ 342 static int 343 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 344 { 345 ixgbe_t *ixgbe; 346 struct ixgbe_osdep *osdep; 347 struct ixgbe_hw *hw; 348 int instance; 349 char taskqname[32]; 350 351 /* 352 * Check the command and perform corresponding operations 353 */ 354 switch (cmd) { 355 default: 356 return (DDI_FAILURE); 357 358 case DDI_RESUME: 359 return (ixgbe_resume(devinfo)); 360 361 case DDI_ATTACH: 362 break; 363 } 364 365 /* Get the device instance */ 366 instance = ddi_get_instance(devinfo); 367 368 /* Allocate memory for the instance data structure */ 369 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP); 370 371 ixgbe->dip = devinfo; 372 ixgbe->instance = instance; 373 374 hw = &ixgbe->hw; 375 osdep = &ixgbe->osdep; 376 hw->back = osdep; 377 osdep->ixgbe = ixgbe; 378 379 /* Attach the instance pointer to the dev_info data structure */ 380 ddi_set_driver_private(devinfo, ixgbe); 381 382 /* 383 * Initialize for fma support 384 */ 385 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE, 386 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 387 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 388 ixgbe_fm_init(ixgbe); 389 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT; 390 391 /* 392 * Map PCI config space registers 393 */ 394 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) { 395 ixgbe_error(ixgbe, "Failed to map PCI configurations"); 396 goto attach_fail; 397 } 398 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG; 399 400 /* 401 * Identify the chipset family 402 */ 403 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) { 404 ixgbe_error(ixgbe, "Failed to identify hardware"); 405 goto attach_fail; 406 } 407 408 /* 409 * Map device registers 410 */ 411 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) { 412 ixgbe_error(ixgbe, "Failed to map device registers"); 413 goto attach_fail; 414 } 415 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP; 416 417 /* 418 * Initialize driver parameters 419 */ 420 ixgbe_init_properties(ixgbe); 421 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS; 422 423 /* 424 * Register interrupt callback 425 */ 426 if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) { 427 ixgbe_error(ixgbe, "Failed to register interrupt callback"); 428 goto attach_fail; 429 } 430 431 /* 432 * Allocate interrupts 433 */ 434 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) { 435 ixgbe_error(ixgbe, "Failed to allocate interrupts"); 436 goto attach_fail; 437 } 438 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; 439 440 /* 441 * Allocate rx/tx rings based on the ring numbers. 442 * The actual numbers of rx/tx rings are decided by the number of 443 * allocated interrupt vectors, so we should allocate the rings after 444 * interrupts are allocated. 445 */ 446 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) { 447 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings"); 448 goto attach_fail; 449 } 450 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS; 451 452 /* 453 * Map rings to interrupt vectors 454 */ 455 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { 456 ixgbe_error(ixgbe, "Failed to map interrupts to vectors"); 457 goto attach_fail; 458 } 459 460 /* 461 * Add interrupt handlers 462 */ 463 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { 464 ixgbe_error(ixgbe, "Failed to add interrupt handlers"); 465 goto attach_fail; 466 } 467 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 468 469 /* 470 * Create a taskq for sfp-change 471 */ 472 (void) sprintf(taskqname, "ixgbe%d_taskq", instance); 473 if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname, 474 1, TASKQ_DEFAULTPRI, 0)) == NULL) { 475 ixgbe_error(ixgbe, "taskq_create failed"); 476 goto attach_fail; 477 } 478 ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ; 479 480 /* 481 * Initialize driver parameters 482 */ 483 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) { 484 ixgbe_error(ixgbe, "Failed to initialize driver settings"); 485 goto attach_fail; 486 } 487 488 /* 489 * Initialize mutexes for this device. 490 * Do this before enabling the interrupt handler and 491 * register the softint to avoid the condition where 492 * interrupt handler can try using uninitialized mutex. 493 */ 494 ixgbe_init_locks(ixgbe); 495 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS; 496 497 /* 498 * Initialize chipset hardware 499 */ 500 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) { 501 ixgbe_error(ixgbe, "Failed to initialize adapter"); 502 goto attach_fail; 503 } 504 ixgbe->link_check_complete = B_FALSE; 505 ixgbe->link_check_hrtime = gethrtime() + 506 (IXGBE_LINK_UP_TIME * 100000000ULL); 507 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT; 508 509 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) { 510 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 511 goto attach_fail; 512 } 513 514 /* 515 * Initialize statistics 516 */ 517 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) { 518 ixgbe_error(ixgbe, "Failed to initialize statistics"); 519 goto attach_fail; 520 } 521 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS; 522 523 /* 524 * Register the driver to the MAC 525 */ 526 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) { 527 ixgbe_error(ixgbe, "Failed to register MAC"); 528 goto attach_fail; 529 } 530 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN); 531 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC; 532 533 ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe, 534 IXGBE_CYCLIC_PERIOD, DDI_IPL_0); 535 if (ixgbe->periodic_id == 0) { 536 ixgbe_error(ixgbe, "Failed to add the link check timer"); 537 goto attach_fail; 538 } 539 ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER; 540 541 /* 542 * Now that mutex locks are initialized, and the chip is also 543 * initialized, enable interrupts. 544 */ 545 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { 546 ixgbe_error(ixgbe, "Failed to enable DDI interrupts"); 547 goto attach_fail; 548 } 549 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 550 551 ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version); 552 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED); 553 554 return (DDI_SUCCESS); 555 556 attach_fail: 557 ixgbe_unconfigure(devinfo, ixgbe); 558 return (DDI_FAILURE); 559 } 560 561 /* 562 * ixgbe_detach - Driver detach. 563 * 564 * The detach() function is the complement of the attach routine. 565 * If cmd is set to DDI_DETACH, detach() is used to remove the 566 * state associated with a given instance of a device node 567 * prior to the removal of that instance from the system. 568 * 569 * The detach() function will be called once for each instance 570 * of the device for which there has been a successful attach() 571 * once there are no longer any opens on the device. 572 * 573 * Interrupts routine are disabled, All memory allocated by this 574 * driver are freed. 575 */ 576 static int 577 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 578 { 579 ixgbe_t *ixgbe; 580 581 /* 582 * Check detach command 583 */ 584 switch (cmd) { 585 default: 586 return (DDI_FAILURE); 587 588 case DDI_SUSPEND: 589 return (ixgbe_suspend(devinfo)); 590 591 case DDI_DETACH: 592 break; 593 } 594 595 /* 596 * Get the pointer to the driver private data structure 597 */ 598 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); 599 if (ixgbe == NULL) 600 return (DDI_FAILURE); 601 602 /* 603 * If the device is still running, it needs to be stopped first. 604 * This check is necessary because under some specific circumstances, 605 * the detach routine can be called without stopping the interface 606 * first. 607 */ 608 if (ixgbe->ixgbe_state & IXGBE_STARTED) { 609 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED); 610 mutex_enter(&ixgbe->gen_lock); 611 ixgbe_stop(ixgbe, B_TRUE); 612 mutex_exit(&ixgbe->gen_lock); 613 /* Disable and stop the watchdog timer */ 614 ixgbe_disable_watchdog_timer(ixgbe); 615 } 616 617 /* 618 * Check if there are still rx buffers held by the upper layer. 619 * If so, fail the detach. 620 */ 621 if (!ixgbe_rx_drain(ixgbe)) 622 return (DDI_FAILURE); 623 624 /* 625 * Do the remaining unconfigure routines 626 */ 627 ixgbe_unconfigure(devinfo, ixgbe); 628 629 return (DDI_SUCCESS); 630 } 631 632 static void 633 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe) 634 { 635 /* 636 * Disable interrupt 637 */ 638 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 639 (void) ixgbe_disable_intrs(ixgbe); 640 } 641 642 /* 643 * remove the link check timer 644 */ 645 if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) { 646 if (ixgbe->periodic_id != NULL) { 647 ddi_periodic_delete(ixgbe->periodic_id); 648 ixgbe->periodic_id = NULL; 649 } 650 } 651 652 /* 653 * Unregister MAC 654 */ 655 if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) { 656 (void) mac_unregister(ixgbe->mac_hdl); 657 } 658 659 /* 660 * Free statistics 661 */ 662 if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) { 663 kstat_delete((kstat_t *)ixgbe->ixgbe_ks); 664 } 665 666 /* 667 * Remove interrupt handlers 668 */ 669 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 670 ixgbe_rem_intr_handlers(ixgbe); 671 } 672 673 /* 674 * Remove taskq for sfp-status-change 675 */ 676 if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) { 677 ddi_taskq_destroy(ixgbe->sfp_taskq); 678 } 679 680 /* 681 * Remove interrupts 682 */ 683 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) { 684 ixgbe_rem_intrs(ixgbe); 685 } 686 687 /* 688 * Unregister interrupt callback handler 689 */ 690 (void) ddi_cb_unregister(ixgbe->cb_hdl); 691 692 /* 693 * Remove driver properties 694 */ 695 if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) { 696 (void) ddi_prop_remove_all(devinfo); 697 } 698 699 /* 700 * Stop the chipset 701 */ 702 if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) { 703 mutex_enter(&ixgbe->gen_lock); 704 ixgbe_chip_stop(ixgbe); 705 mutex_exit(&ixgbe->gen_lock); 706 } 707 708 /* 709 * Free register handle 710 */ 711 if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) { 712 if (ixgbe->osdep.reg_handle != NULL) 713 ddi_regs_map_free(&ixgbe->osdep.reg_handle); 714 } 715 716 /* 717 * Free PCI config handle 718 */ 719 if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) { 720 if (ixgbe->osdep.cfg_handle != NULL) 721 pci_config_teardown(&ixgbe->osdep.cfg_handle); 722 } 723 724 /* 725 * Free locks 726 */ 727 if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) { 728 ixgbe_destroy_locks(ixgbe); 729 } 730 731 /* 732 * Free the rx/tx rings 733 */ 734 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) { 735 ixgbe_free_rings(ixgbe); 736 } 737 738 /* 739 * Unregister FMA capabilities 740 */ 741 if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) { 742 ixgbe_fm_fini(ixgbe); 743 } 744 745 /* 746 * Free the driver data structure 747 */ 748 kmem_free(ixgbe, sizeof (ixgbe_t)); 749 750 ddi_set_driver_private(devinfo, NULL); 751 } 752 753 /* 754 * ixgbe_register_mac - Register the driver and its function pointers with 755 * the GLD interface. 756 */ 757 static int 758 ixgbe_register_mac(ixgbe_t *ixgbe) 759 { 760 struct ixgbe_hw *hw = &ixgbe->hw; 761 mac_register_t *mac; 762 int status; 763 764 if ((mac = mac_alloc(MAC_VERSION)) == NULL) 765 return (IXGBE_FAILURE); 766 767 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; 768 mac->m_driver = ixgbe; 769 mac->m_dip = ixgbe->dip; 770 mac->m_src_addr = hw->mac.addr; 771 mac->m_callbacks = &ixgbe_m_callbacks; 772 mac->m_min_sdu = 0; 773 mac->m_max_sdu = ixgbe->default_mtu; 774 mac->m_margin = VLAN_TAGSZ; 775 mac->m_priv_props = ixgbe_priv_props; 776 mac->m_v12n = MAC_VIRT_LEVEL1; 777 778 status = mac_register(mac, &ixgbe->mac_hdl); 779 780 mac_free(mac); 781 782 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE); 783 } 784 785 /* 786 * ixgbe_identify_hardware - Identify the type of the chipset. 787 */ 788 static int 789 ixgbe_identify_hardware(ixgbe_t *ixgbe) 790 { 791 struct ixgbe_hw *hw = &ixgbe->hw; 792 struct ixgbe_osdep *osdep = &ixgbe->osdep; 793 794 /* 795 * Get the device id 796 */ 797 hw->vendor_id = 798 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID); 799 hw->device_id = 800 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID); 801 hw->revision_id = 802 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID); 803 hw->subsystem_device_id = 804 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID); 805 hw->subsystem_vendor_id = 806 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID); 807 808 /* 809 * Set the mac type of the adapter based on the device id 810 */ 811 if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) { 812 return (IXGBE_FAILURE); 813 } 814 815 /* 816 * Install adapter capabilities 817 */ 818 switch (hw->mac.type) { 819 case ixgbe_mac_82598EB: 820 ixgbe_log(ixgbe, "identify 82598 adapter\n"); 821 ixgbe->capab = &ixgbe_82598eb_cap; 822 823 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) { 824 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 825 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1; 826 } 827 ixgbe->capab->other_intr |= IXGBE_EICR_LSC; 828 829 break; 830 case ixgbe_mac_82599EB: 831 ixgbe_log(ixgbe, "identify 82599 adapter\n"); 832 ixgbe->capab = &ixgbe_82599eb_cap; 833 834 ixgbe->capab->other_intr = (IXGBE_EICR_GPI_SDP1 | 835 IXGBE_EICR_GPI_SDP2 | IXGBE_EICR_LSC); 836 837 break; 838 default: 839 ixgbe_log(ixgbe, 840 "adapter not supported in ixgbe_identify_hardware(): %d\n", 841 hw->mac.type); 842 return (IXGBE_FAILURE); 843 } 844 845 return (IXGBE_SUCCESS); 846 } 847 848 /* 849 * ixgbe_regs_map - Map the device registers. 850 * 851 */ 852 static int 853 ixgbe_regs_map(ixgbe_t *ixgbe) 854 { 855 dev_info_t *devinfo = ixgbe->dip; 856 struct ixgbe_hw *hw = &ixgbe->hw; 857 struct ixgbe_osdep *osdep = &ixgbe->osdep; 858 off_t mem_size; 859 860 /* 861 * First get the size of device registers to be mapped. 862 */ 863 if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size) 864 != DDI_SUCCESS) { 865 return (IXGBE_FAILURE); 866 } 867 868 /* 869 * Call ddi_regs_map_setup() to map registers 870 */ 871 if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET, 872 (caddr_t *)&hw->hw_addr, 0, 873 mem_size, &ixgbe_regs_acc_attr, 874 &osdep->reg_handle)) != DDI_SUCCESS) { 875 return (IXGBE_FAILURE); 876 } 877 878 return (IXGBE_SUCCESS); 879 } 880 881 /* 882 * ixgbe_init_properties - Initialize driver properties. 883 */ 884 static void 885 ixgbe_init_properties(ixgbe_t *ixgbe) 886 { 887 /* 888 * Get conf file properties, including link settings 889 * jumbo frames, ring number, descriptor number, etc. 890 */ 891 ixgbe_get_conf(ixgbe); 892 893 ixgbe_init_params(ixgbe); 894 } 895 896 /* 897 * ixgbe_init_driver_settings - Initialize driver settings. 898 * 899 * The settings include hardware function pointers, bus information, 900 * rx/tx rings settings, link state, and any other parameters that 901 * need to be setup during driver initialization. 902 */ 903 static int 904 ixgbe_init_driver_settings(ixgbe_t *ixgbe) 905 { 906 struct ixgbe_hw *hw = &ixgbe->hw; 907 dev_info_t *devinfo = ixgbe->dip; 908 ixgbe_rx_ring_t *rx_ring; 909 ixgbe_rx_group_t *rx_group; 910 ixgbe_tx_ring_t *tx_ring; 911 uint32_t rx_size; 912 uint32_t tx_size; 913 uint32_t ring_per_group; 914 int i; 915 916 /* 917 * Initialize chipset specific hardware function pointers 918 */ 919 if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) { 920 return (IXGBE_FAILURE); 921 } 922 923 /* 924 * Get the system page size 925 */ 926 ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1); 927 928 /* 929 * Set rx buffer size 930 * 931 * The IP header alignment room is counted in the calculation. 932 * The rx buffer size is in unit of 1K that is required by the 933 * chipset hardware. 934 */ 935 rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM; 936 ixgbe->rx_buf_size = ((rx_size >> 10) + 937 ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10; 938 939 /* 940 * Set tx buffer size 941 */ 942 tx_size = ixgbe->max_frame_size; 943 ixgbe->tx_buf_size = ((tx_size >> 10) + 944 ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10; 945 946 /* 947 * Initialize rx/tx rings/groups parameters 948 */ 949 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 950 for (i = 0; i < ixgbe->num_rx_rings; i++) { 951 rx_ring = &ixgbe->rx_rings[i]; 952 rx_ring->index = i; 953 rx_ring->ixgbe = ixgbe; 954 rx_ring->group_index = i / ring_per_group; 955 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i); 956 } 957 958 for (i = 0; i < ixgbe->num_rx_groups; i++) { 959 rx_group = &ixgbe->rx_groups[i]; 960 rx_group->index = i; 961 rx_group->ixgbe = ixgbe; 962 } 963 964 for (i = 0; i < ixgbe->num_tx_rings; i++) { 965 tx_ring = &ixgbe->tx_rings[i]; 966 tx_ring->index = i; 967 tx_ring->ixgbe = ixgbe; 968 if (ixgbe->tx_head_wb_enable) 969 tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb; 970 else 971 tx_ring->tx_recycle = ixgbe_tx_recycle_legacy; 972 973 tx_ring->ring_size = ixgbe->tx_ring_size; 974 tx_ring->free_list_size = ixgbe->tx_ring_size + 975 (ixgbe->tx_ring_size >> 1); 976 } 977 978 /* 979 * Initialize values of interrupt throttling rate 980 */ 981 for (i = 1; i < MAX_INTR_VECTOR; i++) 982 ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0]; 983 984 /* 985 * The initial link state should be "unknown" 986 */ 987 ixgbe->link_state = LINK_STATE_UNKNOWN; 988 989 return (IXGBE_SUCCESS); 990 } 991 992 /* 993 * ixgbe_init_locks - Initialize locks. 994 */ 995 static void 996 ixgbe_init_locks(ixgbe_t *ixgbe) 997 { 998 ixgbe_rx_ring_t *rx_ring; 999 ixgbe_tx_ring_t *tx_ring; 1000 int i; 1001 1002 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1003 rx_ring = &ixgbe->rx_rings[i]; 1004 mutex_init(&rx_ring->rx_lock, NULL, 1005 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1006 } 1007 1008 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1009 tx_ring = &ixgbe->tx_rings[i]; 1010 mutex_init(&tx_ring->tx_lock, NULL, 1011 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1012 mutex_init(&tx_ring->recycle_lock, NULL, 1013 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1014 mutex_init(&tx_ring->tcb_head_lock, NULL, 1015 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1016 mutex_init(&tx_ring->tcb_tail_lock, NULL, 1017 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1018 } 1019 1020 mutex_init(&ixgbe->gen_lock, NULL, 1021 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1022 1023 mutex_init(&ixgbe->watchdog_lock, NULL, 1024 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri)); 1025 } 1026 1027 /* 1028 * ixgbe_destroy_locks - Destroy locks. 1029 */ 1030 static void 1031 ixgbe_destroy_locks(ixgbe_t *ixgbe) 1032 { 1033 ixgbe_rx_ring_t *rx_ring; 1034 ixgbe_tx_ring_t *tx_ring; 1035 int i; 1036 1037 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1038 rx_ring = &ixgbe->rx_rings[i]; 1039 mutex_destroy(&rx_ring->rx_lock); 1040 } 1041 1042 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1043 tx_ring = &ixgbe->tx_rings[i]; 1044 mutex_destroy(&tx_ring->tx_lock); 1045 mutex_destroy(&tx_ring->recycle_lock); 1046 mutex_destroy(&tx_ring->tcb_head_lock); 1047 mutex_destroy(&tx_ring->tcb_tail_lock); 1048 } 1049 1050 mutex_destroy(&ixgbe->gen_lock); 1051 mutex_destroy(&ixgbe->watchdog_lock); 1052 } 1053 1054 static int 1055 ixgbe_resume(dev_info_t *devinfo) 1056 { 1057 ixgbe_t *ixgbe; 1058 int i; 1059 1060 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); 1061 if (ixgbe == NULL) 1062 return (DDI_FAILURE); 1063 1064 mutex_enter(&ixgbe->gen_lock); 1065 1066 if (ixgbe->ixgbe_state & IXGBE_STARTED) { 1067 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 1068 mutex_exit(&ixgbe->gen_lock); 1069 return (DDI_FAILURE); 1070 } 1071 1072 /* 1073 * Enable and start the watchdog timer 1074 */ 1075 ixgbe_enable_watchdog_timer(ixgbe); 1076 } 1077 1078 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED); 1079 1080 if (ixgbe->ixgbe_state & IXGBE_STARTED) { 1081 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1082 mac_tx_ring_update(ixgbe->mac_hdl, 1083 ixgbe->tx_rings[i].ring_handle); 1084 } 1085 } 1086 1087 mutex_exit(&ixgbe->gen_lock); 1088 1089 return (DDI_SUCCESS); 1090 } 1091 1092 static int 1093 ixgbe_suspend(dev_info_t *devinfo) 1094 { 1095 ixgbe_t *ixgbe; 1096 1097 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo); 1098 if (ixgbe == NULL) 1099 return (DDI_FAILURE); 1100 1101 mutex_enter(&ixgbe->gen_lock); 1102 1103 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED); 1104 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) { 1105 mutex_exit(&ixgbe->gen_lock); 1106 return (DDI_SUCCESS); 1107 } 1108 ixgbe_stop(ixgbe, B_FALSE); 1109 1110 mutex_exit(&ixgbe->gen_lock); 1111 1112 /* 1113 * Disable and stop the watchdog timer 1114 */ 1115 ixgbe_disable_watchdog_timer(ixgbe); 1116 1117 return (DDI_SUCCESS); 1118 } 1119 1120 /* 1121 * ixgbe_init - Initialize the device. 1122 */ 1123 static int 1124 ixgbe_init(ixgbe_t *ixgbe) 1125 { 1126 struct ixgbe_hw *hw = &ixgbe->hw; 1127 1128 mutex_enter(&ixgbe->gen_lock); 1129 1130 /* 1131 * Reset chipset to put the hardware in a known state 1132 * before we try to do anything with the eeprom. 1133 */ 1134 if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) { 1135 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1136 goto init_fail; 1137 } 1138 1139 /* 1140 * Need to init eeprom before validating the checksum. 1141 */ 1142 if (ixgbe_init_eeprom_params(hw) < 0) { 1143 ixgbe_error(ixgbe, 1144 "Unable to intitialize the eeprom interface."); 1145 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1146 goto init_fail; 1147 } 1148 1149 /* 1150 * NVM validation 1151 */ 1152 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 1153 /* 1154 * Some PCI-E parts fail the first check due to 1155 * the link being in sleep state. Call it again, 1156 * if it fails a second time it's a real issue. 1157 */ 1158 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 1159 ixgbe_error(ixgbe, 1160 "Invalid NVM checksum. Please contact " 1161 "the vendor to update the NVM."); 1162 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1163 goto init_fail; 1164 } 1165 } 1166 1167 /* 1168 * Setup default flow control thresholds - enable/disable 1169 * & flow control type is controlled by ixgbe.conf 1170 */ 1171 hw->fc.high_water = DEFAULT_FCRTH; 1172 hw->fc.low_water = DEFAULT_FCRTL; 1173 hw->fc.pause_time = DEFAULT_FCPAUSE; 1174 hw->fc.send_xon = B_TRUE; 1175 1176 /* 1177 * Initialize link settings 1178 */ 1179 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE); 1180 1181 /* 1182 * Initialize the chipset hardware 1183 */ 1184 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) { 1185 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1186 goto init_fail; 1187 } 1188 1189 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 1190 goto init_fail; 1191 } 1192 1193 mutex_exit(&ixgbe->gen_lock); 1194 return (IXGBE_SUCCESS); 1195 1196 init_fail: 1197 /* 1198 * Reset PHY 1199 */ 1200 (void) ixgbe_reset_phy(hw); 1201 1202 mutex_exit(&ixgbe->gen_lock); 1203 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 1204 return (IXGBE_FAILURE); 1205 } 1206 1207 /* 1208 * ixgbe_chip_start - Initialize and start the chipset hardware. 1209 */ 1210 static int 1211 ixgbe_chip_start(ixgbe_t *ixgbe) 1212 { 1213 struct ixgbe_hw *hw = &ixgbe->hw; 1214 int ret_val, i; 1215 1216 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1217 1218 /* 1219 * Get the mac address 1220 * This function should handle SPARC case correctly. 1221 */ 1222 if (!ixgbe_find_mac_address(ixgbe)) { 1223 ixgbe_error(ixgbe, "Failed to get the mac address"); 1224 return (IXGBE_FAILURE); 1225 } 1226 1227 /* 1228 * Validate the mac address 1229 */ 1230 (void) ixgbe_init_rx_addrs(hw); 1231 if (!is_valid_mac_addr(hw->mac.addr)) { 1232 ixgbe_error(ixgbe, "Invalid mac address"); 1233 return (IXGBE_FAILURE); 1234 } 1235 1236 /* 1237 * Configure/Initialize hardware 1238 */ 1239 ret_val = ixgbe_init_hw(hw); 1240 if (ret_val != IXGBE_SUCCESS) { 1241 if (ret_val == IXGBE_ERR_EEPROM_VERSION) { 1242 ixgbe_error(ixgbe, 1243 "This 82599 device is pre-release and contains" 1244 " outdated firmware, please contact your hardware" 1245 " vendor for a replacement."); 1246 } else { 1247 ixgbe_error(ixgbe, "Failed to initialize hardware"); 1248 return (IXGBE_FAILURE); 1249 } 1250 } 1251 1252 /* 1253 * Setup adapter interrupt vectors 1254 */ 1255 ixgbe_setup_adapter_vector(ixgbe); 1256 1257 /* 1258 * Initialize unicast addresses. 1259 */ 1260 ixgbe_init_unicst(ixgbe); 1261 1262 /* 1263 * Setup and initialize the mctable structures. 1264 */ 1265 ixgbe_setup_multicst(ixgbe); 1266 1267 /* 1268 * Set interrupt throttling rate 1269 */ 1270 for (i = 0; i < ixgbe->intr_cnt; i++) { 1271 IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]); 1272 } 1273 1274 /* 1275 * Save the state of the phy 1276 */ 1277 ixgbe_get_hw_state(ixgbe); 1278 1279 /* 1280 * Make sure driver has control 1281 */ 1282 ixgbe_get_driver_control(hw); 1283 1284 return (IXGBE_SUCCESS); 1285 } 1286 1287 /* 1288 * ixgbe_chip_stop - Stop the chipset hardware 1289 */ 1290 static void 1291 ixgbe_chip_stop(ixgbe_t *ixgbe) 1292 { 1293 struct ixgbe_hw *hw = &ixgbe->hw; 1294 1295 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1296 1297 /* 1298 * Tell firmware driver is no longer in control 1299 */ 1300 ixgbe_release_driver_control(hw); 1301 1302 /* 1303 * Reset the chipset 1304 */ 1305 (void) ixgbe_reset_hw(hw); 1306 1307 /* 1308 * Reset PHY 1309 */ 1310 (void) ixgbe_reset_phy(hw); 1311 } 1312 1313 /* 1314 * ixgbe_reset - Reset the chipset and re-start the driver. 1315 * 1316 * It involves stopping and re-starting the chipset, 1317 * and re-configuring the rx/tx rings. 1318 */ 1319 static int 1320 ixgbe_reset(ixgbe_t *ixgbe) 1321 { 1322 int i; 1323 1324 /* 1325 * Disable and stop the watchdog timer 1326 */ 1327 ixgbe_disable_watchdog_timer(ixgbe); 1328 1329 mutex_enter(&ixgbe->gen_lock); 1330 1331 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED); 1332 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED); 1333 1334 ixgbe_stop(ixgbe, B_FALSE); 1335 1336 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 1337 mutex_exit(&ixgbe->gen_lock); 1338 return (IXGBE_FAILURE); 1339 } 1340 1341 /* 1342 * After resetting, need to recheck the link status. 1343 */ 1344 ixgbe->link_check_complete = B_FALSE; 1345 ixgbe->link_check_hrtime = gethrtime() + 1346 (IXGBE_LINK_UP_TIME * 100000000ULL); 1347 1348 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED); 1349 1350 if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) { 1351 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1352 mac_tx_ring_update(ixgbe->mac_hdl, 1353 ixgbe->tx_rings[i].ring_handle); 1354 } 1355 } 1356 1357 mutex_exit(&ixgbe->gen_lock); 1358 1359 /* 1360 * Enable and start the watchdog timer 1361 */ 1362 ixgbe_enable_watchdog_timer(ixgbe); 1363 1364 return (IXGBE_SUCCESS); 1365 } 1366 1367 /* 1368 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources. 1369 */ 1370 static void 1371 ixgbe_tx_clean(ixgbe_t *ixgbe) 1372 { 1373 ixgbe_tx_ring_t *tx_ring; 1374 tx_control_block_t *tcb; 1375 link_list_t pending_list; 1376 uint32_t desc_num; 1377 int i, j; 1378 1379 LINK_LIST_INIT(&pending_list); 1380 1381 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1382 tx_ring = &ixgbe->tx_rings[i]; 1383 1384 mutex_enter(&tx_ring->recycle_lock); 1385 1386 /* 1387 * Clean the pending tx data - the pending packets in the 1388 * work_list that have no chances to be transmitted again. 1389 * 1390 * We must ensure the chipset is stopped or the link is down 1391 * before cleaning the transmit packets. 1392 */ 1393 desc_num = 0; 1394 for (j = 0; j < tx_ring->ring_size; j++) { 1395 tcb = tx_ring->work_list[j]; 1396 if (tcb != NULL) { 1397 desc_num += tcb->desc_num; 1398 1399 tx_ring->work_list[j] = NULL; 1400 1401 ixgbe_free_tcb(tcb); 1402 1403 LIST_PUSH_TAIL(&pending_list, &tcb->link); 1404 } 1405 } 1406 1407 if (desc_num > 0) { 1408 atomic_add_32(&tx_ring->tbd_free, desc_num); 1409 ASSERT(tx_ring->tbd_free == tx_ring->ring_size); 1410 1411 /* 1412 * Reset the head and tail pointers of the tbd ring; 1413 * Reset the writeback head if it's enable. 1414 */ 1415 tx_ring->tbd_head = 0; 1416 tx_ring->tbd_tail = 0; 1417 if (ixgbe->tx_head_wb_enable) 1418 *tx_ring->tbd_head_wb = 0; 1419 1420 IXGBE_WRITE_REG(&ixgbe->hw, 1421 IXGBE_TDH(tx_ring->index), 0); 1422 IXGBE_WRITE_REG(&ixgbe->hw, 1423 IXGBE_TDT(tx_ring->index), 0); 1424 } 1425 1426 mutex_exit(&tx_ring->recycle_lock); 1427 1428 /* 1429 * Add the tx control blocks in the pending list to 1430 * the free list. 1431 */ 1432 ixgbe_put_free_list(tx_ring, &pending_list); 1433 } 1434 } 1435 1436 /* 1437 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be 1438 * transmitted. 1439 */ 1440 static boolean_t 1441 ixgbe_tx_drain(ixgbe_t *ixgbe) 1442 { 1443 ixgbe_tx_ring_t *tx_ring; 1444 boolean_t done; 1445 int i, j; 1446 1447 /* 1448 * Wait for a specific time to allow pending tx packets 1449 * to be transmitted. 1450 * 1451 * Check the counter tbd_free to see if transmission is done. 1452 * No lock protection is needed here. 1453 * 1454 * Return B_TRUE if all pending packets have been transmitted; 1455 * Otherwise return B_FALSE; 1456 */ 1457 for (i = 0; i < TX_DRAIN_TIME; i++) { 1458 1459 done = B_TRUE; 1460 for (j = 0; j < ixgbe->num_tx_rings; j++) { 1461 tx_ring = &ixgbe->tx_rings[j]; 1462 done = done && 1463 (tx_ring->tbd_free == tx_ring->ring_size); 1464 } 1465 1466 if (done) 1467 break; 1468 1469 msec_delay(1); 1470 } 1471 1472 return (done); 1473 } 1474 1475 /* 1476 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer. 1477 */ 1478 static boolean_t 1479 ixgbe_rx_drain(ixgbe_t *ixgbe) 1480 { 1481 boolean_t done = B_TRUE; 1482 int i; 1483 1484 /* 1485 * Polling the rx free list to check if those rx buffers held by 1486 * the upper layer are released. 1487 * 1488 * Check the counter rcb_free to see if all pending buffers are 1489 * released. No lock protection is needed here. 1490 * 1491 * Return B_TRUE if all pending buffers have been released; 1492 * Otherwise return B_FALSE; 1493 */ 1494 for (i = 0; i < RX_DRAIN_TIME; i++) { 1495 done = (ixgbe->rcb_pending == 0); 1496 1497 if (done) 1498 break; 1499 1500 msec_delay(1); 1501 } 1502 1503 return (done); 1504 } 1505 1506 /* 1507 * ixgbe_start - Start the driver/chipset. 1508 */ 1509 int 1510 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer) 1511 { 1512 int i; 1513 1514 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1515 1516 if (alloc_buffer) { 1517 if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) { 1518 ixgbe_error(ixgbe, 1519 "Failed to allocate software receive rings"); 1520 return (IXGBE_FAILURE); 1521 } 1522 1523 /* Allocate buffers for all the rx/tx rings */ 1524 if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) { 1525 ixgbe_error(ixgbe, "Failed to allocate DMA resource"); 1526 return (IXGBE_FAILURE); 1527 } 1528 1529 ixgbe->tx_ring_init = B_TRUE; 1530 } else { 1531 ixgbe->tx_ring_init = B_FALSE; 1532 } 1533 1534 for (i = 0; i < ixgbe->num_rx_rings; i++) 1535 mutex_enter(&ixgbe->rx_rings[i].rx_lock); 1536 for (i = 0; i < ixgbe->num_tx_rings; i++) 1537 mutex_enter(&ixgbe->tx_rings[i].tx_lock); 1538 1539 /* 1540 * Start the chipset hardware 1541 */ 1542 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) { 1543 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE); 1544 goto start_failure; 1545 } 1546 1547 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 1548 goto start_failure; 1549 } 1550 1551 /* 1552 * Setup the rx/tx rings 1553 */ 1554 ixgbe_setup_rings(ixgbe); 1555 1556 /* 1557 * ixgbe_start() will be called when resetting, however if reset 1558 * happens, we need to clear the ERROR and STALL flags before 1559 * enabling the interrupts. 1560 */ 1561 atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR | IXGBE_STALL)); 1562 1563 /* 1564 * Enable adapter interrupts 1565 * The interrupts must be enabled after the driver state is START 1566 */ 1567 ixgbe_enable_adapter_interrupts(ixgbe); 1568 1569 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) 1570 mutex_exit(&ixgbe->tx_rings[i].tx_lock); 1571 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) 1572 mutex_exit(&ixgbe->rx_rings[i].rx_lock); 1573 1574 return (IXGBE_SUCCESS); 1575 1576 start_failure: 1577 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) 1578 mutex_exit(&ixgbe->tx_rings[i].tx_lock); 1579 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) 1580 mutex_exit(&ixgbe->rx_rings[i].rx_lock); 1581 1582 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 1583 1584 return (IXGBE_FAILURE); 1585 } 1586 1587 /* 1588 * ixgbe_stop - Stop the driver/chipset. 1589 */ 1590 void 1591 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer) 1592 { 1593 int i; 1594 1595 ASSERT(mutex_owned(&ixgbe->gen_lock)); 1596 1597 /* 1598 * Disable the adapter interrupts 1599 */ 1600 ixgbe_disable_adapter_interrupts(ixgbe); 1601 1602 /* 1603 * Drain the pending tx packets 1604 */ 1605 (void) ixgbe_tx_drain(ixgbe); 1606 1607 for (i = 0; i < ixgbe->num_rx_rings; i++) 1608 mutex_enter(&ixgbe->rx_rings[i].rx_lock); 1609 for (i = 0; i < ixgbe->num_tx_rings; i++) 1610 mutex_enter(&ixgbe->tx_rings[i].tx_lock); 1611 1612 /* 1613 * Stop the chipset hardware 1614 */ 1615 ixgbe_chip_stop(ixgbe); 1616 1617 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 1618 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 1619 } 1620 1621 /* 1622 * Clean the pending tx data/resources 1623 */ 1624 ixgbe_tx_clean(ixgbe); 1625 1626 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--) 1627 mutex_exit(&ixgbe->tx_rings[i].tx_lock); 1628 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--) 1629 mutex_exit(&ixgbe->rx_rings[i].rx_lock); 1630 1631 if (ixgbe->link_state == LINK_STATE_UP) { 1632 ixgbe->link_state = LINK_STATE_UNKNOWN; 1633 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 1634 } 1635 1636 if (free_buffer) { 1637 /* 1638 * Release the DMA/memory resources of rx/tx rings 1639 */ 1640 ixgbe_free_dma(ixgbe); 1641 ixgbe_free_rx_data(ixgbe); 1642 } 1643 } 1644 1645 /* 1646 * ixgbe_cbfunc - Driver interface for generic DDI callbacks 1647 */ 1648 /* ARGSUSED */ 1649 static int 1650 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg, 1651 void *arg1, void *arg2) 1652 { 1653 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 1654 1655 switch (cbaction) { 1656 /* IRM callback */ 1657 int count; 1658 case DDI_CB_INTR_ADD: 1659 case DDI_CB_INTR_REMOVE: 1660 count = (int)(uintptr_t)cbarg; 1661 ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX); 1662 DTRACE_PROBE2(ixgbe__irm__callback, int, count, 1663 int, ixgbe->intr_cnt); 1664 if (ixgbe_intr_adjust(ixgbe, cbaction, count) != 1665 DDI_SUCCESS) { 1666 ixgbe_error(ixgbe, 1667 "IRM CB: Failed to adjust interrupts"); 1668 goto cb_fail; 1669 } 1670 break; 1671 default: 1672 IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported", 1673 cbaction); 1674 return (DDI_ENOTSUP); 1675 } 1676 return (DDI_SUCCESS); 1677 cb_fail: 1678 return (DDI_FAILURE); 1679 } 1680 1681 /* 1682 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request. 1683 */ 1684 static int 1685 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count) 1686 { 1687 int i, rc, actual; 1688 1689 if (count == 0) 1690 return (DDI_SUCCESS); 1691 1692 if ((cbaction == DDI_CB_INTR_ADD && 1693 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) || 1694 (cbaction == DDI_CB_INTR_REMOVE && 1695 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min)) 1696 return (DDI_FAILURE); 1697 1698 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) { 1699 return (DDI_FAILURE); 1700 } 1701 1702 for (i = 0; i < ixgbe->num_rx_rings; i++) 1703 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL); 1704 for (i = 0; i < ixgbe->num_tx_rings; i++) 1705 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL); 1706 1707 mutex_enter(&ixgbe->gen_lock); 1708 ixgbe->ixgbe_state &= ~IXGBE_STARTED; 1709 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST; 1710 ixgbe->ixgbe_state |= IXGBE_SUSPENDED; 1711 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN); 1712 1713 ixgbe_stop(ixgbe, B_FALSE); 1714 /* 1715 * Disable interrupts 1716 */ 1717 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 1718 rc = ixgbe_disable_intrs(ixgbe); 1719 ASSERT(rc == IXGBE_SUCCESS); 1720 } 1721 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR; 1722 1723 /* 1724 * Remove interrupt handlers 1725 */ 1726 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 1727 ixgbe_rem_intr_handlers(ixgbe); 1728 } 1729 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR; 1730 1731 /* 1732 * Clear vect_map 1733 */ 1734 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 1735 switch (cbaction) { 1736 case DDI_CB_INTR_ADD: 1737 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable, 1738 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual, 1739 DDI_INTR_ALLOC_NORMAL); 1740 if (rc != DDI_SUCCESS || actual != count) { 1741 ixgbe_log(ixgbe, "Adjust interrupts failed." 1742 "return: %d, irm cb size: %d, actual: %d", 1743 rc, count, actual); 1744 goto intr_adjust_fail; 1745 } 1746 ixgbe->intr_cnt += count; 1747 break; 1748 1749 case DDI_CB_INTR_REMOVE: 1750 for (i = ixgbe->intr_cnt - count; 1751 i < ixgbe->intr_cnt; i ++) { 1752 rc = ddi_intr_free(ixgbe->htable[i]); 1753 ixgbe->htable[i] = NULL; 1754 if (rc != DDI_SUCCESS) { 1755 ixgbe_log(ixgbe, "Adjust interrupts failed." 1756 "return: %d, irm cb size: %d, actual: %d", 1757 rc, count, actual); 1758 goto intr_adjust_fail; 1759 } 1760 } 1761 ixgbe->intr_cnt -= count; 1762 break; 1763 } 1764 1765 /* 1766 * Get priority for first vector, assume remaining are all the same 1767 */ 1768 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 1769 if (rc != DDI_SUCCESS) { 1770 ixgbe_log(ixgbe, 1771 "Get interrupt priority failed: %d", rc); 1772 goto intr_adjust_fail; 1773 } 1774 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 1775 if (rc != DDI_SUCCESS) { 1776 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc); 1777 goto intr_adjust_fail; 1778 } 1779 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; 1780 1781 /* 1782 * Map rings to interrupt vectors 1783 */ 1784 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { 1785 ixgbe_error(ixgbe, 1786 "IRM CB: Failed to map interrupts to vectors"); 1787 goto intr_adjust_fail; 1788 } 1789 1790 /* 1791 * Add interrupt handlers 1792 */ 1793 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { 1794 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers"); 1795 goto intr_adjust_fail; 1796 } 1797 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 1798 1799 /* 1800 * Now that mutex locks are initialized, and the chip is also 1801 * initialized, enable interrupts. 1802 */ 1803 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { 1804 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts"); 1805 goto intr_adjust_fail; 1806 } 1807 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 1808 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 1809 ixgbe_error(ixgbe, "IRM CB: Failed to start"); 1810 goto intr_adjust_fail; 1811 } 1812 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST; 1813 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED; 1814 ixgbe->ixgbe_state |= IXGBE_STARTED; 1815 mutex_exit(&ixgbe->gen_lock); 1816 1817 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1818 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, 1819 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]); 1820 } 1821 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1822 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, 1823 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]); 1824 } 1825 1826 /* Wakeup all Tx rings */ 1827 for (i = 0; i < ixgbe->num_tx_rings; i++) { 1828 mac_tx_ring_update(ixgbe->mac_hdl, 1829 ixgbe->tx_rings[i].ring_handle); 1830 } 1831 1832 IXGBE_DEBUGLOG_3(ixgbe, 1833 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).", 1834 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max); 1835 return (DDI_SUCCESS); 1836 1837 intr_adjust_fail: 1838 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 1839 mutex_exit(&ixgbe->gen_lock); 1840 return (DDI_FAILURE); 1841 } 1842 1843 /* 1844 * ixgbe_intr_cb_register - Register interrupt callback function. 1845 */ 1846 static int 1847 ixgbe_intr_cb_register(ixgbe_t *ixgbe) 1848 { 1849 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc, 1850 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) { 1851 return (IXGBE_FAILURE); 1852 } 1853 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered."); 1854 return (IXGBE_SUCCESS); 1855 } 1856 1857 /* 1858 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings. 1859 */ 1860 static int 1861 ixgbe_alloc_rings(ixgbe_t *ixgbe) 1862 { 1863 /* 1864 * Allocate memory space for rx rings 1865 */ 1866 ixgbe->rx_rings = kmem_zalloc( 1867 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings, 1868 KM_NOSLEEP); 1869 1870 if (ixgbe->rx_rings == NULL) { 1871 return (IXGBE_FAILURE); 1872 } 1873 1874 /* 1875 * Allocate memory space for tx rings 1876 */ 1877 ixgbe->tx_rings = kmem_zalloc( 1878 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings, 1879 KM_NOSLEEP); 1880 1881 if (ixgbe->tx_rings == NULL) { 1882 kmem_free(ixgbe->rx_rings, 1883 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 1884 ixgbe->rx_rings = NULL; 1885 return (IXGBE_FAILURE); 1886 } 1887 1888 /* 1889 * Allocate memory space for rx ring groups 1890 */ 1891 ixgbe->rx_groups = kmem_zalloc( 1892 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups, 1893 KM_NOSLEEP); 1894 1895 if (ixgbe->rx_groups == NULL) { 1896 kmem_free(ixgbe->rx_rings, 1897 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 1898 kmem_free(ixgbe->tx_rings, 1899 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 1900 ixgbe->rx_rings = NULL; 1901 ixgbe->tx_rings = NULL; 1902 return (IXGBE_FAILURE); 1903 } 1904 1905 return (IXGBE_SUCCESS); 1906 } 1907 1908 /* 1909 * ixgbe_free_rings - Free the memory space of rx/tx rings. 1910 */ 1911 static void 1912 ixgbe_free_rings(ixgbe_t *ixgbe) 1913 { 1914 if (ixgbe->rx_rings != NULL) { 1915 kmem_free(ixgbe->rx_rings, 1916 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 1917 ixgbe->rx_rings = NULL; 1918 } 1919 1920 if (ixgbe->tx_rings != NULL) { 1921 kmem_free(ixgbe->tx_rings, 1922 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 1923 ixgbe->tx_rings = NULL; 1924 } 1925 1926 if (ixgbe->rx_groups != NULL) { 1927 kmem_free(ixgbe->rx_groups, 1928 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups); 1929 ixgbe->rx_groups = NULL; 1930 } 1931 } 1932 1933 static int 1934 ixgbe_alloc_rx_data(ixgbe_t *ixgbe) 1935 { 1936 ixgbe_rx_ring_t *rx_ring; 1937 int i; 1938 1939 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1940 rx_ring = &ixgbe->rx_rings[i]; 1941 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS) 1942 goto alloc_rx_rings_failure; 1943 } 1944 return (IXGBE_SUCCESS); 1945 1946 alloc_rx_rings_failure: 1947 ixgbe_free_rx_data(ixgbe); 1948 return (IXGBE_FAILURE); 1949 } 1950 1951 static void 1952 ixgbe_free_rx_data(ixgbe_t *ixgbe) 1953 { 1954 ixgbe_rx_ring_t *rx_ring; 1955 ixgbe_rx_data_t *rx_data; 1956 int i; 1957 1958 for (i = 0; i < ixgbe->num_rx_rings; i++) { 1959 rx_ring = &ixgbe->rx_rings[i]; 1960 1961 mutex_enter(&ixgbe->rx_pending_lock); 1962 rx_data = rx_ring->rx_data; 1963 1964 if (rx_data != NULL) { 1965 rx_data->flag |= IXGBE_RX_STOPPED; 1966 1967 if (rx_data->rcb_pending == 0) { 1968 ixgbe_free_rx_ring_data(rx_data); 1969 rx_ring->rx_data = NULL; 1970 } 1971 } 1972 1973 mutex_exit(&ixgbe->rx_pending_lock); 1974 } 1975 } 1976 1977 /* 1978 * ixgbe_setup_rings - Setup rx/tx rings. 1979 */ 1980 static void 1981 ixgbe_setup_rings(ixgbe_t *ixgbe) 1982 { 1983 /* 1984 * Setup the rx/tx rings, including the following: 1985 * 1986 * 1. Setup the descriptor ring and the control block buffers; 1987 * 2. Initialize necessary registers for receive/transmit; 1988 * 3. Initialize software pointers/parameters for receive/transmit; 1989 */ 1990 ixgbe_setup_rx(ixgbe); 1991 1992 ixgbe_setup_tx(ixgbe); 1993 } 1994 1995 static void 1996 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring) 1997 { 1998 ixgbe_t *ixgbe = rx_ring->ixgbe; 1999 ixgbe_rx_data_t *rx_data = rx_ring->rx_data; 2000 struct ixgbe_hw *hw = &ixgbe->hw; 2001 rx_control_block_t *rcb; 2002 union ixgbe_adv_rx_desc *rbd; 2003 uint32_t size; 2004 uint32_t buf_low; 2005 uint32_t buf_high; 2006 uint32_t reg_val; 2007 int i; 2008 2009 ASSERT(mutex_owned(&rx_ring->rx_lock)); 2010 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2011 2012 for (i = 0; i < ixgbe->rx_ring_size; i++) { 2013 rcb = rx_data->work_list[i]; 2014 rbd = &rx_data->rbd_ring[i]; 2015 2016 rbd->read.pkt_addr = rcb->rx_buf.dma_address; 2017 rbd->read.hdr_addr = NULL; 2018 } 2019 2020 /* 2021 * Initialize the length register 2022 */ 2023 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc); 2024 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size); 2025 2026 /* 2027 * Initialize the base address registers 2028 */ 2029 buf_low = (uint32_t)rx_data->rbd_area.dma_address; 2030 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32); 2031 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high); 2032 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low); 2033 2034 /* 2035 * Setup head & tail pointers 2036 */ 2037 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index), 2038 rx_data->ring_size - 1); 2039 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0); 2040 2041 rx_data->rbd_next = 0; 2042 rx_data->lro_first = 0; 2043 2044 /* 2045 * Setup the Receive Descriptor Control Register (RXDCTL) 2046 * PTHRESH=32 descriptors (half the internal cache) 2047 * HTHRESH=0 descriptors (to minimize latency on fetch) 2048 * WTHRESH defaults to 1 (writeback each descriptor) 2049 */ 2050 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index)); 2051 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */ 2052 2053 /* Not a valid value for 82599 */ 2054 if (hw->mac.type < ixgbe_mac_82599EB) { 2055 reg_val |= 0x0020; /* pthresh */ 2056 } 2057 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val); 2058 2059 if (hw->mac.type == ixgbe_mac_82599EB) { 2060 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2061 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS); 2062 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2063 } 2064 2065 /* 2066 * Setup the Split and Replication Receive Control Register. 2067 * Set the rx buffer size and the advanced descriptor type. 2068 */ 2069 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) | 2070 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2071 reg_val |= IXGBE_SRRCTL_DROP_EN; 2072 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val); 2073 } 2074 2075 static void 2076 ixgbe_setup_rx(ixgbe_t *ixgbe) 2077 { 2078 ixgbe_rx_ring_t *rx_ring; 2079 struct ixgbe_hw *hw = &ixgbe->hw; 2080 uint32_t reg_val; 2081 uint32_t ring_mapping; 2082 uint32_t i, index; 2083 uint32_t psrtype_rss_bit; 2084 2085 /* PSRTYPE must be configured for 82599 */ 2086 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ && 2087 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) { 2088 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2089 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2090 reg_val |= IXGBE_PSRTYPE_L2HDR; 2091 reg_val |= 0x80000000; 2092 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val); 2093 } else { 2094 if (ixgbe->num_rx_groups > 32) { 2095 psrtype_rss_bit = 0x20000000; 2096 } else { 2097 psrtype_rss_bit = 0x40000000; 2098 } 2099 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) { 2100 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2101 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2102 reg_val |= IXGBE_PSRTYPE_L2HDR; 2103 reg_val |= psrtype_rss_bit; 2104 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val); 2105 } 2106 } 2107 2108 /* 2109 * Set filter control in FCTRL to accept broadcast packets and do 2110 * not pass pause frames to host. Flow control settings are already 2111 * in this register, so preserve them. 2112 */ 2113 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2114 reg_val |= IXGBE_FCTRL_BAM; /* broadcast accept mode */ 2115 reg_val |= IXGBE_FCTRL_DPF; /* discard pause frames */ 2116 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val); 2117 2118 /* 2119 * Hardware checksum settings 2120 */ 2121 if (ixgbe->rx_hcksum_enable) { 2122 reg_val = IXGBE_RXCSUM_IPPCSE; /* IP checksum */ 2123 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val); 2124 } 2125 2126 /* 2127 * Setup VMDq and RSS for multiple receive queues 2128 */ 2129 switch (ixgbe->classify_mode) { 2130 case IXGBE_CLASSIFY_RSS: 2131 /* 2132 * One group, only RSS is needed when more than 2133 * one ring enabled. 2134 */ 2135 ixgbe_setup_rss(ixgbe); 2136 break; 2137 2138 case IXGBE_CLASSIFY_VMDQ: 2139 /* 2140 * Multiple groups, each group has one ring, 2141 * only VMDq is needed. 2142 */ 2143 ixgbe_setup_vmdq(ixgbe); 2144 break; 2145 2146 case IXGBE_CLASSIFY_VMDQ_RSS: 2147 /* 2148 * Multiple groups and multiple rings, both 2149 * VMDq and RSS are needed. 2150 */ 2151 ixgbe_setup_vmdq_rss(ixgbe); 2152 break; 2153 2154 default: 2155 break; 2156 } 2157 2158 /* 2159 * Enable the receive unit. This must be done after filter 2160 * control is set in FCTRL. 2161 */ 2162 reg_val = (IXGBE_RXCTRL_RXEN /* Enable Receive Unit */ 2163 | IXGBE_RXCTRL_DMBYPS); /* descriptor monitor bypass */ 2164 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); 2165 2166 /* 2167 * ixgbe_setup_rx_ring must be called after configuring RXCTRL 2168 */ 2169 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2170 rx_ring = &ixgbe->rx_rings[i]; 2171 ixgbe_setup_rx_ring(rx_ring); 2172 } 2173 2174 /* 2175 * Setup the per-ring statistics mapping. 2176 */ 2177 ring_mapping = 0; 2178 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2179 index = ixgbe->rx_rings[i].hw_index; 2180 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2)); 2181 ring_mapping |= (i & 0xF) << (8 * (index & 0x3)); 2182 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping); 2183 } 2184 2185 /* 2186 * The Max Frame Size in MHADD/MAXFRS will be internally increased 2187 * by four bytes if the packet has a VLAN field, so includes MTU, 2188 * ethernet header and frame check sequence. 2189 * Register is MAXFRS in 82599. 2190 */ 2191 reg_val = (ixgbe->default_mtu + sizeof (struct ether_header) 2192 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT; 2193 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val); 2194 2195 /* 2196 * Setup Jumbo Frame enable bit 2197 */ 2198 if (ixgbe->default_mtu > ETHERMTU) { 2199 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2200 reg_val |= IXGBE_HLREG0_JUMBOEN; 2201 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2202 } 2203 2204 /* 2205 * Setup RSC for multiple receive queues. 2206 */ 2207 if (ixgbe->lro_enable) { 2208 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2209 /* 2210 * Make sure rx_buf_size * MAXDESC not greater 2211 * than 65535. 2212 * Intel recommends 4 for MAXDESC field value. 2213 */ 2214 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i)); 2215 reg_val |= IXGBE_RSCCTL_RSCEN; 2216 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k) 2217 reg_val |= IXGBE_RSCCTL_MAXDESC_1; 2218 else 2219 reg_val |= IXGBE_RSCCTL_MAXDESC_4; 2220 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val); 2221 } 2222 2223 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU); 2224 reg_val |= IXGBE_RSCDBU_RSCACKDIS; 2225 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val); 2226 2227 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2228 reg_val |= IXGBE_RDRXCTL_RSCACKC; 2229 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2230 2231 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2232 } 2233 } 2234 2235 static void 2236 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring) 2237 { 2238 ixgbe_t *ixgbe = tx_ring->ixgbe; 2239 struct ixgbe_hw *hw = &ixgbe->hw; 2240 uint32_t size; 2241 uint32_t buf_low; 2242 uint32_t buf_high; 2243 uint32_t reg_val; 2244 2245 ASSERT(mutex_owned(&tx_ring->tx_lock)); 2246 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2247 2248 /* 2249 * Initialize the length register 2250 */ 2251 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc); 2252 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size); 2253 2254 /* 2255 * Initialize the base address registers 2256 */ 2257 buf_low = (uint32_t)tx_ring->tbd_area.dma_address; 2258 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32); 2259 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low); 2260 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high); 2261 2262 /* 2263 * Setup head & tail pointers 2264 */ 2265 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0); 2266 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0); 2267 2268 /* 2269 * Setup head write-back 2270 */ 2271 if (ixgbe->tx_head_wb_enable) { 2272 /* 2273 * The memory of the head write-back is allocated using 2274 * the extra tbd beyond the tail of the tbd ring. 2275 */ 2276 tx_ring->tbd_head_wb = (uint32_t *) 2277 ((uintptr_t)tx_ring->tbd_area.address + size); 2278 *tx_ring->tbd_head_wb = 0; 2279 2280 buf_low = (uint32_t) 2281 (tx_ring->tbd_area.dma_address + size); 2282 buf_high = (uint32_t) 2283 ((tx_ring->tbd_area.dma_address + size) >> 32); 2284 2285 /* Set the head write-back enable bit */ 2286 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE; 2287 2288 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low); 2289 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high); 2290 2291 /* 2292 * Turn off relaxed ordering for head write back or it will 2293 * cause problems with the tx recycling 2294 */ 2295 reg_val = IXGBE_READ_REG(hw, 2296 IXGBE_DCA_TXCTRL(tx_ring->index)); 2297 reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 2298 IXGBE_WRITE_REG(hw, 2299 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val); 2300 } else { 2301 tx_ring->tbd_head_wb = NULL; 2302 } 2303 2304 tx_ring->tbd_head = 0; 2305 tx_ring->tbd_tail = 0; 2306 tx_ring->tbd_free = tx_ring->ring_size; 2307 2308 if (ixgbe->tx_ring_init == B_TRUE) { 2309 tx_ring->tcb_head = 0; 2310 tx_ring->tcb_tail = 0; 2311 tx_ring->tcb_free = tx_ring->free_list_size; 2312 } 2313 2314 /* 2315 * Initialize the s/w context structure 2316 */ 2317 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t)); 2318 } 2319 2320 static void 2321 ixgbe_setup_tx(ixgbe_t *ixgbe) 2322 { 2323 struct ixgbe_hw *hw = &ixgbe->hw; 2324 ixgbe_tx_ring_t *tx_ring; 2325 uint32_t reg_val; 2326 uint32_t ring_mapping; 2327 int i; 2328 2329 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2330 tx_ring = &ixgbe->tx_rings[i]; 2331 ixgbe_setup_tx_ring(tx_ring); 2332 } 2333 2334 /* 2335 * Setup the per-ring statistics mapping. 2336 */ 2337 ring_mapping = 0; 2338 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2339 ring_mapping |= (i & 0xF) << (8 * (i & 0x3)); 2340 if ((i & 0x3) == 0x3) { 2341 if (hw->mac.type >= ixgbe_mac_82599EB) { 2342 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), 2343 ring_mapping); 2344 } else { 2345 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), 2346 ring_mapping); 2347 } 2348 ring_mapping = 0; 2349 } 2350 } 2351 if ((i & 0x3) != 0x3) 2352 if (hw->mac.type >= ixgbe_mac_82599EB) { 2353 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping); 2354 } else { 2355 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping); 2356 } 2357 2358 /* 2359 * Enable CRC appending and TX padding (for short tx frames) 2360 */ 2361 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2362 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN; 2363 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2364 2365 /* 2366 * enable DMA for 82599 parts 2367 */ 2368 if (hw->mac.type == ixgbe_mac_82599EB) { 2369 /* DMATXCTL.TE must be set after all Tx config is complete */ 2370 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2371 reg_val |= IXGBE_DMATXCTL_TE; 2372 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val); 2373 } 2374 2375 /* 2376 * Enabling tx queues .. 2377 * For 82599 must be done after DMATXCTL.TE is set 2378 */ 2379 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2380 tx_ring = &ixgbe->tx_rings[i]; 2381 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index)); 2382 reg_val |= IXGBE_TXDCTL_ENABLE; 2383 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val); 2384 } 2385 } 2386 2387 /* 2388 * ixgbe_setup_rss - Setup receive-side scaling feature. 2389 */ 2390 static void 2391 ixgbe_setup_rss(ixgbe_t *ixgbe) 2392 { 2393 struct ixgbe_hw *hw = &ixgbe->hw; 2394 uint32_t i, mrqc, rxcsum; 2395 uint32_t random; 2396 uint32_t reta; 2397 uint32_t ring_per_group; 2398 2399 /* 2400 * Fill out redirection table 2401 */ 2402 reta = 0; 2403 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2404 2405 for (i = 0; i < 128; i++) { 2406 reta = (reta << 8) | (i % ring_per_group) | 2407 ((i % ring_per_group) << 4); 2408 if ((i & 3) == 3) 2409 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 2410 } 2411 2412 /* 2413 * Fill out hash function seeds with a random constant 2414 */ 2415 for (i = 0; i < 10; i++) { 2416 (void) random_get_pseudo_bytes((uint8_t *)&random, 2417 sizeof (uint32_t)); 2418 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random); 2419 } 2420 2421 /* 2422 * Enable RSS & perform hash on these packet types 2423 */ 2424 mrqc = IXGBE_MRQC_RSSEN | 2425 IXGBE_MRQC_RSS_FIELD_IPV4 | 2426 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2427 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2428 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2429 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2430 IXGBE_MRQC_RSS_FIELD_IPV6 | 2431 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2432 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2433 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2434 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2435 2436 /* 2437 * Disable Packet Checksum to enable RSS for multiple receive queues. 2438 * It is an adapter hardware limitation that Packet Checksum is 2439 * mutually exclusive with RSS. 2440 */ 2441 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2442 rxcsum |= IXGBE_RXCSUM_PCSD; 2443 rxcsum &= ~IXGBE_RXCSUM_IPPCSE; 2444 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 2445 } 2446 2447 /* 2448 * ixgbe_setup_vmdq - Setup MAC classification feature 2449 */ 2450 static void 2451 ixgbe_setup_vmdq(ixgbe_t *ixgbe) 2452 { 2453 struct ixgbe_hw *hw = &ixgbe->hw; 2454 uint32_t vmdctl, i, vtctl; 2455 2456 /* 2457 * Setup the VMDq Control register, enable VMDq based on 2458 * packet destination MAC address: 2459 */ 2460 switch (hw->mac.type) { 2461 case ixgbe_mac_82598EB: 2462 /* 2463 * VMDq Enable = 1; 2464 * VMDq Filter = 0; MAC filtering 2465 * Default VMDq output index = 0; 2466 */ 2467 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2468 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2469 break; 2470 2471 case ixgbe_mac_82599EB: 2472 /* 2473 * Enable VMDq-only. 2474 */ 2475 vmdctl = IXGBE_MRQC_VMDQEN; 2476 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl); 2477 2478 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2479 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2480 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2481 } 2482 2483 /* 2484 * Enable Virtualization and Replication. 2485 */ 2486 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2487 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2488 2489 /* 2490 * Enable receiving packets to all VFs 2491 */ 2492 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2493 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2494 2495 break; 2496 2497 default: 2498 break; 2499 } 2500 } 2501 2502 /* 2503 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature. 2504 */ 2505 static void 2506 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe) 2507 { 2508 struct ixgbe_hw *hw = &ixgbe->hw; 2509 uint32_t i, mrqc, rxcsum; 2510 uint32_t random; 2511 uint32_t reta; 2512 uint32_t ring_per_group; 2513 uint32_t vmdctl, vtctl; 2514 2515 /* 2516 * Fill out redirection table 2517 */ 2518 reta = 0; 2519 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2520 for (i = 0; i < 128; i++) { 2521 reta = (reta << 8) | (i % ring_per_group) | 2522 ((i % ring_per_group) << 4); 2523 if ((i & 3) == 3) 2524 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 2525 } 2526 2527 /* 2528 * Fill out hash function seeds with a random constant 2529 */ 2530 for (i = 0; i < 10; i++) { 2531 (void) random_get_pseudo_bytes((uint8_t *)&random, 2532 sizeof (uint32_t)); 2533 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random); 2534 } 2535 2536 /* 2537 * Enable and setup RSS and VMDq 2538 */ 2539 switch (hw->mac.type) { 2540 case ixgbe_mac_82598EB: 2541 /* 2542 * Enable RSS & Setup RSS Hash functions 2543 */ 2544 mrqc = IXGBE_MRQC_RSSEN | 2545 IXGBE_MRQC_RSS_FIELD_IPV4 | 2546 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2547 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2548 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2549 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2550 IXGBE_MRQC_RSS_FIELD_IPV6 | 2551 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2552 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2553 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2554 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2555 2556 /* 2557 * Enable and Setup VMDq 2558 * VMDq Filter = 0; MAC filtering 2559 * Default VMDq output index = 0; 2560 */ 2561 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2562 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2563 break; 2564 2565 case ixgbe_mac_82599EB: 2566 /* 2567 * Enable RSS & Setup RSS Hash functions 2568 */ 2569 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 | 2570 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2571 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2572 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2573 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2574 IXGBE_MRQC_RSS_FIELD_IPV6 | 2575 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2576 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2577 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2578 2579 /* 2580 * Enable VMDq+RSS. 2581 */ 2582 if (ixgbe->num_rx_groups > 32) { 2583 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN; 2584 } else { 2585 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN; 2586 } 2587 2588 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2589 2590 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2591 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2592 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2593 } 2594 break; 2595 2596 default: 2597 break; 2598 2599 } 2600 2601 /* 2602 * Disable Packet Checksum to enable RSS for multiple receive queues. 2603 * It is an adapter hardware limitation that Packet Checksum is 2604 * mutually exclusive with RSS. 2605 */ 2606 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2607 rxcsum |= IXGBE_RXCSUM_PCSD; 2608 rxcsum &= ~IXGBE_RXCSUM_IPPCSE; 2609 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 2610 2611 if (hw->mac.type == ixgbe_mac_82599EB) { 2612 /* 2613 * Enable Virtualization and Replication. 2614 */ 2615 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2616 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2617 2618 /* 2619 * Enable receiving packets to all VFs 2620 */ 2621 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2622 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2623 } 2624 } 2625 2626 /* 2627 * ixgbe_init_unicst - Initialize the unicast addresses. 2628 */ 2629 static void 2630 ixgbe_init_unicst(ixgbe_t *ixgbe) 2631 { 2632 struct ixgbe_hw *hw = &ixgbe->hw; 2633 uint8_t *mac_addr; 2634 int slot; 2635 /* 2636 * Here we should consider two situations: 2637 * 2638 * 1. Chipset is initialized at the first time, 2639 * Clear all the multiple unicast addresses. 2640 * 2641 * 2. Chipset is reset 2642 * Recover the multiple unicast addresses from the 2643 * software data structure to the RAR registers. 2644 */ 2645 if (!ixgbe->unicst_init) { 2646 /* 2647 * Initialize the multiple unicast addresses 2648 */ 2649 ixgbe->unicst_total = hw->mac.num_rar_entries; 2650 ixgbe->unicst_avail = ixgbe->unicst_total; 2651 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 2652 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 2653 bzero(mac_addr, ETHERADDRL); 2654 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL); 2655 ixgbe->unicst_addr[slot].mac.set = 0; 2656 } 2657 ixgbe->unicst_init = B_TRUE; 2658 } else { 2659 /* Re-configure the RAR registers */ 2660 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 2661 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 2662 if (ixgbe->unicst_addr[slot].mac.set == 1) { 2663 (void) ixgbe_set_rar(hw, slot, mac_addr, 2664 ixgbe->unicst_addr[slot].mac.group_index, 2665 IXGBE_RAH_AV); 2666 } else { 2667 bzero(mac_addr, ETHERADDRL); 2668 (void) ixgbe_set_rar(hw, slot, mac_addr, 2669 NULL, NULL); 2670 } 2671 } 2672 } 2673 } 2674 2675 /* 2676 * ixgbe_unicst_find - Find the slot for the specified unicast address 2677 */ 2678 int 2679 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr) 2680 { 2681 int slot; 2682 2683 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2684 2685 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 2686 if (bcmp(ixgbe->unicst_addr[slot].mac.addr, 2687 mac_addr, ETHERADDRL) == 0) 2688 return (slot); 2689 } 2690 2691 return (-1); 2692 } 2693 2694 /* 2695 * ixgbe_multicst_add - Add a multicst address. 2696 */ 2697 int 2698 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr) 2699 { 2700 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2701 2702 if ((multiaddr[0] & 01) == 0) { 2703 return (EINVAL); 2704 } 2705 2706 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) { 2707 return (ENOENT); 2708 } 2709 2710 bcopy(multiaddr, 2711 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL); 2712 ixgbe->mcast_count++; 2713 2714 /* 2715 * Update the multicast table in the hardware 2716 */ 2717 ixgbe_setup_multicst(ixgbe); 2718 2719 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 2720 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 2721 return (EIO); 2722 } 2723 2724 return (0); 2725 } 2726 2727 /* 2728 * ixgbe_multicst_remove - Remove a multicst address. 2729 */ 2730 int 2731 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr) 2732 { 2733 int i; 2734 2735 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2736 2737 for (i = 0; i < ixgbe->mcast_count; i++) { 2738 if (bcmp(multiaddr, &ixgbe->mcast_table[i], 2739 ETHERADDRL) == 0) { 2740 for (i++; i < ixgbe->mcast_count; i++) { 2741 ixgbe->mcast_table[i - 1] = 2742 ixgbe->mcast_table[i]; 2743 } 2744 ixgbe->mcast_count--; 2745 break; 2746 } 2747 } 2748 2749 /* 2750 * Update the multicast table in the hardware 2751 */ 2752 ixgbe_setup_multicst(ixgbe); 2753 2754 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 2755 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 2756 return (EIO); 2757 } 2758 2759 return (0); 2760 } 2761 2762 /* 2763 * ixgbe_setup_multicast - Setup multicast data structures. 2764 * 2765 * This routine initializes all of the multicast related structures 2766 * and save them in the hardware registers. 2767 */ 2768 static void 2769 ixgbe_setup_multicst(ixgbe_t *ixgbe) 2770 { 2771 uint8_t *mc_addr_list; 2772 uint32_t mc_addr_count; 2773 struct ixgbe_hw *hw = &ixgbe->hw; 2774 2775 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2776 2777 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES); 2778 2779 mc_addr_list = (uint8_t *)ixgbe->mcast_table; 2780 mc_addr_count = ixgbe->mcast_count; 2781 2782 /* 2783 * Update the multicast addresses to the MTA registers 2784 */ 2785 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count, 2786 ixgbe_mc_table_itr); 2787 } 2788 2789 /* 2790 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode). 2791 * 2792 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers. 2793 * Different chipsets may have different allowed configuration of vmdq and rss. 2794 */ 2795 static void 2796 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe) 2797 { 2798 struct ixgbe_hw *hw = &ixgbe->hw; 2799 uint32_t ring_per_group; 2800 2801 switch (hw->mac.type) { 2802 case ixgbe_mac_82598EB: 2803 /* 2804 * 82598 supports the following combination: 2805 * vmdq no. x rss no. 2806 * [5..16] x 1 2807 * [1..4] x [1..16] 2808 * However 8 rss queue per pool (vmdq) is sufficient for 2809 * most cases. 2810 */ 2811 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2812 if (ixgbe->num_rx_groups > 4) { 2813 ixgbe->num_rx_rings = ixgbe->num_rx_groups; 2814 } else { 2815 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 2816 min(8, ring_per_group); 2817 } 2818 2819 break; 2820 2821 case ixgbe_mac_82599EB: 2822 /* 2823 * 82599 supports the following combination: 2824 * vmdq no. x rss no. 2825 * [33..64] x [1..2] 2826 * [2..32] x [1..4] 2827 * 1 x [1..16] 2828 * However 8 rss queue per pool (vmdq) is sufficient for 2829 * most cases. 2830 */ 2831 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2832 if (ixgbe->num_rx_groups == 1) { 2833 ixgbe->num_rx_rings = min(8, ring_per_group); 2834 } else if (ixgbe->num_rx_groups <= 32) { 2835 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 2836 min(4, ring_per_group); 2837 } else if (ixgbe->num_rx_groups <= 64) { 2838 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 2839 min(2, ring_per_group); 2840 } 2841 2842 break; 2843 2844 default: 2845 break; 2846 } 2847 2848 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 2849 2850 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) { 2851 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 2852 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) { 2853 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ; 2854 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) { 2855 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS; 2856 } else { 2857 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS; 2858 } 2859 2860 ixgbe_log(ixgbe, "rx group number:%d, rx ring number:%d", 2861 ixgbe->num_rx_groups, ixgbe->num_rx_rings); 2862 } 2863 2864 /* 2865 * ixgbe_get_conf - Get driver configurations set in driver.conf. 2866 * 2867 * This routine gets user-configured values out of the configuration 2868 * file ixgbe.conf. 2869 * 2870 * For each configurable value, there is a minimum, a maximum, and a 2871 * default. 2872 * If user does not configure a value, use the default. 2873 * If user configures below the minimum, use the minumum. 2874 * If user configures above the maximum, use the maxumum. 2875 */ 2876 static void 2877 ixgbe_get_conf(ixgbe_t *ixgbe) 2878 { 2879 struct ixgbe_hw *hw = &ixgbe->hw; 2880 uint32_t flow_control; 2881 2882 /* 2883 * ixgbe driver supports the following user configurations: 2884 * 2885 * Jumbo frame configuration: 2886 * default_mtu 2887 * 2888 * Ethernet flow control configuration: 2889 * flow_control 2890 * 2891 * Multiple rings configurations: 2892 * tx_queue_number 2893 * tx_ring_size 2894 * rx_queue_number 2895 * rx_ring_size 2896 * 2897 * Call ixgbe_get_prop() to get the value for a specific 2898 * configuration parameter. 2899 */ 2900 2901 /* 2902 * Jumbo frame configuration - max_frame_size controls host buffer 2903 * allocation, so includes MTU, ethernet header, vlan tag and 2904 * frame check sequence. 2905 */ 2906 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU, 2907 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU); 2908 2909 ixgbe->max_frame_size = ixgbe->default_mtu + 2910 sizeof (struct ether_vlan_header) + ETHERFCSL; 2911 2912 /* 2913 * Ethernet flow control configuration 2914 */ 2915 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL, 2916 ixgbe_fc_none, 3, ixgbe_fc_none); 2917 if (flow_control == 3) 2918 flow_control = ixgbe_fc_default; 2919 2920 /* 2921 * fc.requested mode is what the user requests. After autoneg, 2922 * fc.current_mode will be the flow_control mode that was negotiated. 2923 */ 2924 hw->fc.requested_mode = flow_control; 2925 2926 /* 2927 * Multiple rings configurations 2928 */ 2929 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM, 2930 ixgbe->capab->min_tx_que_num, 2931 ixgbe->capab->max_tx_que_num, 2932 ixgbe->capab->def_tx_que_num); 2933 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE, 2934 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE); 2935 2936 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM, 2937 ixgbe->capab->min_rx_que_num, 2938 ixgbe->capab->max_rx_que_num, 2939 ixgbe->capab->def_rx_que_num); 2940 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE, 2941 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE); 2942 2943 /* 2944 * Multiple groups configuration 2945 */ 2946 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM, 2947 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num, 2948 ixgbe->capab->def_rx_grp_num); 2949 2950 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE, 2951 0, 1, DEFAULT_MR_ENABLE); 2952 2953 if (ixgbe->mr_enable == B_FALSE) { 2954 ixgbe->num_tx_rings = 1; 2955 ixgbe->num_rx_rings = 1; 2956 ixgbe->num_rx_groups = 1; 2957 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 2958 } else { 2959 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 2960 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1); 2961 /* 2962 * The combination of num_rx_rings and num_rx_groups 2963 * may be not supported by h/w. We need to adjust 2964 * them to appropriate values. 2965 */ 2966 ixgbe_setup_vmdq_rss_conf(ixgbe); 2967 } 2968 2969 /* 2970 * Tunable used to force an interrupt type. The only use is 2971 * for testing of the lesser interrupt types. 2972 * 0 = don't force interrupt type 2973 * 1 = force interrupt type MSI-X 2974 * 2 = force interrupt type MSI 2975 * 3 = force interrupt type Legacy 2976 */ 2977 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE, 2978 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE); 2979 2980 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE, 2981 0, 1, DEFAULT_TX_HCKSUM_ENABLE); 2982 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE, 2983 0, 1, DEFAULT_RX_HCKSUM_ENABLE); 2984 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE, 2985 0, 1, DEFAULT_LSO_ENABLE); 2986 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE, 2987 0, 1, DEFAULT_LRO_ENABLE); 2988 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE, 2989 0, 1, DEFAULT_TX_HEAD_WB_ENABLE); 2990 2991 /* Head Write Back not recommended for 82599 */ 2992 if (hw->mac.type >= ixgbe_mac_82599EB) { 2993 ixgbe->tx_head_wb_enable = B_FALSE; 2994 } 2995 2996 /* 2997 * ixgbe LSO needs the tx h/w checksum support. 2998 * LSO will be disabled if tx h/w checksum is not 2999 * enabled. 3000 */ 3001 if (ixgbe->tx_hcksum_enable == B_FALSE) { 3002 ixgbe->lso_enable = B_FALSE; 3003 } 3004 3005 /* 3006 * ixgbe LRO needs the rx h/w checksum support. 3007 * LRO will be disabled if rx h/w checksum is not 3008 * enabled. 3009 */ 3010 if (ixgbe->rx_hcksum_enable == B_FALSE) { 3011 ixgbe->lro_enable = B_FALSE; 3012 } 3013 3014 /* 3015 * ixgbe LRO only been supported by 82599 now 3016 */ 3017 if (hw->mac.type != ixgbe_mac_82599EB) { 3018 ixgbe->lro_enable = B_FALSE; 3019 } 3020 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD, 3021 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD, 3022 DEFAULT_TX_COPY_THRESHOLD); 3023 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe, 3024 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD, 3025 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD); 3026 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe, 3027 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD, 3028 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD); 3029 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe, 3030 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD, 3031 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD); 3032 3033 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD, 3034 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD, 3035 DEFAULT_RX_COPY_THRESHOLD); 3036 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR, 3037 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR, 3038 DEFAULT_RX_LIMIT_PER_INTR); 3039 3040 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING, 3041 ixgbe->capab->min_intr_throttle, 3042 ixgbe->capab->max_intr_throttle, 3043 ixgbe->capab->def_intr_throttle); 3044 /* 3045 * 82599 requires the interupt throttling rate is 3046 * a multiple of 8. This is enforced by the register 3047 * definiton. 3048 */ 3049 if (hw->mac.type == ixgbe_mac_82599EB) 3050 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8; 3051 } 3052 3053 static void 3054 ixgbe_init_params(ixgbe_t *ixgbe) 3055 { 3056 ixgbe->param_en_10000fdx_cap = 1; 3057 ixgbe->param_en_1000fdx_cap = 1; 3058 ixgbe->param_en_100fdx_cap = 1; 3059 ixgbe->param_adv_10000fdx_cap = 1; 3060 ixgbe->param_adv_1000fdx_cap = 1; 3061 ixgbe->param_adv_100fdx_cap = 1; 3062 3063 ixgbe->param_pause_cap = 1; 3064 ixgbe->param_asym_pause_cap = 1; 3065 ixgbe->param_rem_fault = 0; 3066 3067 ixgbe->param_adv_autoneg_cap = 1; 3068 ixgbe->param_adv_pause_cap = 1; 3069 ixgbe->param_adv_asym_pause_cap = 1; 3070 ixgbe->param_adv_rem_fault = 0; 3071 3072 ixgbe->param_lp_10000fdx_cap = 0; 3073 ixgbe->param_lp_1000fdx_cap = 0; 3074 ixgbe->param_lp_100fdx_cap = 0; 3075 ixgbe->param_lp_autoneg_cap = 0; 3076 ixgbe->param_lp_pause_cap = 0; 3077 ixgbe->param_lp_asym_pause_cap = 0; 3078 ixgbe->param_lp_rem_fault = 0; 3079 } 3080 3081 /* 3082 * ixgbe_get_prop - Get a property value out of the configuration file 3083 * ixgbe.conf. 3084 * 3085 * Caller provides the name of the property, a default value, a minimum 3086 * value, and a maximum value. 3087 * 3088 * Return configured value of the property, with default, minimum and 3089 * maximum properly applied. 3090 */ 3091 static int 3092 ixgbe_get_prop(ixgbe_t *ixgbe, 3093 char *propname, /* name of the property */ 3094 int minval, /* minimum acceptable value */ 3095 int maxval, /* maximim acceptable value */ 3096 int defval) /* default value */ 3097 { 3098 int value; 3099 3100 /* 3101 * Call ddi_prop_get_int() to read the conf settings 3102 */ 3103 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip, 3104 DDI_PROP_DONTPASS, propname, defval); 3105 if (value > maxval) 3106 value = maxval; 3107 3108 if (value < minval) 3109 value = minval; 3110 3111 return (value); 3112 } 3113 3114 /* 3115 * ixgbe_driver_setup_link - Using the link properties to setup the link. 3116 */ 3117 int 3118 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw) 3119 { 3120 u32 autoneg_advertised = 0; 3121 3122 /* 3123 * No half duplex support with 10Gb parts 3124 */ 3125 if (ixgbe->param_adv_10000fdx_cap == 1) 3126 autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 3127 3128 if (ixgbe->param_adv_1000fdx_cap == 1) 3129 autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 3130 3131 if (ixgbe->param_adv_100fdx_cap == 1) 3132 autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; 3133 3134 if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) { 3135 ixgbe_notice(ixgbe, "Invalid link settings. Setup link " 3136 "to autonegotiation with full link capabilities."); 3137 3138 autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL | 3139 IXGBE_LINK_SPEED_1GB_FULL | 3140 IXGBE_LINK_SPEED_100_FULL; 3141 } 3142 3143 if (setup_hw) { 3144 if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised, 3145 ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) { 3146 ixgbe_notice(ixgbe, "Setup link failed on this " 3147 "device."); 3148 return (IXGBE_FAILURE); 3149 } 3150 } 3151 3152 return (IXGBE_SUCCESS); 3153 } 3154 3155 /* 3156 * ixgbe_driver_link_check - Link status processing. 3157 * 3158 * This function can be called in both kernel context and interrupt context 3159 */ 3160 static void 3161 ixgbe_driver_link_check(ixgbe_t *ixgbe) 3162 { 3163 struct ixgbe_hw *hw = &ixgbe->hw; 3164 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 3165 boolean_t link_up = B_FALSE; 3166 boolean_t link_changed = B_FALSE; 3167 3168 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3169 3170 (void) ixgbe_check_link(hw, &speed, &link_up, false); 3171 if (link_up) { 3172 ixgbe->link_check_complete = B_TRUE; 3173 3174 /* Link is up, enable flow control settings */ 3175 (void) ixgbe_fc_enable(hw, 0); 3176 3177 /* 3178 * The Link is up, check whether it was marked as down earlier 3179 */ 3180 if (ixgbe->link_state != LINK_STATE_UP) { 3181 switch (speed) { 3182 case IXGBE_LINK_SPEED_10GB_FULL: 3183 ixgbe->link_speed = SPEED_10GB; 3184 break; 3185 case IXGBE_LINK_SPEED_1GB_FULL: 3186 ixgbe->link_speed = SPEED_1GB; 3187 break; 3188 case IXGBE_LINK_SPEED_100_FULL: 3189 ixgbe->link_speed = SPEED_100; 3190 } 3191 ixgbe->link_duplex = LINK_DUPLEX_FULL; 3192 ixgbe->link_state = LINK_STATE_UP; 3193 link_changed = B_TRUE; 3194 } 3195 } else { 3196 if (ixgbe->link_check_complete == B_TRUE || 3197 (ixgbe->link_check_complete == B_FALSE && 3198 gethrtime() >= ixgbe->link_check_hrtime)) { 3199 /* 3200 * The link is really down 3201 */ 3202 ixgbe->link_check_complete = B_TRUE; 3203 3204 if (ixgbe->link_state != LINK_STATE_DOWN) { 3205 ixgbe->link_speed = 0; 3206 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN; 3207 ixgbe->link_state = LINK_STATE_DOWN; 3208 link_changed = B_TRUE; 3209 } 3210 } 3211 } 3212 3213 /* 3214 * this is only reached after a link-status-change interrupt 3215 * so always get new phy state 3216 */ 3217 ixgbe_get_hw_state(ixgbe); 3218 3219 /* 3220 * If we are in an interrupt context, need to re-enable the 3221 * interrupt, which was automasked 3222 */ 3223 if (servicing_interrupt() != 0) { 3224 ixgbe->eims |= IXGBE_EICR_LSC; 3225 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3226 } 3227 3228 if (link_changed) { 3229 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3230 } 3231 } 3232 3233 /* 3234 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599. 3235 */ 3236 static void 3237 ixgbe_sfp_check(void *arg) 3238 { 3239 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3240 uint32_t eicr = ixgbe->eicr; 3241 struct ixgbe_hw *hw = &ixgbe->hw; 3242 3243 mutex_enter(&ixgbe->gen_lock); 3244 if (eicr & IXGBE_EICR_GPI_SDP1) { 3245 /* clear the interrupt */ 3246 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 3247 3248 /* if link up, do multispeed fiber setup */ 3249 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3250 B_TRUE, B_TRUE); 3251 ixgbe_driver_link_check(ixgbe); 3252 } else if (eicr & IXGBE_EICR_GPI_SDP2) { 3253 /* clear the interrupt */ 3254 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); 3255 3256 /* if link up, do sfp module setup */ 3257 (void) hw->mac.ops.setup_sfp(hw); 3258 3259 /* do multispeed fiber setup */ 3260 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3261 B_TRUE, B_TRUE); 3262 ixgbe_driver_link_check(ixgbe); 3263 } 3264 mutex_exit(&ixgbe->gen_lock); 3265 } 3266 3267 /* 3268 * ixgbe_link_timer - timer for link status detection 3269 */ 3270 static void 3271 ixgbe_link_timer(void *arg) 3272 { 3273 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3274 3275 mutex_enter(&ixgbe->gen_lock); 3276 ixgbe_driver_link_check(ixgbe); 3277 mutex_exit(&ixgbe->gen_lock); 3278 } 3279 3280 /* 3281 * ixgbe_local_timer - Driver watchdog function. 3282 * 3283 * This function will handle the transmit stall check and other routines. 3284 */ 3285 static void 3286 ixgbe_local_timer(void *arg) 3287 { 3288 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3289 3290 if (ixgbe->ixgbe_state & IXGBE_ERROR) { 3291 ixgbe->reset_count++; 3292 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 3293 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 3294 ixgbe_restart_watchdog_timer(ixgbe); 3295 return; 3296 } 3297 3298 if (ixgbe_stall_check(ixgbe)) { 3299 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL); 3300 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3301 3302 ixgbe->reset_count++; 3303 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 3304 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 3305 } 3306 3307 ixgbe_restart_watchdog_timer(ixgbe); 3308 } 3309 3310 /* 3311 * ixgbe_stall_check - Check for transmit stall. 3312 * 3313 * This function checks if the adapter is stalled (in transmit). 3314 * 3315 * It is called each time the watchdog timeout is invoked. 3316 * If the transmit descriptor reclaim continuously fails, 3317 * the watchdog value will increment by 1. If the watchdog 3318 * value exceeds the threshold, the ixgbe is assumed to 3319 * have stalled and need to be reset. 3320 */ 3321 static boolean_t 3322 ixgbe_stall_check(ixgbe_t *ixgbe) 3323 { 3324 ixgbe_tx_ring_t *tx_ring; 3325 boolean_t result; 3326 int i; 3327 3328 if (ixgbe->link_state != LINK_STATE_UP) 3329 return (B_FALSE); 3330 3331 /* 3332 * If any tx ring is stalled, we'll reset the chipset 3333 */ 3334 result = B_FALSE; 3335 for (i = 0; i < ixgbe->num_tx_rings; i++) { 3336 tx_ring = &ixgbe->tx_rings[i]; 3337 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) { 3338 tx_ring->tx_recycle(tx_ring); 3339 } 3340 3341 if (tx_ring->recycle_fail > 0) 3342 tx_ring->stall_watchdog++; 3343 else 3344 tx_ring->stall_watchdog = 0; 3345 3346 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { 3347 result = B_TRUE; 3348 break; 3349 } 3350 } 3351 3352 if (result) { 3353 tx_ring->stall_watchdog = 0; 3354 tx_ring->recycle_fail = 0; 3355 } 3356 3357 return (result); 3358 } 3359 3360 3361 /* 3362 * is_valid_mac_addr - Check if the mac address is valid. 3363 */ 3364 static boolean_t 3365 is_valid_mac_addr(uint8_t *mac_addr) 3366 { 3367 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 3368 const uint8_t addr_test2[6] = 3369 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3370 3371 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 3372 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 3373 return (B_FALSE); 3374 3375 return (B_TRUE); 3376 } 3377 3378 static boolean_t 3379 ixgbe_find_mac_address(ixgbe_t *ixgbe) 3380 { 3381 #ifdef __sparc 3382 struct ixgbe_hw *hw = &ixgbe->hw; 3383 uchar_t *bytes; 3384 struct ether_addr sysaddr; 3385 uint_t nelts; 3386 int err; 3387 boolean_t found = B_FALSE; 3388 3389 /* 3390 * The "vendor's factory-set address" may already have 3391 * been extracted from the chip, but if the property 3392 * "local-mac-address" is set we use that instead. 3393 * 3394 * We check whether it looks like an array of 6 3395 * bytes (which it should, if OBP set it). If we can't 3396 * make sense of it this way, we'll ignore it. 3397 */ 3398 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 3399 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 3400 if (err == DDI_PROP_SUCCESS) { 3401 if (nelts == ETHERADDRL) { 3402 while (nelts--) 3403 hw->mac.addr[nelts] = bytes[nelts]; 3404 found = B_TRUE; 3405 } 3406 ddi_prop_free(bytes); 3407 } 3408 3409 /* 3410 * Look up the OBP property "local-mac-address?". If the user has set 3411 * 'local-mac-address? = false', use "the system address" instead. 3412 */ 3413 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0, 3414 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 3415 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 3416 if (localetheraddr(NULL, &sysaddr) != 0) { 3417 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 3418 found = B_TRUE; 3419 } 3420 } 3421 ddi_prop_free(bytes); 3422 } 3423 3424 /* 3425 * Finally(!), if there's a valid "mac-address" property (created 3426 * if we netbooted from this interface), we must use this instead 3427 * of any of the above to ensure that the NFS/install server doesn't 3428 * get confused by the address changing as Solaris takes over! 3429 */ 3430 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 3431 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 3432 if (err == DDI_PROP_SUCCESS) { 3433 if (nelts == ETHERADDRL) { 3434 while (nelts--) 3435 hw->mac.addr[nelts] = bytes[nelts]; 3436 found = B_TRUE; 3437 } 3438 ddi_prop_free(bytes); 3439 } 3440 3441 if (found) { 3442 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 3443 return (B_TRUE); 3444 } 3445 #else 3446 _NOTE(ARGUNUSED(ixgbe)); 3447 #endif 3448 3449 return (B_TRUE); 3450 } 3451 3452 #pragma inline(ixgbe_arm_watchdog_timer) 3453 static void 3454 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe) 3455 { 3456 /* 3457 * Fire a watchdog timer 3458 */ 3459 ixgbe->watchdog_tid = 3460 timeout(ixgbe_local_timer, 3461 (void *)ixgbe, 1 * drv_usectohz(1000000)); 3462 3463 } 3464 3465 /* 3466 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer. 3467 */ 3468 void 3469 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe) 3470 { 3471 mutex_enter(&ixgbe->watchdog_lock); 3472 3473 if (!ixgbe->watchdog_enable) { 3474 ixgbe->watchdog_enable = B_TRUE; 3475 ixgbe->watchdog_start = B_TRUE; 3476 ixgbe_arm_watchdog_timer(ixgbe); 3477 } 3478 3479 mutex_exit(&ixgbe->watchdog_lock); 3480 } 3481 3482 /* 3483 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer. 3484 */ 3485 void 3486 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe) 3487 { 3488 timeout_id_t tid; 3489 3490 mutex_enter(&ixgbe->watchdog_lock); 3491 3492 ixgbe->watchdog_enable = B_FALSE; 3493 ixgbe->watchdog_start = B_FALSE; 3494 tid = ixgbe->watchdog_tid; 3495 ixgbe->watchdog_tid = 0; 3496 3497 mutex_exit(&ixgbe->watchdog_lock); 3498 3499 if (tid != 0) 3500 (void) untimeout(tid); 3501 } 3502 3503 /* 3504 * ixgbe_start_watchdog_timer - Start the driver watchdog timer. 3505 */ 3506 void 3507 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe) 3508 { 3509 mutex_enter(&ixgbe->watchdog_lock); 3510 3511 if (ixgbe->watchdog_enable) { 3512 if (!ixgbe->watchdog_start) { 3513 ixgbe->watchdog_start = B_TRUE; 3514 ixgbe_arm_watchdog_timer(ixgbe); 3515 } 3516 } 3517 3518 mutex_exit(&ixgbe->watchdog_lock); 3519 } 3520 3521 /* 3522 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer. 3523 */ 3524 static void 3525 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe) 3526 { 3527 mutex_enter(&ixgbe->watchdog_lock); 3528 3529 if (ixgbe->watchdog_start) 3530 ixgbe_arm_watchdog_timer(ixgbe); 3531 3532 mutex_exit(&ixgbe->watchdog_lock); 3533 } 3534 3535 /* 3536 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer. 3537 */ 3538 void 3539 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe) 3540 { 3541 timeout_id_t tid; 3542 3543 mutex_enter(&ixgbe->watchdog_lock); 3544 3545 ixgbe->watchdog_start = B_FALSE; 3546 tid = ixgbe->watchdog_tid; 3547 ixgbe->watchdog_tid = 0; 3548 3549 mutex_exit(&ixgbe->watchdog_lock); 3550 3551 if (tid != 0) 3552 (void) untimeout(tid); 3553 } 3554 3555 /* 3556 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts. 3557 */ 3558 static void 3559 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe) 3560 { 3561 struct ixgbe_hw *hw = &ixgbe->hw; 3562 3563 /* 3564 * mask all interrupts off 3565 */ 3566 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff); 3567 3568 /* 3569 * for MSI-X, also disable autoclear 3570 */ 3571 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 3572 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0); 3573 } 3574 3575 IXGBE_WRITE_FLUSH(hw); 3576 } 3577 3578 /* 3579 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts. 3580 */ 3581 static void 3582 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe) 3583 { 3584 struct ixgbe_hw *hw = &ixgbe->hw; 3585 uint32_t eiac, eiam; 3586 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 3587 3588 /* interrupt types to enable */ 3589 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */ 3590 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */ 3591 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */ 3592 3593 /* enable automask on "other" causes that this adapter can generate */ 3594 eiam = ixgbe->capab->other_intr; 3595 3596 /* 3597 * msi-x mode 3598 */ 3599 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 3600 /* enable autoclear but not on bits 29:20 */ 3601 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR); 3602 3603 /* general purpose interrupt enable */ 3604 gpie |= (IXGBE_GPIE_MSIX_MODE 3605 | IXGBE_GPIE_PBA_SUPPORT 3606 | IXGBE_GPIE_OCD 3607 | IXGBE_GPIE_EIAME); 3608 /* 3609 * non-msi-x mode 3610 */ 3611 } else { 3612 3613 /* disable autoclear, leave gpie at default */ 3614 eiac = 0; 3615 3616 /* 3617 * General purpose interrupt enable. 3618 * For 82599, extended interrupt automask enable 3619 * only in MSI or MSI-X mode 3620 */ 3621 if ((hw->mac.type < ixgbe_mac_82599EB) || 3622 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) { 3623 gpie |= IXGBE_GPIE_EIAME; 3624 } 3625 } 3626 /* Enable specific interrupts for 82599 */ 3627 if (hw->mac.type == ixgbe_mac_82599EB) { 3628 gpie |= IXGBE_SDP2_GPIEN; /* pluggable optics intr */ 3629 gpie |= IXGBE_SDP1_GPIEN; /* LSC interrupt */ 3630 } 3631 /* Enable RSC Dealy 8us for 82599 */ 3632 if (ixgbe->lro_enable) { 3633 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT); 3634 } 3635 /* write to interrupt control registers */ 3636 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3637 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac); 3638 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam); 3639 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 3640 IXGBE_WRITE_FLUSH(hw); 3641 } 3642 3643 /* 3644 * ixgbe_loopback_ioctl - Loopback support. 3645 */ 3646 enum ioc_reply 3647 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp) 3648 { 3649 lb_info_sz_t *lbsp; 3650 lb_property_t *lbpp; 3651 uint32_t *lbmp; 3652 uint32_t size; 3653 uint32_t value; 3654 3655 if (mp->b_cont == NULL) 3656 return (IOC_INVAL); 3657 3658 switch (iocp->ioc_cmd) { 3659 default: 3660 return (IOC_INVAL); 3661 3662 case LB_GET_INFO_SIZE: 3663 size = sizeof (lb_info_sz_t); 3664 if (iocp->ioc_count != size) 3665 return (IOC_INVAL); 3666 3667 value = sizeof (lb_normal); 3668 value += sizeof (lb_mac); 3669 value += sizeof (lb_external); 3670 3671 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 3672 *lbsp = value; 3673 break; 3674 3675 case LB_GET_INFO: 3676 value = sizeof (lb_normal); 3677 value += sizeof (lb_mac); 3678 value += sizeof (lb_external); 3679 3680 size = value; 3681 if (iocp->ioc_count != size) 3682 return (IOC_INVAL); 3683 3684 value = 0; 3685 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 3686 3687 lbpp[value++] = lb_normal; 3688 lbpp[value++] = lb_mac; 3689 lbpp[value++] = lb_external; 3690 break; 3691 3692 case LB_GET_MODE: 3693 size = sizeof (uint32_t); 3694 if (iocp->ioc_count != size) 3695 return (IOC_INVAL); 3696 3697 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 3698 *lbmp = ixgbe->loopback_mode; 3699 break; 3700 3701 case LB_SET_MODE: 3702 size = 0; 3703 if (iocp->ioc_count != sizeof (uint32_t)) 3704 return (IOC_INVAL); 3705 3706 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 3707 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp)) 3708 return (IOC_INVAL); 3709 break; 3710 } 3711 3712 iocp->ioc_count = size; 3713 iocp->ioc_error = 0; 3714 3715 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3716 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3717 return (IOC_INVAL); 3718 } 3719 3720 return (IOC_REPLY); 3721 } 3722 3723 /* 3724 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode. 3725 */ 3726 static boolean_t 3727 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode) 3728 { 3729 if (mode == ixgbe->loopback_mode) 3730 return (B_TRUE); 3731 3732 ixgbe->loopback_mode = mode; 3733 3734 if (mode == IXGBE_LB_NONE) { 3735 /* 3736 * Reset the chip 3737 */ 3738 (void) ixgbe_reset(ixgbe); 3739 return (B_TRUE); 3740 } 3741 3742 mutex_enter(&ixgbe->gen_lock); 3743 3744 switch (mode) { 3745 default: 3746 mutex_exit(&ixgbe->gen_lock); 3747 return (B_FALSE); 3748 3749 case IXGBE_LB_EXTERNAL: 3750 break; 3751 3752 case IXGBE_LB_INTERNAL_MAC: 3753 ixgbe_set_internal_mac_loopback(ixgbe); 3754 break; 3755 } 3756 3757 mutex_exit(&ixgbe->gen_lock); 3758 3759 return (B_TRUE); 3760 } 3761 3762 /* 3763 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode. 3764 */ 3765 static void 3766 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe) 3767 { 3768 struct ixgbe_hw *hw; 3769 uint32_t reg; 3770 uint8_t atlas; 3771 3772 hw = &ixgbe->hw; 3773 3774 /* 3775 * Setup MAC loopback 3776 */ 3777 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0); 3778 reg |= IXGBE_HLREG0_LPBK; 3779 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg); 3780 3781 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 3782 reg &= ~IXGBE_AUTOC_LMS_MASK; 3783 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 3784 3785 /* 3786 * Disable Atlas Tx lanes to keep packets in loopback and not on wire 3787 */ 3788 if (hw->mac.type == ixgbe_mac_82598EB) { 3789 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 3790 &atlas); 3791 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 3792 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 3793 atlas); 3794 3795 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 3796 &atlas); 3797 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 3798 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 3799 atlas); 3800 3801 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 3802 &atlas); 3803 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 3804 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 3805 atlas); 3806 3807 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 3808 &atlas); 3809 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 3810 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 3811 atlas); 3812 } 3813 } 3814 3815 #pragma inline(ixgbe_intr_rx_work) 3816 /* 3817 * ixgbe_intr_rx_work - RX processing of ISR. 3818 */ 3819 static void 3820 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring) 3821 { 3822 mblk_t *mp; 3823 3824 mutex_enter(&rx_ring->rx_lock); 3825 3826 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 3827 mutex_exit(&rx_ring->rx_lock); 3828 3829 if (mp != NULL) 3830 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 3831 rx_ring->ring_gen_num); 3832 } 3833 3834 #pragma inline(ixgbe_intr_tx_work) 3835 /* 3836 * ixgbe_intr_tx_work - TX processing of ISR. 3837 */ 3838 static void 3839 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring) 3840 { 3841 ixgbe_t *ixgbe = tx_ring->ixgbe; 3842 3843 /* 3844 * Recycle the tx descriptors 3845 */ 3846 tx_ring->tx_recycle(tx_ring); 3847 3848 /* 3849 * Schedule the re-transmit 3850 */ 3851 if (tx_ring->reschedule && 3852 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) { 3853 tx_ring->reschedule = B_FALSE; 3854 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl, 3855 tx_ring->ring_handle); 3856 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 3857 } 3858 } 3859 3860 #pragma inline(ixgbe_intr_other_work) 3861 /* 3862 * ixgbe_intr_other_work - Process interrupt types other than tx/rx 3863 */ 3864 static void 3865 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr) 3866 { 3867 struct ixgbe_hw *hw = &ixgbe->hw; 3868 3869 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3870 3871 /* 3872 * handle link status change 3873 */ 3874 if (eicr & IXGBE_EICR_LSC) { 3875 ixgbe_driver_link_check(ixgbe); 3876 } 3877 3878 /* 3879 * check for fan failure on adapters with fans 3880 */ 3881 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 3882 (eicr & IXGBE_EICR_GPI_SDP1)) { 3883 if (hw->mac.type < ixgbe_mac_82599EB) { 3884 ixgbe_log(ixgbe, 3885 "Fan has stopped, replace the adapter\n"); 3886 3887 /* re-enable the interrupt, which was automasked */ 3888 ixgbe->eims |= IXGBE_EICR_GPI_SDP1; 3889 } 3890 } 3891 3892 /* 3893 * Do SFP check for 82599 3894 */ 3895 if (hw->mac.type == ixgbe_mac_82599EB) { 3896 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq, 3897 ixgbe_sfp_check, (void *)ixgbe, 3898 DDI_NOSLEEP)) != DDI_SUCCESS) { 3899 ixgbe_log(ixgbe, "No memory available to dispatch " 3900 "taskq for SFP check"); 3901 } 3902 3903 /* 3904 * We need to fully re-check the link later. 3905 */ 3906 ixgbe->link_check_complete = B_FALSE; 3907 ixgbe->link_check_hrtime = gethrtime() + 3908 (IXGBE_LINK_UP_TIME * 100000000ULL); 3909 } 3910 } 3911 3912 /* 3913 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts. 3914 */ 3915 static uint_t 3916 ixgbe_intr_legacy(void *arg1, void *arg2) 3917 { 3918 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 3919 struct ixgbe_hw *hw = &ixgbe->hw; 3920 ixgbe_tx_ring_t *tx_ring; 3921 ixgbe_rx_ring_t *rx_ring; 3922 uint32_t eicr; 3923 mblk_t *mp; 3924 boolean_t tx_reschedule; 3925 uint_t result; 3926 3927 _NOTE(ARGUNUSED(arg2)); 3928 3929 mutex_enter(&ixgbe->gen_lock); 3930 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 3931 mutex_exit(&ixgbe->gen_lock); 3932 return (DDI_INTR_UNCLAIMED); 3933 } 3934 3935 mp = NULL; 3936 tx_reschedule = B_FALSE; 3937 3938 /* 3939 * Any bit set in eicr: claim this interrupt 3940 */ 3941 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 3942 3943 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3944 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3945 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 3946 return (DDI_INTR_CLAIMED); 3947 } 3948 3949 if (eicr) { 3950 /* 3951 * For legacy interrupt, we have only one interrupt, 3952 * so we have only one rx ring and one tx ring enabled. 3953 */ 3954 ASSERT(ixgbe->num_rx_rings == 1); 3955 ASSERT(ixgbe->num_tx_rings == 1); 3956 3957 /* 3958 * For legacy interrupt, rx rings[0] will use RTxQ[0]. 3959 */ 3960 if (eicr & 0x1) { 3961 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE; 3962 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 3963 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 3964 /* 3965 * Clean the rx descriptors 3966 */ 3967 rx_ring = &ixgbe->rx_rings[0]; 3968 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 3969 } 3970 3971 /* 3972 * For legacy interrupt, tx rings[0] will use RTxQ[1]. 3973 */ 3974 if (eicr & 0x2) { 3975 /* 3976 * Recycle the tx descriptors 3977 */ 3978 tx_ring = &ixgbe->tx_rings[0]; 3979 tx_ring->tx_recycle(tx_ring); 3980 3981 /* 3982 * Schedule the re-transmit 3983 */ 3984 tx_reschedule = (tx_ring->reschedule && 3985 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)); 3986 } 3987 3988 /* any interrupt type other than tx/rx */ 3989 if (eicr & ixgbe->capab->other_intr) { 3990 if (hw->mac.type < ixgbe_mac_82599EB) { 3991 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 3992 } 3993 if (hw->mac.type == ixgbe_mac_82599EB) { 3994 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 3995 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 3996 } 3997 ixgbe_intr_other_work(ixgbe, eicr); 3998 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 3999 } 4000 4001 mutex_exit(&ixgbe->gen_lock); 4002 4003 result = DDI_INTR_CLAIMED; 4004 } else { 4005 mutex_exit(&ixgbe->gen_lock); 4006 4007 /* 4008 * No interrupt cause bits set: don't claim this interrupt. 4009 */ 4010 result = DDI_INTR_UNCLAIMED; 4011 } 4012 4013 /* re-enable the interrupts which were automasked */ 4014 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4015 4016 /* 4017 * Do the following work outside of the gen_lock 4018 */ 4019 if (mp != NULL) { 4020 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4021 rx_ring->ring_gen_num); 4022 } 4023 4024 if (tx_reschedule) { 4025 tx_ring->reschedule = B_FALSE; 4026 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle); 4027 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule); 4028 } 4029 4030 return (result); 4031 } 4032 4033 /* 4034 * ixgbe_intr_msi - Interrupt handler for MSI. 4035 */ 4036 static uint_t 4037 ixgbe_intr_msi(void *arg1, void *arg2) 4038 { 4039 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4040 struct ixgbe_hw *hw = &ixgbe->hw; 4041 uint32_t eicr; 4042 4043 _NOTE(ARGUNUSED(arg2)); 4044 4045 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4046 4047 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4048 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4049 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4050 return (DDI_INTR_CLAIMED); 4051 } 4052 4053 /* 4054 * For MSI interrupt, we have only one vector, 4055 * so we have only one rx ring and one tx ring enabled. 4056 */ 4057 ASSERT(ixgbe->num_rx_rings == 1); 4058 ASSERT(ixgbe->num_tx_rings == 1); 4059 4060 /* 4061 * For MSI interrupt, rx rings[0] will use RTxQ[0]. 4062 */ 4063 if (eicr & 0x1) { 4064 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]); 4065 } 4066 4067 /* 4068 * For MSI interrupt, tx rings[0] will use RTxQ[1]. 4069 */ 4070 if (eicr & 0x2) { 4071 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 4072 } 4073 4074 /* any interrupt type other than tx/rx */ 4075 if (eicr & ixgbe->capab->other_intr) { 4076 mutex_enter(&ixgbe->gen_lock); 4077 if (hw->mac.type < ixgbe_mac_82599EB) { 4078 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4079 } 4080 if (hw->mac.type == ixgbe_mac_82599EB) { 4081 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4082 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4083 } 4084 ixgbe_intr_other_work(ixgbe, eicr); 4085 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4086 mutex_exit(&ixgbe->gen_lock); 4087 } 4088 4089 /* re-enable the interrupts which were automasked */ 4090 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4091 4092 return (DDI_INTR_CLAIMED); 4093 } 4094 4095 /* 4096 * ixgbe_intr_msix - Interrupt handler for MSI-X. 4097 */ 4098 static uint_t 4099 ixgbe_intr_msix(void *arg1, void *arg2) 4100 { 4101 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1; 4102 ixgbe_t *ixgbe = vect->ixgbe; 4103 struct ixgbe_hw *hw = &ixgbe->hw; 4104 uint32_t eicr; 4105 int r_idx = 0; 4106 4107 _NOTE(ARGUNUSED(arg2)); 4108 4109 /* 4110 * Clean each rx ring that has its bit set in the map 4111 */ 4112 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1)); 4113 while (r_idx >= 0) { 4114 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]); 4115 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 4116 (ixgbe->num_rx_rings - 1)); 4117 } 4118 4119 /* 4120 * Clean each tx ring that has its bit set in the map 4121 */ 4122 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1)); 4123 while (r_idx >= 0) { 4124 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]); 4125 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 4126 (ixgbe->num_tx_rings - 1)); 4127 } 4128 4129 4130 /* 4131 * Clean other interrupt (link change) that has its bit set in the map 4132 */ 4133 if (BT_TEST(vect->other_map, 0) == 1) { 4134 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4135 4136 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != 4137 DDI_FM_OK) { 4138 ddi_fm_service_impact(ixgbe->dip, 4139 DDI_SERVICE_DEGRADED); 4140 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4141 return (DDI_INTR_CLAIMED); 4142 } 4143 4144 /* 4145 * Need check cause bits and only other causes will 4146 * be processed 4147 */ 4148 /* any interrupt type other than tx/rx */ 4149 if (eicr & ixgbe->capab->other_intr) { 4150 if (hw->mac.type < ixgbe_mac_82599EB) { 4151 mutex_enter(&ixgbe->gen_lock); 4152 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4153 ixgbe_intr_other_work(ixgbe, eicr); 4154 mutex_exit(&ixgbe->gen_lock); 4155 } else { 4156 if (hw->mac.type == ixgbe_mac_82599EB) { 4157 mutex_enter(&ixgbe->gen_lock); 4158 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 4159 ixgbe_intr_other_work(ixgbe, eicr); 4160 mutex_exit(&ixgbe->gen_lock); 4161 } 4162 } 4163 } 4164 4165 /* re-enable the interrupts which were automasked */ 4166 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4167 } 4168 4169 return (DDI_INTR_CLAIMED); 4170 } 4171 4172 /* 4173 * ixgbe_alloc_intrs - Allocate interrupts for the driver. 4174 * 4175 * Normal sequence is to try MSI-X; if not sucessful, try MSI; 4176 * if not successful, try Legacy. 4177 * ixgbe->intr_force can be used to force sequence to start with 4178 * any of the 3 types. 4179 * If MSI-X is not used, number of tx/rx rings is forced to 1. 4180 */ 4181 static int 4182 ixgbe_alloc_intrs(ixgbe_t *ixgbe) 4183 { 4184 dev_info_t *devinfo; 4185 int intr_types; 4186 int rc; 4187 4188 devinfo = ixgbe->dip; 4189 4190 /* 4191 * Get supported interrupt types 4192 */ 4193 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 4194 4195 if (rc != DDI_SUCCESS) { 4196 ixgbe_log(ixgbe, 4197 "Get supported interrupt types failed: %d", rc); 4198 return (IXGBE_FAILURE); 4199 } 4200 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types); 4201 4202 ixgbe->intr_type = 0; 4203 4204 /* 4205 * Install MSI-X interrupts 4206 */ 4207 if ((intr_types & DDI_INTR_TYPE_MSIX) && 4208 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) { 4209 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX); 4210 if (rc == IXGBE_SUCCESS) 4211 return (IXGBE_SUCCESS); 4212 4213 ixgbe_log(ixgbe, 4214 "Allocate MSI-X failed, trying MSI interrupts..."); 4215 } 4216 4217 /* 4218 * MSI-X not used, force rings and groups to 1 4219 */ 4220 ixgbe->num_rx_rings = 1; 4221 ixgbe->num_rx_groups = 1; 4222 ixgbe->num_tx_rings = 1; 4223 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 4224 ixgbe_log(ixgbe, 4225 "MSI-X not used, force rings and groups number to 1"); 4226 4227 /* 4228 * Install MSI interrupts 4229 */ 4230 if ((intr_types & DDI_INTR_TYPE_MSI) && 4231 (ixgbe->intr_force <= IXGBE_INTR_MSI)) { 4232 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI); 4233 if (rc == IXGBE_SUCCESS) 4234 return (IXGBE_SUCCESS); 4235 4236 ixgbe_log(ixgbe, 4237 "Allocate MSI failed, trying Legacy interrupts..."); 4238 } 4239 4240 /* 4241 * Install legacy interrupts 4242 */ 4243 if (intr_types & DDI_INTR_TYPE_FIXED) { 4244 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED); 4245 if (rc == IXGBE_SUCCESS) 4246 return (IXGBE_SUCCESS); 4247 4248 ixgbe_log(ixgbe, 4249 "Allocate Legacy interrupts failed"); 4250 } 4251 4252 /* 4253 * If none of the 3 types succeeded, return failure 4254 */ 4255 return (IXGBE_FAILURE); 4256 } 4257 4258 /* 4259 * ixgbe_alloc_intr_handles - Allocate interrupt handles. 4260 * 4261 * For legacy and MSI, only 1 handle is needed. For MSI-X, 4262 * if fewer than 2 handles are available, return failure. 4263 * Upon success, this maps the vectors to rx and tx rings for 4264 * interrupts. 4265 */ 4266 static int 4267 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type) 4268 { 4269 dev_info_t *devinfo; 4270 int request, count, actual; 4271 int minimum; 4272 int rc; 4273 uint32_t ring_per_group; 4274 4275 devinfo = ixgbe->dip; 4276 4277 switch (intr_type) { 4278 case DDI_INTR_TYPE_FIXED: 4279 request = 1; /* Request 1 legacy interrupt handle */ 4280 minimum = 1; 4281 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy"); 4282 break; 4283 4284 case DDI_INTR_TYPE_MSI: 4285 request = 1; /* Request 1 MSI interrupt handle */ 4286 minimum = 1; 4287 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI"); 4288 break; 4289 4290 case DDI_INTR_TYPE_MSIX: 4291 /* 4292 * Best number of vectors for the adapter is 4293 * (# rx rings + # tx rings), however we will 4294 * limit the request number. 4295 */ 4296 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings); 4297 if (request > ixgbe->capab->max_ring_vect) 4298 request = ixgbe->capab->max_ring_vect; 4299 minimum = 1; 4300 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X"); 4301 break; 4302 4303 default: 4304 ixgbe_log(ixgbe, 4305 "invalid call to ixgbe_alloc_intr_handles(): %d\n", 4306 intr_type); 4307 return (IXGBE_FAILURE); 4308 } 4309 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d", 4310 request, minimum); 4311 4312 /* 4313 * Get number of supported interrupts 4314 */ 4315 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 4316 if ((rc != DDI_SUCCESS) || (count < minimum)) { 4317 ixgbe_log(ixgbe, 4318 "Get interrupt number failed. Return: %d, count: %d", 4319 rc, count); 4320 return (IXGBE_FAILURE); 4321 } 4322 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count); 4323 4324 actual = 0; 4325 ixgbe->intr_cnt = 0; 4326 ixgbe->intr_cnt_max = 0; 4327 ixgbe->intr_cnt_min = 0; 4328 4329 /* 4330 * Allocate an array of interrupt handles 4331 */ 4332 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t); 4333 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP); 4334 4335 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0, 4336 request, &actual, DDI_INTR_ALLOC_NORMAL); 4337 if (rc != DDI_SUCCESS) { 4338 ixgbe_log(ixgbe, "Allocate interrupts failed. " 4339 "return: %d, request: %d, actual: %d", 4340 rc, request, actual); 4341 goto alloc_handle_fail; 4342 } 4343 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual); 4344 4345 /* 4346 * upper/lower limit of interrupts 4347 */ 4348 ixgbe->intr_cnt = actual; 4349 ixgbe->intr_cnt_max = request; 4350 ixgbe->intr_cnt_min = minimum; 4351 4352 /* 4353 * rss number per group should not exceed the rx interrupt number, 4354 * else need to adjust rx ring number. 4355 */ 4356 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 4357 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0); 4358 if (min(actual, ixgbe->num_rx_rings) < ring_per_group) { 4359 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 4360 min(actual, ixgbe->num_rx_rings); 4361 ixgbe_setup_vmdq_rss_conf(ixgbe); 4362 } 4363 4364 /* 4365 * Now we know the actual number of vectors. Here we map the vector 4366 * to other, rx rings and tx ring. 4367 */ 4368 if (actual < minimum) { 4369 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d", 4370 actual); 4371 goto alloc_handle_fail; 4372 } 4373 4374 /* 4375 * Get priority for first vector, assume remaining are all the same 4376 */ 4377 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 4378 if (rc != DDI_SUCCESS) { 4379 ixgbe_log(ixgbe, 4380 "Get interrupt priority failed: %d", rc); 4381 goto alloc_handle_fail; 4382 } 4383 4384 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 4385 if (rc != DDI_SUCCESS) { 4386 ixgbe_log(ixgbe, 4387 "Get interrupt cap failed: %d", rc); 4388 goto alloc_handle_fail; 4389 } 4390 4391 ixgbe->intr_type = intr_type; 4392 4393 return (IXGBE_SUCCESS); 4394 4395 alloc_handle_fail: 4396 ixgbe_rem_intrs(ixgbe); 4397 4398 return (IXGBE_FAILURE); 4399 } 4400 4401 /* 4402 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type. 4403 * 4404 * Before adding the interrupt handlers, the interrupt vectors have 4405 * been allocated, and the rx/tx rings have also been allocated. 4406 */ 4407 static int 4408 ixgbe_add_intr_handlers(ixgbe_t *ixgbe) 4409 { 4410 int vector = 0; 4411 int rc; 4412 4413 switch (ixgbe->intr_type) { 4414 case DDI_INTR_TYPE_MSIX: 4415 /* 4416 * Add interrupt handler for all vectors 4417 */ 4418 for (vector = 0; vector < ixgbe->intr_cnt; vector++) { 4419 /* 4420 * install pointer to vect_map[vector] 4421 */ 4422 rc = ddi_intr_add_handler(ixgbe->htable[vector], 4423 (ddi_intr_handler_t *)ixgbe_intr_msix, 4424 (void *)&ixgbe->vect_map[vector], NULL); 4425 4426 if (rc != DDI_SUCCESS) { 4427 ixgbe_log(ixgbe, 4428 "Add rx interrupt handler failed. " 4429 "return: %d, vector: %d", rc, vector); 4430 for (vector--; vector >= 0; vector--) { 4431 (void) ddi_intr_remove_handler( 4432 ixgbe->htable[vector]); 4433 } 4434 return (IXGBE_FAILURE); 4435 } 4436 } 4437 4438 break; 4439 4440 case DDI_INTR_TYPE_MSI: 4441 /* 4442 * Add interrupt handlers for the only vector 4443 */ 4444 rc = ddi_intr_add_handler(ixgbe->htable[vector], 4445 (ddi_intr_handler_t *)ixgbe_intr_msi, 4446 (void *)ixgbe, NULL); 4447 4448 if (rc != DDI_SUCCESS) { 4449 ixgbe_log(ixgbe, 4450 "Add MSI interrupt handler failed: %d", rc); 4451 return (IXGBE_FAILURE); 4452 } 4453 4454 break; 4455 4456 case DDI_INTR_TYPE_FIXED: 4457 /* 4458 * Add interrupt handlers for the only vector 4459 */ 4460 rc = ddi_intr_add_handler(ixgbe->htable[vector], 4461 (ddi_intr_handler_t *)ixgbe_intr_legacy, 4462 (void *)ixgbe, NULL); 4463 4464 if (rc != DDI_SUCCESS) { 4465 ixgbe_log(ixgbe, 4466 "Add legacy interrupt handler failed: %d", rc); 4467 return (IXGBE_FAILURE); 4468 } 4469 4470 break; 4471 4472 default: 4473 return (IXGBE_FAILURE); 4474 } 4475 4476 return (IXGBE_SUCCESS); 4477 } 4478 4479 #pragma inline(ixgbe_map_rxring_to_vector) 4480 /* 4481 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector. 4482 */ 4483 static void 4484 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx) 4485 { 4486 /* 4487 * Set bit in map 4488 */ 4489 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 4490 4491 /* 4492 * Count bits set 4493 */ 4494 ixgbe->vect_map[v_idx].rxr_cnt++; 4495 4496 /* 4497 * Remember bit position 4498 */ 4499 ixgbe->rx_rings[r_idx].intr_vector = v_idx; 4500 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx; 4501 } 4502 4503 #pragma inline(ixgbe_map_txring_to_vector) 4504 /* 4505 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector. 4506 */ 4507 static void 4508 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx) 4509 { 4510 /* 4511 * Set bit in map 4512 */ 4513 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx); 4514 4515 /* 4516 * Count bits set 4517 */ 4518 ixgbe->vect_map[v_idx].txr_cnt++; 4519 4520 /* 4521 * Remember bit position 4522 */ 4523 ixgbe->tx_rings[t_idx].intr_vector = v_idx; 4524 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx; 4525 } 4526 4527 /* 4528 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector 4529 * allocation register (IVAR). 4530 * cause: 4531 * -1 : other cause 4532 * 0 : rx 4533 * 1 : tx 4534 */ 4535 static void 4536 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector, 4537 int8_t cause) 4538 { 4539 struct ixgbe_hw *hw = &ixgbe->hw; 4540 u32 ivar, index; 4541 4542 switch (hw->mac.type) { 4543 case ixgbe_mac_82598EB: 4544 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 4545 if (cause == -1) { 4546 cause = 0; 4547 } 4548 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 4549 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 4550 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3))); 4551 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3))); 4552 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 4553 break; 4554 case ixgbe_mac_82599EB: 4555 if (cause == -1) { 4556 /* other causes */ 4557 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 4558 index = (intr_alloc_entry & 1) * 8; 4559 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 4560 ivar &= ~(0xFF << index); 4561 ivar |= (msix_vector << index); 4562 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 4563 } else { 4564 /* tx or rx causes */ 4565 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 4566 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 4567 ivar = IXGBE_READ_REG(hw, 4568 IXGBE_IVAR(intr_alloc_entry >> 1)); 4569 ivar &= ~(0xFF << index); 4570 ivar |= (msix_vector << index); 4571 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 4572 ivar); 4573 } 4574 break; 4575 default: 4576 break; 4577 } 4578 } 4579 4580 /* 4581 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of 4582 * given interrupt vector allocation register (IVAR). 4583 * cause: 4584 * -1 : other cause 4585 * 0 : rx 4586 * 1 : tx 4587 */ 4588 static void 4589 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 4590 { 4591 struct ixgbe_hw *hw = &ixgbe->hw; 4592 u32 ivar, index; 4593 4594 switch (hw->mac.type) { 4595 case ixgbe_mac_82598EB: 4596 if (cause == -1) { 4597 cause = 0; 4598 } 4599 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 4600 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 4601 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 * 4602 (intr_alloc_entry & 0x3))); 4603 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 4604 break; 4605 case ixgbe_mac_82599EB: 4606 if (cause == -1) { 4607 /* other causes */ 4608 index = (intr_alloc_entry & 1) * 8; 4609 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 4610 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 4611 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 4612 } else { 4613 /* tx or rx causes */ 4614 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 4615 ivar = IXGBE_READ_REG(hw, 4616 IXGBE_IVAR(intr_alloc_entry >> 1)); 4617 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 4618 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 4619 ivar); 4620 } 4621 break; 4622 default: 4623 break; 4624 } 4625 } 4626 4627 /* 4628 * ixgbe_disable_ivar - Disble the given entry by clearing the VAL bit of 4629 * given interrupt vector allocation register (IVAR). 4630 * cause: 4631 * -1 : other cause 4632 * 0 : rx 4633 * 1 : tx 4634 */ 4635 static void 4636 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 4637 { 4638 struct ixgbe_hw *hw = &ixgbe->hw; 4639 u32 ivar, index; 4640 4641 switch (hw->mac.type) { 4642 case ixgbe_mac_82598EB: 4643 if (cause == -1) { 4644 cause = 0; 4645 } 4646 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 4647 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 4648 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 * 4649 (intr_alloc_entry & 0x3))); 4650 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 4651 break; 4652 case ixgbe_mac_82599EB: 4653 if (cause == -1) { 4654 /* other causes */ 4655 index = (intr_alloc_entry & 1) * 8; 4656 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 4657 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 4658 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 4659 } else { 4660 /* tx or rx causes */ 4661 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 4662 ivar = IXGBE_READ_REG(hw, 4663 IXGBE_IVAR(intr_alloc_entry >> 1)); 4664 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 4665 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 4666 ivar); 4667 } 4668 break; 4669 default: 4670 break; 4671 } 4672 } 4673 4674 /* 4675 * Convert the rx ring index driver maintained to the rx ring index 4676 * in h/w. 4677 */ 4678 static uint32_t 4679 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index) 4680 { 4681 4682 struct ixgbe_hw *hw = &ixgbe->hw; 4683 uint32_t rx_ring_per_group, hw_rx_index; 4684 4685 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS || 4686 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) { 4687 return (sw_rx_index); 4688 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) { 4689 if (hw->mac.type == ixgbe_mac_82598EB) { 4690 return (sw_rx_index); 4691 } else if (hw->mac.type == ixgbe_mac_82599EB) { 4692 return (sw_rx_index * 2); 4693 } 4694 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) { 4695 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 4696 4697 if (hw->mac.type == ixgbe_mac_82598EB) { 4698 hw_rx_index = (sw_rx_index / rx_ring_per_group) * 4699 16 + (sw_rx_index % rx_ring_per_group); 4700 return (hw_rx_index); 4701 } else if (hw->mac.type == ixgbe_mac_82599EB) { 4702 if (ixgbe->num_rx_groups > 32) { 4703 hw_rx_index = (sw_rx_index / 4704 rx_ring_per_group) * 2 + 4705 (sw_rx_index % rx_ring_per_group); 4706 } else { 4707 hw_rx_index = (sw_rx_index / 4708 rx_ring_per_group) * 4 + 4709 (sw_rx_index % rx_ring_per_group); 4710 } 4711 return (hw_rx_index); 4712 } 4713 } 4714 4715 /* 4716 * Should never reach. Just to make compiler happy. 4717 */ 4718 return (sw_rx_index); 4719 } 4720 4721 /* 4722 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors. 4723 * 4724 * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt 4725 * to vector[0 - (intr_cnt -1)]. 4726 */ 4727 static int 4728 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe) 4729 { 4730 int i, vector = 0; 4731 4732 /* initialize vector map */ 4733 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 4734 for (i = 0; i < ixgbe->intr_cnt; i++) { 4735 ixgbe->vect_map[i].ixgbe = ixgbe; 4736 } 4737 4738 /* 4739 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0], 4740 * tx rings[0] on RTxQ[1]. 4741 */ 4742 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 4743 ixgbe_map_rxring_to_vector(ixgbe, 0, 0); 4744 ixgbe_map_txring_to_vector(ixgbe, 0, 1); 4745 return (IXGBE_SUCCESS); 4746 } 4747 4748 /* 4749 * Interrupts/vectors mapping for MSI-X 4750 */ 4751 4752 /* 4753 * Map other interrupt to vector 0, 4754 * Set bit in map and count the bits set. 4755 */ 4756 BT_SET(ixgbe->vect_map[vector].other_map, 0); 4757 ixgbe->vect_map[vector].other_cnt++; 4758 4759 /* 4760 * Map rx ring interrupts to vectors 4761 */ 4762 for (i = 0; i < ixgbe->num_rx_rings; i++) { 4763 ixgbe_map_rxring_to_vector(ixgbe, i, vector); 4764 vector = (vector +1) % ixgbe->intr_cnt; 4765 } 4766 4767 /* 4768 * Map tx ring interrupts to vectors 4769 */ 4770 for (i = 0; i < ixgbe->num_tx_rings; i++) { 4771 ixgbe_map_txring_to_vector(ixgbe, i, vector); 4772 vector = (vector +1) % ixgbe->intr_cnt; 4773 } 4774 4775 return (IXGBE_SUCCESS); 4776 } 4777 4778 /* 4779 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s). 4780 * 4781 * This relies on ring/vector mapping already set up in the 4782 * vect_map[] structures 4783 */ 4784 static void 4785 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe) 4786 { 4787 struct ixgbe_hw *hw = &ixgbe->hw; 4788 ixgbe_intr_vector_t *vect; /* vector bitmap */ 4789 int r_idx; /* ring index */ 4790 int v_idx; /* vector index */ 4791 uint32_t hw_index; 4792 4793 /* 4794 * Clear any previous entries 4795 */ 4796 switch (hw->mac.type) { 4797 case ixgbe_mac_82598EB: 4798 for (v_idx = 0; v_idx < 25; v_idx++) 4799 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 4800 4801 break; 4802 case ixgbe_mac_82599EB: 4803 for (v_idx = 0; v_idx < 64; v_idx++) 4804 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 4805 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0); 4806 4807 break; 4808 default: 4809 break; 4810 } 4811 4812 /* 4813 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and 4814 * tx rings[0] will use RTxQ[1]. 4815 */ 4816 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 4817 ixgbe_setup_ivar(ixgbe, 0, 0, 0); 4818 ixgbe_setup_ivar(ixgbe, 0, 1, 1); 4819 return; 4820 } 4821 4822 /* 4823 * For MSI-X interrupt, "Other" is always on vector[0]. 4824 */ 4825 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1); 4826 4827 /* 4828 * For each interrupt vector, populate the IVAR table 4829 */ 4830 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) { 4831 vect = &ixgbe->vect_map[v_idx]; 4832 4833 /* 4834 * For each rx ring bit set 4835 */ 4836 r_idx = bt_getlowbit(vect->rx_map, 0, 4837 (ixgbe->num_rx_rings - 1)); 4838 4839 while (r_idx >= 0) { 4840 hw_index = ixgbe->rx_rings[r_idx].hw_index; 4841 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0); 4842 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 4843 (ixgbe->num_rx_rings - 1)); 4844 } 4845 4846 /* 4847 * For each tx ring bit set 4848 */ 4849 r_idx = bt_getlowbit(vect->tx_map, 0, 4850 (ixgbe->num_tx_rings - 1)); 4851 4852 while (r_idx >= 0) { 4853 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1); 4854 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 4855 (ixgbe->num_tx_rings - 1)); 4856 } 4857 } 4858 } 4859 4860 /* 4861 * ixgbe_rem_intr_handlers - Remove the interrupt handlers. 4862 */ 4863 static void 4864 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe) 4865 { 4866 int i; 4867 int rc; 4868 4869 for (i = 0; i < ixgbe->intr_cnt; i++) { 4870 rc = ddi_intr_remove_handler(ixgbe->htable[i]); 4871 if (rc != DDI_SUCCESS) { 4872 IXGBE_DEBUGLOG_1(ixgbe, 4873 "Remove intr handler failed: %d", rc); 4874 } 4875 } 4876 } 4877 4878 /* 4879 * ixgbe_rem_intrs - Remove the allocated interrupts. 4880 */ 4881 static void 4882 ixgbe_rem_intrs(ixgbe_t *ixgbe) 4883 { 4884 int i; 4885 int rc; 4886 4887 for (i = 0; i < ixgbe->intr_cnt; i++) { 4888 rc = ddi_intr_free(ixgbe->htable[i]); 4889 if (rc != DDI_SUCCESS) { 4890 IXGBE_DEBUGLOG_1(ixgbe, 4891 "Free intr failed: %d", rc); 4892 } 4893 } 4894 4895 kmem_free(ixgbe->htable, ixgbe->intr_size); 4896 ixgbe->htable = NULL; 4897 } 4898 4899 /* 4900 * ixgbe_enable_intrs - Enable all the ddi interrupts. 4901 */ 4902 static int 4903 ixgbe_enable_intrs(ixgbe_t *ixgbe) 4904 { 4905 int i; 4906 int rc; 4907 4908 /* 4909 * Enable interrupts 4910 */ 4911 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 4912 /* 4913 * Call ddi_intr_block_enable() for MSI 4914 */ 4915 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt); 4916 if (rc != DDI_SUCCESS) { 4917 ixgbe_log(ixgbe, 4918 "Enable block intr failed: %d", rc); 4919 return (IXGBE_FAILURE); 4920 } 4921 } else { 4922 /* 4923 * Call ddi_intr_enable() for Legacy/MSI non block enable 4924 */ 4925 for (i = 0; i < ixgbe->intr_cnt; i++) { 4926 rc = ddi_intr_enable(ixgbe->htable[i]); 4927 if (rc != DDI_SUCCESS) { 4928 ixgbe_log(ixgbe, 4929 "Enable intr failed: %d", rc); 4930 return (IXGBE_FAILURE); 4931 } 4932 } 4933 } 4934 4935 return (IXGBE_SUCCESS); 4936 } 4937 4938 /* 4939 * ixgbe_disable_intrs - Disable all the interrupts. 4940 */ 4941 static int 4942 ixgbe_disable_intrs(ixgbe_t *ixgbe) 4943 { 4944 int i; 4945 int rc; 4946 4947 /* 4948 * Disable all interrupts 4949 */ 4950 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 4951 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt); 4952 if (rc != DDI_SUCCESS) { 4953 ixgbe_log(ixgbe, 4954 "Disable block intr failed: %d", rc); 4955 return (IXGBE_FAILURE); 4956 } 4957 } else { 4958 for (i = 0; i < ixgbe->intr_cnt; i++) { 4959 rc = ddi_intr_disable(ixgbe->htable[i]); 4960 if (rc != DDI_SUCCESS) { 4961 ixgbe_log(ixgbe, 4962 "Disable intr failed: %d", rc); 4963 return (IXGBE_FAILURE); 4964 } 4965 } 4966 } 4967 4968 return (IXGBE_SUCCESS); 4969 } 4970 4971 /* 4972 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware. 4973 */ 4974 static void 4975 ixgbe_get_hw_state(ixgbe_t *ixgbe) 4976 { 4977 struct ixgbe_hw *hw = &ixgbe->hw; 4978 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 4979 boolean_t link_up = B_FALSE; 4980 uint32_t pcs1g_anlp = 0; 4981 uint32_t pcs1g_ana = 0; 4982 4983 ASSERT(mutex_owned(&ixgbe->gen_lock)); 4984 ixgbe->param_lp_1000fdx_cap = 0; 4985 ixgbe->param_lp_100fdx_cap = 0; 4986 4987 /* check for link, don't wait */ 4988 (void) ixgbe_check_link(hw, &speed, &link_up, false); 4989 if (link_up) { 4990 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 4991 pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 4992 4993 ixgbe->param_lp_1000fdx_cap = 4994 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 4995 ixgbe->param_lp_100fdx_cap = 4996 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 4997 } 4998 4999 ixgbe->param_adv_1000fdx_cap = 5000 (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0; 5001 ixgbe->param_adv_100fdx_cap = (pcs1g_ana & IXGBE_PCS1GANA_FDC) ? 1 : 0; 5002 } 5003 5004 /* 5005 * ixgbe_get_driver_control - Notify that driver is in control of device. 5006 */ 5007 static void 5008 ixgbe_get_driver_control(struct ixgbe_hw *hw) 5009 { 5010 uint32_t ctrl_ext; 5011 5012 /* 5013 * Notify firmware that driver is in control of device 5014 */ 5015 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5016 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 5017 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5018 } 5019 5020 /* 5021 * ixgbe_release_driver_control - Notify that driver is no longer in control 5022 * of device. 5023 */ 5024 static void 5025 ixgbe_release_driver_control(struct ixgbe_hw *hw) 5026 { 5027 uint32_t ctrl_ext; 5028 5029 /* 5030 * Notify firmware that driver is no longer in control of device 5031 */ 5032 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5033 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 5034 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5035 } 5036 5037 /* 5038 * ixgbe_atomic_reserve - Atomic decrease operation. 5039 */ 5040 int 5041 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n) 5042 { 5043 uint32_t oldval; 5044 uint32_t newval; 5045 5046 /* 5047 * ATOMICALLY 5048 */ 5049 do { 5050 oldval = *count_p; 5051 if (oldval < n) 5052 return (-1); 5053 newval = oldval - n; 5054 } while (atomic_cas_32(count_p, oldval, newval) != oldval); 5055 5056 return (newval); 5057 } 5058 5059 /* 5060 * ixgbe_mc_table_itr - Traverse the entries in the multicast table. 5061 */ 5062 static uint8_t * 5063 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq) 5064 { 5065 uint8_t *addr = *upd_ptr; 5066 uint8_t *new_ptr; 5067 5068 _NOTE(ARGUNUSED(hw)); 5069 _NOTE(ARGUNUSED(vmdq)); 5070 5071 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 5072 *upd_ptr = new_ptr; 5073 return (addr); 5074 } 5075 5076 /* 5077 * FMA support 5078 */ 5079 int 5080 ixgbe_check_acc_handle(ddi_acc_handle_t handle) 5081 { 5082 ddi_fm_error_t de; 5083 5084 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 5085 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 5086 return (de.fme_status); 5087 } 5088 5089 int 5090 ixgbe_check_dma_handle(ddi_dma_handle_t handle) 5091 { 5092 ddi_fm_error_t de; 5093 5094 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 5095 return (de.fme_status); 5096 } 5097 5098 /* 5099 * ixgbe_fm_error_cb - The IO fault service error handling callback function. 5100 */ 5101 static int 5102 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 5103 { 5104 _NOTE(ARGUNUSED(impl_data)); 5105 /* 5106 * as the driver can always deal with an error in any dma or 5107 * access handle, we can just return the fme_status value. 5108 */ 5109 pci_ereport_post(dip, err, NULL); 5110 return (err->fme_status); 5111 } 5112 5113 static void 5114 ixgbe_fm_init(ixgbe_t *ixgbe) 5115 { 5116 ddi_iblock_cookie_t iblk; 5117 int fma_dma_flag; 5118 5119 /* 5120 * Only register with IO Fault Services if we have some capability 5121 */ 5122 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 5123 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 5124 } else { 5125 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 5126 } 5127 5128 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 5129 fma_dma_flag = 1; 5130 } else { 5131 fma_dma_flag = 0; 5132 } 5133 5134 ixgbe_set_fma_flags(fma_dma_flag); 5135 5136 if (ixgbe->fm_capabilities) { 5137 5138 /* 5139 * Register capabilities with IO Fault Services 5140 */ 5141 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk); 5142 5143 /* 5144 * Initialize pci ereport capabilities if ereport capable 5145 */ 5146 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 5147 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5148 pci_ereport_setup(ixgbe->dip); 5149 5150 /* 5151 * Register error callback if error callback capable 5152 */ 5153 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5154 ddi_fm_handler_register(ixgbe->dip, 5155 ixgbe_fm_error_cb, (void*) ixgbe); 5156 } 5157 } 5158 5159 static void 5160 ixgbe_fm_fini(ixgbe_t *ixgbe) 5161 { 5162 /* 5163 * Only unregister FMA capabilities if they are registered 5164 */ 5165 if (ixgbe->fm_capabilities) { 5166 5167 /* 5168 * Release any resources allocated by pci_ereport_setup() 5169 */ 5170 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 5171 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5172 pci_ereport_teardown(ixgbe->dip); 5173 5174 /* 5175 * Un-register error callback if error callback capable 5176 */ 5177 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5178 ddi_fm_handler_unregister(ixgbe->dip); 5179 5180 /* 5181 * Unregister from IO Fault Service 5182 */ 5183 ddi_fm_fini(ixgbe->dip); 5184 } 5185 } 5186 5187 void 5188 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail) 5189 { 5190 uint64_t ena; 5191 char buf[FM_MAX_CLASS]; 5192 5193 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 5194 ena = fm_ena_generate(0, FM_ENA_FMT1); 5195 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) { 5196 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP, 5197 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 5198 } 5199 } 5200 5201 static int 5202 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 5203 { 5204 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh; 5205 5206 mutex_enter(&rx_ring->rx_lock); 5207 rx_ring->ring_gen_num = mr_gen_num; 5208 mutex_exit(&rx_ring->rx_lock); 5209 return (0); 5210 } 5211 5212 /* 5213 * Get the global ring index by a ring index within a group. 5214 */ 5215 static int 5216 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex) 5217 { 5218 ixgbe_rx_ring_t *rx_ring; 5219 int i; 5220 5221 for (i = 0; i < ixgbe->num_rx_rings; i++) { 5222 rx_ring = &ixgbe->rx_rings[i]; 5223 if (rx_ring->group_index == gindex) 5224 rindex--; 5225 if (rindex < 0) 5226 return (i); 5227 } 5228 5229 return (-1); 5230 } 5231 5232 /* 5233 * Callback funtion for MAC layer to register all rings. 5234 */ 5235 /* ARGSUSED */ 5236 void 5237 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index, 5238 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 5239 { 5240 ixgbe_t *ixgbe = (ixgbe_t *)arg; 5241 mac_intr_t *mintr = &infop->mri_intr; 5242 5243 switch (rtype) { 5244 case MAC_RING_TYPE_RX: { 5245 /* 5246 * 'index' is the ring index within the group. 5247 * Need to get the global ring index by searching in groups. 5248 */ 5249 int global_ring_index = ixgbe_get_rx_ring_index( 5250 ixgbe, group_index, ring_index); 5251 5252 ASSERT(global_ring_index >= 0); 5253 5254 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index]; 5255 rx_ring->ring_handle = rh; 5256 5257 infop->mri_driver = (mac_ring_driver_t)rx_ring; 5258 infop->mri_start = ixgbe_ring_start; 5259 infop->mri_stop = NULL; 5260 infop->mri_poll = ixgbe_ring_rx_poll; 5261 infop->mri_stat = ixgbe_rx_ring_stat; 5262 5263 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 5264 mintr->mi_enable = ixgbe_rx_ring_intr_enable; 5265 mintr->mi_disable = ixgbe_rx_ring_intr_disable; 5266 if (ixgbe->intr_type & 5267 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 5268 mintr->mi_ddi_handle = 5269 ixgbe->htable[rx_ring->intr_vector]; 5270 } 5271 5272 break; 5273 } 5274 case MAC_RING_TYPE_TX: { 5275 ASSERT(group_index == -1); 5276 ASSERT(ring_index < ixgbe->num_tx_rings); 5277 5278 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index]; 5279 tx_ring->ring_handle = rh; 5280 5281 infop->mri_driver = (mac_ring_driver_t)tx_ring; 5282 infop->mri_start = NULL; 5283 infop->mri_stop = NULL; 5284 infop->mri_tx = ixgbe_ring_tx; 5285 infop->mri_stat = ixgbe_tx_ring_stat; 5286 if (ixgbe->intr_type & 5287 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 5288 mintr->mi_ddi_handle = 5289 ixgbe->htable[tx_ring->intr_vector]; 5290 } 5291 break; 5292 } 5293 default: 5294 break; 5295 } 5296 } 5297 5298 /* 5299 * Callback funtion for MAC layer to register all groups. 5300 */ 5301 void 5302 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index, 5303 mac_group_info_t *infop, mac_group_handle_t gh) 5304 { 5305 ixgbe_t *ixgbe = (ixgbe_t *)arg; 5306 5307 switch (rtype) { 5308 case MAC_RING_TYPE_RX: { 5309 ixgbe_rx_group_t *rx_group; 5310 5311 rx_group = &ixgbe->rx_groups[index]; 5312 rx_group->group_handle = gh; 5313 5314 infop->mgi_driver = (mac_group_driver_t)rx_group; 5315 infop->mgi_start = NULL; 5316 infop->mgi_stop = NULL; 5317 infop->mgi_addmac = ixgbe_addmac; 5318 infop->mgi_remmac = ixgbe_remmac; 5319 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups); 5320 5321 break; 5322 } 5323 case MAC_RING_TYPE_TX: 5324 break; 5325 default: 5326 break; 5327 } 5328 } 5329 5330 /* 5331 * Enable interrupt on the specificed rx ring. 5332 */ 5333 int 5334 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh) 5335 { 5336 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 5337 ixgbe_t *ixgbe = rx_ring->ixgbe; 5338 int r_idx = rx_ring->index; 5339 int hw_r_idx = rx_ring->hw_index; 5340 int v_idx = rx_ring->intr_vector; 5341 5342 mutex_enter(&ixgbe->gen_lock); 5343 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 5344 mutex_exit(&ixgbe->gen_lock); 5345 /* 5346 * Simply return 0. 5347 * Interrupts are being adjusted. ixgbe_intr_adjust() 5348 * will eventually re-enable the interrupt when it's 5349 * done with the adjustment. 5350 */ 5351 return (0); 5352 } 5353 5354 /* 5355 * To enable interrupt by setting the VAL bit of given interrupt 5356 * vector allocation register (IVAR). 5357 */ 5358 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0); 5359 5360 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 5361 5362 /* 5363 * To trigger a Rx interrupt to on this ring 5364 */ 5365 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx)); 5366 IXGBE_WRITE_FLUSH(&ixgbe->hw); 5367 5368 mutex_exit(&ixgbe->gen_lock); 5369 5370 return (0); 5371 } 5372 5373 /* 5374 * Disable interrupt on the specificed rx ring. 5375 */ 5376 int 5377 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh) 5378 { 5379 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 5380 ixgbe_t *ixgbe = rx_ring->ixgbe; 5381 int r_idx = rx_ring->index; 5382 int hw_r_idx = rx_ring->hw_index; 5383 int v_idx = rx_ring->intr_vector; 5384 5385 mutex_enter(&ixgbe->gen_lock); 5386 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 5387 mutex_exit(&ixgbe->gen_lock); 5388 /* 5389 * Simply return 0. 5390 * In the rare case where an interrupt is being 5391 * disabled while interrupts are being adjusted, 5392 * we don't fail the operation. No interrupts will 5393 * be generated while they are adjusted, and 5394 * ixgbe_intr_adjust() will cause the interrupts 5395 * to be re-enabled once it completes. Note that 5396 * in this case, packets may be delivered to the 5397 * stack via interrupts before xgbe_rx_ring_intr_enable() 5398 * is called again. This is acceptable since interrupt 5399 * adjustment is infrequent, and the stack will be 5400 * able to handle these packets. 5401 */ 5402 return (0); 5403 } 5404 5405 /* 5406 * To disable interrupt by clearing the VAL bit of given interrupt 5407 * vector allocation register (IVAR). 5408 */ 5409 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0); 5410 5411 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx); 5412 5413 mutex_exit(&ixgbe->gen_lock); 5414 5415 return (0); 5416 } 5417 5418 /* 5419 * Add a mac address. 5420 */ 5421 static int 5422 ixgbe_addmac(void *arg, const uint8_t *mac_addr) 5423 { 5424 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 5425 ixgbe_t *ixgbe = rx_group->ixgbe; 5426 struct ixgbe_hw *hw = &ixgbe->hw; 5427 int slot, i; 5428 5429 mutex_enter(&ixgbe->gen_lock); 5430 5431 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 5432 mutex_exit(&ixgbe->gen_lock); 5433 return (ECANCELED); 5434 } 5435 5436 if (ixgbe->unicst_avail == 0) { 5437 /* no slots available */ 5438 mutex_exit(&ixgbe->gen_lock); 5439 return (ENOSPC); 5440 } 5441 5442 /* 5443 * The first ixgbe->num_rx_groups slots are reserved for each respective 5444 * group. The rest slots are shared by all groups. While adding a 5445 * MAC address, reserved slots are firstly checked then the shared 5446 * slots are searched. 5447 */ 5448 slot = -1; 5449 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) { 5450 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) { 5451 if (ixgbe->unicst_addr[i].mac.set == 0) { 5452 slot = i; 5453 break; 5454 } 5455 } 5456 } else { 5457 slot = rx_group->index; 5458 } 5459 5460 if (slot == -1) { 5461 /* no slots available */ 5462 mutex_exit(&ixgbe->gen_lock); 5463 return (ENOSPC); 5464 } 5465 5466 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 5467 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr, 5468 rx_group->index, IXGBE_RAH_AV); 5469 ixgbe->unicst_addr[slot].mac.set = 1; 5470 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index; 5471 ixgbe->unicst_avail--; 5472 5473 mutex_exit(&ixgbe->gen_lock); 5474 5475 return (0); 5476 } 5477 5478 /* 5479 * Remove a mac address. 5480 */ 5481 static int 5482 ixgbe_remmac(void *arg, const uint8_t *mac_addr) 5483 { 5484 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg; 5485 ixgbe_t *ixgbe = rx_group->ixgbe; 5486 struct ixgbe_hw *hw = &ixgbe->hw; 5487 int slot; 5488 5489 mutex_enter(&ixgbe->gen_lock); 5490 5491 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 5492 mutex_exit(&ixgbe->gen_lock); 5493 return (ECANCELED); 5494 } 5495 5496 slot = ixgbe_unicst_find(ixgbe, mac_addr); 5497 if (slot == -1) { 5498 mutex_exit(&ixgbe->gen_lock); 5499 return (EINVAL); 5500 } 5501 5502 if (ixgbe->unicst_addr[slot].mac.set == 0) { 5503 mutex_exit(&ixgbe->gen_lock); 5504 return (EINVAL); 5505 } 5506 5507 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL); 5508 (void) ixgbe_clear_rar(hw, slot); 5509 ixgbe->unicst_addr[slot].mac.set = 0; 5510 ixgbe->unicst_avail++; 5511 5512 mutex_exit(&ixgbe->gen_lock); 5513 5514 return (0); 5515 } 5516