/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, Joyent, Inc.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
 */

#include "ixgbe_sw.h"

static char ixgbe_ident[] = "Intel 10Gb Ethernet";

/*
 * Local function prototypes
 */
static int ixgbe_register_mac(ixgbe_t *);
static int ixgbe_identify_hardware(ixgbe_t *);
static int ixgbe_regs_map(ixgbe_t *);
static void ixgbe_init_properties(ixgbe_t *);
static int ixgbe_init_driver_settings(ixgbe_t *);
static void ixgbe_init_locks(ixgbe_t *);
static void ixgbe_destroy_locks(ixgbe_t *);
static int ixgbe_init(ixgbe_t *);
static int ixgbe_chip_start(ixgbe_t *);
static void ixgbe_chip_stop(ixgbe_t *);
static int ixgbe_reset(ixgbe_t *);
static void ixgbe_tx_clean(ixgbe_t *);
static boolean_t ixgbe_tx_drain(ixgbe_t *);
static boolean_t ixgbe_rx_drain(ixgbe_t *);
static int ixgbe_alloc_rings(ixgbe_t *);
static void ixgbe_free_rings(ixgbe_t *);
static int ixgbe_alloc_rx_data(ixgbe_t *);
static void ixgbe_free_rx_data(ixgbe_t *);
static void ixgbe_setup_rings(ixgbe_t *);
static void ixgbe_setup_rx(ixgbe_t *);
static void ixgbe_setup_tx(ixgbe_t *);
static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
static void ixgbe_setup_rss(ixgbe_t *);
static void ixgbe_setup_vmdq(ixgbe_t *);
static void ixgbe_setup_vmdq_rss(ixgbe_t *);
static void ixgbe_setup_rss_table(ixgbe_t *);
static void ixgbe_init_unicst(ixgbe_t *);
static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
static void ixgbe_setup_multicst(ixgbe_t *);
static void ixgbe_get_hw_state(ixgbe_t *);
static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
static void ixgbe_get_conf(ixgbe_t *);
static void ixgbe_init_params(ixgbe_t *);
static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
static void ixgbe_driver_link_check(ixgbe_t *);
static void ixgbe_sfp_check(void *);
static void ixgbe_overtemp_check(void *);
static void ixgbe_phy_check(void *);
static void ixgbe_link_timer(void *);
static void ixgbe_local_timer(void *);
static void ixgbe_arm_watchdog_timer(ixgbe_t *);
static void ixgbe_restart_watchdog_timer(ixgbe_t *);
static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
static boolean_t is_valid_mac_addr(uint8_t *);
static boolean_t ixgbe_stall_check(ixgbe_t *);
static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
static boolean_t ixgbe_find_mac_address(ixgbe_t *);
static int ixgbe_alloc_intrs(ixgbe_t *);
static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
static int ixgbe_add_intr_handlers(ixgbe_t *);
static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
static void ixgbe_setup_adapter_vector(ixgbe_t *);
static void ixgbe_rem_intr_handlers(ixgbe_t *);
static void ixgbe_rem_intrs(ixgbe_t *);
static int ixgbe_enable_intrs(ixgbe_t *);
static int ixgbe_disable_intrs(ixgbe_t *);
static uint_t ixgbe_intr_legacy(void *, void *);
static uint_t ixgbe_intr_msi(void *, void *);
static uint_t ixgbe_intr_msix(void *, void *);
static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
static void ixgbe_get_driver_control(struct ixgbe_hw *);
static int ixgbe_addmac(void *, const uint8_t *);
static int ixgbe_remmac(void *, const uint8_t *);
static void ixgbe_release_driver_control(struct ixgbe_hw *);

static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
static int ixgbe_resume(dev_info_t *);
static int ixgbe_suspend(dev_info_t *);
static int ixgbe_quiesce(dev_info_t *);
static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
static int ixgbe_intr_cb_register(ixgbe_t *);
static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);

static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
    const void *impl_data);
static void ixgbe_fm_init(ixgbe_t *);
static void ixgbe_fm_fini(ixgbe_t *);

char *ixgbe_priv_props[] = {
    "_tx_copy_thresh",
    "_tx_recycle_thresh",
    "_tx_overload_thresh",
    "_tx_resched_thresh",
    "_rx_copy_thresh",
    "_rx_limit_per_intr",
    "_intr_throttling",
    "_adv_pause_cap",
    "_adv_asym_pause_cap",
    NULL
};

#define IXGBE_MAX_PRIV_PROPS \
    (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))

static struct cb_ops ixgbe_cb_ops = {
    nulldev,            /* cb_open */
    nulldev,            /* cb_close */
    nodev,              /* cb_strategy */
    nodev,              /* cb_print */
    nodev,              /* cb_dump */
    nodev,              /* cb_read */
    nodev,              /* cb_write */
    nodev,              /* cb_ioctl */
    nodev,              /* cb_devmap */
    nodev,              /* cb_mmap */
    nodev,              /* cb_segmap */
    nochpoll,           /* cb_chpoll */
    ddi_prop_op,        /* cb_prop_op */
    NULL,               /* cb_stream */
    D_MP | D_HOTPLUG,   /* cb_flag */
    CB_REV,             /* cb_rev */
    nodev,              /* cb_aread */
    nodev               /* cb_awrite */
};
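/*
 * For a GLDv3 driver these cb_ops entries are effectively inert
 * placeholders: user I/O reaches the driver through the MAC framework
 * (mac_init_ops()/mac_register(), below) rather than through these
 * character-device entry points.
 */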
static struct dev_ops ixgbe_dev_ops = {
    DEVO_REV,           /* devo_rev */
    0,                  /* devo_refcnt */
    NULL,               /* devo_getinfo */
    nulldev,            /* devo_identify */
    nulldev,            /* devo_probe */
    ixgbe_attach,       /* devo_attach */
    ixgbe_detach,       /* devo_detach */
    nodev,              /* devo_reset */
    &ixgbe_cb_ops,      /* devo_cb_ops */
    NULL,               /* devo_bus_ops */
    ddi_power,          /* devo_power */
    ixgbe_quiesce,      /* devo_quiesce */
};

static struct modldrv ixgbe_modldrv = {
    &mod_driverops,     /* Type of module. This one is a driver */
    ixgbe_ident,        /* Description string */
    &ixgbe_dev_ops      /* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
    MODREV_1, &ixgbe_modldrv, NULL
};

/*
 * Access attributes for register mapping
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
    DDI_DEVICE_ATTR_V1,
    DDI_STRUCTURE_LE_ACC,
    DDI_STRICTORDER_ACC,
    DDI_FLAGERR_ACC
};

/*
 * Loopback property
 */
static lb_property_t lb_normal = {
    normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
    internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
    external, "External", IXGBE_LB_EXTERNAL
};

#define IXGBE_M_CALLBACK_FLAGS \
    (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

static mac_callbacks_t ixgbe_m_callbacks = {
    IXGBE_M_CALLBACK_FLAGS,
    ixgbe_m_stat,
    ixgbe_m_start,
    ixgbe_m_stop,
    ixgbe_m_promisc,
    ixgbe_m_multicst,
    NULL,
    NULL,
    NULL,
    ixgbe_m_ioctl,
    ixgbe_m_getcapab,
    NULL,
    NULL,
    ixgbe_m_setprop,
    ixgbe_m_getprop,
    ixgbe_m_propinfo
};

/*
 * Initialize capabilities of each supported adapter type
 */
static adapter_info_t ixgbe_82598eb_cap = {
    64,     /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    64,     /* default number of rx queues */
    16,     /* maximum number of rx groups */
    1,      /* minimum number of rx groups */
    1,      /* default number of rx groups */
    32,     /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    8,      /* default number of tx queues */
    16366,  /* maximum MTU size */
    0xFFFF, /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    200,    /* default interrupt throttle rate */
    18,     /* maximum total msix vectors */
    16,     /* maximum number of ring vectors */
    2,      /* maximum number of other vectors */
    IXGBE_EICR_LSC,             /* "other" interrupt types handled */
    0,      /* "other" interrupt types enable mask */
    (IXGBE_FLAG_DCA_CAPABLE     /* capability flags */
    | IXGBE_FLAG_RSS_CAPABLE
    | IXGBE_FLAG_VMDQ_CAPABLE)
};

static adapter_info_t ixgbe_82599eb_cap = {
    128,    /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    128,    /* default number of rx queues */
    64,     /* maximum number of rx groups */
    1,      /* minimum number of rx groups */
    1,      /* default number of rx groups */
    128,    /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    8,      /* default number of tx queues */
    15500,  /* maximum MTU size */
    0xFF8,  /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    200,    /* default interrupt throttle rate */
    64,     /* maximum total msix vectors */
    16,     /* maximum number of ring vectors */
    2,      /* maximum number of other vectors */
    (IXGBE_EICR_LSC
    | IXGBE_EICR_GPI_SDP1
    | IXGBE_EICR_GPI_SDP2),     /* "other" interrupt types handled */

    (IXGBE_SDP1_GPIEN
    | IXGBE_SDP2_GPIEN),        /* "other" interrupt types enable mask */

    (IXGBE_FLAG_DCA_CAPABLE
    | IXGBE_FLAG_RSS_CAPABLE
    | IXGBE_FLAG_VMDQ_CAPABLE
    | IXGBE_FLAG_RSC_CAPABLE
    | IXGBE_FLAG_SFP_PLUG_CAPABLE)  /* capability flags */
};
static adapter_info_t ixgbe_X540_cap = {
    128,    /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    128,    /* default number of rx queues */
    64,     /* maximum number of rx groups */
    1,      /* minimum number of rx groups */
    1,      /* default number of rx groups */
    128,    /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    8,      /* default number of tx queues */
    15500,  /* maximum MTU size */
    0xFF8,  /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    200,    /* default interrupt throttle rate */
    64,     /* maximum total msix vectors */
    16,     /* maximum number of ring vectors */
    2,      /* maximum number of other vectors */
    (IXGBE_EICR_LSC
    | IXGBE_EICR_GPI_SDP1_X540
    | IXGBE_EICR_GPI_SDP2_X540),    /* "other" interrupt types handled */

    (IXGBE_SDP1_GPIEN_X540
    | IXGBE_SDP2_GPIEN_X540),   /* "other" interrupt types enable mask */

    (IXGBE_FLAG_DCA_CAPABLE
    | IXGBE_FLAG_RSS_CAPABLE
    | IXGBE_FLAG_VMDQ_CAPABLE
    | IXGBE_FLAG_RSC_CAPABLE)   /* capability flags */
};

static adapter_info_t ixgbe_X550_cap = {
    128,    /* maximum number of rx queues */
    1,      /* minimum number of rx queues */
    128,    /* default number of rx queues */
    64,     /* maximum number of rx groups */
    1,      /* minimum number of rx groups */
    1,      /* default number of rx groups */
    128,    /* maximum number of tx queues */
    1,      /* minimum number of tx queues */
    8,      /* default number of tx queues */
    15500,  /* maximum MTU size */
    0xFF8,  /* maximum interrupt throttle rate */
    0,      /* minimum interrupt throttle rate */
    0x200,  /* default interrupt throttle rate */
    64,     /* maximum total msix vectors */
    16,     /* maximum number of ring vectors */
    2,      /* maximum number of other vectors */
    IXGBE_EICR_LSC,             /* "other" interrupt types handled */
    0,      /* "other" interrupt types enable mask */
    (IXGBE_FLAG_RSS_CAPABLE
    | IXGBE_FLAG_VMDQ_CAPABLE
    | IXGBE_FLAG_RSC_CAPABLE)   /* capability flags */
};

/*
 * Module Initialization Functions.
 */

int
_init(void)
{
    int status;

    mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);

    status = mod_install(&ixgbe_modlinkage);

    if (status != DDI_SUCCESS) {
        mac_fini_ops(&ixgbe_dev_ops);
    }

    return (status);
}

int
_fini(void)
{
    int status;

    status = mod_remove(&ixgbe_modlinkage);

    if (status == DDI_SUCCESS) {
        mac_fini_ops(&ixgbe_dev_ops);
    }

    return (status);
}

int
_info(struct modinfo *modinfop)
{
    int status;

    status = mod_info(&ixgbe_modlinkage, modinfop);

    return (status);
}
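/*
 * A note on the error handling used throughout ixgbe_attach() below:
 * each completed step records an ATTACH_PROGRESS_* bit in
 * ixgbe->attach_progress, and ixgbe_unconfigure() consults those bits
 * to unwind only what was actually set up. A minimal sketch of the
 * pattern (illustrative only; ixgbe_do_step and ATTACH_PROGRESS_STEP
 * are hypothetical names):
 *
 *     if (ixgbe_do_step(ixgbe) != IXGBE_SUCCESS)
 *         goto attach_fail;    (unwound via attach_progress)
 *     ixgbe->attach_progress |= ATTACH_PROGRESS_STEP;
 */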
/*
 * ixgbe_attach - Driver attach.
 *
 * This function is the device specific initialization entry
 * point. This entry point is required and must be written.
 * The DDI_ATTACH command must be provided in the attach entry
 * point. When attach() is called with cmd set to DDI_ATTACH,
 * all normal kernel services (such as kmem_alloc(9F)) are
 * available for use by the driver.
 *
 * The attach() function will be called once for each instance
 * of the device on the system with cmd set to DDI_ATTACH.
 * Until attach() succeeds, the only driver entry points which
 * may be called are open(9E) and getinfo(9E).
 */
static int
ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
    ixgbe_t *ixgbe;
    struct ixgbe_osdep *osdep;
    struct ixgbe_hw *hw;
    int instance;
    char taskqname[32];

    /*
     * Check the command and perform corresponding operations
     */
    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_RESUME:
        return (ixgbe_resume(devinfo));

    case DDI_ATTACH:
        break;
    }

    /* Get the device instance */
    instance = ddi_get_instance(devinfo);

    /* Allocate memory for the instance data structure */
    ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);

    ixgbe->dip = devinfo;
    ixgbe->instance = instance;

    hw = &ixgbe->hw;
    osdep = &ixgbe->osdep;
    hw->back = osdep;
    osdep->ixgbe = ixgbe;

    /* Attach the instance pointer to the dev_info data structure */
    ddi_set_driver_private(devinfo, ixgbe);

    /*
     * Initialize for FMA support
     */
    ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
        0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
        DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
    ixgbe_fm_init(ixgbe);
    ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;

    /*
     * Map PCI config space registers
     */
    if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to map PCI configurations");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;

    /*
     * Identify the chipset family
     */
    if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to identify hardware");
        goto attach_fail;
    }

    /*
     * Map device registers
     */
    if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to map device registers");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;

    /*
     * Initialize driver parameters
     */
    ixgbe_init_properties(ixgbe);
    ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;

    /*
     * Register interrupt callback
     */
    if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to register interrupt callback");
        goto attach_fail;
    }

    /*
     * Allocate interrupts
     */
    if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to allocate interrupts");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
    /*
     * Allocate rx/tx rings based on the ring numbers.
     * The actual numbers of rx/tx rings are decided by the number of
     * allocated interrupt vectors, so we should allocate the rings after
     * interrupts are allocated.
     */
    if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;

    /*
     * Map rings to interrupt vectors
     */
    if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
        goto attach_fail;
    }

    /*
     * Add interrupt handlers
     */
    if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to add interrupt handlers");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;

    /*
     * Create a taskq for sfp-change
     */
    (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
    if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
        1, TASKQ_DEFAULTPRI, 0)) == NULL) {
        ixgbe_error(ixgbe, "sfp_taskq create failed");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;

    /*
     * Create a taskq for over-temp
     */
    (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
    if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
        1, TASKQ_DEFAULTPRI, 0)) == NULL) {
        ixgbe_error(ixgbe, "overtemp_taskq create failed");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;

    /*
     * Create a taskq for processing external PHY interrupts
     */
    (void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance);
    if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname,
        1, TASKQ_DEFAULTPRI, 0)) == NULL) {
        ixgbe_error(ixgbe, "phy_taskq create failed");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ;

    /*
     * Initialize driver parameters
     */
    if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize driver settings");
        goto attach_fail;
    }
    /*
     * Initialize mutexes for this device.
     * Do this before enabling the interrupt handler and
     * registering the softint, to avoid the condition where the
     * interrupt handler can try using an uninitialized mutex.
     */
    ixgbe_init_locks(ixgbe);
    ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;

    /*
     * Initialize chipset hardware
     */
    if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize adapter");
        goto attach_fail;
    }
    ixgbe->link_check_complete = B_FALSE;
    ixgbe->link_check_hrtime = gethrtime() +
        (IXGBE_LINK_UP_TIME * 100000000ULL);
    ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;

    if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
        goto attach_fail;
    }

    /*
     * Initialize adapter capabilities
     */
    ixgbe_init_params(ixgbe);

    /*
     * Initialize statistics
     */
    if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to initialize statistics");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;

    /*
     * Register the driver to the MAC
     */
    if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to register MAC");
        goto attach_fail;
    }
    mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
    ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;

    ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
        IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
    if (ixgbe->periodic_id == 0) {
        ixgbe_error(ixgbe, "Failed to add the link check timer");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;

    /*
     * Now that mutex locks are initialized, and the chip is also
     * initialized, enable interrupts.
     */
    if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
        goto attach_fail;
    }
    ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;

    ixgbe_log(ixgbe, "%s", ixgbe_ident);
    atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);

    return (DDI_SUCCESS);

attach_fail:
    ixgbe_unconfigure(devinfo, ixgbe);
    return (DDI_FAILURE);
}

/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupt routines are disabled, and all memory allocated by
 * this driver is freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
    ixgbe_t *ixgbe;

    /*
     * Check detach command
     */
    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_SUSPEND:
        return (ixgbe_suspend(devinfo));

    case DDI_DETACH:
        break;
    }

    /*
     * Get the pointer to the driver private data structure
     */
    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
    if (ixgbe == NULL)
        return (DDI_FAILURE);

    /*
     * If the device is still running, it needs to be stopped first.
     * This check is necessary because under some specific circumstances,
     * the detach routine can be called without stopping the interface
     * first.
     */
    if (ixgbe->ixgbe_state & IXGBE_STARTED) {
        atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
        mutex_enter(&ixgbe->gen_lock);
        ixgbe_stop(ixgbe, B_TRUE);
        mutex_exit(&ixgbe->gen_lock);
        /* Disable and stop the watchdog timer */
        ixgbe_disable_watchdog_timer(ixgbe);
    }

    /*
     * Check if there are still rx buffers held by the upper layer.
     * If so, fail the detach.
     */
    if (!ixgbe_rx_drain(ixgbe))
        return (DDI_FAILURE);

    /*
     * Do the remaining unconfigure routines
     */
    ixgbe_unconfigure(devinfo, ixgbe);

    return (DDI_SUCCESS);
}

/*
 * quiesce(9E) entry point.
 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
 *
 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
 * DDI_FAILURE indicates an error condition and should almost never happen.
 */
static int
ixgbe_quiesce(dev_info_t *devinfo)
{
    ixgbe_t *ixgbe;
    struct ixgbe_hw *hw;

    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);

    if (ixgbe == NULL)
        return (DDI_FAILURE);

    hw = &ixgbe->hw;

    /*
     * Disable the adapter interrupts
     */
    ixgbe_disable_adapter_interrupts(ixgbe);

    /*
     * Tell firmware the driver is no longer in control
     */
    ixgbe_release_driver_control(hw);

    /*
     * Reset the chipset
     */
    (void) ixgbe_reset_hw(hw);

    /*
     * Reset PHY
     */
    (void) ixgbe_reset_phy(hw);

    return (DDI_SUCCESS);
}

static void
ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
{
    /*
     * Disable interrupts
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
        (void) ixgbe_disable_intrs(ixgbe);
    }

    /*
     * Remove the link check timer
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
        if (ixgbe->periodic_id != NULL) {
            ddi_periodic_delete(ixgbe->periodic_id);
            ixgbe->periodic_id = NULL;
        }
    }

    /*
     * Unregister MAC
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
        (void) mac_unregister(ixgbe->mac_hdl);
    }

    /*
     * Free statistics
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
        kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
    }

    /*
     * Remove interrupt handlers
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
        ixgbe_rem_intr_handlers(ixgbe);
    }

    /*
     * Remove taskq for sfp-status-change
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
        ddi_taskq_destroy(ixgbe->sfp_taskq);
    }

    /*
     * Remove taskq for over-temp
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
        ddi_taskq_destroy(ixgbe->overtemp_taskq);
    }

    /*
     * Remove taskq for external PHYs
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) {
        ddi_taskq_destroy(ixgbe->phy_taskq);
    }

    /*
     * Remove interrupts
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
        ixgbe_rem_intrs(ixgbe);
    }

    /*
     * Unregister the interrupt callback handler
     */
    if (ixgbe->cb_hdl != NULL) {
        (void) ddi_cb_unregister(ixgbe->cb_hdl);
    }

    /*
     * Remove driver properties
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
        (void) ddi_prop_remove_all(devinfo);
    }
    /*
     * Stop the chipset
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
        mutex_enter(&ixgbe->gen_lock);
        ixgbe_chip_stop(ixgbe);
        mutex_exit(&ixgbe->gen_lock);
    }

    /*
     * Free register handle
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
        if (ixgbe->osdep.reg_handle != NULL)
            ddi_regs_map_free(&ixgbe->osdep.reg_handle);
    }

    /*
     * Free PCI config handle
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
        if (ixgbe->osdep.cfg_handle != NULL)
            pci_config_teardown(&ixgbe->osdep.cfg_handle);
    }

    /*
     * Free locks
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
        ixgbe_destroy_locks(ixgbe);
    }

    /*
     * Free the rx/tx rings
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
        ixgbe_free_rings(ixgbe);
    }

    /*
     * Unregister FMA capabilities
     */
    if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
        ixgbe_fm_fini(ixgbe);
    }

    /*
     * Free the driver data structure
     */
    kmem_free(ixgbe, sizeof (ixgbe_t));

    ddi_set_driver_private(devinfo, NULL);
}

/*
 * ixgbe_register_mac - Register the driver and its function pointers with
 * the GLD interface.
 */
static int
ixgbe_register_mac(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    mac_register_t *mac;
    int status;

    if ((mac = mac_alloc(MAC_VERSION)) == NULL)
        return (IXGBE_FAILURE);

    mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
    mac->m_driver = ixgbe;
    mac->m_dip = ixgbe->dip;
    mac->m_src_addr = hw->mac.addr;
    mac->m_callbacks = &ixgbe_m_callbacks;
    mac->m_min_sdu = 0;
    mac->m_max_sdu = ixgbe->default_mtu;
    mac->m_margin = VLAN_TAGSZ;
    mac->m_priv_props = ixgbe_priv_props;
    mac->m_v12n = MAC_VIRT_LEVEL1;

    status = mac_register(mac, &ixgbe->mac_hdl);

    mac_free(mac);

    return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
}
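/*
 * A note on ixgbe_register_mac() above: under the GLDv3 contract,
 * m_margin advertises how many bytes beyond m_max_sdu a frame may
 * carry; setting it to VLAN_TAGSZ lets VLAN-tagged frames pass
 * without raising the MTU.
 */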
/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    struct ixgbe_osdep *osdep = &ixgbe->osdep;

    /*
     * Get the device id
     */
    hw->vendor_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
    hw->device_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
    hw->revision_id =
        pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
    hw->subsystem_device_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
    hw->subsystem_vendor_id =
        pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

    /*
     * Set the mac type of the adapter based on the device id
     */
    if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    /*
     * Install adapter capabilities
     */
    switch (hw->mac.type) {
    case ixgbe_mac_82598EB:
        IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
        ixgbe->capab = &ixgbe_82598eb_cap;

        if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
            ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
            ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
            ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
        }
        break;

    case ixgbe_mac_82599EB:
        IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
        ixgbe->capab = &ixgbe_82599eb_cap;

        if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
            ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
            ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
            ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
        }
        break;

    case ixgbe_mac_X540:
        IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
        ixgbe->capab = &ixgbe_X540_cap;
        /*
         * For now, X540 is all set in its capab structure.
         * As other X540 variants show up, things can change here.
         */
        break;

    case ixgbe_mac_X550:
    case ixgbe_mac_X550EM_x:
        IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
        ixgbe->capab = &ixgbe_X550_cap;

        if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
            ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;

        /*
         * Link detection on X552 SFP+ and X552/X557-AT
         */
        if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
            hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
            ixgbe->capab->other_intr |=
                IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
            ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540;
        }
        break;

    default:
        IXGBE_DEBUGLOG_1(ixgbe,
            "adapter not supported in ixgbe_identify_hardware(): %d\n",
            hw->mac.type);
        return (IXGBE_FAILURE);
    }

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_regs_map - Map the device registers.
 */
static int
ixgbe_regs_map(ixgbe_t *ixgbe)
{
    dev_info_t *devinfo = ixgbe->dip;
    struct ixgbe_hw *hw = &ixgbe->hw;
    struct ixgbe_osdep *osdep = &ixgbe->osdep;
    off_t mem_size;

    /*
     * First get the size of device registers to be mapped.
     */
    if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
        != DDI_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    /*
     * Call ddi_regs_map_setup() to map registers
     */
    if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
        (caddr_t *)&hw->hw_addr, 0,
        mem_size, &ixgbe_regs_acc_attr,
        &osdep->reg_handle)) != DDI_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    return (IXGBE_SUCCESS);
}
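/*
 * A note on IXGBE_ADAPTER_REGSET above: the register-set number indexes
 * the device node's "reg" property, where (by PCI binding convention)
 * entry 0 describes configuration space, so the memory-mapped BAR used
 * here is normally a later entry.
 */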
/*
 * ixgbe_init_properties - Initialize driver properties.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
    /*
     * Get conf file properties, including link settings,
     * jumbo frames, ring number, descriptor number, etc.
     */
    ixgbe_get_conf(ixgbe);
}

/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx ring settings, link state, and any other parameters that
 * need to be set up during driver initialization.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    dev_info_t *devinfo = ixgbe->dip;
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_rx_group_t *rx_group;
    ixgbe_tx_ring_t *tx_ring;
    uint32_t rx_size;
    uint32_t tx_size;
    uint32_t ring_per_group;
    int i;

    /*
     * Initialize chipset specific hardware function pointers
     */
    if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
        return (IXGBE_FAILURE);
    }

    /*
     * Get the system page size
     */
    ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);

    /*
     * Set rx buffer size
     *
     * The IP header alignment room is counted in the calculation.
     * The rx buffer size is in units of 1 KB, as required by the
     * chipset hardware (see the worked example after this function).
     */
    rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
    ixgbe->rx_buf_size = ((rx_size >> 10) +
        ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

    /*
     * Set tx buffer size
     */
    tx_size = ixgbe->max_frame_size;
    ixgbe->tx_buf_size = ((tx_size >> 10) +
        ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

    /*
     * Initialize rx/tx rings/groups parameters
     */
    ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        rx_ring->index = i;
        rx_ring->ixgbe = ixgbe;
        rx_ring->group_index = i / ring_per_group;
        rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
    }

    for (i = 0; i < ixgbe->num_rx_groups; i++) {
        rx_group = &ixgbe->rx_groups[i];
        rx_group->index = i;
        rx_group->ixgbe = ixgbe;
    }

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        tx_ring->index = i;
        tx_ring->ixgbe = ixgbe;
        if (ixgbe->tx_head_wb_enable)
            tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
        else
            tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

        tx_ring->ring_size = ixgbe->tx_ring_size;
        tx_ring->free_list_size = ixgbe->tx_ring_size +
            (ixgbe->tx_ring_size >> 1);
    }

    /*
     * Initialize values of the interrupt throttling rate
     */
    for (i = 1; i < MAX_INTR_VECTOR; i++)
        ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

    /*
     * The initial link state should be "unknown"
     */
    ixgbe->link_state = LINK_STATE_UNKNOWN;

    return (IXGBE_SUCCESS);
}
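/*
 * Worked example of the 1 KB rounding in ixgbe_init_driver_settings()
 * above (frame size assumed for illustration): with a standard
 * 1500-byte MTU the maximum frame works out to roughly 1.5 KB, and the
 * small IPHDR_ALIGN_ROOM addition keeps rx_size between 1025 and 2048,
 * so
 *
 *     rx_buf_size = ((rx_size >> 10) + 1) << 10 = 2048
 *
 * i.e. the buffer is rounded up to the next whole 1 KB multiple (2 KB
 * here).
 */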
/*
 * ixgbe_init_locks - Initialize locks.
 */
static void
ixgbe_init_locks(ixgbe_t *ixgbe)
{
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_tx_ring_t *tx_ring;
    int i;

    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        mutex_init(&rx_ring->rx_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
    }

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        mutex_init(&tx_ring->tx_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&tx_ring->recycle_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&tx_ring->tcb_head_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
        mutex_init(&tx_ring->tcb_tail_lock, NULL,
            MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
    }

    mutex_init(&ixgbe->gen_lock, NULL,
        MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

    mutex_init(&ixgbe->watchdog_lock, NULL,
        MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
}

/*
 * ixgbe_destroy_locks - Destroy locks.
 */
static void
ixgbe_destroy_locks(ixgbe_t *ixgbe)
{
    ixgbe_rx_ring_t *rx_ring;
    ixgbe_tx_ring_t *tx_ring;
    int i;

    for (i = 0; i < ixgbe->num_rx_rings; i++) {
        rx_ring = &ixgbe->rx_rings[i];
        mutex_destroy(&rx_ring->rx_lock);
    }

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];
        mutex_destroy(&tx_ring->tx_lock);
        mutex_destroy(&tx_ring->recycle_lock);
        mutex_destroy(&tx_ring->tcb_head_lock);
        mutex_destroy(&tx_ring->tcb_tail_lock);
    }

    mutex_destroy(&ixgbe->gen_lock);
    mutex_destroy(&ixgbe->watchdog_lock);
}

/*
 * We need to try and determine which LED index in hardware corresponds to the
 * link/activity LED. This is the one that'll be overwritten when we perform
 * GLDv3 LED activity.
 */
static void
ixgbe_led_init(ixgbe_t *ixgbe)
{
    uint32_t reg, i;
    struct ixgbe_hw *hw = &ixgbe->hw;

    reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
    for (i = 0; i < 4; i++) {
        if (((reg >> IXGBE_LED_MODE_SHIFT(i)) &
            IXGBE_LED_MODE_MASK_BASE) == IXGBE_LED_LINK_ACTIVE) {
            ixgbe->ixgbe_led_index = i;
            return;
        }
    }

    /*
     * If we couldn't determine this, we use the default for various MACs
     * based on information Intel has inserted into other drivers over the
     * years. Note, when we have support for the X553, which should add the
     * ixgbe_x550_em_a mac type, that should be at index 0.
     */
    switch (hw->mac.type) {
    case ixgbe_mac_X550EM_x:
        ixgbe->ixgbe_led_index = 1;
        break;
    default:
        ixgbe->ixgbe_led_index = 2;
        break;
    }
}

static int
ixgbe_resume(dev_info_t *devinfo)
{
    ixgbe_t *ixgbe;
    int i;

    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
    if (ixgbe == NULL)
        return (DDI_FAILURE);

    mutex_enter(&ixgbe->gen_lock);

    if (ixgbe->ixgbe_state & IXGBE_STARTED) {
        if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
            mutex_exit(&ixgbe->gen_lock);
            return (DDI_FAILURE);
        }

        /*
         * Enable and start the watchdog timer
         */
        ixgbe_enable_watchdog_timer(ixgbe);
    }

    atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);

    if (ixgbe->ixgbe_state & IXGBE_STARTED) {
        for (i = 0; i < ixgbe->num_tx_rings; i++) {
            mac_tx_ring_update(ixgbe->mac_hdl,
                ixgbe->tx_rings[i].ring_handle);
        }
    }

    mutex_exit(&ixgbe->gen_lock);

    return (DDI_SUCCESS);
}

static int
ixgbe_suspend(dev_info_t *devinfo)
{
    ixgbe_t *ixgbe;

    ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
    if (ixgbe == NULL)
        return (DDI_FAILURE);

    mutex_enter(&ixgbe->gen_lock);

    atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
    if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
        mutex_exit(&ixgbe->gen_lock);
        return (DDI_SUCCESS);
    }
    ixgbe_stop(ixgbe, B_FALSE);

    mutex_exit(&ixgbe->gen_lock);

    /*
     * Disable and stop the watchdog timer
     */
    ixgbe_disable_watchdog_timer(ixgbe);

    return (DDI_SUCCESS);
}

/*
 * ixgbe_init - Initialize the device.
 */
static int
ixgbe_init(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    u8 pbanum[IXGBE_PBANUM_LENGTH];
    int rv;

    mutex_enter(&ixgbe->gen_lock);

    /*
     * Configure/Initialize hardware
     */
    rv = ixgbe_init_hw(hw);
    if (rv != IXGBE_SUCCESS) {
        switch (rv) {

        /*
         * The first three errors do not prevent us from progressing
         * further, and are mainly advisory in nature. In the case of a
         * SFP module not being present or not deemed supported by the
         * common code, we advise the operator of this fact but carry on
         * instead of failing hard, as SFPs can be inserted or replaced
         * while the driver is running. In the case of an unknown error,
         * we fail hard, logging the reason and emitting an FMA event.
         */
        case IXGBE_ERR_EEPROM_VERSION:
            ixgbe_error(ixgbe,
                "This Intel 10Gb Ethernet device is pre-release and"
                " contains outdated firmware. Please contact your"
                " hardware vendor for a replacement.");
            break;
        case IXGBE_ERR_SFP_NOT_PRESENT:
            ixgbe_error(ixgbe,
                "No SFP+ module detected on this interface. Please "
                "install a supported SFP+ module for this "
                "interface to become operational.");
            break;
        case IXGBE_ERR_SFP_NOT_SUPPORTED:
            ixgbe_error(ixgbe,
                "Unsupported SFP+ module detected. Please replace "
                "it with a supported SFP+ module per Intel "
                "documentation, or bypass this check with "
                "allow_unsupported_sfp=1 in ixgbe.conf.");
            break;
        default:
            ixgbe_error(ixgbe,
                "Failed to initialize hardware. ixgbe_init_hw "
                "returned %d", rv);
            ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
            goto init_fail;
        }
    }

    /*
     * Need to init eeprom before validating the checksum.
     */
    if (ixgbe_init_eeprom_params(hw) < 0) {
        ixgbe_error(ixgbe,
            "Unable to initialize the eeprom interface.");
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto init_fail;
    }

    /*
     * NVM validation
     */
    if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
        /*
         * Some PCI-E parts fail the first check due to
         * the link being in sleep state. Call it again,
         * if it fails a second time it's a real issue.
         */
        if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
            ixgbe_error(ixgbe,
                "Invalid NVM checksum. Please contact "
                "the vendor to update the NVM.");
            ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
            goto init_fail;
        }
    }

    /*
     * Setup default flow control thresholds - enable/disable
     * & flow control type is controlled by ixgbe.conf
     */
    hw->fc.high_water[0] = DEFAULT_FCRTH;
    hw->fc.low_water[0] = DEFAULT_FCRTL;
    hw->fc.pause_time = DEFAULT_FCPAUSE;
    hw->fc.send_xon = B_TRUE;

    /*
     * Initialize flow control
     */
    (void) ixgbe_start_hw(hw);

    /*
     * Initialize link settings
     */
    (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);

    /*
     * Initialize the chipset hardware
     */
    if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto init_fail;
    }

    /*
     * Read identifying information and place in devinfo.
     */
    pbanum[0] = '\0';
    (void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
    if (*pbanum != '\0') {
        (void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
            "printed-board-assembly", (char *)pbanum);
    }

    /*
     * Determine LED index.
     */
    ixgbe_led_init(ixgbe);

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        goto init_fail;
    }

    mutex_exit(&ixgbe->gen_lock);
    return (IXGBE_SUCCESS);

init_fail:
    /*
     * Reset PHY
     */
    (void) ixgbe_reset_phy(hw);

    mutex_exit(&ixgbe->gen_lock);
    ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
    return (IXGBE_FAILURE);
}

/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Get the mac address
     * This function should handle the SPARC case correctly.
     */
    if (!ixgbe_find_mac_address(ixgbe)) {
        ixgbe_error(ixgbe, "Failed to get the mac address");
        return (IXGBE_FAILURE);
    }

    /*
     * Validate the mac address
     */
    (void) ixgbe_init_rx_addrs(hw);
    if (!is_valid_mac_addr(hw->mac.addr)) {
        ixgbe_error(ixgbe, "Invalid mac address");
        return (IXGBE_FAILURE);
    }

    /*
     * Re-enable relaxed ordering for performance. It is disabled
     * by default in the hardware init.
     */
    if (ixgbe->relax_order_enable == B_TRUE)
        ixgbe_enable_relaxed_ordering(hw);

    /*
     * Setup adapter interrupt vectors
     */
    ixgbe_setup_adapter_vector(ixgbe);
    /*
     * Initialize unicast addresses.
     */
    ixgbe_init_unicst(ixgbe);

    /*
     * Setup and initialize the mctable structures.
     */
    ixgbe_setup_multicst(ixgbe);

    /*
     * Set interrupt throttling rate
     */
    for (i = 0; i < ixgbe->intr_cnt; i++) {
        IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
    }

    /*
     * Disable Wake-on-LAN
     */
    IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);

    /*
     * Some adapters offer Energy Efficient Ethernet (EEE) support.
     * Due to issues with EEE in e1000g/igb, we disable this by default
     * as a precautionary measure.
     *
     * Currently, the only known adapter which supports EEE in the ixgbe
     * line is 8086,15AB (IXGBE_DEV_ID_X550EM_X_KR), and only after the
     * first revision of it, as well as any X550 with MAC type 6 (non-EM).
     */
    (void) ixgbe_setup_eee(hw, B_FALSE);

    /*
     * Turn on any present SFP Tx laser
     */
    ixgbe_enable_tx_laser(hw);

    /*
     * Power on the PHY
     */
    (void) ixgbe_set_phy_power(hw, B_TRUE);

    /*
     * Save the state of the PHY
     */
    ixgbe_get_hw_state(ixgbe);

    /*
     * Make sure driver has control
     */
    ixgbe_get_driver_control(hw);

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_chip_stop - Stop the chipset hardware
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    int rv;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Stop interrupt generation and disable Tx unit
     */
    hw->adapter_stopped = B_FALSE;
    (void) ixgbe_stop_adapter(hw);

    /*
     * Reset the chipset
     */
    (void) ixgbe_reset_hw(hw);

    /*
     * Reset PHY
     */
    (void) ixgbe_reset_phy(hw);

    /*
     * Enter LPLU (Low Power, Link Up) mode, if available. Avoid resetting
     * the PHY while doing so. Else, just power down the PHY.
     */
    if (hw->phy.ops.enter_lplu != NULL) {
        hw->phy.reset_disable = B_TRUE;
        rv = hw->phy.ops.enter_lplu(hw);
        if (rv != IXGBE_SUCCESS)
            ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv);
        hw->phy.reset_disable = B_FALSE;
    } else {
        (void) ixgbe_set_phy_power(hw, B_FALSE);
    }

    /*
     * Turn off any present SFP Tx laser
     * Expected for health and safety reasons
     */
    ixgbe_disable_tx_laser(hw);

    /*
     * Tell firmware the driver is no longer in control
     */
    ixgbe_release_driver_control(hw);
}

/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
    int i;

    /*
     * Disable and stop the watchdog timer
     */
    ixgbe_disable_watchdog_timer(ixgbe);

    mutex_enter(&ixgbe->gen_lock);

    ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
    atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);

    ixgbe_stop(ixgbe, B_FALSE);

    if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
        mutex_exit(&ixgbe->gen_lock);
        return (IXGBE_FAILURE);
    }

    /*
     * After resetting, need to recheck the link status.
     */
    ixgbe->link_check_complete = B_FALSE;
    ixgbe->link_check_hrtime = gethrtime() +
        (IXGBE_LINK_UP_TIME * 100000000ULL);

    atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);

    if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
        for (i = 0; i < ixgbe->num_tx_rings; i++) {
            mac_tx_ring_update(ixgbe->mac_hdl,
                ixgbe->tx_rings[i].ring_handle);
        }
    }

    mutex_exit(&ixgbe->gen_lock);

    /*
     * Enable and start the watchdog timer
     */
    ixgbe_enable_watchdog_timer(ixgbe);

    return (IXGBE_SUCCESS);
}

/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
    ixgbe_tx_ring_t *tx_ring;
    tx_control_block_t *tcb;
    link_list_t pending_list;
    uint32_t desc_num;
    int i, j;

    LINK_LIST_INIT(&pending_list);

    for (i = 0; i < ixgbe->num_tx_rings; i++) {
        tx_ring = &ixgbe->tx_rings[i];

        mutex_enter(&tx_ring->recycle_lock);

        /*
         * Clean the pending tx data - the pending packets in the
         * work_list that have no chance to be transmitted again.
         *
         * We must ensure the chipset is stopped or the link is down
         * before cleaning the transmit packets.
         */
        desc_num = 0;
        for (j = 0; j < tx_ring->ring_size; j++) {
            tcb = tx_ring->work_list[j];
            if (tcb != NULL) {
                desc_num += tcb->desc_num;

                tx_ring->work_list[j] = NULL;

                ixgbe_free_tcb(tcb);

                LIST_PUSH_TAIL(&pending_list, &tcb->link);
            }
        }

        if (desc_num > 0) {
            atomic_add_32(&tx_ring->tbd_free, desc_num);
            ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

            /*
             * Reset the head and tail pointers of the tbd ring;
             * reset the writeback head if it's enabled.
             */
            tx_ring->tbd_head = 0;
            tx_ring->tbd_tail = 0;
            if (ixgbe->tx_head_wb_enable)
                *tx_ring->tbd_head_wb = 0;

            IXGBE_WRITE_REG(&ixgbe->hw,
                IXGBE_TDH(tx_ring->index), 0);
            IXGBE_WRITE_REG(&ixgbe->hw,
                IXGBE_TDT(tx_ring->index), 0);
        }

        mutex_exit(&tx_ring->recycle_lock);

        /*
         * Add the tx control blocks in the pending list to
         * the free list.
         */
        ixgbe_put_free_list(tx_ring, &pending_list);
    }
}

/*
 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
 * transmitted.
 */
static boolean_t
ixgbe_tx_drain(ixgbe_t *ixgbe)
{
    ixgbe_tx_ring_t *tx_ring;
    boolean_t done;
    int i, j;

    /*
     * Wait for a specific time to allow pending tx packets
     * to be transmitted.
     *
     * Check the counter tbd_free to see if transmission is done.
     * No lock protection is needed here.
     *
     * Return B_TRUE if all pending packets have been transmitted;
     * otherwise return B_FALSE.
     */
    for (i = 0; i < TX_DRAIN_TIME; i++) {

        done = B_TRUE;
        for (j = 0; j < ixgbe->num_tx_rings; j++) {
            tx_ring = &ixgbe->tx_rings[j];
            done = done &&
                (tx_ring->tbd_free == tx_ring->ring_size);
        }

        if (done)
            break;

        msec_delay(1);
    }

    return (done);
}
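/*
 * A note on the drain loops in ixgbe_tx_drain() above and
 * ixgbe_rx_drain() below: each iteration sleeps for one millisecond
 * (msec_delay(1)), so TX_DRAIN_TIME and RX_DRAIN_TIME effectively
 * bound the wait in milliseconds before the drain gives up and
 * returns B_FALSE.
 */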
/*
 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
 */
static boolean_t
ixgbe_rx_drain(ixgbe_t *ixgbe)
{
    boolean_t done = B_TRUE;
    int i;

    /*
     * Poll the rx free list to check whether the rx buffers held by
     * the upper layer have been released.
     *
     * Check the counter rcb_pending to see if all pending buffers are
     * released. No lock protection is needed here.
     *
     * Return B_TRUE if all pending buffers have been released;
     * otherwise return B_FALSE.
     */
    for (i = 0; i < RX_DRAIN_TIME; i++) {
        done = (ixgbe->rcb_pending == 0);

        if (done)
            break;

        msec_delay(1);
    }

    return (done);
}

/*
 * ixgbe_start - Start the driver/chipset.
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
    struct ixgbe_hw *hw = &ixgbe->hw;
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    if (alloc_buffer) {
        if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
            ixgbe_error(ixgbe,
                "Failed to allocate software receive rings");
            return (IXGBE_FAILURE);
        }

        /* Allocate buffers for all the rx/tx rings */
        if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
            ixgbe_error(ixgbe, "Failed to allocate DMA resource");
            return (IXGBE_FAILURE);
        }

        ixgbe->tx_ring_init = B_TRUE;
    } else {
        ixgbe->tx_ring_init = B_FALSE;
    }

    for (i = 0; i < ixgbe->num_rx_rings; i++)
        mutex_enter(&ixgbe->rx_rings[i].rx_lock);
    for (i = 0; i < ixgbe->num_tx_rings; i++)
        mutex_enter(&ixgbe->tx_rings[i].tx_lock);

    /*
     * Start the chipset hardware
     */
    if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
        ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
        goto start_failure;
    }

    /*
     * Configure link now for X550
     *
     * X550 possesses a LPLU (Low-Power Link Up) mode which keeps the
     * resting state of the adapter at a 1Gb FDX speed. Prior to the X550,
     * the resting state of the link would be the maximum speed that
     * autonegotiation will allow (usually 10Gb, infrastructure allowing),
     * so we never bothered with explicitly setting the link to 10Gb as it
     * would already be at that state on driver attach. With X550, we must
     * trigger a re-negotiation of the link in order to switch from a LPLU
     * 1Gb link to 10Gb (cable and link partner permitting.)
     */
    if (hw->mac.type == ixgbe_mac_X550 ||
        hw->mac.type == ixgbe_mac_X550EM_x) {
        (void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
        ixgbe_get_hw_state(ixgbe);
    }

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        goto start_failure;
    }

    /*
     * Setup the rx/tx rings
     */
    ixgbe_setup_rings(ixgbe);

    /*
     * ixgbe_start() will be called when resetting. However, if a reset
     * happens, we need to clear the ERROR, STALL and OVERTEMP flags
     * before enabling the interrupts.
     */
    atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
        | IXGBE_STALL | IXGBE_OVERTEMP));

    /*
     * Enable adapter interrupts.
     * The interrupts must be enabled after the driver state is set
     * to STARTED.
     */
    ixgbe_enable_adapter_interrupts(ixgbe);

    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    return (IXGBE_SUCCESS);

start_failure:
    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

    return (IXGBE_FAILURE);
}

/*
 * ixgbe_stop - Stop the driver/chipset.
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
    int i;

    ASSERT(mutex_owned(&ixgbe->gen_lock));

    /*
     * Disable the adapter interrupts
     */
    ixgbe_disable_adapter_interrupts(ixgbe);

    /*
     * Drain the pending tx packets
     */
    (void) ixgbe_tx_drain(ixgbe);

    for (i = 0; i < ixgbe->num_rx_rings; i++)
        mutex_enter(&ixgbe->rx_rings[i].rx_lock);
    for (i = 0; i < ixgbe->num_tx_rings; i++)
        mutex_enter(&ixgbe->tx_rings[i].tx_lock);

    /*
     * Stop the chipset hardware
     */
    ixgbe_chip_stop(ixgbe);

    if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
        ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
    }

    /*
     * Clean the pending tx data/resources
     */
    ixgbe_tx_clean(ixgbe);

    for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->tx_rings[i].tx_lock);
    for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
        mutex_exit(&ixgbe->rx_rings[i].rx_lock);

    if (ixgbe->link_state == LINK_STATE_UP) {
        ixgbe->link_state = LINK_STATE_UNKNOWN;
        mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
    }

    if (free_buffer) {
        /*
         * Release the DMA/memory resources of rx/tx rings
         */
        ixgbe_free_dma(ixgbe);
        ixgbe_free_rx_data(ixgbe);
    }
}

/*
 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
 */
/* ARGSUSED */
static int
ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
    void *arg1, void *arg2)
{
    ixgbe_t *ixgbe = (ixgbe_t *)arg1;

    switch (cbaction) {
    /* IRM callback */
    int count;
    case DDI_CB_INTR_ADD:
    case DDI_CB_INTR_REMOVE:
        count = (int)(uintptr_t)cbarg;
        ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
        DTRACE_PROBE2(ixgbe__irm__callback, int, count,
            int, ixgbe->intr_cnt);
        if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
            DDI_SUCCESS) {
            ixgbe_error(ixgbe,
                "IRM CB: Failed to adjust interrupts");
            goto cb_fail;
        }
        break;
    default:
        IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
            cbaction);
        return (DDI_ENOTSUP);
    }
    return (DDI_SUCCESS);
cb_fail:
    return (DDI_FAILURE);
}
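/*
 * Illustrative IRM flow (a sketch, not framework code): when the
 * Interrupt Resource Manager rebalances MSI-X vectors, it invokes the
 * callback registered by ixgbe_intr_cb_register(), roughly as
 *
 *     (void) ixgbe_cbfunc(dip, DDI_CB_INTR_ADD,
 *         (void *)(uintptr_t)count, ixgbe, NULL);
 *
 * i.e. cbarg carries the vector-count delta and arg1 the ixgbe_t
 * handle passed at registration time.
 */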
2031 */ 2032 static int 2033 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count) 2034 { 2035 int i, rc, actual; 2036 2037 if (count == 0) 2038 return (DDI_SUCCESS); 2039 2040 if ((cbaction == DDI_CB_INTR_ADD && 2041 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) || 2042 (cbaction == DDI_CB_INTR_REMOVE && 2043 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min)) 2044 return (DDI_FAILURE); 2045 2046 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) { 2047 return (DDI_FAILURE); 2048 } 2049 2050 for (i = 0; i < ixgbe->num_rx_rings; i++) 2051 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL); 2052 for (i = 0; i < ixgbe->num_tx_rings; i++) 2053 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL); 2054 2055 mutex_enter(&ixgbe->gen_lock); 2056 ixgbe->ixgbe_state &= ~IXGBE_STARTED; 2057 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST; 2058 ixgbe->ixgbe_state |= IXGBE_SUSPENDED; 2059 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN); 2060 2061 ixgbe_stop(ixgbe, B_FALSE); 2062 /* 2063 * Disable interrupts 2064 */ 2065 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) { 2066 rc = ixgbe_disable_intrs(ixgbe); 2067 ASSERT(rc == IXGBE_SUCCESS); 2068 } 2069 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR; 2070 2071 /* 2072 * Remove interrupt handlers 2073 */ 2074 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) { 2075 ixgbe_rem_intr_handlers(ixgbe); 2076 } 2077 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR; 2078 2079 /* 2080 * Clear vect_map 2081 */ 2082 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 2083 switch (cbaction) { 2084 case DDI_CB_INTR_ADD: 2085 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable, 2086 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual, 2087 DDI_INTR_ALLOC_NORMAL); 2088 if (rc != DDI_SUCCESS || actual != count) { 2089 ixgbe_log(ixgbe, "Adjust interrupts failed: " 2090 "return: %d, irm cb size: %d, actual: %d", 2091 rc, count, actual); 2092 goto intr_adjust_fail; 2093 } 2094 ixgbe->intr_cnt += count; 2095 break; 2096 2097 case DDI_CB_INTR_REMOVE: 2098 for (i = ixgbe->intr_cnt - count; 2099 i < ixgbe->intr_cnt; i++) { 2100 rc = ddi_intr_free(ixgbe->htable[i]); 2101 ixgbe->htable[i] = NULL; 2102 if (rc != DDI_SUCCESS) { 2103 ixgbe_log(ixgbe, "Adjust interrupts failed: "
2104 "return: %d, irm cb size: %d, actual: %d", 2105 rc, count, actual); 2106 goto intr_adjust_fail; 2107 } 2108 } 2109 ixgbe->intr_cnt -= count; 2110 break; 2111 } 2112 2113 /* 2114 * Get priority for first vector, assume remaining are all the same 2115 */ 2116 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 2117 if (rc != DDI_SUCCESS) { 2118 ixgbe_log(ixgbe, 2119 "Get interrupt priority failed: %d", rc); 2120 goto intr_adjust_fail; 2121 } 2122 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 2123 if (rc != DDI_SUCCESS) { 2124 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc); 2125 goto intr_adjust_fail; 2126 } 2127 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR; 2128 2129 /* 2130 * Map rings to interrupt vectors 2131 */ 2132 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) { 2133 ixgbe_error(ixgbe, 2134 "IRM CB: Failed to map interrupts to vectors"); 2135 goto intr_adjust_fail; 2136 } 2137 2138 /* 2139 * Add interrupt handlers 2140 */ 2141 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) { 2142 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers"); 2143 goto intr_adjust_fail; 2144 } 2145 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR; 2146 2147 /* 2148 * Now that mutex locks are initialized, and the chip is also 2149 * initialized, enable interrupts. 2150 */ 2151 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) { 2152 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts"); 2153 goto intr_adjust_fail; 2154 } 2155 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR; 2156 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) { 2157 ixgbe_error(ixgbe, "IRM CB: Failed to start"); 2158 goto intr_adjust_fail; 2159 } 2160 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST; 2161 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED; 2162 ixgbe->ixgbe_state |= IXGBE_STARTED; 2163 mutex_exit(&ixgbe->gen_lock); 2164 2165 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2166 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, 2167 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]); 2168 } 2169 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2170 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, 2171 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]); 2172 } 2173 2174 /* Wakeup all Tx rings */ 2175 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2176 mac_tx_ring_update(ixgbe->mac_hdl, 2177 ixgbe->tx_rings[i].ring_handle); 2178 } 2179 2180 IXGBE_DEBUGLOG_3(ixgbe, 2181 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).", 2182 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max); 2183 return (DDI_SUCCESS); 2184 2185 intr_adjust_fail: 2186 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 2187 mutex_exit(&ixgbe->gen_lock); 2188 return (DDI_FAILURE); 2189 } 2190 2191 /* 2192 * ixgbe_intr_cb_register - Register interrupt callback function. 2193 */ 2194 static int 2195 ixgbe_intr_cb_register(ixgbe_t *ixgbe) 2196 { 2197 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc, 2198 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) { 2199 return (IXGBE_FAILURE); 2200 } 2201 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered."); 2202 return (IXGBE_SUCCESS); 2203 } 2204 2205 /* 2206 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings. 
2207 */ 2208 static int 2209 ixgbe_alloc_rings(ixgbe_t *ixgbe) 2210 { 2211 /* 2212 * Allocate memory space for rx rings 2213 */ 2214 ixgbe->rx_rings = kmem_zalloc( 2215 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings, 2216 KM_NOSLEEP); 2217 2218 if (ixgbe->rx_rings == NULL) { 2219 return (IXGBE_FAILURE); 2220 } 2221 2222 /* 2223 * Allocate memory space for tx rings 2224 */ 2225 ixgbe->tx_rings = kmem_zalloc( 2226 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings, 2227 KM_NOSLEEP); 2228 2229 if (ixgbe->tx_rings == NULL) { 2230 kmem_free(ixgbe->rx_rings, 2231 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2232 ixgbe->rx_rings = NULL; 2233 return (IXGBE_FAILURE); 2234 } 2235 2236 /* 2237 * Allocate memory space for rx ring groups 2238 */ 2239 ixgbe->rx_groups = kmem_zalloc( 2240 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups, 2241 KM_NOSLEEP); 2242 2243 if (ixgbe->rx_groups == NULL) { 2244 kmem_free(ixgbe->rx_rings, 2245 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2246 kmem_free(ixgbe->tx_rings, 2247 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2248 ixgbe->rx_rings = NULL; 2249 ixgbe->tx_rings = NULL; 2250 return (IXGBE_FAILURE); 2251 } 2252 2253 return (IXGBE_SUCCESS); 2254 } 2255 2256 /* 2257 * ixgbe_free_rings - Free the memory space of rx/tx rings. 2258 */ 2259 static void 2260 ixgbe_free_rings(ixgbe_t *ixgbe) 2261 { 2262 if (ixgbe->rx_rings != NULL) { 2263 kmem_free(ixgbe->rx_rings, 2264 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings); 2265 ixgbe->rx_rings = NULL; 2266 } 2267 2268 if (ixgbe->tx_rings != NULL) { 2269 kmem_free(ixgbe->tx_rings, 2270 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings); 2271 ixgbe->tx_rings = NULL; 2272 } 2273 2274 if (ixgbe->rx_groups != NULL) { 2275 kmem_free(ixgbe->rx_groups, 2276 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups); 2277 ixgbe->rx_groups = NULL; 2278 } 2279 } 2280 2281 static int 2282 ixgbe_alloc_rx_data(ixgbe_t *ixgbe) 2283 { 2284 ixgbe_rx_ring_t *rx_ring; 2285 int i; 2286 2287 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2288 rx_ring = &ixgbe->rx_rings[i]; 2289 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS) 2290 goto alloc_rx_rings_failure; 2291 } 2292 return (IXGBE_SUCCESS); 2293 2294 alloc_rx_rings_failure: 2295 ixgbe_free_rx_data(ixgbe); 2296 return (IXGBE_FAILURE); 2297 } 2298 2299 static void 2300 ixgbe_free_rx_data(ixgbe_t *ixgbe) 2301 { 2302 ixgbe_rx_ring_t *rx_ring; 2303 ixgbe_rx_data_t *rx_data; 2304 int i; 2305 2306 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2307 rx_ring = &ixgbe->rx_rings[i]; 2308 2309 mutex_enter(&ixgbe->rx_pending_lock); 2310 rx_data = rx_ring->rx_data; 2311 2312 if (rx_data != NULL) { 2313 rx_data->flag |= IXGBE_RX_STOPPED; 2314 2315 if (rx_data->rcb_pending == 0) { 2316 ixgbe_free_rx_ring_data(rx_data); 2317 rx_ring->rx_data = NULL; 2318 } 2319 } 2320 2321 mutex_exit(&ixgbe->rx_pending_lock); 2322 } 2323 } 2324 2325 /* 2326 * ixgbe_setup_rings - Setup rx/tx rings. 2327 */ 2328 static void 2329 ixgbe_setup_rings(ixgbe_t *ixgbe) 2330 { 2331 /* 2332 * Setup the rx/tx rings, including the following: 2333 * 2334 * 1. Setup the descriptor ring and the control block buffers; 2335 * 2. Initialize necessary registers for receive/transmit; 2336 * 3. 
Initialize software pointers/parameters for receive/transmit; 2337 */ 2338 ixgbe_setup_rx(ixgbe); 2339 2340 ixgbe_setup_tx(ixgbe); 2341 } 2342 2343 static void 2344 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring) 2345 { 2346 ixgbe_t *ixgbe = rx_ring->ixgbe; 2347 ixgbe_rx_data_t *rx_data = rx_ring->rx_data; 2348 struct ixgbe_hw *hw = &ixgbe->hw; 2349 rx_control_block_t *rcb; 2350 union ixgbe_adv_rx_desc *rbd; 2351 uint32_t size; 2352 uint32_t buf_low; 2353 uint32_t buf_high; 2354 uint32_t reg_val; 2355 int i; 2356 2357 ASSERT(mutex_owned(&rx_ring->rx_lock)); 2358 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2359 2360 for (i = 0; i < ixgbe->rx_ring_size; i++) { 2361 rcb = rx_data->work_list[i]; 2362 rbd = &rx_data->rbd_ring[i]; 2363 2364 rbd->read.pkt_addr = rcb->rx_buf.dma_address; 2365 rbd->read.hdr_addr = NULL; 2366 } 2367 2368 /* 2369 * Initialize the length register 2370 */ 2371 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc); 2372 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size); 2373 2374 /* 2375 * Initialize the base address registers 2376 */ 2377 buf_low = (uint32_t)rx_data->rbd_area.dma_address; 2378 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32); 2379 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high); 2380 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low); 2381 2382 /* 2383 * Setup head & tail pointers 2384 */ 2385 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index), 2386 rx_data->ring_size - 1); 2387 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0); 2388 2389 rx_data->rbd_next = 0; 2390 rx_data->lro_first = 0; 2391 2392 /* 2393 * Setup the Receive Descriptor Control Register (RXDCTL) 2394 * PTHRESH=32 descriptors (half the internal cache) 2395 * HTHRESH=0 descriptors (to minimize latency on fetch) 2396 * WTHRESH defaults to 1 (writeback each descriptor) 2397 */ 2398 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index)); 2399 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */ 2400 2401 /* Not a valid value for 82599, X540 or X550 */ 2402 if (hw->mac.type == ixgbe_mac_82598EB) { 2403 reg_val |= 0x0020; /* pthresh */ 2404 } 2405 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val); 2406 2407 if (hw->mac.type == ixgbe_mac_82599EB || 2408 hw->mac.type == ixgbe_mac_X540 || 2409 hw->mac.type == ixgbe_mac_X550 || 2410 hw->mac.type == ixgbe_mac_X550EM_x) { 2411 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2412 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS); 2413 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2414 } 2415 2416 /* 2417 * Setup the Split and Replication Receive Control Register. 2418 * Set the rx buffer size and the advanced descriptor type. 
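 *
 * BSIZEPKT is expressed in 1KB units; for example, a 2KB receive
 * buffer is programmed as 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT = 2.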
2419 */ 2420 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) | 2421 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2422 reg_val |= IXGBE_SRRCTL_DROP_EN; 2423 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val); 2424 } 2425 2426 static void 2427 ixgbe_setup_rx(ixgbe_t *ixgbe) 2428 { 2429 ixgbe_rx_ring_t *rx_ring; 2430 struct ixgbe_hw *hw = &ixgbe->hw; 2431 uint32_t reg_val; 2432 uint32_t ring_mapping; 2433 uint32_t i, index; 2434 uint32_t psrtype_rss_bit; 2435 2436 /* 2437 * Ensure that Rx is disabled while setting up 2438 * the Rx unit and Rx descriptor ring(s) 2439 */ 2440 ixgbe_disable_rx(hw); 2441 2442 /* PSRTYPE must be configured for 82599 */ 2443 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ && 2444 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) { 2445 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2446 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2447 reg_val |= IXGBE_PSRTYPE_L2HDR; 2448 reg_val |= 0x80000000; 2449 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val); 2450 } else { 2451 if (ixgbe->num_rx_groups > 32) { 2452 psrtype_rss_bit = 0x20000000; 2453 } else { 2454 psrtype_rss_bit = 0x40000000; 2455 } 2456 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) { 2457 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | 2458 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR; 2459 reg_val |= IXGBE_PSRTYPE_L2HDR; 2460 reg_val |= psrtype_rss_bit; 2461 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val); 2462 } 2463 } 2464 2465 /* 2466 * Set filter control in FCTRL to determine which types of packets 2467 * are passed up to the driver. 2468 * - Pass broadcast packets. 2469 * - Do not pass flow control pause frames (82598-specific) 2470 */ 2471 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2472 reg_val |= IXGBE_FCTRL_BAM; /* Broadcast Accept Mode */ 2473 if (hw->mac.type == ixgbe_mac_82598EB) { 2474 reg_val |= IXGBE_FCTRL_DPF; /* Discard Pause Frames */ 2475 } 2476 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val); 2477 2478 /* 2479 * Hardware checksum settings 2480 */ 2481 if (ixgbe->rx_hcksum_enable) { 2482 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 2483 reg_val |= IXGBE_RXCSUM_IPPCSE; /* IP checksum */ 2484 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val); 2485 } 2486 2487 /* 2488 * Setup VMDq and RSS for multiple receive queues 2489 */ 2490 switch (ixgbe->classify_mode) { 2491 case IXGBE_CLASSIFY_RSS: 2492 /* 2493 * One group, only RSS is needed when more than 2494 * one ring is enabled. 2495 */ 2496 ixgbe_setup_rss(ixgbe); 2497 break; 2498 2499 case IXGBE_CLASSIFY_VMDQ: 2500 /* 2501 * Multiple groups, each group has one ring, 2502 * only VMDq is needed. 2503 */ 2504 ixgbe_setup_vmdq(ixgbe); 2505 break; 2506 2507 case IXGBE_CLASSIFY_VMDQ_RSS: 2508 /* 2509 * Multiple groups and multiple rings, both 2510 * VMDq and RSS are needed. 2511 */ 2512 ixgbe_setup_vmdq_rss(ixgbe); 2513 break; 2514 2515 default: 2516 break; 2517 } 2518 2519 /* 2520 * Enable the receive unit. This must be done after filter 2521 * control is set in FCTRL. On 82598, we disable the descriptor monitor. 2522 * 82598 is the only adapter which defines this RXCTRL option.
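 *
 * Note that the RXEN write itself is delegated to the shared-code
 * routine ixgbe_enable_rx_dma(), which performs any MAC-specific
 * handling needed around enabling receive DMA.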
2523 */ 2524 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2525 if (hw->mac.type == ixgbe_mac_82598EB) 2526 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */ 2527 reg_val |= IXGBE_RXCTRL_RXEN; 2528 (void) ixgbe_enable_rx_dma(hw, reg_val); 2529 2530 /* 2531 * ixgbe_setup_rx_ring must be called after configuring RXCTRL 2532 */ 2533 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2534 rx_ring = &ixgbe->rx_rings[i]; 2535 ixgbe_setup_rx_ring(rx_ring); 2536 } 2537 2538 /* 2539 * Setup the per-ring statistics mapping. 2540 */ 2541 ring_mapping = 0; 2542 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2543 index = ixgbe->rx_rings[i].hw_index; 2544 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2)); 2545 ring_mapping |= (i & 0xF) << (8 * (index & 0x3)); 2546 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping); 2547 } 2548 2549 /* 2550 * The Max Frame Size in MHADD/MAXFRS will be internally increased 2551 * by four bytes if the packet has a VLAN field, so it includes the 2552 * MTU, ethernet header and frame check sequence. 2553 * The register is named MAXFRS on 82599. 2554 */ 2555 reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD); 2556 reg_val &= ~IXGBE_MHADD_MFS_MASK; 2557 reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header) 2558 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT; 2559 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val); 2560 2561 /* 2562 * Setup Jumbo Frame enable bit 2563 */ 2564 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2565 if (ixgbe->default_mtu > ETHERMTU) 2566 reg_val |= IXGBE_HLREG0_JUMBOEN; 2567 else 2568 reg_val &= ~IXGBE_HLREG0_JUMBOEN; 2569 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2570 2571 /* 2572 * Setup RSC for multiple receive queues. 2573 */ 2574 if (ixgbe->lro_enable) { 2575 for (i = 0; i < ixgbe->num_rx_rings; i++) { 2576 /* 2577 * Make sure rx_buf_size * MAXDESC is not greater 2578 * than 65535. 2579 * Intel recommends 4 for the MAXDESC field value.
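 *
 * For example, with 16KB buffers 16384 * 4 = 65536 would exceed
 * that limit, which is why MAXDESC is reduced to 1 for the 16KB
 * buffer size below.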
2580 */ 2581 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i)); 2582 reg_val |= IXGBE_RSCCTL_RSCEN; 2583 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k) 2584 reg_val |= IXGBE_RSCCTL_MAXDESC_1; 2585 else 2586 reg_val |= IXGBE_RSCCTL_MAXDESC_4; 2587 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val); 2588 } 2589 2590 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU); 2591 reg_val |= IXGBE_RSCDBU_RSCACKDIS; 2592 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val); 2593 2594 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); 2595 reg_val |= IXGBE_RDRXCTL_RSCACKC; 2596 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX; 2597 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; 2598 2599 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val); 2600 } 2601 } 2602 2603 static void 2604 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring) 2605 { 2606 ixgbe_t *ixgbe = tx_ring->ixgbe; 2607 struct ixgbe_hw *hw = &ixgbe->hw; 2608 uint32_t size; 2609 uint32_t buf_low; 2610 uint32_t buf_high; 2611 uint32_t reg_val; 2612 2613 ASSERT(mutex_owned(&tx_ring->tx_lock)); 2614 ASSERT(mutex_owned(&ixgbe->gen_lock)); 2615 2616 /* 2617 * Initialize the length register 2618 */ 2619 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc); 2620 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size); 2621 2622 /* 2623 * Initialize the base address registers 2624 */ 2625 buf_low = (uint32_t)tx_ring->tbd_area.dma_address; 2626 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32); 2627 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low); 2628 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high); 2629 2630 /* 2631 * Setup head & tail pointers 2632 */ 2633 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0); 2634 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0); 2635 2636 /* 2637 * Setup head write-back 2638 */ 2639 if (ixgbe->tx_head_wb_enable) { 2640 /* 2641 * The memory of the head write-back is allocated using 2642 * the extra tbd beyond the tail of the tbd ring. 2643 */ 2644 tx_ring->tbd_head_wb = (uint32_t *) 2645 ((uintptr_t)tx_ring->tbd_area.address + size); 2646 *tx_ring->tbd_head_wb = 0; 2647 2648 buf_low = (uint32_t) 2649 (tx_ring->tbd_area.dma_address + size); 2650 buf_high = (uint32_t) 2651 ((tx_ring->tbd_area.dma_address + size) >> 32); 2652 2653 /* Set the head write-back enable bit */ 2654 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE; 2655 2656 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low); 2657 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high); 2658 2659 /* 2660 * Turn off relaxed ordering for head write back or it will 2661 * cause problems with the tx recycling 2662 */ 2663 2664 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ? 
2665 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) : 2666 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index)); 2667 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; 2668 if (hw->mac.type == ixgbe_mac_82598EB) { 2669 IXGBE_WRITE_REG(hw, 2670 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val); 2671 } else { 2672 IXGBE_WRITE_REG(hw, 2673 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val); 2674 } 2675 } else { 2676 tx_ring->tbd_head_wb = NULL; 2677 } 2678 2679 tx_ring->tbd_head = 0; 2680 tx_ring->tbd_tail = 0; 2681 tx_ring->tbd_free = tx_ring->ring_size; 2682 2683 if (ixgbe->tx_ring_init == B_TRUE) { 2684 tx_ring->tcb_head = 0; 2685 tx_ring->tcb_tail = 0; 2686 tx_ring->tcb_free = tx_ring->free_list_size; 2687 } 2688 2689 /* 2690 * Initialize the s/w context structure 2691 */ 2692 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t)); 2693 } 2694 2695 static void 2696 ixgbe_setup_tx(ixgbe_t *ixgbe) 2697 { 2698 struct ixgbe_hw *hw = &ixgbe->hw; 2699 ixgbe_tx_ring_t *tx_ring; 2700 uint32_t reg_val; 2701 uint32_t ring_mapping; 2702 int i; 2703 2704 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2705 tx_ring = &ixgbe->tx_rings[i]; 2706 ixgbe_setup_tx_ring(tx_ring); 2707 } 2708 2709 /* 2710 * Setup the per-ring statistics mapping. 2711 */ 2712 ring_mapping = 0; 2713 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2714 ring_mapping |= (i & 0xF) << (8 * (i & 0x3)); 2715 if ((i & 0x3) == 0x3) { 2716 switch (hw->mac.type) { 2717 case ixgbe_mac_82598EB: 2718 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), 2719 ring_mapping); 2720 break; 2721 2722 case ixgbe_mac_82599EB: 2723 case ixgbe_mac_X540: 2724 case ixgbe_mac_X550: 2725 case ixgbe_mac_X550EM_x: 2726 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), 2727 ring_mapping); 2728 break; 2729 2730 default: 2731 break; 2732 } 2733 2734 ring_mapping = 0; 2735 } 2736 } 2737 if (i & 0x3) { 2738 switch (hw->mac.type) { 2739 case ixgbe_mac_82598EB: 2740 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping); 2741 break; 2742 2743 case ixgbe_mac_82599EB: 2744 case ixgbe_mac_X540: 2745 case ixgbe_mac_X550: 2746 case ixgbe_mac_X550EM_x: 2747 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping); 2748 break; 2749 2750 default: 2751 break; 2752 } 2753 } 2754 2755 /* 2756 * Enable CRC appending and TX padding (for short tx frames) 2757 */ 2758 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0); 2759 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN; 2760 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val); 2761 2762 /* 2763 * enable DMA for 82599, X540 and X550 parts 2764 */ 2765 if (hw->mac.type == ixgbe_mac_82599EB || 2766 hw->mac.type == ixgbe_mac_X540 || 2767 hw->mac.type == ixgbe_mac_X550 || 2768 hw->mac.type == ixgbe_mac_X550EM_x) { 2769 /* DMATXCTL.TE must be set after all Tx config is complete */ 2770 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 2771 reg_val |= IXGBE_DMATXCTL_TE; 2772 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val); 2773 2774 /* Disable arbiter to set MTQC */ 2775 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS); 2776 reg_val |= IXGBE_RTTDCS_ARBDIS; 2777 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val); 2778 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); 2779 reg_val &= ~IXGBE_RTTDCS_ARBDIS; 2780 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val); 2781 } 2782 2783 /* 2784 * Enabling tx queues .. 
2785 * For 82599 must be done after DMATXCTL.TE is set 2786 */ 2787 for (i = 0; i < ixgbe->num_tx_rings; i++) { 2788 tx_ring = &ixgbe->tx_rings[i]; 2789 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index)); 2790 reg_val |= IXGBE_TXDCTL_ENABLE; 2791 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val); 2792 } 2793 } 2794 2795 /* 2796 * ixgbe_setup_rss - Setup receive-side scaling feature. 2797 */ 2798 static void 2799 ixgbe_setup_rss(ixgbe_t *ixgbe) 2800 { 2801 struct ixgbe_hw *hw = &ixgbe->hw; 2802 uint32_t mrqc; 2803 2804 /* 2805 * Initialize RETA/ERETA table 2806 */ 2807 ixgbe_setup_rss_table(ixgbe); 2808 2809 /* 2810 * Enable RSS & perform hash on these packet types 2811 */ 2812 mrqc = IXGBE_MRQC_RSSEN | 2813 IXGBE_MRQC_RSS_FIELD_IPV4 | 2814 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2815 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2816 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2817 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2818 IXGBE_MRQC_RSS_FIELD_IPV6 | 2819 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2820 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2821 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2822 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2823 } 2824 2825 /* 2826 * ixgbe_setup_vmdq - Setup MAC classification feature 2827 */ 2828 static void 2829 ixgbe_setup_vmdq(ixgbe_t *ixgbe) 2830 { 2831 struct ixgbe_hw *hw = &ixgbe->hw; 2832 uint32_t vmdctl, i, vtctl; 2833 2834 /* 2835 * Setup the VMDq Control register, enable VMDq based on 2836 * packet destination MAC address: 2837 */ 2838 switch (hw->mac.type) { 2839 case ixgbe_mac_82598EB: 2840 /* 2841 * VMDq Enable = 1; 2842 * VMDq Filter = 0; MAC filtering 2843 * Default VMDq output index = 0; 2844 */ 2845 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2846 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2847 break; 2848 2849 case ixgbe_mac_82599EB: 2850 case ixgbe_mac_X540: 2851 case ixgbe_mac_X550: 2852 case ixgbe_mac_X550EM_x: 2853 /* 2854 * Enable VMDq-only. 2855 */ 2856 vmdctl = IXGBE_MRQC_VMDQEN; 2857 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl); 2858 2859 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2860 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2861 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2862 } 2863 2864 /* 2865 * Enable Virtualization and Replication. 2866 */ 2867 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2868 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2869 2870 /* 2871 * Enable receiving packets to all VFs 2872 */ 2873 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2874 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2875 break; 2876 2877 default: 2878 break; 2879 } 2880 } 2881 2882 /* 2883 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature. 
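 *
 * In this mode the rings are carved into VMDq pools with a small RSS
 * queue set per pool; on the 82599-class MACs the MRQC programming
 * below selects either 32 pools x 4 RSS queues (VMDQRSS32EN) or
 * 64 pools x 2 RSS queues (VMDQRSS64EN).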
2884 */ 2885 static void 2886 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe) 2887 { 2888 struct ixgbe_hw *hw = &ixgbe->hw; 2889 uint32_t i, mrqc; 2890 uint32_t vtctl, vmdctl; 2891 2892 /* 2893 * Initialize RETA/ERETA table 2894 */ 2895 ixgbe_setup_rss_table(ixgbe); 2896 2897 /* 2898 * Enable and setup RSS and VMDq 2899 */ 2900 switch (hw->mac.type) { 2901 case ixgbe_mac_82598EB: 2902 /* 2903 * Enable RSS & Setup RSS Hash functions 2904 */ 2905 mrqc = IXGBE_MRQC_RSSEN | 2906 IXGBE_MRQC_RSS_FIELD_IPV4 | 2907 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2908 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2909 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2910 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2911 IXGBE_MRQC_RSS_FIELD_IPV6 | 2912 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2913 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2914 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2915 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2916 2917 /* 2918 * Enable and Setup VMDq 2919 * VMDq Filter = 0; MAC filtering 2920 * Default VMDq output index = 0; 2921 */ 2922 vmdctl = IXGBE_VMD_CTL_VMDQ_EN; 2923 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); 2924 break; 2925 2926 case ixgbe_mac_82599EB: 2927 case ixgbe_mac_X540: 2928 case ixgbe_mac_X550: 2929 case ixgbe_mac_X550EM_x: 2930 /* 2931 * Enable RSS & Setup RSS Hash functions 2932 */ 2933 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 | 2934 IXGBE_MRQC_RSS_FIELD_IPV4_TCP | 2935 IXGBE_MRQC_RSS_FIELD_IPV4_UDP | 2936 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP | 2937 IXGBE_MRQC_RSS_FIELD_IPV6_EX | 2938 IXGBE_MRQC_RSS_FIELD_IPV6 | 2939 IXGBE_MRQC_RSS_FIELD_IPV6_TCP | 2940 IXGBE_MRQC_RSS_FIELD_IPV6_UDP | 2941 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 2942 2943 /* 2944 * Enable VMDq+RSS. 2945 */ 2946 if (ixgbe->num_rx_groups > 32) { 2947 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN; 2948 } else { 2949 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN; 2950 } 2951 2952 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 2953 2954 for (i = 0; i < hw->mac.num_rar_entries; i++) { 2955 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0); 2956 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0); 2957 } 2958 break; 2959 2960 default: 2961 break; 2962 2963 } 2964 2965 if (hw->mac.type == ixgbe_mac_82599EB || 2966 hw->mac.type == ixgbe_mac_X540 || 2967 hw->mac.type == ixgbe_mac_X550 || 2968 hw->mac.type == ixgbe_mac_X550EM_x) { 2969 /* 2970 * Enable Virtualization and Replication. 2971 */ 2972 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; 2973 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl); 2974 2975 /* 2976 * Enable receiving packets to all VFs 2977 */ 2978 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL); 2979 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL); 2980 } 2981 } 2982 2983 /* 2984 * ixgbe_setup_rss_table - Setup RSS table 2985 */ 2986 static void 2987 ixgbe_setup_rss_table(ixgbe_t *ixgbe) 2988 { 2989 struct ixgbe_hw *hw = &ixgbe->hw; 2990 uint32_t i, j; 2991 uint32_t random; 2992 uint32_t reta; 2993 uint32_t ring_per_group; 2994 uint32_t ring; 2995 uint32_t table_size; 2996 uint32_t index_mult; 2997 uint32_t rxcsum; 2998 2999 /* 3000 * Set multiplier for RETA setup and table size based on MAC type. 3001 * RETA table sizes vary by model: 3002 * 3003 * 82598, 82599, X540: 128 table entries. 3004 * X550: 512 table entries. 3005 */ 3006 index_mult = 0x1; 3007 table_size = 128; 3008 switch (ixgbe->hw.mac.type) { 3009 case ixgbe_mac_82598EB: 3010 index_mult = 0x11; 3011 break; 3012 case ixgbe_mac_X550: 3013 case ixgbe_mac_X550EM_x: 3014 table_size = 512; 3015 break; 3016 default: 3017 break; 3018 } 3019 3020 /* 3021 * Fill out RSS redirection table. The configuration of the indices is 3022 * hardware-dependent.
3023 * 3024 * 82598: 8 bits wide containing two 4 bit RSS indices 3025 * 82599, X540: 8 bits wide containing one 4 bit RSS index 3026 * X550: 8 bits wide containing one 6 bit RSS index 3027 */ 3028 reta = 0; 3029 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3030 3031 for (i = 0, j = 0; i < table_size; i++, j++) { 3032 if (j == ring_per_group) 3033 j = 0; 3034 /* 3035 * The low 8 bits are for hash value (n+0); 3036 * the next 8 bits are for hash value (n+1), etc. 3037 */ 3038 ring = (j * index_mult); 3039 reta = reta >> 8; 3040 reta = reta | (((uint32_t)ring) << 24); 3041 3042 if ((i & 3) == 3) { 3043 /* 3044 * The first 128 table entries are programmed into the 3045 * RETA register, with any beyond that (e.g. on X550) 3046 * into ERETA. 3047 */ 3048 if (i < 128) 3049 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); 3050 else 3051 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), 3052 reta); 3053 reta = 0; 3054 } 3055 } 3056 3057 /* 3058 * Fill out hash function seeds with a random constant 3059 */ 3060 for (i = 0; i < 10; i++) { 3061 (void) random_get_pseudo_bytes((uint8_t *)&random, 3062 sizeof (uint32_t)); 3063 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random); 3064 } 3065 3066 /* 3067 * Disable Packet Checksum to enable RSS for multiple receive queues. 3068 * It is an adapter hardware limitation that Packet Checksum is 3069 * mutually exclusive with RSS. 3070 */ 3071 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 3072 rxcsum |= IXGBE_RXCSUM_PCSD; 3073 rxcsum &= ~IXGBE_RXCSUM_IPPCSE; 3074 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); 3075 } 3076 3077 /* 3078 * ixgbe_init_unicst - Initialize the unicast addresses. 3079 */ 3080 static void 3081 ixgbe_init_unicst(ixgbe_t *ixgbe) 3082 { 3083 struct ixgbe_hw *hw = &ixgbe->hw; 3084 uint8_t *mac_addr; 3085 int slot; 3086 /* 3087 * Here we should consider two situations: 3088 * 3089 * 1. Chipset is initialized for the first time: 3090 * clear all the multiple unicast addresses. 3091 * 3092 * 2. Chipset is reset: 3093 * recover the multiple unicast addresses from the 3094 * software data structure to the RAR registers.
3095 */ 3096 if (!ixgbe->unicst_init) { 3097 /* 3098 * Initialize the multiple unicast addresses 3099 */ 3100 ixgbe->unicst_total = hw->mac.num_rar_entries; 3101 ixgbe->unicst_avail = ixgbe->unicst_total; 3102 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3103 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 3104 bzero(mac_addr, ETHERADDRL); 3105 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL); 3106 ixgbe->unicst_addr[slot].mac.set = 0; 3107 } 3108 ixgbe->unicst_init = B_TRUE; 3109 } else { 3110 /* Re-configure the RAR registers */ 3111 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3112 mac_addr = ixgbe->unicst_addr[slot].mac.addr; 3113 if (ixgbe->unicst_addr[slot].mac.set == 1) { 3114 (void) ixgbe_set_rar(hw, slot, mac_addr, 3115 ixgbe->unicst_addr[slot].mac.group_index, 3116 IXGBE_RAH_AV); 3117 } else { 3118 bzero(mac_addr, ETHERADDRL); 3119 (void) ixgbe_set_rar(hw, slot, mac_addr, 3120 NULL, NULL); 3121 } 3122 } 3123 } 3124 } 3125 3126 /* 3127 * ixgbe_unicst_find - Find the slot for the specified unicast address 3128 */ 3129 int 3130 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr) 3131 { 3132 int slot; 3133 3134 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3135 3136 for (slot = 0; slot < ixgbe->unicst_total; slot++) { 3137 if (bcmp(ixgbe->unicst_addr[slot].mac.addr, 3138 mac_addr, ETHERADDRL) == 0) 3139 return (slot); 3140 } 3141 3142 return (-1); 3143 } 3144 3145 /* 3146 * ixgbe_multicst_add - Add a multicast address. 3147 */ 3148 int 3149 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr) 3150 { 3151 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3152 3153 if ((multiaddr[0] & 01) == 0) { 3154 return (EINVAL); 3155 } 3156 3157 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) { 3158 return (ENOENT); 3159 } 3160 3161 bcopy(multiaddr, 3162 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL); 3163 ixgbe->mcast_count++; 3164 3165 /* 3166 * Update the multicast table in the hardware 3167 */ 3168 ixgbe_setup_multicst(ixgbe); 3169 3170 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3171 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3172 return (EIO); 3173 } 3174 3175 return (0); 3176 } 3177 3178 /* 3179 * ixgbe_multicst_remove - Remove a multicast address. 3180 */ 3181 int 3182 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr) 3183 { 3184 int i; 3185 3186 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3187 3188 for (i = 0; i < ixgbe->mcast_count; i++) { 3189 if (bcmp(multiaddr, &ixgbe->mcast_table[i], 3190 ETHERADDRL) == 0) { 3191 for (i++; i < ixgbe->mcast_count; i++) { 3192 ixgbe->mcast_table[i - 1] = 3193 ixgbe->mcast_table[i]; 3194 } 3195 ixgbe->mcast_count--; 3196 break; 3197 } 3198 } 3199 3200 /* 3201 * Update the multicast table in the hardware 3202 */ 3203 ixgbe_setup_multicst(ixgbe); 3204 3205 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 3206 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3207 return (EIO); 3208 } 3209 3210 return (0); 3211 } 3212 3213 /* 3214 * ixgbe_setup_multicst - Setup multicast data structures. 3215 * 3216 * This routine initializes all of the multicast related structures 3217 * and saves them in the hardware registers.
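 *
 * ixgbe_update_mc_addr_list() walks the address list through the
 * ixgbe_mc_table_itr() callback and programs the corresponding hash
 * bits in the Multicast Table Array (MTA) registers.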
3218 */ 3219 static void 3220 ixgbe_setup_multicst(ixgbe_t *ixgbe) 3221 { 3222 uint8_t *mc_addr_list; 3223 uint32_t mc_addr_count; 3224 struct ixgbe_hw *hw = &ixgbe->hw; 3225 3226 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3227 3228 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES); 3229 3230 mc_addr_list = (uint8_t *)ixgbe->mcast_table; 3231 mc_addr_count = ixgbe->mcast_count; 3232 3233 /* 3234 * Update the multicast addresses to the MTA registers 3235 */ 3236 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count, 3237 ixgbe_mc_table_itr, TRUE); 3238 } 3239 3240 /* 3241 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode). 3242 * 3243 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers. 3244 * Different chipsets may have different allowed configurations of vmdq and rss. 3245 */ 3246 static void 3247 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe) 3248 { 3249 struct ixgbe_hw *hw = &ixgbe->hw; 3250 uint32_t ring_per_group; 3251 3252 switch (hw->mac.type) { 3253 case ixgbe_mac_82598EB: 3254 /* 3255 * 82598 supports the following combination: 3256 * vmdq no. x rss no. 3257 * [5..16] x 1 3258 * [1..4] x [1..16] 3259 * However, 8 rss queues per pool (vmdq) are sufficient for 3260 * most cases. 3261 */ 3262 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3263 if (ixgbe->num_rx_groups > 4) { 3264 ixgbe->num_rx_rings = ixgbe->num_rx_groups; 3265 } else { 3266 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3267 min(8, ring_per_group); 3268 } 3269 3270 break; 3271 3272 case ixgbe_mac_82599EB: 3273 case ixgbe_mac_X540: 3274 case ixgbe_mac_X550: 3275 case ixgbe_mac_X550EM_x: 3276 /* 3277 * 82599 supports the following combination: 3278 * vmdq no. x rss no. 3279 * [33..64] x [1..2] 3280 * [2..32] x [1..4] 3281 * 1 x [1..16] 3282 * However, 8 rss queues per pool (vmdq) are sufficient for 3283 * most cases. 3284 * 3285 * For now, treat X540 and X550 like the 82599. 3286 */ 3287 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3288 if (ixgbe->num_rx_groups == 1) { 3289 ixgbe->num_rx_rings = min(8, ring_per_group); 3290 } else if (ixgbe->num_rx_groups <= 32) { 3291 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3292 min(4, ring_per_group); 3293 } else if (ixgbe->num_rx_groups <= 64) { 3294 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3295 min(2, ring_per_group); 3296 } 3297 break; 3298 3299 default: 3300 break; 3301 } 3302 3303 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 3304 3305 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) { 3306 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 3307 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) { 3308 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ; 3309 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) { 3310 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS; 3311 } else { 3312 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS; 3313 } 3314 3315 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d", 3316 ixgbe->num_rx_groups, ixgbe->num_rx_rings); 3317 } 3318 3319 /* 3320 * ixgbe_get_conf - Get driver configurations set in driver.conf. 3321 * 3322 * This routine gets user-configured values out of the configuration 3323 * file ixgbe.conf. 3324 * 3325 * For each configurable value, there is a minimum, a maximum, and a 3326 * default. 3327 * If the user does not configure a value, use the default. 3328 * If the user configures below the minimum, use the minimum. 3329 * If the user configures above the maximum, use the maximum.
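 *
 * For example, assuming the property names match the PROP_*
 * definitions, a line such as
 *
 *	default_mtu = 9000;
 *
 * in ixgbe.conf is read via ddi_prop_get_int() and then clamped to
 * the [MIN_MTU, max_mtu] range by ixgbe_get_prop().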
3330 */ 3331 static void 3332 ixgbe_get_conf(ixgbe_t *ixgbe) 3333 { 3334 struct ixgbe_hw *hw = &ixgbe->hw; 3335 uint32_t flow_control; 3336 3337 /* 3338 * ixgbe driver supports the following user configurations: 3339 * 3340 * Jumbo frame configuration: 3341 * default_mtu 3342 * 3343 * Ethernet flow control configuration: 3344 * flow_control 3345 * 3346 * Multiple rings configurations: 3347 * tx_queue_number 3348 * tx_ring_size 3349 * rx_queue_number 3350 * rx_ring_size 3351 * 3352 * Call ixgbe_get_prop() to get the value for a specific 3353 * configuration parameter. 3354 */ 3355 3356 /* 3357 * Jumbo frame configuration - max_frame_size controls host buffer 3358 * allocation, so it includes the MTU, ethernet header, vlan tag and 3359 * frame check sequence. 3360 */ 3361 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU, 3362 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU); 3363 3364 ixgbe->max_frame_size = ixgbe->default_mtu + 3365 sizeof (struct ether_vlan_header) + ETHERFCSL; 3366 3367 /* 3368 * Ethernet flow control configuration 3369 */ 3370 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL, 3371 ixgbe_fc_none, 3, ixgbe_fc_none); 3372 if (flow_control == 3) 3373 flow_control = ixgbe_fc_default; 3374 3375 /* 3376 * fc.requested_mode is what the user requests. After autoneg, 3377 * fc.current_mode will be the flow_control mode that was negotiated. 3378 */ 3379 hw->fc.requested_mode = flow_control; 3380 3381 /* 3382 * Multiple rings configurations 3383 */ 3384 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM, 3385 ixgbe->capab->min_tx_que_num, 3386 ixgbe->capab->max_tx_que_num, 3387 ixgbe->capab->def_tx_que_num); 3388 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE, 3389 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE); 3390 3391 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM, 3392 ixgbe->capab->min_rx_que_num, 3393 ixgbe->capab->max_rx_que_num, 3394 ixgbe->capab->def_rx_que_num); 3395 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE, 3396 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE); 3397 3398 /* 3399 * Multiple groups configuration 3400 */ 3401 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM, 3402 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num, 3403 ixgbe->capab->def_rx_grp_num); 3404 3405 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE, 3406 0, 1, DEFAULT_MR_ENABLE); 3407 3408 if (ixgbe->mr_enable == B_FALSE) { 3409 ixgbe->num_tx_rings = 1; 3410 ixgbe->num_rx_rings = 1; 3411 ixgbe->num_rx_groups = 1; 3412 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 3413 } else { 3414 ixgbe->num_rx_rings = ixgbe->num_rx_groups * 3415 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1); 3416 /* 3417 * The combination of num_rx_rings and num_rx_groups 3418 * may not be supported by h/w. We need to adjust 3419 * them to appropriate values. 3420 */ 3421 ixgbe_setup_vmdq_rss_conf(ixgbe); 3422 } 3423 3424 /* 3425 * Tunable used to force an interrupt type. The only use is 3426 * for testing of the lesser interrupt types.
3427 * 0 = don't force interrupt type 3428 * 1 = force interrupt type MSI-X 3429 * 2 = force interrupt type MSI 3430 * 3 = force interrupt type Legacy 3431 */ 3432 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE, 3433 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE); 3434 3435 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE, 3436 0, 1, DEFAULT_TX_HCKSUM_ENABLE); 3437 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE, 3438 0, 1, DEFAULT_RX_HCKSUM_ENABLE); 3439 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE, 3440 0, 1, DEFAULT_LSO_ENABLE); 3441 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE, 3442 0, 1, DEFAULT_LRO_ENABLE); 3443 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE, 3444 0, 1, DEFAULT_TX_HEAD_WB_ENABLE); 3445 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe, 3446 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE); 3447 3448 /* Head Write Back not recommended for 82599, X540 and X550 */ 3449 if (hw->mac.type == ixgbe_mac_82599EB || 3450 hw->mac.type == ixgbe_mac_X540 || 3451 hw->mac.type == ixgbe_mac_X550 || 3452 hw->mac.type == ixgbe_mac_X550EM_x) { 3453 ixgbe->tx_head_wb_enable = B_FALSE; 3454 } 3455 3456 /* 3457 * ixgbe LSO needs the tx h/w checksum support. 3458 * LSO will be disabled if tx h/w checksum is not 3459 * enabled. 3460 */ 3461 if (ixgbe->tx_hcksum_enable == B_FALSE) { 3462 ixgbe->lso_enable = B_FALSE; 3463 } 3464 3465 /* 3466 * ixgbe LRO needs the rx h/w checksum support. 3467 * LRO will be disabled if rx h/w checksum is not 3468 * enabled. 3469 */ 3470 if (ixgbe->rx_hcksum_enable == B_FALSE) { 3471 ixgbe->lro_enable = B_FALSE; 3472 } 3473 3474 /* 3475 * ixgbe LRO is only supported by 82599, X540 and X550 3476 */ 3477 if (hw->mac.type == ixgbe_mac_82598EB) { 3478 ixgbe->lro_enable = B_FALSE; 3479 } 3480 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD, 3481 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD, 3482 DEFAULT_TX_COPY_THRESHOLD); 3483 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe, 3484 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD, 3485 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD); 3486 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe, 3487 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD, 3488 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD); 3489 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe, 3490 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD, 3491 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD); 3492 3493 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD, 3494 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD, 3495 DEFAULT_RX_COPY_THRESHOLD); 3496 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR, 3497 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR, 3498 DEFAULT_RX_LIMIT_PER_INTR); 3499 3500 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING, 3501 ixgbe->capab->min_intr_throttle, 3502 ixgbe->capab->max_intr_throttle, 3503 ixgbe->capab->def_intr_throttle); 3504 /* 3505 * 82599, X540 and X550 require that the interrupt throttling rate be 3506 * a multiple of 8. This is enforced by the register definition.
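 * For example, a configured value of 203 is masked down to
 * 203 & 0xFF8 = 200.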
3507 */ 3508 if (hw->mac.type == ixgbe_mac_82599EB || 3509 hw->mac.type == ixgbe_mac_X540 || 3510 hw->mac.type == ixgbe_mac_X550 || 3511 hw->mac.type == ixgbe_mac_X550EM_x) 3512 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8; 3513 3514 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe, 3515 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP); 3516 } 3517 3518 static void 3519 ixgbe_init_params(ixgbe_t *ixgbe) 3520 { 3521 struct ixgbe_hw *hw = &ixgbe->hw; 3522 ixgbe_link_speed speeds_supported = 0; 3523 boolean_t negotiate; 3524 3525 /* 3526 * Get a list of speeds the adapter supports. If the hw struct hasn't 3527 * been populated with this information yet, retrieve it from the 3528 * adapter and save it to our own variable. 3529 * 3530 * On certain adapters, such as ones which use SFPs, the contents of 3531 * hw->phy.speeds_supported (and hw->phy.autoneg_advertised) are not 3532 * updated, so we must rely on calling ixgbe_get_link_capabilities() 3533 * in order to ascertain the speeds which we are capable of supporting, 3534 * and in the case of SFP-equipped adapters, which speed we are 3535 * advertising. If ixgbe_get_link_capabilities() fails for some reason, 3536 * we'll go with a default list of speeds as a last resort. 3537 */ 3538 speeds_supported = hw->phy.speeds_supported; 3539 3540 if (speeds_supported == 0) { 3541 if (ixgbe_get_link_capabilities(hw, &speeds_supported, 3542 &negotiate) != IXGBE_SUCCESS) { 3543 if (hw->mac.type == ixgbe_mac_82598EB) { 3544 speeds_supported = 3545 IXGBE_LINK_SPEED_82598_AUTONEG; 3546 } else { 3547 speeds_supported = 3548 IXGBE_LINK_SPEED_82599_AUTONEG; 3549 } 3550 } 3551 } 3552 ixgbe->speeds_supported = speeds_supported; 3553 3554 /* 3555 * By default, all supported speeds are enabled and advertised. 
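 * Each supported speed has a matching pair of flags: param_en_*
 * (the speed is enabled) and param_adv_* (the speed is advertised
 * when autonegotiating); both default here to the hardware's
 * capabilities and may be narrowed later through the MAC property
 * interface.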
3556 */ 3557 if (speeds_supported & IXGBE_LINK_SPEED_10GB_FULL) { 3558 ixgbe->param_en_10000fdx_cap = 1; 3559 ixgbe->param_adv_10000fdx_cap = 1; 3560 } else { 3561 ixgbe->param_en_10000fdx_cap = 0; 3562 ixgbe->param_adv_10000fdx_cap = 0; 3563 } 3564 3565 if (speeds_supported & IXGBE_LINK_SPEED_5GB_FULL) { 3566 ixgbe->param_en_5000fdx_cap = 1; 3567 ixgbe->param_adv_5000fdx_cap = 1; 3568 } else { 3569 ixgbe->param_en_5000fdx_cap = 0; 3570 ixgbe->param_adv_5000fdx_cap = 0; 3571 } 3572 3573 if (speeds_supported & IXGBE_LINK_SPEED_2_5GB_FULL) { 3574 ixgbe->param_en_2500fdx_cap = 1; 3575 ixgbe->param_adv_2500fdx_cap = 1; 3576 } else { 3577 ixgbe->param_en_2500fdx_cap = 0; 3578 ixgbe->param_adv_2500fdx_cap = 0; 3579 } 3580 3581 if (speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) { 3582 ixgbe->param_en_1000fdx_cap = 1; 3583 ixgbe->param_adv_1000fdx_cap = 1; 3584 } else { 3585 ixgbe->param_en_1000fdx_cap = 0; 3586 ixgbe->param_adv_1000fdx_cap = 0; 3587 } 3588 3589 if (speeds_supported & IXGBE_LINK_SPEED_100_FULL) { 3590 ixgbe->param_en_100fdx_cap = 1; 3591 ixgbe->param_adv_100fdx_cap = 1; 3592 } else { 3593 ixgbe->param_en_100fdx_cap = 0; 3594 ixgbe->param_adv_100fdx_cap = 0; 3595 } 3596 3597 ixgbe->param_pause_cap = 1; 3598 ixgbe->param_asym_pause_cap = 1; 3599 ixgbe->param_rem_fault = 0; 3600 3601 ixgbe->param_adv_autoneg_cap = 1; 3602 ixgbe->param_adv_pause_cap = 1; 3603 ixgbe->param_adv_asym_pause_cap = 1; 3604 ixgbe->param_adv_rem_fault = 0; 3605 3606 ixgbe->param_lp_10000fdx_cap = 0; 3607 ixgbe->param_lp_5000fdx_cap = 0; 3608 ixgbe->param_lp_2500fdx_cap = 0; 3609 ixgbe->param_lp_1000fdx_cap = 0; 3610 ixgbe->param_lp_100fdx_cap = 0; 3611 ixgbe->param_lp_autoneg_cap = 0; 3612 ixgbe->param_lp_pause_cap = 0; 3613 ixgbe->param_lp_asym_pause_cap = 0; 3614 ixgbe->param_lp_rem_fault = 0; 3615 } 3616 3617 /* 3618 * ixgbe_get_prop - Get a property value out of the configuration file 3619 * ixgbe.conf. 3620 * 3621 * Caller provides the name of the property, a default value, a minimum 3622 * value, and a maximum value. 3623 * 3624 * Return configured value of the property, with default, minimum and 3625 * maximum properly applied. 3626 */ 3627 static int 3628 ixgbe_get_prop(ixgbe_t *ixgbe, 3629 char *propname, /* name of the property */ 3630 int minval, /* minimum acceptable value */ 3631 int maxval, /* maximim acceptable value */ 3632 int defval) /* default value */ 3633 { 3634 int value; 3635 3636 /* 3637 * Call ddi_prop_get_int() to read the conf settings 3638 */ 3639 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip, 3640 DDI_PROP_DONTPASS, propname, defval); 3641 if (value > maxval) 3642 value = maxval; 3643 3644 if (value < minval) 3645 value = minval; 3646 3647 return (value); 3648 } 3649 3650 /* 3651 * ixgbe_driver_setup_link - Using the link properties to setup the link. 3652 */ 3653 int 3654 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw) 3655 { 3656 struct ixgbe_hw *hw = &ixgbe->hw; 3657 ixgbe_link_speed advertised = 0; 3658 3659 /* 3660 * Assemble a list of enabled speeds to auto-negotiate with. 
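 * Each param_en_*_cap flag set in ixgbe_init_params() contributes
 * one IXGBE_LINK_SPEED_* bit to the advertised mask that is passed
 * to ixgbe_setup_link() below.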
3661 */ 3662 if (ixgbe->param_en_10000fdx_cap == 1) 3663 advertised |= IXGBE_LINK_SPEED_10GB_FULL; 3664 3665 if (ixgbe->param_en_5000fdx_cap == 1) 3666 advertised |= IXGBE_LINK_SPEED_5GB_FULL; 3667 3668 if (ixgbe->param_en_2500fdx_cap == 1) 3669 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; 3670 3671 if (ixgbe->param_en_1000fdx_cap == 1) 3672 advertised |= IXGBE_LINK_SPEED_1GB_FULL; 3673 3674 if (ixgbe->param_en_100fdx_cap == 1) 3675 advertised |= IXGBE_LINK_SPEED_100_FULL; 3676 3677 /* 3678 * As a last resort, autoneg with a default list of speeds. 3679 */ 3680 if (ixgbe->param_adv_autoneg_cap == 1 && advertised == 0) { 3681 ixgbe_notice(ixgbe, "Invalid link settings. Setting link " 3682 "to autonegotiate with full capabilities."); 3683 3684 if (hw->mac.type == ixgbe_mac_82598EB) 3685 advertised = IXGBE_LINK_SPEED_82598_AUTONEG; 3686 else 3687 advertised = IXGBE_LINK_SPEED_82599_AUTONEG; 3688 } 3689 3690 if (setup_hw) { 3691 if (ixgbe_setup_link(&ixgbe->hw, advertised, 3692 ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) { 3693 ixgbe_notice(ixgbe, "Setup link failed on this " 3694 "device."); 3695 return (IXGBE_FAILURE); 3696 } 3697 } 3698 3699 return (IXGBE_SUCCESS); 3700 } 3701 3702 /* 3703 * ixgbe_driver_link_check - Link status processing. 3704 * 3705 * This function can be called in both kernel context and interrupt context 3706 */ 3707 static void 3708 ixgbe_driver_link_check(ixgbe_t *ixgbe) 3709 { 3710 struct ixgbe_hw *hw = &ixgbe->hw; 3711 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN; 3712 boolean_t link_up = B_FALSE; 3713 boolean_t link_changed = B_FALSE; 3714 3715 ASSERT(mutex_owned(&ixgbe->gen_lock)); 3716 3717 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 3718 if (link_up) { 3719 ixgbe->link_check_complete = B_TRUE; 3720 3721 /* Link is up, enable flow control settings */ 3722 (void) ixgbe_fc_enable(hw); 3723 3724 /* 3725 * The Link is up, check whether it was marked as down earlier 3726 */ 3727 if (ixgbe->link_state != LINK_STATE_UP) { 3728 switch (speed) { 3729 case IXGBE_LINK_SPEED_10GB_FULL: 3730 ixgbe->link_speed = SPEED_10GB; 3731 break; 3732 case IXGBE_LINK_SPEED_5GB_FULL: 3733 ixgbe->link_speed = SPEED_5GB; 3734 break; 3735 case IXGBE_LINK_SPEED_2_5GB_FULL: 3736 ixgbe->link_speed = SPEED_2_5GB; 3737 break; 3738 case IXGBE_LINK_SPEED_1GB_FULL: 3739 ixgbe->link_speed = SPEED_1GB; 3740 break; 3741 case IXGBE_LINK_SPEED_100_FULL: 3742 ixgbe->link_speed = SPEED_100; 3743 } 3744 ixgbe->link_duplex = LINK_DUPLEX_FULL; 3745 ixgbe->link_state = LINK_STATE_UP; 3746 link_changed = B_TRUE; 3747 } 3748 } else { 3749 if (ixgbe->link_check_complete == B_TRUE || 3750 (ixgbe->link_check_complete == B_FALSE && 3751 gethrtime() >= ixgbe->link_check_hrtime)) { 3752 /* 3753 * The link is really down 3754 */ 3755 ixgbe->link_check_complete = B_TRUE; 3756 3757 if (ixgbe->link_state != LINK_STATE_DOWN) { 3758 ixgbe->link_speed = 0; 3759 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN; 3760 ixgbe->link_state = LINK_STATE_DOWN; 3761 link_changed = B_TRUE; 3762 } 3763 } 3764 } 3765 3766 /* 3767 * If we are in an interrupt context, need to re-enable the 3768 * interrupt, which was automasked 3769 */ 3770 if (servicing_interrupt() != 0) { 3771 ixgbe->eims |= IXGBE_EICR_LSC; 3772 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 3773 } 3774 3775 if (link_changed) { 3776 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state); 3777 } 3778 } 3779 3780 /* 3781 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599. 
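 *
 * As the code below shows, a GPI on SDP1 triggers a multispeed-fiber
 * link setup, while a GPI on SDP2 indicates that a new SFP module was
 * detected, in which case the module setup (setup_sfp) runs first.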
3782 */ 3783 static void 3784 ixgbe_sfp_check(void *arg) 3785 { 3786 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3787 uint32_t eicr = ixgbe->eicr; 3788 struct ixgbe_hw *hw = &ixgbe->hw; 3789 3790 mutex_enter(&ixgbe->gen_lock); 3791 (void) hw->phy.ops.identify_sfp(hw); 3792 if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) { 3793 /* clear the interrupt */ 3794 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); 3795 3796 /* if link up, do multispeed fiber setup */ 3797 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3798 B_TRUE); 3799 ixgbe_driver_link_check(ixgbe); 3800 ixgbe_get_hw_state(ixgbe); 3801 } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) { 3802 /* clear the interrupt */ 3803 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw)); 3804 3805 /* if link up, do sfp module setup */ 3806 (void) hw->mac.ops.setup_sfp(hw); 3807 3808 /* do multispeed fiber setup */ 3809 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG, 3810 B_TRUE); 3811 ixgbe_driver_link_check(ixgbe); 3812 ixgbe_get_hw_state(ixgbe); 3813 } 3814 mutex_exit(&ixgbe->gen_lock); 3815 3816 /* 3817 * We need to fully re-check the link later. 3818 */ 3819 ixgbe->link_check_complete = B_FALSE; 3820 ixgbe->link_check_hrtime = gethrtime() + 3821 (IXGBE_LINK_UP_TIME * 100000000ULL); 3822 } 3823 3824 /* 3825 * ixgbe_overtemp_check - overtemp module processing done in taskq 3826 * 3827 * This routine will only be called on adapters with a temperature sensor. 3828 * The indication of over-temperature can be either an SDP0 interrupt or the 3829 * link status change interrupt. 3830 */ 3831 static void 3832 ixgbe_overtemp_check(void *arg) 3833 { 3834 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3835 struct ixgbe_hw *hw = &ixgbe->hw; 3836 uint32_t eicr = ixgbe->eicr; 3837 ixgbe_link_speed speed; 3838 boolean_t link_up; 3839 3840 mutex_enter(&ixgbe->gen_lock); 3841 3842 /* make sure we know current state of link */ 3843 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 3844 3845 /* check over-temp condition */ 3846 if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) || 3847 (eicr & IXGBE_EICR_LSC)) { 3848 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) { 3849 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 3850 3851 /* 3852 * Disable the adapter interrupts 3853 */ 3854 ixgbe_disable_adapter_interrupts(ixgbe); 3855 3856 /* 3857 * Disable Rx/Tx units 3858 */ 3859 (void) ixgbe_stop_adapter(hw); 3860 3861 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 3862 ixgbe_error(ixgbe, 3863 "Problem: Network adapter has been stopped " 3864 "because it has overheated"); 3865 ixgbe_error(ixgbe, 3866 "Action: Restart the computer. " 3867 "If the problem persists, power off the system " 3868 "and replace the adapter"); 3869 } 3870 } 3871 3872 /* write to clear the interrupt */ 3873 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr); 3874 3875 mutex_exit(&ixgbe->gen_lock); 3876 } 3877 3878 /* 3879 * ixgbe_phy_check - taskq to process interrupts from an external PHY 3880 * 3881 * This routine will only be called on adapters with external PHYs 3882 * (such as X550) that may be trying to raise our attention to some event. 3883 * Currently, this is limited to claiming PHY overtemperature and link status 3884 * change (LSC) events, however this may expand to include other things in 3885 * future adapters.
3886 */ 3887 static void 3888 ixgbe_phy_check(void *arg) 3889 { 3890 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3891 struct ixgbe_hw *hw = &ixgbe->hw; 3892 int rv; 3893 3894 mutex_enter(&ixgbe->gen_lock); 3895 3896 /* 3897 * X550 baseT PHY overtemp and LSC events are handled here. 3898 * 3899 * If an overtemp event occurs, it will be reflected in the 3900 * return value of phy.ops.handle_lasi() and the common code will 3901 * automatically power off the baseT PHY. This is our cue to trigger 3902 * an FMA event. 3903 * 3904 * If a link status change event occurs, phy.ops.handle_lasi() will 3905 * automatically initiate a link setup between the integrated KR PHY 3906 * and the external X557 PHY to ensure that the link speed between 3907 * them matches the link speed of the baseT link. 3908 */ 3909 rv = ixgbe_handle_lasi(hw); 3910 3911 if (rv == IXGBE_ERR_OVERTEMP) { 3912 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 3913 3914 /* 3915 * Disable the adapter interrupts 3916 */ 3917 ixgbe_disable_adapter_interrupts(ixgbe); 3918 3919 /* 3920 * Disable Rx/Tx units 3921 */ 3922 (void) ixgbe_stop_adapter(hw); 3923 3924 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 3925 ixgbe_error(ixgbe, 3926 "Problem: Network adapter has been stopped due to an " 3927 "overtemperature event being detected."); 3928 ixgbe_error(ixgbe, 3929 "Action: Shut down or restart the computer. If the issue " 3930 "persists, please take action in accordance with the " 3931 "recommendations from your system vendor."); 3932 } 3933 3934 mutex_exit(&ixgbe->gen_lock); 3935 } 3936 3937 /* 3938 * ixgbe_link_timer - timer for link status detection 3939 */ 3940 static void 3941 ixgbe_link_timer(void *arg) 3942 { 3943 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3944 3945 mutex_enter(&ixgbe->gen_lock); 3946 ixgbe_driver_link_check(ixgbe); 3947 mutex_exit(&ixgbe->gen_lock); 3948 } 3949 3950 /* 3951 * ixgbe_local_timer - Driver watchdog function. 3952 * 3953 * This function will handle the transmit stall check and other routines. 3954 */ 3955 static void 3956 ixgbe_local_timer(void *arg) 3957 { 3958 ixgbe_t *ixgbe = (ixgbe_t *)arg; 3959 3960 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP) 3961 goto out; 3962 3963 if (ixgbe->ixgbe_state & IXGBE_ERROR) { 3964 ixgbe->reset_count++; 3965 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 3966 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 3967 goto out; 3968 } 3969 3970 if (ixgbe_stall_check(ixgbe)) { 3971 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL); 3972 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 3973 3974 ixgbe->reset_count++; 3975 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS) 3976 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED); 3977 } 3978 3979 out: 3980 ixgbe_restart_watchdog_timer(ixgbe); 3981 } 3982 3983 /* 3984 * ixgbe_stall_check - Check for transmit stall. 3985 * 3986 * This function checks if the adapter is stalled (in transmit). 3987 * 3988 * It is called each time the watchdog timeout is invoked. 3989 * If the transmit descriptor reclaim continuously fails, 3990 * the watchdog value will increment by 1. If the watchdog 3991 * value exceeds the threshold, the ixgbe is assumed to 3992 * have stalled and needs to be reset.
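 *
 * For example, with the 1-second interval used by
 * ixgbe_arm_watchdog_timer(), a tx ring whose recycle_fail count
 * stays nonzero for STALL_WATCHDOG_TIMEOUT consecutive ticks is
 * treated as stalled.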
3993 */ 3994 static boolean_t 3995 ixgbe_stall_check(ixgbe_t *ixgbe) 3996 { 3997 ixgbe_tx_ring_t *tx_ring; 3998 boolean_t result; 3999 int i; 4000 4001 if (ixgbe->link_state != LINK_STATE_UP) 4002 return (B_FALSE); 4003 4004 /* 4005 * If any tx ring is stalled, we'll reset the chipset 4006 */ 4007 result = B_FALSE; 4008 for (i = 0; i < ixgbe->num_tx_rings; i++) { 4009 tx_ring = &ixgbe->tx_rings[i]; 4010 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) { 4011 tx_ring->tx_recycle(tx_ring); 4012 } 4013 4014 if (tx_ring->recycle_fail > 0) 4015 tx_ring->stall_watchdog++; 4016 else 4017 tx_ring->stall_watchdog = 0; 4018 4019 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) { 4020 result = B_TRUE; 4021 break; 4022 } 4023 } 4024 4025 if (result) { 4026 tx_ring->stall_watchdog = 0; 4027 tx_ring->recycle_fail = 0; 4028 } 4029 4030 return (result); 4031 } 4032 4033 4034 /* 4035 * is_valid_mac_addr - Check if the mac address is valid. 4036 */ 4037 static boolean_t 4038 is_valid_mac_addr(uint8_t *mac_addr) 4039 { 4040 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 }; 4041 const uint8_t addr_test2[6] = 4042 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 4043 4044 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) || 4045 !(bcmp(addr_test2, mac_addr, ETHERADDRL))) 4046 return (B_FALSE); 4047 4048 return (B_TRUE); 4049 } 4050 4051 static boolean_t 4052 ixgbe_find_mac_address(ixgbe_t *ixgbe) 4053 { 4054 #ifdef __sparc 4055 struct ixgbe_hw *hw = &ixgbe->hw; 4056 uchar_t *bytes; 4057 struct ether_addr sysaddr; 4058 uint_t nelts; 4059 int err; 4060 boolean_t found = B_FALSE; 4061 4062 /* 4063 * The "vendor's factory-set address" may already have 4064 * been extracted from the chip, but if the property 4065 * "local-mac-address" is set we use that instead. 4066 * 4067 * We check whether it looks like an array of 6 4068 * bytes (which it should, if OBP set it). If we can't 4069 * make sense of it this way, we'll ignore it. 4070 */ 4071 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 4072 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts); 4073 if (err == DDI_PROP_SUCCESS) { 4074 if (nelts == ETHERADDRL) { 4075 while (nelts--) 4076 hw->mac.addr[nelts] = bytes[nelts]; 4077 found = B_TRUE; 4078 } 4079 ddi_prop_free(bytes); 4080 } 4081 4082 /* 4083 * Look up the OBP property "local-mac-address?". If the user has set 4084 * 'local-mac-address? = false', use "the system address" instead. 4085 */ 4086 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0, 4087 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) { 4088 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) { 4089 if (localetheraddr(NULL, &sysaddr) != 0) { 4090 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL); 4091 found = B_TRUE; 4092 } 4093 } 4094 ddi_prop_free(bytes); 4095 } 4096 4097 /* 4098 * Finally(!), if there's a valid "mac-address" property (created 4099 * if we netbooted from this interface), we must use this instead 4100 * of any of the above to ensure that the NFS/install server doesn't 4101 * get confused by the address changing as illumos takes over! 
4102 */ 4103 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 4104 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts); 4105 if (err == DDI_PROP_SUCCESS) { 4106 if (nelts == ETHERADDRL) { 4107 while (nelts--) 4108 hw->mac.addr[nelts] = bytes[nelts]; 4109 found = B_TRUE; 4110 } 4111 ddi_prop_free(bytes); 4112 } 4113 4114 if (found) { 4115 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 4116 return (B_TRUE); 4117 } 4118 #else 4119 _NOTE(ARGUNUSED(ixgbe)); 4120 #endif 4121 4122 return (B_TRUE); 4123 } 4124 4125 #pragma inline(ixgbe_arm_watchdog_timer) 4126 static void 4127 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe) 4128 { 4129 /* 4130 * Fire a watchdog timer 4131 */ 4132 ixgbe->watchdog_tid = 4133 timeout(ixgbe_local_timer, 4134 (void *)ixgbe, 1 * drv_usectohz(1000000)); 4135 4136 } 4137 4138 /* 4139 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer. 4140 */ 4141 void 4142 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe) 4143 { 4144 mutex_enter(&ixgbe->watchdog_lock); 4145 4146 if (!ixgbe->watchdog_enable) { 4147 ixgbe->watchdog_enable = B_TRUE; 4148 ixgbe->watchdog_start = B_TRUE; 4149 ixgbe_arm_watchdog_timer(ixgbe); 4150 } 4151 4152 mutex_exit(&ixgbe->watchdog_lock); 4153 } 4154 4155 /* 4156 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer. 4157 */ 4158 void 4159 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe) 4160 { 4161 timeout_id_t tid; 4162 4163 mutex_enter(&ixgbe->watchdog_lock); 4164 4165 ixgbe->watchdog_enable = B_FALSE; 4166 ixgbe->watchdog_start = B_FALSE; 4167 tid = ixgbe->watchdog_tid; 4168 ixgbe->watchdog_tid = 0; 4169 4170 mutex_exit(&ixgbe->watchdog_lock); 4171 4172 if (tid != 0) 4173 (void) untimeout(tid); 4174 } 4175 4176 /* 4177 * ixgbe_start_watchdog_timer - Start the driver watchdog timer. 4178 */ 4179 void 4180 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe) 4181 { 4182 mutex_enter(&ixgbe->watchdog_lock); 4183 4184 if (ixgbe->watchdog_enable) { 4185 if (!ixgbe->watchdog_start) { 4186 ixgbe->watchdog_start = B_TRUE; 4187 ixgbe_arm_watchdog_timer(ixgbe); 4188 } 4189 } 4190 4191 mutex_exit(&ixgbe->watchdog_lock); 4192 } 4193 4194 /* 4195 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer. 4196 */ 4197 static void 4198 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe) 4199 { 4200 mutex_enter(&ixgbe->watchdog_lock); 4201 4202 if (ixgbe->watchdog_start) 4203 ixgbe_arm_watchdog_timer(ixgbe); 4204 4205 mutex_exit(&ixgbe->watchdog_lock); 4206 } 4207 4208 /* 4209 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer. 4210 */ 4211 void 4212 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe) 4213 { 4214 timeout_id_t tid; 4215 4216 mutex_enter(&ixgbe->watchdog_lock); 4217 4218 ixgbe->watchdog_start = B_FALSE; 4219 tid = ixgbe->watchdog_tid; 4220 ixgbe->watchdog_tid = 0; 4221 4222 mutex_exit(&ixgbe->watchdog_lock); 4223 4224 if (tid != 0) 4225 (void) untimeout(tid); 4226 } 4227 4228 /* 4229 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts. 4230 */ 4231 static void 4232 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe) 4233 { 4234 struct ixgbe_hw *hw = &ixgbe->hw; 4235 4236 /* 4237 * mask all interrupts off 4238 */ 4239 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff); 4240 4241 /* 4242 * for MSI-X, also disable autoclear 4243 */ 4244 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 4245 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0); 4246 } 4247 4248 IXGBE_WRITE_FLUSH(hw); 4249 } 4250 4251 /* 4252 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts. 
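 *
 * Four registers are programmed below: EIMS (the interrupt enable
 * mask), EIAC (autoclear, used in MSI-X mode so that queue causes
 * clear without an explicit EICR write), EIAM (automask for the
 * "other" causes this adapter can generate) and GPIE (general purpose
 * interrupt enables such as MSI-X mode and the RSC delay used with
 * LRO).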
4253 */ 4254 static void 4255 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe) 4256 { 4257 struct ixgbe_hw *hw = &ixgbe->hw; 4258 uint32_t eiac, eiam; 4259 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 4260 4261 /* interrupt types to enable */ 4262 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */ 4263 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */ 4264 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */ 4265 4266 /* enable automask on "other" causes that this adapter can generate */ 4267 eiam = ixgbe->capab->other_intr; 4268 4269 /* 4270 * msi-x mode 4271 */ 4272 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) { 4273 /* enable autoclear but not on bits 29:20 */ 4274 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR); 4275 4276 /* general purpose interrupt enable */ 4277 gpie |= (IXGBE_GPIE_MSIX_MODE 4278 | IXGBE_GPIE_PBA_SUPPORT 4279 | IXGBE_GPIE_OCD 4280 | IXGBE_GPIE_EIAME); 4281 /* 4282 * non-msi-x mode 4283 */ 4284 } else { 4285 4286 /* disable autoclear, leave gpie at default */ 4287 eiac = 0; 4288 4289 /* 4290 * General purpose interrupt enable. 4291 * For 82599, X540 and X550, extended interrupt 4292 * automask is enabled only in MSI or MSI-X mode. 4293 */ 4294 if ((hw->mac.type == ixgbe_mac_82598EB) || 4295 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) { 4296 gpie |= IXGBE_GPIE_EIAME; 4297 } 4298 } 4299 4300 /* Enable specific "other" interrupt types */ 4301 switch (hw->mac.type) { 4302 case ixgbe_mac_82598EB: 4303 gpie |= ixgbe->capab->other_gpie; 4304 break; 4305 4306 case ixgbe_mac_82599EB: 4307 case ixgbe_mac_X540: 4308 case ixgbe_mac_X550: 4309 case ixgbe_mac_X550EM_x: 4310 gpie |= ixgbe->capab->other_gpie; 4311 4312 /* Enable RSC Delay 8us when LRO enabled */ 4313 if (ixgbe->lro_enable) { 4314 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT); 4315 } 4316 break; 4317 4318 default: 4319 break; 4320 } 4321 4322 /* write to interrupt control registers */ 4323 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4324 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac); 4325 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam); 4326 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); 4327 IXGBE_WRITE_FLUSH(hw); 4328 } 4329 4330 /* 4331 * ixgbe_loopback_ioctl - Loopback support.
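 *
 * The handler below supports the LB_GET_INFO_SIZE, LB_GET_INFO,
 * LB_GET_MODE and LB_SET_MODE ioctls; the advertised loopback modes
 * are lb_normal, lb_mac and lb_external, and any other command (or a
 * malformed payload) returns IOC_INVAL.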
4332 */ 4333 enum ioc_reply 4334 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp) 4335 { 4336 lb_info_sz_t *lbsp; 4337 lb_property_t *lbpp; 4338 uint32_t *lbmp; 4339 uint32_t size; 4340 uint32_t value; 4341 4342 if (mp->b_cont == NULL) 4343 return (IOC_INVAL); 4344 4345 switch (iocp->ioc_cmd) { 4346 default: 4347 return (IOC_INVAL); 4348 4349 case LB_GET_INFO_SIZE: 4350 size = sizeof (lb_info_sz_t); 4351 if (iocp->ioc_count != size) 4352 return (IOC_INVAL); 4353 4354 value = sizeof (lb_normal); 4355 value += sizeof (lb_mac); 4356 value += sizeof (lb_external); 4357 4358 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr; 4359 *lbsp = value; 4360 break; 4361 4362 case LB_GET_INFO: 4363 value = sizeof (lb_normal); 4364 value += sizeof (lb_mac); 4365 value += sizeof (lb_external); 4366 4367 size = value; 4368 if (iocp->ioc_count != size) 4369 return (IOC_INVAL); 4370 4371 value = 0; 4372 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr; 4373 4374 lbpp[value++] = lb_normal; 4375 lbpp[value++] = lb_mac; 4376 lbpp[value++] = lb_external; 4377 break; 4378 4379 case LB_GET_MODE: 4380 size = sizeof (uint32_t); 4381 if (iocp->ioc_count != size) 4382 return (IOC_INVAL); 4383 4384 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 4385 *lbmp = ixgbe->loopback_mode; 4386 break; 4387 4388 case LB_SET_MODE: 4389 size = 0; 4390 if (iocp->ioc_count != sizeof (uint32_t)) 4391 return (IOC_INVAL); 4392 4393 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr; 4394 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp)) 4395 return (IOC_INVAL); 4396 break; 4397 } 4398 4399 iocp->ioc_count = size; 4400 iocp->ioc_error = 0; 4401 4402 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4403 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4404 return (IOC_INVAL); 4405 } 4406 4407 return (IOC_REPLY); 4408 } 4409 4410 /* 4411 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode. 4412 */ 4413 static boolean_t 4414 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode) 4415 { 4416 if (mode == ixgbe->loopback_mode) 4417 return (B_TRUE); 4418 4419 ixgbe->loopback_mode = mode; 4420 4421 if (mode == IXGBE_LB_NONE) { 4422 /* 4423 * Reset the chip 4424 */ 4425 (void) ixgbe_reset(ixgbe); 4426 return (B_TRUE); 4427 } 4428 4429 mutex_enter(&ixgbe->gen_lock); 4430 4431 switch (mode) { 4432 default: 4433 mutex_exit(&ixgbe->gen_lock); 4434 return (B_FALSE); 4435 4436 case IXGBE_LB_EXTERNAL: 4437 break; 4438 4439 case IXGBE_LB_INTERNAL_MAC: 4440 ixgbe_set_internal_mac_loopback(ixgbe); 4441 break; 4442 } 4443 4444 mutex_exit(&ixgbe->gen_lock); 4445 4446 return (B_TRUE); 4447 } 4448 4449 /* 4450 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode. 
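 *
 * MAC loopback is entered by setting HLREG0.LPBK and clearing the
 * AUTOC link mode select field. On 82598, the Atlas analog Tx lanes
 * are then powered down so looped-back frames never reach the wire;
 * on 82599 and later parts the link is instead forced up at 10Gb
 * (AUTOC_FLU) before ixgbe_setup_link() is called.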
4451 */ 4452 static void 4453 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe) 4454 { 4455 struct ixgbe_hw *hw; 4456 uint32_t reg; 4457 uint8_t atlas; 4458 4459 hw = &ixgbe->hw; 4460 4461 /* 4462 * Setup MAC loopback 4463 */ 4464 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0); 4465 reg |= IXGBE_HLREG0_LPBK; 4466 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg); 4467 4468 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4469 reg &= ~IXGBE_AUTOC_LMS_MASK; 4470 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4471 4472 /* 4473 * Disable Atlas Tx lanes to keep packets in loopback and not on wire 4474 */ 4475 switch (hw->mac.type) { 4476 case ixgbe_mac_82598EB: 4477 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 4478 &atlas); 4479 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN; 4480 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK, 4481 atlas); 4482 4483 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4484 &atlas); 4485 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 4486 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G, 4487 atlas); 4488 4489 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4490 &atlas); 4491 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 4492 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G, 4493 atlas); 4494 4495 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4496 &atlas); 4497 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 4498 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN, 4499 atlas); 4500 break; 4501 4502 case ixgbe_mac_82599EB: 4503 case ixgbe_mac_X540: 4504 case ixgbe_mac_X550: 4505 case ixgbe_mac_X550EM_x: 4506 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC); 4507 reg |= (IXGBE_AUTOC_FLU | 4508 IXGBE_AUTOC_10G_KX4); 4509 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg); 4510 4511 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL, 4512 B_FALSE); 4513 break; 4514 4515 default: 4516 break; 4517 } 4518 } 4519 4520 #pragma inline(ixgbe_intr_rx_work) 4521 /* 4522 * ixgbe_intr_rx_work - RX processing of ISR. 4523 */ 4524 static void 4525 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring) 4526 { 4527 mblk_t *mp; 4528 4529 mutex_enter(&rx_ring->rx_lock); 4530 4531 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4532 mutex_exit(&rx_ring->rx_lock); 4533 4534 if (mp != NULL) 4535 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4536 rx_ring->ring_gen_num); 4537 } 4538 4539 #pragma inline(ixgbe_intr_tx_work) 4540 /* 4541 * ixgbe_intr_tx_work - TX processing of ISR. 
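 *
 * This recycles completed tx descriptors and, if a blocked ring has
 * at least tx_resched_thresh free descriptors again, calls
 * mac_tx_ring_update() so that MAC resumes transmitting on the ring.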
4542 */ 4543 static void 4544 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring) 4545 { 4546 ixgbe_t *ixgbe = tx_ring->ixgbe; 4547 4548 /* 4549 * Recycle the tx descriptors 4550 */ 4551 tx_ring->tx_recycle(tx_ring); 4552 4553 /* 4554 * Schedule the re-transmit 4555 */ 4556 if (tx_ring->reschedule && 4557 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) { 4558 tx_ring->reschedule = B_FALSE; 4559 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl, 4560 tx_ring->ring_handle); 4561 tx_ring->stat_reschedule++; 4562 } 4563 } 4564 4565 #pragma inline(ixgbe_intr_other_work) 4566 /* 4567 * ixgbe_intr_other_work - Process interrupt types other than tx/rx 4568 */ 4569 static void 4570 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr) 4571 { 4572 struct ixgbe_hw *hw = &ixgbe->hw; 4573 4574 ASSERT(mutex_owned(&ixgbe->gen_lock)); 4575 4576 /* 4577 * handle link status change 4578 */ 4579 if (eicr & IXGBE_EICR_LSC) { 4580 ixgbe_driver_link_check(ixgbe); 4581 ixgbe_get_hw_state(ixgbe); 4582 } 4583 4584 /* 4585 * check for fan failure on adapters with fans 4586 */ 4587 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && 4588 (eicr & IXGBE_EICR_GPI_SDP1)) { 4589 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP); 4590 4591 /* 4592 * Disable the adapter interrupts 4593 */ 4594 ixgbe_disable_adapter_interrupts(ixgbe); 4595 4596 /* 4597 * Disable Rx/Tx units 4598 */ 4599 (void) ixgbe_stop_adapter(&ixgbe->hw); 4600 4601 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST); 4602 ixgbe_error(ixgbe, 4603 "Problem: Network adapter has been stopped " 4604 "because the fan has stopped.\n"); 4605 ixgbe_error(ixgbe, 4606 "Action: Replace the adapter.\n"); 4607 4608 /* re-enable the interrupt, which was automasked */ 4609 ixgbe->eims |= IXGBE_EICR_GPI_SDP1; 4610 } 4611 4612 /* 4613 * Do SFP check for adapters with hot-plug capability 4614 */ 4615 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) && 4616 ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) || 4617 (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) { 4618 ixgbe->eicr = eicr; 4619 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq, 4620 ixgbe_sfp_check, (void *)ixgbe, 4621 DDI_NOSLEEP)) != DDI_SUCCESS) { 4622 ixgbe_log(ixgbe, "No memory available to dispatch " 4623 "taskq for SFP check"); 4624 } 4625 } 4626 4627 /* 4628 * Do over-temperature check for adapters with temp sensor 4629 */ 4630 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) && 4631 ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) || 4632 (eicr & IXGBE_EICR_LSC))) { 4633 ixgbe->eicr = eicr; 4634 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq, 4635 ixgbe_overtemp_check, (void *)ixgbe, 4636 DDI_NOSLEEP)) != DDI_SUCCESS) { 4637 ixgbe_log(ixgbe, "No memory available to dispatch " 4638 "taskq for overtemp check"); 4639 } 4640 } 4641 4642 /* 4643 * Process an external PHY interrupt 4644 */ 4645 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && 4646 (eicr & IXGBE_EICR_GPI_SDP0_X540)) { 4647 ixgbe->eicr = eicr; 4648 if ((ddi_taskq_dispatch(ixgbe->phy_taskq, 4649 ixgbe_phy_check, (void *)ixgbe, 4650 DDI_NOSLEEP)) != DDI_SUCCESS) { 4651 ixgbe_log(ixgbe, "No memory available to dispatch " 4652 "taskq for PHY check"); 4653 } 4654 } 4655 } 4656 4657 /* 4658 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts. 
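 *
 * Because a legacy interrupt may be shared, the handler claims it
 * only when EICR is non-zero: bit 0 maps to rx ring[0] and bit 1 to
 * tx ring[0] (per the RTxQ setup in ixgbe_setup_adapter_vector()),
 * and any "other" cause bits are processed under gen_lock.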
4659 */ 4660 static uint_t 4661 ixgbe_intr_legacy(void *arg1, void *arg2) 4662 { 4663 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4664 struct ixgbe_hw *hw = &ixgbe->hw; 4665 ixgbe_tx_ring_t *tx_ring; 4666 ixgbe_rx_ring_t *rx_ring; 4667 uint32_t eicr; 4668 mblk_t *mp; 4669 boolean_t tx_reschedule; 4670 uint_t result; 4671 4672 _NOTE(ARGUNUSED(arg2)); 4673 4674 mutex_enter(&ixgbe->gen_lock); 4675 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) { 4676 mutex_exit(&ixgbe->gen_lock); 4677 return (DDI_INTR_UNCLAIMED); 4678 } 4679 4680 mp = NULL; 4681 tx_reschedule = B_FALSE; 4682 4683 /* 4684 * Any bit set in eicr: claim this interrupt 4685 */ 4686 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4687 4688 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4689 mutex_exit(&ixgbe->gen_lock); 4690 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4691 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4692 return (DDI_INTR_CLAIMED); 4693 } 4694 4695 if (eicr) { 4696 /* 4697 * For legacy interrupt, we have only one interrupt, 4698 * so we have only one rx ring and one tx ring enabled. 4699 */ 4700 ASSERT(ixgbe->num_rx_rings == 1); 4701 ASSERT(ixgbe->num_tx_rings == 1); 4702 4703 /* 4704 * For legacy interrupt, rx rings[0] will use RTxQ[0]. 4705 */ 4706 if (eicr & 0x1) { 4707 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE; 4708 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4709 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 4710 /* 4711 * Clean the rx descriptors 4712 */ 4713 rx_ring = &ixgbe->rx_rings[0]; 4714 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL); 4715 } 4716 4717 /* 4718 * For legacy interrupt, tx rings[0] will use RTxQ[1]. 4719 */ 4720 if (eicr & 0x2) { 4721 /* 4722 * Recycle the tx descriptors 4723 */ 4724 tx_ring = &ixgbe->tx_rings[0]; 4725 tx_ring->tx_recycle(tx_ring); 4726 4727 /* 4728 * Schedule the re-transmit 4729 */ 4730 tx_reschedule = (tx_ring->reschedule && 4731 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)); 4732 } 4733 4734 /* any interrupt type other than tx/rx */ 4735 if (eicr & ixgbe->capab->other_intr) { 4736 switch (hw->mac.type) { 4737 case ixgbe_mac_82598EB: 4738 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4739 break; 4740 4741 case ixgbe_mac_82599EB: 4742 case ixgbe_mac_X540: 4743 case ixgbe_mac_X550: 4744 case ixgbe_mac_X550EM_x: 4745 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4746 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4747 break; 4748 4749 default: 4750 break; 4751 } 4752 ixgbe_intr_other_work(ixgbe, eicr); 4753 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4754 } 4755 4756 mutex_exit(&ixgbe->gen_lock); 4757 4758 result = DDI_INTR_CLAIMED; 4759 } else { 4760 mutex_exit(&ixgbe->gen_lock); 4761 4762 /* 4763 * No interrupt cause bits set: don't claim this interrupt. 4764 */ 4765 result = DDI_INTR_UNCLAIMED; 4766 } 4767 4768 /* re-enable the interrupts which were automasked */ 4769 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4770 4771 /* 4772 * Do the following work outside of the gen_lock 4773 */ 4774 if (mp != NULL) { 4775 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp, 4776 rx_ring->ring_gen_num); 4777 } 4778 4779 if (tx_reschedule) { 4780 tx_ring->reschedule = B_FALSE; 4781 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle); 4782 tx_ring->stat_reschedule++; 4783 } 4784 4785 return (result); 4786 } 4787 4788 /* 4789 * ixgbe_intr_msi - Interrupt handler for MSI. 
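 *
 * MSI is never shared, so unlike the legacy handler this one always
 * claims the interrupt; the EICR bit assignments (bit 0 for rx
 * ring[0], bit 1 for tx ring[0]) are the same.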
4790 */ 4791 static uint_t 4792 ixgbe_intr_msi(void *arg1, void *arg2) 4793 { 4794 ixgbe_t *ixgbe = (ixgbe_t *)arg1; 4795 struct ixgbe_hw *hw = &ixgbe->hw; 4796 uint32_t eicr; 4797 4798 _NOTE(ARGUNUSED(arg2)); 4799 4800 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4801 4802 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) { 4803 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED); 4804 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4805 return (DDI_INTR_CLAIMED); 4806 } 4807 4808 /* 4809 * For MSI interrupt, we have only one vector, 4810 * so we have only one rx ring and one tx ring enabled. 4811 */ 4812 ASSERT(ixgbe->num_rx_rings == 1); 4813 ASSERT(ixgbe->num_tx_rings == 1); 4814 4815 /* 4816 * For MSI interrupt, rx rings[0] will use RTxQ[0]. 4817 */ 4818 if (eicr & 0x1) { 4819 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]); 4820 } 4821 4822 /* 4823 * For MSI interrupt, tx rings[0] will use RTxQ[1]. 4824 */ 4825 if (eicr & 0x2) { 4826 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]); 4827 } 4828 4829 /* any interrupt type other than tx/rx */ 4830 if (eicr & ixgbe->capab->other_intr) { 4831 mutex_enter(&ixgbe->gen_lock); 4832 switch (hw->mac.type) { 4833 case ixgbe_mac_82598EB: 4834 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4835 break; 4836 4837 case ixgbe_mac_82599EB: 4838 case ixgbe_mac_X540: 4839 case ixgbe_mac_X550: 4840 case ixgbe_mac_X550EM_x: 4841 ixgbe->eimc = IXGBE_82599_OTHER_INTR; 4842 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc); 4843 break; 4844 4845 default: 4846 break; 4847 } 4848 ixgbe_intr_other_work(ixgbe, eicr); 4849 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4850 mutex_exit(&ixgbe->gen_lock); 4851 } 4852 4853 /* re-enable the interrupts which were automasked */ 4854 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4855 4856 return (DDI_INTR_CLAIMED); 4857 } 4858 4859 /* 4860 * ixgbe_intr_msix - Interrupt handler for MSI-X. 
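 *
 * Each vector carries rx_map/tx_map/other_map bitmaps that were
 * filled in by ixgbe_map_intrs_to_vectors(); the handler walks the
 * set bits with bt_getlowbit() and services only the rings mapped to
 * the vector that fired.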
4861 */ 4862 static uint_t 4863 ixgbe_intr_msix(void *arg1, void *arg2) 4864 { 4865 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1; 4866 ixgbe_t *ixgbe = vect->ixgbe; 4867 struct ixgbe_hw *hw = &ixgbe->hw; 4868 uint32_t eicr; 4869 int r_idx = 0; 4870 4871 _NOTE(ARGUNUSED(arg2)); 4872 4873 /* 4874 * Clean each rx ring that has its bit set in the map 4875 */ 4876 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1)); 4877 while (r_idx >= 0) { 4878 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]); 4879 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 4880 (ixgbe->num_rx_rings - 1)); 4881 } 4882 4883 /* 4884 * Clean each tx ring that has its bit set in the map 4885 */ 4886 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1)); 4887 while (r_idx >= 0) { 4888 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]); 4889 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 4890 (ixgbe->num_tx_rings - 1)); 4891 } 4892 4893 4894 /* 4895 * Clean other interrupt (link change) that has its bit set in the map 4896 */ 4897 if (BT_TEST(vect->other_map, 0) == 1) { 4898 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 4899 4900 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != 4901 DDI_FM_OK) { 4902 ddi_fm_service_impact(ixgbe->dip, 4903 DDI_SERVICE_DEGRADED); 4904 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR); 4905 return (DDI_INTR_CLAIMED); 4906 } 4907 4908 /* 4909 * Check "other" cause bits: any interrupt type other than tx/rx 4910 */ 4911 if (eicr & ixgbe->capab->other_intr) { 4912 mutex_enter(&ixgbe->gen_lock); 4913 switch (hw->mac.type) { 4914 case ixgbe_mac_82598EB: 4915 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR); 4916 ixgbe_intr_other_work(ixgbe, eicr); 4917 break; 4918 4919 case ixgbe_mac_82599EB: 4920 case ixgbe_mac_X540: 4921 case ixgbe_mac_X550: 4922 case ixgbe_mac_X550EM_x: 4923 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE; 4924 ixgbe_intr_other_work(ixgbe, eicr); 4925 break; 4926 4927 default: 4928 break; 4929 } 4930 mutex_exit(&ixgbe->gen_lock); 4931 } 4932 4933 /* re-enable the interrupts which were automasked */ 4934 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims); 4935 } 4936 4937 return (DDI_INTR_CLAIMED); 4938 } 4939 4940 /* 4941 * ixgbe_alloc_intrs - Allocate interrupts for the driver. 4942 * 4943 * Normal sequence is to try MSI-X; if not successful, try MSI; 4944 * if not successful, try Legacy. 4945 * ixgbe->intr_force can be used to force sequence to start with 4946 * any of the 3 types. 4947 * If MSI-X is not used, number of tx/rx rings is forced to 1.
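 *
 * For example, setting ixgbe->intr_force to IXGBE_INTR_MSI skips the
 * MSI-X attempt entirely and the sequence starts at MSI.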
4948 */ 4949 static int 4950 ixgbe_alloc_intrs(ixgbe_t *ixgbe) 4951 { 4952 dev_info_t *devinfo; 4953 int intr_types; 4954 int rc; 4955 4956 devinfo = ixgbe->dip; 4957 4958 /* 4959 * Get supported interrupt types 4960 */ 4961 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 4962 4963 if (rc != DDI_SUCCESS) { 4964 ixgbe_log(ixgbe, 4965 "Get supported interrupt types failed: %d", rc); 4966 return (IXGBE_FAILURE); 4967 } 4968 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types); 4969 4970 ixgbe->intr_type = 0; 4971 4972 /* 4973 * Install MSI-X interrupts 4974 */ 4975 if ((intr_types & DDI_INTR_TYPE_MSIX) && 4976 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) { 4977 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX); 4978 if (rc == IXGBE_SUCCESS) 4979 return (IXGBE_SUCCESS); 4980 4981 ixgbe_log(ixgbe, 4982 "Allocate MSI-X failed, trying MSI interrupts..."); 4983 } 4984 4985 /* 4986 * MSI-X not used, force rings and groups to 1 4987 */ 4988 ixgbe->num_rx_rings = 1; 4989 ixgbe->num_rx_groups = 1; 4990 ixgbe->num_tx_rings = 1; 4991 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE; 4992 ixgbe_log(ixgbe, 4993 "MSI-X not used, force rings and groups number to 1"); 4994 4995 /* 4996 * Install MSI interrupts 4997 */ 4998 if ((intr_types & DDI_INTR_TYPE_MSI) && 4999 (ixgbe->intr_force <= IXGBE_INTR_MSI)) { 5000 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI); 5001 if (rc == IXGBE_SUCCESS) 5002 return (IXGBE_SUCCESS); 5003 5004 ixgbe_log(ixgbe, 5005 "Allocate MSI failed, trying Legacy interrupts..."); 5006 } 5007 5008 /* 5009 * Install legacy interrupts 5010 */ 5011 if (intr_types & DDI_INTR_TYPE_FIXED) { 5012 /* 5013 * Disallow legacy interrupts for X550. X550 has a silicon 5014 * bug which prevents Shared Legacy interrupts from working. 5015 * For details, please reference: 5016 * 5017 * Intel Ethernet Controller X550 Specification Update rev. 2.1 5018 * May 2016, erratum 22: PCIe Interrupt Status Bit 5019 */ 5020 if (ixgbe->hw.mac.type == ixgbe_mac_X550 || 5021 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x || 5022 ixgbe->hw.mac.type == ixgbe_mac_X550_vf || 5023 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x_vf) { 5024 ixgbe_log(ixgbe, 5025 "Legacy interrupts are not supported on this " 5026 "adapter. Please use MSI or MSI-X instead."); 5027 return (IXGBE_FAILURE); 5028 } 5029 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED); 5030 if (rc == IXGBE_SUCCESS) 5031 return (IXGBE_SUCCESS); 5032 5033 ixgbe_log(ixgbe, 5034 "Allocate Legacy interrupts failed"); 5035 } 5036 5037 /* 5038 * If none of the 3 types succeeded, return failure 5039 */ 5040 return (IXGBE_FAILURE); 5041 } 5042 5043 /* 5044 * ixgbe_alloc_intr_handles - Allocate interrupt handles. 5045 * 5046 * For legacy and MSI, only 1 handle is needed. For MSI-X, more 5047 * handles are requested, but the allocation may succeed with fewer 5048 * (the minimum is 1); the rx ring count is then adjusted to match 5049 * the number of vectors actually granted.
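 *
 * For example, with 8 rx and 8 tx rings the MSI-X request is
 * min(16, 8 + 8) = 16, further capped by capab->max_ring_vect; with
 * one rx group of 8 rings and only 4 vectors granted, num_rx_rings
 * is trimmed to 4 via ixgbe_setup_vmdq_rss_conf().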
5050 */ 5051 static int 5052 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type) 5053 { 5054 dev_info_t *devinfo; 5055 int request, count, actual; 5056 int minimum; 5057 int rc; 5058 uint32_t ring_per_group; 5059 5060 devinfo = ixgbe->dip; 5061 5062 switch (intr_type) { 5063 case DDI_INTR_TYPE_FIXED: 5064 request = 1; /* Request 1 legacy interrupt handle */ 5065 minimum = 1; 5066 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy"); 5067 break; 5068 5069 case DDI_INTR_TYPE_MSI: 5070 request = 1; /* Request 1 MSI interrupt handle */ 5071 minimum = 1; 5072 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI"); 5073 break; 5074 5075 case DDI_INTR_TYPE_MSIX: 5076 /* 5077 * The best number of vectors for the adapter is 5078 * (# rx rings + # tx rings); however, we will 5079 * limit the request number. 5080 */ 5081 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings); 5082 if (request > ixgbe->capab->max_ring_vect) 5083 request = ixgbe->capab->max_ring_vect; 5084 minimum = 1; 5085 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X"); 5086 break; 5087 5088 default: 5089 ixgbe_log(ixgbe, 5090 "invalid call to ixgbe_alloc_intr_handles(): %d\n", 5091 intr_type); 5092 return (IXGBE_FAILURE); 5093 } 5094 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d", 5095 request, minimum); 5096 5097 /* 5098 * Get number of supported interrupts 5099 */ 5100 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 5101 if ((rc != DDI_SUCCESS) || (count < minimum)) { 5102 ixgbe_log(ixgbe, 5103 "Get interrupt number failed. Return: %d, count: %d", 5104 rc, count); 5105 return (IXGBE_FAILURE); 5106 } 5107 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count); 5108 5109 actual = 0; 5110 ixgbe->intr_cnt = 0; 5111 ixgbe->intr_cnt_max = 0; 5112 ixgbe->intr_cnt_min = 0; 5113 5114 /* 5115 * Allocate an array of interrupt handles 5116 */ 5117 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t); 5118 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP); 5119 5120 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0, 5121 request, &actual, DDI_INTR_ALLOC_NORMAL); 5122 if (rc != DDI_SUCCESS) { 5123 ixgbe_log(ixgbe, "Allocate interrupts failed. " 5124 "return: %d, request: %d, actual: %d", 5125 rc, request, actual); 5126 goto alloc_handle_fail; 5127 } 5128 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual); 5129 5130 /* 5131 * upper/lower limit of interrupts 5132 */ 5133 ixgbe->intr_cnt = actual; 5134 ixgbe->intr_cnt_max = request; 5135 ixgbe->intr_cnt_min = minimum; 5136 5137 /* 5138 * The number of RSS rings per group should not exceed the number of 5139 * rx interrupt vectors; otherwise the rx ring count must be adjusted. 5140 */ 5141 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 5142 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0); 5143 if (actual < ring_per_group) { 5144 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual; 5145 ixgbe_setup_vmdq_rss_conf(ixgbe); 5146 } 5147 5148 /* 5149 * Now we know the actual number of vectors. Here we map the vectors 5150 * to the "other" interrupt, the rx rings and the tx rings.
5151 */ 5152 if (actual < minimum) { 5153 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d", 5154 actual); 5155 goto alloc_handle_fail; 5156 } 5157 5158 /* 5159 * Get priority for first vector, assume remaining are all the same 5160 */ 5161 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri); 5162 if (rc != DDI_SUCCESS) { 5163 ixgbe_log(ixgbe, 5164 "Get interrupt priority failed: %d", rc); 5165 goto alloc_handle_fail; 5166 } 5167 5168 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap); 5169 if (rc != DDI_SUCCESS) { 5170 ixgbe_log(ixgbe, 5171 "Get interrupt cap failed: %d", rc); 5172 goto alloc_handle_fail; 5173 } 5174 5175 ixgbe->intr_type = intr_type; 5176 5177 return (IXGBE_SUCCESS); 5178 5179 alloc_handle_fail: 5180 ixgbe_rem_intrs(ixgbe); 5181 5182 return (IXGBE_FAILURE); 5183 } 5184 5185 /* 5186 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type. 5187 * 5188 * Before adding the interrupt handlers, the interrupt vectors have 5189 * been allocated, and the rx/tx rings have also been allocated. 5190 */ 5191 static int 5192 ixgbe_add_intr_handlers(ixgbe_t *ixgbe) 5193 { 5194 int vector = 0; 5195 int rc; 5196 5197 switch (ixgbe->intr_type) { 5198 case DDI_INTR_TYPE_MSIX: 5199 /* 5200 * Add interrupt handler for all vectors 5201 */ 5202 for (vector = 0; vector < ixgbe->intr_cnt; vector++) { 5203 /* 5204 * install pointer to vect_map[vector] 5205 */ 5206 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5207 (ddi_intr_handler_t *)ixgbe_intr_msix, 5208 (void *)&ixgbe->vect_map[vector], NULL); 5209 5210 if (rc != DDI_SUCCESS) { 5211 ixgbe_log(ixgbe, 5212 "Add interrupt handler failed. " 5213 "return: %d, vector: %d", rc, vector); 5214 for (vector--; vector >= 0; vector--) { 5215 (void) ddi_intr_remove_handler( 5216 ixgbe->htable[vector]); 5217 } 5218 return (IXGBE_FAILURE); 5219 } 5220 } 5221 5222 break; 5223 5224 case DDI_INTR_TYPE_MSI: 5225 /* 5226 * Add interrupt handlers for the only vector 5227 */ 5228 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5229 (ddi_intr_handler_t *)ixgbe_intr_msi, 5230 (void *)ixgbe, NULL); 5231 5232 if (rc != DDI_SUCCESS) { 5233 ixgbe_log(ixgbe, 5234 "Add MSI interrupt handler failed: %d", rc); 5235 return (IXGBE_FAILURE); 5236 } 5237 5238 break; 5239 5240 case DDI_INTR_TYPE_FIXED: 5241 /* 5242 * Add interrupt handlers for the only vector 5243 */ 5244 rc = ddi_intr_add_handler(ixgbe->htable[vector], 5245 (ddi_intr_handler_t *)ixgbe_intr_legacy, 5246 (void *)ixgbe, NULL); 5247 5248 if (rc != DDI_SUCCESS) { 5249 ixgbe_log(ixgbe, 5250 "Add legacy interrupt handler failed: %d", rc); 5251 return (IXGBE_FAILURE); 5252 } 5253 5254 break; 5255 5256 default: 5257 return (IXGBE_FAILURE); 5258 } 5259 5260 return (IXGBE_SUCCESS); 5261 } 5262 5263 #pragma inline(ixgbe_map_rxring_to_vector) 5264 /* 5265 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector. 5266 */ 5267 static void 5268 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx) 5269 { 5270 /* 5271 * Set bit in map 5272 */ 5273 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx); 5274 5275 /* 5276 * Count bits set 5277 */ 5278 ixgbe->vect_map[v_idx].rxr_cnt++; 5279 5280 /* 5281 * Remember bit position 5282 */ 5283 ixgbe->rx_rings[r_idx].intr_vector = v_idx; 5284 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx; 5285 } 5286 5287 #pragma inline(ixgbe_map_txring_to_vector) 5288 /* 5289 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector. 
5290 */ 5291 static void 5292 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx) 5293 { 5294 /* 5295 * Set bit in map 5296 */ 5297 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx); 5298 5299 /* 5300 * Count bits set 5301 */ 5302 ixgbe->vect_map[v_idx].txr_cnt++; 5303 5304 /* 5305 * Remember bit position 5306 */ 5307 ixgbe->tx_rings[t_idx].intr_vector = v_idx; 5308 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx; 5309 } 5310 5311 /* 5312 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector 5313 * allocation register (IVAR). 5314 * cause: 5315 * -1 : other cause 5316 * 0 : rx 5317 * 1 : tx 5318 */ 5319 static void 5320 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector, 5321 int8_t cause) 5322 { 5323 struct ixgbe_hw *hw = &ixgbe->hw; 5324 u32 ivar, index; 5325 5326 switch (hw->mac.type) { 5327 case ixgbe_mac_82598EB: 5328 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5329 if (cause == -1) { 5330 cause = 0; 5331 } 5332 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5333 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5334 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3))); 5335 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3))); 5336 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5337 break; 5338 5339 case ixgbe_mac_82599EB: 5340 case ixgbe_mac_X540: 5341 case ixgbe_mac_X550: 5342 case ixgbe_mac_X550EM_x: 5343 if (cause == -1) { 5344 /* other causes */ 5345 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5346 index = (intr_alloc_entry & 1) * 8; 5347 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5348 ivar &= ~(0xFF << index); 5349 ivar |= (msix_vector << index); 5350 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5351 } else { 5352 /* tx or rx causes */ 5353 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 5354 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5355 ivar = IXGBE_READ_REG(hw, 5356 IXGBE_IVAR(intr_alloc_entry >> 1)); 5357 ivar &= ~(0xFF << index); 5358 ivar |= (msix_vector << index); 5359 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5360 ivar); 5361 } 5362 break; 5363 5364 default: 5365 break; 5366 } 5367 } 5368 5369 /* 5370 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of 5371 * given interrupt vector allocation register (IVAR). 
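 *
 * The IVAR layout matches ixgbe_setup_ivar() above: on 82599-class
 * MACs, for example, rx queue entry 5 (cause 0) occupies the byte at
 * bit offset 16 * (5 & 1) + 8 * 0 = 16 of IVAR(5 >> 1) = IVAR(2).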
5372 * cause: 5373 * -1 : other cause 5374 * 0 : rx 5375 * 1 : tx 5376 */ 5377 static void 5378 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 5379 { 5380 struct ixgbe_hw *hw = &ixgbe->hw; 5381 u32 ivar, index; 5382 5383 switch (hw->mac.type) { 5384 case ixgbe_mac_82598EB: 5385 if (cause == -1) { 5386 cause = 0; 5387 } 5388 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5389 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5390 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 * 5391 (intr_alloc_entry & 0x3))); 5392 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5393 break; 5394 5395 case ixgbe_mac_82599EB: 5396 case ixgbe_mac_X540: 5397 case ixgbe_mac_X550: 5398 case ixgbe_mac_X550EM_x: 5399 if (cause == -1) { 5400 /* other causes */ 5401 index = (intr_alloc_entry & 1) * 8; 5402 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5403 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 5404 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5405 } else { 5406 /* tx or rx causes */ 5407 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5408 ivar = IXGBE_READ_REG(hw, 5409 IXGBE_IVAR(intr_alloc_entry >> 1)); 5410 ivar |= (IXGBE_IVAR_ALLOC_VAL << index); 5411 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5412 ivar); 5413 } 5414 break; 5415 5416 default: 5417 break; 5418 } 5419 } 5420 5421 /* 5422 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of 5423 * the given interrupt vector allocation register (IVAR). 5424 * cause: 5425 * -1 : other cause 5426 * 0 : rx 5427 * 1 : tx 5428 */ 5429 static void 5430 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause) 5431 { 5432 struct ixgbe_hw *hw = &ixgbe->hw; 5433 u32 ivar, index; 5434 5435 switch (hw->mac.type) { 5436 case ixgbe_mac_82598EB: 5437 if (cause == -1) { 5438 cause = 0; 5439 } 5440 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F; 5441 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); 5442 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 * 5443 (intr_alloc_entry & 0x3))); 5444 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 5445 break; 5446 5447 case ixgbe_mac_82599EB: 5448 case ixgbe_mac_X540: 5449 case ixgbe_mac_X550: 5450 case ixgbe_mac_X550EM_x: 5451 if (cause == -1) { 5452 /* other causes */ 5453 index = (intr_alloc_entry & 1) * 8; 5454 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); 5455 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 5456 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); 5457 } else { 5458 /* tx or rx causes */ 5459 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause)); 5460 ivar = IXGBE_READ_REG(hw, 5461 IXGBE_IVAR(intr_alloc_entry >> 1)); 5462 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index); 5463 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1), 5464 ivar); 5465 } 5466 break; 5467 5468 default: 5469 break; 5470 } 5471 } 5472 5473 /* 5474 * Convert the driver-maintained rx ring index to the corresponding 5475 * rx ring index in h/w.
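 *
 * For example, in VMDq-RSS mode on an 82599-class MAC with 4 groups
 * of 2 rings each (num_rx_groups <= 32), sw ring 3 maps to hw ring
 * (3 / 2) * 4 + (3 % 2) = 5.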
5476 */ 5477 static uint32_t 5478 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index) 5479 { 5480 5481 struct ixgbe_hw *hw = &ixgbe->hw; 5482 uint32_t rx_ring_per_group, hw_rx_index; 5483 5484 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS || 5485 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) { 5486 return (sw_rx_index); 5487 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) { 5488 switch (hw->mac.type) { 5489 case ixgbe_mac_82598EB: 5490 return (sw_rx_index); 5491 5492 case ixgbe_mac_82599EB: 5493 case ixgbe_mac_X540: 5494 case ixgbe_mac_X550: 5495 case ixgbe_mac_X550EM_x: 5496 return (sw_rx_index * 2); 5497 5498 default: 5499 break; 5500 } 5501 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) { 5502 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups; 5503 5504 switch (hw->mac.type) { 5505 case ixgbe_mac_82598EB: 5506 hw_rx_index = (sw_rx_index / rx_ring_per_group) * 5507 16 + (sw_rx_index % rx_ring_per_group); 5508 return (hw_rx_index); 5509 5510 case ixgbe_mac_82599EB: 5511 case ixgbe_mac_X540: 5512 case ixgbe_mac_X550: 5513 case ixgbe_mac_X550EM_x: 5514 if (ixgbe->num_rx_groups > 32) { 5515 hw_rx_index = (sw_rx_index / 5516 rx_ring_per_group) * 2 + 5517 (sw_rx_index % rx_ring_per_group); 5518 } else { 5519 hw_rx_index = (sw_rx_index / 5520 rx_ring_per_group) * 4 + 5521 (sw_rx_index % rx_ring_per_group); 5522 } 5523 return (hw_rx_index); 5524 5525 default: 5526 break; 5527 } 5528 } 5529 5530 /* 5531 * Should never be reached. Just to keep the compiler happy. 5532 */ 5533 return (sw_rx_index); 5534 } 5535 5536 /* 5537 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors. 5538 * 5539 * For MSI-X, this maps the rx interrupts, tx interrupts and the other 5540 * interrupt to vectors [0 .. intr_cnt - 1]. 5541 */ 5542 static int 5543 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe) 5544 { 5545 int i, vector = 0; 5546 5547 /* initialize vector map */ 5548 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map)); 5549 for (i = 0; i < ixgbe->intr_cnt; i++) { 5550 ixgbe->vect_map[i].ixgbe = ixgbe; 5551 } 5552 5553 /* 5554 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0], 5555 * tx rings[0] on RTxQ[1]. 5556 */ 5557 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 5558 ixgbe_map_rxring_to_vector(ixgbe, 0, 0); 5559 ixgbe_map_txring_to_vector(ixgbe, 0, 1); 5560 return (IXGBE_SUCCESS); 5561 } 5562 5563 /* 5564 * Interrupts/vectors mapping for MSI-X 5565 */ 5566 5567 /* 5568 * Map other interrupt to vector 0, 5569 * Set bit in map and count the bits set. 5570 */ 5571 BT_SET(ixgbe->vect_map[vector].other_map, 0); 5572 ixgbe->vect_map[vector].other_cnt++; 5573 5574 /* 5575 * Map rx ring interrupts to vectors 5576 */ 5577 for (i = 0; i < ixgbe->num_rx_rings; i++) { 5578 ixgbe_map_rxring_to_vector(ixgbe, i, vector); 5579 vector = (vector + 1) % ixgbe->intr_cnt; 5580 } 5581 5582 /* 5583 * Map tx ring interrupts to vectors 5584 */ 5585 for (i = 0; i < ixgbe->num_tx_rings; i++) { 5586 ixgbe_map_txring_to_vector(ixgbe, i, vector); 5587 vector = (vector + 1) % ixgbe->intr_cnt; 5588 } 5589 5590 return (IXGBE_SUCCESS); 5591 } 5592 5593 /* 5594 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
5595 * 5596 * This relies on ring/vector mapping already set up in the 5597 * vect_map[] structures 5598 */ 5599 static void 5600 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe) 5601 { 5602 struct ixgbe_hw *hw = &ixgbe->hw; 5603 ixgbe_intr_vector_t *vect; /* vector bitmap */ 5604 int r_idx; /* ring index */ 5605 int v_idx; /* vector index */ 5606 uint32_t hw_index; 5607 5608 /* 5609 * Clear any previous entries 5610 */ 5611 switch (hw->mac.type) { 5612 case ixgbe_mac_82598EB: 5613 for (v_idx = 0; v_idx < 25; v_idx++) 5614 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 5615 break; 5616 5617 case ixgbe_mac_82599EB: 5618 case ixgbe_mac_X540: 5619 case ixgbe_mac_X550: 5620 case ixgbe_mac_X550EM_x: 5621 for (v_idx = 0; v_idx < 64; v_idx++) 5622 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0); 5623 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0); 5624 break; 5625 5626 default: 5627 break; 5628 } 5629 5630 /* 5631 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and 5632 * tx rings[0] will use RTxQ[1]. 5633 */ 5634 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) { 5635 ixgbe_setup_ivar(ixgbe, 0, 0, 0); 5636 ixgbe_setup_ivar(ixgbe, 0, 1, 1); 5637 return; 5638 } 5639 5640 /* 5641 * For MSI-X interrupt, "Other" is always on vector[0]. 5642 */ 5643 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1); 5644 5645 /* 5646 * For each interrupt vector, populate the IVAR table 5647 */ 5648 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) { 5649 vect = &ixgbe->vect_map[v_idx]; 5650 5651 /* 5652 * For each rx ring bit set 5653 */ 5654 r_idx = bt_getlowbit(vect->rx_map, 0, 5655 (ixgbe->num_rx_rings - 1)); 5656 5657 while (r_idx >= 0) { 5658 hw_index = ixgbe->rx_rings[r_idx].hw_index; 5659 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0); 5660 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1), 5661 (ixgbe->num_rx_rings - 1)); 5662 } 5663 5664 /* 5665 * For each tx ring bit set 5666 */ 5667 r_idx = bt_getlowbit(vect->tx_map, 0, 5668 (ixgbe->num_tx_rings - 1)); 5669 5670 while (r_idx >= 0) { 5671 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1); 5672 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1), 5673 (ixgbe->num_tx_rings - 1)); 5674 } 5675 } 5676 } 5677 5678 /* 5679 * ixgbe_rem_intr_handlers - Remove the interrupt handlers. 5680 */ 5681 static void 5682 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe) 5683 { 5684 int i; 5685 int rc; 5686 5687 for (i = 0; i < ixgbe->intr_cnt; i++) { 5688 rc = ddi_intr_remove_handler(ixgbe->htable[i]); 5689 if (rc != DDI_SUCCESS) { 5690 IXGBE_DEBUGLOG_1(ixgbe, 5691 "Remove intr handler failed: %d", rc); 5692 } 5693 } 5694 } 5695 5696 /* 5697 * ixgbe_rem_intrs - Remove the allocated interrupts. 5698 */ 5699 static void 5700 ixgbe_rem_intrs(ixgbe_t *ixgbe) 5701 { 5702 int i; 5703 int rc; 5704 5705 for (i = 0; i < ixgbe->intr_cnt; i++) { 5706 rc = ddi_intr_free(ixgbe->htable[i]); 5707 if (rc != DDI_SUCCESS) { 5708 IXGBE_DEBUGLOG_1(ixgbe, 5709 "Free intr failed: %d", rc); 5710 } 5711 } 5712 5713 kmem_free(ixgbe->htable, ixgbe->intr_size); 5714 ixgbe->htable = NULL; 5715 } 5716 5717 /* 5718 * ixgbe_enable_intrs - Enable all the ddi interrupts. 
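 *
 * If ddi_intr_get_cap() reported DDI_INTR_FLAG_BLOCK, all handles are
 * enabled with a single ddi_intr_block_enable() call; otherwise each
 * handle is enabled individually with ddi_intr_enable().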
5719 */ 5720 static int 5721 ixgbe_enable_intrs(ixgbe_t *ixgbe) 5722 { 5723 int i; 5724 int rc; 5725 5726 /* 5727 * Enable interrupts 5728 */ 5729 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 5730 /* 5731 * Call ddi_intr_block_enable() for MSI 5732 */ 5733 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt); 5734 if (rc != DDI_SUCCESS) { 5735 ixgbe_log(ixgbe, 5736 "Enable block intr failed: %d", rc); 5737 return (IXGBE_FAILURE); 5738 } 5739 } else { 5740 /* 5741 * Call ddi_intr_enable() for Legacy/MSI non block enable 5742 */ 5743 for (i = 0; i < ixgbe->intr_cnt; i++) { 5744 rc = ddi_intr_enable(ixgbe->htable[i]); 5745 if (rc != DDI_SUCCESS) { 5746 ixgbe_log(ixgbe, 5747 "Enable intr failed: %d", rc); 5748 return (IXGBE_FAILURE); 5749 } 5750 } 5751 } 5752 5753 return (IXGBE_SUCCESS); 5754 } 5755 5756 /* 5757 * ixgbe_disable_intrs - Disable all the interrupts. 5758 */ 5759 static int 5760 ixgbe_disable_intrs(ixgbe_t *ixgbe) 5761 { 5762 int i; 5763 int rc; 5764 5765 /* 5766 * Disable all interrupts 5767 */ 5768 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) { 5769 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt); 5770 if (rc != DDI_SUCCESS) { 5771 ixgbe_log(ixgbe, 5772 "Disable block intr failed: %d", rc); 5773 return (IXGBE_FAILURE); 5774 } 5775 } else { 5776 for (i = 0; i < ixgbe->intr_cnt; i++) { 5777 rc = ddi_intr_disable(ixgbe->htable[i]); 5778 if (rc != DDI_SUCCESS) { 5779 ixgbe_log(ixgbe, 5780 "Disable intr failed: %d", rc); 5781 return (IXGBE_FAILURE); 5782 } 5783 } 5784 } 5785 5786 return (IXGBE_SUCCESS); 5787 } 5788 5789 /* 5790 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware. 5791 */ 5792 static void 5793 ixgbe_get_hw_state(ixgbe_t *ixgbe) 5794 { 5795 struct ixgbe_hw *hw = &ixgbe->hw; 5796 ixgbe_link_speed speed = 0; 5797 boolean_t link_up = B_FALSE; 5798 uint32_t pcs1g_anlp = 0; 5799 5800 ASSERT(mutex_owned(&ixgbe->gen_lock)); 5801 ixgbe->param_lp_1000fdx_cap = 0; 5802 ixgbe->param_lp_100fdx_cap = 0; 5803 5804 /* check for link, don't wait */ 5805 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE); 5806 5807 /* 5808 * Update the observed Link Partner's capabilities. Not all adapters 5809 * can provide full information on the LP's capable speeds, so we 5810 * provide what we can. 5811 */ 5812 if (link_up) { 5813 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 5814 5815 ixgbe->param_lp_1000fdx_cap = 5816 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5817 ixgbe->param_lp_100fdx_cap = 5818 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0; 5819 } 5820 5821 /* 5822 * Update GLD's notion of the adapter's currently advertised speeds. 5823 * Since the common code doesn't always record the current autonegotiate 5824 * settings in the phy struct for all parts (specifically, adapters with 5825 * SFPs) we first test to see if it is 0, and if so, we fall back to 5826 * using the adapter's speed capabilities which we saved during instance 5827 * init in ixgbe_init_params(). 5828 * 5829 * Adapters with SFPs will always be shown as advertising all of their 5830 * supported speeds, and adapters with baseT PHYs (where the phy struct 5831 * is maintained by the common code) will always have a factual view of 5832 * their currently-advertised speeds. In the case of SFPs, this is 5833 * acceptable as we default to advertising all speeds that the adapter 5834 * claims to support, and those properties are immutable; unlike on 5835 * baseT (copper) PHYs, where speeds can be enabled or disabled at will. 
5836 */ 5837 speed = hw->phy.autoneg_advertised; 5838 if (speed == 0) 5839 speed = ixgbe->speeds_supported; 5840 5841 ixgbe->param_adv_10000fdx_cap = 5842 (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 1 : 0; 5843 ixgbe->param_adv_5000fdx_cap = 5844 (speed & IXGBE_LINK_SPEED_5GB_FULL) ? 1 : 0; 5845 ixgbe->param_adv_2500fdx_cap = 5846 (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 1 : 0; 5847 ixgbe->param_adv_1000fdx_cap = 5848 (speed & IXGBE_LINK_SPEED_1GB_FULL) ? 1 : 0; 5849 ixgbe->param_adv_100fdx_cap = 5850 (speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0; 5851 } 5852 5853 /* 5854 * ixgbe_get_driver_control - Notify that driver is in control of device. 5855 */ 5856 static void 5857 ixgbe_get_driver_control(struct ixgbe_hw *hw) 5858 { 5859 uint32_t ctrl_ext; 5860 5861 /* 5862 * Notify firmware that driver is in control of device 5863 */ 5864 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5865 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; 5866 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5867 } 5868 5869 /* 5870 * ixgbe_release_driver_control - Notify that driver is no longer in control 5871 * of device. 5872 */ 5873 static void 5874 ixgbe_release_driver_control(struct ixgbe_hw *hw) 5875 { 5876 uint32_t ctrl_ext; 5877 5878 /* 5879 * Notify firmware that driver is no longer in control of device 5880 */ 5881 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 5882 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; 5883 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 5884 } 5885 5886 /* 5887 * ixgbe_atomic_reserve - Atomic decrease operation. 5888 */ 5889 int 5890 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n) 5891 { 5892 uint32_t oldval; 5893 uint32_t newval; 5894 5895 /* 5896 * ATOMICALLY 5897 */ 5898 do { 5899 oldval = *count_p; 5900 if (oldval < n) 5901 return (-1); 5902 newval = oldval - n; 5903 } while (atomic_cas_32(count_p, oldval, newval) != oldval); 5904 5905 return (newval); 5906 } 5907 5908 /* 5909 * ixgbe_mc_table_itr - Traverse the entries in the multicast table. 5910 */ 5911 static uint8_t * 5912 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq) 5913 { 5914 uint8_t *addr = *upd_ptr; 5915 uint8_t *new_ptr; 5916 5917 _NOTE(ARGUNUSED(hw)); 5918 _NOTE(ARGUNUSED(vmdq)); 5919 5920 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; 5921 *upd_ptr = new_ptr; 5922 return (addr); 5923 } 5924 5925 /* 5926 * FMA support 5927 */ 5928 int 5929 ixgbe_check_acc_handle(ddi_acc_handle_t handle) 5930 { 5931 ddi_fm_error_t de; 5932 5933 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 5934 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 5935 return (de.fme_status); 5936 } 5937 5938 int 5939 ixgbe_check_dma_handle(ddi_dma_handle_t handle) 5940 { 5941 ddi_fm_error_t de; 5942 5943 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 5944 return (de.fme_status); 5945 } 5946 5947 /* 5948 * ixgbe_fm_error_cb - The IO fault service error handling callback function. 5949 */ 5950 static int 5951 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 5952 { 5953 _NOTE(ARGUNUSED(impl_data)); 5954 /* 5955 * as the driver can always deal with an error in any dma or 5956 * access handle, we can just return the fme_status value. 
5957 */ 5958 pci_ereport_post(dip, err, NULL); 5959 return (err->fme_status); 5960 } 5961 5962 static void 5963 ixgbe_fm_init(ixgbe_t *ixgbe) 5964 { 5965 ddi_iblock_cookie_t iblk; 5966 int fma_dma_flag; 5967 5968 /* 5969 * Only register with IO Fault Services if we have some capability 5970 */ 5971 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 5972 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 5973 } else { 5974 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 5975 } 5976 5977 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 5978 fma_dma_flag = 1; 5979 } else { 5980 fma_dma_flag = 0; 5981 } 5982 5983 ixgbe_set_fma_flags(fma_dma_flag); 5984 5985 if (ixgbe->fm_capabilities) { 5986 5987 /* 5988 * Register capabilities with IO Fault Services 5989 */ 5990 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk); 5991 5992 /* 5993 * Initialize pci ereport capabilities if ereport capable 5994 */ 5995 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 5996 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 5997 pci_ereport_setup(ixgbe->dip); 5998 5999 /* 6000 * Register error callback if error callback capable 6001 */ 6002 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 6003 ddi_fm_handler_register(ixgbe->dip, 6004 ixgbe_fm_error_cb, (void*) ixgbe); 6005 } 6006 } 6007 6008 static void 6009 ixgbe_fm_fini(ixgbe_t *ixgbe) 6010 { 6011 /* 6012 * Only unregister FMA capabilities if they are registered 6013 */ 6014 if (ixgbe->fm_capabilities) { 6015 6016 /* 6017 * Release any resources allocated by pci_ereport_setup() 6018 */ 6019 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) || 6020 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 6021 pci_ereport_teardown(ixgbe->dip); 6022 6023 /* 6024 * Un-register error callback if error callback capable 6025 */ 6026 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities)) 6027 ddi_fm_handler_unregister(ixgbe->dip); 6028 6029 /* 6030 * Unregister from IO Fault Service 6031 */ 6032 ddi_fm_fini(ixgbe->dip); 6033 } 6034 } 6035 6036 void 6037 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail) 6038 { 6039 uint64_t ena; 6040 char buf[FM_MAX_CLASS]; 6041 6042 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 6043 ena = fm_ena_generate(0, FM_ENA_FMT1); 6044 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) { 6045 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP, 6046 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 6047 } 6048 } 6049 6050 static int 6051 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num) 6052 { 6053 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh; 6054 6055 mutex_enter(&rx_ring->rx_lock); 6056 rx_ring->ring_gen_num = mr_gen_num; 6057 mutex_exit(&rx_ring->rx_lock); 6058 return (0); 6059 } 6060 6061 /* 6062 * Get the global ring index from a ring index within a group. 6063 */ 6064 static int 6065 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex) 6066 { 6067 ixgbe_rx_ring_t *rx_ring; 6068 int i; 6069 6070 for (i = 0; i < ixgbe->num_rx_rings; i++) { 6071 rx_ring = &ixgbe->rx_rings[i]; 6072 if (rx_ring->group_index == gindex) 6073 rindex--; 6074 if (rindex < 0) 6075 return (i); 6076 } 6077 6078 return (-1); 6079 } 6080 6081 /* 6082 * Callback function for MAC layer to register all rings.
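 *
 * For an rx ring this fills in the mri_start, mri_poll and mri_stat
 * entry points plus the ring's interrupt enable/disable hooks; for a
 * tx ring it fills in mri_tx and mri_stat. With MSI or MSI-X, the
 * ring's DDI interrupt handle is also passed along in mi_ddi_handle.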
6083 */ 6084 /* ARGSUSED */ 6085 void 6086 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index, 6087 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh) 6088 { 6089 ixgbe_t *ixgbe = (ixgbe_t *)arg; 6090 mac_intr_t *mintr = &infop->mri_intr; 6091 6092 switch (rtype) { 6093 case MAC_RING_TYPE_RX: { 6094 /* 6095 * 'index' is the ring index within the group. 6096 * Need to get the global ring index by searching in groups. 6097 */ 6098 int global_ring_index = ixgbe_get_rx_ring_index( 6099 ixgbe, group_index, ring_index); 6100 6101 ASSERT(global_ring_index >= 0); 6102 6103 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index]; 6104 rx_ring->ring_handle = rh; 6105 6106 infop->mri_driver = (mac_ring_driver_t)rx_ring; 6107 infop->mri_start = ixgbe_ring_start; 6108 infop->mri_stop = NULL; 6109 infop->mri_poll = ixgbe_ring_rx_poll; 6110 infop->mri_stat = ixgbe_rx_ring_stat; 6111 6112 mintr->mi_handle = (mac_intr_handle_t)rx_ring; 6113 mintr->mi_enable = ixgbe_rx_ring_intr_enable; 6114 mintr->mi_disable = ixgbe_rx_ring_intr_disable; 6115 if (ixgbe->intr_type & 6116 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 6117 mintr->mi_ddi_handle = 6118 ixgbe->htable[rx_ring->intr_vector]; 6119 } 6120 6121 break; 6122 } 6123 case MAC_RING_TYPE_TX: { 6124 ASSERT(group_index == -1); 6125 ASSERT(ring_index < ixgbe->num_tx_rings); 6126 6127 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index]; 6128 tx_ring->ring_handle = rh; 6129 6130 infop->mri_driver = (mac_ring_driver_t)tx_ring; 6131 infop->mri_start = NULL; 6132 infop->mri_stop = NULL; 6133 infop->mri_tx = ixgbe_ring_tx; 6134 infop->mri_stat = ixgbe_tx_ring_stat; 6135 if (ixgbe->intr_type & 6136 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) { 6137 mintr->mi_ddi_handle = 6138 ixgbe->htable[tx_ring->intr_vector]; 6139 } 6140 break; 6141 } 6142 default: 6143 break; 6144 } 6145 } 6146 6147 /* 6148 * Callback function for MAC layer to register all groups. 6149 */ 6150 void 6151 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index, 6152 mac_group_info_t *infop, mac_group_handle_t gh) 6153 { 6154 ixgbe_t *ixgbe = (ixgbe_t *)arg; 6155 6156 switch (rtype) { 6157 case MAC_RING_TYPE_RX: { 6158 ixgbe_rx_group_t *rx_group; 6159 6160 rx_group = &ixgbe->rx_groups[index]; 6161 rx_group->group_handle = gh; 6162 6163 infop->mgi_driver = (mac_group_driver_t)rx_group; 6164 infop->mgi_start = NULL; 6165 infop->mgi_stop = NULL; 6166 infop->mgi_addmac = ixgbe_addmac; 6167 infop->mgi_remmac = ixgbe_remmac; 6168 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups); 6169 6170 break; 6171 } 6172 case MAC_RING_TYPE_TX: 6173 break; 6174 default: 6175 break; 6176 } 6177 } 6178 6179 /* 6180 * Enable the interrupt on the specified rx ring. 6181 */ 6182 int 6183 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh) 6184 { 6185 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh; 6186 ixgbe_t *ixgbe = rx_ring->ixgbe; 6187 int r_idx = rx_ring->index; 6188 int hw_r_idx = rx_ring->hw_index; 6189 int v_idx = rx_ring->intr_vector; 6190 6191 mutex_enter(&ixgbe->gen_lock); 6192 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) { 6193 mutex_exit(&ixgbe->gen_lock); 6194 /* 6195 * Simply return 0. 6196 * Interrupts are being adjusted. ixgbe_intr_adjust() 6197 * will eventually re-enable the interrupt when it's 6198 * done with the adjustment. 6199 */ 6200 return (0); 6201 } 6202 6203 /* 6204 * Enable the interrupt by setting the VAL bit of the given interrupt 6205 * vector allocation register (IVAR).
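 * The ring's bit is also set again in the vector's rx_map, and EICS
 * is written to trigger an immediate interrupt on this vector, so a
 * ring with packets already pending is serviced right away.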
/*
 * Enable interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int hw_r_idx = rx_ring->hw_index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
		mutex_exit(&ixgbe->gen_lock);
		/*
		 * Simply return 0.
		 * Interrupts are being adjusted. ixgbe_intr_adjust()
		 * will eventually re-enable the interrupt when it's
		 * done with the adjustment.
		 */
		return (0);
	}

	/*
	 * Enable the interrupt by setting the VAL bit of the given
	 * interrupt vector allocation register (IVAR).
	 */
	ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);

	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);

	/*
	 * Trigger a Rx interrupt on this ring
	 */
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
	IXGBE_WRITE_FLUSH(&ixgbe->hw);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}

/*
 * Disable interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int hw_r_idx = rx_ring->hw_index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
		mutex_exit(&ixgbe->gen_lock);
		/*
		 * Simply return 0.
		 * In the rare case where an interrupt is being
		 * disabled while interrupts are being adjusted,
		 * we don't fail the operation. No interrupts will
		 * be generated while they are adjusted, and
		 * ixgbe_intr_adjust() will cause the interrupts
		 * to be re-enabled once it completes. Note that
		 * in this case, packets may be delivered to the
		 * stack via interrupts before ixgbe_rx_ring_intr_enable()
		 * is called again. This is acceptable since interrupt
		 * adjustment is infrequent, and the stack will be
		 * able to handle these packets.
		 */
		return (0);
	}

	/*
	 * Disable the interrupt by clearing the VAL bit of the given
	 * interrupt vector allocation register (IVAR).
	 */
	ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);

	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}

/*
 * Add a mac address.
 */
static int
ixgbe_addmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot, i;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	if (ixgbe->unicst_avail == 0) {
		/* no slots available */
		mutex_exit(&ixgbe->gen_lock);
		return (ENOSPC);
	}

	/*
	 * The first ixgbe->num_rx_groups slots are reserved, one for each
	 * group. The remaining slots are shared by all groups. When adding
	 * a MAC address, the group's reserved slot is checked first, then
	 * the shared slots are searched.
	 */
	slot = -1;
	if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
		for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
			if (ixgbe->unicst_addr[i].mac.set == 0) {
				slot = i;
				break;
			}
		}
	} else {
		slot = rx_group->index;
	}

	if (slot == -1) {
		/* no slots available */
		mutex_exit(&ixgbe->gen_lock);
		return (ENOSPC);
	}

	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
	(void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
	    rx_group->index, IXGBE_RAH_AV);
	ixgbe->unicst_addr[slot].mac.set = 1;
	ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
	ixgbe->unicst_avail--;

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
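/*
 * Worked example of the slot scheme above (numbers illustrative, not
 * taken from the source): with num_rx_groups = 4 and unicst_total = 16,
 * slots 0..3 are reserved, one per group, and slots 4..15 are shared.
 * The first address added to group 2 takes reserved slot 2; any further
 * addresses for that group take the lowest free shared slot. In every
 * case ixgbe_set_rar() ties the chosen slot to the group's VMDq pool
 * via rx_group->index.
 */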
/*
 * Remove a mac address.
 */
static int
ixgbe_remmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	slot = ixgbe_unicst_find(ixgbe, mac_addr);
	if (slot == -1) {
		mutex_exit(&ixgbe->gen_lock);
		return (EINVAL);
	}

	if (ixgbe->unicst_addr[slot].mac.set == 0) {
		mutex_exit(&ixgbe->gen_lock);
		return (EINVAL);
	}

	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
	(void) ixgbe_clear_rar(hw, slot);
	ixgbe->unicst_addr[slot].mac.set = 0;
	ixgbe->unicst_avail++;

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
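/*
 * Illustration only: a hypothetical debug helper (not part of the
 * driver) that spells out the bookkeeping invariant ixgbe_addmac() and
 * ixgbe_remmac() maintain together: unicst_avail always equals the
 * number of slots whose mac.set flag is clear. A caller would hold
 * gen_lock, as both functions above do.
 */
#if 0
static void
ixgbe_unicst_verify(ixgbe_t *ixgbe)
{
	int i, nfree = 0;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/* Count slots not currently holding an address */
	for (i = 0; i < ixgbe->unicst_total; i++) {
		if (ixgbe->unicst_addr[i].mac.set == 0)
			nfree++;
	}

	ASSERT(nfree == ixgbe->unicst_avail);
}
#endif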